code
stringlengths 13
1.2M
| order_type
stringclasses 1
value | original_example
dict | step_ids
listlengths 1
5
|
---|---|---|---|
from django.test import TestCase, SimpleTestCase
from django.urls import reverse, resolve
from .views import profile, order_history
""" Url Testing """
class TestUrls(SimpleTestCase):
def test_profile_resolves(self):
url = reverse('profile')
self.assertEqual(resolve(url).func, profile)
def test_order_history_resolves(self):
url = reverse('order_history', args='1')
self.assertEqual(resolve(url).func, order_history)
|
normal
|
{
"blob_id": "5dc6b54357df87077d8159192cd52697b2616db8",
"index": 9186,
"step-1": "<mask token>\n\n\nclass TestUrls(SimpleTestCase):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestUrls(SimpleTestCase):\n\n def test_profile_resolves(self):\n url = reverse('profile')\n self.assertEqual(resolve(url).func, profile)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestUrls(SimpleTestCase):\n\n def test_profile_resolves(self):\n url = reverse('profile')\n self.assertEqual(resolve(url).func, profile)\n\n def test_order_history_resolves(self):\n url = reverse('order_history', args='1')\n self.assertEqual(resolve(url).func, order_history)\n",
"step-4": "from django.test import TestCase, SimpleTestCase\nfrom django.urls import reverse, resolve\nfrom .views import profile, order_history\n<mask token>\n\n\nclass TestUrls(SimpleTestCase):\n\n def test_profile_resolves(self):\n url = reverse('profile')\n self.assertEqual(resolve(url).func, profile)\n\n def test_order_history_resolves(self):\n url = reverse('order_history', args='1')\n self.assertEqual(resolve(url).func, order_history)\n",
"step-5": "from django.test import TestCase, SimpleTestCase\nfrom django.urls import reverse, resolve\nfrom .views import profile, order_history\n\n\"\"\" Url Testing \"\"\"\n\nclass TestUrls(SimpleTestCase):\n\n def test_profile_resolves(self):\n url = reverse('profile')\n self.assertEqual(resolve(url).func, profile)\n\n def test_order_history_resolves(self):\n url = reverse('order_history', args='1')\n self.assertEqual(resolve(url).func, order_history)\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import sys
a = 3
b = 4
c = 5.66
d = 8.0
e = complex(c,d)
f = complex(float(a),float(b))
print("a is type:",type(a))
print("c is type:",type(c))
print("e is type:",type(e))
print(a + b)
print(d / c)
print(b / a)
#2个除约成整型
print(b // a)
print(e)
print(e + f)
print(sys.float_info)
|
normal
|
{
"blob_id": "2876c9f8db0395143b165b855b22e364e3cc8121",
"index": 9008,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('a is type:', type(a))\nprint('c is type:', type(c))\nprint('e is type:', type(e))\nprint(a + b)\nprint(d / c)\nprint(b / a)\nprint(b // a)\nprint(e)\nprint(e + f)\nprint(sys.float_info)\n",
"step-3": "<mask token>\na = 3\nb = 4\nc = 5.66\nd = 8.0\ne = complex(c, d)\nf = complex(float(a), float(b))\nprint('a is type:', type(a))\nprint('c is type:', type(c))\nprint('e is type:', type(e))\nprint(a + b)\nprint(d / c)\nprint(b / a)\nprint(b // a)\nprint(e)\nprint(e + f)\nprint(sys.float_info)\n",
"step-4": "import sys\na = 3\nb = 4\nc = 5.66\nd = 8.0\ne = complex(c, d)\nf = complex(float(a), float(b))\nprint('a is type:', type(a))\nprint('c is type:', type(c))\nprint('e is type:', type(e))\nprint(a + b)\nprint(d / c)\nprint(b / a)\nprint(b // a)\nprint(e)\nprint(e + f)\nprint(sys.float_info)\n",
"step-5": "import sys\n\na = 3\nb = 4\n\nc = 5.66\nd = 8.0\n\ne = complex(c,d)\nf = complex(float(a),float(b))\n\nprint(\"a is type:\",type(a))\nprint(\"c is type:\",type(c))\nprint(\"e is type:\",type(e))\n\nprint(a + b)\nprint(d / c)\n\nprint(b / a)\n#2个除约成整型\nprint(b // a)\n\nprint(e)\nprint(e + f)\n\nprint(sys.float_info)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- encoding: utf-8 -*-
"""
Created by eniocc at 11/10/2020
"""
import ctypes
from py_dss_interface.models.Base import Base
class MonitorsS(Base):
"""
This interface can be used to read/write certain properties of the active DSS object.
The structure of the interface is as follows:
CStr MonitorsS(int32_t Parameter, CStr Argument);
This interface returns a string according to the number sent in the variable “parameter”. The parameter can be
one of the following.
"""
def monitors_file_name(self) -> str:
"""Returns the name of the CSV file associated with active monitor."""
result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(0), ctypes.c_int32(0)))
return result.value.decode('ascii')
def monitors_read_name(self) -> str:
"""Returns the active Monitor object by name."""
result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(1), ctypes.c_int32(0)))
return result.value.decode('ascii')
def monitors_write_name(self, argument) -> str:
"""Sets the active Monitor object by name."""
result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(2), argument.encode('ascii')))
return result.value.decode('ascii')
def monitors_read_element(self) -> str:
"""Returns the full name of element being monitored by the active Monitor."""
result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(3), ctypes.c_int32(0)))
return result.value.decode('ascii')
def monitors_write_element(self, argument) -> str:
"""Sets the full name of element being monitored by the active Monitor."""
result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(4), argument.encode('ascii')))
return result.value.decode('ascii')
|
normal
|
{
"blob_id": "f6f0dcb806fbc1e14c0907dd500fdc6a609a19f7",
"index": 5598,
"step-1": "<mask token>\n\n\nclass MonitorsS(Base):\n <mask token>\n <mask token>\n <mask token>\n\n def monitors_write_name(self, argument) ->str:\n \"\"\"Sets the active Monitor object by name.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(2),\n argument.encode('ascii')))\n return result.value.decode('ascii')\n <mask token>\n\n def monitors_write_element(self, argument) ->str:\n \"\"\"Sets the full name of element being monitored by the active Monitor.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(4),\n argument.encode('ascii')))\n return result.value.decode('ascii')\n",
"step-2": "<mask token>\n\n\nclass MonitorsS(Base):\n <mask token>\n\n def monitors_file_name(self) ->str:\n \"\"\"Returns the name of the CSV file associated with active monitor.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(0),\n ctypes.c_int32(0)))\n return result.value.decode('ascii')\n <mask token>\n\n def monitors_write_name(self, argument) ->str:\n \"\"\"Sets the active Monitor object by name.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(2),\n argument.encode('ascii')))\n return result.value.decode('ascii')\n <mask token>\n\n def monitors_write_element(self, argument) ->str:\n \"\"\"Sets the full name of element being monitored by the active Monitor.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(4),\n argument.encode('ascii')))\n return result.value.decode('ascii')\n",
"step-3": "<mask token>\n\n\nclass MonitorsS(Base):\n <mask token>\n\n def monitors_file_name(self) ->str:\n \"\"\"Returns the name of the CSV file associated with active monitor.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(0),\n ctypes.c_int32(0)))\n return result.value.decode('ascii')\n\n def monitors_read_name(self) ->str:\n \"\"\"Returns the active Monitor object by name.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(1),\n ctypes.c_int32(0)))\n return result.value.decode('ascii')\n\n def monitors_write_name(self, argument) ->str:\n \"\"\"Sets the active Monitor object by name.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(2),\n argument.encode('ascii')))\n return result.value.decode('ascii')\n\n def monitors_read_element(self) ->str:\n \"\"\"Returns the full name of element being monitored by the active Monitor.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(3),\n ctypes.c_int32(0)))\n return result.value.decode('ascii')\n\n def monitors_write_element(self, argument) ->str:\n \"\"\"Sets the full name of element being monitored by the active Monitor.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(4),\n argument.encode('ascii')))\n return result.value.decode('ascii')\n",
"step-4": "<mask token>\n\n\nclass MonitorsS(Base):\n \"\"\"\n This interface can be used to read/write certain properties of the active DSS object.\n\n The structure of the interface is as follows:\n CStr MonitorsS(int32_t Parameter, CStr Argument);\n\n This interface returns a string according to the number sent in the variable “parameter”. The parameter can be\n one of the following.\n \"\"\"\n\n def monitors_file_name(self) ->str:\n \"\"\"Returns the name of the CSV file associated with active monitor.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(0),\n ctypes.c_int32(0)))\n return result.value.decode('ascii')\n\n def monitors_read_name(self) ->str:\n \"\"\"Returns the active Monitor object by name.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(1),\n ctypes.c_int32(0)))\n return result.value.decode('ascii')\n\n def monitors_write_name(self, argument) ->str:\n \"\"\"Sets the active Monitor object by name.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(2),\n argument.encode('ascii')))\n return result.value.decode('ascii')\n\n def monitors_read_element(self) ->str:\n \"\"\"Returns the full name of element being monitored by the active Monitor.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(3),\n ctypes.c_int32(0)))\n return result.value.decode('ascii')\n\n def monitors_write_element(self, argument) ->str:\n \"\"\"Sets the full name of element being monitored by the active Monitor.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(4),\n argument.encode('ascii')))\n return result.value.decode('ascii')\n",
"step-5": "# -*- encoding: utf-8 -*-\n\"\"\"\n Created by eniocc at 11/10/2020\n\"\"\"\nimport ctypes\n\nfrom py_dss_interface.models.Base import Base\n\n\nclass MonitorsS(Base):\n \"\"\"\n This interface can be used to read/write certain properties of the active DSS object.\n\n The structure of the interface is as follows:\n CStr MonitorsS(int32_t Parameter, CStr Argument);\n\n This interface returns a string according to the number sent in the variable “parameter”. The parameter can be\n one of the following.\n \"\"\"\n\n def monitors_file_name(self) -> str:\n \"\"\"Returns the name of the CSV file associated with active monitor.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(0), ctypes.c_int32(0)))\n return result.value.decode('ascii')\n\n def monitors_read_name(self) -> str:\n \"\"\"Returns the active Monitor object by name.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(1), ctypes.c_int32(0)))\n return result.value.decode('ascii')\n\n def monitors_write_name(self, argument) -> str:\n \"\"\"Sets the active Monitor object by name.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(2), argument.encode('ascii')))\n return result.value.decode('ascii')\n\n def monitors_read_element(self) -> str:\n \"\"\"Returns the full name of element being monitored by the active Monitor.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(3), ctypes.c_int32(0)))\n return result.value.decode('ascii')\n\n def monitors_write_element(self, argument) -> str:\n \"\"\"Sets the full name of element being monitored by the active Monitor.\"\"\"\n result = ctypes.c_char_p(self.dss_obj.MonitorsS(ctypes.c_int32(4), argument.encode('ascii')))\n return result.value.decode('ascii')\n",
"step-ids": [
3,
4,
6,
7,
9
]
}
|
[
3,
4,
6,
7,
9
] |
def say_hi(argument):
return f"Hello {argument}"
def call_func(some_func, argument):
return some_func(argument)
def main(argument):
"""docstring"""
return call_func(say_hi, argument)
if __name__ == "__main__":
print(main(1))
|
normal
|
{
"blob_id": "2a3c3112122dee5574a1569155287ea3e5f8c7b2",
"index": 6120,
"step-1": "<mask token>\n\n\ndef call_func(some_func, argument):\n return some_func(argument)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef call_func(some_func, argument):\n return some_func(argument)\n\n\ndef main(argument):\n \"\"\"docstring\"\"\"\n return call_func(say_hi, argument)\n\n\n<mask token>\n",
"step-3": "def say_hi(argument):\n return f'Hello {argument}'\n\n\ndef call_func(some_func, argument):\n return some_func(argument)\n\n\ndef main(argument):\n \"\"\"docstring\"\"\"\n return call_func(say_hi, argument)\n\n\n<mask token>\n",
"step-4": "def say_hi(argument):\n return f'Hello {argument}'\n\n\ndef call_func(some_func, argument):\n return some_func(argument)\n\n\ndef main(argument):\n \"\"\"docstring\"\"\"\n return call_func(say_hi, argument)\n\n\nif __name__ == '__main__':\n print(main(1))\n",
"step-5": "def say_hi(argument):\n return f\"Hello {argument}\"\n\ndef call_func(some_func, argument):\n return some_func(argument)\n\ndef main(argument):\n \"\"\"docstring\"\"\"\n return call_func(say_hi, argument)\n\nif __name__ == \"__main__\":\n print(main(1))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
"""
Faça um algoritmo que solicita ao usuário as notas de três provas. Calcule a média aritmética e
informe se o aluno foi Aprovado ou Reprovado (o aluno é considerado aprovado com a média igual ou superior a 6).
"""
nota1 = float(input("Digite sua primeira nota: "))
nota2 = float(input("Digite sua segunda nota: "))
nota3 = float(input("Digite sua terceira nota: "))
media = (nota1 + nota2 + nota3)/3
if media >= 6:
print("Parabéns!! Você foi aprovado.")
else:
print("Que pena!! Você foi reprovado.")
|
normal
|
{
"blob_id": "033d1b39dd3ebaa81c8c6c52386909acf076ef47",
"index": 2011,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif media >= 6:\n print('Parabéns!! Você foi aprovado.')\nelse:\n print('Que pena!! Você foi reprovado.')\n",
"step-3": "<mask token>\nnota1 = float(input('Digite sua primeira nota: '))\nnota2 = float(input('Digite sua segunda nota: '))\nnota3 = float(input('Digite sua terceira nota: '))\nmedia = (nota1 + nota2 + nota3) / 3\nif media >= 6:\n print('Parabéns!! Você foi aprovado.')\nelse:\n print('Que pena!! Você foi reprovado.')\n",
"step-4": "\"\"\"\r\nFaça um algoritmo que solicita ao usuário as notas de três provas. Calcule a média aritmética e\r\ninforme se o aluno foi Aprovado ou Reprovado (o aluno é considerado aprovado com a média igual ou superior a 6).\r\n\"\"\"\r\n\r\nnota1 = float(input(\"Digite sua primeira nota: \"))\r\nnota2 = float(input(\"Digite sua segunda nota: \"))\r\nnota3 = float(input(\"Digite sua terceira nota: \"))\r\n\r\nmedia = (nota1 + nota2 + nota3)/3\r\n\r\nif media >= 6:\r\n print(\"Parabéns!! Você foi aprovado.\")\r\nelse:\r\n print(\"Que pena!! Você foi reprovado.\")\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
# -*-coding:utf-8-*-
__author__ = '李晓波'
from linux import sysinfo
#调用相应收集处理函数
def LinuxSysInfo():
#print __file__
return sysinfo.collect()
def WindowsSysInfo():
from windows import sysinfo as win_sysinfo
return win_sysinfo.collect()
|
normal
|
{
"blob_id": "30a2e4aa88b286179e2870205e90fab4a7474e12",
"index": 2969,
"step-1": "<mask token>\n\n\ndef LinuxSysInfo():\n return sysinfo.collect()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef LinuxSysInfo():\n return sysinfo.collect()\n\n\ndef WindowsSysInfo():\n from windows import sysinfo as win_sysinfo\n return win_sysinfo.collect()\n",
"step-3": "__author__ = '李晓波'\n<mask token>\n\n\ndef LinuxSysInfo():\n return sysinfo.collect()\n\n\ndef WindowsSysInfo():\n from windows import sysinfo as win_sysinfo\n return win_sysinfo.collect()\n",
"step-4": "__author__ = '李晓波'\nfrom linux import sysinfo\n\n\ndef LinuxSysInfo():\n return sysinfo.collect()\n\n\ndef WindowsSysInfo():\n from windows import sysinfo as win_sysinfo\n return win_sysinfo.collect()\n",
"step-5": "#!/usr/bin/env python\n# -*-coding:utf-8-*-\n__author__ = '李晓波'\n\nfrom linux import sysinfo\n\n\n#调用相应收集处理函数\n\ndef LinuxSysInfo():\n #print __file__\n return sysinfo.collect()\n\n\ndef WindowsSysInfo():\n from windows import sysinfo as win_sysinfo\n return win_sysinfo.collect()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from os import getenv
config_env = {'api_port': int(getenv('API_PORT')), 'psg_uri': getenv('PSG_URI')
}
|
normal
|
{
"blob_id": "21dd3d1deb00e9bc09803d01f1c05673ea8d25d2",
"index": 3771,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nconfig_env = {'api_port': int(getenv('API_PORT')), 'psg_uri': getenv('PSG_URI')\n }\n",
"step-3": "from os import getenv\nconfig_env = {'api_port': int(getenv('API_PORT')), 'psg_uri': getenv('PSG_URI')\n }\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# from suiron.core.SuironIO import SuironIO
# import cv2
# import os
# import time
# import json
# import numpy as np
# suironio = SuironIO(serial_location='/dev/ttyUSB0', baudrate=57600, port=5050)
# if __name__ == "__main__":
# while True:
# # suironio.record_inputs()
# print('turn90')
# suironio.servo_test(90)
# print('turn0')
# suironio.servo_test(0)
# print('turn-90')
# suironio.servo_test(-90)
# import socket
# import struct
# import pandas as pd
# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# host = raw_input("Server hostname or ip? ")
# port = input("Server port? ")
# # sock.connect((host,port))
# sock.connect(('192.168.0.164',5051))
# while True:
# data = raw_input("message: ")
# # sock.send(data)
# raw_data = {
# 'image': [2,4,2,5,6,3,2,3],
# 'servo': [22,42,5,45,34,534,2,3],
# 'motor': [23423,324,32,324,324,2,4,2]
# }
# df = pd.DataFrame(raw_data, columns=['image', 'servo', 'motor'])
# df = df.to_csv()
# sock.sendall(struct.pack('>i', len(df))+df)
# # sock.sendall(struct.pack('>i', len(data))+data)
# print("response: ", sock.recv(1024))
import numpy as np
import cv2
import pandas as pd
from suiron.utils.functions import raw_to_cnn, cnn_to_raw, raw_motor_to_rgb
from suiron.utils.img_serializer import deserialize_image
# Visualize images
# With and without any predictions
def visualize_data(filename, width=72, height=48, depth=3, cnn_model=None):
"""
When cnn_model is specified it'll show what the cnn_model predicts (red)
as opposed to what inputs it actually received (green)
"""
data = pd.DataFrame.from_csv(filename)
for i in range(30):
cur_img = data['image'][i]
cur_steer = int(data['servo'][i])
cur_throttle = int(data['motor'][i])
# [1:-1] is used to remove '[' and ']' from string
cur_img_array = deserialize_image(cur_img)
# cur_img_array = cv2.resize(cur_img_array, (480, 320), interpolation=cv2.INTER_CUBIC)
image = cv2.cvtColor(cur_img_array, cv2.COLOR_RGB2BGR)
print(i)
cv2.imwrite('test'+str(i)+'.jpg', image)
import sys
import json
# from suiron.core.SuironVZ import visualize_data
from suiron.utils.file_finder import get_latest_filename
# Load image settings
with open('settings.json') as d:
SETTINGS = json.load(d)
# Visualize latest filename
filename = get_latest_filename()
# If we specified which file
if len(sys.argv) > 1:
filename = sys.argv[1]
visualize_data(filename, width=SETTINGS['width'], height=SETTINGS['height'], depth=SETTINGS['depth'])
|
normal
|
{
"blob_id": "bf8ffe603b7c1e90deed6a69500ea5b7671e7270",
"index": 879,
"step-1": "<mask token>\n\n\ndef visualize_data(filename, width=72, height=48, depth=3, cnn_model=None):\n \"\"\"\n When cnn_model is specified it'll show what the cnn_model predicts (red)\n as opposed to what inputs it actually received (green)\n \"\"\"\n data = pd.DataFrame.from_csv(filename)\n for i in range(30):\n cur_img = data['image'][i]\n cur_steer = int(data['servo'][i])\n cur_throttle = int(data['motor'][i])\n cur_img_array = deserialize_image(cur_img)\n image = cv2.cvtColor(cur_img_array, cv2.COLOR_RGB2BGR)\n print(i)\n cv2.imwrite('test' + str(i) + '.jpg', image)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef visualize_data(filename, width=72, height=48, depth=3, cnn_model=None):\n \"\"\"\n When cnn_model is specified it'll show what the cnn_model predicts (red)\n as opposed to what inputs it actually received (green)\n \"\"\"\n data = pd.DataFrame.from_csv(filename)\n for i in range(30):\n cur_img = data['image'][i]\n cur_steer = int(data['servo'][i])\n cur_throttle = int(data['motor'][i])\n cur_img_array = deserialize_image(cur_img)\n image = cv2.cvtColor(cur_img_array, cv2.COLOR_RGB2BGR)\n print(i)\n cv2.imwrite('test' + str(i) + '.jpg', image)\n\n\n<mask token>\nwith open('settings.json') as d:\n SETTINGS = json.load(d)\n<mask token>\nif len(sys.argv) > 1:\n filename = sys.argv[1]\nvisualize_data(filename, width=SETTINGS['width'], height=SETTINGS['height'],\n depth=SETTINGS['depth'])\n",
"step-3": "<mask token>\n\n\ndef visualize_data(filename, width=72, height=48, depth=3, cnn_model=None):\n \"\"\"\n When cnn_model is specified it'll show what the cnn_model predicts (red)\n as opposed to what inputs it actually received (green)\n \"\"\"\n data = pd.DataFrame.from_csv(filename)\n for i in range(30):\n cur_img = data['image'][i]\n cur_steer = int(data['servo'][i])\n cur_throttle = int(data['motor'][i])\n cur_img_array = deserialize_image(cur_img)\n image = cv2.cvtColor(cur_img_array, cv2.COLOR_RGB2BGR)\n print(i)\n cv2.imwrite('test' + str(i) + '.jpg', image)\n\n\n<mask token>\nwith open('settings.json') as d:\n SETTINGS = json.load(d)\nfilename = get_latest_filename()\nif len(sys.argv) > 1:\n filename = sys.argv[1]\nvisualize_data(filename, width=SETTINGS['width'], height=SETTINGS['height'],\n depth=SETTINGS['depth'])\n",
"step-4": "import numpy as np\nimport cv2\nimport pandas as pd\nfrom suiron.utils.functions import raw_to_cnn, cnn_to_raw, raw_motor_to_rgb\nfrom suiron.utils.img_serializer import deserialize_image\n\n\ndef visualize_data(filename, width=72, height=48, depth=3, cnn_model=None):\n \"\"\"\n When cnn_model is specified it'll show what the cnn_model predicts (red)\n as opposed to what inputs it actually received (green)\n \"\"\"\n data = pd.DataFrame.from_csv(filename)\n for i in range(30):\n cur_img = data['image'][i]\n cur_steer = int(data['servo'][i])\n cur_throttle = int(data['motor'][i])\n cur_img_array = deserialize_image(cur_img)\n image = cv2.cvtColor(cur_img_array, cv2.COLOR_RGB2BGR)\n print(i)\n cv2.imwrite('test' + str(i) + '.jpg', image)\n\n\nimport sys\nimport json\nfrom suiron.utils.file_finder import get_latest_filename\nwith open('settings.json') as d:\n SETTINGS = json.load(d)\nfilename = get_latest_filename()\nif len(sys.argv) > 1:\n filename = sys.argv[1]\nvisualize_data(filename, width=SETTINGS['width'], height=SETTINGS['height'],\n depth=SETTINGS['depth'])\n",
"step-5": "# from suiron.core.SuironIO import SuironIO\n# import cv2\n# import os\n# import time\n# import json\n# import numpy as np\n\n# suironio = SuironIO(serial_location='/dev/ttyUSB0', baudrate=57600, port=5050)\n\n# if __name__ == \"__main__\":\n# while True:\n# \t# suironio.record_inputs()\n# \tprint('turn90')\n# suironio.servo_test(90)\n# print('turn0')\n# suironio.servo_test(0)\n# print('turn-90')\n# suironio.servo_test(-90)\n\n# import socket\n# import struct\n# import pandas as pd\n\n# sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n# host = raw_input(\"Server hostname or ip? \")\n# port = input(\"Server port? \")\n# # sock.connect((host,port))\n# sock.connect(('192.168.0.164',5051))\n\n# while True:\n# data = raw_input(\"message: \")\n# # sock.send(data)\n# raw_data = {\n# \t 'image': [2,4,2,5,6,3,2,3], \n# \t 'servo': [22,42,5,45,34,534,2,3],\n# \t 'motor': [23423,324,32,324,324,2,4,2]\n# \t }\n# df = pd.DataFrame(raw_data, columns=['image', 'servo', 'motor'])\n# df = df.to_csv()\n# sock.sendall(struct.pack('>i', len(df))+df)\n# # sock.sendall(struct.pack('>i', len(data))+data)\n# print(\"response: \", sock.recv(1024))\n\nimport numpy as np\nimport cv2\nimport pandas as pd\n\nfrom suiron.utils.functions import raw_to_cnn, cnn_to_raw, raw_motor_to_rgb\nfrom suiron.utils.img_serializer import deserialize_image\n\n# Visualize images\n# With and without any predictions\ndef visualize_data(filename, width=72, height=48, depth=3, cnn_model=None):\n \"\"\"\n When cnn_model is specified it'll show what the cnn_model predicts (red)\n as opposed to what inputs it actually received (green)\n \"\"\"\n data = pd.DataFrame.from_csv(filename) \n\n for i in range(30):\n cur_img = data['image'][i]\n cur_steer = int(data['servo'][i])\n cur_throttle = int(data['motor'][i])\n \n # [1:-1] is used to remove '[' and ']' from string \n cur_img_array = deserialize_image(cur_img)\n # cur_img_array = cv2.resize(cur_img_array, (480, 320), 
interpolation=cv2.INTER_CUBIC)\n image = cv2.cvtColor(cur_img_array, cv2.COLOR_RGB2BGR)\n print(i)\n cv2.imwrite('test'+str(i)+'.jpg', image)\n\nimport sys\nimport json\n\n# from suiron.core.SuironVZ import visualize_data\nfrom suiron.utils.file_finder import get_latest_filename\n\n# Load image settings\nwith open('settings.json') as d:\n SETTINGS = json.load(d)\n\n# Visualize latest filename\nfilename = get_latest_filename() \n\n# If we specified which file\nif len(sys.argv) > 1:\n filename = sys.argv[1]\n\nvisualize_data(filename, width=SETTINGS['width'], height=SETTINGS['height'], depth=SETTINGS['depth'])",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import random
import math
import time
import pygame
pygame.init()
scr = pygame.display.set_mode((700,700))
enemies = []
#music = pygame.mixer.music.load('ENERGETIC CHIPTUNE Thermal - Evan King.mp3')
#pygame.mixer.music.play(-1)
hit = []
class Player:
def __init__(self):
self.x = 275
self.y = 275
self.image = pygame.image.load('player.jpg')
self.image1 = pygame.image.load('hearts.png')
self.lives = 5
def draw(self):
scr.blit(self.image,(self.x,self.y))
def rotate(self, x, y):
oppos = math.fabs(y - self.y)
adjac = math.fabs(x - self.x)
hypot = math.hypot(oppos,adjac)
sin = oppos/hypot
radians = math.asin(sin)
angle = radians * (180/3.14)
if x > self.x:
if y > self.y:
angle -= angle + angle
if x < self.x:
angle = 180 + (angle - (angle + angle))
if y > self.y:
angle -= angle + angle
return angle - 90
class Bullet:
def __init__(self, color):
self.x = 0
self.y = 0
self.angle = 0
self.color = color
def draw(self):
pygame.draw.rect(scr,self.color,pygame.Rect(self.x,self.y,5,5))
class Gun:
def __init__(self):
self.x = 0
self.y = 0
self.bullets = []
self.bullets2 = []
def shoot1(self,x,y,angle):
self.bullets.append(Bullet((0,255,255)))
self.bullets[-1].x = x
self.bullets[-1].y = y
self.bullets[-1].angle = angle
def shoot2(self,x,y,angle):
self.bullets2.append(Bullet((255,255,0)))
self.bullets2[-1].x = x
self.bullets2[-1].y = y
self.bullets2[-1].angle = angle
class Enemy:
def __init__(self):
self.x = 100
self.y = 100
self.speed = 2
self.hearts = 3
self.image = pygame.image.load('enemy.png')
def draw(self):
scr.blit(self.image,(self.x,self.y))
def rotate(self, x, y):
oppos = math.fabs(y - self.y)
adjac = math.fabs(x - self.x)
hypot = math.hypot(oppos,adjac)
sin = oppos/hypot
radians = math.asin(sin)
angle = radians * (180/3.14)
if x > self.x:
if y > self.y:
angle -= angle + angle
if x < self.x:
angle = 180 + (angle - (angle + angle))
if y > self.y:
angle -= angle + angle
return angle - 90
def distance(self,x,y):
oppos = math.fabs(y - self.y)
adjac = math.fabs(x - self.x)
hypot = math.hypot(oppos,adjac)
return hypot
def spawn(self):
enemies.append(Enemy())
enemies[-1].x = random.randint(0,600)
enemies[-1].y = random.randint(0,600)
cmd = Enemy()
gun = Gun()
player = Player()
cmd.spawn()
cmd.spawn()
last = 0
frames = 0
fro = 1
while True:
frames += 1
scr.fill((0,0,0))
for event in pygame.event.get():
key = pygame.key.get_pressed()
Mpos = pygame.mouse.get_pos()
if event.type == 5:
gun.shoot1(player.x + 12.5,player.y + 12.5,angle)
for i in range(0,player.lives):
scr.blit(player.image1,(i*35,1))
for i in range(len(gun.bullets)):
try:
gun.bullets[i].x = gun.bullets[i].x + 4 * math.cos(math.radians(gun.bullets[i].angle + 90))
gun.bullets[i].y = gun.bullets[i].y + 4 * math.sin(math.radians(gun.bullets[i].angle - 90))
if gun.bullets[i].x > 600:
del gun.bullets[i]
if gun.bullets[i].x < 0:
del gun.bullets[i]
if gun.bullets[i].y > 600:
del gun.bullets[i]
if gun.bullets[i].y < 0:
del gun.bullets[i]
gun.bullets[i].draw()
except IndexError:
pass
for i in range(len(gun.bullets2)):
try:
gun.bullets2[i].x = gun.bullets2[i].x + 4 * math.cos(math.radians(gun.bullets2[i].angle + 90))
gun.bullets2[i].y = gun.bullets2[i].y + 4 * math.sin(math.radians(gun.bullets2[i].angle - 90))
if gun.bullets2[i].x > 600:
del gun.bullets2[i]
if gun.bullets2[i].x < 0:
del gun.bullets2[i]
if gun.bullets2[i].y > 600:
del gun.bullets2[i]
if gun.bullets2[i].y < 0:
del gun.bullets2[i]
gun.bullets2[i].draw()
except IndexError:
pass
for i in range(len(enemies)):
if enemies[i].distance(player.x,player.y) > 100:
enemies[i].x = enemies[i].x + enemies[i].speed * math.cos(math.radians(enemies[i].rotate(player.x,player.y) + 90))
enemies[i].y = enemies[i].y + enemies[i].speed * math.sin(math.radians(enemies[i].rotate(player.x,player.y) - 90))
enemies[i].image = pygame.image.load("enemy.png").convert()
enemies[i].image = enemies[i].image.copy()
enemies[i].image = pygame.transform.rotate(enemies[i].image,enemies[i].rotate(player.x,player.y))
angle2 = enemies[i].rotate(player.x,player.y)
if frames % 100 == 0:
gun.shoot2(enemies[i].x + 12.5,enemies[i].y + 12.5,angle2)
enemies[i].draw()
for j in range(len(gun.bullets)):
for i in range(len(gun.bullets)):
try:
if gun.bullets[j].x > enemies[i].x and gun.bullets[j].x < enemies[i].x+25 and gun.bullets[j].y > enemies[i].y and gun.bullets[j].y < enemies[i].y + 25:
del enemies[i]
except IndexError:
pass
for j in range(len(gun.bullets2)):
for i in range(len(gun.bullets2)):
try:
if gun.bullets2[j].x > player.x and gun.bullets2[j].x < player.x+25 and gun.bullets2[j].y > player.y and gun.bullets2[j].y < player.y + 25:
for i in range(len(hit)-1):
if not (hit[i].x > player.x or hit[i].x < player.x+25 or hit[i].y > player.y or hit[i].y < player.y):
del hit[i]
if hit.count(gun.bullets2[j]) == 0:
hit.append(gun.bullets2[j])
player.lives = 5 - len(hit)
except IndexError:
pass
if key[pygame.K_a]:
player.x -= 3
if key[pygame.K_d]:
player.x += 3
if key[pygame.K_w]:
player.y -= 3
if key[pygame.K_s]:
player.y += 3
if frames % 150 == 0:
cmd.spawn()
if player.lives < 1:
pygame.quit()
break
player.image = pygame.image.load("player.jpg").convert()
player.image = player.image.copy()
player.image = pygame.transform.rotate(player.image,player.rotate(Mpos[0],Mpos[1]))
angle = player.rotate(Mpos[0],Mpos[1])
player.draw()
pygame.display.update()
time.sleep(0.005)
quit()
|
normal
|
{
"blob_id": "54e04d740ef46fca04cf4169d2e7c05083414bd8",
"index": 11,
"step-1": "<mask token>\n\n\nclass Player:\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Bullet:\n\n def __init__(self, color):\n self.x = 0\n self.y = 0\n self.angle = 0\n self.color = color\n\n def draw(self):\n pygame.draw.rect(scr, self.color, pygame.Rect(self.x, self.y, 5, 5))\n\n\nclass Gun:\n\n def __init__(self):\n self.x = 0\n self.y = 0\n self.bullets = []\n self.bullets2 = []\n\n def shoot1(self, x, y, angle):\n self.bullets.append(Bullet((0, 255, 255)))\n self.bullets[-1].x = x\n self.bullets[-1].y = y\n self.bullets[-1].angle = angle\n\n def shoot2(self, x, y, angle):\n self.bullets2.append(Bullet((255, 255, 0)))\n self.bullets2[-1].x = x\n self.bullets2[-1].y = y\n self.bullets2[-1].angle = angle\n\n\nclass Enemy:\n\n def __init__(self):\n self.x = 100\n self.y = 100\n self.speed = 2\n self.hearts = 3\n self.image = pygame.image.load('enemy.png')\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n def distance(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n return hypot\n\n def spawn(self):\n enemies.append(Enemy())\n enemies[-1].x = random.randint(0, 600)\n enemies[-1].y = random.randint(0, 600)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Player:\n\n def __init__(self):\n self.x = 275\n self.y = 275\n self.image = pygame.image.load('player.jpg')\n self.image1 = pygame.image.load('hearts.png')\n self.lives = 5\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n\nclass Bullet:\n\n def __init__(self, color):\n self.x = 0\n self.y = 0\n self.angle = 0\n self.color = color\n\n def draw(self):\n pygame.draw.rect(scr, self.color, pygame.Rect(self.x, self.y, 5, 5))\n\n\nclass Gun:\n\n def __init__(self):\n self.x = 0\n self.y = 0\n self.bullets = []\n self.bullets2 = []\n\n def shoot1(self, x, y, angle):\n self.bullets.append(Bullet((0, 255, 255)))\n self.bullets[-1].x = x\n self.bullets[-1].y = y\n self.bullets[-1].angle = angle\n\n def shoot2(self, x, y, angle):\n self.bullets2.append(Bullet((255, 255, 0)))\n self.bullets2[-1].x = x\n self.bullets2[-1].y = y\n self.bullets2[-1].angle = angle\n\n\nclass Enemy:\n\n def __init__(self):\n self.x = 100\n self.y = 100\n self.speed = 2\n self.hearts = 3\n self.image = pygame.image.load('enemy.png')\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n def distance(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n 
hypot = math.hypot(oppos, adjac)\n return hypot\n\n def spawn(self):\n enemies.append(Enemy())\n enemies[-1].x = random.randint(0, 600)\n enemies[-1].y = random.randint(0, 600)\n\n\n<mask token>\n",
"step-3": "<mask token>\npygame.init()\nscr = pygame.display.set_mode((700, 700))\nenemies = []\nhit = []\n\n\nclass Player:\n\n def __init__(self):\n self.x = 275\n self.y = 275\n self.image = pygame.image.load('player.jpg')\n self.image1 = pygame.image.load('hearts.png')\n self.lives = 5\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n\nclass Bullet:\n\n def __init__(self, color):\n self.x = 0\n self.y = 0\n self.angle = 0\n self.color = color\n\n def draw(self):\n pygame.draw.rect(scr, self.color, pygame.Rect(self.x, self.y, 5, 5))\n\n\nclass Gun:\n\n def __init__(self):\n self.x = 0\n self.y = 0\n self.bullets = []\n self.bullets2 = []\n\n def shoot1(self, x, y, angle):\n self.bullets.append(Bullet((0, 255, 255)))\n self.bullets[-1].x = x\n self.bullets[-1].y = y\n self.bullets[-1].angle = angle\n\n def shoot2(self, x, y, angle):\n self.bullets2.append(Bullet((255, 255, 0)))\n self.bullets2[-1].x = x\n self.bullets2[-1].y = y\n self.bullets2[-1].angle = angle\n\n\nclass Enemy:\n\n def __init__(self):\n self.x = 100\n self.y = 100\n self.speed = 2\n self.hearts = 3\n self.image = pygame.image.load('enemy.png')\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n def 
distance(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n return hypot\n\n def spawn(self):\n enemies.append(Enemy())\n enemies[-1].x = random.randint(0, 600)\n enemies[-1].y = random.randint(0, 600)\n\n\ncmd = Enemy()\ngun = Gun()\nplayer = Player()\ncmd.spawn()\ncmd.spawn()\nlast = 0\nframes = 0\nfro = 1\nwhile True:\n frames += 1\n scr.fill((0, 0, 0))\n for event in pygame.event.get():\n key = pygame.key.get_pressed()\n Mpos = pygame.mouse.get_pos()\n if event.type == 5:\n gun.shoot1(player.x + 12.5, player.y + 12.5, angle)\n for i in range(0, player.lives):\n scr.blit(player.image1, (i * 35, 1))\n for i in range(len(gun.bullets)):\n try:\n gun.bullets[i].x = gun.bullets[i].x + 4 * math.cos(math.radians\n (gun.bullets[i].angle + 90))\n gun.bullets[i].y = gun.bullets[i].y + 4 * math.sin(math.radians\n (gun.bullets[i].angle - 90))\n if gun.bullets[i].x > 600:\n del gun.bullets[i]\n if gun.bullets[i].x < 0:\n del gun.bullets[i]\n if gun.bullets[i].y > 600:\n del gun.bullets[i]\n if gun.bullets[i].y < 0:\n del gun.bullets[i]\n gun.bullets[i].draw()\n except IndexError:\n pass\n for i in range(len(gun.bullets2)):\n try:\n gun.bullets2[i].x = gun.bullets2[i].x + 4 * math.cos(math.\n radians(gun.bullets2[i].angle + 90))\n gun.bullets2[i].y = gun.bullets2[i].y + 4 * math.sin(math.\n radians(gun.bullets2[i].angle - 90))\n if gun.bullets2[i].x > 600:\n del gun.bullets2[i]\n if gun.bullets2[i].x < 0:\n del gun.bullets2[i]\n if gun.bullets2[i].y > 600:\n del gun.bullets2[i]\n if gun.bullets2[i].y < 0:\n del gun.bullets2[i]\n gun.bullets2[i].draw()\n except IndexError:\n pass\n for i in range(len(enemies)):\n if enemies[i].distance(player.x, player.y) > 100:\n enemies[i].x = enemies[i].x + enemies[i].speed * math.cos(math.\n radians(enemies[i].rotate(player.x, player.y) + 90))\n enemies[i].y = enemies[i].y + enemies[i].speed * math.sin(math.\n radians(enemies[i].rotate(player.x, player.y) - 90))\n 
enemies[i].image = pygame.image.load('enemy.png').convert()\n enemies[i].image = enemies[i].image.copy()\n enemies[i].image = pygame.transform.rotate(enemies[i].image,\n enemies[i].rotate(player.x, player.y))\n angle2 = enemies[i].rotate(player.x, player.y)\n if frames % 100 == 0:\n gun.shoot2(enemies[i].x + 12.5, enemies[i].y + 12.5, angle2)\n enemies[i].draw()\n for j in range(len(gun.bullets)):\n for i in range(len(gun.bullets)):\n try:\n if gun.bullets[j].x > enemies[i].x and gun.bullets[j\n ].x < enemies[i].x + 25 and gun.bullets[j].y > enemies[i\n ].y and gun.bullets[j].y < enemies[i].y + 25:\n del enemies[i]\n except IndexError:\n pass\n for j in range(len(gun.bullets2)):\n for i in range(len(gun.bullets2)):\n try:\n if gun.bullets2[j].x > player.x and gun.bullets2[j\n ].x < player.x + 25 and gun.bullets2[j\n ].y > player.y and gun.bullets2[j].y < player.y + 25:\n for i in range(len(hit) - 1):\n if not (hit[i].x > player.x or hit[i].x < player.x +\n 25 or hit[i].y > player.y or hit[i].y < player.y):\n del hit[i]\n if hit.count(gun.bullets2[j]) == 0:\n hit.append(gun.bullets2[j])\n player.lives = 5 - len(hit)\n except IndexError:\n pass\n if key[pygame.K_a]:\n player.x -= 3\n if key[pygame.K_d]:\n player.x += 3\n if key[pygame.K_w]:\n player.y -= 3\n if key[pygame.K_s]:\n player.y += 3\n if frames % 150 == 0:\n cmd.spawn()\n if player.lives < 1:\n pygame.quit()\n break\n player.image = pygame.image.load('player.jpg').convert()\n player.image = player.image.copy()\n player.image = pygame.transform.rotate(player.image, player.rotate(Mpos\n [0], Mpos[1]))\n angle = player.rotate(Mpos[0], Mpos[1])\n player.draw()\n pygame.display.update()\n time.sleep(0.005)\nquit()\n",
"step-4": "import random\nimport math\nimport time\nimport pygame\npygame.init()\nscr = pygame.display.set_mode((700, 700))\nenemies = []\nhit = []\n\n\nclass Player:\n\n def __init__(self):\n self.x = 275\n self.y = 275\n self.image = pygame.image.load('player.jpg')\n self.image1 = pygame.image.load('hearts.png')\n self.lives = 5\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle + angle\n return angle - 90\n\n\nclass Bullet:\n\n def __init__(self, color):\n self.x = 0\n self.y = 0\n self.angle = 0\n self.color = color\n\n def draw(self):\n pygame.draw.rect(scr, self.color, pygame.Rect(self.x, self.y, 5, 5))\n\n\nclass Gun:\n\n def __init__(self):\n self.x = 0\n self.y = 0\n self.bullets = []\n self.bullets2 = []\n\n def shoot1(self, x, y, angle):\n self.bullets.append(Bullet((0, 255, 255)))\n self.bullets[-1].x = x\n self.bullets[-1].y = y\n self.bullets[-1].angle = angle\n\n def shoot2(self, x, y, angle):\n self.bullets2.append(Bullet((255, 255, 0)))\n self.bullets2[-1].x = x\n self.bullets2[-1].y = y\n self.bullets2[-1].angle = angle\n\n\nclass Enemy:\n\n def __init__(self):\n self.x = 100\n self.y = 100\n self.speed = 2\n self.hearts = 3\n self.image = pygame.image.load('enemy.png')\n\n def draw(self):\n scr.blit(self.image, (self.x, self.y))\n\n def rotate(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n sin = oppos / hypot\n radians = math.asin(sin)\n angle = radians * (180 / 3.14)\n if x > self.x:\n if y > self.y:\n angle -= angle + angle\n if x < self.x:\n angle = 180 + (angle - (angle + angle))\n if y > self.y:\n angle -= angle 
+ angle\n return angle - 90\n\n def distance(self, x, y):\n oppos = math.fabs(y - self.y)\n adjac = math.fabs(x - self.x)\n hypot = math.hypot(oppos, adjac)\n return hypot\n\n def spawn(self):\n enemies.append(Enemy())\n enemies[-1].x = random.randint(0, 600)\n enemies[-1].y = random.randint(0, 600)\n\n\ncmd = Enemy()\ngun = Gun()\nplayer = Player()\ncmd.spawn()\ncmd.spawn()\nlast = 0\nframes = 0\nfro = 1\nwhile True:\n frames += 1\n scr.fill((0, 0, 0))\n for event in pygame.event.get():\n key = pygame.key.get_pressed()\n Mpos = pygame.mouse.get_pos()\n if event.type == 5:\n gun.shoot1(player.x + 12.5, player.y + 12.5, angle)\n for i in range(0, player.lives):\n scr.blit(player.image1, (i * 35, 1))\n for i in range(len(gun.bullets)):\n try:\n gun.bullets[i].x = gun.bullets[i].x + 4 * math.cos(math.radians\n (gun.bullets[i].angle + 90))\n gun.bullets[i].y = gun.bullets[i].y + 4 * math.sin(math.radians\n (gun.bullets[i].angle - 90))\n if gun.bullets[i].x > 600:\n del gun.bullets[i]\n if gun.bullets[i].x < 0:\n del gun.bullets[i]\n if gun.bullets[i].y > 600:\n del gun.bullets[i]\n if gun.bullets[i].y < 0:\n del gun.bullets[i]\n gun.bullets[i].draw()\n except IndexError:\n pass\n for i in range(len(gun.bullets2)):\n try:\n gun.bullets2[i].x = gun.bullets2[i].x + 4 * math.cos(math.\n radians(gun.bullets2[i].angle + 90))\n gun.bullets2[i].y = gun.bullets2[i].y + 4 * math.sin(math.\n radians(gun.bullets2[i].angle - 90))\n if gun.bullets2[i].x > 600:\n del gun.bullets2[i]\n if gun.bullets2[i].x < 0:\n del gun.bullets2[i]\n if gun.bullets2[i].y > 600:\n del gun.bullets2[i]\n if gun.bullets2[i].y < 0:\n del gun.bullets2[i]\n gun.bullets2[i].draw()\n except IndexError:\n pass\n for i in range(len(enemies)):\n if enemies[i].distance(player.x, player.y) > 100:\n enemies[i].x = enemies[i].x + enemies[i].speed * math.cos(math.\n radians(enemies[i].rotate(player.x, player.y) + 90))\n enemies[i].y = enemies[i].y + enemies[i].speed * math.sin(math.\n 
radians(enemies[i].rotate(player.x, player.y) - 90))\n enemies[i].image = pygame.image.load('enemy.png').convert()\n enemies[i].image = enemies[i].image.copy()\n enemies[i].image = pygame.transform.rotate(enemies[i].image,\n enemies[i].rotate(player.x, player.y))\n angle2 = enemies[i].rotate(player.x, player.y)\n if frames % 100 == 0:\n gun.shoot2(enemies[i].x + 12.5, enemies[i].y + 12.5, angle2)\n enemies[i].draw()\n for j in range(len(gun.bullets)):\n for i in range(len(gun.bullets)):\n try:\n if gun.bullets[j].x > enemies[i].x and gun.bullets[j\n ].x < enemies[i].x + 25 and gun.bullets[j].y > enemies[i\n ].y and gun.bullets[j].y < enemies[i].y + 25:\n del enemies[i]\n except IndexError:\n pass\n for j in range(len(gun.bullets2)):\n for i in range(len(gun.bullets2)):\n try:\n if gun.bullets2[j].x > player.x and gun.bullets2[j\n ].x < player.x + 25 and gun.bullets2[j\n ].y > player.y and gun.bullets2[j].y < player.y + 25:\n for i in range(len(hit) - 1):\n if not (hit[i].x > player.x or hit[i].x < player.x +\n 25 or hit[i].y > player.y or hit[i].y < player.y):\n del hit[i]\n if hit.count(gun.bullets2[j]) == 0:\n hit.append(gun.bullets2[j])\n player.lives = 5 - len(hit)\n except IndexError:\n pass\n if key[pygame.K_a]:\n player.x -= 3\n if key[pygame.K_d]:\n player.x += 3\n if key[pygame.K_w]:\n player.y -= 3\n if key[pygame.K_s]:\n player.y += 3\n if frames % 150 == 0:\n cmd.spawn()\n if player.lives < 1:\n pygame.quit()\n break\n player.image = pygame.image.load('player.jpg').convert()\n player.image = player.image.copy()\n player.image = pygame.transform.rotate(player.image, player.rotate(Mpos\n [0], Mpos[1]))\n angle = player.rotate(Mpos[0], Mpos[1])\n player.draw()\n pygame.display.update()\n time.sleep(0.005)\nquit()\n",
"step-5": "import random\r\nimport math\r\nimport time\r\nimport pygame\r\npygame.init()\r\nscr = pygame.display.set_mode((700,700))\r\nenemies = []\r\n#music = pygame.mixer.music.load('ENERGETIC CHIPTUNE Thermal - Evan King.mp3')\r\n#pygame.mixer.music.play(-1)\r\nhit = []\r\nclass Player:\r\n def __init__(self):\r\n self.x = 275\r\n self.y = 275\r\n self.image = pygame.image.load('player.jpg')\r\n self.image1 = pygame.image.load('hearts.png')\r\n self.lives = 5\r\n def draw(self):\r\n scr.blit(self.image,(self.x,self.y))\r\n def rotate(self, x, y):\r\n oppos = math.fabs(y - self.y)\r\n adjac = math.fabs(x - self.x)\r\n hypot = math.hypot(oppos,adjac)\r\n sin = oppos/hypot\r\n radians = math.asin(sin)\r\n angle = radians * (180/3.14)\r\n if x > self.x: \r\n if y > self.y:\r\n angle -= angle + angle\r\n if x < self.x:\r\n angle = 180 + (angle - (angle + angle))\r\n if y > self.y:\r\n angle -= angle + angle\r\n return angle - 90\r\nclass Bullet:\r\n def __init__(self, color):\r\n self.x = 0\r\n self.y = 0\r\n self.angle = 0\r\n self.color = color\r\n def draw(self):\r\n pygame.draw.rect(scr,self.color,pygame.Rect(self.x,self.y,5,5))\r\nclass Gun:\r\n def __init__(self):\r\n self.x = 0\r\n self.y = 0\r\n self.bullets = []\r\n self.bullets2 = []\r\n def shoot1(self,x,y,angle):\r\n self.bullets.append(Bullet((0,255,255)))\r\n self.bullets[-1].x = x\r\n self.bullets[-1].y = y\r\n self.bullets[-1].angle = angle\r\n def shoot2(self,x,y,angle):\r\n self.bullets2.append(Bullet((255,255,0)))\r\n self.bullets2[-1].x = x\r\n self.bullets2[-1].y = y\r\n self.bullets2[-1].angle = angle\r\nclass Enemy:\r\n def __init__(self):\r\n self.x = 100\r\n self.y = 100\r\n self.speed = 2\r\n self.hearts = 3\r\n self.image = pygame.image.load('enemy.png')\r\n def draw(self):\r\n scr.blit(self.image,(self.x,self.y))\r\n def rotate(self, x, y):\r\n oppos = math.fabs(y - self.y)\r\n adjac = math.fabs(x - self.x)\r\n hypot = math.hypot(oppos,adjac)\r\n sin = oppos/hypot\r\n radians = 
math.asin(sin)\r\n angle = radians * (180/3.14)\r\n if x > self.x: \r\n if y > self.y:\r\n angle -= angle + angle\r\n if x < self.x:\r\n angle = 180 + (angle - (angle + angle))\r\n if y > self.y:\r\n angle -= angle + angle\r\n return angle - 90\r\n def distance(self,x,y):\r\n oppos = math.fabs(y - self.y)\r\n adjac = math.fabs(x - self.x)\r\n hypot = math.hypot(oppos,adjac)\r\n return hypot\r\n def spawn(self):\r\n enemies.append(Enemy())\r\n enemies[-1].x = random.randint(0,600)\r\n enemies[-1].y = random.randint(0,600)\r\ncmd = Enemy()\r\ngun = Gun() \r\nplayer = Player()\r\ncmd.spawn()\r\ncmd.spawn()\r\nlast = 0\r\nframes = 0\r\nfro = 1\r\nwhile True:\r\n frames += 1\r\n scr.fill((0,0,0))\r\n for event in pygame.event.get():\r\n key = pygame.key.get_pressed()\r\n Mpos = pygame.mouse.get_pos()\r\n if event.type == 5:\r\n gun.shoot1(player.x + 12.5,player.y + 12.5,angle)\r\n for i in range(0,player.lives):\r\n scr.blit(player.image1,(i*35,1))\r\n \r\n for i in range(len(gun.bullets)):\r\n try:\r\n gun.bullets[i].x = gun.bullets[i].x + 4 * math.cos(math.radians(gun.bullets[i].angle + 90))\r\n gun.bullets[i].y = gun.bullets[i].y + 4 * math.sin(math.radians(gun.bullets[i].angle - 90))\r\n if gun.bullets[i].x > 600:\r\n del gun.bullets[i]\r\n if gun.bullets[i].x < 0:\r\n del gun.bullets[i]\r\n if gun.bullets[i].y > 600:\r\n del gun.bullets[i]\r\n if gun.bullets[i].y < 0:\r\n del gun.bullets[i]\r\n gun.bullets[i].draw()\r\n except IndexError:\r\n pass\r\n for i in range(len(gun.bullets2)):\r\n try:\r\n gun.bullets2[i].x = gun.bullets2[i].x + 4 * math.cos(math.radians(gun.bullets2[i].angle + 90))\r\n gun.bullets2[i].y = gun.bullets2[i].y + 4 * math.sin(math.radians(gun.bullets2[i].angle - 90))\r\n if gun.bullets2[i].x > 600:\r\n del gun.bullets2[i]\r\n if gun.bullets2[i].x < 0:\r\n del gun.bullets2[i]\r\n if gun.bullets2[i].y > 600:\r\n del gun.bullets2[i]\r\n if gun.bullets2[i].y < 0:\r\n del gun.bullets2[i]\r\n gun.bullets2[i].draw()\r\n except IndexError:\r\n 
pass\r\n for i in range(len(enemies)):\r\n if enemies[i].distance(player.x,player.y) > 100:\r\n enemies[i].x = enemies[i].x + enemies[i].speed * math.cos(math.radians(enemies[i].rotate(player.x,player.y) + 90))\r\n enemies[i].y = enemies[i].y + enemies[i].speed * math.sin(math.radians(enemies[i].rotate(player.x,player.y) - 90))\r\n enemies[i].image = pygame.image.load(\"enemy.png\").convert()\r\n enemies[i].image = enemies[i].image.copy()\r\n enemies[i].image = pygame.transform.rotate(enemies[i].image,enemies[i].rotate(player.x,player.y))\r\n angle2 = enemies[i].rotate(player.x,player.y)\r\n if frames % 100 == 0:\r\n gun.shoot2(enemies[i].x + 12.5,enemies[i].y + 12.5,angle2)\r\n enemies[i].draw()\r\n for j in range(len(gun.bullets)):\r\n for i in range(len(gun.bullets)):\r\n try:\r\n if gun.bullets[j].x > enemies[i].x and gun.bullets[j].x < enemies[i].x+25 and gun.bullets[j].y > enemies[i].y and gun.bullets[j].y < enemies[i].y + 25:\r\n del enemies[i]\r\n except IndexError:\r\n pass\r\n for j in range(len(gun.bullets2)):\r\n for i in range(len(gun.bullets2)):\r\n try:\r\n if gun.bullets2[j].x > player.x and gun.bullets2[j].x < player.x+25 and gun.bullets2[j].y > player.y and gun.bullets2[j].y < player.y + 25:\r\n for i in range(len(hit)-1):\r\n if not (hit[i].x > player.x or hit[i].x < player.x+25 or hit[i].y > player.y or hit[i].y < player.y):\r\n del hit[i]\r\n if hit.count(gun.bullets2[j]) == 0:\r\n hit.append(gun.bullets2[j])\r\n player.lives = 5 - len(hit)\r\n except IndexError:\r\n pass\r\n if key[pygame.K_a]:\r\n player.x -= 3\r\n if key[pygame.K_d]:\r\n player.x += 3\r\n if key[pygame.K_w]:\r\n player.y -= 3\r\n if key[pygame.K_s]:\r\n player.y += 3\r\n if frames % 150 == 0:\r\n cmd.spawn()\r\n if player.lives < 1:\r\n pygame.quit()\r\n break\r\n player.image = pygame.image.load(\"player.jpg\").convert()\r\n player.image = player.image.copy()\r\n player.image = pygame.transform.rotate(player.image,player.rotate(Mpos[0],Mpos[1]))\r\n angle = 
player.rotate(Mpos[0],Mpos[1])\r\n player.draw()\r\n pygame.display.update()\r\n time.sleep(0.005)\r\nquit()\r\n\r\n",
"step-ids": [
14,
17,
19,
20,
21
]
}
|
[
14,
17,
19,
20,
21
] |
#rules used for pattern matching
# #1. x='[abc]' either a,b or c
#eg:
# import re
# x="[abc]"
# matcher=re.finditer(x,"abt cq5kz")
# for match in matcher:
# print(match.start())
# print(match.group())
#2. x='[^abc]' except abc
#eg:
# import re
# x="[^abc]"
# matcher=re.finditer(x,"abt cq5kz")
# for match in matcher:
# print(match.start())
# print(match.group())
#3. x='[a-z]' a to z ^ cap means that is not included
#eg
# import re
# x="[a-z]"
# matcher=re.finditer(x,"abt cq5kz")
# for match in matcher:
# print(match.start())
# print(match.group())
#eg with ^
# import re
# x="[^a-z]"
# matcher=re.finditer(x,"abt cq5kz")
# for match in matcher:
# print(match.start())
# print(match.group())
#4. x='[A-Z]' A TO Z
# import re
# x="[A-Z]"
# matcher=re.finditer(x,"abt SC5kZ")
# for match in matcher:
# print(match.start())
# print(match.group())
#5.X="[a-zA-Z]" BOTH LOWER AND UPPERCASE ARE CHECKED
import re

# Character class covering both lower- and upper-case ASCII letters.
x = "[a-zA-Z]"
# Walk every letter of the sample text, printing its index and the letter.
for match in re.finditer(x, "abtABIkz"):
    print(match.start())
    print(match.group())
#6. X="[0-9]"
# import re
# x="[0-9]"
# matcher=re.finditer(x,"ab1z7")
# for match in matcher:
# print(match.start())
# print(match.group())
#7.x="[a-zA-Z0-9]"
# import re
# x="[a-zA-Z0-9]"
# matcher=re.finditer(x,"ab72ABIkz")
# for match in matcher:
# print(match.start())
# print(match.group())
#8.x='\s' check space
# import re
# x="\s"
# matcher=re.finditer(x,"ab tAB Ikz")
# for match in matcher:
# print(match.start())
# print(match.group())
#9.x='\d' check the digits
# import re
# x="\d"
# matcher=re.finditer(x,"ab7tAB12kz")
# for match in matcher:
# print(match.start())
# print(match.group())
#10. x='\D' except digits
# import re
# x="\D"
# matcher=re.finditer(x,"ab001tAB5236Ikz")
# for match in matcher:
#     print(match.start())
#     print(match.group())
#11. x='\w' all words except special characters
# import re
# x="\w"
# matcher=re.finditer(x,"ab %tAB @Ikz")
# for match in matcher:
#     print(match.start())
#     print(match.group())
#12. x='\W' for special characters
# import re
# x="\W"
# matcher=re.finditer(x,"ab!!tAB@Ikz")
# for match in matcher:
#     print(match.start())
#     print(match.group())
|
normal
|
{
"blob_id": "1ddc261cf174c109583fd0ead1f537673d29090a",
"index": 1433,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor match in matcher:\n print(match.start())\n print(match.group())\n",
"step-3": "<mask token>\nx = '[a-zA-Z]'\nmatcher = re.finditer(x, 'abtABIkz')\nfor match in matcher:\n print(match.start())\n print(match.group())\n",
"step-4": "import re\nx = '[a-zA-Z]'\nmatcher = re.finditer(x, 'abtABIkz')\nfor match in matcher:\n print(match.start())\n print(match.group())\n",
"step-5": " #rules used for pattern matching\n # #1. x='[abc]' either a,b or c\n#eg:\n# import re\n# x=\"[abc]\"\n# matcher=re.finditer(x,\"abt cq5kz\")\n# for match in matcher:\n# print(match.start())\n# print(match.group())\n\n#2. x='[^abc]' except abc\n#eg:\n# import re\n# x=\"[^abc]\"\n# matcher=re.finditer(x,\"abt cq5kz\")\n# for match in matcher:\n# print(match.start())\n# print(match.group())\n\n#3. x='[a-z]' a to z ^ cap means that is not included\n#eg\n# import re\n# x=\"[a-z]\"\n# matcher=re.finditer(x,\"abt cq5kz\")\n# for match in matcher:\n# print(match.start())\n# print(match.group())\n\n#eg with ^\n# import re\n# x=\"[^a-z]\"\n# matcher=re.finditer(x,\"abt cq5kz\")\n# for match in matcher:\n# print(match.start())\n# print(match.group())\n\n#4. x='[A-Z]' A TO Z\n# import re\n# x=\"[A-Z]\"\n# matcher=re.finditer(x,\"abt SC5kZ\")\n# for match in matcher:\n# print(match.start())\n# print(match.group())\n\n#5.X=\"[a-zA-Z]\" BOTH LOWER AND UPPERCASE ARE CHECKED\nimport re\nx=\"[a-zA-Z]\"\nmatcher=re.finditer(x,\"abtABIkz\")\nfor match in matcher:\n print(match.start())\n print(match.group())\n\n#6. X=\"[0-9]\"\n# import re\n# x=\"[0-9]\"\n# matcher=re.finditer(x,\"ab1z7\")\n# for match in matcher:\n# print(match.start())\n# print(match.group())\n\n#7.x=\"[a-zA-Z0-9]\"\n# import re\n# x=\"[a-zA-Z0-9]\"\n# matcher=re.finditer(x,\"ab72ABIkz\")\n# for match in matcher:\n# print(match.start())\n# print(match.group())\n\n#8.x='\\s' check space\n# import re\n# x=\"\\s\"\n# matcher=re.finditer(x,\"ab tAB Ikz\")\n# for match in matcher:\n# print(match.start())\n# print(match.group())\n\n#9.x='\\d' check the digits\n# import re\n# x=\"\\d\"\n# matcher=re.finditer(x,\"ab7tAB12kz\")\n# for match in matcher:\n# print(match.start())\n# print(match.group())\n\n#9. x='\\D' except digits\n# import re\n# x=\"\\D\"\n# matcher=re.finditer(x,\"ab001tAB5236Ikz\")\n# for match in matcher:\n# print(match.start())\n# print(match.group())\n\n#10. 
x='\\w' all words except special characters\n# import re\n# x=\"\\w\"\n# matcher=re.finditer(x,\"ab %tAB @Ikz\")\n# for match in matcher:\n# print(match.start())\n# print(match.group())\n\n\n#11.x='\\W' for special characters\n# import re\n# x=\"\\W\"\n# matcher=re.finditer(x,\"ab!!tAB@Ikz\")\n# for match in matcher:\n# print(match.start())\n# print(match.group())\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
'''
@author: Victor Barrera Burgos
Created on 09 Feb 2014
Description: This script permits the obtention of the
methylation profile of a CpGRegion indicating the
methylation status of each CpG dinucleotide.
Addition on 02 March 2014
Description: permits the obtention of the
methylation profile of the whole genome using methylationMap.
'''
# Imports
import sys
import pysam
import re
# Defining types
# Structures from the first level of abstraction
class CpGRegion:
    """A genomic CpG region: identity, coordinates, sequence and summary."""

    def __init__(self, id, chrom, start, end, sequence):
        # Methylation summary, filled in while the region is processed.
        self.methCoef = -1   # overall coefficient; -1 until computed
        self.nCG = 0         # number of CG dinucleotides found so far
        self.cpgList = []    # CpGdinucleotide objects of this region
        # Identity and genomic location of the region.
        self.id = id
        self.chrom = chrom
        self.start = start
        self.end = end
        self.sequence = sequence
# Structures from the second level of abstraction
class CpGdinucleotide:
    """One CG dinucleotide: its two positions and methylation coefficients."""

    def __init__(self, chrom, firstPos, secondPos):
        # Location of the C and the G of the pair on the chromosome.
        self.chrom, self.firstPos, self.secondPos = chrom, firstPos, secondPos
        # All coefficients start at -1.0, meaning "not informative yet".
        self.firstPosMethCoef = self.secondPosMethCoef = self.meanMethCoef = -1.0
# Defining functions
# Functions from the first level of abstraction
def methylationMap(cgR, filter):
    """Print one "position;status" line per CpG dinucleotide of *cgR*.

    Every CG dinucleotide in cgR.sequence is located, wrapped in a
    CpGdinucleotide and scored by methCG().  One line per CpG is then
    printed as "firstPos;status", where status is:
      0 - unmethylated   (mean coefficient <= 0.2)
      1 - methylated     (mean coefficient >= 0.8)
      X - intermediate methylation
      N - not informative (no coefficient could be computed)

    Side effects: increments cgR.nCG and appends to cgR.cpgList.

    Pre: receives a CpGRegion object and a minimum-read filter value
    (forwarded to methCG()).
    """
    upSequence = cgR.sequence.upper()
    # Locate every CG dinucleotide in the upper-cased region sequence.
    for i in (m.start() for m in re.finditer('CG', upSequence)):
        # NOTE(review): start-1 presumably converts to 0-based genome
        # coordinates -- confirm against the region file convention.
        cpgDin = CpGdinucleotide(cgR.chrom, int(i) + cgR.start - 1, int(i) + cgR.start)
        methCG(cpgDin, filter)  # fills in the methylation coefficients
        cgR.nCG = cgR.nCG + 1
        cgR.cpgList.append(cpgDin)
    # Collect output lines in a list and join once: the original repeated
    # string += was quadratic in the number of CpGs.
    lines = []
    for cpg in cgR.cpgList:
        if cpg.meanMethCoef >= 0:
            if cpg.meanMethCoef <= 0.2:
                status = "0"
            elif cpg.meanMethCoef >= 0.8:
                status = "1"
            else:
                status = "X"
        else:
            status = "N"
        lines.append(str(cpg.firstPos) + ";" + status + "\n")
    # print() with a single argument is valid in both Python 2 and 3,
    # unlike the bare `print expr` statement used originally.
    print("%s" % "".join(lines))
def methylationProfile(cgR, filter):
    """Print a one-line methylation summary and ternary profile of *cgR*.

    Every CG dinucleotide in cgR.sequence is located, wrapped in a
    CpGdinucleotide and scored by methCG().  The profile holds one
    character per CpG:
      0 - unmethylated   (mean coefficient <= 0.2)
      1 - methylated     (mean coefficient >= 0.8)
      X - intermediate methylation
      N - not informative
    The printed line is
    "id;chrom;start;end;totalCG;informativeCG;methCoef;profile".

    Side effects: updates cgR.nCG, cgR.cpgList and -- when at least one
    CpG is informative -- cgR.methCoef (otherwise it stays at -1).

    Pre: receives a CpGRegion object and a minimum-read filter value
    (forwarded to methCG()).
    """
    upSequence = cgR.sequence.upper()
    # Locate every CG dinucleotide in the upper-cased region sequence.
    for i in (m.start() for m in re.finditer('CG', upSequence)):
        # NOTE(review): start-1 presumably converts to 0-based genome
        # coordinates -- confirm against the region file convention.
        cpgDin = CpGdinucleotide(cgR.chrom, int(i) + cgR.start - 1, int(i) + cgR.start)
        methCG(cpgDin, filter)  # fills in the methylation coefficients
        cgR.nCG = cgR.nCG + 1
        cgR.cpgList.append(cpgDin)
    # Build the profile in a list and join once: the original repeated
    # string += was quadratic in the number of CpGs.
    profile = []
    infCpG = 0
    cgRAcumMethCoef = 0
    for cpg in cgR.cpgList:
        if cpg.meanMethCoef >= 0:
            infCpG = infCpG + 1
            cgRAcumMethCoef = cgRAcumMethCoef + cpg.meanMethCoef
            if cpg.meanMethCoef <= 0.2:
                profile.append("0")
            elif cpg.meanMethCoef >= 0.8:
                profile.append("1")
            else:
                profile.append("X")
        else:
            profile.append("N")
    if infCpG > 0:
        cgR.methCoef = cgRAcumMethCoef / infCpG
    # print() with a single argument is valid in both Python 2 and 3,
    # unlike the bare `print expr` statement used originally.
    print("%s;%s;%i;%i;%i;%i;%f;%s" % (cgR.id, cgR.chrom, cgR.start, cgR.end, cgR.nCG, infCpG, cgR.methCoef, "".join(profile)))
# Functions from the second level of abstraction
def methCG(cpgDin,filter):
    """Recalculate the methylation coefficients of *cpgDin* in place.

    For each of the dinucleotide's two positions, the read bases piled up
    at exactly that position in the global alignment ``samfile`` are
    collected and counted.  A position's coefficient is C / (C + T),
    computed only when the C + T count reaches *filter*; otherwise the
    coefficient stays at its -1 "not informative" default.  The mean
    coefficient is then taken from whichever positions are informative.

    NOTE(review): C/(C+T) presumably reflects bisulfite-converted reads,
    where a C supports a methylated and a T an unmethylated cytosine --
    confirm against the pipeline's protocol.

    Pre: receives a CpGdinucleotide object and a minimum-read filter value.
    Post: cpgDin.firstPosMethCoef, cpgDin.secondPosMethCoef and
    cpgDin.meanMethCoef are updated from the data in ``samfile``.
    """
    # ---- first position of the CG pair ----
    seq=""
    for pileupcolumn in samfile.pileup(cpgDin.chrom,cpgDin.firstPos,cpgDin.firstPos+1):
        # Only the column at exactly firstPos with coverage >= filter counts.
        if not (pileupcolumn.pos==cpgDin.firstPos and pileupcolumn.n>=filter):
            continue
        for pileupread in pileupcolumn.pileups:
            seq+=pileupread.alignment.seq[pileupread.qpos]
    seq=seq.upper()
    numA=int(seq.count("A"))
    numT=int(seq.count("T"))
    numC=int(seq.count("C"))
    numG=int(seq.count("G"))
    reads=numA+numT+numC+numG  # NOTE(review): computed but never used
    if ((numT+numC)>=filter):
        cpgDin.firstPosMethCoef=(float(numC)/float(numC+numT))

    # ---- second position of the CG pair (same procedure) ----
    seq=""
    for pileupcolumn in samfile.pileup(cpgDin.chrom,cpgDin.secondPos,cpgDin.secondPos+1):
        if not (pileupcolumn.pos==cpgDin.secondPos and pileupcolumn.n>=filter):
            continue
        for pileupread in pileupcolumn.pileups:
            seq+=pileupread.alignment.seq[pileupread.qpos]
    seq=seq.upper()
    numA=int(seq.count("A"))
    numT=int(seq.count("T"))
    numC=int(seq.count("C"))
    numG=int(seq.count("G"))
    reads=numA+numT+numC+numG  # NOTE(review): computed but never used
    if ((numT+numC)>=filter):
        cpgDin.secondPosMethCoef=(float(numC)/float(numC+numT))

    # Combine: use the single informative side, average when both are
    # informative; when neither is, the average of (-1, -1) stays -1.0.
    # (`&` works here because Python bools are ints, though `and` would be
    # the idiomatic choice.)
    if (((cpgDin.firstPosMethCoef)!=-1) & ((cpgDin.secondPosMethCoef)==-1)):
        cpgDin.meanMethCoef=cpgDin.firstPosMethCoef
    elif (((cpgDin.firstPosMethCoef)==-1) & ((cpgDin.secondPosMethCoef)!=-1)):
        cpgDin.meanMethCoef=cpgDin.secondPosMethCoef
    else:
        cpgDin.meanMethCoef=float(cpgDin.firstPosMethCoef+cpgDin.secondPosMethCoef)/2.0
####################
### Main ###########
####################
# Obtain the files
# Command-line arguments: CpG-region file, BAM file, minimum read depth.
regions_path = sys.argv[1]
bam_path = sys.argv[2]
min_reads = int(sys.argv[3])

regions_file = open(regions_path, 'r')
# methCG() reads this alignment handle as a module-level global, so the
# name ``samfile`` must stay as-is.
samfile = pysam.Samfile(bam_path, "rb")

# One whitespace-separated record per line: id chrom start end sequence.
for line in regions_file:
    fields = line.split()
    region = CpGRegion(fields[0], str(fields[1]), int(fields[2]), int(fields[3]), str(fields[4]))
    # methylationProfile(region, min_reads) is the per-region alternative.
    methylationMap(region, min_reads)
|
normal
|
{
"blob_id": "67509ce426fd572b22d5059d98e5439e87cdc591",
"index": 4541,
"step-1": "'''\n@author: Victor Barrera Burgos\nCreated on 09 Feb 2014\nDescription: This script permits the obtention of the\nmethylation profile of a CpGRegion indicating the \nmethylation status of each CpG dinucleotide.\n\nAddition on 02 March 2014\nDescription: permits the obtention of the\nmethylation profile of the whole genome using methylationMap.\n'''\n\n# Imports\n\nimport sys\nimport pysam\nimport re\n\n# Defining types\n# Structures from the first level of abstraction\nclass CpGRegion:\n def __init__(self,id,chrom,start,end,sequence):\n self.id=id\n self.chrom=chrom\n self.start=start\n self.end=end\n self.sequence=sequence\n self.methCoef=-1\n self.nCG=0\n self.cpgList=[]\n\n# Structures from the second level of abstraction\n\nclass CpGdinucleotide:\n def __init__(self,chrom,firstPos,secondPos):\n self.chrom=chrom\n self.firstPos=firstPos\n self.secondPos=secondPos\n self.firstPosMethCoef=-1.0\n self.secondPosMethCoef=-1.0\n self.meanMethCoef=-1.0\n \n\n# Defining functions\n# Functions from the first level of abstraction\n\ndef methylationMap(cgR,filter):\n\n# Pre: The function receives an object from the class CpGRegion and a filter value\n\n upSequence=(cgR.sequence).upper()\n\n# Look for CG positions\n starts = [match.start() for match in re.finditer('CG',upSequence)]\n for i in starts:\n cpgDin=CpGdinucleotide(cgR.chrom,int(i)+cgR.start-1,int(i)+cgR.start)\n# Call the methCG function\n methCG(cpgDin,filter)\n cgR.nCG=cgR.nCG+1\n (cgR.cpgList).append(cpgDin)\n cgRPositions=\"\"\n for j in cgR.cpgList:\n if (j.meanMethCoef>=0):\n if (j.meanMethCoef<=0.2):\n cgRPositions=cgRPositions+str(j.firstPos)+\";\"+\"0\"+\"\\n\"\n elif (j.meanMethCoef>=0.8):\n cgRPositions=cgRPositions+str(j.firstPos)+\";\"+\"1\"+\"\\n\"\n else:\n cgRPositions=cgRPositions+str(j.firstPos)+\";\"+\"X\"+\"\\n\"\n else:\n cgRPositions=cgRPositions+str(j.firstPos)+\";\"+\"N\"+\"\\n\"\n print \"%s\" % (cgRPositions)\n# Post: \n\ndef methylationProfile(cgR,filter):\n\n# Pre: The 
function receives an object from the class CpGRegion and a filter value\n\n upSequence=(cgR.sequence).upper()\n\n# Look for CG positions\n starts = [match.start() for match in re.finditer('CG',upSequence)]\n for i in starts:\n cpgDin=CpGdinucleotide(cgR.chrom,int(i)+cgR.start-1,int(i)+cgR.start)\n# Call the methCG function\n methCG(cpgDin,filter)\n cgR.nCG=cgR.nCG+1\n (cgR.cpgList).append(cpgDin)\n# Generate the profile using the code (0,1,X,N)\n# 0 For unmethylated, 1 for methylated\n# X for intermediate methylation, N for not informative\n cgRProf=\"\"\n infCpG=0\n cgRAcumMethCoef=0\n for j in cgR.cpgList:\n if (j.meanMethCoef>=0):\n infCpG=infCpG+1\n cgRAcumMethCoef=cgRAcumMethCoef+j.meanMethCoef\n if (j.meanMethCoef<=0.2):\n cgRProf=cgRProf+\"0\"\n elif (j.meanMethCoef>=0.8):\n cgRProf=cgRProf+\"1\"\n else:\n cgRProf=cgRProf+\"X\"\n else:\n cgRProf=cgRProf+\"N\"\n if (infCpG>0):\n cgR.methCoef=cgRAcumMethCoef/infCpG\n \n print \"%s;%s;%i;%i;%i;%i;%f;%s\" % (cgR.id,cgR.chrom,cgR.start,cgR.end,cgR.nCG,infCpG,cgR.methCoef,cgRProf)\n# Post: The id, chrom, start, end, total number of CG, number of informative CpG\n# and a ternary profile for each of its CpG corresponding to the CpGRegion object\n# have been printed\n\n# Functions from the second level of abstraction\n\ndef methCG(cpgDin,filter):\n# Pre: The function receives an object from the class CpGdinucleotide and a filter value\n seq=\"\"\n for pileupcolumn in samfile.pileup(cpgDin.chrom,cpgDin.firstPos,cpgDin.firstPos+1):\n if not (pileupcolumn.pos==cpgDin.firstPos and pileupcolumn.n>=filter):\n continue\n for pileupread in pileupcolumn.pileups:\n seq+=pileupread.alignment.seq[pileupread.qpos]\n seq=seq.upper()\n numA=int(seq.count(\"A\"))\n numT=int(seq.count(\"T\"))\n numC=int(seq.count(\"C\"))\n numG=int(seq.count(\"G\"))\n reads=numA+numT+numC+numG\n if ((numT+numC)>=filter):\n cpgDin.firstPosMethCoef=(float(numC)/float(numC+numT))\n\n seq=\"\"\n for pileupcolumn in 
samfile.pileup(cpgDin.chrom,cpgDin.secondPos,cpgDin.secondPos+1):\n if not (pileupcolumn.pos==cpgDin.secondPos and pileupcolumn.n>=filter):\n continue\n for pileupread in pileupcolumn.pileups:\n seq+=pileupread.alignment.seq[pileupread.qpos]\n seq=seq.upper()\n numA=int(seq.count(\"A\"))\n numT=int(seq.count(\"T\"))\n numC=int(seq.count(\"C\"))\n numG=int(seq.count(\"G\"))\n reads=numA+numT+numC+numG\n if ((numT+numC)>=filter):\n cpgDin.secondPosMethCoef=(float(numC)/float(numC+numT))\n \n if (((cpgDin.firstPosMethCoef)!=-1) & ((cpgDin.secondPosMethCoef)==-1)):\n cpgDin.meanMethCoef=cpgDin.firstPosMethCoef\n\n elif (((cpgDin.firstPosMethCoef)==-1) & ((cpgDin.secondPosMethCoef)!=-1)):\n cpgDin.meanMethCoef=cpgDin.secondPosMethCoef\n\n else:\n cpgDin.meanMethCoef=float(cpgDin.firstPosMethCoef+cpgDin.secondPosMethCoef)/2.0\n# Post: The object is returned with its methylation Coefficients recalculated according\n# to the data present in the alignment file and using a minimum read filter.\n \n\n####################\n### Main ###########\n####################\n\n# Obtain the files\ncpgr_sec_path=sys.argv[1]\nsam_path=sys.argv[2]\nfilter=int(sys.argv[3])\n\n# Load the files\ncpgr_sec_file=open(cpgr_sec_path,'r')\nsamfile = pysam.Samfile(sam_path, \"rb\" )\n\nfor cpgr in cpgr_sec_file:\n cgRTuple=cpgr.split()\n cgR=CpGRegion(cgRTuple[0],str(cgRTuple[1]),int(cgRTuple[2]),int(cgRTuple[3]),str(cgRTuple[4]))\n# We can use methylationMap or methylationProfile\n methylationMap(cgR,filter)\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""
> Filename: lv6.py
> Author: Kadrick, BoGwon Kang
> Created at: 2021/10/11 16:07
> Description: zip -- follow the "nothing" chain through channel.zip and
  collect the per-entry zip member comments (the hidden message).
"""
import zipfile
import re

# Accumulated zip-member comments; the puzzle's answer is spread across them.
comments = ""
# Name of the first entry in the chain; each entry's text names the next one.
nothing = "90052.txt"
# The next entry number is the first run of digits in the current entry's text.
target = r"[0-9]+"

# Use a context manager so the archive is closed deterministically
# (the original left the ZipFile open until interpreter exit).
with zipfile.ZipFile('./channel.zip') as zfile:
    while True:
        answer = zfile.read(nothing).decode('utf-8')
        # Collect this entry's zip comment -- the real payload.
        comments += zfile.getinfo(nothing).comment.decode('utf-8')
        print(answer)

        findRet = re.findall(target, answer)
        if len(findRet) == 0:
            # No digits means the chain has ended.
            break
        nothing = findRet[0] + ".txt"

print("answer is ...")
print(comments)
|
normal
|
{
"blob_id": "b1fe7e318c361930c8ad00758bcb86597fd8f3bd",
"index": 2567,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile True:\n answer = zfile.read(nothing).decode('utf-8')\n comments += zfile.getinfo(nothing).comment.decode('utf-8')\n print(answer)\n findRet = re.findall(target, answer)\n if len(findRet) == 0:\n break\n nothing = findRet[0] + '.txt'\nprint('answer is ...')\nprint(comments)\n",
"step-3": "<mask token>\nzfile = zipfile.ZipFile('./channel.zip')\n<mask token>\ncomments = ''\nnothing = '90052.txt'\ntarget = '[0-9]+'\nwhile True:\n answer = zfile.read(nothing).decode('utf-8')\n comments += zfile.getinfo(nothing).comment.decode('utf-8')\n print(answer)\n findRet = re.findall(target, answer)\n if len(findRet) == 0:\n break\n nothing = findRet[0] + '.txt'\nprint('answer is ...')\nprint(comments)\n",
"step-4": "<mask token>\nimport zipfile\nimport re\nzfile = zipfile.ZipFile('./channel.zip')\n<mask token>\ncomments = ''\nnothing = '90052.txt'\ntarget = '[0-9]+'\nwhile True:\n answer = zfile.read(nothing).decode('utf-8')\n comments += zfile.getinfo(nothing).comment.decode('utf-8')\n print(answer)\n findRet = re.findall(target, answer)\n if len(findRet) == 0:\n break\n nothing = findRet[0] + '.txt'\nprint('answer is ...')\nprint(comments)\n",
"step-5": "'''''''''''''''''''''''''''''\n> Filename: lv6.py\n> Author: Kadrick, BoGwon Kang\n> Created at: 2021/10/11 16:07\n> Description: zip\n'''''''''''''''''''''''''''''\nimport zipfile\nimport re\n\n# open zipfile\nzfile = zipfile.ZipFile('./channel.zip')\n\n# check list\n'''\nprint(zfile.namelist())\nprint(zfile.read(\"readme.txt\"))\nprint(zfile.read(\"90052.txt\"))\n'''\n\n# follow nothing & collect the comments\ncomments = \"\"\n\nnothing = \"90052.txt\"\ntarget = r\"[0-9]+\"\n\nwhile True:\n answer = zfile.read(nothing).decode('utf-8')\n # collect comment\n comments += zfile.getinfo(nothing).comment.decode('utf-8')\n print(answer)\n\n findRet = re.findall(target, answer)\n\n if len(findRet) == 0:\n break\n \n nothing = findRet[0] + \".txt\"\n\nprint(\"answer is ...\")\nprint(comments)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import cv2
import numpy as np
# Target capture resolution for the webcam feed.
frameWidth = 640
frameHeight = 480
# capturing Video from Webcam
cap = cv2.VideoCapture(0)
cap.set(3, frameWidth)  # 3 = CAP_PROP_FRAME_WIDTH
cap.set(4, frameHeight)  # 4 = CAP_PROP_FRAME_HEIGHT
cap.set(10, 150)  # 10 = CAP_PROP_BRIGHTNESS -- assumes the backend honors it; TODO confirm
# HSV detection ranges, one per trackable color:
# [h_lo, s_lo, v_lo, h_hi, s_hi, v_hi] (lower bound then upper bound),
# consumed as color[0:3] / color[3:6] in find_color below.
myColors = [[20,40,40,70,255,255],
            [100,169,121,135,255,255],
            [0, 90, 90, 41, 255, 255]]
# BGR draw colors, parallel to myColors (index = color id).
color_value = [[255, 0, 0], [0, 255, 0], [14, 107, 237]]
# Module-level scratch bounding box (shadowed by locals in contour_detect).
x, y, w, h = 0, 0, 0, 0
# Accumulated [x, y, color_index] trail points, redrawn on every frame.
my_points = []
def find_color(img, color_value, myColors):
    """Detect each configured HSV color range in ``img``.

    Draws a filled marker for every range onto the module-global
    ``frame_copy`` (side effect) and returns a list of
    ``[x, y, color_index]`` entries for the ranges actually found.
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    new_points = []
    # enumerate() replaces the original hand-maintained `count` counter.
    for count, color in enumerate(myColors):
        lower = np.array(color[0:3])  # [h_lo, s_lo, v_lo]
        upper = np.array(color[3:6])  # [h_hi, s_hi, v_hi]
        mask = cv2.inRange(hsv, lower, upper)
        x, y = contour_detect(mask)
        cv2.circle(frame_copy, (x, y), 20, color_value[count], -1)
        # (0, 0) is contour_detect's "nothing found" sentinel -- skip it.
        if x != 0 and y != 0:
            new_points.append([x, y, count])
    return new_points
def contour_detect(mask):
    """Return (center_x, top_y) of the last contour in ``mask`` whose area
    exceeds 100 px, or (0, 0) when no contour qualifies."""
    bx = by = bw = bh = 0
    found, _hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                         cv2.CHAIN_APPROX_NONE)
    for contour in found:
        if cv2.contourArea(contour) > 100:
            # Approximate the contour to a polygon before taking its box.
            arc = cv2.arcLength(contour, True)
            poly = cv2.approxPolyDP(contour, 0.01 * arc, True)
            bx, by, bw, bh = cv2.boundingRect(poly)
    return bx + bw // 2, by
def canvas(my_points, color_value):
    """Redraw every stored trail point onto the module-global frame_copy."""
    for px, py, color_idx in my_points:
        cv2.circle(frame_copy, (px, py), 15, color_value[color_idx], -1)
# Main capture loop: read a frame, detect colors, redraw the trail, display.
while True:
    ret, frame = cap.read()
    frame = cv2.flip(frame, 1)  # mirror so on-screen motion matches the user
    frame_copy = frame.copy()

    new_point = find_color(frame, color_value, myColors)
    if len(new_point) != 0:
        for i in new_point:
            my_points.append(i)
    if len(my_points) != 0:
        canvas(my_points, color_value)

    cv2.imshow('frame', frame_copy)
    # BUG FIX: the original used `and`, so `0xFF == ord('q')` was evaluated
    # on its own (always False) and 'q' could never quit the loop.
    # The intended idiom masks waitKey's return to its low byte with `&`.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()  # free the camera device before tearing down the windows
cv2.destroyAllWindows()
|
normal
|
{
"blob_id": "836c1d2083d18c68fe551278d2df4155edc64c8c",
"index": 5298,
"step-1": "<mask token>\n\n\ndef find_color(img, color_value, myColors):\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n count = 0\n new_points = []\n for color in myColors:\n lower = np.array(color[0:3])\n upper = np.array(color[3:6])\n mask = cv2.inRange(hsv, lower, upper)\n x, y = contour_detect(mask)\n cv2.circle(frame_copy, (x, y), 20, color_value[count], -1)\n if x != 0 and y != 0:\n new_points.append([x, y, count])\n count += 1\n return new_points\n\n\ndef contour_detect(mask):\n x, y, w, h = 0, 0, 0, 0\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_NONE)\n for cnt in contours:\n area = cv2.contourArea(cnt)\n if area > 100:\n perimeter = cv2.arcLength(cnt, True)\n approx = cv2.approxPolyDP(cnt, 0.01 * perimeter, True)\n x, y, w, h = cv2.boundingRect(approx)\n return x + w // 2, y\n\n\ndef canvas(my_points, color_value):\n for point in my_points:\n cv2.circle(frame_copy, (point[0], point[1]), 15, color_value[point[\n 2]], -1)\n\n\n<mask token>\n",
"step-2": "<mask token>\ncap.set(3, frameWidth)\ncap.set(4, frameHeight)\ncap.set(10, 150)\n<mask token>\n\n\ndef find_color(img, color_value, myColors):\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n count = 0\n new_points = []\n for color in myColors:\n lower = np.array(color[0:3])\n upper = np.array(color[3:6])\n mask = cv2.inRange(hsv, lower, upper)\n x, y = contour_detect(mask)\n cv2.circle(frame_copy, (x, y), 20, color_value[count], -1)\n if x != 0 and y != 0:\n new_points.append([x, y, count])\n count += 1\n return new_points\n\n\ndef contour_detect(mask):\n x, y, w, h = 0, 0, 0, 0\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_NONE)\n for cnt in contours:\n area = cv2.contourArea(cnt)\n if area > 100:\n perimeter = cv2.arcLength(cnt, True)\n approx = cv2.approxPolyDP(cnt, 0.01 * perimeter, True)\n x, y, w, h = cv2.boundingRect(approx)\n return x + w // 2, y\n\n\ndef canvas(my_points, color_value):\n for point in my_points:\n cv2.circle(frame_copy, (point[0], point[1]), 15, color_value[point[\n 2]], -1)\n\n\nwhile True:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n frame_copy = frame.copy()\n new_point = find_color(frame, color_value, myColors)\n if len(new_point) != 0:\n for i in new_point:\n my_points.append(i)\n if len(my_points) != 0:\n canvas(my_points, color_value)\n cv2.imshow('frame', frame_copy)\n if cv2.waitKey(1) and 255 == ord('q'):\n break\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nframeWidth = 640\nframeHeight = 480\ncap = cv2.VideoCapture(0)\ncap.set(3, frameWidth)\ncap.set(4, frameHeight)\ncap.set(10, 150)\nmyColors = [[20, 40, 40, 70, 255, 255], [100, 169, 121, 135, 255, 255], [0,\n 90, 90, 41, 255, 255]]\ncolor_value = [[255, 0, 0], [0, 255, 0], [14, 107, 237]]\nx, y, w, h = 0, 0, 0, 0\nmy_points = []\n\n\ndef find_color(img, color_value, myColors):\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n count = 0\n new_points = []\n for color in myColors:\n lower = np.array(color[0:3])\n upper = np.array(color[3:6])\n mask = cv2.inRange(hsv, lower, upper)\n x, y = contour_detect(mask)\n cv2.circle(frame_copy, (x, y), 20, color_value[count], -1)\n if x != 0 and y != 0:\n new_points.append([x, y, count])\n count += 1\n return new_points\n\n\ndef contour_detect(mask):\n x, y, w, h = 0, 0, 0, 0\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_NONE)\n for cnt in contours:\n area = cv2.contourArea(cnt)\n if area > 100:\n perimeter = cv2.arcLength(cnt, True)\n approx = cv2.approxPolyDP(cnt, 0.01 * perimeter, True)\n x, y, w, h = cv2.boundingRect(approx)\n return x + w // 2, y\n\n\ndef canvas(my_points, color_value):\n for point in my_points:\n cv2.circle(frame_copy, (point[0], point[1]), 15, color_value[point[\n 2]], -1)\n\n\nwhile True:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n frame_copy = frame.copy()\n new_point = find_color(frame, color_value, myColors)\n if len(new_point) != 0:\n for i in new_point:\n my_points.append(i)\n if len(my_points) != 0:\n canvas(my_points, color_value)\n cv2.imshow('frame', frame_copy)\n if cv2.waitKey(1) and 255 == ord('q'):\n break\ncv2.destroyAllWindows()\n",
"step-4": "import cv2\nimport numpy as np\nframeWidth = 640\nframeHeight = 480\ncap = cv2.VideoCapture(0)\ncap.set(3, frameWidth)\ncap.set(4, frameHeight)\ncap.set(10, 150)\nmyColors = [[20, 40, 40, 70, 255, 255], [100, 169, 121, 135, 255, 255], [0,\n 90, 90, 41, 255, 255]]\ncolor_value = [[255, 0, 0], [0, 255, 0], [14, 107, 237]]\nx, y, w, h = 0, 0, 0, 0\nmy_points = []\n\n\ndef find_color(img, color_value, myColors):\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n count = 0\n new_points = []\n for color in myColors:\n lower = np.array(color[0:3])\n upper = np.array(color[3:6])\n mask = cv2.inRange(hsv, lower, upper)\n x, y = contour_detect(mask)\n cv2.circle(frame_copy, (x, y), 20, color_value[count], -1)\n if x != 0 and y != 0:\n new_points.append([x, y, count])\n count += 1\n return new_points\n\n\ndef contour_detect(mask):\n x, y, w, h = 0, 0, 0, 0\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.\n CHAIN_APPROX_NONE)\n for cnt in contours:\n area = cv2.contourArea(cnt)\n if area > 100:\n perimeter = cv2.arcLength(cnt, True)\n approx = cv2.approxPolyDP(cnt, 0.01 * perimeter, True)\n x, y, w, h = cv2.boundingRect(approx)\n return x + w // 2, y\n\n\ndef canvas(my_points, color_value):\n for point in my_points:\n cv2.circle(frame_copy, (point[0], point[1]), 15, color_value[point[\n 2]], -1)\n\n\nwhile True:\n ret, frame = cap.read()\n frame = cv2.flip(frame, 1)\n frame_copy = frame.copy()\n new_point = find_color(frame, color_value, myColors)\n if len(new_point) != 0:\n for i in new_point:\n my_points.append(i)\n if len(my_points) != 0:\n canvas(my_points, color_value)\n cv2.imshow('frame', frame_copy)\n if cv2.waitKey(1) and 255 == ord('q'):\n break\ncv2.destroyAllWindows()\n",
"step-5": "import cv2\nimport numpy as np\n\nframeWidth = 640\nframeHeight = 480\n\n# capturing Video from Webcam\ncap = cv2.VideoCapture(0)\ncap.set(3, frameWidth)\ncap.set(4, frameHeight)\n\ncap.set(10, 150)\n\nmyColors = [[20,40,40,70,255,255],\n [100,169,121,135,255,255],\n [0, 90, 90, 41, 255, 255]]\ncolor_value = [[255, 0, 0], [0, 255, 0], [14, 107, 237]]\nx, y, w, h = 0, 0, 0, 0\nmy_points = []\n\ndef find_color(img, color_value, myColors):\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n count = 0\n new_points = []\n for color in myColors:\n lower = np.array(color[0:3])\n upper = np.array(color[3:6])\n mask = cv2.inRange(hsv, lower, upper)\n x, y = contour_detect(mask)\n\n cv2.circle(frame_copy, (x,y), 20,color_value[count], -1)\n if x != 0 and y != 0:\n new_points.append([x,y,count])\n count += 1\n return new_points\n\n\n\ndef contour_detect(mask):\n x,y,w,h = 0, 0, 0, 0\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n for cnt in contours:\n area = cv2.contourArea(cnt)\n if area > 100:\n perimeter = cv2.arcLength(cnt, True)\n approx = cv2.approxPolyDP(cnt, 0.01*perimeter, True)\n x, y, w, h = cv2.boundingRect(approx)\n return x + w // 2, y\n\ndef canvas(my_points, color_value):\n for point in my_points:\n cv2.circle(frame_copy, (point[0], point[1]),\n 15, color_value[point[2]], -1)\n\n\nwhile True:\n\n ret, frame = cap.read()\n\n frame = cv2.flip(frame, 1)\n frame_copy = frame.copy()\n\n new_point = find_color(frame, color_value, myColors)\n if len(new_point) != 0:\n for i in new_point:\n my_points.append(i)\n if len(my_points) != 0:\n canvas(my_points, color_value)\n\n cv2.imshow('frame', frame_copy)\n if cv2.waitKey(1) and 0xFF == ord('q'):\n break\n\ncv2.destroyAllWindows()\n\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
# myapp/serializers.py
from rest_framework import serializers
from rest_framework.authtoken.models import Token
from .models import *
# Serializers define the API representation.
class GeneralSerializer(serializers.ModelSerializer):
    """Generic model serializer: exposes every field of whichever model is
    assigned to ``Meta.model`` at runtime, before instantiation."""
    class Meta:
        model = None  # patched in dynamically by the caller
        fields = '__all__'
class V2OfUsersSerializer(serializers.HyperlinkedModelSerializer):
    """Registration serializer for V2OfUsers accounts."""
    class Meta:
        model = V2OfUsers
        fields = ('firstname', 'lastname', 'username', 'email', 'password')
        # Accept the password on input but never echo it back in responses.
        extra_kwargs = {'password': {'write_only': True}}

    def create(self, validated_data):
        # NOTE(review): this builds a `User` (presumably Django's auth user,
        # reaching here via the star import from .models) rather than a
        # V2OfUsers instance, and silently drops firstname/lastname --
        # confirm this is intentional.
        user = User(
            email=validated_data['email'],
            username=validated_data['username']
        )
        user.set_password(validated_data['password'])  # hashes; never stores plaintext
        user.save()
        # Issue an auth token for the new account immediately.
        Token.objects.create(user=user)
        return user
class MeasurementsSerializer(serializers.ModelSerializer):
    """Serializer for Measurements that honors a ``?fields=a,b,c`` query
    parameter, trimming the output down to just the requested fields."""
    class Meta:
        model = Measurements
        fields = '__all__'

    def __init__(self, *args, **kwargs):
        super(MeasurementsSerializer, self).__init__(*args, **kwargs)
        request = self.context.get("request")
        requested = request.query_params.get('fields') if request else None
        if requested:
            keep = set(requested.split(','))
            # Drop every declared field the client did not ask for.
            for name in set(self.fields.keys()) - keep:
                self.fields.pop(name)
# Serializer for counting providers
# and network types, e.g. 2G, 3G, 4G
class CountSerializer(serializers.Serializer):
    """Generic (key, count) pair, e.g. per-provider or per-network tallies."""
    key = serializers.CharField(max_length=20)
    value = serializers.IntegerField()
# Serializer for Mobile Operating System
class OperatingSystemSerializer(serializers.ModelSerializer):
    """Mobile-OS tally: ``key`` is the OS version name, ``value`` its count."""
    value = serializers.CharField(max_length=30)
    # Rendered from the model's `versionname` column.
    key = serializers.CharField(source='versionname', max_length=30)

    class Meta:
        model = Measurements
        fields = ('key', 'value')
# Serializer for Vendors
class VendorsSerializer(serializers.ModelSerializer):
    """Device-vendor tally: ``key`` is the manufacturer, ``value`` its count."""
    value = serializers.CharField(max_length=30)
    # Rendered from the model's `devicemanufacturer` column.
    key = serializers.CharField(source='devicemanufacturer', max_length=30)

    class Meta:
        model = Measurements
        fields = ('key', 'value')
# General Serializer for DownLink and UpLink for all
# Providers and Network Types with date range parameters
class GlobalSerializer(serializers.Serializer):
    """avg/min/max aggregate of down-/up-link stats for one key
    (a provider or a network type), over a caller-supplied date range."""
    key = serializers.CharField(max_length=20)
    avg = serializers.IntegerField()
    # Field names intentionally mirror the SQL aggregates (they shadow
    # the `min`/`max` builtins only as class attributes).
    min = serializers.IntegerField()
    max = serializers.IntegerField()
|
normal
|
{
"blob_id": "44cbe1face91d3ac7edcd93d0b470bce90c8b674",
"index": 2916,
"step-1": "<mask token>\n\n\nclass MeasurementsSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Measurements\n fields = '__all__'\n <mask token>\n\n\nclass CountSerializer(serializers.Serializer):\n key = serializers.CharField(max_length=20)\n value = serializers.IntegerField()\n\n\nclass OperatingSystemSerializer(serializers.ModelSerializer):\n value = serializers.CharField(max_length=30)\n key = serializers.CharField(source='versionname', max_length=30)\n\n\n class Meta:\n model = Measurements\n fields = 'key', 'value'\n\n\nclass VendorsSerializer(serializers.ModelSerializer):\n value = serializers.CharField(max_length=30)\n key = serializers.CharField(source='devicemanufacturer', max_length=30)\n\n\n class Meta:\n model = Measurements\n fields = 'key', 'value'\n\n\nclass GlobalSerializer(serializers.Serializer):\n key = serializers.CharField(max_length=20)\n avg = serializers.IntegerField()\n min = serializers.IntegerField()\n max = serializers.IntegerField()\n",
"step-2": "<mask token>\n\n\nclass MeasurementsSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Measurements\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(MeasurementsSerializer, self).__init__(*args, **kwargs)\n request = self.context.get('request')\n if request and request.query_params.get('fields'):\n fields = request.query_params.get('fields')\n if fields:\n fields = fields.split(',')\n allowed = set(fields)\n existing = set(self.fields.keys())\n for field_name in (existing - allowed):\n self.fields.pop(field_name)\n\n\nclass CountSerializer(serializers.Serializer):\n key = serializers.CharField(max_length=20)\n value = serializers.IntegerField()\n\n\nclass OperatingSystemSerializer(serializers.ModelSerializer):\n value = serializers.CharField(max_length=30)\n key = serializers.CharField(source='versionname', max_length=30)\n\n\n class Meta:\n model = Measurements\n fields = 'key', 'value'\n\n\nclass VendorsSerializer(serializers.ModelSerializer):\n value = serializers.CharField(max_length=30)\n key = serializers.CharField(source='devicemanufacturer', max_length=30)\n\n\n class Meta:\n model = Measurements\n fields = 'key', 'value'\n\n\nclass GlobalSerializer(serializers.Serializer):\n key = serializers.CharField(max_length=20)\n avg = serializers.IntegerField()\n min = serializers.IntegerField()\n max = serializers.IntegerField()\n",
"step-3": "<mask token>\n\n\nclass V2OfUsersSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = V2OfUsers\n fields = 'firstname', 'lastname', 'username', 'email', 'password'\n extra_kwargs = {'password': {'write_only': True}}\n <mask token>\n\n\nclass MeasurementsSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Measurements\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(MeasurementsSerializer, self).__init__(*args, **kwargs)\n request = self.context.get('request')\n if request and request.query_params.get('fields'):\n fields = request.query_params.get('fields')\n if fields:\n fields = fields.split(',')\n allowed = set(fields)\n existing = set(self.fields.keys())\n for field_name in (existing - allowed):\n self.fields.pop(field_name)\n\n\nclass CountSerializer(serializers.Serializer):\n key = serializers.CharField(max_length=20)\n value = serializers.IntegerField()\n\n\nclass OperatingSystemSerializer(serializers.ModelSerializer):\n value = serializers.CharField(max_length=30)\n key = serializers.CharField(source='versionname', max_length=30)\n\n\n class Meta:\n model = Measurements\n fields = 'key', 'value'\n\n\nclass VendorsSerializer(serializers.ModelSerializer):\n value = serializers.CharField(max_length=30)\n key = serializers.CharField(source='devicemanufacturer', max_length=30)\n\n\n class Meta:\n model = Measurements\n fields = 'key', 'value'\n\n\nclass GlobalSerializer(serializers.Serializer):\n key = serializers.CharField(max_length=20)\n avg = serializers.IntegerField()\n min = serializers.IntegerField()\n max = serializers.IntegerField()\n",
"step-4": "<mask token>\n\n\nclass V2OfUsersSerializer(serializers.HyperlinkedModelSerializer):\n\n\n class Meta:\n model = V2OfUsers\n fields = 'firstname', 'lastname', 'username', 'email', 'password'\n extra_kwargs = {'password': {'write_only': True}}\n\n def create(self, validated_data):\n user = User(email=validated_data['email'], username=validated_data[\n 'username'])\n user.set_password(validated_data['password'])\n user.save()\n Token.objects.create(user=user)\n return user\n\n\nclass MeasurementsSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Measurements\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(MeasurementsSerializer, self).__init__(*args, **kwargs)\n request = self.context.get('request')\n if request and request.query_params.get('fields'):\n fields = request.query_params.get('fields')\n if fields:\n fields = fields.split(',')\n allowed = set(fields)\n existing = set(self.fields.keys())\n for field_name in (existing - allowed):\n self.fields.pop(field_name)\n\n\nclass CountSerializer(serializers.Serializer):\n key = serializers.CharField(max_length=20)\n value = serializers.IntegerField()\n\n\nclass OperatingSystemSerializer(serializers.ModelSerializer):\n value = serializers.CharField(max_length=30)\n key = serializers.CharField(source='versionname', max_length=30)\n\n\n class Meta:\n model = Measurements\n fields = 'key', 'value'\n\n\nclass VendorsSerializer(serializers.ModelSerializer):\n value = serializers.CharField(max_length=30)\n key = serializers.CharField(source='devicemanufacturer', max_length=30)\n\n\n class Meta:\n model = Measurements\n fields = 'key', 'value'\n\n\nclass GlobalSerializer(serializers.Serializer):\n key = serializers.CharField(max_length=20)\n avg = serializers.IntegerField()\n min = serializers.IntegerField()\n max = serializers.IntegerField()\n",
"step-5": "# myapp/serializers.py\nfrom rest_framework import serializers\nfrom rest_framework.authtoken.models import Token\nfrom .models import *\n\n\n# Serializers define the API representation.\nclass GeneralSerializer(serializers.ModelSerializer):\n class Meta:\n model = None\n fields = '__all__'\n\n\nclass V2OfUsersSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = V2OfUsers\n fields = ('firstname', 'lastname', 'username', 'email', 'password')\n extra_kwargs = {'password': {'write_only': True}}\n\n def create(self, validated_data):\n user = User(\n email=validated_data['email'],\n username=validated_data['username']\n )\n user.set_password(validated_data['password'])\n user.save()\n Token.objects.create(user=user)\n return user\n\n\nclass MeasurementsSerializer(serializers.ModelSerializer):\n class Meta:\n model = Measurements\n fields = '__all__'\n\n def __init__(self, *args, **kwargs):\n super(MeasurementsSerializer, self).__init__(*args, **kwargs)\n request = self.context.get(\"request\")\n if request and request.query_params.get('fields'):\n fields = request.query_params.get('fields')\n if fields:\n fields = fields.split(',')\n allowed = set(fields)\n existing = set(self.fields.keys())\n for field_name in existing - allowed:\n self.fields.pop(field_name)\n\n\n# Serializer for Counting Providers\n# and Network Type e.g 2G, 3G, 4G\n\n\nclass CountSerializer(serializers.Serializer):\n key = serializers.CharField(max_length=20)\n value = serializers.IntegerField()\n\n# Serializer for Mobile Operating System\n\n\nclass OperatingSystemSerializer(serializers.ModelSerializer):\n value = serializers.CharField(max_length=30)\n key = serializers.CharField(source='versionname', max_length=30)\n\n class Meta:\n model = Measurements\n fields = ('key', 'value')\n\n\n# Serializer for Vendors\n\n\nclass VendorsSerializer(serializers.ModelSerializer):\n value = serializers.CharField(max_length=30)\n key = 
serializers.CharField(source='devicemanufacturer', max_length=30)\n\n class Meta:\n model = Measurements\n fields = ('key', 'value')\n\n\n# General Serializer for DownLink and UpLink for all\n# Providers and Network Types with date range parameters\n\nclass GlobalSerializer(serializers.Serializer):\n key = serializers.CharField(max_length=20)\n avg = serializers.IntegerField()\n min = serializers.IntegerField()\n max = serializers.IntegerField()\n",
"step-ids": [
9,
10,
11,
12,
15
]
}
|
[
9,
10,
11,
12,
15
] |
import unittest
from collections import Counter
class Solution(object):
    def findOriginalArray(self, changed):
        """
        :type changed: List[int]
        :rtype: List[int]

        Recover the original array from a "doubled" array (LeetCode 2007):
        greedily pair each value with its double, smallest values first.
        Returns [] when no valid pairing exists. Sorts `changed` in place,
        matching the original implementation's side effect.
        """
        if len(changed) % 2:
            # An odd number of elements can never split into (x, 2x) pairs.
            return []
        remaining = Counter(changed)
        changed.sort()  # smallest candidates must be consumed first
        original = []
        for value in changed:
            if remaining[value] <= 0:
                continue  # already consumed as some smaller value's double
            remaining[value] -= 1
            twice = 2 * value
            if remaining[twice] > 0:
                remaining[twice] -= 1
                original.append(value)
            else:
                # value has no available double -> no valid original exists.
                return []
        return original
class TestSolution(unittest.TestCase):
    """Unit tests for Solution.findOriginalArray."""

    def test_findOriginalArray(self):
        # LeetCode 2007 example: doubles removed, originals recovered in order.
        expected = [1, 3, 4]
        self.assertEqual(Solution().findOriginalArray([1, 3, 4, 2, 6, 8]),
                         expected)
if __name__ == '__main__':
    # Run the unit tests when this module is executed directly.
    unittest.main()
|
normal
|
{
"blob_id": "d5acda0d5d066d381a7f6310eb4fe6280d7e84de",
"index": 5309,
"step-1": "<mask token>\n\n\nclass TestSolution(unittest.TestCase):\n\n def test_findOriginalArray(self):\n solution = Solution()\n self.assertEqual(solution.findOriginalArray([1, 3, 4, 2, 6, 8]), [1,\n 3, 4])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution(object):\n <mask token>\n\n\nclass TestSolution(unittest.TestCase):\n\n def test_findOriginalArray(self):\n solution = Solution()\n self.assertEqual(solution.findOriginalArray([1, 3, 4, 2, 6, 8]), [1,\n 3, 4])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution(object):\n\n def findOriginalArray(self, changed):\n \"\"\"\n :type changed: List[int]\n :rtype: List[int]\n \"\"\"\n n = len(changed)\n if n % 2 != 0:\n return []\n freq = Counter(changed)\n changed.sort()\n ans = []\n for num in changed:\n if num in freq and freq[num] > 0:\n freq[num] -= 1\n double_num = 2 * num\n if double_num in freq and freq[double_num] > 0:\n ans.append(num)\n freq[double_num] -= 1\n else:\n return []\n return ans\n\n\nclass TestSolution(unittest.TestCase):\n\n def test_findOriginalArray(self):\n solution = Solution()\n self.assertEqual(solution.findOriginalArray([1, 3, 4, 2, 6, 8]), [1,\n 3, 4])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nfrom collections import Counter\n\n\nclass Solution(object):\n\n def findOriginalArray(self, changed):\n \"\"\"\n :type changed: List[int]\n :rtype: List[int]\n \"\"\"\n n = len(changed)\n if n % 2 != 0:\n return []\n freq = Counter(changed)\n changed.sort()\n ans = []\n for num in changed:\n if num in freq and freq[num] > 0:\n freq[num] -= 1\n double_num = 2 * num\n if double_num in freq and freq[double_num] > 0:\n ans.append(num)\n freq[double_num] -= 1\n else:\n return []\n return ans\n\n\nclass TestSolution(unittest.TestCase):\n\n def test_findOriginalArray(self):\n solution = Solution()\n self.assertEqual(solution.findOriginalArray([1, 3, 4, 2, 6, 8]), [1,\n 3, 4])\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": null,
"step-ids": [
2,
3,
5,
6
]
}
|
[
2,
3,
5,
6
] |
# -*- encoding: utf-8 -*-
# @Version : 1.0
# @Time    : 2018/8/29 9:59
# @Author  : wanghuodong
# @note    : Create and show a minimal PyQt5 window
import sys
from PyQt5.QtWidgets import QApplication, QWidget
if __name__ == '__main__':
    # The original annotated each step with no-op bare string literals;
    # those are real (wasted) expression statements, replaced here with
    # proper comments, translated to English. Behavior is unchanged.

    # Every PyQt5 application must create one QApplication; sys.argv
    # forwards the command-line arguments to Qt.
    app = QApplication(sys.argv)

    # QWidget is the base class of all PyQt5 UI classes; a widget created
    # with no parent is used as a top-level window.
    w = QWidget()
    # Size the window to 500x150 px.
    w.resize(500, 150)
    # Move the window to screen coordinates x=300, y=300.
    w.move(300, 300)
    # Title shown in the window's title bar.
    w.setWindowTitle('Simple')
    # Show the widget on screen (until now it only existed in memory).
    w.show()

    # Enter Qt's event loop and exit cleanly with its return code.
    sys.exit(app.exec_())
|
normal
|
{
"blob_id": "6ff300bbd7866466d1992445e46c5ee54f73d0d7",
"index": 9167,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n \"\"\"所有的PyQt5应用必须创建一个应用(Application)对象。sys.argv参数是一个来自命令行的参数列表。Python脚本可以在shell中运行\"\"\"\n app = QApplication(sys.argv)\n \"\"\"Qwidget组件是PyQt5中所有用户界面类的基础类。我们给QWidget提供了默认的构造方法。默认构造方法没有父类。没有父类的widget组件将被作为窗口使用\"\"\"\n w = QWidget()\n \"\"\"resize()方法调整了widget组件的大小。它现在是250px宽,150px高。\"\"\"\n w.resize(500, 150)\n \"\"\"move()方法移动widget组件到一个位置,这个位置是屏幕上x=300,y=300的坐标。\"\"\"\n w.move(300, 300)\n \"\"\"setWindowTitle()设置了我们窗口的标题。这个标题显示在标题栏中。\"\"\"\n w.setWindowTitle('Simple')\n \"\"\"show()方法在屏幕上显示出widget。一个widget对象在内存中创建\"\"\"\n w.show()\n \"\"\"sys.exit()方法确保一个不留垃圾的退出\"\"\"\n sys.exit(app.exec_())\n",
"step-3": "import sys\nfrom PyQt5.QtWidgets import QApplication, QWidget\nif __name__ == '__main__':\n \"\"\"所有的PyQt5应用必须创建一个应用(Application)对象。sys.argv参数是一个来自命令行的参数列表。Python脚本可以在shell中运行\"\"\"\n app = QApplication(sys.argv)\n \"\"\"Qwidget组件是PyQt5中所有用户界面类的基础类。我们给QWidget提供了默认的构造方法。默认构造方法没有父类。没有父类的widget组件将被作为窗口使用\"\"\"\n w = QWidget()\n \"\"\"resize()方法调整了widget组件的大小。它现在是250px宽,150px高。\"\"\"\n w.resize(500, 150)\n \"\"\"move()方法移动widget组件到一个位置,这个位置是屏幕上x=300,y=300的坐标。\"\"\"\n w.move(300, 300)\n \"\"\"setWindowTitle()设置了我们窗口的标题。这个标题显示在标题栏中。\"\"\"\n w.setWindowTitle('Simple')\n \"\"\"show()方法在屏幕上显示出widget。一个widget对象在内存中创建\"\"\"\n w.show()\n \"\"\"sys.exit()方法确保一个不留垃圾的退出\"\"\"\n sys.exit(app.exec_())\n",
"step-4": "# -*- encoding: utf-8 -*-\n# @Version : 1.0 \n# @Time : 2018/8/29 9:59\n# @Author : wanghuodong \n# @note : 生成一个简单窗口\n\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QWidget\n\n\nif __name__ == '__main__':\n\n '''所有的PyQt5应用必须创建一个应用(Application)对象。sys.argv参数是一个来自命令行的参数列表。Python脚本可以在shell中运行'''\n app = QApplication(sys.argv)\n\n '''Qwidget组件是PyQt5中所有用户界面类的基础类。我们给QWidget提供了默认的构造方法。默认构造方法没有父类。没有父类的widget组件将被作为窗口使用'''\n w = QWidget()\n '''resize()方法调整了widget组件的大小。它现在是250px宽,150px高。'''\n w.resize(500, 150)\n '''move()方法移动widget组件到一个位置,这个位置是屏幕上x=300,y=300的坐标。'''\n w.move(300, 300)\n '''setWindowTitle()设置了我们窗口的标题。这个标题显示在标题栏中。'''\n w.setWindowTitle('Simple')\n '''show()方法在屏幕上显示出widget。一个widget对象在内存中创建'''\n w.show()\n\n '''sys.exit()方法确保一个不留垃圾的退出'''\n sys.exit(app.exec_())",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import shell
def executeUpgrade():
    """Upgrade all installed packages via the `pkg` package manager."""
    shell.executeCommand('pkg upgrade')
def executeInstall(pkg_name):
    """Install the package named pkg_name via the `pkg` package manager."""
    shell.executeCommand(' '.join(['pkg', 'install', pkg_name]))
def executeRemove(pkg_name):
    """Remove pkg_name, then prune dependencies that are no longer needed."""
    shell.executeCommand(' '.join(['pkg', 'remove', pkg_name]))
    shell.executeCommand('pkg autoremove')
def executeFindByName(name):
    """Search the package repositories for entries matching name."""
    shell.executeCommand(' '.join(['pkg', 'search', name]))
|
normal
|
{
"blob_id": "db55a603615c7d896569ada84f3110dd6c0ce45f",
"index": 1250,
"step-1": "<mask token>\n\n\ndef executeUpgrade():\n shell.executeCommand('pkg upgrade')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef executeUpgrade():\n shell.executeCommand('pkg upgrade')\n\n\n<mask token>\n\n\ndef executeFindByName(name):\n shell.executeCommand('pkg search ' + name)\n",
"step-3": "<mask token>\n\n\ndef executeUpgrade():\n shell.executeCommand('pkg upgrade')\n\n\n<mask token>\n\n\ndef executeRemove(pkg_name):\n shell.executeCommand('pkg remove ' + pkg_name)\n shell.executeCommand('pkg autoremove')\n\n\ndef executeFindByName(name):\n shell.executeCommand('pkg search ' + name)\n",
"step-4": "<mask token>\n\n\ndef executeUpgrade():\n shell.executeCommand('pkg upgrade')\n\n\ndef executeInstall(pkg_name):\n shell.executeCommand('pkg install ' + pkg_name)\n\n\ndef executeRemove(pkg_name):\n shell.executeCommand('pkg remove ' + pkg_name)\n shell.executeCommand('pkg autoremove')\n\n\ndef executeFindByName(name):\n shell.executeCommand('pkg search ' + name)\n",
"step-5": "import shell\n\n\ndef executeUpgrade():\n shell.executeCommand('pkg upgrade')\n\n\ndef executeInstall(pkg_name):\n shell.executeCommand('pkg install ' + pkg_name)\n\n\ndef executeRemove(pkg_name):\n shell.executeCommand('pkg remove ' + pkg_name)\n shell.executeCommand('pkg autoremove')\n\n\ndef executeFindByName(name):\n shell.executeCommand('pkg search ' + name)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from PyQt5.QtWidgets import *
import sys
import math
Data = ''
class Button:
    """One calculator key: wraps a QPushButton and applies the key's action
    to the shared results display (a QLineEdit) when clicked."""

    def __init__(self, text, results):
        # text may be an int (digit keys) or a str (operator/function keys).
        self.b = QPushButton(str(text))
        self.text = text
        self.results = results
        # A lambda is required so the clicked signal can invoke handleInput
        # with this button's own label as the argument at click time.
        self.b.clicked.connect(lambda: self.handleInput(
            self.text))

    def handleInput(self, v):
        """Update the display in response to key v (int digit or str command).

        Any error (unparsable expression, empty display, domain error) is
        caught by the blanket except and shown as 'INVALID!'.  The module
        global Data is refreshed at the end so the other mode screen can
        pick up the current display text.
        """
        global Data
        try:
            # Clear a previous error message before processing new input.
            if self.results.text() == 'INVALID!':
                self.results.setText("")
            if self.results.text() != '':
                # Ignore an operator/function key pressed directly after an
                # operator.  (NOTE: "MOD" appears twice in this list; the
                # duplicate is harmless.)
                if self.results.text()[-1] in ['*', '+', '-', '/'] and v in ['-', '*', '+', '/', '√', 'CBRT', "SIN",
                                                                             "COS", "LOG", "MOD", "TAN", "MOD"]:
                    return
                elif v == 'CBRT':
                    # Cube root.  A negative operand makes ** (1/3) complex,
                    # so round() raises and the except shows 'INVALID!'.
                    self.results.setText(str(round(float(eval(self.results.text())) ** (1 / 3), 4), ))
                elif v == 'MOD':
                    # NOTE(review): despite the MOD label this computes the
                    # absolute value of the display, not a modulus.
                    if '.' in self.results.text():
                        self.results.setText(str(abs(float(self.results.text()))))
                    else:
                        self.results.setText(str(abs(int(self.results.text()))))
                elif v == 'LOG':
                    # Base-10 log of the absolute value of the expression.
                    self.results.setText(str(math.log10(abs(float(eval(self.results.text()))))))
                elif v == 'SQUARE':
                    if '.' in self.results.text():
                        self.results.setText(str(float(self.results.text()) ** 2))
                    else:
                        self.results.setText(str(int(self.results.text()) ** 2))
                elif v == "SIN":
                    # Trig functions operate in radians on the evaluated expression.
                    self.results.setText(str(math.sin(float(eval(self.results.text())))))
                elif v == "COS":
                    self.results.setText(str(math.cos(float(eval(self.results.text())))))
                elif v == "TAN":
                    self.results.setText(str(math.tan(float(eval(self.results.text())))))
                elif v == 'x!':
                    # Factorial is only defined for integral values.
                    if '.' in str(eval(self.results.text())):
                        self.results.setText("INVALID!")
                    else:
                        self.results.setText(str(math.factorial(abs(int(eval(self.results.text()))))))
                elif self.results.text()[-1] == '/' and v == 0:
                    # Refuse to start a division by zero.
                    return
                elif v == "=":
                    # Trailing operator/decimal point -> incomplete expression.
                    if self.results.text()[-1] in ['*', '-', '.', '+', '/']:
                        return
                    # NOTE(review): eval of the display string -- input here is
                    # keypad-only, but it is still eval.
                    res = eval(self.results.text())
                    self.results.setText(str(res))
                elif v == "AC":
                    self.results.setText("")
                elif v == "DEL":
                    self.results.setText(self.results.text()[:-1])
                elif v == "√" and self.results.text() != '':
                    self.results.setText(str(float(self.results.text()) ** 0.5))
                elif v == "√" and self.results.text() == '':
                    # Unreachable: the enclosing branch already guarantees a
                    # non-empty display.
                    return
                else:
                    # Digits, '.', and operators are appended to the display.
                    current_value = self.results.text()
                    new_value = current_value + str(v)
                    self.results.setText(new_value)
            else:
                # Empty display: only digit keys start a new entry.
                if type(v) == int:
                    current_value = self.results.text()
                    new_value = current_value + str(v)
                    self.results.setText(new_value)
        except:
            # Blanket catch: any parsing/domain error becomes 'INVALID!'.
            self.results.setText("INVALID!")
        Data = self.results.text()
class Widget1():
    """Basic-mode calculator screen: digits, arithmetic and square root."""

    def setup(self, MainWindow, res):
        """Build the basic-mode grid inside MainWindow, seeding the display with res."""
        self.widget = QWidget()
        self.grid = QGridLayout()
        self.results = QLineEdit()
        self.results.setText(res)
        # Mode selector and display occupy the top rows of the grid.
        self.cb = QComboBox()
        self.cb.addItems(["Basic Mode", "Advanced Mode"])
        self.grid.addWidget(self.cb, 0, 1, 1, 2)
        self.grid.addWidget(self.results, 1, 0, 2, 4)
        labels = ["AC", "DEL", "√", "/",
                  7, 8, 9, "*",
                  4, 5, 6, "-",
                  1, 2, 3, "+",
                  0, ".", "="]
        row, col = 3, 0
        for label in labels:
            if col > 3:
                row, col = row + 1, 0
            # The 0 key is double width, so it consumes two grid columns.
            span = 2 if label == 0 else 1
            self.grid.addWidget(Button(label, self.results).b, row, col, 1, span)
            col += span
        self.widget.setLayout(self.grid)
        MainWindow.setCentralWidget(self.widget)
class Widget2():
    """Advanced-mode calculator screen: trig, log, factorial and friends."""

    def setup(self, MainWindow, res):
        """Build the advanced-mode grid inside MainWindow, seeding the display with res."""
        self.widget = QWidget()
        self.grid = QGridLayout()
        self.results = QLineEdit()
        self.results.setText(res)
        # Mode selector and display occupy the top rows of the grid.
        self.cb = QComboBox()
        self.cb.addItems(["Advance Mode", "Normal Mode"])
        self.grid.addWidget(self.cb, 0, 1, 1, 2)
        self.grid.addWidget(self.results, 1, 0, 2, 4)
        labels = ["AC", "DEL", "SIN", "COS",
                  7, 8, 9, "MOD",
                  4, 5, 6, "TAN",
                  1, 2, 3, "LOG",
                  0, "SQUARE", "CBRT", 'x!']
        # Lay the 20 keys out four per row, starting on grid row 3.
        for index, label in enumerate(labels):
            self.grid.addWidget(Button(label, self.results).b,
                                3 + index // 4, index % 4, 1, 1)
        self.widget.setLayout(self.grid)
        MainWindow.setCentralWidget(self.widget)
class MainWindow(QMainWindow):
    """Top-level window; owns both mode screens and swaps between them,
    carrying the display text across via the module-level Data global."""

    def __init__(self):
        super().__init__()
        self.setWindowTitle("Calculator")
        self.widget1 = Widget1()
        self.widget2 = Widget2()
        self.startWidget1("")

    def _activate(self, screen, res, on_mode_change):
        # Shared screen-swap routine: rebuild the screen, publish its display
        # text, hook up the mode combo box, and show the window.
        global Data
        screen.setup(self, res)
        Data = screen.results.text()
        screen.cb.currentIndexChanged.connect(on_mode_change)
        self.show()

    def startWidget1(self, res):
        """Show the basic-mode screen seeded with display text res."""
        self._activate(self.widget1, res, self.selectionchange1)

    def startWidget2(self, res):
        """Show the advanced-mode screen seeded with display text res."""
        self._activate(self.widget2, res, self.selectionchange2)

    def selectionchange1(self, i):
        # Combo box changed on the basic screen -> switch to advanced mode.
        self.startWidget2(Data)

    def selectionchange2(self, i):
        # Combo box changed on the advanced screen -> switch back to basic mode.
        self.startWidget1(Data)
if __name__ == "__main__":
    # Spin up the Qt event loop and hand control to the main window.
    application = QApplication(sys.argv)
    window = MainWindow()
    sys.exit(application.exec_())
|
normal
|
{
"blob_id": "b08cface601ee07125090f3ae03a3120974688f2",
"index": 8765,
"step-1": "<mask token>\n\n\nclass Widget2:\n\n def setup(self, MainWindow, res):\n self.widget = QWidget()\n self.grid = QGridLayout()\n self.results = QLineEdit()\n self.results.setText(res)\n row = 3\n col = 0\n self.cb = QComboBox()\n self.cb.addItems(['Advance Mode', 'Normal Mode'])\n self.grid.addWidget(self.cb, 0, 1, 1, 2)\n self.grid.addWidget(self.results, 1, 0, 2, 4)\n buttons = ['AC', 'DEL', 'SIN', 'COS', 7, 8, 9, 'MOD', 4, 5, 6,\n 'TAN', 1, 2, 3, 'LOG', 0, 'SQUARE', 'CBRT', 'x!']\n for button in buttons:\n if col > 3:\n col = 0\n row += 1\n buttonObject = Button(button, self.results)\n self.grid.addWidget(buttonObject.b, row, col, 1, 1)\n col += 1\n self.widget.setLayout(self.grid)\n MainWindow.setCentralWidget(self.widget)\n\n\nclass MainWindow(QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.setWindowTitle('Calculator')\n self.widget1 = Widget1()\n self.widget2 = Widget2()\n self.startWidget1('')\n\n def startWidget1(self, res):\n global Data\n self.widget1.setup(self, res)\n Data = self.widget1.results.text()\n self.widget1.cb.currentIndexChanged.connect(self.selectionchange1)\n self.show()\n\n def startWidget2(self, res):\n global Data\n self.widget2.setup(self, res)\n Data = self.widget2.results.text()\n self.widget2.cb.currentIndexChanged.connect(self.selectionchange2)\n self.show()\n\n def selectionchange1(self, i):\n global Data\n res = Data\n self.startWidget2(res)\n\n def selectionchange2(self, i):\n global Data\n res = Data\n self.startWidget1(res)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Widget1:\n <mask token>\n\n\nclass Widget2:\n\n def setup(self, MainWindow, res):\n self.widget = QWidget()\n self.grid = QGridLayout()\n self.results = QLineEdit()\n self.results.setText(res)\n row = 3\n col = 0\n self.cb = QComboBox()\n self.cb.addItems(['Advance Mode', 'Normal Mode'])\n self.grid.addWidget(self.cb, 0, 1, 1, 2)\n self.grid.addWidget(self.results, 1, 0, 2, 4)\n buttons = ['AC', 'DEL', 'SIN', 'COS', 7, 8, 9, 'MOD', 4, 5, 6,\n 'TAN', 1, 2, 3, 'LOG', 0, 'SQUARE', 'CBRT', 'x!']\n for button in buttons:\n if col > 3:\n col = 0\n row += 1\n buttonObject = Button(button, self.results)\n self.grid.addWidget(buttonObject.b, row, col, 1, 1)\n col += 1\n self.widget.setLayout(self.grid)\n MainWindow.setCentralWidget(self.widget)\n\n\nclass MainWindow(QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.setWindowTitle('Calculator')\n self.widget1 = Widget1()\n self.widget2 = Widget2()\n self.startWidget1('')\n\n def startWidget1(self, res):\n global Data\n self.widget1.setup(self, res)\n Data = self.widget1.results.text()\n self.widget1.cb.currentIndexChanged.connect(self.selectionchange1)\n self.show()\n\n def startWidget2(self, res):\n global Data\n self.widget2.setup(self, res)\n Data = self.widget2.results.text()\n self.widget2.cb.currentIndexChanged.connect(self.selectionchange2)\n self.show()\n\n def selectionchange1(self, i):\n global Data\n res = Data\n self.startWidget2(res)\n\n def selectionchange2(self, i):\n global Data\n res = Data\n self.startWidget1(res)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Button:\n <mask token>\n <mask token>\n\n\nclass Widget1:\n\n def setup(self, MainWindow, res):\n self.widget = QWidget()\n self.grid = QGridLayout()\n self.results = QLineEdit()\n self.results.setText(res)\n row = 3\n col = 0\n self.cb = QComboBox()\n self.cb.addItems(['Basic Mode', 'Advanced Mode'])\n self.grid.addWidget(self.cb, 0, 1, 1, 2)\n self.grid.addWidget(self.results, 1, 0, 2, 4)\n buttons = ['AC', 'DEL', '√', '/', 7, 8, 9, '*', 4, 5, 6, '-', 1, 2,\n 3, '+', 0, '.', '=']\n for button in buttons:\n if col > 3:\n col = 0\n row += 1\n buttonObject = Button(button, self.results)\n if button == 0:\n self.grid.addWidget(buttonObject.b, row, col, 1, 2)\n col += 1\n else:\n self.grid.addWidget(buttonObject.b, row, col, 1, 1)\n col += 1\n self.widget.setLayout(self.grid)\n MainWindow.setCentralWidget(self.widget)\n\n\nclass Widget2:\n\n def setup(self, MainWindow, res):\n self.widget = QWidget()\n self.grid = QGridLayout()\n self.results = QLineEdit()\n self.results.setText(res)\n row = 3\n col = 0\n self.cb = QComboBox()\n self.cb.addItems(['Advance Mode', 'Normal Mode'])\n self.grid.addWidget(self.cb, 0, 1, 1, 2)\n self.grid.addWidget(self.results, 1, 0, 2, 4)\n buttons = ['AC', 'DEL', 'SIN', 'COS', 7, 8, 9, 'MOD', 4, 5, 6,\n 'TAN', 1, 2, 3, 'LOG', 0, 'SQUARE', 'CBRT', 'x!']\n for button in buttons:\n if col > 3:\n col = 0\n row += 1\n buttonObject = Button(button, self.results)\n self.grid.addWidget(buttonObject.b, row, col, 1, 1)\n col += 1\n self.widget.setLayout(self.grid)\n MainWindow.setCentralWidget(self.widget)\n\n\nclass MainWindow(QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.setWindowTitle('Calculator')\n self.widget1 = Widget1()\n self.widget2 = Widget2()\n self.startWidget1('')\n\n def startWidget1(self, res):\n global Data\n self.widget1.setup(self, res)\n Data = self.widget1.results.text()\n self.widget1.cb.currentIndexChanged.connect(self.selectionchange1)\n self.show()\n\n def 
startWidget2(self, res):\n global Data\n self.widget2.setup(self, res)\n Data = self.widget2.results.text()\n self.widget2.cb.currentIndexChanged.connect(self.selectionchange2)\n self.show()\n\n def selectionchange1(self, i):\n global Data\n res = Data\n self.startWidget2(res)\n\n def selectionchange2(self, i):\n global Data\n res = Data\n self.startWidget1(res)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Button:\n\n def __init__(self, text, results):\n self.b = QPushButton(str(text))\n self.text = text\n self.results = results\n self.b.clicked.connect(lambda : self.handleInput(self.text))\n\n def handleInput(self, v):\n global Data\n try:\n if self.results.text() == 'INVALID!':\n self.results.setText('')\n if self.results.text() != '':\n if self.results.text()[-1] in ['*', '+', '-', '/'] and v in [\n '-', '*', '+', '/', '√', 'CBRT', 'SIN', 'COS', 'LOG',\n 'MOD', 'TAN', 'MOD']:\n return\n elif v == 'CBRT':\n self.results.setText(str(round(float(eval(self.results.\n text())) ** (1 / 3), 4)))\n elif v == 'MOD':\n if '.' in self.results.text():\n self.results.setText(str(abs(float(self.results.\n text()))))\n else:\n self.results.setText(str(abs(int(self.results.text())))\n )\n elif v == 'LOG':\n self.results.setText(str(math.log10(abs(float(eval(self\n .results.text()))))))\n elif v == 'SQUARE':\n if '.' in self.results.text():\n self.results.setText(str(float(self.results.text()) **\n 2))\n else:\n self.results.setText(str(int(self.results.text()) ** 2)\n )\n elif v == 'SIN':\n self.results.setText(str(math.sin(float(eval(self.\n results.text())))))\n elif v == 'COS':\n self.results.setText(str(math.cos(float(eval(self.\n results.text())))))\n elif v == 'TAN':\n self.results.setText(str(math.tan(float(eval(self.\n results.text())))))\n elif v == 'x!':\n if '.' 
in str(eval(self.results.text())):\n self.results.setText('INVALID!')\n else:\n self.results.setText(str(math.factorial(abs(int(\n eval(self.results.text()))))))\n elif self.results.text()[-1] == '/' and v == 0:\n return\n elif v == '=':\n if self.results.text()[-1] in ['*', '-', '.', '+', '/']:\n return\n res = eval(self.results.text())\n self.results.setText(str(res))\n elif v == 'AC':\n self.results.setText('')\n elif v == 'DEL':\n self.results.setText(self.results.text()[:-1])\n elif v == '√' and self.results.text() != '':\n self.results.setText(str(float(self.results.text()) ** 0.5)\n )\n elif v == '√' and self.results.text() == '':\n return\n else:\n current_value = self.results.text()\n new_value = current_value + str(v)\n self.results.setText(new_value)\n elif type(v) == int:\n current_value = self.results.text()\n new_value = current_value + str(v)\n self.results.setText(new_value)\n except:\n self.results.setText('INVALID!')\n Data = self.results.text()\n\n\nclass Widget1:\n\n def setup(self, MainWindow, res):\n self.widget = QWidget()\n self.grid = QGridLayout()\n self.results = QLineEdit()\n self.results.setText(res)\n row = 3\n col = 0\n self.cb = QComboBox()\n self.cb.addItems(['Basic Mode', 'Advanced Mode'])\n self.grid.addWidget(self.cb, 0, 1, 1, 2)\n self.grid.addWidget(self.results, 1, 0, 2, 4)\n buttons = ['AC', 'DEL', '√', '/', 7, 8, 9, '*', 4, 5, 6, '-', 1, 2,\n 3, '+', 0, '.', '=']\n for button in buttons:\n if col > 3:\n col = 0\n row += 1\n buttonObject = Button(button, self.results)\n if button == 0:\n self.grid.addWidget(buttonObject.b, row, col, 1, 2)\n col += 1\n else:\n self.grid.addWidget(buttonObject.b, row, col, 1, 1)\n col += 1\n self.widget.setLayout(self.grid)\n MainWindow.setCentralWidget(self.widget)\n\n\nclass Widget2:\n\n def setup(self, MainWindow, res):\n self.widget = QWidget()\n self.grid = QGridLayout()\n self.results = QLineEdit()\n self.results.setText(res)\n row = 3\n col = 0\n self.cb = QComboBox()\n 
self.cb.addItems(['Advance Mode', 'Normal Mode'])\n self.grid.addWidget(self.cb, 0, 1, 1, 2)\n self.grid.addWidget(self.results, 1, 0, 2, 4)\n buttons = ['AC', 'DEL', 'SIN', 'COS', 7, 8, 9, 'MOD', 4, 5, 6,\n 'TAN', 1, 2, 3, 'LOG', 0, 'SQUARE', 'CBRT', 'x!']\n for button in buttons:\n if col > 3:\n col = 0\n row += 1\n buttonObject = Button(button, self.results)\n self.grid.addWidget(buttonObject.b, row, col, 1, 1)\n col += 1\n self.widget.setLayout(self.grid)\n MainWindow.setCentralWidget(self.widget)\n\n\nclass MainWindow(QMainWindow):\n\n def __init__(self):\n super().__init__()\n self.setWindowTitle('Calculator')\n self.widget1 = Widget1()\n self.widget2 = Widget2()\n self.startWidget1('')\n\n def startWidget1(self, res):\n global Data\n self.widget1.setup(self, res)\n Data = self.widget1.results.text()\n self.widget1.cb.currentIndexChanged.connect(self.selectionchange1)\n self.show()\n\n def startWidget2(self, res):\n global Data\n self.widget2.setup(self, res)\n Data = self.widget2.results.text()\n self.widget2.cb.currentIndexChanged.connect(self.selectionchange2)\n self.show()\n\n def selectionchange1(self, i):\n global Data\n res = Data\n self.startWidget2(res)\n\n def selectionchange2(self, i):\n global Data\n res = Data\n self.startWidget1(res)\n\n\n<mask token>\n",
"step-5": "from PyQt5.QtWidgets import *\nimport sys\nimport math\n\nData = ''\n\n\nclass Button:\n def __init__(self, text, results):\n self.b = QPushButton(str(text))\n self.text = text\n self.results = results\n self.b.clicked.connect(lambda: self.handleInput(\n self.text)) # Important because we need to pass only function name with arguments here that is why we use lambda here\n\n def handleInput(self, v):\n global Data\n try:\n if self.results.text() == 'INVALID!':\n self.results.setText(\"\")\n if self.results.text() != '':\n if self.results.text()[-1] in ['*', '+', '-', '/'] and v in ['-', '*', '+', '/', '√', 'CBRT', \"SIN\",\n \"COS\", \"LOG\", \"MOD\", \"TAN\", \"MOD\"]:\n return\n elif v == 'CBRT':\n self.results.setText(str(round(float(eval(self.results.text())) ** (1 / 3), 4), ))\n elif v == 'MOD':\n if '.' in self.results.text():\n self.results.setText(str(abs(float(self.results.text()))))\n else:\n self.results.setText(str(abs(int(self.results.text()))))\n elif v == 'LOG':\n self.results.setText(str(math.log10(abs(float(eval(self.results.text()))))))\n elif v == 'SQUARE':\n if '.' in self.results.text():\n self.results.setText(str(float(self.results.text()) ** 2))\n else:\n self.results.setText(str(int(self.results.text()) ** 2))\n elif v == \"SIN\":\n self.results.setText(str(math.sin(float(eval(self.results.text())))))\n elif v == \"COS\":\n self.results.setText(str(math.cos(float(eval(self.results.text())))))\n elif v == \"TAN\":\n self.results.setText(str(math.tan(float(eval(self.results.text())))))\n elif v == 'x!':\n if '.' 
in str(eval(self.results.text())):\n self.results.setText(\"INVALID!\")\n else:\n self.results.setText(str(math.factorial(abs(int(eval(self.results.text()))))))\n elif self.results.text()[-1] == '/' and v == 0:\n return\n elif v == \"=\":\n if self.results.text()[-1] in ['*', '-', '.', '+', '/']:\n return\n res = eval(self.results.text())\n self.results.setText(str(res))\n elif v == \"AC\":\n self.results.setText(\"\")\n elif v == \"DEL\":\n self.results.setText(self.results.text()[:-1])\n elif v == \"√\" and self.results.text() != '':\n self.results.setText(str(float(self.results.text()) ** 0.5))\n elif v == \"√\" and self.results.text() == '':\n return\n else:\n current_value = self.results.text()\n new_value = current_value + str(v)\n self.results.setText(new_value)\n else:\n if type(v) == int:\n current_value = self.results.text()\n new_value = current_value + str(v)\n self.results.setText(new_value)\n except:\n self.results.setText(\"INVALID!\")\n Data = self.results.text()\n\n\nclass Widget1():\n def setup(self, MainWindow, res):\n self.widget = QWidget()\n self.grid = QGridLayout()\n self.results = QLineEdit()\n self.results.setText(res)\n\n row = 3\n col = 0\n self.cb = QComboBox()\n self.cb.addItems([\"Basic Mode\", \"Advanced Mode\"])\n self.grid.addWidget(self.cb, 0, 1, 1, 2)\n self.grid.addWidget(self.results, 1, 0, 2, 4)\n buttons = [\"AC\", \"DEL\", \"√\", \"/\",\n 7, 8, 9, \"*\",\n 4, 5, 6, \"-\",\n 1, 2, 3, \"+\",\n 0, \".\", \"=\"]\n for button in buttons:\n if col > 3:\n col = 0\n row += 1\n\n buttonObject = Button(button, self.results)\n\n if button == 0:\n self.grid.addWidget(buttonObject.b, row, col, 1, 2)\n col += 1\n else:\n self.grid.addWidget(buttonObject.b, row, col, 1, 1)\n\n col += 1\n\n self.widget.setLayout(self.grid)\n MainWindow.setCentralWidget(self.widget)\n\n\nclass Widget2():\n def setup(self, MainWindow, res):\n self.widget = QWidget()\n self.grid = QGridLayout()\n self.results = QLineEdit()\n self.results.setText(res)\n\n row = 
3\n col = 0\n self.cb = QComboBox()\n self.cb.addItems([\"Advance Mode\", \"Normal Mode\"])\n self.grid.addWidget(self.cb, 0, 1, 1, 2)\n self.grid.addWidget(self.results, 1, 0, 2, 4)\n buttons = [\"AC\", \"DEL\", \"SIN\", \"COS\",\n 7, 8, 9, \"MOD\",\n 4, 5, 6, \"TAN\",\n 1, 2, 3, \"LOG\",\n 0, \"SQUARE\", \"CBRT\", 'x!']\n for button in buttons:\n if col > 3:\n col = 0\n row += 1\n buttonObject = Button(button, self.results)\n\n self.grid.addWidget(buttonObject.b, row, col, 1, 1)\n\n col += 1\n\n self.widget.setLayout(self.grid)\n MainWindow.setCentralWidget(self.widget)\n\n\nclass MainWindow(QMainWindow):\n def __init__(self):\n super().__init__()\n self.setWindowTitle(\"Calculator\")\n self.widget1 = Widget1()\n self.widget2 = Widget2()\n self.startWidget1(\"\")\n\n def startWidget1(self, res):\n global Data\n self.widget1.setup(self, res)\n Data = self.widget1.results.text()\n self.widget1.cb.currentIndexChanged.connect(self.selectionchange1)\n self.show()\n\n def startWidget2(self, res):\n global Data\n self.widget2.setup(self, res)\n Data = self.widget2.results.text()\n self.widget2.cb.currentIndexChanged.connect(self.selectionchange2)\n self.show()\n\n def selectionchange1(self, i):\n global Data\n res = Data\n self.startWidget2(res)\n\n def selectionchange2(self, i):\n global Data\n res = Data\n self.startWidget1(res)\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n w = MainWindow()\n sys.exit(app.exec_())\n\n\n\n\n",
"step-ids": [
8,
9,
11,
13,
17
]
}
|
[
8,
9,
11,
13,
17
] |
# -*- coding: utf-8 -*-
"""
Animation practical output
The code that follows builds on the "Communications.py" file
Additional code that follows has in part been modified from that of
https://www.geog.leeds.ac.uk/courses/computing/practicals/python/agent-framework/part8/index.html
https://www.geog.leeds.ac.uk/courses/computing/practicals/python/agent-framework/part8/examples/animatedmodel.py
https://www.geog.leeds.ac.uk/courses/computing/practicals/python/agent-framework/part8/examples/animatedmodel2.py
"""
import random
import operator
import matplotlib.pyplot
import matplotlib.animation
import agentframeworkanimate
import csv
# Build the environment grid from the comma-separated raster file; every
# cell is read as a number (QUOTE_NONNUMERIC converts unquoted fields to float).
with open("in.txt", newline="") as raster:
    reader = csv.reader(raster, quoting=csv.QUOTE_NONNUMERIC)
    environment = [list(row) for row in reader]
# Setting initial parameters.
num_of_agents = 10  # agents created per run
num_of_iterations = 100  # model steps executed per animation frame
neighbourhood = 20  # passed to share_with_neighbours -- presumably a sharing radius; confirm in agentframeworkanimate
agents = []
# Variables to animate the model.
fig = matplotlib.pyplot.figure(figsize=(7, 7))
ax = fig.add_axes([0, 0, 1, 1])
ax.set_autoscale_on(False)
# Make the agents.
# Addition of environment as argument for Agent class to allow interaction between agents and environment.
# Addition of agents as argument for Agent class to allow agents to interact with each other.
# NOTE: each Agent receives the shared agents list object itself, so agents
# appended later are visible to every previously created Agent.
for i in range(num_of_agents):
    agents.append(agentframeworkanimate.Agent(environment, agents))
# Flag polled by gen_function; set False inside update() to end the animation.
carry_on = True
# Creating model animation.
def update(frame_number):
    """Advance the model by num_of_iterations steps and redraw the scene.

    Called by FuncAnimation once per frame; frame_number is supplied by the
    animation framework and is not used directly.  Mutates the module-level
    agents/environment state and the carry_on stopping flag.
    """
    fig.clear()
    global carry_on

    # Move the agents, let them eat, and share resources with neighbours.
    for j in range(num_of_iterations):
        # Shuffle so agents are processed in a random order each iteration.
        random.shuffle(agents)
        for i in range(num_of_agents):
            agents[i].move()
            agents[i].eat()
            agents[i].share_with_neighbours(neighbourhood)

            # Stop once an agent's store reaches 100.  Use >= rather than ==:
            # if store can step past 100 or take non-integer values, an exact
            # equality test would never fire and the animation would not halt.
            if agents[i].store >= 100:
                carry_on = False
                print("Stopping condition met")

    # Plot the environment raster and the agents' current positions.
    matplotlib.pyplot.xlim(0, 99)
    matplotlib.pyplot.ylim(0, 99)
    matplotlib.pyplot.imshow(environment)
    for i in range(num_of_agents):
        matplotlib.pyplot.scatter(agents[i].x, agents[i].y)
# Generator that drives the animation's frame sequence: yields up to 100
# frame numbers, ending early once the carry_on flag goes False.
def gen_function(b = [0]):
    # NOTE: parameter b is unused; kept only to preserve the original signature.
    global carry_on
    frame = 0
    while frame < 100 and carry_on:
        yield frame
        frame += 1
# Run the animation until gen_function stops yielding frames (after 100
# frames, or sooner once the stopping condition sets carry_on to False).
# Fixed-length alternative kept for reference:
#animation = matplotlib.animation.FuncAnimation(fig, update, interval=1, repeat=False, frames=10)
animation = matplotlib.animation.FuncAnimation(fig, update, frames=gen_function, repeat=False)
matplotlib.pyplot.show()
# Persist the final (eaten-from) environment grid as comma-separated text.
with open("out.txt", "w", newline="") as finalenviron:
    csv.writer(finalenviron, delimiter=",").writerows(environment)
|
normal
|
{
"blob_id": "4ea266d4f4c18efbba4204d7301652f8966c18a5",
"index": 9724,
"step-1": "<mask token>\n\n\ndef gen_function(b=[0]):\n a = 0\n global carry_on\n while (a < 100) & carry_on:\n yield a\n a = a + 1\n\n\n<mask token>\n",
"step-2": "<mask token>\nwith open('in.txt', newline='') as raster:\n dataset = csv.reader(raster, quoting=csv.QUOTE_NONNUMERIC)\n environment = []\n for row in dataset:\n rowlist = []\n for value in row:\n rowlist.append(value)\n environment.append(rowlist)\n<mask token>\nax.set_autoscale_on(False)\nfor i in range(num_of_agents):\n agents.append(agentframeworkanimate.Agent(environment, agents))\n<mask token>\n\n\ndef update(frame_number):\n fig.clear()\n global carry_on\n for j in range(num_of_iterations):\n random.shuffle(agents)\n for i in range(num_of_agents):\n agents[i].move()\n agents[i].eat()\n agents[i].share_with_neighbours(neighbourhood)\n if agents[i].store == 100:\n carry_on = False\n print('Stopping condition met')\n matplotlib.pyplot.xlim(0, 99)\n matplotlib.pyplot.ylim(0, 99)\n matplotlib.pyplot.imshow(environment)\n for i in range(num_of_agents):\n matplotlib.pyplot.scatter(agents[i].x, agents[i].y)\n\n\ndef gen_function(b=[0]):\n a = 0\n global carry_on\n while (a < 100) & carry_on:\n yield a\n a = a + 1\n\n\n<mask token>\nmatplotlib.pyplot.show()\nwith open('out.txt', 'w', newline='') as finalenviron:\n writer = csv.writer(finalenviron, delimiter=',')\n for row in environment:\n writer.writerow(row)\n",
"step-3": "<mask token>\nwith open('in.txt', newline='') as raster:\n dataset = csv.reader(raster, quoting=csv.QUOTE_NONNUMERIC)\n environment = []\n for row in dataset:\n rowlist = []\n for value in row:\n rowlist.append(value)\n environment.append(rowlist)\nnum_of_agents = 10\nnum_of_iterations = 100\nneighbourhood = 20\nagents = []\nfig = matplotlib.pyplot.figure(figsize=(7, 7))\nax = fig.add_axes([0, 0, 1, 1])\nax.set_autoscale_on(False)\nfor i in range(num_of_agents):\n agents.append(agentframeworkanimate.Agent(environment, agents))\ncarry_on = True\n\n\ndef update(frame_number):\n fig.clear()\n global carry_on\n for j in range(num_of_iterations):\n random.shuffle(agents)\n for i in range(num_of_agents):\n agents[i].move()\n agents[i].eat()\n agents[i].share_with_neighbours(neighbourhood)\n if agents[i].store == 100:\n carry_on = False\n print('Stopping condition met')\n matplotlib.pyplot.xlim(0, 99)\n matplotlib.pyplot.ylim(0, 99)\n matplotlib.pyplot.imshow(environment)\n for i in range(num_of_agents):\n matplotlib.pyplot.scatter(agents[i].x, agents[i].y)\n\n\ndef gen_function(b=[0]):\n a = 0\n global carry_on\n while (a < 100) & carry_on:\n yield a\n a = a + 1\n\n\nanimation = matplotlib.animation.FuncAnimation(fig, update, frames=\n gen_function, repeat=False)\nmatplotlib.pyplot.show()\nwith open('out.txt', 'w', newline='') as finalenviron:\n writer = csv.writer(finalenviron, delimiter=',')\n for row in environment:\n writer.writerow(row)\n",
"step-4": "<mask token>\nimport random\nimport operator\nimport matplotlib.pyplot\nimport matplotlib.animation\nimport agentframeworkanimate\nimport csv\nwith open('in.txt', newline='') as raster:\n dataset = csv.reader(raster, quoting=csv.QUOTE_NONNUMERIC)\n environment = []\n for row in dataset:\n rowlist = []\n for value in row:\n rowlist.append(value)\n environment.append(rowlist)\nnum_of_agents = 10\nnum_of_iterations = 100\nneighbourhood = 20\nagents = []\nfig = matplotlib.pyplot.figure(figsize=(7, 7))\nax = fig.add_axes([0, 0, 1, 1])\nax.set_autoscale_on(False)\nfor i in range(num_of_agents):\n agents.append(agentframeworkanimate.Agent(environment, agents))\ncarry_on = True\n\n\ndef update(frame_number):\n fig.clear()\n global carry_on\n for j in range(num_of_iterations):\n random.shuffle(agents)\n for i in range(num_of_agents):\n agents[i].move()\n agents[i].eat()\n agents[i].share_with_neighbours(neighbourhood)\n if agents[i].store == 100:\n carry_on = False\n print('Stopping condition met')\n matplotlib.pyplot.xlim(0, 99)\n matplotlib.pyplot.ylim(0, 99)\n matplotlib.pyplot.imshow(environment)\n for i in range(num_of_agents):\n matplotlib.pyplot.scatter(agents[i].x, agents[i].y)\n\n\ndef gen_function(b=[0]):\n a = 0\n global carry_on\n while (a < 100) & carry_on:\n yield a\n a = a + 1\n\n\nanimation = matplotlib.animation.FuncAnimation(fig, update, frames=\n gen_function, repeat=False)\nmatplotlib.pyplot.show()\nwith open('out.txt', 'w', newline='') as finalenviron:\n writer = csv.writer(finalenviron, delimiter=',')\n for row in environment:\n writer.writerow(row)\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nAnimation practical output\n\nThe code that follows builds on the \"Communications.py\" file\n\nAdditional code that follows has in part been modified from that of\nhttps://www.geog.leeds.ac.uk/courses/computing/practicals/python/agent-framework/part8/index.html\nhttps://www.geog.leeds.ac.uk/courses/computing/practicals/python/agent-framework/part8/examples/animatedmodel.py\nhttps://www.geog.leeds.ac.uk/courses/computing/practicals/python/agent-framework/part8/examples/animatedmodel2.py\n\"\"\"\n\nimport random\nimport operator\nimport matplotlib.pyplot\nimport matplotlib.animation\nimport agentframeworkanimate\nimport csv\n\n\n# Reading the in.txt file to create the environment.\nwith open(\"in.txt\", newline=\"\") as raster:\n dataset = csv.reader(raster, quoting=csv.QUOTE_NONNUMERIC)\n environment = []\n for row in dataset:\n rowlist = []\n for value in row:\n rowlist.append(value)\n environment.append(rowlist)\n \n# Setting initial parameters.\nnum_of_agents = 10\nnum_of_iterations = 100\nneighbourhood = 20\nagents = []\n\n# Variables to animate the model.\nfig = matplotlib.pyplot.figure(figsize=(7, 7))\nax = fig.add_axes([0, 0, 1, 1])\n\nax.set_autoscale_on(False)\n\n# Make the agents.\n# Addition of environment as argument for Agent class to allow interaction between agents and environment.\n# Addition of agents as argument for Agent class to allow agents to interact with each other.\nfor i in range(num_of_agents):\n agents.append(agentframeworkanimate.Agent(environment, agents))\n\n\ncarry_on = True\n\n# Creating model animation.\ndef update(frame_number):\n fig.clear()\n global carry_on \n\n# Move the agents and store what they eat\n for j in range(num_of_iterations):\n # Shuffle function used to randomise the order agents are processed with each iteration.\n random.shuffle(agents)\n for i in range(num_of_agents):\n agents[i].move()\n agents[i].eat()\n agents[i].share_with_neighbours(neighbourhood)\n \n # Stopping 
condition for animation when all agents have 100 in their store.\n if agents[i].store == 100:\n carry_on = False\n print(\"Stopping condition met\")\n\n # Generate scatterplot of agents after model iterations.\n matplotlib.pyplot.xlim(0, 99)\n matplotlib.pyplot.ylim(0, 99)\n matplotlib.pyplot.imshow(environment) \n for i in range(num_of_agents):\n matplotlib.pyplot.scatter(agents[i].x,agents[i].y)\n \n# Generator function to stop animation.\n# Will stop animation after 10 iterations unless carry_on variable is set to False.\ndef gen_function(b = [0]):\n a = 0\n global carry_on\n while (a < 100) & (carry_on):\n yield a\n a = a + 1 \n\n# Animation will run until generator function condition is met\n#animation = matplotlib.animation.FuncAnimation(fig, update, interval=1, repeat=False, frames=10)\nanimation = matplotlib.animation.FuncAnimation(fig, update, frames=gen_function, repeat=False)\n\nmatplotlib.pyplot.show()\n\n \n# Writing the final environment to a text file.\nwith open(\"out.txt\", \"w\", newline=\"\") as finalenviron:\n writer = csv.writer(finalenviron, delimiter=\",\")\n for row in environment:\n writer.writerow(row)\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
speak = 'speak'
def hacker():
try:
raise speak # go to hacker's except
print 'not reached'
except speak:
print 'Hello world!'
raise speak # go to primate's except
def primate():
try:
hacker()
print 'not reached'
except speak:
print 'Huh?'
raise speak # go to mammal's except
def mammal():
try:
primate()
print 'not reached'
except speak:
print 'Spam!'
|
normal
|
{
"blob_id": "644d0a0d88f1a051e004d271359dcc3df855bd77",
"index": 9020,
"step-1": "speak = 'speak'\n\ndef hacker():\n try:\n raise speak # go to hacker's except \n print 'not reached'\n except speak:\n print 'Hello world!'\n raise speak # go to primate's except\n\ndef primate():\n try:\n hacker()\n print 'not reached'\n except speak:\n print 'Huh?'\n raise speak # go to mammal's except\n\ndef mammal():\n try:\n primate()\n print 'not reached'\n except speak:\n print 'Spam!'\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Benthic Parameters - USEPA OPP defaults from EXAMS
benthic_params = {
"depth": 0.05, # benthic depth (m)
"porosity": 0.65, # benthic porosity
"bulk_density": 1, # bulk density, dry solid mass/total vol (g/cm3)
"froc": 0, # benthic organic carbon fraction
"doc": 5, # benthic dissolved organic carbon content (mg/L)
"bnmas": 0, # benthic biomass intensity (g/m2)
"d_over_dx": 1 # mass transfer coefficient for exchange between benthic and water column (m/s)
# (can be modified later if data exists)
}
def partition_benthic(reach, runoff, runoff_mass, erosion_mass):
from .parameters import soil, stream_channel, benthic
try:
reach = self.region.flow_file.fetch(reach)
q, v, l = reach.q, reach.v, reach.l
except AttributeError:
return None, None, (None, None)
mean_runoff = runoff.mean() # m3/d
baseflow = np.subtract(q, mean_runoff, out=np.zeros(self.i.n_dates), where=(q > mean_runoff))
total_flow = runoff + baseflow
mixing_cell = 40. # meters
cross_section = total_flow / v
width = stream_channel.a * np.power(cross_section, stream_channel.b)
depth = cross_section / width
surface_area = width * l
volume = np.array([(depth * surface_area), # Water column
(benthic.depth * surface_area * benthic.porosity)]) # Benthic zone
# Compute concentration in runoff of runoff mass and erosion mass
runoff_conc = np.divide(runoff_mass, runoff, out=np.zeros(self.i.n_dates), where=(runoff != 0))
daily_conc = np.divide(runoff_mass + erosion_mass, mixing_cell, out=np.zeros(self.i.n_dates),
where=(runoff_mass + erosion_mass > 0.0) & (mixing_cell > 0.0))
# Divide mass loading between water column and benthic zones
mass_input = np.vstack([runoff_mass + ((1. - soil.prben) * erosion_mass), # Water Column
soil.prben * erosion_mass]).T # Benthic
# Partition concentration into benthic and water column concentrations
# This needs to be cleaned up
# Compute benthic solute holding capacity
fw1, fw2, theta, sed_conv_factor, omega = solute_holding_capacity(depth, surface_area, self.i.koc)
k_adj = np.array((total_flow / mixing_cell) + (self.i.deg_photolysis + self.i.deg_hydrolysis) * fw1 + \
(self.i.deg_wc * fw1) + self.i.deg_benthic * (1 - fw1))
aqconc_avg_wb, daily_avg, daily_peak = \
concentration_loop(self.i.n_dates, daily_conc, k_adj, volume,
mass_input, fw1, fw2, omega, theta, self.i.deg_aqueous)
return map(lambda x: x * 1000000., (runoff_conc, aqconc_avg_wb, daily_avg, daily_peak))
@njit
def concentration_loop(n_dates, daily_concentration, k_adj, daily_volume, mass_input, fw1, fw2, omega, theta, deg_aq):
# Beginning day aquatic concentrations, considered Peak Aqueous Daily Conc in Water Column
daily_peak = np.zeros((2, n_dates))
daily_avg = np.zeros((2, n_dates))
aqconc_avg_wb = np.zeros(n_dates)
# Reset starting values
exp_k = np.exp(-k_adj)
aqconc_wb = 0
antecedent_mass = np.zeros(2) # mn
for day in range(daily_concentration.size):
# Add mass input to antecedent mass
daily_mass = antecedent_mass + mass_input[day]
# Convert to aqueous concentrations (peak) at beginning of day
# JCH - fw comes from solute_holding_capacity. Fraction going into each section. Should fw[0] + fw[1] = 1?
daily_peak[0, day] = daily_mass[0] * fw1[day] / daily_volume[day, 0]
daily_peak[1, day] = daily_mass[1] * fw2[day] / daily_volume[day, 1]
# Compute daily average concentration in the water body - when no Benthic layer considered
aqconc_wb += daily_concentration[day] # initial water body concentration for current time step
# Daily avg aq conc in water body, area under curve/t = Ci/k*(1-e^-k), NO benthic
aqconc_avg_wb[day] = aqconc_wb / k_adj[day] * (1 - exp_k[day])
# initial water body concentration for next time step
aqconc_wb *= exp_k[day]
# For simul diffeq soln: mn1,mn2,mavg1,mavg2 = new_aqconc1, new_aqconc2, aqconc_avg1[d], aqconc_avg2[d]
# Note: aqconc_avg1 and aqconc_avg2 are outputted - Daily avg aq conc in WC and Benthic regions
new_aqconc, wc_avg, benthic_avg = simultaneous_diffeq(k_adj[day], deg_aq, omega, theta[day], daily_peak[:, day])
daily_avg[0, day] = wc_avg
daily_avg[1, day] = benthic_avg
# Masses m1 and m2 after time step, t_end
antecedent_mass[0] = new_aqconc[0] / fw1[day] * daily_volume[day, 0]
antecedent_mass[1] = new_aqconc[1] / fw2[day] * daily_volume[day, 1]
return aqconc_avg_wb, daily_avg, daily_peak
@njit
def simultaneous_diffeq(gamma1, gamma2, omega, theta, daily_aq_peak):
"""
ANALYTICAL SOLUTION FOR THE TWO SIMULTANEOUS DIFFERENTIAL EQNS:
dm1/dt = Am1 + Bm2
dm2/dt = Em1 + Fm2
WITH INITIAL VALUES m1 AND m2 FOR m1 AND m2
mn1 IS OUTPUT VALUE FOR m1 AFTER TIME T
mn2 IS OUTPUT VALUE FOR m2 AFTER TIME T
mavg1 IS AVERAGE VALUE OF m1 OVER TIME T
"""
t_end = 86400. # seconds, time step of ONE DAY
m1, m2 = daily_aq_peak
# Calculate constants for simultaneous_diffeq: A,B,E,F
# This reduces the model equivalent parameters to the coefficients needed for solving simultaneous_diffeq
a = -gamma1 - omega * theta
b = omega * theta
e = omega
f = -gamma2 - omega
af = a + f
dif = 4 * ((f * a) - (b * e))
bbb = np.sqrt(af * af - dif)
root1 = (af + bbb) / 2.
root2 = (af - bbb) / 2.
dd = (root1 - a) / b
ee = (root2 - a) / b
ff = ee - dd
x1 = (ee * m1 - m2) / ff
y1 = (m2 - dd * m1) / ff
# Calculate new concentrations for next step
rt1 = root1 * t_end
rt2 = root2 * t_end
exrt1 = np.exp(rt1)
exrt2 = np.exp(rt2)
ccc = x1 * exrt1
ddd = y1 * exrt2
# values for m1 and m2 after time step t_end
mn = np.zeros(2)
mn[0] = ccc + ddd # Water column
mn[1] = dd * ccc + ee * ddd # Benthic
# AVERAGE DAILY CONCENTRATION SOLUTION: set up for daily average, but can be changed by adjusting time step
gx = x1 / root1
hx = y1 / root2
term1 = gx * exrt1 # term3 = -X1/root1*exp(root1*T1)
term2 = hx * exrt2 # term4 = -Y1/root2*exp(root2*T1
term3 = -gx
term4 = -hx
mavg_wc = (term1 + term2 + term3 + term4) / t_end # Water column
mavg_ben = (term1 * dd + term2 * ee + term3 * dd + term4 * ee) / t_end # Benthic
return mn, mavg_wc, mavg_ben
def solute_holding_capacity(depth, surface_area, koc):
"""Calculates Solute Holding capacities and mass transfer between water column and benthic regions"""
from .parameters import benthic, water_column
# Aqueous volumes in each region
vol1 = depth * surface_area # total volume in water column, approximately equal to water volume alone
vol2a = benthic.depth * surface_area # total benthic volume
vol2 = vol2a * benthic.porosity # total benthic pore water volume
# Default EXAMS conditions for partitioning
kow = koc / .35 # DEFAULT EXAMS CONDITION ON Kow p.35
kpdoc1 = kow * .074 # DEFAULT RELATION IN EXAMS (LITTORAL)
kpdoc2 = koc # DEFAULT RELATION IN EXAMS (BENTHIC) p.16 of EXAMS 2.98 (or is it Kow*.46 ?)
xkpb = 0.436 * kow ** .907 # DEFAULT RELATION IN EXAMS
# mass in littoral region
vol1a = depth[0] * surface_area # initial volume corresponding with suspended matter reference
m_sed_1 = water_column.sused * vol1a * .001 # SEDIMENT MASS LITTORAL
m_bio_1 = water_column.plmas * vol1a * .001 # BIOLOGICAL MASS LITTORAL
m_doc_1 = water_column.doc * vol1a * .001 # DOC MASS LITTORAL
# partitioning coefficients of individual media
kd_sed_1 = koc * water_column.froc * .001 # Kd of sediment in littoral [m3/kg]
kd_sed_2 = koc * benthic.froc * .001 # Kd of sediment in benthic
kd_bio = xkpb / 1000. # Kd of biological organisms
kd_doc_1 = kpdoc1 / 1000. # Kd of DOC in littoral region
kd_doc_2 = kpdoc2 / 1000. # Kd of DOC in benthic region
# mass in benthic region
m_sed_2 = benthic.bulk_density * vol2a * 1000. # as defined by EXAMS parameters m_sed_2 = BULKD/PCTWA*VOL2*100000.
m_bio_2 = benthic.bnmas * surface_area * .001
m_doc_2 = benthic.doc * vol2 * .001
# solute holding capacity in regions 1 and 2
capacity_1 = kd_sed_1 * m_sed_1 + kd_bio * m_bio_1 + kd_doc_1 * m_doc_1 + vol1
capacity_2 = kd_sed_2 * m_sed_2 + kd_bio * m_bio_2 + kd_doc_2 * m_doc_2 + vol2
# Fraction going to water column and benthic
fw1 = vol1 / capacity_1 # fw1 is daily, vol1 is daily
fw2 = vol2 / capacity_2
theta = capacity_2 / capacity_1
sed_conv_factor = vol2 / fw2 / m_sed_2 # converts pore water to [Total Conc normalized to sed mass]
# Omega mass transfer - Calculates littoral to benthic mass transfer coefficient
omega = benthic.d_over_dx / benthic.depth # (m3/hr)/(3600 s/hr)
return fw1, fw2, theta, sed_conv_factor, omega
|
normal
|
{
"blob_id": "5890525b16b42578ac06e7ab2170c5613feea0a5",
"index": 6494,
"step-1": "<mask token>\n\n\ndef partition_benthic(reach, runoff, runoff_mass, erosion_mass):\n from .parameters import soil, stream_channel, benthic\n try:\n reach = self.region.flow_file.fetch(reach)\n q, v, l = reach.q, reach.v, reach.l\n except AttributeError:\n return None, None, (None, None)\n mean_runoff = runoff.mean()\n baseflow = np.subtract(q, mean_runoff, out=np.zeros(self.i.n_dates),\n where=q > mean_runoff)\n total_flow = runoff + baseflow\n mixing_cell = 40.0\n cross_section = total_flow / v\n width = stream_channel.a * np.power(cross_section, stream_channel.b)\n depth = cross_section / width\n surface_area = width * l\n volume = np.array([depth * surface_area, benthic.depth * surface_area *\n benthic.porosity])\n runoff_conc = np.divide(runoff_mass, runoff, out=np.zeros(self.i.\n n_dates), where=runoff != 0)\n daily_conc = np.divide(runoff_mass + erosion_mass, mixing_cell, out=np.\n zeros(self.i.n_dates), where=(runoff_mass + erosion_mass > 0.0) & (\n mixing_cell > 0.0))\n mass_input = np.vstack([runoff_mass + (1.0 - soil.prben) * erosion_mass,\n soil.prben * erosion_mass]).T\n fw1, fw2, theta, sed_conv_factor, omega = solute_holding_capacity(depth,\n surface_area, self.i.koc)\n k_adj = np.array(total_flow / mixing_cell + (self.i.deg_photolysis +\n self.i.deg_hydrolysis) * fw1 + self.i.deg_wc * fw1 + self.i.\n deg_benthic * (1 - fw1))\n aqconc_avg_wb, daily_avg, daily_peak = concentration_loop(self.i.\n n_dates, daily_conc, k_adj, volume, mass_input, fw1, fw2, omega,\n theta, self.i.deg_aqueous)\n return map(lambda x: x * 1000000.0, (runoff_conc, aqconc_avg_wb,\n daily_avg, daily_peak))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef partition_benthic(reach, runoff, runoff_mass, erosion_mass):\n from .parameters import soil, stream_channel, benthic\n try:\n reach = self.region.flow_file.fetch(reach)\n q, v, l = reach.q, reach.v, reach.l\n except AttributeError:\n return None, None, (None, None)\n mean_runoff = runoff.mean()\n baseflow = np.subtract(q, mean_runoff, out=np.zeros(self.i.n_dates),\n where=q > mean_runoff)\n total_flow = runoff + baseflow\n mixing_cell = 40.0\n cross_section = total_flow / v\n width = stream_channel.a * np.power(cross_section, stream_channel.b)\n depth = cross_section / width\n surface_area = width * l\n volume = np.array([depth * surface_area, benthic.depth * surface_area *\n benthic.porosity])\n runoff_conc = np.divide(runoff_mass, runoff, out=np.zeros(self.i.\n n_dates), where=runoff != 0)\n daily_conc = np.divide(runoff_mass + erosion_mass, mixing_cell, out=np.\n zeros(self.i.n_dates), where=(runoff_mass + erosion_mass > 0.0) & (\n mixing_cell > 0.0))\n mass_input = np.vstack([runoff_mass + (1.0 - soil.prben) * erosion_mass,\n soil.prben * erosion_mass]).T\n fw1, fw2, theta, sed_conv_factor, omega = solute_holding_capacity(depth,\n surface_area, self.i.koc)\n k_adj = np.array(total_flow / mixing_cell + (self.i.deg_photolysis +\n self.i.deg_hydrolysis) * fw1 + self.i.deg_wc * fw1 + self.i.\n deg_benthic * (1 - fw1))\n aqconc_avg_wb, daily_avg, daily_peak = concentration_loop(self.i.\n n_dates, daily_conc, k_adj, volume, mass_input, fw1, fw2, omega,\n theta, self.i.deg_aqueous)\n return map(lambda x: x * 1000000.0, (runoff_conc, aqconc_avg_wb,\n daily_avg, daily_peak))\n\n\n@njit\ndef concentration_loop(n_dates, daily_concentration, k_adj, daily_volume,\n mass_input, fw1, fw2, omega, theta, deg_aq):\n daily_peak = np.zeros((2, n_dates))\n daily_avg = np.zeros((2, n_dates))\n aqconc_avg_wb = np.zeros(n_dates)\n exp_k = np.exp(-k_adj)\n aqconc_wb = 0\n antecedent_mass = np.zeros(2)\n for day in range(daily_concentration.size):\n 
daily_mass = antecedent_mass + mass_input[day]\n daily_peak[0, day] = daily_mass[0] * fw1[day] / daily_volume[day, 0]\n daily_peak[1, day] = daily_mass[1] * fw2[day] / daily_volume[day, 1]\n aqconc_wb += daily_concentration[day]\n aqconc_avg_wb[day] = aqconc_wb / k_adj[day] * (1 - exp_k[day])\n aqconc_wb *= exp_k[day]\n new_aqconc, wc_avg, benthic_avg = simultaneous_diffeq(k_adj[day],\n deg_aq, omega, theta[day], daily_peak[:, day])\n daily_avg[0, day] = wc_avg\n daily_avg[1, day] = benthic_avg\n antecedent_mass[0] = new_aqconc[0] / fw1[day] * daily_volume[day, 0]\n antecedent_mass[1] = new_aqconc[1] / fw2[day] * daily_volume[day, 1]\n return aqconc_avg_wb, daily_avg, daily_peak\n\n\n<mask token>\n\n\ndef solute_holding_capacity(depth, surface_area, koc):\n \"\"\"Calculates Solute Holding capacities and mass transfer between water column and benthic regions\"\"\"\n from .parameters import benthic, water_column\n vol1 = depth * surface_area\n vol2a = benthic.depth * surface_area\n vol2 = vol2a * benthic.porosity\n kow = koc / 0.35\n kpdoc1 = kow * 0.074\n kpdoc2 = koc\n xkpb = 0.436 * kow ** 0.907\n vol1a = depth[0] * surface_area\n m_sed_1 = water_column.sused * vol1a * 0.001\n m_bio_1 = water_column.plmas * vol1a * 0.001\n m_doc_1 = water_column.doc * vol1a * 0.001\n kd_sed_1 = koc * water_column.froc * 0.001\n kd_sed_2 = koc * benthic.froc * 0.001\n kd_bio = xkpb / 1000.0\n kd_doc_1 = kpdoc1 / 1000.0\n kd_doc_2 = kpdoc2 / 1000.0\n m_sed_2 = benthic.bulk_density * vol2a * 1000.0\n m_bio_2 = benthic.bnmas * surface_area * 0.001\n m_doc_2 = benthic.doc * vol2 * 0.001\n capacity_1 = (kd_sed_1 * m_sed_1 + kd_bio * m_bio_1 + kd_doc_1 *\n m_doc_1 + vol1)\n capacity_2 = (kd_sed_2 * m_sed_2 + kd_bio * m_bio_2 + kd_doc_2 *\n m_doc_2 + vol2)\n fw1 = vol1 / capacity_1\n fw2 = vol2 / capacity_2\n theta = capacity_2 / capacity_1\n sed_conv_factor = vol2 / fw2 / m_sed_2\n omega = benthic.d_over_dx / benthic.depth\n return fw1, fw2, theta, sed_conv_factor, omega\n",
"step-3": "<mask token>\n\n\ndef partition_benthic(reach, runoff, runoff_mass, erosion_mass):\n from .parameters import soil, stream_channel, benthic\n try:\n reach = self.region.flow_file.fetch(reach)\n q, v, l = reach.q, reach.v, reach.l\n except AttributeError:\n return None, None, (None, None)\n mean_runoff = runoff.mean()\n baseflow = np.subtract(q, mean_runoff, out=np.zeros(self.i.n_dates),\n where=q > mean_runoff)\n total_flow = runoff + baseflow\n mixing_cell = 40.0\n cross_section = total_flow / v\n width = stream_channel.a * np.power(cross_section, stream_channel.b)\n depth = cross_section / width\n surface_area = width * l\n volume = np.array([depth * surface_area, benthic.depth * surface_area *\n benthic.porosity])\n runoff_conc = np.divide(runoff_mass, runoff, out=np.zeros(self.i.\n n_dates), where=runoff != 0)\n daily_conc = np.divide(runoff_mass + erosion_mass, mixing_cell, out=np.\n zeros(self.i.n_dates), where=(runoff_mass + erosion_mass > 0.0) & (\n mixing_cell > 0.0))\n mass_input = np.vstack([runoff_mass + (1.0 - soil.prben) * erosion_mass,\n soil.prben * erosion_mass]).T\n fw1, fw2, theta, sed_conv_factor, omega = solute_holding_capacity(depth,\n surface_area, self.i.koc)\n k_adj = np.array(total_flow / mixing_cell + (self.i.deg_photolysis +\n self.i.deg_hydrolysis) * fw1 + self.i.deg_wc * fw1 + self.i.\n deg_benthic * (1 - fw1))\n aqconc_avg_wb, daily_avg, daily_peak = concentration_loop(self.i.\n n_dates, daily_conc, k_adj, volume, mass_input, fw1, fw2, omega,\n theta, self.i.deg_aqueous)\n return map(lambda x: x * 1000000.0, (runoff_conc, aqconc_avg_wb,\n daily_avg, daily_peak))\n\n\n@njit\ndef concentration_loop(n_dates, daily_concentration, k_adj, daily_volume,\n mass_input, fw1, fw2, omega, theta, deg_aq):\n daily_peak = np.zeros((2, n_dates))\n daily_avg = np.zeros((2, n_dates))\n aqconc_avg_wb = np.zeros(n_dates)\n exp_k = np.exp(-k_adj)\n aqconc_wb = 0\n antecedent_mass = np.zeros(2)\n for day in range(daily_concentration.size):\n 
daily_mass = antecedent_mass + mass_input[day]\n daily_peak[0, day] = daily_mass[0] * fw1[day] / daily_volume[day, 0]\n daily_peak[1, day] = daily_mass[1] * fw2[day] / daily_volume[day, 1]\n aqconc_wb += daily_concentration[day]\n aqconc_avg_wb[day] = aqconc_wb / k_adj[day] * (1 - exp_k[day])\n aqconc_wb *= exp_k[day]\n new_aqconc, wc_avg, benthic_avg = simultaneous_diffeq(k_adj[day],\n deg_aq, omega, theta[day], daily_peak[:, day])\n daily_avg[0, day] = wc_avg\n daily_avg[1, day] = benthic_avg\n antecedent_mass[0] = new_aqconc[0] / fw1[day] * daily_volume[day, 0]\n antecedent_mass[1] = new_aqconc[1] / fw2[day] * daily_volume[day, 1]\n return aqconc_avg_wb, daily_avg, daily_peak\n\n\n@njit\ndef simultaneous_diffeq(gamma1, gamma2, omega, theta, daily_aq_peak):\n \"\"\"\n ANALYTICAL SOLUTION FOR THE TWO SIMULTANEOUS DIFFERENTIAL EQNS:\n dm1/dt = Am1 + Bm2\n dm2/dt = Em1 + Fm2\n WITH INITIAL VALUES m1 AND m2 FOR m1 AND m2\n mn1 IS OUTPUT VALUE FOR m1 AFTER TIME T\n mn2 IS OUTPUT VALUE FOR m2 AFTER TIME T\n mavg1 IS AVERAGE VALUE OF m1 OVER TIME T\n \"\"\"\n t_end = 86400.0\n m1, m2 = daily_aq_peak\n a = -gamma1 - omega * theta\n b = omega * theta\n e = omega\n f = -gamma2 - omega\n af = a + f\n dif = 4 * (f * a - b * e)\n bbb = np.sqrt(af * af - dif)\n root1 = (af + bbb) / 2.0\n root2 = (af - bbb) / 2.0\n dd = (root1 - a) / b\n ee = (root2 - a) / b\n ff = ee - dd\n x1 = (ee * m1 - m2) / ff\n y1 = (m2 - dd * m1) / ff\n rt1 = root1 * t_end\n rt2 = root2 * t_end\n exrt1 = np.exp(rt1)\n exrt2 = np.exp(rt2)\n ccc = x1 * exrt1\n ddd = y1 * exrt2\n mn = np.zeros(2)\n mn[0] = ccc + ddd\n mn[1] = dd * ccc + ee * ddd\n gx = x1 / root1\n hx = y1 / root2\n term1 = gx * exrt1\n term2 = hx * exrt2\n term3 = -gx\n term4 = -hx\n mavg_wc = (term1 + term2 + term3 + term4) / t_end\n mavg_ben = (term1 * dd + term2 * ee + term3 * dd + term4 * ee) / t_end\n return mn, mavg_wc, mavg_ben\n\n\ndef solute_holding_capacity(depth, surface_area, koc):\n \"\"\"Calculates Solute Holding capacities 
and mass transfer between water column and benthic regions\"\"\"\n from .parameters import benthic, water_column\n vol1 = depth * surface_area\n vol2a = benthic.depth * surface_area\n vol2 = vol2a * benthic.porosity\n kow = koc / 0.35\n kpdoc1 = kow * 0.074\n kpdoc2 = koc\n xkpb = 0.436 * kow ** 0.907\n vol1a = depth[0] * surface_area\n m_sed_1 = water_column.sused * vol1a * 0.001\n m_bio_1 = water_column.plmas * vol1a * 0.001\n m_doc_1 = water_column.doc * vol1a * 0.001\n kd_sed_1 = koc * water_column.froc * 0.001\n kd_sed_2 = koc * benthic.froc * 0.001\n kd_bio = xkpb / 1000.0\n kd_doc_1 = kpdoc1 / 1000.0\n kd_doc_2 = kpdoc2 / 1000.0\n m_sed_2 = benthic.bulk_density * vol2a * 1000.0\n m_bio_2 = benthic.bnmas * surface_area * 0.001\n m_doc_2 = benthic.doc * vol2 * 0.001\n capacity_1 = (kd_sed_1 * m_sed_1 + kd_bio * m_bio_1 + kd_doc_1 *\n m_doc_1 + vol1)\n capacity_2 = (kd_sed_2 * m_sed_2 + kd_bio * m_bio_2 + kd_doc_2 *\n m_doc_2 + vol2)\n fw1 = vol1 / capacity_1\n fw2 = vol2 / capacity_2\n theta = capacity_2 / capacity_1\n sed_conv_factor = vol2 / fw2 / m_sed_2\n omega = benthic.d_over_dx / benthic.depth\n return fw1, fw2, theta, sed_conv_factor, omega\n",
"step-4": "benthic_params = {'depth': 0.05, 'porosity': 0.65, 'bulk_density': 1,\n 'froc': 0, 'doc': 5, 'bnmas': 0, 'd_over_dx': 1}\n\n\ndef partition_benthic(reach, runoff, runoff_mass, erosion_mass):\n from .parameters import soil, stream_channel, benthic\n try:\n reach = self.region.flow_file.fetch(reach)\n q, v, l = reach.q, reach.v, reach.l\n except AttributeError:\n return None, None, (None, None)\n mean_runoff = runoff.mean()\n baseflow = np.subtract(q, mean_runoff, out=np.zeros(self.i.n_dates),\n where=q > mean_runoff)\n total_flow = runoff + baseflow\n mixing_cell = 40.0\n cross_section = total_flow / v\n width = stream_channel.a * np.power(cross_section, stream_channel.b)\n depth = cross_section / width\n surface_area = width * l\n volume = np.array([depth * surface_area, benthic.depth * surface_area *\n benthic.porosity])\n runoff_conc = np.divide(runoff_mass, runoff, out=np.zeros(self.i.\n n_dates), where=runoff != 0)\n daily_conc = np.divide(runoff_mass + erosion_mass, mixing_cell, out=np.\n zeros(self.i.n_dates), where=(runoff_mass + erosion_mass > 0.0) & (\n mixing_cell > 0.0))\n mass_input = np.vstack([runoff_mass + (1.0 - soil.prben) * erosion_mass,\n soil.prben * erosion_mass]).T\n fw1, fw2, theta, sed_conv_factor, omega = solute_holding_capacity(depth,\n surface_area, self.i.koc)\n k_adj = np.array(total_flow / mixing_cell + (self.i.deg_photolysis +\n self.i.deg_hydrolysis) * fw1 + self.i.deg_wc * fw1 + self.i.\n deg_benthic * (1 - fw1))\n aqconc_avg_wb, daily_avg, daily_peak = concentration_loop(self.i.\n n_dates, daily_conc, k_adj, volume, mass_input, fw1, fw2, omega,\n theta, self.i.deg_aqueous)\n return map(lambda x: x * 1000000.0, (runoff_conc, aqconc_avg_wb,\n daily_avg, daily_peak))\n\n\n@njit\ndef concentration_loop(n_dates, daily_concentration, k_adj, daily_volume,\n mass_input, fw1, fw2, omega, theta, deg_aq):\n daily_peak = np.zeros((2, n_dates))\n daily_avg = np.zeros((2, n_dates))\n aqconc_avg_wb = np.zeros(n_dates)\n exp_k = 
np.exp(-k_adj)\n aqconc_wb = 0\n antecedent_mass = np.zeros(2)\n for day in range(daily_concentration.size):\n daily_mass = antecedent_mass + mass_input[day]\n daily_peak[0, day] = daily_mass[0] * fw1[day] / daily_volume[day, 0]\n daily_peak[1, day] = daily_mass[1] * fw2[day] / daily_volume[day, 1]\n aqconc_wb += daily_concentration[day]\n aqconc_avg_wb[day] = aqconc_wb / k_adj[day] * (1 - exp_k[day])\n aqconc_wb *= exp_k[day]\n new_aqconc, wc_avg, benthic_avg = simultaneous_diffeq(k_adj[day],\n deg_aq, omega, theta[day], daily_peak[:, day])\n daily_avg[0, day] = wc_avg\n daily_avg[1, day] = benthic_avg\n antecedent_mass[0] = new_aqconc[0] / fw1[day] * daily_volume[day, 0]\n antecedent_mass[1] = new_aqconc[1] / fw2[day] * daily_volume[day, 1]\n return aqconc_avg_wb, daily_avg, daily_peak\n\n\n@njit\ndef simultaneous_diffeq(gamma1, gamma2, omega, theta, daily_aq_peak):\n \"\"\"\n ANALYTICAL SOLUTION FOR THE TWO SIMULTANEOUS DIFFERENTIAL EQNS:\n dm1/dt = Am1 + Bm2\n dm2/dt = Em1 + Fm2\n WITH INITIAL VALUES m1 AND m2 FOR m1 AND m2\n mn1 IS OUTPUT VALUE FOR m1 AFTER TIME T\n mn2 IS OUTPUT VALUE FOR m2 AFTER TIME T\n mavg1 IS AVERAGE VALUE OF m1 OVER TIME T\n \"\"\"\n t_end = 86400.0\n m1, m2 = daily_aq_peak\n a = -gamma1 - omega * theta\n b = omega * theta\n e = omega\n f = -gamma2 - omega\n af = a + f\n dif = 4 * (f * a - b * e)\n bbb = np.sqrt(af * af - dif)\n root1 = (af + bbb) / 2.0\n root2 = (af - bbb) / 2.0\n dd = (root1 - a) / b\n ee = (root2 - a) / b\n ff = ee - dd\n x1 = (ee * m1 - m2) / ff\n y1 = (m2 - dd * m1) / ff\n rt1 = root1 * t_end\n rt2 = root2 * t_end\n exrt1 = np.exp(rt1)\n exrt2 = np.exp(rt2)\n ccc = x1 * exrt1\n ddd = y1 * exrt2\n mn = np.zeros(2)\n mn[0] = ccc + ddd\n mn[1] = dd * ccc + ee * ddd\n gx = x1 / root1\n hx = y1 / root2\n term1 = gx * exrt1\n term2 = hx * exrt2\n term3 = -gx\n term4 = -hx\n mavg_wc = (term1 + term2 + term3 + term4) / t_end\n mavg_ben = (term1 * dd + term2 * ee + term3 * dd + term4 * ee) / t_end\n return mn, mavg_wc, 
mavg_ben\n\n\ndef solute_holding_capacity(depth, surface_area, koc):\n \"\"\"Calculates Solute Holding capacities and mass transfer between water column and benthic regions\"\"\"\n from .parameters import benthic, water_column\n vol1 = depth * surface_area\n vol2a = benthic.depth * surface_area\n vol2 = vol2a * benthic.porosity\n kow = koc / 0.35\n kpdoc1 = kow * 0.074\n kpdoc2 = koc\n xkpb = 0.436 * kow ** 0.907\n vol1a = depth[0] * surface_area\n m_sed_1 = water_column.sused * vol1a * 0.001\n m_bio_1 = water_column.plmas * vol1a * 0.001\n m_doc_1 = water_column.doc * vol1a * 0.001\n kd_sed_1 = koc * water_column.froc * 0.001\n kd_sed_2 = koc * benthic.froc * 0.001\n kd_bio = xkpb / 1000.0\n kd_doc_1 = kpdoc1 / 1000.0\n kd_doc_2 = kpdoc2 / 1000.0\n m_sed_2 = benthic.bulk_density * vol2a * 1000.0\n m_bio_2 = benthic.bnmas * surface_area * 0.001\n m_doc_2 = benthic.doc * vol2 * 0.001\n capacity_1 = (kd_sed_1 * m_sed_1 + kd_bio * m_bio_1 + kd_doc_1 *\n m_doc_1 + vol1)\n capacity_2 = (kd_sed_2 * m_sed_2 + kd_bio * m_bio_2 + kd_doc_2 *\n m_doc_2 + vol2)\n fw1 = vol1 / capacity_1\n fw2 = vol2 / capacity_2\n theta = capacity_2 / capacity_1\n sed_conv_factor = vol2 / fw2 / m_sed_2\n omega = benthic.d_over_dx / benthic.depth\n return fw1, fw2, theta, sed_conv_factor, omega\n",
"step-5": "# Benthic Parameters - USEPA OPP defaults from EXAMS\nbenthic_params = {\n \"depth\": 0.05, # benthic depth (m)\n \"porosity\": 0.65, # benthic porosity\n \"bulk_density\": 1, # bulk density, dry solid mass/total vol (g/cm3)\n \"froc\": 0, # benthic organic carbon fraction\n \"doc\": 5, # benthic dissolved organic carbon content (mg/L)\n \"bnmas\": 0, # benthic biomass intensity (g/m2)\n \"d_over_dx\": 1 # mass transfer coefficient for exchange between benthic and water column (m/s)\n # (can be modified later if data exists)\n}\ndef partition_benthic(reach, runoff, runoff_mass, erosion_mass):\n from .parameters import soil, stream_channel, benthic\n\n try:\n reach = self.region.flow_file.fetch(reach)\n q, v, l = reach.q, reach.v, reach.l\n except AttributeError:\n return None, None, (None, None)\n\n mean_runoff = runoff.mean() # m3/d\n baseflow = np.subtract(q, mean_runoff, out=np.zeros(self.i.n_dates), where=(q > mean_runoff))\n total_flow = runoff + baseflow\n mixing_cell = 40. # meters\n cross_section = total_flow / v\n width = stream_channel.a * np.power(cross_section, stream_channel.b)\n depth = cross_section / width\n surface_area = width * l\n volume = np.array([(depth * surface_area), # Water column\n (benthic.depth * surface_area * benthic.porosity)]) # Benthic zone\n\n # Compute concentration in runoff of runoff mass and erosion mass\n runoff_conc = np.divide(runoff_mass, runoff, out=np.zeros(self.i.n_dates), where=(runoff != 0))\n daily_conc = np.divide(runoff_mass + erosion_mass, mixing_cell, out=np.zeros(self.i.n_dates),\n where=(runoff_mass + erosion_mass > 0.0) & (mixing_cell > 0.0))\n\n # Divide mass loading between water column and benthic zones\n mass_input = np.vstack([runoff_mass + ((1. 
- soil.prben) * erosion_mass), # Water Column\n soil.prben * erosion_mass]).T # Benthic\n # Partition concentration into benthic and water column concentrations\n # This needs to be cleaned up\n # Compute benthic solute holding capacity\n fw1, fw2, theta, sed_conv_factor, omega = solute_holding_capacity(depth, surface_area, self.i.koc)\n\n k_adj = np.array((total_flow / mixing_cell) + (self.i.deg_photolysis + self.i.deg_hydrolysis) * fw1 + \\\n (self.i.deg_wc * fw1) + self.i.deg_benthic * (1 - fw1))\n\n aqconc_avg_wb, daily_avg, daily_peak = \\\n concentration_loop(self.i.n_dates, daily_conc, k_adj, volume,\n mass_input, fw1, fw2, omega, theta, self.i.deg_aqueous)\n\n return map(lambda x: x * 1000000., (runoff_conc, aqconc_avg_wb, daily_avg, daily_peak))\n\n@njit\ndef concentration_loop(n_dates, daily_concentration, k_adj, daily_volume, mass_input, fw1, fw2, omega, theta, deg_aq):\n # Beginning day aquatic concentrations, considered Peak Aqueous Daily Conc in Water Column\n daily_peak = np.zeros((2, n_dates))\n daily_avg = np.zeros((2, n_dates))\n aqconc_avg_wb = np.zeros(n_dates)\n\n # Reset starting values\n exp_k = np.exp(-k_adj)\n aqconc_wb = 0\n antecedent_mass = np.zeros(2) # mn\n\n for day in range(daily_concentration.size):\n # Add mass input to antecedent mass\n daily_mass = antecedent_mass + mass_input[day]\n\n # Convert to aqueous concentrations (peak) at beginning of day\n # JCH - fw comes from solute_holding_capacity. Fraction going into each section. 
Should fw[0] + fw[1] = 1?\n daily_peak[0, day] = daily_mass[0] * fw1[day] / daily_volume[day, 0]\n daily_peak[1, day] = daily_mass[1] * fw2[day] / daily_volume[day, 1]\n\n # Compute daily average concentration in the water body - when no Benthic layer considered\n aqconc_wb += daily_concentration[day] # initial water body concentration for current time step\n\n # Daily avg aq conc in water body, area under curve/t = Ci/k*(1-e^-k), NO benthic\n aqconc_avg_wb[day] = aqconc_wb / k_adj[day] * (1 - exp_k[day])\n\n # initial water body concentration for next time step\n aqconc_wb *= exp_k[day]\n\n # For simul diffeq soln: mn1,mn2,mavg1,mavg2 = new_aqconc1, new_aqconc2, aqconc_avg1[d], aqconc_avg2[d]\n # Note: aqconc_avg1 and aqconc_avg2 are outputted - Daily avg aq conc in WC and Benthic regions\n new_aqconc, wc_avg, benthic_avg = simultaneous_diffeq(k_adj[day], deg_aq, omega, theta[day], daily_peak[:, day])\n daily_avg[0, day] = wc_avg\n daily_avg[1, day] = benthic_avg\n\n # Masses m1 and m2 after time step, t_end\n antecedent_mass[0] = new_aqconc[0] / fw1[day] * daily_volume[day, 0]\n antecedent_mass[1] = new_aqconc[1] / fw2[day] * daily_volume[day, 1]\n\n return aqconc_avg_wb, daily_avg, daily_peak\n@njit\ndef simultaneous_diffeq(gamma1, gamma2, omega, theta, daily_aq_peak):\n \"\"\"\n ANALYTICAL SOLUTION FOR THE TWO SIMULTANEOUS DIFFERENTIAL EQNS:\n dm1/dt = Am1 + Bm2\n dm2/dt = Em1 + Fm2\n WITH INITIAL VALUES m1 AND m2 FOR m1 AND m2\n mn1 IS OUTPUT VALUE FOR m1 AFTER TIME T\n mn2 IS OUTPUT VALUE FOR m2 AFTER TIME T\n mavg1 IS AVERAGE VALUE OF m1 OVER TIME T\n \"\"\"\n\n t_end = 86400. 
# seconds, time step of ONE DAY\n m1, m2 = daily_aq_peak\n\n # Calculate constants for simultaneous_diffeq: A,B,E,F\n # This reduces the model equivalent parameters to the coefficients needed for solving simultaneous_diffeq\n a = -gamma1 - omega * theta\n b = omega * theta\n e = omega\n f = -gamma2 - omega\n\n af = a + f\n dif = 4 * ((f * a) - (b * e))\n bbb = np.sqrt(af * af - dif)\n\n root1 = (af + bbb) / 2.\n root2 = (af - bbb) / 2.\n\n dd = (root1 - a) / b\n ee = (root2 - a) / b\n ff = ee - dd\n x1 = (ee * m1 - m2) / ff\n y1 = (m2 - dd * m1) / ff\n\n # Calculate new concentrations for next step\n rt1 = root1 * t_end\n rt2 = root2 * t_end\n exrt1 = np.exp(rt1)\n exrt2 = np.exp(rt2)\n ccc = x1 * exrt1\n ddd = y1 * exrt2\n\n # values for m1 and m2 after time step t_end\n mn = np.zeros(2)\n mn[0] = ccc + ddd # Water column\n mn[1] = dd * ccc + ee * ddd # Benthic\n\n # AVERAGE DAILY CONCENTRATION SOLUTION: set up for daily average, but can be changed by adjusting time step\n gx = x1 / root1\n hx = y1 / root2\n\n term1 = gx * exrt1 # term3 = -X1/root1*exp(root1*T1)\n term2 = hx * exrt2 # term4 = -Y1/root2*exp(root2*T1\n term3 = -gx\n term4 = -hx\n\n mavg_wc = (term1 + term2 + term3 + term4) / t_end # Water column\n mavg_ben = (term1 * dd + term2 * ee + term3 * dd + term4 * ee) / t_end # Benthic\n\n return mn, mavg_wc, mavg_ben\n\n\ndef solute_holding_capacity(depth, surface_area, koc):\n \"\"\"Calculates Solute Holding capacities and mass transfer between water column and benthic regions\"\"\"\n\n from .parameters import benthic, water_column\n\n # Aqueous volumes in each region\n vol1 = depth * surface_area # total volume in water column, approximately equal to water volume alone\n vol2a = benthic.depth * surface_area # total benthic volume\n vol2 = vol2a * benthic.porosity # total benthic pore water volume\n\n # Default EXAMS conditions for partitioning\n kow = koc / .35 # DEFAULT EXAMS CONDITION ON Kow p.35\n kpdoc1 = kow * .074 # DEFAULT RELATION IN EXAMS 
(LITTORAL)\n kpdoc2 = koc # DEFAULT RELATION IN EXAMS (BENTHIC) p.16 of EXAMS 2.98 (or is it Kow*.46 ?)\n xkpb = 0.436 * kow ** .907 # DEFAULT RELATION IN EXAMS\n\n # mass in littoral region\n vol1a = depth[0] * surface_area # initial volume corresponding with suspended matter reference\n m_sed_1 = water_column.sused * vol1a * .001 # SEDIMENT MASS LITTORAL\n m_bio_1 = water_column.plmas * vol1a * .001 # BIOLOGICAL MASS LITTORAL\n m_doc_1 = water_column.doc * vol1a * .001 # DOC MASS LITTORAL\n\n # partitioning coefficients of individual media\n kd_sed_1 = koc * water_column.froc * .001 # Kd of sediment in littoral [m3/kg]\n kd_sed_2 = koc * benthic.froc * .001 # Kd of sediment in benthic\n kd_bio = xkpb / 1000. # Kd of biological organisms\n kd_doc_1 = kpdoc1 / 1000. # Kd of DOC in littoral region\n kd_doc_2 = kpdoc2 / 1000. # Kd of DOC in benthic region\n\n # mass in benthic region\n m_sed_2 = benthic.bulk_density * vol2a * 1000. # as defined by EXAMS parameters m_sed_2 = BULKD/PCTWA*VOL2*100000.\n m_bio_2 = benthic.bnmas * surface_area * .001\n m_doc_2 = benthic.doc * vol2 * .001\n\n # solute holding capacity in regions 1 and 2\n capacity_1 = kd_sed_1 * m_sed_1 + kd_bio * m_bio_1 + kd_doc_1 * m_doc_1 + vol1\n capacity_2 = kd_sed_2 * m_sed_2 + kd_bio * m_bio_2 + kd_doc_2 * m_doc_2 + vol2\n\n # Fraction going to water column and benthic\n fw1 = vol1 / capacity_1 # fw1 is daily, vol1 is daily\n fw2 = vol2 / capacity_2\n\n theta = capacity_2 / capacity_1\n\n sed_conv_factor = vol2 / fw2 / m_sed_2 # converts pore water to [Total Conc normalized to sed mass]\n\n # Omega mass transfer - Calculates littoral to benthic mass transfer coefficient\n omega = benthic.d_over_dx / benthic.depth # (m3/hr)/(3600 s/hr)\n\n return fw1, fw2, theta, sed_conv_factor, omega\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
from adventurelib import *
from horror import *
from dating import *
from popquiz import *
from comedy import *
from island import *
start()
|
normal
|
{
"blob_id": "8a37299154aded37147e1650cbf52a5cdf7d91da",
"index": 4225,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nstart()\n",
"step-3": "from adventurelib import *\nfrom horror import *\nfrom dating import *\nfrom popquiz import *\nfrom comedy import *\nfrom island import *\nstart()\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
""" Compiled: 2020-09-18 10:38:52 """
#__src_file__ = "extensions/AppWorkspaceTools/etc/FAppWorkspaceDesignerNodes.py"
""" Compiled: 2018-06-07 17:06:19 """
#__src_file__ = "extensions/AppWorkspaceTools/etc/FAppWorkspaceDesignerNodes.py"
import acm
import FUxCore
import Contracts_AppConfig_Messages_AppWorkspace as AppWorkspace
class NodeBase():
    """Common base for designer tree nodes.

    Stores the icon resource name and defines the Label() interface
    that every concrete node must implement.
    """

    def __init__(self, icon=''):
        # Icon resource name shown next to the node in the tree.
        self.icon = icon

    def Label(self):
        """Return the display text for this node (subclass responsibility)."""
        raise NotImplementedError('Label')

    def Icon(self):
        """Return the icon resource name supplied at construction."""
        return self.icon
class WorkspaceNode(NodeBase):
    """Tree node representing a whole workspace definition."""

    def __init__(self, workspace, label):
        NodeBase.__init__(self, 'FWorkspace')
        self.contents = workspace  # workspace message this node edits
        self.label = label         # caption shown in the tree

    def Label(self):
        """Return the caption supplied at construction."""
        return self.label

    def OnSelection(self, treePanel):
        """Show the workspace editing controls for this node."""
        # TODO Don't navigate to siblings, go through parent
        panel = treePanel.parent.workspacePanel
        panel.nodeData = self
        panel.SetupWorkspaceControls()
class DashboardTabNode(NodeBase):
    """Tree node for one dashboard tab; decodes its protobuf payloads."""

    def __init__(self, tabContent):
        NodeBase.__init__(self, 'WindowSwitch')
        self.tabContent = tabContent
        # Decode the serialized payloads up front so the editing panel
        # can work with message objects directly.
        self.contents = self.Contents()
        self.userSettings = self.Settings()

    def Label(self):
        """Return the tab caption, or a placeholder when it is empty."""
        caption = self.tabContent.caption.encode('utf-8')
        return caption or '<Dashboard>'

    def Contents(self):
        """Parse and return the tab's DashboardContent message."""
        msg = AppWorkspace.DashboardContent()
        msg.ParseFromString(self.tabContent.contents)
        return msg

    def Settings(self):
        """Parse and return the tab's DashboardSettings message."""
        msg = AppWorkspace.DashboardSettings()
        msg.ParseFromString(self.tabContent.userSettings)
        return msg

    def OnSelection(self, treePanel):
        """Show the dashboard tab controls for this node."""
        panel = treePanel.parent.workspacePanel
        panel.nodeData = self
        panel.SetupDashboardTabControls()
class WorkbenchTabNode(NodeBase):
    """Tree node for one workbench tab; decodes its protobuf payloads."""

    def __init__(self, tabContent):
        NodeBase.__init__(self, 'Layout')
        self.tabContent = tabContent
        # Decode the serialized payloads once so editors get message objects.
        self.contents = self.Contents()
        self.userSettings = self.Settings()

    def Label(self):
        """Return the tab caption, or a placeholder when it is empty."""
        caption = self.tabContent.caption.encode('utf-8')
        return caption or '<Workbench>'

    def Contents(self):
        """Parse and return the tab's WorkbenchContent message."""
        msg = AppWorkspace.WorkbenchContent()
        msg.ParseFromString(self.tabContent.contents)
        return msg

    def Settings(self):
        """Parse and return the tab's WorkbenchSettings message."""
        msg = AppWorkspace.WorkbenchSettings()
        msg.ParseFromString(self.tabContent.userSettings)
        return msg

    def OnSelection(self, treePanel):
        """Show the workbench tab controls; the editor works on the raw tab."""
        panel = treePanel.parent.workspacePanel
        panel.nodeData = self.tabContent
        panel.SetupWorkbenchTabControls()
class DashboardPartNode(NodeBase):
    """Tree node for a single part (view) hosted in a dashboard tab."""

    def __init__(self, part, settings, label=None):
        NodeBase.__init__(self, 'FExtension')
        # NOTE(review): the `label` argument is accepted but never used;
        # Label() always derives its text from the part's view. Confirm
        # with callers whether it was meant to override the caption.
        self.part = part
        self.settings = settings

    def Label(self):
        """Return the view caption (or view name), or a placeholder."""
        view = self.part.view
        text = view.caption if view.HasField('caption') and view.caption else view.viewName
        return text.encode('utf-8') or '<Part>'

    def OnSelection(self, treePanel):
        """Show the dashboard part controls for this node."""
        panel = treePanel.parent.workspacePanel
        panel.nodeData = self
        panel.SetupDashboardPartControls()
class DockPartNode(NodeBase):
    """Tree node for a part docked in a section around the main view."""

    def __init__(self, part):
        NodeBase.__init__(self, 'FExtension')
        self.part = part

    def Label(self):
        """Return the view caption (or view name), or a placeholder."""
        view = self.part.view
        text = view.caption if view.HasField('caption') and view.caption else view.viewName
        return text.encode('utf-8') or '<Part>'

    def GetButtons(self):
        """Return the part's selection-action buttons."""
        return self.part.selectionActionButtons

    def OnSelection(self, treePanel):
        """Show the dock-section part controls; editors work on the raw part."""
        panel = treePanel.parent.workspacePanel
        panel.nodeData = self.part
        panel.SetupDockSectionPartControls()
class MainViewNode(NodeBase):
    """Tree node for the workspace's central (main) view."""

    def __init__(self, view):
        NodeBase.__init__(self, 'DisplayTabs')
        self.view = view

    def Label(self):
        """The main view node has a fixed caption."""
        return 'Main View'

    def OnSelection(self, treePanel):
        """Show the main-view controls; the editor works on the view itself."""
        panel = treePanel.parent.workspacePanel
        panel.nodeData = self.view
        panel.SetupMainViewControls()
class DockSectionNode(NodeBase):
    """Tree node for one docking section of the workspace layout."""

    def __init__(self, label, icon, contents, settings):
        NodeBase.__init__(self, icon)
        self.label = label        # caption shown in the tree
        self.contents = contents  # section content definition
        self.settings = settings  # section user settings

    def Label(self):
        """Return the caption supplied at construction."""
        return self.label

    def OnSelection(self, treePanel):
        """Show the dock-section controls for this node."""
        panel = treePanel.parent.workspacePanel
        panel.nodeData = self
        panel.SetupDockSectionControls()
class ToolbarNode(NodeBase):
    """Tree node for the workspace toolbar."""

    def __init__(self, toolbar):
        NodeBase.__init__(self, 'InstrumentAppBkg+SettingsOverlay')
        self.toolbar = toolbar

    def Label(self):
        """The toolbar node has a fixed caption."""
        return 'Toolbar'

    def GetButtons(self):
        """Return the toolbar's button list."""
        return self.toolbar.buttons

    def OnSelection(self, treePanel):
        """Show the toolbar controls; the editor works on the quick-open part."""
        panel = treePanel.parent.workspacePanel
        panel.nodeData = self.toolbar.quickOpen
        panel.SetupToolbarControls()
class ButtonNode(NodeBase):
    """Tree node for a single action button (toolbar or dock part)."""

    def __init__(self, button, isDockPartNode):
        NodeBase.__init__(self, 'TradeEntryApp')
        self.button = button
        # True when the button belongs to a dock part rather than the toolbar.
        self.isDockPartNode = isDockPartNode

    def Label(self):
        """Return the button label, or a placeholder when it is unset."""
        text = self.button.HasField('label') and self.button.label.encode('utf-8')
        return text or '<Buttons>'

    def OnSelection(self, treePanel):
        """Show the button controls, tailored to the button's owner kind."""
        panel = treePanel.parent.workspacePanel
        panel.nodeData = self.button
        panel.SetupButtonControls(self.isDockPartNode)
|
normal
|
{
"blob_id": "f80de2b069cf1dee2e665556262c6e84ce04b208",
"index": 1244,
"step-1": "<mask token>\n\n\nclass MainViewNode(NodeBase):\n <mask token>\n\n def Label(self):\n return 'Main View'\n <mask token>\n\n\nclass DockSectionNode(NodeBase):\n\n def __init__(self, label, icon, contents, settings):\n NodeBase.__init__(self, icon)\n self.label = label\n self.contents = contents\n self.settings = settings\n\n def Label(self):\n return self.label\n\n def OnSelection(self, treePanel):\n workspace = treePanel.parent.workspacePanel\n workspace.nodeData = self\n workspace.SetupDockSectionControls()\n\n\nclass ToolbarNode(NodeBase):\n\n def __init__(self, toolbar):\n NodeBase.__init__(self, 'InstrumentAppBkg+SettingsOverlay')\n self.toolbar = toolbar\n\n def Label(self):\n return 'Toolbar'\n\n def GetButtons(self):\n return self.toolbar.buttons\n\n def OnSelection(self, treePanel):\n workspacePanel = treePanel.parent.workspacePanel\n workspacePanel.nodeData = self.toolbar.quickOpen\n workspacePanel.SetupToolbarControls()\n\n\nclass ButtonNode(NodeBase):\n\n def __init__(self, button, isDockPartNode):\n NodeBase.__init__(self, 'TradeEntryApp')\n self.button = button\n self.isDockPartNode = isDockPartNode\n\n def Label(self):\n label = self.button.HasField('label') and self.button.label.encode(\n 'utf-8')\n return label or '<Buttons>'\n\n def OnSelection(self, treePanel):\n workspacePanel = treePanel.parent.workspacePanel\n workspacePanel.nodeData = self.button\n workspacePanel.SetupButtonControls(self.isDockPartNode)\n",
"step-2": "<mask token>\n\n\nclass DashboardTabNode(NodeBase):\n <mask token>\n\n def Label(self):\n label = self.tabContent.caption.encode('utf-8')\n return label or '<Dashboard>'\n\n def Contents(self):\n contents = AppWorkspace.DashboardContent()\n contents.ParseFromString(self.tabContent.contents)\n return contents\n\n def Settings(self):\n userSettings = AppWorkspace.DashboardSettings()\n userSettings.ParseFromString(self.tabContent.userSettings)\n return userSettings\n\n def OnSelection(self, treePanel):\n workspacePanel = treePanel.parent.workspacePanel\n workspacePanel.nodeData = self\n workspacePanel.SetupDashboardTabControls()\n\n\nclass WorkbenchTabNode(NodeBase):\n\n def __init__(self, tabContent):\n NodeBase.__init__(self, 'Layout')\n self.tabContent = tabContent\n self.contents = self.Contents()\n self.userSettings = self.Settings()\n\n def Label(self):\n label = self.tabContent.caption.encode('utf-8')\n return label or '<Workbench>'\n\n def Contents(self):\n contents = AppWorkspace.WorkbenchContent()\n contents.ParseFromString(self.tabContent.contents)\n return contents\n\n def Settings(self):\n userSettings = AppWorkspace.WorkbenchSettings()\n userSettings.ParseFromString(self.tabContent.userSettings)\n return userSettings\n\n def OnSelection(self, treePanel):\n workspacePanel = treePanel.parent.workspacePanel\n workspacePanel.nodeData = self.tabContent\n workspacePanel.SetupWorkbenchTabControls()\n\n\nclass DashboardPartNode(NodeBase):\n\n def __init__(self, part, settings, label=None):\n NodeBase.__init__(self, 'FExtension')\n self.part = part\n self.settings = settings\n\n def Label(self):\n v = self.part.view\n label = v.caption if v.HasField('caption'\n ) and v.caption else v.viewName\n return label.encode('utf-8') or '<Part>'\n\n def OnSelection(self, treePanel):\n workspacePanel = treePanel.parent.workspacePanel\n workspacePanel.nodeData = self\n workspacePanel.SetupDashboardPartControls()\n\n\nclass DockPartNode(NodeBase):\n\n def 
__init__(self, part):\n NodeBase.__init__(self, 'FExtension')\n self.part = part\n\n def Label(self):\n v = self.part.view\n label = v.caption if v.HasField('caption'\n ) and v.caption else v.viewName\n return label.encode('utf-8') or '<Part>'\n\n def GetButtons(self):\n return self.part.selectionActionButtons\n\n def OnSelection(self, treePanel):\n workspacePanel = treePanel.parent.workspacePanel\n workspacePanel.nodeData = self.part\n workspacePanel.SetupDockSectionPartControls()\n\n\nclass MainViewNode(NodeBase):\n\n def __init__(self, view):\n NodeBase.__init__(self, 'DisplayTabs')\n self.view = view\n\n def Label(self):\n return 'Main View'\n\n def OnSelection(self, treePanel):\n workspace = treePanel.parent.workspacePanel\n workspace.nodeData = self.view\n workspace.SetupMainViewControls()\n\n\nclass DockSectionNode(NodeBase):\n\n def __init__(self, label, icon, contents, settings):\n NodeBase.__init__(self, icon)\n self.label = label\n self.contents = contents\n self.settings = settings\n\n def Label(self):\n return self.label\n\n def OnSelection(self, treePanel):\n workspace = treePanel.parent.workspacePanel\n workspace.nodeData = self\n workspace.SetupDockSectionControls()\n\n\nclass ToolbarNode(NodeBase):\n\n def __init__(self, toolbar):\n NodeBase.__init__(self, 'InstrumentAppBkg+SettingsOverlay')\n self.toolbar = toolbar\n\n def Label(self):\n return 'Toolbar'\n\n def GetButtons(self):\n return self.toolbar.buttons\n\n def OnSelection(self, treePanel):\n workspacePanel = treePanel.parent.workspacePanel\n workspacePanel.nodeData = self.toolbar.quickOpen\n workspacePanel.SetupToolbarControls()\n\n\nclass ButtonNode(NodeBase):\n\n def __init__(self, button, isDockPartNode):\n NodeBase.__init__(self, 'TradeEntryApp')\n self.button = button\n self.isDockPartNode = isDockPartNode\n\n def Label(self):\n label = self.button.HasField('label') and self.button.label.encode(\n 'utf-8')\n return label or '<Buttons>'\n\n def OnSelection(self, treePanel):\n 
workspacePanel = treePanel.parent.workspacePanel\n workspacePanel.nodeData = self.button\n workspacePanel.SetupButtonControls(self.isDockPartNode)\n",
"step-3": "<mask token>\n\n\nclass DashboardTabNode(NodeBase):\n\n def __init__(self, tabContent):\n NodeBase.__init__(self, 'WindowSwitch')\n self.tabContent = tabContent\n self.contents = self.Contents()\n self.userSettings = self.Settings()\n\n def Label(self):\n label = self.tabContent.caption.encode('utf-8')\n return label or '<Dashboard>'\n\n def Contents(self):\n contents = AppWorkspace.DashboardContent()\n contents.ParseFromString(self.tabContent.contents)\n return contents\n\n def Settings(self):\n userSettings = AppWorkspace.DashboardSettings()\n userSettings.ParseFromString(self.tabContent.userSettings)\n return userSettings\n\n def OnSelection(self, treePanel):\n workspacePanel = treePanel.parent.workspacePanel\n workspacePanel.nodeData = self\n workspacePanel.SetupDashboardTabControls()\n\n\nclass WorkbenchTabNode(NodeBase):\n\n def __init__(self, tabContent):\n NodeBase.__init__(self, 'Layout')\n self.tabContent = tabContent\n self.contents = self.Contents()\n self.userSettings = self.Settings()\n\n def Label(self):\n label = self.tabContent.caption.encode('utf-8')\n return label or '<Workbench>'\n\n def Contents(self):\n contents = AppWorkspace.WorkbenchContent()\n contents.ParseFromString(self.tabContent.contents)\n return contents\n\n def Settings(self):\n userSettings = AppWorkspace.WorkbenchSettings()\n userSettings.ParseFromString(self.tabContent.userSettings)\n return userSettings\n\n def OnSelection(self, treePanel):\n workspacePanel = treePanel.parent.workspacePanel\n workspacePanel.nodeData = self.tabContent\n workspacePanel.SetupWorkbenchTabControls()\n\n\nclass DashboardPartNode(NodeBase):\n\n def __init__(self, part, settings, label=None):\n NodeBase.__init__(self, 'FExtension')\n self.part = part\n self.settings = settings\n\n def Label(self):\n v = self.part.view\n label = v.caption if v.HasField('caption'\n ) and v.caption else v.viewName\n return label.encode('utf-8') or '<Part>'\n\n def OnSelection(self, treePanel):\n workspacePanel 
= treePanel.parent.workspacePanel\n workspacePanel.nodeData = self\n workspacePanel.SetupDashboardPartControls()\n\n\nclass DockPartNode(NodeBase):\n\n def __init__(self, part):\n NodeBase.__init__(self, 'FExtension')\n self.part = part\n\n def Label(self):\n v = self.part.view\n label = v.caption if v.HasField('caption'\n ) and v.caption else v.viewName\n return label.encode('utf-8') or '<Part>'\n\n def GetButtons(self):\n return self.part.selectionActionButtons\n\n def OnSelection(self, treePanel):\n workspacePanel = treePanel.parent.workspacePanel\n workspacePanel.nodeData = self.part\n workspacePanel.SetupDockSectionPartControls()\n\n\nclass MainViewNode(NodeBase):\n\n def __init__(self, view):\n NodeBase.__init__(self, 'DisplayTabs')\n self.view = view\n\n def Label(self):\n return 'Main View'\n\n def OnSelection(self, treePanel):\n workspace = treePanel.parent.workspacePanel\n workspace.nodeData = self.view\n workspace.SetupMainViewControls()\n\n\nclass DockSectionNode(NodeBase):\n\n def __init__(self, label, icon, contents, settings):\n NodeBase.__init__(self, icon)\n self.label = label\n self.contents = contents\n self.settings = settings\n\n def Label(self):\n return self.label\n\n def OnSelection(self, treePanel):\n workspace = treePanel.parent.workspacePanel\n workspace.nodeData = self\n workspace.SetupDockSectionControls()\n\n\nclass ToolbarNode(NodeBase):\n\n def __init__(self, toolbar):\n NodeBase.__init__(self, 'InstrumentAppBkg+SettingsOverlay')\n self.toolbar = toolbar\n\n def Label(self):\n return 'Toolbar'\n\n def GetButtons(self):\n return self.toolbar.buttons\n\n def OnSelection(self, treePanel):\n workspacePanel = treePanel.parent.workspacePanel\n workspacePanel.nodeData = self.toolbar.quickOpen\n workspacePanel.SetupToolbarControls()\n\n\nclass ButtonNode(NodeBase):\n\n def __init__(self, button, isDockPartNode):\n NodeBase.__init__(self, 'TradeEntryApp')\n self.button = button\n self.isDockPartNode = isDockPartNode\n\n def Label(self):\n 
label = self.button.HasField('label') and self.button.label.encode(\n 'utf-8')\n return label or '<Buttons>'\n\n def OnSelection(self, treePanel):\n workspacePanel = treePanel.parent.workspacePanel\n workspacePanel.nodeData = self.button\n workspacePanel.SetupButtonControls(self.isDockPartNode)\n",
"step-4": "<mask token>\n\n\nclass NodeBase:\n <mask token>\n\n def Label(self):\n raise NotImplementedError('Label')\n <mask token>\n\n\nclass WorkspaceNode(NodeBase):\n\n def __init__(self, workspace, label):\n NodeBase.__init__(self, 'FWorkspace')\n self.contents = workspace\n self.label = label\n\n def Label(self):\n return self.label\n\n def OnSelection(self, treePanel):\n workspacePanel = treePanel.parent.workspacePanel\n workspacePanel.nodeData = self\n workspacePanel.SetupWorkspaceControls()\n\n\nclass DashboardTabNode(NodeBase):\n\n def __init__(self, tabContent):\n NodeBase.__init__(self, 'WindowSwitch')\n self.tabContent = tabContent\n self.contents = self.Contents()\n self.userSettings = self.Settings()\n\n def Label(self):\n label = self.tabContent.caption.encode('utf-8')\n return label or '<Dashboard>'\n\n def Contents(self):\n contents = AppWorkspace.DashboardContent()\n contents.ParseFromString(self.tabContent.contents)\n return contents\n\n def Settings(self):\n userSettings = AppWorkspace.DashboardSettings()\n userSettings.ParseFromString(self.tabContent.userSettings)\n return userSettings\n\n def OnSelection(self, treePanel):\n workspacePanel = treePanel.parent.workspacePanel\n workspacePanel.nodeData = self\n workspacePanel.SetupDashboardTabControls()\n\n\nclass WorkbenchTabNode(NodeBase):\n\n def __init__(self, tabContent):\n NodeBase.__init__(self, 'Layout')\n self.tabContent = tabContent\n self.contents = self.Contents()\n self.userSettings = self.Settings()\n\n def Label(self):\n label = self.tabContent.caption.encode('utf-8')\n return label or '<Workbench>'\n\n def Contents(self):\n contents = AppWorkspace.WorkbenchContent()\n contents.ParseFromString(self.tabContent.contents)\n return contents\n\n def Settings(self):\n userSettings = AppWorkspace.WorkbenchSettings()\n userSettings.ParseFromString(self.tabContent.userSettings)\n return userSettings\n\n def OnSelection(self, treePanel):\n workspacePanel = treePanel.parent.workspacePanel\n 
workspacePanel.nodeData = self.tabContent\n workspacePanel.SetupWorkbenchTabControls()\n\n\nclass DashboardPartNode(NodeBase):\n\n def __init__(self, part, settings, label=None):\n NodeBase.__init__(self, 'FExtension')\n self.part = part\n self.settings = settings\n\n def Label(self):\n v = self.part.view\n label = v.caption if v.HasField('caption'\n ) and v.caption else v.viewName\n return label.encode('utf-8') or '<Part>'\n\n def OnSelection(self, treePanel):\n workspacePanel = treePanel.parent.workspacePanel\n workspacePanel.nodeData = self\n workspacePanel.SetupDashboardPartControls()\n\n\nclass DockPartNode(NodeBase):\n\n def __init__(self, part):\n NodeBase.__init__(self, 'FExtension')\n self.part = part\n\n def Label(self):\n v = self.part.view\n label = v.caption if v.HasField('caption'\n ) and v.caption else v.viewName\n return label.encode('utf-8') or '<Part>'\n\n def GetButtons(self):\n return self.part.selectionActionButtons\n\n def OnSelection(self, treePanel):\n workspacePanel = treePanel.parent.workspacePanel\n workspacePanel.nodeData = self.part\n workspacePanel.SetupDockSectionPartControls()\n\n\nclass MainViewNode(NodeBase):\n\n def __init__(self, view):\n NodeBase.__init__(self, 'DisplayTabs')\n self.view = view\n\n def Label(self):\n return 'Main View'\n\n def OnSelection(self, treePanel):\n workspace = treePanel.parent.workspacePanel\n workspace.nodeData = self.view\n workspace.SetupMainViewControls()\n\n\nclass DockSectionNode(NodeBase):\n\n def __init__(self, label, icon, contents, settings):\n NodeBase.__init__(self, icon)\n self.label = label\n self.contents = contents\n self.settings = settings\n\n def Label(self):\n return self.label\n\n def OnSelection(self, treePanel):\n workspace = treePanel.parent.workspacePanel\n workspace.nodeData = self\n workspace.SetupDockSectionControls()\n\n\nclass ToolbarNode(NodeBase):\n\n def __init__(self, toolbar):\n NodeBase.__init__(self, 'InstrumentAppBkg+SettingsOverlay')\n self.toolbar = toolbar\n\n 
def Label(self):\n return 'Toolbar'\n\n def GetButtons(self):\n return self.toolbar.buttons\n\n def OnSelection(self, treePanel):\n workspacePanel = treePanel.parent.workspacePanel\n workspacePanel.nodeData = self.toolbar.quickOpen\n workspacePanel.SetupToolbarControls()\n\n\nclass ButtonNode(NodeBase):\n\n def __init__(self, button, isDockPartNode):\n NodeBase.__init__(self, 'TradeEntryApp')\n self.button = button\n self.isDockPartNode = isDockPartNode\n\n def Label(self):\n label = self.button.HasField('label') and self.button.label.encode(\n 'utf-8')\n return label or '<Buttons>'\n\n def OnSelection(self, treePanel):\n workspacePanel = treePanel.parent.workspacePanel\n workspacePanel.nodeData = self.button\n workspacePanel.SetupButtonControls(self.isDockPartNode)\n",
"step-5": "\"\"\" Compiled: 2020-09-18 10:38:52 \"\"\"\n\n#__src_file__ = \"extensions/AppWorkspaceTools/etc/FAppWorkspaceDesignerNodes.py\"\n\"\"\" Compiled: 2018-06-07 17:06:19 \"\"\"\n\n#__src_file__ = \"extensions/AppWorkspaceTools/etc/FAppWorkspaceDesignerNodes.py\"\nimport acm\nimport FUxCore\nimport Contracts_AppConfig_Messages_AppWorkspace as AppWorkspace\n\nclass NodeBase():\n def __init__(self, icon=''):\n self.icon = icon\n \n def Label(self):\n raise NotImplementedError('Label')\n \n def Icon(self):\n return self.icon\n\nclass WorkspaceNode(NodeBase):\n def __init__(self, workspace, label):\n NodeBase.__init__(self, 'FWorkspace')\n self.contents = workspace\n self.label = label\n \n def Label(self):\n return self.label\n \n def OnSelection(self, treePanel):\n # TODO Don't navigate to siblings, go through parent\n workspacePanel = treePanel.parent.workspacePanel\n workspacePanel.nodeData = self\n workspacePanel.SetupWorkspaceControls()\n \nclass DashboardTabNode(NodeBase):\n def __init__(self, tabContent):\n NodeBase.__init__(self, 'WindowSwitch')\n self.tabContent = tabContent\n self.contents = self.Contents()\n self.userSettings = self.Settings()\n \n def Label(self):\n label = self.tabContent.caption.encode('utf-8')\n return label or '<Dashboard>'\n \n def Contents(self):\n contents = AppWorkspace.DashboardContent()\n contents.ParseFromString(self.tabContent.contents)\n return contents\n \n def Settings(self):\n userSettings = AppWorkspace.DashboardSettings()\n userSettings.ParseFromString(self.tabContent.userSettings)\n return userSettings\n \n def OnSelection(self, treePanel):\n workspacePanel = treePanel.parent.workspacePanel\n workspacePanel.nodeData = self\n workspacePanel.SetupDashboardTabControls()\n \nclass WorkbenchTabNode(NodeBase):\n def __init__(self, tabContent):\n NodeBase.__init__(self, 'Layout')\n self.tabContent = tabContent\n self.contents = self.Contents()\n self.userSettings = self.Settings()\n \n def Label(self):\n label = 
self.tabContent.caption.encode('utf-8') \n return label or '<Workbench>'\n \n def Contents(self):\n contents = AppWorkspace.WorkbenchContent()\n contents.ParseFromString(self.tabContent.contents)\n return contents\n \n def Settings(self):\n userSettings = AppWorkspace.WorkbenchSettings()\n userSettings.ParseFromString(self.tabContent.userSettings)\n return userSettings\n \n def OnSelection(self, treePanel):\n workspacePanel = treePanel.parent.workspacePanel\n workspacePanel.nodeData = self.tabContent\n workspacePanel.SetupWorkbenchTabControls()\n \nclass DashboardPartNode(NodeBase):\n def __init__(self, part, settings, label=None):\n NodeBase.__init__(self, 'FExtension')\n self.part = part \n self.settings = settings\n \n def Label(self):\n v = self.part.view\n label = v.caption if v.HasField('caption') and v.caption else v.viewName\n return label.encode('utf-8') or '<Part>'\n \n def OnSelection(self, treePanel):\n workspacePanel = treePanel.parent.workspacePanel\n workspacePanel.nodeData = self\n workspacePanel.SetupDashboardPartControls()\n\nclass DockPartNode(NodeBase):\n def __init__(self, part):\n NodeBase.__init__(self, 'FExtension')\n self.part = part\n \n def Label(self):\n v = self.part.view\n label = v.caption if v.HasField('caption') and v.caption else v.viewName\n return label.encode('utf-8') or '<Part>'\n\n def GetButtons(self):\n return self.part.selectionActionButtons\n \n def OnSelection(self, treePanel):\n workspacePanel = treePanel.parent.workspacePanel\n workspacePanel.nodeData = self.part\n workspacePanel.SetupDockSectionPartControls()\n\nclass MainViewNode(NodeBase):\n def __init__(self, view):\n NodeBase.__init__(self, 'DisplayTabs')\n self.view = view\n \n def Label(self):\n return 'Main View'\n \n def OnSelection(self, treePanel):\n workspace = treePanel.parent.workspacePanel\n workspace.nodeData = self.view\n workspace.SetupMainViewControls()\n \nclass DockSectionNode(NodeBase):\n def __init__(self, label, icon, contents, settings):\n 
NodeBase.__init__(self, icon)\n self.label = label\n self.contents = contents\n self.settings = settings\n \n def Label(self):\n return self.label\n \n def OnSelection(self, treePanel):\n workspace = treePanel.parent.workspacePanel\n workspace.nodeData = self\n workspace.SetupDockSectionControls()\n \nclass ToolbarNode(NodeBase):\n def __init__(self, toolbar):\n NodeBase.__init__(self, 'InstrumentAppBkg+SettingsOverlay')\n self.toolbar = toolbar\n \n def Label(self):\n return 'Toolbar'\n\n def GetButtons(self):\n return self.toolbar.buttons \n\n def OnSelection(self, treePanel):\n workspacePanel = treePanel.parent.workspacePanel\n workspacePanel.nodeData = self.toolbar.quickOpen\n workspacePanel.SetupToolbarControls()\n \nclass ButtonNode(NodeBase):\n def __init__(self, button, isDockPartNode):\n NodeBase.__init__(self, 'TradeEntryApp')\n self.button = button\n self.isDockPartNode = isDockPartNode\n \n def Label(self):\n label = self.button.HasField('label') and \\\n self.button.label.encode('utf-8')\n return label or '<Buttons>'\n\n def OnSelection(self, treePanel):\n workspacePanel = treePanel.parent.workspacePanel\n workspacePanel.nodeData = self.button\n workspacePanel.SetupButtonControls(self.isDockPartNode)",
"step-ids": [
15,
37,
38,
44,
48
]
}
|
[
15,
37,
38,
44,
48
] |
# Copyright 2021 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for interoperability between JAX and pickling libraries."""
import pickle
import unittest
from absl.testing import absltest
from absl.testing import parameterized
try:
import cloudpickle
except ImportError:
cloudpickle = None
import jax
from jax import core
from jax import numpy as jnp
from jax.config import config
from jax.interpreters import pxla
from jax._src import test_util as jtu
config.parse_flags_with_absl()
class CloudpickleTest(jtu.JaxTestCase):
  """Round-trip jitted and pmapped functions through cloudpickle."""

  @unittest.skipIf(cloudpickle is None, "Requires cloudpickle")
  def testPickleOfJittedFunctions(self):

    @jax.jit
    def f(x, y):
      return x * y

    @jax.jit
    def g(z):
      return f(z, z + 77)  # noqa: F821

    expected = g(32)
    payload = cloudpickle.dumps(g)
    # Drop the originals so unpickling must reconstruct the closure.
    del f, g

    restored = pickle.loads(payload)
    self.assertEqual(expected, restored(32))

  @unittest.skipIf(cloudpickle is None, "Requires cloudpickle")
  def testPickleOfPmappedFunctions(self):

    @jax.pmap
    def f(x, y):
      return x * y

    @jax.pmap
    def g(z):
      return f(z, z + 77)  # noqa: F821

    expected = g(jnp.asarray([[32]]))
    payload = cloudpickle.dumps(g)
    # Drop the originals so unpickling must reconstruct the closure.
    del f, g

    restored = pickle.loads(payload)
    self.assertEqual(expected, restored(jnp.asarray([[32]])))
class PickleTest(jtu.JaxTestCase):
  """Round-trip JAX arrays, PRNG keys and specs through stdlib pickle."""

  def testPickleOfDeviceArray(self):
    original = jnp.arange(10.0)
    restored = pickle.loads(pickle.dumps(original))
    self.assertArraysEqual(original, restored)
    self.assertIsInstance(restored, type(original))
    self.assertEqual(original.aval, restored.aval)

  def testPickleOfDeviceArrayWeakType(self):
    original = jnp.array(4.0)
    self.assertEqual(original.aval.weak_type, True)
    restored = pickle.loads(pickle.dumps(original))
    self.assertArraysEqual(original, restored)
    self.assertIsInstance(restored, type(original))
    self.assertEqual(original.aval, restored.aval)

  @jtu.sample_product(prng_name=['threefry2x32', 'rbg', 'unsafe_rbg'])
  def testPickleOfKeyArray(self, prng_name):
    # Keys must survive pickling under every PRNG implementation.
    with jax.default_prng_impl(prng_name):
      key = jax.random.PRNGKey(72)
      clone = pickle.loads(pickle.dumps(key))
      self.assertEqual(key.dtype, clone.dtype)
      self.assertArraysEqual(jax.random.key_data(key),
                             jax.random.key_data(clone))

  @parameterized.parameters(
      (pxla.PartitionSpec(),),
      (pxla.PartitionSpec(None),),
      (pxla.PartitionSpec('x', None),),
      (pxla.PartitionSpec(None, 'y'),),
      (pxla.PartitionSpec('x', 'y'),),
      (pxla.PartitionSpec(('x', 'y'),),),
  )
  def testPickleOfPartitionSpecs(self, partition_spec):
    clone = pickle.loads(pickle.dumps(partition_spec))
    self.assertIsInstance(clone, pxla.PartitionSpec)
    self.assertTupleEqual(partition_spec, clone)

  def testPickleX64(self):
    # Pickle under x64, unpickle without it: the value survives but the
    # dtype is canonicalized down to float32 on load.
    with jax.experimental.enable_x64():
      x = jnp.array(4.0, dtype='float64')
      blob = pickle.dumps(x)

    with jax.experimental.disable_x64():
      y = pickle.loads(blob)

    self.assertEqual(x.dtype, jnp.float64)
    self.assertArraysEqual(x, y, check_dtypes=False)
    self.assertEqual(y.dtype, jnp.float32)
    self.assertEqual(y.aval.dtype, jnp.float32)
    self.assertIsInstance(y, type(x))

  def testPickleTracerError(self):
    # Tracers are abstract values; pickling one inside jit must fail loudly.
    with self.assertRaises(core.ConcretizationTypeError):
      jax.jit(pickle.dumps)(0)
if __name__ == "__main__":
    # Run the suite under absltest using JAX's custom test loader.
    absltest.main(testLoader=jtu.JaxTestLoader())
|
normal
|
{
"blob_id": "79c8e87e1d247eef8dd1ca8e307bbe6d25bf48e2",
"index": 8172,
"step-1": "<mask token>\n\n\nclass PickleTest(jtu.JaxTestCase):\n\n def testPickleOfDeviceArray(self):\n x = jnp.arange(10.0)\n s = pickle.dumps(x)\n y = pickle.loads(s)\n self.assertArraysEqual(x, y)\n self.assertIsInstance(y, type(x))\n self.assertEqual(x.aval, y.aval)\n\n def testPickleOfDeviceArrayWeakType(self):\n x = jnp.array(4.0)\n self.assertEqual(x.aval.weak_type, True)\n s = pickle.dumps(x)\n y = pickle.loads(s)\n self.assertArraysEqual(x, y)\n self.assertIsInstance(y, type(x))\n self.assertEqual(x.aval, y.aval)\n\n @jtu.sample_product(prng_name=['threefry2x32', 'rbg', 'unsafe_rbg'])\n def testPickleOfKeyArray(self, prng_name):\n with jax.default_prng_impl(prng_name):\n k1 = jax.random.PRNGKey(72)\n s = pickle.dumps(k1)\n k2 = pickle.loads(s)\n self.assertEqual(k1.dtype, k2.dtype)\n self.assertArraysEqual(jax.random.key_data(k1), jax.random.\n key_data(k2))\n\n @parameterized.parameters((pxla.PartitionSpec(),), (pxla.PartitionSpec(\n None),), (pxla.PartitionSpec('x', None),), (pxla.PartitionSpec(None,\n 'y'),), (pxla.PartitionSpec('x', 'y'),), (pxla.PartitionSpec(('x',\n 'y')),))\n def testPickleOfPartitionSpecs(self, partition_spec):\n restored_partition_spec = pickle.loads(pickle.dumps(partition_spec))\n self.assertIsInstance(restored_partition_spec, pxla.PartitionSpec)\n self.assertTupleEqual(partition_spec, restored_partition_spec)\n\n def testPickleX64(self):\n with jax.experimental.enable_x64():\n x = jnp.array(4.0, dtype='float64')\n s = pickle.dumps(x)\n with jax.experimental.disable_x64():\n y = pickle.loads(s)\n self.assertEqual(x.dtype, jnp.float64)\n self.assertArraysEqual(x, y, check_dtypes=False)\n self.assertEqual(y.dtype, jnp.float32)\n self.assertEqual(y.aval.dtype, jnp.float32)\n self.assertIsInstance(y, type(x))\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass CloudpickleTest(jtu.JaxTestCase):\n <mask token>\n\n @unittest.skipIf(cloudpickle is None, 'Requires cloudpickle')\n def testPickleOfPmappedFunctions(self):\n\n @jax.pmap\n def f(x, y):\n return x * y\n\n @jax.pmap\n def g(z):\n return f(z, z + 77)\n expected = g(jnp.asarray([[32]]))\n s = cloudpickle.dumps(g)\n del f, g\n g_unpickled = pickle.loads(s)\n actual = g_unpickled(jnp.asarray([[32]]))\n self.assertEqual(expected, actual)\n\n\nclass PickleTest(jtu.JaxTestCase):\n\n def testPickleOfDeviceArray(self):\n x = jnp.arange(10.0)\n s = pickle.dumps(x)\n y = pickle.loads(s)\n self.assertArraysEqual(x, y)\n self.assertIsInstance(y, type(x))\n self.assertEqual(x.aval, y.aval)\n\n def testPickleOfDeviceArrayWeakType(self):\n x = jnp.array(4.0)\n self.assertEqual(x.aval.weak_type, True)\n s = pickle.dumps(x)\n y = pickle.loads(s)\n self.assertArraysEqual(x, y)\n self.assertIsInstance(y, type(x))\n self.assertEqual(x.aval, y.aval)\n\n @jtu.sample_product(prng_name=['threefry2x32', 'rbg', 'unsafe_rbg'])\n def testPickleOfKeyArray(self, prng_name):\n with jax.default_prng_impl(prng_name):\n k1 = jax.random.PRNGKey(72)\n s = pickle.dumps(k1)\n k2 = pickle.loads(s)\n self.assertEqual(k1.dtype, k2.dtype)\n self.assertArraysEqual(jax.random.key_data(k1), jax.random.\n key_data(k2))\n\n @parameterized.parameters((pxla.PartitionSpec(),), (pxla.PartitionSpec(\n None),), (pxla.PartitionSpec('x', None),), (pxla.PartitionSpec(None,\n 'y'),), (pxla.PartitionSpec('x', 'y'),), (pxla.PartitionSpec(('x',\n 'y')),))\n def testPickleOfPartitionSpecs(self, partition_spec):\n restored_partition_spec = pickle.loads(pickle.dumps(partition_spec))\n self.assertIsInstance(restored_partition_spec, pxla.PartitionSpec)\n self.assertTupleEqual(partition_spec, restored_partition_spec)\n\n def testPickleX64(self):\n with jax.experimental.enable_x64():\n x = jnp.array(4.0, dtype='float64')\n s = pickle.dumps(x)\n with jax.experimental.disable_x64():\n y = 
pickle.loads(s)\n self.assertEqual(x.dtype, jnp.float64)\n self.assertArraysEqual(x, y, check_dtypes=False)\n self.assertEqual(y.dtype, jnp.float32)\n self.assertEqual(y.aval.dtype, jnp.float32)\n self.assertIsInstance(y, type(x))\n\n def testPickleTracerError(self):\n with self.assertRaises(core.ConcretizationTypeError):\n jax.jit(pickle.dumps)(0)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass CloudpickleTest(jtu.JaxTestCase):\n\n @unittest.skipIf(cloudpickle is None, 'Requires cloudpickle')\n def testPickleOfJittedFunctions(self):\n\n @jax.jit\n def f(x, y):\n return x * y\n\n @jax.jit\n def g(z):\n return f(z, z + 77)\n expected = g(32)\n s = cloudpickle.dumps(g)\n del f, g\n g_unpickled = pickle.loads(s)\n actual = g_unpickled(32)\n self.assertEqual(expected, actual)\n\n @unittest.skipIf(cloudpickle is None, 'Requires cloudpickle')\n def testPickleOfPmappedFunctions(self):\n\n @jax.pmap\n def f(x, y):\n return x * y\n\n @jax.pmap\n def g(z):\n return f(z, z + 77)\n expected = g(jnp.asarray([[32]]))\n s = cloudpickle.dumps(g)\n del f, g\n g_unpickled = pickle.loads(s)\n actual = g_unpickled(jnp.asarray([[32]]))\n self.assertEqual(expected, actual)\n\n\nclass PickleTest(jtu.JaxTestCase):\n\n def testPickleOfDeviceArray(self):\n x = jnp.arange(10.0)\n s = pickle.dumps(x)\n y = pickle.loads(s)\n self.assertArraysEqual(x, y)\n self.assertIsInstance(y, type(x))\n self.assertEqual(x.aval, y.aval)\n\n def testPickleOfDeviceArrayWeakType(self):\n x = jnp.array(4.0)\n self.assertEqual(x.aval.weak_type, True)\n s = pickle.dumps(x)\n y = pickle.loads(s)\n self.assertArraysEqual(x, y)\n self.assertIsInstance(y, type(x))\n self.assertEqual(x.aval, y.aval)\n\n @jtu.sample_product(prng_name=['threefry2x32', 'rbg', 'unsafe_rbg'])\n def testPickleOfKeyArray(self, prng_name):\n with jax.default_prng_impl(prng_name):\n k1 = jax.random.PRNGKey(72)\n s = pickle.dumps(k1)\n k2 = pickle.loads(s)\n self.assertEqual(k1.dtype, k2.dtype)\n self.assertArraysEqual(jax.random.key_data(k1), jax.random.\n key_data(k2))\n\n @parameterized.parameters((pxla.PartitionSpec(),), (pxla.PartitionSpec(\n None),), (pxla.PartitionSpec('x', None),), (pxla.PartitionSpec(None,\n 'y'),), (pxla.PartitionSpec('x', 'y'),), (pxla.PartitionSpec(('x',\n 'y')),))\n def testPickleOfPartitionSpecs(self, partition_spec):\n restored_partition_spec = 
pickle.loads(pickle.dumps(partition_spec))\n self.assertIsInstance(restored_partition_spec, pxla.PartitionSpec)\n self.assertTupleEqual(partition_spec, restored_partition_spec)\n\n def testPickleX64(self):\n with jax.experimental.enable_x64():\n x = jnp.array(4.0, dtype='float64')\n s = pickle.dumps(x)\n with jax.experimental.disable_x64():\n y = pickle.loads(s)\n self.assertEqual(x.dtype, jnp.float64)\n self.assertArraysEqual(x, y, check_dtypes=False)\n self.assertEqual(y.dtype, jnp.float32)\n self.assertEqual(y.aval.dtype, jnp.float32)\n self.assertIsInstance(y, type(x))\n\n def testPickleTracerError(self):\n with self.assertRaises(core.ConcretizationTypeError):\n jax.jit(pickle.dumps)(0)\n\n\n<mask token>\n",
"step-4": "<mask token>\ntry:\n import cloudpickle\nexcept ImportError:\n cloudpickle = None\n<mask token>\nconfig.parse_flags_with_absl()\n\n\nclass CloudpickleTest(jtu.JaxTestCase):\n\n @unittest.skipIf(cloudpickle is None, 'Requires cloudpickle')\n def testPickleOfJittedFunctions(self):\n\n @jax.jit\n def f(x, y):\n return x * y\n\n @jax.jit\n def g(z):\n return f(z, z + 77)\n expected = g(32)\n s = cloudpickle.dumps(g)\n del f, g\n g_unpickled = pickle.loads(s)\n actual = g_unpickled(32)\n self.assertEqual(expected, actual)\n\n @unittest.skipIf(cloudpickle is None, 'Requires cloudpickle')\n def testPickleOfPmappedFunctions(self):\n\n @jax.pmap\n def f(x, y):\n return x * y\n\n @jax.pmap\n def g(z):\n return f(z, z + 77)\n expected = g(jnp.asarray([[32]]))\n s = cloudpickle.dumps(g)\n del f, g\n g_unpickled = pickle.loads(s)\n actual = g_unpickled(jnp.asarray([[32]]))\n self.assertEqual(expected, actual)\n\n\nclass PickleTest(jtu.JaxTestCase):\n\n def testPickleOfDeviceArray(self):\n x = jnp.arange(10.0)\n s = pickle.dumps(x)\n y = pickle.loads(s)\n self.assertArraysEqual(x, y)\n self.assertIsInstance(y, type(x))\n self.assertEqual(x.aval, y.aval)\n\n def testPickleOfDeviceArrayWeakType(self):\n x = jnp.array(4.0)\n self.assertEqual(x.aval.weak_type, True)\n s = pickle.dumps(x)\n y = pickle.loads(s)\n self.assertArraysEqual(x, y)\n self.assertIsInstance(y, type(x))\n self.assertEqual(x.aval, y.aval)\n\n @jtu.sample_product(prng_name=['threefry2x32', 'rbg', 'unsafe_rbg'])\n def testPickleOfKeyArray(self, prng_name):\n with jax.default_prng_impl(prng_name):\n k1 = jax.random.PRNGKey(72)\n s = pickle.dumps(k1)\n k2 = pickle.loads(s)\n self.assertEqual(k1.dtype, k2.dtype)\n self.assertArraysEqual(jax.random.key_data(k1), jax.random.\n key_data(k2))\n\n @parameterized.parameters((pxla.PartitionSpec(),), (pxla.PartitionSpec(\n None),), (pxla.PartitionSpec('x', None),), (pxla.PartitionSpec(None,\n 'y'),), (pxla.PartitionSpec('x', 'y'),), (pxla.PartitionSpec(('x',\n 
'y')),))\n def testPickleOfPartitionSpecs(self, partition_spec):\n restored_partition_spec = pickle.loads(pickle.dumps(partition_spec))\n self.assertIsInstance(restored_partition_spec, pxla.PartitionSpec)\n self.assertTupleEqual(partition_spec, restored_partition_spec)\n\n def testPickleX64(self):\n with jax.experimental.enable_x64():\n x = jnp.array(4.0, dtype='float64')\n s = pickle.dumps(x)\n with jax.experimental.disable_x64():\n y = pickle.loads(s)\n self.assertEqual(x.dtype, jnp.float64)\n self.assertArraysEqual(x, y, check_dtypes=False)\n self.assertEqual(y.dtype, jnp.float32)\n self.assertEqual(y.aval.dtype, jnp.float32)\n self.assertIsInstance(y, type(x))\n\n def testPickleTracerError(self):\n with self.assertRaises(core.ConcretizationTypeError):\n jax.jit(pickle.dumps)(0)\n\n\nif __name__ == '__main__':\n absltest.main(testLoader=jtu.JaxTestLoader())\n",
"step-5": "# Copyright 2021 The JAX Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for interoperability between JAX and pickling libraries.\"\"\"\n\nimport pickle\nimport unittest\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n\ntry:\n import cloudpickle\nexcept ImportError:\n cloudpickle = None\n\nimport jax\nfrom jax import core\nfrom jax import numpy as jnp\nfrom jax.config import config\nfrom jax.interpreters import pxla\nfrom jax._src import test_util as jtu\n\nconfig.parse_flags_with_absl()\n\n\nclass CloudpickleTest(jtu.JaxTestCase):\n\n @unittest.skipIf(cloudpickle is None, \"Requires cloudpickle\")\n def testPickleOfJittedFunctions(self):\n\n @jax.jit\n def f(x, y):\n return x * y\n\n @jax.jit\n def g(z):\n return f(z, z + 77) # noqa: F821\n\n expected = g(32)\n s = cloudpickle.dumps(g)\n del f, g\n\n g_unpickled = pickle.loads(s)\n actual = g_unpickled(32)\n self.assertEqual(expected, actual)\n\n @unittest.skipIf(cloudpickle is None, \"Requires cloudpickle\")\n def testPickleOfPmappedFunctions(self):\n\n @jax.pmap\n def f(x, y):\n return x * y\n\n @jax.pmap\n def g(z):\n return f(z, z + 77) # noqa: F821\n\n expected = g(jnp.asarray([[32]]))\n s = cloudpickle.dumps(g)\n del f, g\n\n g_unpickled = pickle.loads(s)\n actual = g_unpickled(jnp.asarray([[32]]))\n self.assertEqual(expected, actual)\n\n\nclass PickleTest(jtu.JaxTestCase):\n\n def testPickleOfDeviceArray(self):\n x = jnp.arange(10.0)\n s = 
pickle.dumps(x)\n y = pickle.loads(s)\n self.assertArraysEqual(x, y)\n self.assertIsInstance(y, type(x))\n self.assertEqual(x.aval, y.aval)\n\n def testPickleOfDeviceArrayWeakType(self):\n x = jnp.array(4.0)\n self.assertEqual(x.aval.weak_type, True)\n s = pickle.dumps(x)\n y = pickle.loads(s)\n self.assertArraysEqual(x, y)\n self.assertIsInstance(y, type(x))\n self.assertEqual(x.aval, y.aval)\n\n @jtu.sample_product(prng_name=['threefry2x32', 'rbg', 'unsafe_rbg'])\n def testPickleOfKeyArray(self, prng_name):\n with jax.default_prng_impl(prng_name):\n k1 = jax.random.PRNGKey(72)\n s = pickle.dumps(k1)\n k2 = pickle.loads(s)\n self.assertEqual(k1.dtype, k2.dtype)\n self.assertArraysEqual(jax.random.key_data(k1),\n jax.random.key_data(k2))\n\n @parameterized.parameters(\n (pxla.PartitionSpec(),),\n (pxla.PartitionSpec(None),),\n (pxla.PartitionSpec('x', None),),\n (pxla.PartitionSpec(None, 'y'),),\n (pxla.PartitionSpec('x', 'y'),),\n (pxla.PartitionSpec(('x', 'y'),),),\n )\n def testPickleOfPartitionSpecs(self, partition_spec):\n restored_partition_spec = pickle.loads(pickle.dumps(partition_spec))\n self.assertIsInstance(restored_partition_spec, pxla.PartitionSpec)\n self.assertTupleEqual(partition_spec, restored_partition_spec)\n\n def testPickleX64(self):\n with jax.experimental.enable_x64():\n x = jnp.array(4.0, dtype='float64')\n s = pickle.dumps(x)\n\n with jax.experimental.disable_x64():\n y = pickle.loads(s)\n\n self.assertEqual(x.dtype, jnp.float64)\n self.assertArraysEqual(x, y, check_dtypes=False)\n self.assertEqual(y.dtype, jnp.float32)\n self.assertEqual(y.aval.dtype, jnp.float32)\n self.assertIsInstance(y, type(x))\n\n def testPickleTracerError(self):\n with self.assertRaises(core.ConcretizationTypeError):\n jax.jit(pickle.dumps)(0)\n\nif __name__ == \"__main__\":\n absltest.main(testLoader=jtu.JaxTestLoader())\n",
"step-ids": [
6,
9,
10,
11,
13
]
}
|
[
6,
9,
10,
11,
13
] |
from django.urls import path
# Import the referenced views explicitly instead of the original wildcard
# `from jobscrapper.views import *`, which hides where names come from.
from jobscrapper.views import home_vacancies_view, vacancies_view

# URL routes for the jobscrapper app.
urlpatterns = [
    # Landing page.
    path('', home_vacancies_view, name="vacancy-home"),
    # Full vacancy list.
    path('list/', vacancies_view, name="vacancy"),
]
|
normal
|
{
"blob_id": "3ee20391d56d8c429ab1bd2f6b0e5b261721e401",
"index": 7965,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('', home_vacancies_view, name='vacancy-home'), path(\n 'list/', vacancies_view, name='vacancy')]\n",
"step-3": "from django.urls import path\nfrom jobscrapper.views import *\nurlpatterns = [path('', home_vacancies_view, name='vacancy-home'), path(\n 'list/', vacancies_view, name='vacancy')]\n",
"step-4": "from django.urls import path\nfrom jobscrapper.views import *\n\nurlpatterns = [\n path('', home_vacancies_view, name=\"vacancy-home\"),\n path('list/', vacancies_view, name=\"vacancy\"),\n\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from Crypto.PublicKey import DSA
from Crypto.Signature import DSS
from Crypto.Hash import SHA256
import os
import time
# Create two test fixtures filled with random bytes.
kB = 1024  # 1 kB
with open('small_file.txt', 'wb') as f:
    f.write(os.urandom(kB))

mB = 10485760  # 10 MB (10 * 1024 * 1024 bytes; the original comment said "1GB")
with open('large_file.txt', 'wb') as f:
    f.write(os.urandom(mB))

# Time 2048-bit DSA key generation and export the public half to disk.
Begin = time.time()
key = DSA.generate(2048)
with open("public_key.pem", "wb") as f:
    # The context manager closes the file; the original also called
    # f.close() inside the with block, which was redundant.
    f.write(key.publickey().export_key())
End = time.time()
print("Key Generation Time: ", End - Begin)
def DSA_2048(filename, key):
    """Sign the contents of *filename* with *key* (DSA, FIPS 186-3, SHA-256),
    then verify the signature using the public key from public_key.pem.

    Prints whether the signature verified; returns None.
    """
    with open(filename, 'rb') as f:
        message = f.read()
    hash_obj = SHA256.new(message)
    signer = DSS.new(key, 'fips-186-3')
    signature = signer.sign(hash_obj)
    # Load the public key. Using `with` closes the handle; the original
    # opened the file and never closed it.
    with open("public_key.pem", "r") as f:
        pub_key = DSA.import_key(f.read())
    verifier = DSS.new(pub_key, 'fips-186-3')
    # Verify the authenticity of the message. The original recomputed
    # SHA256.new(message) here; the digest is identical, so hash_obj is
    # reused (pycryptodome hash objects allow repeated digest()).
    try:
        verifier.verify(hash_obj, signature)
        print("The message is authentic.")
    except ValueError:
        print("The message is not authentic.")
# Benchmark sign+verify on the small file.
Begin = time.time()
DSA_2048('small_file.txt', key)
End = time.time()
print("Time taken for DSA_2048 with 1 kb file: ", End - Begin)
if End - Begin != 0:
    print("DSA_2048 speed for 1 kb file: ", 1024 / (End - Begin), "bytes/sec")

# Benchmark sign+verify on the large file.
Begin = time.time()
DSA_2048('large_file.txt', key)
End = time.time()
print("Time taken for DSA_2048 with 10 mb file: ", End - Begin)
if End - Begin != 0:
    # BUG FIX: the original label said "1 kb file" for the 10 MB timing.
    print("DSA_2048 speed for 10 mb file: ", 10485760 / (End - Begin), "bytes/sec")
# exit() is redundant at the end of a script but kept for identical behavior.
exit()
|
normal
|
{
"blob_id": "d24bbfc3587a2a79891a11e00ec865498c01c286",
"index": 2101,
"step-1": "<mask token>\n\n\ndef DSA_2048(filename, key):\n with open(filename, 'rb') as f:\n message = f.read()\n hash_obj = SHA256.new(message)\n signer = DSS.new(key, 'fips-186-3')\n signature = signer.sign(hash_obj)\n f = open('public_key.pem', 'r')\n hash_obj = SHA256.new(message)\n pub_key = DSA.import_key(f.read())\n verifier = DSS.new(pub_key, 'fips-186-3')\n try:\n verifier.verify(hash_obj, signature)\n print('The message is authentic.')\n except ValueError:\n print('The message is not authentic.')\n\n\n<mask token>\n",
"step-2": "<mask token>\nwith open('small_file.txt', 'wb') as f:\n f.write(os.urandom(kB))\n<mask token>\nwith open('large_file.txt', 'wb') as f:\n f.write(os.urandom(mB))\n<mask token>\nwith open('public_key.pem', 'wb') as f:\n f.write(key.publickey().export_key())\n f.close()\n<mask token>\nprint('Key Generation Time: ', End - Begin)\n\n\ndef DSA_2048(filename, key):\n with open(filename, 'rb') as f:\n message = f.read()\n hash_obj = SHA256.new(message)\n signer = DSS.new(key, 'fips-186-3')\n signature = signer.sign(hash_obj)\n f = open('public_key.pem', 'r')\n hash_obj = SHA256.new(message)\n pub_key = DSA.import_key(f.read())\n verifier = DSS.new(pub_key, 'fips-186-3')\n try:\n verifier.verify(hash_obj, signature)\n print('The message is authentic.')\n except ValueError:\n print('The message is not authentic.')\n\n\n<mask token>\nDSA_2048('small_file.txt', key)\n<mask token>\nprint('Time taken for DSA_2048 with 1 kb file: ', End - Begin)\nif End - Begin != 0:\n print('DSA_2048 speed for 1 kb file: ', 1024 / (End - Begin), 'bytes/sec')\n<mask token>\nDSA_2048('large_file.txt', key)\n<mask token>\nprint('Time taken for DSA_2048 with 10 mb file: ', End - Begin)\nif End - Begin != 0:\n print('DSA_2048 speed for 1 kb file: ', 10485760 / (End - Begin),\n 'bytes/sec')\nexit()\n",
"step-3": "<mask token>\nkB = 1024\nwith open('small_file.txt', 'wb') as f:\n f.write(os.urandom(kB))\nmB = 10485760\nwith open('large_file.txt', 'wb') as f:\n f.write(os.urandom(mB))\nBegin = time.time()\nkey = DSA.generate(2048)\nwith open('public_key.pem', 'wb') as f:\n f.write(key.publickey().export_key())\n f.close()\nEnd = time.time()\nprint('Key Generation Time: ', End - Begin)\n\n\ndef DSA_2048(filename, key):\n with open(filename, 'rb') as f:\n message = f.read()\n hash_obj = SHA256.new(message)\n signer = DSS.new(key, 'fips-186-3')\n signature = signer.sign(hash_obj)\n f = open('public_key.pem', 'r')\n hash_obj = SHA256.new(message)\n pub_key = DSA.import_key(f.read())\n verifier = DSS.new(pub_key, 'fips-186-3')\n try:\n verifier.verify(hash_obj, signature)\n print('The message is authentic.')\n except ValueError:\n print('The message is not authentic.')\n\n\nBegin = time.time()\nDSA_2048('small_file.txt', key)\nEnd = time.time()\nprint('Time taken for DSA_2048 with 1 kb file: ', End - Begin)\nif End - Begin != 0:\n print('DSA_2048 speed for 1 kb file: ', 1024 / (End - Begin), 'bytes/sec')\nBegin = time.time()\nDSA_2048('large_file.txt', key)\nEnd = time.time()\nprint('Time taken for DSA_2048 with 10 mb file: ', End - Begin)\nif End - Begin != 0:\n print('DSA_2048 speed for 1 kb file: ', 10485760 / (End - Begin),\n 'bytes/sec')\nexit()\n",
"step-4": "from Crypto.PublicKey import DSA\nfrom Crypto.Signature import DSS\nfrom Crypto.Hash import SHA256\nimport os\nimport time\nkB = 1024\nwith open('small_file.txt', 'wb') as f:\n f.write(os.urandom(kB))\nmB = 10485760\nwith open('large_file.txt', 'wb') as f:\n f.write(os.urandom(mB))\nBegin = time.time()\nkey = DSA.generate(2048)\nwith open('public_key.pem', 'wb') as f:\n f.write(key.publickey().export_key())\n f.close()\nEnd = time.time()\nprint('Key Generation Time: ', End - Begin)\n\n\ndef DSA_2048(filename, key):\n with open(filename, 'rb') as f:\n message = f.read()\n hash_obj = SHA256.new(message)\n signer = DSS.new(key, 'fips-186-3')\n signature = signer.sign(hash_obj)\n f = open('public_key.pem', 'r')\n hash_obj = SHA256.new(message)\n pub_key = DSA.import_key(f.read())\n verifier = DSS.new(pub_key, 'fips-186-3')\n try:\n verifier.verify(hash_obj, signature)\n print('The message is authentic.')\n except ValueError:\n print('The message is not authentic.')\n\n\nBegin = time.time()\nDSA_2048('small_file.txt', key)\nEnd = time.time()\nprint('Time taken for DSA_2048 with 1 kb file: ', End - Begin)\nif End - Begin != 0:\n print('DSA_2048 speed for 1 kb file: ', 1024 / (End - Begin), 'bytes/sec')\nBegin = time.time()\nDSA_2048('large_file.txt', key)\nEnd = time.time()\nprint('Time taken for DSA_2048 with 10 mb file: ', End - Begin)\nif End - Begin != 0:\n print('DSA_2048 speed for 1 kb file: ', 10485760 / (End - Begin),\n 'bytes/sec')\nexit()\n",
"step-5": "from Crypto.PublicKey import DSA\nfrom Crypto.Signature import DSS\nfrom Crypto.Hash import SHA256\nimport os\nimport time\n\nkB = 1024 # 1kB\nwith open('small_file.txt', 'wb') as f:\n f.write(os.urandom(kB))\n\nmB = 10485760 # 1GB\nwith open('large_file.txt', 'wb') as f:\n f.write(os.urandom(mB))\n\nBegin = time.time()\nkey = DSA.generate(2048)\nwith open(\"public_key.pem\", \"wb\") as f:\n f.write(key.publickey().export_key())\n f.close()\nEnd = time.time()\nprint(\"Key Generation Time: \", End-Begin)\n\ndef DSA_2048(filename,key):\n with open(filename, 'rb') as f:\n message = f.read()\n hash_obj = SHA256.new(message)\n signer = DSS.new(key, 'fips-186-3')\n signature = signer.sign(hash_obj)\n # Load the public key\n f = open(\"public_key.pem\", \"r\")\n hash_obj = SHA256.new(message)\n pub_key = DSA.import_key(f.read())\n verifier = DSS.new(pub_key, 'fips-186-3')\n # Verify the authenticity of the message\n try:\n verifier.verify(hash_obj, signature)\n print (\"The message is authentic.\")\n except ValueError:\n print (\"The message is not authentic.\")\n\nBegin=time.time()\nDSA_2048('small_file.txt',key)\nEnd=time.time()\nprint(\"Time taken for DSA_2048 with 1 kb file: \",End-Begin)\nif End-Begin != 0:\n print(\"DSA_2048 speed for 1 kb file: \",1024/(End-Begin),\"bytes/sec\")\n\nBegin=time.time()\nDSA_2048('large_file.txt',key)\nEnd=time.time()\nprint(\"Time taken for DSA_2048 with 10 mb file: \",End-Begin)\nif End-Begin != 0:\n print(\"DSA_2048 speed for 1 kb file: \",10485760/(End-Begin),\"bytes/sec\")\nexit()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from flask import render_template, request, current_app
from . import main
from .. import db, cache
from ..models import Content
from ..utils import make_cache_key
import requests
@main.route('/')
def index():
    """Render the site landing page."""
    # NOTE(review): the template path carries a 'templates/' prefix — Flask
    # normally expects paths relative to the templates folder; confirm the
    # loader is configured accordingly.
    template_name = "templates/index.html"
    return render_template(template_name)
@main.route('/link')
@cache.cached(key_prefix=make_cache_key, timeout=60)
def get_link():
    """Return link-preview metadata for the `?url=` query parameter.

    Serves a stored Content row when one exists; otherwise queries the
    Microlink API, persists a successful response, and returns it.
    Responses are cached for 60 seconds keyed by make_cache_key.
    """
    url = request.args.get('url')

    # Parameters forwarded to the Microlink API.
    params = {'video': True,
              'audio': True,
              'screenshot': False}
    if request.args.get('iframe'):
        params['iframe'] = True

    # Normalize the host to a www. form so DB lookups match stored URLs.
    # NOTE(review): this slicing assumes an 8-character scheme prefix
    # ('https://'); an 'http://' URL would be mangled — confirm callers
    # always send https.
    if url[8:11] != 'www':
        url = url[:8] + 'www.' + url[8:]

    content = Content.query.filter_by(url=url).first()

    if content:
        # BUG FIX: 'iframe' is only present in params when the query string
        # requested it; use .get() so a cached hit without ?iframe no longer
        # raises KeyError.
        return {'status': 'success',
                'data': content.to_json(iframe=params.get('iframe', False),
                                        video=params['video'],
                                        audio=params['audio'])}, 200
    else:
        headers = {'x-api-key': current_app.config['MICROLINK_API_KEY']}
        m_url = 'https://pro.microlink.io?url={}'.format(url)
        r = requests.get(m_url, headers=headers, params=params)

        # Parse the response body once instead of three r.json() calls.
        payload = r.json()
        if payload.get('status') == 'success':
            content = Content.from_json(payload.get('data'))
            db.session.add(content)
            db.session.commit()

        return payload, 200
|
normal
|
{
"blob_id": "c4096cfae7182875a79ba7837187cd94b4379922",
"index": 1100,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/link')\[email protected](key_prefix=make_cache_key, timeout=60)\ndef get_link():\n url = request.args.get('url')\n params = {'video': True, 'audio': True, 'screenshot': False}\n if request.args.get('iframe'):\n params['iframe'] = True\n if url[8:11] != 'www':\n url = url[:8] + 'www.' + url[8:]\n content = Content.query.filter_by(url=url).first()\n if content:\n return {'status': 'success', 'data': content.to_json(iframe=params[\n 'iframe'], video=params['video'], audio=params['audio'])}, 200\n else:\n headers = {'x-api-key': current_app.config['MICROLINK_API_KEY']}\n m_url = 'https://pro.microlink.io?url={}'.format(url)\n r = requests.get(m_url, headers=headers, params=params)\n if r.json().get('status') == 'success':\n content = Content.from_json(r.json().get('data'))\n db.session.add(content)\n db.session.commit()\n return r.json(), 200\n",
"step-3": "<mask token>\n\n\[email protected]('/')\ndef index():\n return render_template('templates/index.html')\n\n\[email protected]('/link')\[email protected](key_prefix=make_cache_key, timeout=60)\ndef get_link():\n url = request.args.get('url')\n params = {'video': True, 'audio': True, 'screenshot': False}\n if request.args.get('iframe'):\n params['iframe'] = True\n if url[8:11] != 'www':\n url = url[:8] + 'www.' + url[8:]\n content = Content.query.filter_by(url=url).first()\n if content:\n return {'status': 'success', 'data': content.to_json(iframe=params[\n 'iframe'], video=params['video'], audio=params['audio'])}, 200\n else:\n headers = {'x-api-key': current_app.config['MICROLINK_API_KEY']}\n m_url = 'https://pro.microlink.io?url={}'.format(url)\n r = requests.get(m_url, headers=headers, params=params)\n if r.json().get('status') == 'success':\n content = Content.from_json(r.json().get('data'))\n db.session.add(content)\n db.session.commit()\n return r.json(), 200\n",
"step-4": "from flask import render_template, request, current_app\nfrom . import main\nfrom .. import db, cache\nfrom ..models import Content\nfrom ..utils import make_cache_key\nimport requests\n\n\[email protected]('/')\ndef index():\n return render_template('templates/index.html')\n\n\[email protected]('/link')\[email protected](key_prefix=make_cache_key, timeout=60)\ndef get_link():\n url = request.args.get('url')\n params = {'video': True, 'audio': True, 'screenshot': False}\n if request.args.get('iframe'):\n params['iframe'] = True\n if url[8:11] != 'www':\n url = url[:8] + 'www.' + url[8:]\n content = Content.query.filter_by(url=url).first()\n if content:\n return {'status': 'success', 'data': content.to_json(iframe=params[\n 'iframe'], video=params['video'], audio=params['audio'])}, 200\n else:\n headers = {'x-api-key': current_app.config['MICROLINK_API_KEY']}\n m_url = 'https://pro.microlink.io?url={}'.format(url)\n r = requests.get(m_url, headers=headers, params=params)\n if r.json().get('status') == 'success':\n content = Content.from_json(r.json().get('data'))\n db.session.add(content)\n db.session.commit()\n return r.json(), 200\n",
"step-5": "from flask import render_template, request, current_app\nfrom . import main\nfrom .. import db, cache\nfrom ..models import Content\nfrom ..utils import make_cache_key\nimport requests\n\n\[email protected]('/')\ndef index():\n return render_template(\"templates/index.html\")\n\n\[email protected]('/link')\[email protected](key_prefix=make_cache_key, timeout=60)\ndef get_link():\n\n url = request.args.get('url')\n\n params = {'video': True,\n 'audio': True,\n 'screenshot': False}\n\n if request.args.get('iframe'):\n params['iframe'] = True\n\n if url[8:11] != 'www':\n url = url[:8] + 'www.' + url[8:]\n\n content = Content.query.filter_by(url=url).first()\n\n if content:\n return {'status': 'success',\n 'data': content.to_json(iframe=params['iframe'], video=params['video'], audio=params['audio'])}, 200\n else:\n headers = {'x-api-key': current_app.config['MICROLINK_API_KEY']}\n m_url = 'https://pro.microlink.io?url={}'.format(url)\n r = requests.get(m_url, headers=headers, params=params)\n\n if r.json().get('status') == 'success':\n content = Content.from_json(r.json().get('data'))\n db.session.add(content)\n db.session.commit()\n\n return r.json(), 200\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import argparse
from train import train
from test import infer
if __name__ == '__main__':
    # Command-line entry point: dispatch to training or inference.
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', type=str, default='train',
                        help='could be either infer or train')
    parser.add_argument('--model_dir', type=str, default='model',
                        help='directory to save models')
    # The original passed string defaults ('20', '10', ...) together with
    # type=int; argparse does coerce string defaults through `type`, but
    # plain int defaults are the unambiguous, idiomatic form.
    parser.add_argument('--batch_size', type=int, default=20,
                        help='train batch size')
    parser.add_argument('--epoch', type=int, default=10,
                        help='train epoch num')
    parser.add_argument('--nd', type=int, default=100,
                        help='noise dimension')
    parser.add_argument('--num', type=int, default=1,
                        help='which number to infer')
    args = parser.parse_args()

    if args.mode == 'train':
        train(args)
    elif args.mode == 'infer':
        infer(args)
    else:
        print('unknown mode')
|
normal
|
{
"blob_id": "f0fa85f240b74b003ade767ffe8642feacdfaa32",
"index": 5807,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--mode', type=str, default='train', help=\n 'could be either infer or train')\n parser.add_argument('--model_dir', type=str, default='model', help=\n 'directory to save models')\n parser.add_argument('--batch_size', type=int, default='20', help=\n 'train batch size')\n parser.add_argument('--epoch', type=int, default='10', help=\n 'train epoch num')\n parser.add_argument('--nd', type=int, default='100', help='noise dimension'\n )\n parser.add_argument('--num', type=int, default='1', help=\n 'which number to infer')\n args = parser.parse_args()\n if args.mode == 'train':\n train(args)\n elif args.mode == 'infer':\n infer(args)\n else:\n print('unknown mode')\n",
"step-3": "import argparse\nfrom train import train\nfrom test import infer\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--mode', type=str, default='train', help=\n 'could be either infer or train')\n parser.add_argument('--model_dir', type=str, default='model', help=\n 'directory to save models')\n parser.add_argument('--batch_size', type=int, default='20', help=\n 'train batch size')\n parser.add_argument('--epoch', type=int, default='10', help=\n 'train epoch num')\n parser.add_argument('--nd', type=int, default='100', help='noise dimension'\n )\n parser.add_argument('--num', type=int, default='1', help=\n 'which number to infer')\n args = parser.parse_args()\n if args.mode == 'train':\n train(args)\n elif args.mode == 'infer':\n infer(args)\n else:\n print('unknown mode')\n",
"step-4": "import argparse\nfrom train import train\nfrom test import infer\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--mode', type=str, default='train',\n help='could be either infer or train')\n parser.add_argument('--model_dir', type=str, default='model',\n help='directory to save models')\n parser.add_argument('--batch_size', type=int, default='20',\n help='train batch size')\n parser.add_argument('--epoch', type=int, default='10',\n help='train epoch num')\n parser.add_argument('--nd', type=int, default='100',\n help='noise dimension')\n parser.add_argument('--num', type=int, default='1',\n help='which number to infer')\n args = parser.parse_args()\n\n # if not os.path.exists(args.model_dir):\n # os.mkdir(args.model_dir)\n\n if args.mode == 'train':\n train(args)\n elif args.mode == 'infer':\n infer(args)\n else:\n print('unknown mode')",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.db import models
#from publicservants import models
from django.utils.encoding import smart_unicode
# Create your models here.
class Score(models.Model):
    """Time-stamped public-servant score snapshot together with the
    aggregate factors that determined it."""
    # PS score at time t (the running value when this row was written).
    pst = models.IntegerField(null=False)
    # Refreshed on every save (auto_now=True), not only at creation.
    timestamp = models.DateTimeField(auto_now_add=False, auto_now=True)
    # Aggregate thumbs-up / thumbs-down counts for the period.
    positivePersonality = models.IntegerField(null=False, blank=False)
    negativePersonality = models.IntegerField(null=False, blank=False)
    positiveReviewMentions = models.IntegerField(null=False, blank=False)
    negativeReviewMentions = models.IntegerField(null=False, blank=False)
    userScore = models.IntegerField(null=False, blank=False)
    # Actual PS score captured at 12am each day.
    ps = models.IntegerField(null=False)

    def __unicode__(self):
        # NOTE(review): smart_unicode is Python-2-era Django (removed in
        # Django 2.0); modern code would define __str__ instead.
        return smart_unicode(self.ps)
|
normal
|
{
"blob_id": "8c166dd4cb091dcd2d80b5ae3085b5dee77564e0",
"index": 1227,
"step-1": "<mask token>\n\n\nclass Score(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Score(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __unicode__(self):\n return smart_unicode(self.ps)\n",
"step-3": "<mask token>\n\n\nclass Score(models.Model):\n pst = models.IntegerField(null=False)\n timestamp = models.DateTimeField(auto_now_add=False, auto_now=True)\n positivePersonality = models.IntegerField(null=False, blank=False)\n negativePersonality = models.IntegerField(null=False, blank=False)\n positiveReviewMentions = models.IntegerField(null=False, blank=False)\n negativeReviewMentions = models.IntegerField(null=False, blank=False)\n userScore = models.IntegerField(null=False, blank=False)\n ps = models.IntegerField(null=False)\n\n def __unicode__(self):\n return smart_unicode(self.ps)\n",
"step-4": "from django.db import models\nfrom django.utils.encoding import smart_unicode\n\n\nclass Score(models.Model):\n pst = models.IntegerField(null=False)\n timestamp = models.DateTimeField(auto_now_add=False, auto_now=True)\n positivePersonality = models.IntegerField(null=False, blank=False)\n negativePersonality = models.IntegerField(null=False, blank=False)\n positiveReviewMentions = models.IntegerField(null=False, blank=False)\n negativeReviewMentions = models.IntegerField(null=False, blank=False)\n userScore = models.IntegerField(null=False, blank=False)\n ps = models.IntegerField(null=False)\n\n def __unicode__(self):\n return smart_unicode(self.ps)\n",
"step-5": "from django.db import models\n#from publicservants import models\nfrom django.utils.encoding import smart_unicode\n\n# Create your models here.\n\n\nclass Score(models.Model):\n #score ID - publicservant ID plus score\n #sID = models.ManyToOneRel(field=PublicServant.psID)\n \n #PS Score at time t\n pst = models.IntegerField(null=False)\n timestamp = models.DateTimeField(auto_now_add=False, auto_now=True)\n \n #Factors that determine Public Servant Score, include Thumbs up or down on certain criterias.\n #Aggregrate values for period of time\n positivePersonality = models.IntegerField(null=False, blank=False)\n negativePersonality = models.IntegerField(null=False, blank=False)\n \n positiveReviewMentions = models.IntegerField(null=False, blank=False)\n negativeReviewMentions = models.IntegerField(null=False, blank=False)\n \n userScore= models.IntegerField(null=False, blank=False)\n \n #Actual PSScore at 12am everyday\n ps = models.IntegerField(null=False)\n \n def __unicode__(self):\n return smart_unicode(self.ps) # + smart_unicode(self.PublicServant.psID)\n \n \n ",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from fastapi import APIRouter, Depends, HTTPException, status
from sqlalchemy.orm import Session
from typing import List
from sqlalchemy.sql.functions import current_date, current_user
from db.session import get_db
from db.models.jobs import Job
from schemas.jobs import JobCreate, ShowJob
from db.repository.jobs import create_new_job, delete_job_by_id, retrieve_job, list_jobs, update_job_by_id
from apis.version1.route_login import get_current_user_from_token
from db.models.users import User
router = APIRouter()
@router.post("/create-job", response_model=ShowJob)
def create_job(job: JobCreate, db: Session = Depends(get_db), current_user: User = Depends(get_current_user_from_token)):
    """Create a job owned by the authenticated user and return it."""
    return create_new_job(job=job, db=db, owner_id=current_user.id)
@router.put("/update/{id}")
def update_job(id: int, job: JobCreate, db: Session = Depends(get_db), current_user: User = Depends(get_current_user_from_token)):
    """Update job ``id`` on behalf of the authenticated user.

    Responds 404 when the repository reports no matching job (return
    value 0); otherwise returns a success payload.
    """
    updated = update_job_by_id(id, job, db, current_user.id)
    if updated == 0:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Job with id {id} does not exist",
        )
    return {"detail": "Successfully updated"}
@router.get("/get/{id}", response_model=ShowJob)
def retrieve_job_by_id(id: int, db: Session = Depends(get_db)):
    """Return the job with the given id, or respond 404 if absent."""
    found = retrieve_job(id=id, db=db)
    if not found:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Job with id {id} does not exist",
        )
    return found
@router.get("/all", response_model=List[ShowJob])
def retrieve_all_jobs(db: Session = Depends(get_db)):
    """Return every job currently stored in the database."""
    return list_jobs(db=db)
@router.delete("/delete/{id}")
def delete_job(id: int, db: Session = Depends(get_db), current_user: User = Depends(get_current_user_from_token)):
    """Delete job ``id`` on behalf of the authenticated user.

    Responds 404 when no matching job exists.  NOTE(review): the success
    payload key is "details" here but "detail" in update_job -- preserved
    as-is for client compatibility.
    """
    removed = delete_job_by_id(id, db, owner_id=current_user.id)
    if removed == 0:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Job with id {id} does not exist",
        )
    return {"details": "Successfully deleted"}
|
normal
|
{
"blob_id": "e8092faed22607f9c8f18a79709022037ff647bf",
"index": 9625,
"step-1": "<mask token>\n\n\[email protected]('/create-job', response_model=ShowJob)\ndef create_job(job: JobCreate, db: Session=Depends(get_db), current_user:\n User=Depends(get_current_user_from_token)):\n owner_id = current_user.id\n job = create_new_job(job=job, db=db, owner_id=owner_id)\n return job\n\n\[email protected]('/update/{id}')\ndef update_job(id: int, job: JobCreate, db: Session=Depends(get_db),\n current_user: User=Depends(get_current_user_from_token)):\n owner_id = current_user.id\n message = update_job_by_id(id, job, db, owner_id)\n if message == 0:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\n f'Job with id {id} does not exist')\n return {'detail': 'Successfully updated'}\n\n\[email protected]('/get/{id}', response_model=ShowJob)\ndef retrieve_job_by_id(id: int, db: Session=Depends(get_db)):\n job = retrieve_job(id=id, db=db)\n if not job:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\n f'Job with id {id} does not exist')\n return job\n\n\[email protected]('/all', response_model=List[ShowJob])\ndef retrieve_all_jobs(db: Session=Depends(get_db)):\n jobs = list_jobs(db=db)\n return jobs\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/create-job', response_model=ShowJob)\ndef create_job(job: JobCreate, db: Session=Depends(get_db), current_user:\n User=Depends(get_current_user_from_token)):\n owner_id = current_user.id\n job = create_new_job(job=job, db=db, owner_id=owner_id)\n return job\n\n\[email protected]('/update/{id}')\ndef update_job(id: int, job: JobCreate, db: Session=Depends(get_db),\n current_user: User=Depends(get_current_user_from_token)):\n owner_id = current_user.id\n message = update_job_by_id(id, job, db, owner_id)\n if message == 0:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\n f'Job with id {id} does not exist')\n return {'detail': 'Successfully updated'}\n\n\[email protected]('/get/{id}', response_model=ShowJob)\ndef retrieve_job_by_id(id: int, db: Session=Depends(get_db)):\n job = retrieve_job(id=id, db=db)\n if not job:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\n f'Job with id {id} does not exist')\n return job\n\n\[email protected]('/all', response_model=List[ShowJob])\ndef retrieve_all_jobs(db: Session=Depends(get_db)):\n jobs = list_jobs(db=db)\n return jobs\n\n\[email protected]('/delete/{id}')\ndef delete_job(id: int, db: Session=Depends(get_db), current_user: User=\n Depends(get_current_user_from_token)):\n owner_id = current_user.id\n message = delete_job_by_id(id, db, owner_id=owner_id)\n if message == 0:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\n f'Job with id {id} does not exist')\n return {'details': 'Successfully deleted'}\n",
"step-3": "<mask token>\nrouter = APIRouter()\n\n\[email protected]('/create-job', response_model=ShowJob)\ndef create_job(job: JobCreate, db: Session=Depends(get_db), current_user:\n User=Depends(get_current_user_from_token)):\n owner_id = current_user.id\n job = create_new_job(job=job, db=db, owner_id=owner_id)\n return job\n\n\[email protected]('/update/{id}')\ndef update_job(id: int, job: JobCreate, db: Session=Depends(get_db),\n current_user: User=Depends(get_current_user_from_token)):\n owner_id = current_user.id\n message = update_job_by_id(id, job, db, owner_id)\n if message == 0:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\n f'Job with id {id} does not exist')\n return {'detail': 'Successfully updated'}\n\n\[email protected]('/get/{id}', response_model=ShowJob)\ndef retrieve_job_by_id(id: int, db: Session=Depends(get_db)):\n job = retrieve_job(id=id, db=db)\n if not job:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\n f'Job with id {id} does not exist')\n return job\n\n\[email protected]('/all', response_model=List[ShowJob])\ndef retrieve_all_jobs(db: Session=Depends(get_db)):\n jobs = list_jobs(db=db)\n return jobs\n\n\[email protected]('/delete/{id}')\ndef delete_job(id: int, db: Session=Depends(get_db), current_user: User=\n Depends(get_current_user_from_token)):\n owner_id = current_user.id\n message = delete_job_by_id(id, db, owner_id=owner_id)\n if message == 0:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\n f'Job with id {id} does not exist')\n return {'details': 'Successfully deleted'}\n",
"step-4": "from fastapi import APIRouter, Depends, HTTPException, status\nfrom sqlalchemy.orm import Session\nfrom typing import List\nfrom sqlalchemy.sql.functions import current_date, current_user\nfrom db.session import get_db\nfrom db.models.jobs import Job\nfrom schemas.jobs import JobCreate, ShowJob\nfrom db.repository.jobs import create_new_job, delete_job_by_id, retrieve_job, list_jobs, update_job_by_id\nfrom apis.version1.route_login import get_current_user_from_token\nfrom db.models.users import User\nrouter = APIRouter()\n\n\[email protected]('/create-job', response_model=ShowJob)\ndef create_job(job: JobCreate, db: Session=Depends(get_db), current_user:\n User=Depends(get_current_user_from_token)):\n owner_id = current_user.id\n job = create_new_job(job=job, db=db, owner_id=owner_id)\n return job\n\n\[email protected]('/update/{id}')\ndef update_job(id: int, job: JobCreate, db: Session=Depends(get_db),\n current_user: User=Depends(get_current_user_from_token)):\n owner_id = current_user.id\n message = update_job_by_id(id, job, db, owner_id)\n if message == 0:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\n f'Job with id {id} does not exist')\n return {'detail': 'Successfully updated'}\n\n\[email protected]('/get/{id}', response_model=ShowJob)\ndef retrieve_job_by_id(id: int, db: Session=Depends(get_db)):\n job = retrieve_job(id=id, db=db)\n if not job:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\n f'Job with id {id} does not exist')\n return job\n\n\[email protected]('/all', response_model=List[ShowJob])\ndef retrieve_all_jobs(db: Session=Depends(get_db)):\n jobs = list_jobs(db=db)\n return jobs\n\n\[email protected]('/delete/{id}')\ndef delete_job(id: int, db: Session=Depends(get_db), current_user: User=\n Depends(get_current_user_from_token)):\n owner_id = current_user.id\n message = delete_job_by_id(id, db, owner_id=owner_id)\n if message == 0:\n raise 
HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=\n f'Job with id {id} does not exist')\n return {'details': 'Successfully deleted'}\n",
"step-5": "from fastapi import APIRouter, Depends, HTTPException, status\nfrom sqlalchemy.orm import Session\nfrom typing import List\n\nfrom sqlalchemy.sql.functions import current_date, current_user\n\nfrom db.session import get_db\nfrom db.models.jobs import Job\nfrom schemas.jobs import JobCreate, ShowJob\nfrom db.repository.jobs import create_new_job, delete_job_by_id, retrieve_job, list_jobs, update_job_by_id\nfrom apis.version1.route_login import get_current_user_from_token\nfrom db.models.users import User\n\n\nrouter = APIRouter()\n\n\[email protected](\"/create-job\", response_model=ShowJob)\ndef create_job(job: JobCreate, db: Session = Depends(get_db), current_user: User = Depends(get_current_user_from_token)):\n owner_id = current_user.id\n job = create_new_job(job=job, db=db, owner_id=owner_id)\n return job\n\n\[email protected](\"/update/{id}\")\ndef update_job(id: int, job: JobCreate, db: Session = Depends(get_db), current_user: User = Depends(get_current_user_from_token)):\n owner_id = current_user.id\n message = update_job_by_id(id, job, db, owner_id)\n\n if message == 0:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"Job with id {id} does not exist\"\n )\n \n return {\"detail\": \"Successfully updated\"}\n\n\[email protected](\"/get/{id}\", response_model=ShowJob)\ndef retrieve_job_by_id(id: int, db: Session = Depends(get_db)):\n job = retrieve_job(id=id, db=db)\n if not job:\n raise HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"Job with id {id} does not exist\"\n )\n return job\n\n\[email protected](\"/all\", response_model=List[ShowJob])\ndef retrieve_all_jobs(db: Session = Depends(get_db)):\n jobs = list_jobs(db=db)\n return jobs\n\n\[email protected](\"/delete/{id}\")\ndef delete_job(id: int, db: Session = Depends(get_db), current_user: User = Depends(get_current_user_from_token)):\n owner_id = current_user.id\n message = delete_job_by_id(id, db, owner_id=owner_id)\n if message == 0:\n raise 
HTTPException(\n status_code=status.HTTP_404_NOT_FOUND,\n detail=f\"Job with id {id} does not exist\"\n )\n return {\"details\": \"Successfully deleted\"}\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
# -*- coding: utf-8 -*-
"""
current_models - library of ionic current models implemented in Python
Created on Mon Apr 10 16:30:04 2017
@author: Oliver Britton
"""
import os
import sys
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
" Voltage clamp generator functions "
" //--Nav models--\\ "
" -- Nav 1.7 models -- "
def nav17vw(Y,t,voltage_clamp_func,voltage_clamp_params):
    """Human Nav1.7 channel (Vasylyev & Waxman formulation).

    Y = [m, h] (activation, fast inactivation).  Returns the gate
    derivatives [dm/dt, dh/dt] at the clamped membrane potential.
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    m, h = Y[0], Y[1]

    def _inf_tau(alpha, beta):
        # Steady state and time constant from forward/backward rates.
        total = alpha + beta
        return alpha/total, 1/total

    # Activation rates: alpha is roughly closed->open, beta open->closed.
    a_m = 10.22 - 10.22/(1 + np.exp((v+7.19)/15.43))
    b_m = 23.76/(1 + np.exp((v+70.37)/14.53))
    m_inf, m_tau = _inf_tau(a_m, b_m)

    # Fast inactivation rates.
    a_h = 0.0744/(1 + np.exp((v+99.76)/11.07))
    b_h = 2.54 - 2.54/(1 + np.exp((v+7.8)/10.68))
    h_inf, h_tau = _inf_tau(a_h, b_h)

    return [(m_inf - m)/m_tau, (h_inf - h)/h_tau]
def nav17cw(Y,t,voltage_clamp_func,voltage_clamp_params):
    """Nav1.7 channel (Choi & Waxman 2011; rat data).

    Y = [m, h, s]: activation, fast inactivation, slow inactivation.
    Returns [dm/dt, dh/dt, ds/dt] at the clamped potential.
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    m, h, s = Y[0], Y[1], Y[2]

    def _inf_tau(alpha, beta):
        total = alpha + beta
        return alpha/total, 1/total

    # Activation gate m.
    m_inf, m_tau = _inf_tau(15.5/(1 + np.exp(-(v-5)/(12.08))),
                            35.2/(1 + np.exp((v+72.7)/16.7)))
    # Fast inactivation gate h.  NOTE: the published beta_h fit goes
    # negative as v -> -inf; preserved as-is.
    h_inf, h_tau = _inf_tau(0.38685/(1 + np.exp((v+122.35)/15.29)),
                            -0.00283 + 2.00283/(1 + np.exp(-(v+5.5266)/12.70195)))
    # Slow inactivation gate s.
    s_inf, s_tau = _inf_tau(0.00003 + 0.00092/(1 + np.exp((v+93.9)/16.6)),
                            132.05 - 132.05/(1 + np.exp((v-384.9)/28.5)))

    return [(m_inf - m)/m_tau, (h_inf - h)/h_tau, (s_inf - s)/s_tau]
" -- Nav 1.8 models -- "
def nav18hw(Y,t,voltage_clamp_func,voltage_clamp_params):
    """Human Nav1.8 channel (Huang & Waxman, ~2014).

    Y = [m, h]; returns [dm/dt, dh/dt] at the clamped potential.
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    m, h = Y[0], Y[1]

    def _inf_tau(alpha, beta):
        total = alpha + beta
        return alpha/total, 1/total

    # Activation gate m.
    m_inf, m_tau = _inf_tau(7.35 - 7.35/(1 + np.exp((v+1.38)/10.9)),
                            5.97/(1 + np.exp((v+56.43)/18.26)))
    # Inactivation gate h.
    h_inf, h_tau = _inf_tau(0.011 + 1.39/(1 + np.exp((v+78.04)/11.32)),
                            0.56 - 0.56/(1 + np.exp((v-21.82)/20.03)))

    return [(m_inf - m)/m_tau, (h_inf - h)/h_tau]
def nav18tf(Y,t,voltage_clamp_func,voltage_clamp_params):
    """Nav1.8 channel as used in the Tigerholm model.

    Y = [m, h, s, u]: activation, fast inactivation, and two slow
    inactivation gates.  Returns the four gate derivatives.
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    m, h, s, u = Y[0], Y[1], Y[2], Y[3]

    # m gate: steady state and tau from forward/backward rates.
    a_m = 2.85 - 2.839/(1 + np.exp((v-1.159)/13.95))
    b_m = 7.6205/(1 + np.exp((v+46.463)/8.8289))
    m_inf = a_m/(a_m + b_m)
    m_tau = 1/(a_m + b_m)

    # h gate: Boltzmann steady state, Gaussian voltage-dependent tau.
    h_inf = 1/(1+np.exp((v+32.2)/4))
    h_tau = 1.218 + 42.043*np.exp(-((v+38.1)**2)/(2*15.19**2))

    # s gate: tau derived from rates, steady state fitted separately.
    a_s = 0.001 * 5.4203 / (1 + np.exp((v+79.816)/16.269))
    b_s = 0.001 * 5.0757 / (1 + np.exp(-(v+15.968)/11.542))
    s_inf = 1/(1+np.exp((v+45.0)/8))
    s_tau = 1/(a_s + b_s)

    # u gate: same construction as s, slower kinetics.
    a_u = 0.002 * 2.0434 / (1 + np.exp((v+67.499)/19.51))
    b_u = 0.002 * 1.9952 / (1 + np.exp(-(v+30.963)/14.792))
    u_inf = 1/(1+np.exp((v+51.0)/8))
    u_tau = 1.0/(a_u + b_u)

    return [(m_inf - m)/m_tau,
            (h_inf - h)/h_tau,
            (s_inf - s)/s_tau,
            (u_inf - u)/u_tau]
def nav18cw(Y,t,voltage_clamp_func,voltage_clamp_params):
    """Nav1.8 channel from Choi & Waxman 2011.  Y = [m, h]."""
    v = voltage_clamp_func(t, voltage_clamp_params)
    m, h = Y[0], Y[1]

    # Activation: rate constants -> steady state and tau.
    a = 2.85 - 2.839/(1 + np.exp((v-1.159)/13.95))
    b = 7.6205/(1 + np.exp((v+46.463)/8.8289))
    m_inf = a/(a + b)
    m_tau = 1/(a + b)

    # Inactivation: direct fits (Boltzmann steady state, Gaussian tau).
    h_inf = 1/(1+np.exp((v+32.2)/4))
    h_tau = 1.218 + 42.043*np.exp(-((v+38.1)**2)/(2*15.19**2))

    return [(m_inf - m)/m_tau, (h_inf - h)/h_tau]
" -- Nav 1.9 models -- "
def nav19hw(Y,t,voltage_clamp_func,voltage_clamp_params):
    """Nav1.9 channel (Huang & Waxman 2014).

    Y = [m, h, s]: activation, fast and slow inactivation.
    Returns [dm/dt, dh/dt, ds/dt] at the clamped potential.
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    m, h, s = Y[0], Y[1], Y[2]

    def _inf_tau(alpha, beta):
        total = alpha + beta
        return alpha/total, 1/total

    m_inf, m_tau = _inf_tau(0.751/(1 + np.exp(-(v+32.26)/13.71)),
                            5.68/(1 + np.exp((v+123.71)/13.94)))
    h_inf, h_tau = _inf_tau(0.082/(1 + np.exp((v+113.69)/17.4)),
                            0.24/(1 + np.exp(-(v-10.1)/17.2)))
    s_inf, s_tau = _inf_tau(0.019/(1 + np.exp((v+154.51)/11.46)),
                            0.000376/(1 + np.exp(-(v+60.92)/15.79)))

    return [(m_inf - m)/m_tau, (h_inf - h)/h_tau, (s_inf - s)/s_tau]
def nav19md(Y,t,voltage_clamp_func,voltage_clamp_params):
    """Nav 1.9 model from Maingret 2008 -- NOT IMPLEMENTED.

    The original body evaluated the voltage clamp and then returned
    ``[dm, dh, ds]`` without ever defining those names, so every call
    raised a NameError.  Until the Maingret et al. (2008) rate equations
    are filled in, fail loudly and intentionally instead.

    Raises:
        NotImplementedError: always.
    """
    raise NotImplementedError(
        "nav19md: Maingret (2008) Nav1.9 rate equations are not implemented yet")
def nav16zm(Y,t,voltage_clamp_func,voltage_clamp_params):
    """Nav1.6 channel (Zach Mainen 1994 formulation).

    Y = [m, h].  The h time constant comes from rate constants while its
    steady state is a separate Boltzmann fit.
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    m, h = Y[0], Y[1]

    # --- activation gate m (half-activation -43 mV, slope 6 mV) ---
    vh_m = -43.0
    alpha_m = 0.182*(v-vh_m)/(1-np.exp((vh_m-v)/6.))
    beta_m = 0.124*(-v+vh_m)/(1-np.exp((-vh_m+v)/6.))
    m_inf = alpha_m/(alpha_m + beta_m)
    m_tau = 1./(alpha_m + beta_m)

    # --- inactivation gate h ---
    vh_ha, vh_hb = -50.0, -75.0
    slope_h = 5.0
    r_ha, r_hb = 0.0091, 0.024
    alpha_h = r_ha*(v-vh_ha)/(1-np.exp((vh_ha-v)/slope_h))
    beta_h = r_hb*(-v+vh_hb)/(1-np.exp((-vh_hb+v)/slope_h))
    # Steady state is an independent Boltzmann (half -72 mV, slope 6.2).
    h_inf = 1.0/(1.0 + np.exp((v-(-72.0))/6.2))
    h_tau = 1./(alpha_h + beta_h)

    return [(m_inf - m)/m_tau, (h_inf - h)/h_tau]
" Kv models "
def kdr_tf(Y,t,voltage_clamp_func,voltage_clamp_params):
    """Delayed-rectifier K+ current (Tigerholm version of Sheets et al.).

    Single gate n; the underlying data were recorded at 21 C.
    Returns [dn/dt].
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    n = Y[0]
    q10 = 1.0  # 3.3 in the original; temperature scaling disabled here

    # Piecewise tau fit: single exponential above -31 mV, a
    # double-exponential form below it.
    if v > -31.0:
        raw_tau = 0.16+0.8*np.exp(-0.0267*(v+11))
    else:
        raw_tau = 1000*(0.000688 + 1/(np.exp((v+75.2)/6.5) + np.exp(-(v-131.5)/(34.8))))

    n_inf = 1/(1 + np.exp(-(v+45)/15.4))
    return [(n_inf - n)/(raw_tau/q10)]
def km_tf(Y,t,voltage_clamp_func,voltage_clamp_params):
    """Slow M-type K+ current (Tigerholm version).

    Steady-state activation follows Maingret et al. (2008), derived from
    Passmore 2003; activation has fast and slow time constants
    (Passmore et al. 2003), modelled as two gates ns and nf that share
    one steady state.  The caller combines them as
    g = gbar * (0.25*ns + 0.75*nf).

    Y = [ns, nf]; returns [dns/dt, dnf/dt].
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    ns, nf = Y[0], Y[1]
    q10 = 1.0  # 3.3 in the original; temperature scaling disabled

    # Slow tau: constant below -60 mV, linear in v above it.
    if v < -60.0:
        tau_slow = 219.0*q10
    else:
        tau_slow = 13.0*v + 1000.0*q10

    # Fast tau from exponential opening/closing rates.
    rate_open = 0.00395*np.exp((v+30.0)/40.0)
    rate_close = 0.00395*np.exp(-(v+30.0)/20.0)*q10
    tau_fast = 1.0/(rate_open + rate_close)

    # Shared steady state; activation threshold is around -30 mV.
    n_inf = 1.0/(1.0 + np.exp(-(v+30.0)/6.0))

    return [(n_inf - ns)/tau_slow, (n_inf - nf)/tau_fast]
def ka_tf(Y,t,voltage_clamp_func,voltage_clamp_params):
    """A-type transient K+ current (Tigerholm version).

    The caller assembles the conductance as g = gbar * n * h.
    Y = [n, h]; returns [dn/dt, dh/dt].
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    act, inact = Y[0], Y[1]
    q10 = 1.0  # 3.3 in the original; temperature scaling disabled

    act_inf = (1.0/(1.0 + np.exp(-(v+5.4+15)/16.4)))**4
    act_tau = 0.25 + 10.04*np.exp((-(v+24.67)**2)/(2*34.8**2))*q10

    inact_inf = 1.0/(1.0 + np.exp((v+49.9 + 15.0)/4.6))
    inact_tau = 20.0 + 50.0 * np.exp((-(v+40.0)**2)/(2.0*40.0**2))*q10
    # Floor the inactivation tau at 5 ms, following
    # Sheets / Choi-Waxman / Tigerholm.
    inact_tau = max(inact_tau, 5.0)

    return [(act_inf - act)/act_tau, (inact_inf - inact)/inact_tau]
"""
Ca models
Implemented:
cal_ja - Jaffe et al. 1994 ICaL model.
can_mi - Model of N-type Ca current from Migliore 95
To do:
SK
BK
Ca diffusion
"""
def cal_ja(Y,t,voltage_clamp_func,voltage_clamp_params):
    """L-type Ca current activation gate (Jaffe et al. 1994).

    Only the activation gate m is integrated here (Y = [m]); the current
    itself (GHK flux with a Ca-dependent h gate) is left commented out
    below for reference.  Returns [dm/dt].
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    m = Y[0]

    tfa = 1.          # activation tau scale factor
    ki = 0.001        # (mM) Ca-dependent inactivation constant
    cao = 2.5         # (mM) external [Ca]; Davidson
    # TODO: make cai an input like voltage.
    # ~100 nM resting [Ca]i, roughly per Lu, Zhang & Gold 2007 (rat DRG).
    cai = 1.e-4       # (mM)
    celsius = 37.

    def alpha(v):
        return 15.69*(81.5 - v)/(np.exp((-1.0*v+81.5)/10.0)-1.0)

    def beta(v):
        return 0.29*np.exp(-v/10.86)

    def KTF(celsius):
        # Thermal voltage (mV), scaled from 25 mV at 293.15 K.
        return ((25./293.15)*(celsius + 273.15))

    def efun(z):
        # x/(exp(x)-1) with a linear approximation near zero.
        # NOTE(review): iterates over z, so it expects an array input.
        return np.array([1 - i/2 if i < 1e-4 else i/(np.exp(i)-1) for i in z])

    def calc_ghk(v, cai, cao):
        # Goldman-Hodgkin-Katz driving term for Ca2+ (valence 2).
        f = KTF(celsius)/2
        nu = v/f
        return -f*(1. - (cai/cao)*np.exp(nu))*efun(nu)

    a = alpha(v)
    b = beta(v)
    tau = 1./(tfa*(a + b))
    minf = a/(a+b)
    # Current calculation, kept for reference:
    #   h2 = ki/(ki+cai)
    #   gcalbar = 0.003
    #   ghk = calc_ghk(v, cai, cao)
    #   ical = gcalbar*m*m*h2*ghk
    return [(minf - m)/tau]
def can_mi():
    """
    Model of N-type Ca current from Migliore 95.

    Stub: no gating equations are implemented yet, so calling this is a
    no-op.  NOTE(review): unlike the other channel models this takes no
    (Y, t, clamp) arguments; align the signature when implementing.
    """
    pass
" HCN models "
def hcn_kn(Y,t,voltage_clamp_func,voltage_clamp_params):
    """HCN (Ih) model after Kouranova, slow and fast gates.

    The current is non-specific; the caller should use a reversal
    potential of about -30 mV.  Y = [n_s, n_f]; returns
    [dns/dt, dnf/dt].
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    slow, fast = Y[0], Y[1]

    # Both gates share one steady-state activation curve.
    steady = 1/(1 + np.exp((v+87.2)/9.7))
    # Piecewise tau fits, split at -70 mV.
    if v > -70.0:
        tau_slow = 300.0 + 542.0 * np.exp((v+25.0)/20.0)
        tau_fast = 140.0 + 50.0 * np.exp(-(v+25.0)/20.0)
    else:
        tau_slow = 2500.0 + 100.0 * np.exp((v+240.0)/50.0)
        tau_fast = 250.0 + 12.0 * np.exp((v+240.0)/50.0)

    return [(steady - slow)/tau_slow, (steady - fast)/tau_fast]
def hcn_tf(Y,t,voltage_clamp_func,voltage_clamp_params):
    """Tigerholm variant of the Kouranova Ih model.

    Gating is identical to hcn_kn; the difference is only in how the
    caller computes the current -- instead of one non-specific reversal
    potential, it is split 50/50 between Na+ and K+ components.
    Y = [n_s, n_f]; returns [dns/dt, dnf/dt].
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    slow, fast = Y[0], Y[1]

    steady = 1/(1 + np.exp((v+87.2)/9.7))
    if v > -70.0:
        tau_slow = 300.0 + 542.0 * np.exp((v+25.0)/20.0)
        tau_fast = 140.0 + 50.0 * np.exp(-(v+25.0)/20.0)
    else:
        tau_slow = 2500.0 + 100.0 * np.exp((v+240.0)/50.0)
        tau_fast = 250.0 + 12.0 * np.exp((v+240.0)/50.0)

    return [(steady - slow)/tau_slow, (steady - fast)/tau_fast]
"""
# ena, ek, + or -?
Ih_na = 0.5 * g_h (0.5*n_s + 0.5*n_f) * (Vm + ena)
Ih_k = 0.5 * g_h * (0.5*n_s + 0.5*n_f) * (Vm + ek)
"""
" Test models "
def nav17test(Y, t, voltage_clamp_func, voltage_clamp_params):
    """
    Human Nav1.7 kinetics (Vasylyev & Waxman) -- test copy.

    Two-gate Hodgkin-Huxley scheme: activation gate m and inactivation
    gate h, each relaxing toward a voltage-dependent steady state.
    Rate-constant parameterization: a rate magnitude, a half-activation
    voltage shift, and a slope factor per alpha/beta expression.

    Y = [m, h]; returns [dm/dt, dh/dt] at the clamped voltage.
    """
    v = voltage_clamp_func(t, voltage_clamp_params)
    m, h = Y[0], Y[1]

    # Activation (m) rate constants: closed->open (a_m), open->closed (b_m).
    a_m = 10.22 - 10.22 / (1.0 + np.exp((v + 7.19) / 15.43))
    b_m = 23.76 / (1.0 + np.exp((v + 70.37) / 14.53))
    m_ss = a_m / (a_m + b_m)
    tau_m = 1.0 / (a_m + b_m)

    # Inactivation (h) rate constants.
    a_h = 0.0744 / (1.0 + np.exp((v + 99.76) / 11.07))
    b_h = 2.54 - 2.54 / (1.0 + np.exp((v + 7.8) / 10.68))
    h_ss = a_h / (a_h + b_h)
    tau_h = 1.0 / (a_h + b_h)

    # First-order relaxation of each gate toward its steady state.
    return [(m_ss - m) / tau_m, (h_ss - h) / tau_h]
|
normal
|
{
"blob_id": "012ab947f7a2c9d44f54464b3e477582ffcf3d77",
"index": 5589,
"step-1": "<mask token>\n\n\ndef nav17vw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Human Nav 1.7 from Vasylyev Waxman \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))\n beta_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))\n \"\"\"\n Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.\n \"\"\"\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))\n beta_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n\n\ndef nav17cw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Rat? Nav 1.7 from Choi Waxman 2011 \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n s = Y[2]\n alpha_m = 15.5 / (1 + np.exp(-(v - 5) / 12.08))\n beta_m = 35.2 / (1 + np.exp((v + 72.7) / 16.7))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.38685 / (1 + np.exp((v + 122.35) / 15.29))\n beta_h = -0.00283 + 2.00283 / (1 + np.exp(-(v + 5.5266) / 12.70195))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n alpha_s = 3e-05 + 0.00092 / (1 + np.exp((v + 93.9) / 16.6))\n beta_s = 132.05 - 132.05 / (1 + np.exp((v - 384.9) / 28.5))\n sinf = alpha_s / (alpha_s + beta_s)\n stau = 1 / (alpha_s + beta_s)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n return [dm, dh, ds]\n\n\n<mask token>\n\n\ndef nav18tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Rat? 
Nav 1.8 used in Tigerholm model \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n s = Y[2]\n u = Y[3]\n alpha_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))\n beta_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n hinf = 1 / (1 + np.exp((v + 32.2) / 4))\n htau = 1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))\n alpha_s = 0.001 * 5.4203 / (1 + np.exp((v + 79.816) / 16.269))\n beta_s = 0.001 * 5.0757 / (1 + np.exp(-(v + 15.968) / 11.542))\n sinf = 1 / (1 + np.exp((v + 45.0) / 8))\n stau = 1 / (alpha_s + beta_s)\n alpha_u = 0.002 * 2.0434 / (1 + np.exp((v + 67.499) / 19.51))\n beta_u = 0.002 * 1.9952 / (1 + np.exp(-(v + 30.963) / 14.792))\n uinf = 1 / (1 + np.exp((v + 51.0) / 8))\n utau = 1.0 / (alpha_u + beta_u)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n du = (uinf - u) / utau\n return [dm, dh, ds, du]\n\n\ndef nav18cw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.8 model used in Choi Waxman 2011 \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))\n beta_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n hinf = 1 / (1 + np.exp((v + 32.2) / 4))\n htau = 1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n\n\n<mask token>\n\n\ndef nav19hw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.9 model from Huang Waxman 2014\"\"\"\n m = Y[0]\n h = Y[1]\n s = Y[2]\n v = voltage_clamp_func(t, voltage_clamp_params)\n alpha_m = 0.751 / (1 + np.exp(-(v + 32.26) / 13.71))\n beta_m = 5.68 / (1 + np.exp((v + 123.71) / 13.94))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.082 / (1 + np.exp((v + 113.69) / 17.4))\n beta_h = 0.24 / (1 
+ np.exp(-(v - 10.1) / 17.2))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n alpha_s = 0.019 / (1 + np.exp((v + 154.51) / 11.46))\n beta_s = 0.000376 / (1 + np.exp(-(v + 60.92) / 15.79))\n sinf = alpha_s / (alpha_s + beta_s)\n stau = 1 / (alpha_s + beta_s)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n return [dm, dh, ds]\n\n\n<mask token>\n\n\ndef nav16zm(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.6 model from Zach Mainen 1994 \"\"\"\n m = Y[0]\n h = Y[1]\n v = voltage_clamp_func(t, voltage_clamp_params)\n vhalf = -43.0\n a_m = 0.182 * (v - vhalf) / (1 - np.exp((vhalf - v) / 6.0))\n b_m = 0.124 * (-v + vhalf) / (1 - np.exp((-vhalf + v) / 6.0))\n m_inf = a_m / (a_m + b_m)\n m_tau = 1.0 / (a_m + b_m)\n vhalf_ha = -50.0\n vhalf_hb = -75.0\n q_h = 5.0\n vhalf_inf = -72.0\n qinf = 6.2\n rate_ha = 0.0091\n rate_hb = 0.024\n a_h = rate_ha * (v - vhalf_ha) / (1 - np.exp((vhalf_ha - v) / q_h))\n b_h = rate_hb * (-v + vhalf_hb) / (1 - np.exp((-vhalf_hb + v) / q_h))\n h_inf = 1.0 / (1.0 + np.exp((v - vhalf_inf) / qinf))\n h_tau = 1.0 / (a_h + b_h)\n dm = (m_inf - m) / m_tau\n dh = (h_inf - h) / h_tau\n return [dm, dh]\n\n\n<mask token>\n\n\ndef kdr_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Tigerholm version of the Sheets et al. IKdr model \"\"\"\n \"\"\" Model was developed from data recorded at 21 oC \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n = Y[0]\n q10 = 1.0\n if v > -31.0:\n tau = 0.16 + 0.8 * np.exp(-0.0267 * (v + 11))\n else:\n tau = 1000 * (0.000688 + 1 / (np.exp((v + 75.2) / 6.5) + np.exp(-(v -\n 131.5) / 34.8)))\n ninf = 1 / (1 + np.exp(-(v + 45) / 15.4))\n ntau = tau / q10\n dn = (ninf - n) / ntau\n return [dn]\n\n\ndef km_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Tigerholm version of the IM current. Current is from multiple sources:\n The voltage dependence of steady-state activation forthe KM current is from\n Maingret et al. 
(2008), which was derived from Passmore 2003. The KM channel activation has a fast and a slow \n time constant as described by Passmore et al. (2003). To account for the \n two time constants, weimplemented one fast (nf) and one slow (ns) gate, \n combined as follows.\n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n ns = Y[0]\n nf = Y[1]\n q10 = 1.0\n if v < -60.0:\n nstau = 219.0 * q10\n else:\n nstau = 13.0 * v + 1000.0 * q10\n nftau_alpha = 0.00395 * np.exp((v + 30.0) / 40.0)\n nftau_beta = 0.00395 * np.exp(-(v + 30.0) / 20.0) * q10\n nftau = 1.0 / (nftau_alpha + nftau_beta)\n ninf = 1.0 / (1.0 + np.exp(-(v + 30.0) / 6.0))\n dns = (ninf - ns) / nstau\n dnf = (ninf - nf) / nftau\n return [dns, dnf]\n\n\n<mask token>\n\n\ndef cal_ja(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\"\n Jaffe et al. 1994 ICaL model. \n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n tfa = 1.0\n ki = 0.001\n cao = 2.5\n \"\"\" To do: make cai variable as an input like voltage \"\"\"\n cai = 0.0001\n celsius = 37.0\n\n def alpha(v):\n return 15.69 * (81.5 - v) / (np.exp((-1.0 * v + 81.5) / 10.0) - 1.0)\n\n def beta(v):\n return 0.29 * np.exp(-v / 10.86)\n\n def KTF(celsius):\n return 25.0 / 293.15 * (celsius + 273.15)\n\n def efun(z):\n return np.array([(1 - i / 2 if i < 0.0001 else i / (np.exp(i) - 1)) for\n i in z])\n\n def calc_ghk(v, cai, cao):\n f = KTF(celsius) / 2\n nu = v / f\n return -f * (1.0 - cai / cao * np.exp(nu)) * efun(nu)\n a = alpha(v)\n b = beta(v)\n tau = 1.0 / (tfa * (a + b))\n minf = a / (a + b)\n dm = (minf - m) / tau\n \"\"\" Calculating the current \n # h gate\n h2 = ki/(ki+cai)\n gcalbar = 0.003\n ghk = calc_ghk(v,cai,cao)\n ical = gcalbar*m*m*h2*ghk\n \"\"\"\n return [dm]\n\n\ndef can_mi():\n \"\"\"\n Model of N-type Ca current from Migliore 95\n \"\"\"\n pass\n\n\n<mask token>\n\n\ndef hcn_kn(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" \n Kouranova Ih model with non-specific current (reversal potential 
should be set at -30 mV \n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n_s = Y[0]\n n_f = Y[1]\n ninf_s = 1 / (1 + np.exp((v + 87.2) / 9.7))\n ninf_f = ninf_s\n if v > -70.0:\n tau_ns = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)\n tau_nf = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)\n else:\n tau_ns = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)\n tau_nf = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)\n dns = (ninf_s - n_s) / tau_ns\n dnf = (ninf_f - n_f) / tau_nf\n return [dns, dnf]\n\n\ndef hcn_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\"\n Tigerholm version of the Kouranova Ih model which is identical except\n that when you calculate the current you don't use a nonspecific reversal potential and instead split the current between Na+ and K+, 50/50. \n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n_s = Y[0]\n n_f = Y[1]\n ninf_s = 1 / (1 + np.exp((v + 87.2) / 9.7))\n ninf_f = ninf_s\n if v > -70.0:\n tau_ns = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)\n tau_nf = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)\n else:\n tau_ns = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)\n tau_nf = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)\n dns = (ninf_s - n_s) / tau_ns\n dnf = (ninf_f - n_f) / tau_nf\n return [dns, dnf]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef nav17vw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Human Nav 1.7 from Vasylyev Waxman \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))\n beta_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))\n \"\"\"\n Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.\n \"\"\"\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))\n beta_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n\n\ndef nav17cw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Rat? Nav 1.7 from Choi Waxman 2011 \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n s = Y[2]\n alpha_m = 15.5 / (1 + np.exp(-(v - 5) / 12.08))\n beta_m = 35.2 / (1 + np.exp((v + 72.7) / 16.7))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.38685 / (1 + np.exp((v + 122.35) / 15.29))\n beta_h = -0.00283 + 2.00283 / (1 + np.exp(-(v + 5.5266) / 12.70195))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n alpha_s = 3e-05 + 0.00092 / (1 + np.exp((v + 93.9) / 16.6))\n beta_s = 132.05 - 132.05 / (1 + np.exp((v - 384.9) / 28.5))\n sinf = alpha_s / (alpha_s + beta_s)\n stau = 1 / (alpha_s + beta_s)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n return [dm, dh, ds]\n\n\n<mask token>\n\n\ndef nav18tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Rat? 
Nav 1.8 used in Tigerholm model \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n s = Y[2]\n u = Y[3]\n alpha_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))\n beta_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n hinf = 1 / (1 + np.exp((v + 32.2) / 4))\n htau = 1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))\n alpha_s = 0.001 * 5.4203 / (1 + np.exp((v + 79.816) / 16.269))\n beta_s = 0.001 * 5.0757 / (1 + np.exp(-(v + 15.968) / 11.542))\n sinf = 1 / (1 + np.exp((v + 45.0) / 8))\n stau = 1 / (alpha_s + beta_s)\n alpha_u = 0.002 * 2.0434 / (1 + np.exp((v + 67.499) / 19.51))\n beta_u = 0.002 * 1.9952 / (1 + np.exp(-(v + 30.963) / 14.792))\n uinf = 1 / (1 + np.exp((v + 51.0) / 8))\n utau = 1.0 / (alpha_u + beta_u)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n du = (uinf - u) / utau\n return [dm, dh, ds, du]\n\n\ndef nav18cw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.8 model used in Choi Waxman 2011 \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))\n beta_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n hinf = 1 / (1 + np.exp((v + 32.2) / 4))\n htau = 1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n\n\n<mask token>\n\n\ndef nav19hw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.9 model from Huang Waxman 2014\"\"\"\n m = Y[0]\n h = Y[1]\n s = Y[2]\n v = voltage_clamp_func(t, voltage_clamp_params)\n alpha_m = 0.751 / (1 + np.exp(-(v + 32.26) / 13.71))\n beta_m = 5.68 / (1 + np.exp((v + 123.71) / 13.94))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.082 / (1 + np.exp((v + 113.69) / 17.4))\n beta_h = 0.24 / (1 
+ np.exp(-(v - 10.1) / 17.2))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n alpha_s = 0.019 / (1 + np.exp((v + 154.51) / 11.46))\n beta_s = 0.000376 / (1 + np.exp(-(v + 60.92) / 15.79))\n sinf = alpha_s / (alpha_s + beta_s)\n stau = 1 / (alpha_s + beta_s)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n return [dm, dh, ds]\n\n\n<mask token>\n\n\ndef nav16zm(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.6 model from Zach Mainen 1994 \"\"\"\n m = Y[0]\n h = Y[1]\n v = voltage_clamp_func(t, voltage_clamp_params)\n vhalf = -43.0\n a_m = 0.182 * (v - vhalf) / (1 - np.exp((vhalf - v) / 6.0))\n b_m = 0.124 * (-v + vhalf) / (1 - np.exp((-vhalf + v) / 6.0))\n m_inf = a_m / (a_m + b_m)\n m_tau = 1.0 / (a_m + b_m)\n vhalf_ha = -50.0\n vhalf_hb = -75.0\n q_h = 5.0\n vhalf_inf = -72.0\n qinf = 6.2\n rate_ha = 0.0091\n rate_hb = 0.024\n a_h = rate_ha * (v - vhalf_ha) / (1 - np.exp((vhalf_ha - v) / q_h))\n b_h = rate_hb * (-v + vhalf_hb) / (1 - np.exp((-vhalf_hb + v) / q_h))\n h_inf = 1.0 / (1.0 + np.exp((v - vhalf_inf) / qinf))\n h_tau = 1.0 / (a_h + b_h)\n dm = (m_inf - m) / m_tau\n dh = (h_inf - h) / h_tau\n return [dm, dh]\n\n\n<mask token>\n\n\ndef kdr_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Tigerholm version of the Sheets et al. IKdr model \"\"\"\n \"\"\" Model was developed from data recorded at 21 oC \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n = Y[0]\n q10 = 1.0\n if v > -31.0:\n tau = 0.16 + 0.8 * np.exp(-0.0267 * (v + 11))\n else:\n tau = 1000 * (0.000688 + 1 / (np.exp((v + 75.2) / 6.5) + np.exp(-(v -\n 131.5) / 34.8)))\n ninf = 1 / (1 + np.exp(-(v + 45) / 15.4))\n ntau = tau / q10\n dn = (ninf - n) / ntau\n return [dn]\n\n\ndef km_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Tigerholm version of the IM current. Current is from multiple sources:\n The voltage dependence of steady-state activation forthe KM current is from\n Maingret et al. 
(2008), which was derived from Passmore 2003. The KM channel activation has a fast and a slow \n time constant as described by Passmore et al. (2003). To account for the \n two time constants, weimplemented one fast (nf) and one slow (ns) gate, \n combined as follows.\n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n ns = Y[0]\n nf = Y[1]\n q10 = 1.0\n if v < -60.0:\n nstau = 219.0 * q10\n else:\n nstau = 13.0 * v + 1000.0 * q10\n nftau_alpha = 0.00395 * np.exp((v + 30.0) / 40.0)\n nftau_beta = 0.00395 * np.exp(-(v + 30.0) / 20.0) * q10\n nftau = 1.0 / (nftau_alpha + nftau_beta)\n ninf = 1.0 / (1.0 + np.exp(-(v + 30.0) / 6.0))\n dns = (ninf - ns) / nstau\n dnf = (ninf - nf) / nftau\n return [dns, dnf]\n\n\n<mask token>\n\n\ndef cal_ja(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\"\n Jaffe et al. 1994 ICaL model. \n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n tfa = 1.0\n ki = 0.001\n cao = 2.5\n \"\"\" To do: make cai variable as an input like voltage \"\"\"\n cai = 0.0001\n celsius = 37.0\n\n def alpha(v):\n return 15.69 * (81.5 - v) / (np.exp((-1.0 * v + 81.5) / 10.0) - 1.0)\n\n def beta(v):\n return 0.29 * np.exp(-v / 10.86)\n\n def KTF(celsius):\n return 25.0 / 293.15 * (celsius + 273.15)\n\n def efun(z):\n return np.array([(1 - i / 2 if i < 0.0001 else i / (np.exp(i) - 1)) for\n i in z])\n\n def calc_ghk(v, cai, cao):\n f = KTF(celsius) / 2\n nu = v / f\n return -f * (1.0 - cai / cao * np.exp(nu)) * efun(nu)\n a = alpha(v)\n b = beta(v)\n tau = 1.0 / (tfa * (a + b))\n minf = a / (a + b)\n dm = (minf - m) / tau\n \"\"\" Calculating the current \n # h gate\n h2 = ki/(ki+cai)\n gcalbar = 0.003\n ghk = calc_ghk(v,cai,cao)\n ical = gcalbar*m*m*h2*ghk\n \"\"\"\n return [dm]\n\n\ndef can_mi():\n \"\"\"\n Model of N-type Ca current from Migliore 95\n \"\"\"\n pass\n\n\n<mask token>\n\n\ndef hcn_kn(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" \n Kouranova Ih model with non-specific current (reversal potential 
should be set at -30 mV \n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n_s = Y[0]\n n_f = Y[1]\n ninf_s = 1 / (1 + np.exp((v + 87.2) / 9.7))\n ninf_f = ninf_s\n if v > -70.0:\n tau_ns = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)\n tau_nf = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)\n else:\n tau_ns = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)\n tau_nf = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)\n dns = (ninf_s - n_s) / tau_ns\n dnf = (ninf_f - n_f) / tau_nf\n return [dns, dnf]\n\n\ndef hcn_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\"\n Tigerholm version of the Kouranova Ih model which is identical except\n that when you calculate the current you don't use a nonspecific reversal potential and instead split the current between Na+ and K+, 50/50. \n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n_s = Y[0]\n n_f = Y[1]\n ninf_s = 1 / (1 + np.exp((v + 87.2) / 9.7))\n ninf_f = ninf_s\n if v > -70.0:\n tau_ns = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)\n tau_nf = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)\n else:\n tau_ns = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)\n tau_nf = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)\n dns = (ninf_s - n_s) / tau_ns\n dnf = (ninf_f - n_f) / tau_nf\n return [dns, dnf]\n\n\n<mask token>\n\n\ndef nav17test(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Human Nav 1.7 from Vasylyev Waxman \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))\n beta_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))\n \"\"\"\n Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.\n \"\"\"\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))\n beta_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, 
dh]\n",
"step-3": "<mask token>\n\n\ndef nav17vw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Human Nav 1.7 from Vasylyev Waxman \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))\n beta_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))\n \"\"\"\n Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.\n \"\"\"\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))\n beta_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n\n\ndef nav17cw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Rat? Nav 1.7 from Choi Waxman 2011 \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n s = Y[2]\n alpha_m = 15.5 / (1 + np.exp(-(v - 5) / 12.08))\n beta_m = 35.2 / (1 + np.exp((v + 72.7) / 16.7))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.38685 / (1 + np.exp((v + 122.35) / 15.29))\n beta_h = -0.00283 + 2.00283 / (1 + np.exp(-(v + 5.5266) / 12.70195))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n alpha_s = 3e-05 + 0.00092 / (1 + np.exp((v + 93.9) / 16.6))\n beta_s = 132.05 - 132.05 / (1 + np.exp((v - 384.9) / 28.5))\n sinf = alpha_s / (alpha_s + beta_s)\n stau = 1 / (alpha_s + beta_s)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n return [dm, dh, ds]\n\n\n<mask token>\n\n\ndef nav18hw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Human Nav 1.8 from Huang Waxman 20(14?) 
\"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 7.35 - 7.35 / (1 + np.exp((v + 1.38) / 10.9))\n beta_m = 5.97 / (1 + np.exp((v + 56.43) / 18.26))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.011 + 1.39 / (1 + np.exp((v + 78.04) / 11.32))\n beta_h = 0.56 - 0.56 / (1 + np.exp((v - 21.82) / 20.03))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n\n\ndef nav18tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Rat? Nav 1.8 used in Tigerholm model \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n s = Y[2]\n u = Y[3]\n alpha_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))\n beta_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n hinf = 1 / (1 + np.exp((v + 32.2) / 4))\n htau = 1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))\n alpha_s = 0.001 * 5.4203 / (1 + np.exp((v + 79.816) / 16.269))\n beta_s = 0.001 * 5.0757 / (1 + np.exp(-(v + 15.968) / 11.542))\n sinf = 1 / (1 + np.exp((v + 45.0) / 8))\n stau = 1 / (alpha_s + beta_s)\n alpha_u = 0.002 * 2.0434 / (1 + np.exp((v + 67.499) / 19.51))\n beta_u = 0.002 * 1.9952 / (1 + np.exp(-(v + 30.963) / 14.792))\n uinf = 1 / (1 + np.exp((v + 51.0) / 8))\n utau = 1.0 / (alpha_u + beta_u)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n du = (uinf - u) / utau\n return [dm, dh, ds, du]\n\n\ndef nav18cw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.8 model used in Choi Waxman 2011 \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))\n beta_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n hinf = 1 / (1 + np.exp((v + 32.2) / 4))\n htau = 
1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n\n\n<mask token>\n\n\ndef nav19hw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.9 model from Huang Waxman 2014\"\"\"\n m = Y[0]\n h = Y[1]\n s = Y[2]\n v = voltage_clamp_func(t, voltage_clamp_params)\n alpha_m = 0.751 / (1 + np.exp(-(v + 32.26) / 13.71))\n beta_m = 5.68 / (1 + np.exp((v + 123.71) / 13.94))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.082 / (1 + np.exp((v + 113.69) / 17.4))\n beta_h = 0.24 / (1 + np.exp(-(v - 10.1) / 17.2))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n alpha_s = 0.019 / (1 + np.exp((v + 154.51) / 11.46))\n beta_s = 0.000376 / (1 + np.exp(-(v + 60.92) / 15.79))\n sinf = alpha_s / (alpha_s + beta_s)\n stau = 1 / (alpha_s + beta_s)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n return [dm, dh, ds]\n\n\n<mask token>\n\n\ndef nav16zm(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.6 model from Zach Mainen 1994 \"\"\"\n m = Y[0]\n h = Y[1]\n v = voltage_clamp_func(t, voltage_clamp_params)\n vhalf = -43.0\n a_m = 0.182 * (v - vhalf) / (1 - np.exp((vhalf - v) / 6.0))\n b_m = 0.124 * (-v + vhalf) / (1 - np.exp((-vhalf + v) / 6.0))\n m_inf = a_m / (a_m + b_m)\n m_tau = 1.0 / (a_m + b_m)\n vhalf_ha = -50.0\n vhalf_hb = -75.0\n q_h = 5.0\n vhalf_inf = -72.0\n qinf = 6.2\n rate_ha = 0.0091\n rate_hb = 0.024\n a_h = rate_ha * (v - vhalf_ha) / (1 - np.exp((vhalf_ha - v) / q_h))\n b_h = rate_hb * (-v + vhalf_hb) / (1 - np.exp((-vhalf_hb + v) / q_h))\n h_inf = 1.0 / (1.0 + np.exp((v - vhalf_inf) / qinf))\n h_tau = 1.0 / (a_h + b_h)\n dm = (m_inf - m) / m_tau\n dh = (h_inf - h) / h_tau\n return [dm, dh]\n\n\n<mask token>\n\n\ndef kdr_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Tigerholm version of the Sheets et al. 
IKdr model \"\"\"\n \"\"\" Model was developed from data recorded at 21 oC \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n = Y[0]\n q10 = 1.0\n if v > -31.0:\n tau = 0.16 + 0.8 * np.exp(-0.0267 * (v + 11))\n else:\n tau = 1000 * (0.000688 + 1 / (np.exp((v + 75.2) / 6.5) + np.exp(-(v -\n 131.5) / 34.8)))\n ninf = 1 / (1 + np.exp(-(v + 45) / 15.4))\n ntau = tau / q10\n dn = (ninf - n) / ntau\n return [dn]\n\n\ndef km_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Tigerholm version of the IM current. Current is from multiple sources:\n The voltage dependence of steady-state activation forthe KM current is from\n Maingret et al. (2008), which was derived from Passmore 2003. The KM channel activation has a fast and a slow \n time constant as described by Passmore et al. (2003). To account for the \n two time constants, weimplemented one fast (nf) and one slow (ns) gate, \n combined as follows.\n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n ns = Y[0]\n nf = Y[1]\n q10 = 1.0\n if v < -60.0:\n nstau = 219.0 * q10\n else:\n nstau = 13.0 * v + 1000.0 * q10\n nftau_alpha = 0.00395 * np.exp((v + 30.0) / 40.0)\n nftau_beta = 0.00395 * np.exp(-(v + 30.0) / 20.0) * q10\n nftau = 1.0 / (nftau_alpha + nftau_beta)\n ninf = 1.0 / (1.0 + np.exp(-(v + 30.0) / 6.0))\n dns = (ninf - ns) / nstau\n dnf = (ninf - nf) / nftau\n return [dns, dnf]\n\n\n<mask token>\n\n\ndef cal_ja(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\"\n Jaffe et al. 1994 ICaL model. 
\n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n tfa = 1.0\n ki = 0.001\n cao = 2.5\n \"\"\" To do: make cai variable as an input like voltage \"\"\"\n cai = 0.0001\n celsius = 37.0\n\n def alpha(v):\n return 15.69 * (81.5 - v) / (np.exp((-1.0 * v + 81.5) / 10.0) - 1.0)\n\n def beta(v):\n return 0.29 * np.exp(-v / 10.86)\n\n def KTF(celsius):\n return 25.0 / 293.15 * (celsius + 273.15)\n\n def efun(z):\n return np.array([(1 - i / 2 if i < 0.0001 else i / (np.exp(i) - 1)) for\n i in z])\n\n def calc_ghk(v, cai, cao):\n f = KTF(celsius) / 2\n nu = v / f\n return -f * (1.0 - cai / cao * np.exp(nu)) * efun(nu)\n a = alpha(v)\n b = beta(v)\n tau = 1.0 / (tfa * (a + b))\n minf = a / (a + b)\n dm = (minf - m) / tau\n \"\"\" Calculating the current \n # h gate\n h2 = ki/(ki+cai)\n gcalbar = 0.003\n ghk = calc_ghk(v,cai,cao)\n ical = gcalbar*m*m*h2*ghk\n \"\"\"\n return [dm]\n\n\ndef can_mi():\n \"\"\"\n Model of N-type Ca current from Migliore 95\n \"\"\"\n pass\n\n\n<mask token>\n\n\ndef hcn_kn(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" \n Kouranova Ih model with non-specific current (reversal potential should be set at -30 mV \n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n_s = Y[0]\n n_f = Y[1]\n ninf_s = 1 / (1 + np.exp((v + 87.2) / 9.7))\n ninf_f = ninf_s\n if v > -70.0:\n tau_ns = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)\n tau_nf = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)\n else:\n tau_ns = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)\n tau_nf = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)\n dns = (ninf_s - n_s) / tau_ns\n dnf = (ninf_f - n_f) / tau_nf\n return [dns, dnf]\n\n\ndef hcn_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\"\n Tigerholm version of the Kouranova Ih model which is identical except\n that when you calculate the current you don't use a nonspecific reversal potential and instead split the current between Na+ and K+, 50/50. 
\n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n_s = Y[0]\n n_f = Y[1]\n ninf_s = 1 / (1 + np.exp((v + 87.2) / 9.7))\n ninf_f = ninf_s\n if v > -70.0:\n tau_ns = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)\n tau_nf = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)\n else:\n tau_ns = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)\n tau_nf = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)\n dns = (ninf_s - n_s) / tau_ns\n dnf = (ninf_f - n_f) / tau_nf\n return [dns, dnf]\n\n\n<mask token>\n\n\ndef nav17test(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Human Nav 1.7 from Vasylyev Waxman \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))\n beta_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))\n \"\"\"\n Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.\n \"\"\"\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))\n beta_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n",
"step-4": "<mask token>\n\n\ndef nav17vw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Human Nav 1.7 from Vasylyev Waxman \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))\n beta_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))\n \"\"\"\n Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.\n \"\"\"\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))\n beta_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n\n\ndef nav17cw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Rat? Nav 1.7 from Choi Waxman 2011 \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n s = Y[2]\n alpha_m = 15.5 / (1 + np.exp(-(v - 5) / 12.08))\n beta_m = 35.2 / (1 + np.exp((v + 72.7) / 16.7))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.38685 / (1 + np.exp((v + 122.35) / 15.29))\n beta_h = -0.00283 + 2.00283 / (1 + np.exp(-(v + 5.5266) / 12.70195))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n alpha_s = 3e-05 + 0.00092 / (1 + np.exp((v + 93.9) / 16.6))\n beta_s = 132.05 - 132.05 / (1 + np.exp((v - 384.9) / 28.5))\n sinf = alpha_s / (alpha_s + beta_s)\n stau = 1 / (alpha_s + beta_s)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n return [dm, dh, ds]\n\n\n<mask token>\n\n\ndef nav18hw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Human Nav 1.8 from Huang Waxman 20(14?) 
\"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 7.35 - 7.35 / (1 + np.exp((v + 1.38) / 10.9))\n beta_m = 5.97 / (1 + np.exp((v + 56.43) / 18.26))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.011 + 1.39 / (1 + np.exp((v + 78.04) / 11.32))\n beta_h = 0.56 - 0.56 / (1 + np.exp((v - 21.82) / 20.03))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n\n\ndef nav18tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Rat? Nav 1.8 used in Tigerholm model \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n s = Y[2]\n u = Y[3]\n alpha_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))\n beta_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n hinf = 1 / (1 + np.exp((v + 32.2) / 4))\n htau = 1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))\n alpha_s = 0.001 * 5.4203 / (1 + np.exp((v + 79.816) / 16.269))\n beta_s = 0.001 * 5.0757 / (1 + np.exp(-(v + 15.968) / 11.542))\n sinf = 1 / (1 + np.exp((v + 45.0) / 8))\n stau = 1 / (alpha_s + beta_s)\n alpha_u = 0.002 * 2.0434 / (1 + np.exp((v + 67.499) / 19.51))\n beta_u = 0.002 * 1.9952 / (1 + np.exp(-(v + 30.963) / 14.792))\n uinf = 1 / (1 + np.exp((v + 51.0) / 8))\n utau = 1.0 / (alpha_u + beta_u)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n du = (uinf - u) / utau\n return [dm, dh, ds, du]\n\n\ndef nav18cw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.8 model used in Choi Waxman 2011 \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 2.85 - 2.839 / (1 + np.exp((v - 1.159) / 13.95))\n beta_m = 7.6205 / (1 + np.exp((v + 46.463) / 8.8289))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n hinf = 1 / (1 + np.exp((v + 32.2) / 4))\n htau = 
1.218 + 42.043 * np.exp(-(v + 38.1) ** 2 / (2 * 15.19 ** 2))\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n\n\n<mask token>\n\n\ndef nav19hw(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.9 model from Huang Waxman 2014\"\"\"\n m = Y[0]\n h = Y[1]\n s = Y[2]\n v = voltage_clamp_func(t, voltage_clamp_params)\n alpha_m = 0.751 / (1 + np.exp(-(v + 32.26) / 13.71))\n beta_m = 5.68 / (1 + np.exp((v + 123.71) / 13.94))\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.082 / (1 + np.exp((v + 113.69) / 17.4))\n beta_h = 0.24 / (1 + np.exp(-(v - 10.1) / 17.2))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n alpha_s = 0.019 / (1 + np.exp((v + 154.51) / 11.46))\n beta_s = 0.000376 / (1 + np.exp(-(v + 60.92) / 15.79))\n sinf = alpha_s / (alpha_s + beta_s)\n stau = 1 / (alpha_s + beta_s)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n ds = (sinf - s) / stau\n return [dm, dh, ds]\n\n\ndef nav19md(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.9 model from Maingret 2008\"\"\"\n m = Y[0]\n h = Y[1]\n s = Y[2]\n v = voltage_clamp_func(t, voltage_clamp_params)\n return [dm, dh, ds]\n\n\ndef nav16zm(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Nav 1.6 model from Zach Mainen 1994 \"\"\"\n m = Y[0]\n h = Y[1]\n v = voltage_clamp_func(t, voltage_clamp_params)\n vhalf = -43.0\n a_m = 0.182 * (v - vhalf) / (1 - np.exp((vhalf - v) / 6.0))\n b_m = 0.124 * (-v + vhalf) / (1 - np.exp((-vhalf + v) / 6.0))\n m_inf = a_m / (a_m + b_m)\n m_tau = 1.0 / (a_m + b_m)\n vhalf_ha = -50.0\n vhalf_hb = -75.0\n q_h = 5.0\n vhalf_inf = -72.0\n qinf = 6.2\n rate_ha = 0.0091\n rate_hb = 0.024\n a_h = rate_ha * (v - vhalf_ha) / (1 - np.exp((vhalf_ha - v) / q_h))\n b_h = rate_hb * (-v + vhalf_hb) / (1 - np.exp((-vhalf_hb + v) / q_h))\n h_inf = 1.0 / (1.0 + np.exp((v - vhalf_inf) / qinf))\n h_tau = 1.0 / (a_h + b_h)\n dm = (m_inf - m) / m_tau\n dh = (h_inf - h) / h_tau\n return [dm, 
dh]\n\n\n<mask token>\n\n\ndef kdr_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Tigerholm version of the Sheets et al. IKdr model \"\"\"\n \"\"\" Model was developed from data recorded at 21 oC \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n = Y[0]\n q10 = 1.0\n if v > -31.0:\n tau = 0.16 + 0.8 * np.exp(-0.0267 * (v + 11))\n else:\n tau = 1000 * (0.000688 + 1 / (np.exp((v + 75.2) / 6.5) + np.exp(-(v -\n 131.5) / 34.8)))\n ninf = 1 / (1 + np.exp(-(v + 45) / 15.4))\n ntau = tau / q10\n dn = (ninf - n) / ntau\n return [dn]\n\n\ndef km_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Tigerholm version of the IM current. Current is from multiple sources:\n The voltage dependence of steady-state activation forthe KM current is from\n Maingret et al. (2008), which was derived from Passmore 2003. The KM channel activation has a fast and a slow \n time constant as described by Passmore et al. (2003). To account for the \n two time constants, weimplemented one fast (nf) and one slow (ns) gate, \n combined as follows.\n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n ns = Y[0]\n nf = Y[1]\n q10 = 1.0\n if v < -60.0:\n nstau = 219.0 * q10\n else:\n nstau = 13.0 * v + 1000.0 * q10\n nftau_alpha = 0.00395 * np.exp((v + 30.0) / 40.0)\n nftau_beta = 0.00395 * np.exp(-(v + 30.0) / 20.0) * q10\n nftau = 1.0 / (nftau_alpha + nftau_beta)\n ninf = 1.0 / (1.0 + np.exp(-(v + 30.0) / 6.0))\n dns = (ninf - ns) / nstau\n dnf = (ninf - nf) / nftau\n return [dns, dnf]\n\n\n<mask token>\n\n\ndef cal_ja(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\"\n Jaffe et al. 1994 ICaL model. 
\n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n tfa = 1.0\n ki = 0.001\n cao = 2.5\n \"\"\" To do: make cai variable as an input like voltage \"\"\"\n cai = 0.0001\n celsius = 37.0\n\n def alpha(v):\n return 15.69 * (81.5 - v) / (np.exp((-1.0 * v + 81.5) / 10.0) - 1.0)\n\n def beta(v):\n return 0.29 * np.exp(-v / 10.86)\n\n def KTF(celsius):\n return 25.0 / 293.15 * (celsius + 273.15)\n\n def efun(z):\n return np.array([(1 - i / 2 if i < 0.0001 else i / (np.exp(i) - 1)) for\n i in z])\n\n def calc_ghk(v, cai, cao):\n f = KTF(celsius) / 2\n nu = v / f\n return -f * (1.0 - cai / cao * np.exp(nu)) * efun(nu)\n a = alpha(v)\n b = beta(v)\n tau = 1.0 / (tfa * (a + b))\n minf = a / (a + b)\n dm = (minf - m) / tau\n \"\"\" Calculating the current \n # h gate\n h2 = ki/(ki+cai)\n gcalbar = 0.003\n ghk = calc_ghk(v,cai,cao)\n ical = gcalbar*m*m*h2*ghk\n \"\"\"\n return [dm]\n\n\ndef can_mi():\n \"\"\"\n Model of N-type Ca current from Migliore 95\n \"\"\"\n pass\n\n\n<mask token>\n\n\ndef hcn_kn(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" \n Kouranova Ih model with non-specific current (reversal potential should be set at -30 mV \n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n_s = Y[0]\n n_f = Y[1]\n ninf_s = 1 / (1 + np.exp((v + 87.2) / 9.7))\n ninf_f = ninf_s\n if v > -70.0:\n tau_ns = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)\n tau_nf = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)\n else:\n tau_ns = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)\n tau_nf = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)\n dns = (ninf_s - n_s) / tau_ns\n dnf = (ninf_f - n_f) / tau_nf\n return [dns, dnf]\n\n\ndef hcn_tf(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\"\n Tigerholm version of the Kouranova Ih model which is identical except\n that when you calculate the current you don't use a nonspecific reversal potential and instead split the current between Na+ and K+, 50/50. 
\n \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n n_s = Y[0]\n n_f = Y[1]\n ninf_s = 1 / (1 + np.exp((v + 87.2) / 9.7))\n ninf_f = ninf_s\n if v > -70.0:\n tau_ns = 300.0 + 542.0 * np.exp((v + 25.0) / 20.0)\n tau_nf = 140.0 + 50.0 * np.exp(-(v + 25.0) / 20.0)\n else:\n tau_ns = 2500.0 + 100.0 * np.exp((v + 240.0) / 50.0)\n tau_nf = 250.0 + 12.0 * np.exp((v + 240.0) / 50.0)\n dns = (ninf_s - n_s) / tau_ns\n dnf = (ninf_f - n_f) / tau_nf\n return [dns, dnf]\n\n\n<mask token>\n\n\ndef nav17test(Y, t, voltage_clamp_func, voltage_clamp_params):\n \"\"\" Human Nav 1.7 from Vasylyev Waxman \"\"\"\n v = voltage_clamp_func(t, voltage_clamp_params)\n m = Y[0]\n h = Y[1]\n alpha_m = 10.22 - 10.22 / (1 + np.exp((v + 7.19) / 15.43))\n beta_m = 23.76 / (1 + np.exp((v + 70.37) / 14.53))\n \"\"\"\n Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.\n \"\"\"\n minf = alpha_m / (alpha_m + beta_m)\n mtau = 1 / (alpha_m + beta_m)\n alpha_h = 0.0744 / (1 + np.exp((v + 99.76) / 11.07))\n beta_h = 2.54 - 2.54 / (1 + np.exp((v + 7.8) / 10.68))\n hinf = alpha_h / (alpha_h + beta_h)\n htau = 1 / (alpha_h + beta_h)\n dm = (minf - m) / mtau\n dh = (hinf - h) / htau\n return [dm, dh]\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\ncurrent_models - library of ionic current models implemented in Python\n\nCreated on Mon Apr 10 16:30:04 2017\n\n@author: Oliver Britton\n\"\"\"\n\nimport os\nimport sys\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport seaborn as sns\n\n\" Voltage clamp generator functions \"\n\n\n\" //--Nav models--\\\\ \"\n\n\" -- Nav 1.7 models -- \"\n\ndef nav17vw(Y,t,voltage_clamp_func,voltage_clamp_params):\n \" Human Nav 1.7 from Vasylyev Waxman \"\n \n v = voltage_clamp_func(t,voltage_clamp_params)\n \n m = Y[0]\n h = Y[1]\n \n alpha_m = 10.22 - 10.22/(1 + np.exp((v+7.19)/15.43)) # Rate for closed -> open (sort of)\n beta_m = 23.76/(1 + np.exp((v+70.37)/14.53)) # Rate for open->closed\n \n \"\"\"\n Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.\n \"\"\"\n\n minf = alpha_m/(alpha_m + beta_m)\n mtau = 1/(alpha_m + beta_m)\n\n alpha_h = 0.0744/(1 + np.exp((v+99.76)/11.07))\n beta_h = 2.54 - 2.54/(1 + np.exp((v+7.8)/10.68))\n\n hinf = alpha_h/(alpha_h + beta_h)\n htau = 1/(alpha_h + beta_h)\n \n dm = (minf-m)/mtau\n dh = (hinf-h)/htau\n return [dm, dh]\n\ndef nav17cw(Y,t,voltage_clamp_func,voltage_clamp_params):\n \" Rat? 
Nav 1.7 from Choi Waxman 2011 \"\n v = voltage_clamp_func(t,voltage_clamp_params)\n \n m = Y[0]\n h = Y[1]\n s = Y[2]\n \n alpha_m = 15.5/(1 + np.exp(-(v-5)/(12.08)))\n beta_m = 35.2/(1 + np.exp((v+72.7)/16.7))\n\n minf = alpha_m/(alpha_m + beta_m)\n mtau = 1/(alpha_m + beta_m)\n\n alpha_h = 0.38685/(1 + np.exp((v+122.35)/15.29))\n beta_h = -0.00283 + 2.00283/(1 + np.exp(-(v+5.5266)/12.70195)) # Rate is negative if v = -inf?\n\n hinf = alpha_h/(alpha_h + beta_h)\n htau = 1/(alpha_h + beta_h)\n\n alpha_s = 0.00003 + 0.00092/(1 + np.exp((v+93.9)/16.6))\n beta_s = 132.05 - 132.05/(1 + np.exp((v-384.9)/28.5))\n\n sinf = alpha_s/(alpha_s + beta_s)\n stau = 1/(alpha_s + beta_s)\n \n dm = (minf-m)/mtau\n dh = (hinf-h)/htau\n ds = (sinf-s)/stau\n \n return [dm, dh, ds]\n \n\" -- Nav 1.8 models -- \"\ndef nav18hw(Y,t,voltage_clamp_func,voltage_clamp_params):\n \" Human Nav 1.8 from Huang Waxman 20(14?) \"\n \n v = voltage_clamp_func(t,voltage_clamp_params)\n \n m = Y[0]\n h = Y[1]\n \n alpha_m = 7.35 - 7.35/(1 + np.exp((v+1.38)/10.9))\n beta_m = 5.97/(1 + np.exp((v+56.43)/18.26))\n\n minf = alpha_m/(alpha_m + beta_m)\n mtau = 1/(alpha_m + beta_m)\n\n alpha_h = 0.011 + 1.39/(1 + np.exp((v+78.04)/11.32))\n beta_h = 0.56 - 0.56/(1 + np.exp((v-21.82)/20.03))\n\n hinf = alpha_h/(alpha_h + beta_h)\n htau = 1/(alpha_h + beta_h)\n\n dm = (minf-m)/mtau\n dh = (hinf-h)/htau\n \n return [dm, dh]\n\ndef nav18tf(Y,t,voltage_clamp_func,voltage_clamp_params):\n \" Rat? 
Nav 1.8 used in Tigerholm model \"\n v = voltage_clamp_func(t,voltage_clamp_params)\n \n m = Y[0]\n h = Y[1]\n s = Y[2]\n u = Y[3]\n \n alpha_m = 2.85 - 2.839/(1 + np.exp((v-1.159)/13.95))\n beta_m = 7.6205/(1 + np.exp((v+46.463)/8.8289))\n\n minf = alpha_m/(alpha_m + beta_m)\n mtau = 1/(alpha_m + beta_m)\n\n hinf = 1/(1+np.exp((v+32.2)/4))\n htau = 1.218 + 42.043*np.exp(-((v+38.1)**2)/(2*15.19**2))\n\n alpha_s = 0.001 * 5.4203 / (1 + np.exp((v+79.816)/16.269))\n beta_s = 0.001 * 5.0757 / (1 + np.exp(-(v+15.968)/11.542))\n\n sinf = 1/(1+np.exp((v+45.0)/8))\n stau = 1/(alpha_s + beta_s)\n\n alpha_u = 0.002 * 2.0434 / (1 + np.exp((v+67.499)/19.51))\n beta_u = 0.002 * 1.9952 / (1 + np.exp(-(v+30.963)/14.792))\n\n uinf = 1/(1+np.exp((v+51.0)/8))\n utau = 1.0/(alpha_u + beta_u) \n \n dm = (minf-m)/mtau\n dh = (hinf-h)/htau\n ds = (sinf-s)/stau\n du = (uinf-u)/utau\n \n return [dm, dh, ds, du]\n \ndef nav18cw(Y,t,voltage_clamp_func,voltage_clamp_params):\n \" Nav 1.8 model used in Choi Waxman 2011 \"\n v = voltage_clamp_func(t,voltage_clamp_params)\n \n m = Y[0]\n h = Y[1]\n \n alpha_m = 2.85 - 2.839/(1 + np.exp((v-1.159)/13.95))\n beta_m = 7.6205/(1 + np.exp((v+46.463)/8.8289))\n\n minf = alpha_m/(alpha_m + beta_m)\n mtau = 1/(alpha_m + beta_m)\n\n hinf = 1/(1+np.exp((v+32.2)/4))\n htau = 1.218 + 42.043*np.exp(-((v+38.1)**2)/(2*15.19**2))\n \n dm = (minf-m)/mtau\n dh = (hinf-h)/htau\n \n return [dm, dh]\n \n\" -- Nav 1.9 models -- \"\n\ndef nav19hw(Y,t,voltage_clamp_func,voltage_clamp_params):\n \" Nav 1.9 model from Huang Waxman 2014\"\n m = Y[0]\n h = Y[1]\n s = Y[2]\n \n v = voltage_clamp_func(t,voltage_clamp_params)\n \n alpha_m = 0.751/(1 + np.exp(-(v+32.26)/13.71))\n beta_m = 5.68/(1 + np.exp((v+123.71)/13.94))\n minf = alpha_m/(alpha_m + beta_m)\n mtau = 1/(alpha_m + beta_m)\n\n alpha_h = 0.082/(1 + np.exp((v+113.69)/17.4))\n beta_h = 0.24/(1 + np.exp(-(v-10.1)/17.2))\n hinf = alpha_h/(alpha_h + beta_h)\n htau = 1/(alpha_h + beta_h)\n \n alpha_s = 0.019/(1 + 
np.exp((v+154.51)/11.46))\n beta_s = 0.000376/(1 + np.exp(-(v+60.92)/15.79))\n sinf = alpha_s/(alpha_s + beta_s)\n stau = 1/(alpha_s + beta_s)\n \n dm = (minf-m)/mtau\n dh = (hinf-h)/htau\n ds = (sinf-s)/stau\n \n return [dm, dh, ds]\n \ndef nav19md(Y,t,voltage_clamp_func,voltage_clamp_params):\n \" Nav 1.9 model from Maingret 2008\"\n m = Y[0]\n h = Y[1]\n s = Y[2]\n \n v = voltage_clamp_func(t,voltage_clamp_params)\n \n \n return [dm, dh, ds]\n \ndef nav16zm(Y,t,voltage_clamp_func,voltage_clamp_params):\n \" Nav 1.6 model from Zach Mainen 1994 \"\n m = Y[0]\n h = Y[1]\n \n v = voltage_clamp_func(t,voltage_clamp_params)\n \n vhalf = -43.0\n a_m = 0.182*(v-vhalf)/(1-np.exp((vhalf-v)/6.))\n b_m = 0.124*(-v+vhalf)/(1-np.exp((-vhalf+v)/6.))\n \n m_inf = a_m/(a_m + b_m)\n m_tau = 1./(a_m + b_m)\n \n vhalf_ha = -50.0\n vhalf_hb = -75.0\n q_h = 5.0\n\n vhalf_inf = -72.0\n qinf = 6.2\n\n rate_ha = 0.0091\n rate_hb = 0.024\n\n a_h = rate_ha*(v-vhalf_ha)/(1-np.exp((vhalf_ha-v)/q_h))\n b_h = rate_hb*(-v+vhalf_hb)/(1-np.exp((-vhalf_hb+v)/q_h))\n\n h_inf = 1.0/(1.0 + np.exp((v-vhalf_inf)/qinf))\n h_tau = 1./(a_h + b_h)\n \n dm = (m_inf-m)/m_tau\n dh = (h_inf-h)/h_tau\n \n return [dm, dh]\n\n\" Kv models \"\n\ndef kdr_tf(Y,t,voltage_clamp_func,voltage_clamp_params):\n \" Tigerholm version of the Sheets et al. IKdr model \"\n \" Model was developed from data recorded at 21 oC \"\n \n \n v = voltage_clamp_func(t,voltage_clamp_params)\n n = Y[0]\n q10 = 1.0#3.3 # Preserved in case it is useful but disabled\n \n if v > -31.0:\n tau = 0.16+0.8*np.exp(-0.0267*(v+11))\n else:\n tau = 1000*(0.000688 + 1/(np.exp((v+75.2)/6.5) + np.exp(-(v-131.5)/(34.8))))\n\t\t\n ninf = 1/(1 + np.exp(-(v+45)/15.4))\n ntau = tau/q10\n \n dn = (ninf-n)/ntau\n return [dn]\n \ndef km_tf(Y,t,voltage_clamp_func,voltage_clamp_params):\n \"\"\" Tigerholm version of the IM current. Current is from multiple sources:\n The voltage dependence of steady-state activation forthe KM current is from\n Maingret et al. 
(2008), which was derived from Passmore 2003. The KM channel activation has a fast and a slow \n time constant as described by Passmore et al. (2003). To account for the \n two time constants, weimplemented one fast (nf) and one slow (ns) gate, \n combined as follows.\n \"\"\"\n # g = gbar * (0.25*ns + 0.75*nf)\n v = voltage_clamp_func(t,voltage_clamp_params)\n ns = Y[0]\n nf = Y[1]\n q10 = 1.0#3.3 # Preserved in case it is useful but disabled\n \n if v < -60.0:\n nstau = 219.0*q10\n else:\n nstau = 13.0*v + 1000.0*q10\n \n nftau_alpha = 0.00395*np.exp((v+30.0)/40.0)\n nftau_beta = 0.00395*np.exp(-(v+30.0)/20.0)*q10\n nftau = 1.0/(nftau_alpha + nftau_beta)\n \n ninf = 1.0/(1.0 + np.exp(-(v+30.0)/6.0)) # Threshold is around -30 mV\n \n dns = (ninf-ns)/nstau\n dnf = (ninf-nf)/nftau\n \n return [dns,dnf]\n \ndef ka_tf(Y,t,voltage_clamp_func,voltage_clamp_params):\n \"\"\" Tigerholm version of IA.\n \"\"\"\n # g = gbar * n * h\n v = voltage_clamp_func(t,voltage_clamp_params)\n n = Y[0]\n h = Y[1]\n q10 = 1.0#3.3 # Preserved in case it is useful but disabled\n \n ninf = (1.0/(1.0 + np.exp(-(v+5.4+15)/16.4)))**4\n ntau = 0.25 + 10.04*np.exp((-(v+24.67)**2)/(2*34.8**2))*q10\n\t\t\n hinf = 1.0/(1.0 + np.exp((v+49.9 + 15.0)/4.6))\n htau = 20.0 + 50.0 * np.exp((-(v+40.0)**2)/(2.0*40.0**2))*q10\n \n # Trap for htau following Sheets /ChoiWaxman/Tigerholm - set it to 5 ms if less than 5 ms\n if htau < 5.0:\n htau = 5.0\n\n dn = (ninf-n)/ntau\n dh = (hinf-h)/htau\n \n return [dn,dh]\n\n\"\"\" \nCa models \n\nImplemented:\ncal_ja - Jaffe et al. 1994 ICaL model. \ncan_mi - Model of N-type Ca current from Migliore 95\n\nTo do:\nSK\nBK\nCa diffusion\n\n\n\"\"\"\n\ndef cal_ja(Y,t,voltage_clamp_func,voltage_clamp_params):\n \"\"\"\n Jaffe et al. 1994 ICaL model. 
\n \"\"\"\n v = voltage_clamp_func(t,voltage_clamp_params)\n m = Y[0]\n \n tfa = 1.\n ki = 0.001 # (mM)\n \n cao = 2.5 # Davidson (mM)\n \" To do: make cai variable as an input like voltage \"\n cai = 1.e-4 # (mM) Roughly values (100 nM) from Intracellular calcium regulation among subpopulations of rat dorsal root ganglion neurons by Lu, Zhang, Gold 2007\n \n celsius = 37.\n \n def alpha(v):\n return 15.69*(81.5 - v)/(np.exp((-1.0*v+81.5)/10.0)-1.0)\n def beta(v):\n return 0.29*np.exp(-v/10.86)\n def KTF(celsius):\n return ((25./293.15)*(celsius + 273.15))\n def efun(z):\n return np.array([1 - i/2 if i < 1e-4 else i/(np.exp(i)-1) for i in z])\n def calc_ghk(v, cai, cao): \n f = KTF(celsius)/2\n nu = v/f\n return -f*(1. - (cai/cao)*np.exp(nu))*efun(nu)\n\n a = alpha(v)\n b = beta(v)\n tau = 1./(tfa*(a + b))\n minf = a/(a+b)\n dm = (minf - m)/tau\n \n \"\"\" Calculating the current \n # h gate\n h2 = ki/(ki+cai)\n gcalbar = 0.003\n ghk = calc_ghk(v,cai,cao)\n ical = gcalbar*m*m*h2*ghk\n \"\"\"\n return [dm]\n\ndef can_mi():\n \"\"\"\n Model of N-type Ca current from Migliore 95\n \"\"\"\n pass\n \n\" HCN models \"\ndef hcn_kn(Y,t,voltage_clamp_func,voltage_clamp_params):\n \"\"\" \n Kouranova Ih model with non-specific current (reversal potential should be set at -30 mV \n \"\"\"\n\n v = voltage_clamp_func(t,voltage_clamp_params)\n n_s = Y[0]\n n_f = Y[1]\n\n ninf_s = 1/(1 + np.exp((v+87.2)/9.7))\n ninf_f = ninf_s\n\n if v > -70.0:\n tau_ns = 300.0 + 542.0 * np.exp((v+25.0)/20.0)\n tau_nf = 140.0 + 50.0 * np.exp(-(v+25.0)/20.0)\n else:\n tau_ns = 2500.0 + 100.0 * np.exp((v+240.0)/50.0)\n tau_nf = 250.0 + 12.0 * np.exp((v+240.0)/50.0)\n\n dns = (ninf_s - n_s)/tau_ns\n dnf = (ninf_f - n_f)/tau_nf\n\n return [dns, dnf]\n \ndef hcn_tf(Y,t,voltage_clamp_func,voltage_clamp_params):\n \"\"\"\n Tigerholm version of the Kouranova Ih model which is identical except\n that when you calculate the current you don't use a nonspecific reversal potential and instead split the 
current between Na+ and K+, 50/50. \n \"\"\"\n \n v = voltage_clamp_func(t,voltage_clamp_params)\n n_s = Y[0]\n n_f = Y[1]\n \n ninf_s = 1/(1 + np.exp((v+87.2)/9.7))\n ninf_f = ninf_s\n\n if v > -70.0:\n tau_ns = 300.0 + 542.0 * np.exp((v+25.0)/20.0)\n tau_nf = 140.0 + 50.0 * np.exp(-(v+25.0)/20.0)\n else:\n tau_ns = 2500.0 + 100.0 * np.exp((v+240.0)/50.0)\n tau_nf = 250.0 + 12.0 * np.exp((v+240.0)/50.0)\n\n dns = (ninf_s - n_s)/tau_ns\n dnf = (ninf_f - n_f)/tau_nf\n \n return [dns, dnf]\n\n\"\"\"\n # ena, ek, + or -?\n Ih_na = 0.5 * g_h (0.5*n_s + 0.5*n_f) * (Vm + ena)\n Ih_k = 0.5 * g_h * (0.5*n_s + 0.5*n_f) * (Vm + ek) \n\n\"\"\"\n\n\" Test models \"\ndef nav17test(Y,t,voltage_clamp_func,voltage_clamp_params):\n \" Human Nav 1.7 from Vasylyev Waxman \"\n \n v = voltage_clamp_func(t,voltage_clamp_params)\n \n m = Y[0]\n h = Y[1]\n \n alpha_m = 10.22 - 10.22/(1 + np.exp((v+7.19)/15.43)) # Rate for closed -> open (sort of)\n beta_m = 23.76/(1 + np.exp((v+70.37)/14.53)) # Rate for open->closed\n \n \"\"\"\n Parameters for kinetics - rate constant (3), 2 voltage shifts, 2 slope coefficients.\n \"\"\"\n\n minf = alpha_m/(alpha_m + beta_m)\n mtau = 1/(alpha_m + beta_m)\n\n alpha_h = 0.0744/(1 + np.exp((v+99.76)/11.07))\n beta_h = 2.54 - 2.54/(1 + np.exp((v+7.8)/10.68))\n\n hinf = alpha_h/(alpha_h + beta_h)\n htau = 1/(alpha_h + beta_h)\n \n dm = (minf-m)/mtau\n dh = (hinf-h)/htau\n return [dm, dh]\n \n\n \n",
"step-ids": [
12,
13,
14,
15,
18
]
}
|
[
12,
13,
14,
15,
18
] |
# Generated by Django 3.2.3 on 2021-05-29 16:37
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 3.2.3 (2021-05-29). Creates the initial
    # tables for the study_sessions app: Location, StudySession and
    # Participant.
    # First migration for this app, so it can run against an empty schema.
    initial = True
    dependencies = [
        # Participant.user is a FK to the (swappable) user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        # StudySession.subject is a FK to login.Subject.
        ('login', '0014_auto_20210529_1637'),
    ]
    operations = [
        migrations.CreateModel(
            name='Location',
            # Where a study session takes place; every address field is
            # optional (blank=True, null=True).
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('street', models.CharField(blank=True, max_length=255, null=True)),
                ('postal_code', models.IntegerField(blank=True, null=True)),
                ('city', models.CharField(blank=True, max_length=255, null=True)),
                ('country', models.CharField(blank=True, max_length=255, null=True)),
                ('facility', models.CharField(blank=True, max_length=255, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='StudySession',
            # A scheduled session: date plus start/end times, spot counts,
            # and FKs to its Location and login.Subject (CASCADE on delete).
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('is_active', models.BooleanField(default=True)),
                ('start_time', models.TimeField()),
                ('end_time', models.TimeField()),
                ('date', models.DateField()),
                ('available_spots', models.IntegerField(default=1)),
                ('taken_spots', models.IntegerField(default=0)),
                ('description', models.CharField(blank=True, max_length=500, null=True)),
                ('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='study_sessions.location')),
                ('subject', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='login.subject')),
            ],
        ),
        migrations.CreateModel(
            name='Participant',
            # Link table between users and study sessions (FK to each,
            # CASCADE on delete).
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('study_session', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='study_sessions.studysession')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
|
normal
|
{
"blob_id": "6285d1665bacbff746f44f42ce65981f937fff64",
"index": 4189,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('login', '0014_auto_20210529_1637')]\n operations = [migrations.CreateModel(name='Location', fields=[('id',\n models.BigAutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('street', models.CharField(blank=True,\n max_length=255, null=True)), ('postal_code', models.IntegerField(\n blank=True, null=True)), ('city', models.CharField(blank=True,\n max_length=255, null=True)), ('country', models.CharField(blank=\n True, max_length=255, null=True)), ('facility', models.CharField(\n blank=True, max_length=255, null=True))]), migrations.CreateModel(\n name='StudySession', fields=[('id', models.BigAutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), ('is_active', models.BooleanField(default=True)), (\n 'start_time', models.TimeField()), ('end_time', models.TimeField()),\n ('date', models.DateField()), ('available_spots', models.\n IntegerField(default=1)), ('taken_spots', models.IntegerField(\n default=0)), ('description', models.CharField(blank=True,\n max_length=500, null=True)), ('location', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, to=\n 'study_sessions.location')), ('subject', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, to='login.subject'))]),\n migrations.CreateModel(name='Participant', fields=[('id', models.\n BigAutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('study_session', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, to='study_sessions.studysession'\n )), ('user', models.ForeignKey(on_delete=django.db.models.deletion.\n CASCADE, to=settings.AUTH_USER_MODEL))])]\n",
"step-4": "from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.\n AUTH_USER_MODEL), ('login', '0014_auto_20210529_1637')]\n operations = [migrations.CreateModel(name='Location', fields=[('id',\n models.BigAutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('street', models.CharField(blank=True,\n max_length=255, null=True)), ('postal_code', models.IntegerField(\n blank=True, null=True)), ('city', models.CharField(blank=True,\n max_length=255, null=True)), ('country', models.CharField(blank=\n True, max_length=255, null=True)), ('facility', models.CharField(\n blank=True, max_length=255, null=True))]), migrations.CreateModel(\n name='StudySession', fields=[('id', models.BigAutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), ('is_active', models.BooleanField(default=True)), (\n 'start_time', models.TimeField()), ('end_time', models.TimeField()),\n ('date', models.DateField()), ('available_spots', models.\n IntegerField(default=1)), ('taken_spots', models.IntegerField(\n default=0)), ('description', models.CharField(blank=True,\n max_length=500, null=True)), ('location', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, to=\n 'study_sessions.location')), ('subject', models.ForeignKey(\n on_delete=django.db.models.deletion.CASCADE, to='login.subject'))]),\n migrations.CreateModel(name='Participant', fields=[('id', models.\n BigAutoField(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')), ('study_session', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, to='study_sessions.studysession'\n )), ('user', models.ForeignKey(on_delete=django.db.models.deletion.\n CASCADE, to=settings.AUTH_USER_MODEL))])]\n",
"step-5": "# Generated by Django 3.2.3 on 2021-05-29 16:37\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('login', '0014_auto_20210529_1637'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Location',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('street', models.CharField(blank=True, max_length=255, null=True)),\n ('postal_code', models.IntegerField(blank=True, null=True)),\n ('city', models.CharField(blank=True, max_length=255, null=True)),\n ('country', models.CharField(blank=True, max_length=255, null=True)),\n ('facility', models.CharField(blank=True, max_length=255, null=True)),\n ],\n ),\n migrations.CreateModel(\n name='StudySession',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('is_active', models.BooleanField(default=True)),\n ('start_time', models.TimeField()),\n ('end_time', models.TimeField()),\n ('date', models.DateField()),\n ('available_spots', models.IntegerField(default=1)),\n ('taken_spots', models.IntegerField(default=0)),\n ('description', models.CharField(blank=True, max_length=500, null=True)),\n ('location', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='study_sessions.location')),\n ('subject', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='login.subject')),\n ],\n ),\n migrations.CreateModel(\n name='Participant',\n fields=[\n ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('study_session', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='study_sessions.studysession')),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, 
to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import datetime,os
def GetDatetimeFromMyFormat(l):
    """Parse a timestamp string such as "2018-5-17 19:18:45" into a datetime.

    Replaces the previous hand-rolled split/int parsing with
    ``datetime.strptime``.  The ``%m``/``%d``/``%H``/``%M``/``%S`` directives
    accept unpadded values (e.g. "5" for May), and whitespace in the format
    matches any run of whitespace, so the accepted inputs are the same.

    Args:
        l: Timestamp string in the form "YYYY-M-D H:M:S" (naive, no tz).

    Returns:
        datetime.datetime: the parsed timestamp.

    Raises:
        ValueError: if ``l`` does not match the expected format.
    """
    return datetime.datetime.strptime(l, "%Y-%m-%d %H:%M:%S")
|
normal
|
{
"blob_id": "6767302869d73d041e2d7061722e05484d19f3e0",
"index": 4752,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef GetDatetimeFromMyFormat(l):\n l_words = l.split()\n l_days = l_words[0].split('-')\n l_times = l_words[1].split(':')\n out = datetime.datetime(int(l_days[0]), int(l_days[1]), int(l_days[2]),\n int(l_times[0]), int(l_times[1]), int(l_times[2]))\n return out\n",
"step-3": "import datetime, os\n\n\ndef GetDatetimeFromMyFormat(l):\n l_words = l.split()\n l_days = l_words[0].split('-')\n l_times = l_words[1].split(':')\n out = datetime.datetime(int(l_days[0]), int(l_days[1]), int(l_days[2]),\n int(l_times[0]), int(l_times[1]), int(l_times[2]))\n return out\n",
"step-4": "import datetime,os\n\ndef GetDatetimeFromMyFormat(l):\n # l = \"2018-5-17 19:18:45\"\n l_words = l.split()\n l_days = l_words[0].split('-')\n l_times = l_words[1].split(':')\n out = datetime.datetime(int(l_days[0]),int(l_days[1]),int(l_days[2]),int(l_times[0]),int(l_times[1]),int(l_times[2]))\n return out\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#! /usr/bin/env python
from game_calc import *
def game(screen, clock):
    """Run one round of the snake game on *screen*.

    Loops until ``check_ahead`` reports ``'GAME_OVER'``, in which case the
    final score is returned, or until the player quits / presses Escape,
    in which case the loop ends and the function implicitly returns None.

    Args:
        screen: pygame display surface to draw on.
        clock: pygame.time.Clock used for the 25 FPS cap and the bonus
            countdown (via ``clock.get_time()``).
    """
    running = True
    time = 0  # NOTE(review): never read below; candidate for removal
    WHITE = (255,255,255)
    BLUE = (0,0,205)
    # Playing-field walls, redrawn every frame.
    upper_border = pygame.Rect(12,44,1000,20)
    right_border = pygame.Rect(992,60,20,648)
    left_border = pygame.Rect(12,60,20,648)
    down_border = pygame.Rect(12,694,1000,20)
    # The snake as a list of 10x10-pixel cell positions, head first.
    snake = [(512,344),(512,354),(512,364),(512,374),(512,384)]
    direction = 'UP'
    bonus_timer = 0  # seconds the bonus stays on screen; 0 means no bonus
    food = new_food(screen, snake)
    bonus = None
    # Growth bookkeeping: while eaten_cooldown credits remain, the tail is
    # not trimmed, so the snake grows by one cell that frame.
    eaten = True
    eaten_cooldown = 1
    x_change = 0
    y_change = 0
    score = 0
    font = pygame.font.Font(os.path.join('font.ttf'), 28)
    countdown_font = pygame.font.Font(os.path.join('font.ttf'), 100)
    # The *_pressed flags block perpendicular turns while a key is held,
    # preventing a 180-degree reversal from two quick presses in one frame.
    up_pressed = False
    right_pressed = False
    down_pressed = False
    left_pressed = False
    countdown = True  # show the 3-2-1 countdown once, after the first draw
    while running:
        # --- input handling ------------------------------------------
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    running = False
                # A turn is ignored if it would reverse the snake or if a
                # perpendicular key is still held down.
                if event.key == pygame.K_UP and not direction == 'DOWN' and not right_pressed and not left_pressed:
                    direction = 'UP'
                    up_pressed = True
                elif event.key == pygame.K_DOWN and not direction == 'UP' and not right_pressed and not left_pressed:
                    direction = 'DOWN'
                    down_pressed = True
                elif event.key == pygame.K_RIGHT and not direction == 'LEFT' and not up_pressed and not down_pressed:
                    direction = 'RIGHT'
                    right_pressed = True
                elif event.key == pygame.K_LEFT and not direction == 'RIGHT' and not up_pressed and not down_pressed:
                    direction = 'LEFT'
                    left_pressed = True
            elif event.type == pygame.KEYUP:
                # The per-key branches are deliberate no-ops; any key
                # release clears all four held-key flags below.
                if event.key == pygame.K_DOWN:
                    None
                elif event.key == pygame.K_UP:
                    None
                elif event.key == pygame.K_RIGHT:
                    None
                elif event.key == pygame.K_LEFT:
                    None
                up_pressed = False
                right_pressed = False
                down_pressed = False
                left_pressed = False
        # --- movement: translate direction into a 10px per-frame step ---
        if direction == 'RIGHT':
            x_change = 10
            y_change = 0
        elif direction == 'LEFT':
            x_change = -10
            y_change = 0
        elif direction == 'UP':
            x_change = 0
            y_change = -10
        elif direction == 'DOWN':
            x_change = 0
            y_change = 10
        # check_ahead classifies the next head cell:
        # 'NOTHING' | 'EAT' | 'BONUS' | 'GAME_OVER'
        status = check_ahead(screen, snake[0][0]+x_change, snake[0][1]+y_change)
        if status == 'NOTHING' or status == 'EAT':
            snake.insert(0,(snake[0][0]+x_change,snake[0][1]+y_change))
        if status == 'EAT':
            eaten = True
            eaten_cooldown = eaten_cooldown + 4  # grow 4 cells per food
            food = new_food(screen, None)
            score += 1
            # 1-in-8 chance to spawn a timed bonus (only one at a time).
            if random.randint(1,8) == 8 and not bonus:
                bonus = new_food(screen, [food])
                bonus_timer = 5
        if status == 'BONUS':
            # NOTE(review): on 'BONUS' the head is not advanced this frame
            # (only 'NOTHING'/'EAT' insert a new head) — confirm intended.
            bonus = None
            score += 6
            eaten_cooldown += 8
        # Consume one growth credit per frame; only trim the tail (normal
        # movement) once no credits remain.
        if not eaten and eaten_cooldown == 0:
            snake = snake[0:-1]
        else:
            eaten = False
            eaten_cooldown = eaten_cooldown - 1
        if status == 'GAME_OVER':
            return score
        # Count the bonus down in real time and expire it at zero.
        if bonus_timer:
            bonus_timer = bonus_timer - (clock.get_time() / 1000)
            if bonus_timer <= 0:
                bonus = None
                bonus_timer = 0
        # --- drawing --------------------------------------------------
        screen.fill((0,0,0))
        pygame.draw.rect(screen,BLUE,upper_border)
        pygame.draw.rect(screen,BLUE,right_border)
        pygame.draw.rect(screen,BLUE,left_border)
        pygame.draw.rect(screen,BLUE,down_border)
        pygame.draw.rect(screen,(35,142,35),pygame.Rect(food[0],food[1],10,10))
        if bonus:
            # Gold bonus square plus its remaining-seconds readout.
            pygame.draw.rect(screen,(255,215,0),pygame.Rect(bonus[0],bonus[1],10,10))
            screen.blit(font.render(str(round(bonus_timer,1)),False,(255,255,0)), (200,8))
        screen.blit(font.render("Score: " + str(score),False,(255,255,0)), (900,8))
        for dot in snake:
            pygame.draw.rect(screen,WHITE,pygame.Rect(dot[0],dot[1],10,10))
        pygame.display.update()
        # One-shot 3-2-1 countdown before play starts (blocks ~3 seconds).
        if countdown:
            update_rect = pygame.Rect(500,350,100,100)
            countdown = False
            for i in range(3,0,-1):
                pygame.draw.rect(screen,(0,0,0),update_rect)
                screen.blit(countdown_font.render(str(i),False,BLUE), (500,350))
                pygame.display.update(update_rect)
                pygame.time.delay(1000)
        #print(clock.get_fps())
        clock.tick(25)  # cap the game at 25 frames per second
|
normal
|
{
"blob_id": "83815acb0520c1f8186b0b5c69f8597b1b6a552a",
"index": 8051,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef game(screen, clock):\n running = True\n time = 0\n WHITE = 255, 255, 255\n BLUE = 0, 0, 205\n upper_border = pygame.Rect(12, 44, 1000, 20)\n right_border = pygame.Rect(992, 60, 20, 648)\n left_border = pygame.Rect(12, 60, 20, 648)\n down_border = pygame.Rect(12, 694, 1000, 20)\n snake = [(512, 344), (512, 354), (512, 364), (512, 374), (512, 384)]\n direction = 'UP'\n bonus_timer = 0\n food = new_food(screen, snake)\n bonus = None\n eaten = True\n eaten_cooldown = 1\n x_change = 0\n y_change = 0\n score = 0\n font = pygame.font.Font(os.path.join('font.ttf'), 28)\n countdown_font = pygame.font.Font(os.path.join('font.ttf'), 100)\n up_pressed = False\n right_pressed = False\n down_pressed = False\n left_pressed = False\n countdown = True\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n running = False\n if (event.key == pygame.K_UP and not direction == 'DOWN' and\n not right_pressed and not left_pressed):\n direction = 'UP'\n up_pressed = True\n elif event.key == pygame.K_DOWN and not direction == 'UP' and not right_pressed and not left_pressed:\n direction = 'DOWN'\n down_pressed = True\n elif event.key == pygame.K_RIGHT and not direction == 'LEFT' and not up_pressed and not down_pressed:\n direction = 'RIGHT'\n right_pressed = True\n elif event.key == pygame.K_LEFT and not direction == 'RIGHT' and not up_pressed and not down_pressed:\n direction = 'LEFT'\n left_pressed = True\n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_DOWN:\n None\n elif event.key == pygame.K_UP:\n None\n elif event.key == pygame.K_RIGHT:\n None\n elif event.key == pygame.K_LEFT:\n None\n up_pressed = False\n right_pressed = False\n down_pressed = False\n left_pressed = False\n if direction == 'RIGHT':\n x_change = 10\n y_change = 0\n elif direction == 'LEFT':\n x_change = -10\n y_change = 0\n elif direction == 'UP':\n 
x_change = 0\n y_change = -10\n elif direction == 'DOWN':\n x_change = 0\n y_change = 10\n status = check_ahead(screen, snake[0][0] + x_change, snake[0][1] +\n y_change)\n if status == 'NOTHING' or status == 'EAT':\n snake.insert(0, (snake[0][0] + x_change, snake[0][1] + y_change))\n if status == 'EAT':\n eaten = True\n eaten_cooldown = eaten_cooldown + 4\n food = new_food(screen, None)\n score += 1\n if random.randint(1, 8) == 8 and not bonus:\n bonus = new_food(screen, [food])\n bonus_timer = 5\n if status == 'BONUS':\n bonus = None\n score += 6\n eaten_cooldown += 8\n if not eaten and eaten_cooldown == 0:\n snake = snake[0:-1]\n else:\n eaten = False\n eaten_cooldown = eaten_cooldown - 1\n if status == 'GAME_OVER':\n return score\n if bonus_timer:\n bonus_timer = bonus_timer - clock.get_time() / 1000\n if bonus_timer <= 0:\n bonus = None\n bonus_timer = 0\n screen.fill((0, 0, 0))\n pygame.draw.rect(screen, BLUE, upper_border)\n pygame.draw.rect(screen, BLUE, right_border)\n pygame.draw.rect(screen, BLUE, left_border)\n pygame.draw.rect(screen, BLUE, down_border)\n pygame.draw.rect(screen, (35, 142, 35), pygame.Rect(food[0], food[1\n ], 10, 10))\n if bonus:\n pygame.draw.rect(screen, (255, 215, 0), pygame.Rect(bonus[0],\n bonus[1], 10, 10))\n screen.blit(font.render(str(round(bonus_timer, 1)), False, (255,\n 255, 0)), (200, 8))\n screen.blit(font.render('Score: ' + str(score), False, (255, 255, 0\n )), (900, 8))\n for dot in snake:\n pygame.draw.rect(screen, WHITE, pygame.Rect(dot[0], dot[1], 10, 10)\n )\n pygame.display.update()\n if countdown:\n update_rect = pygame.Rect(500, 350, 100, 100)\n countdown = False\n for i in range(3, 0, -1):\n pygame.draw.rect(screen, (0, 0, 0), update_rect)\n screen.blit(countdown_font.render(str(i), False, BLUE), (\n 500, 350))\n pygame.display.update(update_rect)\n pygame.time.delay(1000)\n clock.tick(25)\n",
"step-3": "from game_calc import *\n\n\ndef game(screen, clock):\n running = True\n time = 0\n WHITE = 255, 255, 255\n BLUE = 0, 0, 205\n upper_border = pygame.Rect(12, 44, 1000, 20)\n right_border = pygame.Rect(992, 60, 20, 648)\n left_border = pygame.Rect(12, 60, 20, 648)\n down_border = pygame.Rect(12, 694, 1000, 20)\n snake = [(512, 344), (512, 354), (512, 364), (512, 374), (512, 384)]\n direction = 'UP'\n bonus_timer = 0\n food = new_food(screen, snake)\n bonus = None\n eaten = True\n eaten_cooldown = 1\n x_change = 0\n y_change = 0\n score = 0\n font = pygame.font.Font(os.path.join('font.ttf'), 28)\n countdown_font = pygame.font.Font(os.path.join('font.ttf'), 100)\n up_pressed = False\n right_pressed = False\n down_pressed = False\n left_pressed = False\n countdown = True\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n running = False\n if (event.key == pygame.K_UP and not direction == 'DOWN' and\n not right_pressed and not left_pressed):\n direction = 'UP'\n up_pressed = True\n elif event.key == pygame.K_DOWN and not direction == 'UP' and not right_pressed and not left_pressed:\n direction = 'DOWN'\n down_pressed = True\n elif event.key == pygame.K_RIGHT and not direction == 'LEFT' and not up_pressed and not down_pressed:\n direction = 'RIGHT'\n right_pressed = True\n elif event.key == pygame.K_LEFT and not direction == 'RIGHT' and not up_pressed and not down_pressed:\n direction = 'LEFT'\n left_pressed = True\n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_DOWN:\n None\n elif event.key == pygame.K_UP:\n None\n elif event.key == pygame.K_RIGHT:\n None\n elif event.key == pygame.K_LEFT:\n None\n up_pressed = False\n right_pressed = False\n down_pressed = False\n left_pressed = False\n if direction == 'RIGHT':\n x_change = 10\n y_change = 0\n elif direction == 'LEFT':\n x_change = -10\n y_change = 0\n elif direction == 
'UP':\n x_change = 0\n y_change = -10\n elif direction == 'DOWN':\n x_change = 0\n y_change = 10\n status = check_ahead(screen, snake[0][0] + x_change, snake[0][1] +\n y_change)\n if status == 'NOTHING' or status == 'EAT':\n snake.insert(0, (snake[0][0] + x_change, snake[0][1] + y_change))\n if status == 'EAT':\n eaten = True\n eaten_cooldown = eaten_cooldown + 4\n food = new_food(screen, None)\n score += 1\n if random.randint(1, 8) == 8 and not bonus:\n bonus = new_food(screen, [food])\n bonus_timer = 5\n if status == 'BONUS':\n bonus = None\n score += 6\n eaten_cooldown += 8\n if not eaten and eaten_cooldown == 0:\n snake = snake[0:-1]\n else:\n eaten = False\n eaten_cooldown = eaten_cooldown - 1\n if status == 'GAME_OVER':\n return score\n if bonus_timer:\n bonus_timer = bonus_timer - clock.get_time() / 1000\n if bonus_timer <= 0:\n bonus = None\n bonus_timer = 0\n screen.fill((0, 0, 0))\n pygame.draw.rect(screen, BLUE, upper_border)\n pygame.draw.rect(screen, BLUE, right_border)\n pygame.draw.rect(screen, BLUE, left_border)\n pygame.draw.rect(screen, BLUE, down_border)\n pygame.draw.rect(screen, (35, 142, 35), pygame.Rect(food[0], food[1\n ], 10, 10))\n if bonus:\n pygame.draw.rect(screen, (255, 215, 0), pygame.Rect(bonus[0],\n bonus[1], 10, 10))\n screen.blit(font.render(str(round(bonus_timer, 1)), False, (255,\n 255, 0)), (200, 8))\n screen.blit(font.render('Score: ' + str(score), False, (255, 255, 0\n )), (900, 8))\n for dot in snake:\n pygame.draw.rect(screen, WHITE, pygame.Rect(dot[0], dot[1], 10, 10)\n )\n pygame.display.update()\n if countdown:\n update_rect = pygame.Rect(500, 350, 100, 100)\n countdown = False\n for i in range(3, 0, -1):\n pygame.draw.rect(screen, (0, 0, 0), update_rect)\n screen.blit(countdown_font.render(str(i), False, BLUE), (\n 500, 350))\n pygame.display.update(update_rect)\n pygame.time.delay(1000)\n clock.tick(25)\n",
"step-4": "#! /usr/bin/env python\nfrom game_calc import *\n\ndef game(screen, clock):\n running = True\n time = 0\n WHITE = (255,255,255)\n BLUE = (0,0,205)\n upper_border = pygame.Rect(12,44,1000,20)\n right_border = pygame.Rect(992,60,20,648)\n left_border = pygame.Rect(12,60,20,648)\n down_border = pygame.Rect(12,694,1000,20)\n \n snake = [(512,344),(512,354),(512,364),(512,374),(512,384)]\n \n direction = 'UP'\n \n bonus_timer = 0\n \n food = new_food(screen, snake)\n bonus = None\n eaten = True\n eaten_cooldown = 1\n\n x_change = 0\n y_change = 0\n \n score = 0\n \n font = pygame.font.Font(os.path.join('font.ttf'), 28)\n countdown_font = pygame.font.Font(os.path.join('font.ttf'), 100)\n\n up_pressed = False\n right_pressed = False\n down_pressed = False\n left_pressed = False\n \n countdown = True\n \n while running:\n \n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n running = False\n if event.key == pygame.K_UP and not direction == 'DOWN' and not right_pressed and not left_pressed:\n direction = 'UP'\n up_pressed = True\n elif event.key == pygame.K_DOWN and not direction == 'UP' and not right_pressed and not left_pressed:\n direction = 'DOWN'\n down_pressed = True\n elif event.key == pygame.K_RIGHT and not direction == 'LEFT' and not up_pressed and not down_pressed:\n direction = 'RIGHT'\n right_pressed = True\n elif event.key == pygame.K_LEFT and not direction == 'RIGHT' and not up_pressed and not down_pressed:\n direction = 'LEFT'\n left_pressed = True\n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_DOWN:\n None \n elif event.key == pygame.K_UP:\n None \n elif event.key == pygame.K_RIGHT:\n None \n elif event.key == pygame.K_LEFT:\n None\n up_pressed = False\n right_pressed = False\n down_pressed = False\n left_pressed = False\n \n if direction == 'RIGHT':\n x_change = 10\n y_change = 0\n elif direction == 'LEFT':\n x_change = -10\n 
y_change = 0\n elif direction == 'UP':\n x_change = 0\n y_change = -10\n elif direction == 'DOWN':\n x_change = 0\n y_change = 10\n status = check_ahead(screen, snake[0][0]+x_change, snake[0][1]+y_change)\n if status == 'NOTHING' or status == 'EAT':\n snake.insert(0,(snake[0][0]+x_change,snake[0][1]+y_change))\n if status == 'EAT':\n eaten = True\n eaten_cooldown = eaten_cooldown + 4\n food = new_food(screen, None)\n score += 1\n if random.randint(1,8) == 8 and not bonus:\n bonus = new_food(screen, [food])\n bonus_timer = 5\n if status == 'BONUS':\n bonus = None\n score += 6\n eaten_cooldown += 8\n if not eaten and eaten_cooldown == 0:\n snake = snake[0:-1]\n else:\n eaten = False\n eaten_cooldown = eaten_cooldown - 1\n if status == 'GAME_OVER':\n return score\n if bonus_timer:\n bonus_timer = bonus_timer - (clock.get_time() / 1000)\n if bonus_timer <= 0:\n bonus = None\n bonus_timer = 0\n screen.fill((0,0,0))\n pygame.draw.rect(screen,BLUE,upper_border)\n pygame.draw.rect(screen,BLUE,right_border)\n pygame.draw.rect(screen,BLUE,left_border)\n pygame.draw.rect(screen,BLUE,down_border)\n pygame.draw.rect(screen,(35,142,35),pygame.Rect(food[0],food[1],10,10))\n if bonus:\n pygame.draw.rect(screen,(255,215,0),pygame.Rect(bonus[0],bonus[1],10,10))\n screen.blit(font.render(str(round(bonus_timer,1)),False,(255,255,0)), (200,8)) \n screen.blit(font.render(\"Score: \" + str(score),False,(255,255,0)), (900,8))\n for dot in snake:\n pygame.draw.rect(screen,WHITE,pygame.Rect(dot[0],dot[1],10,10))\n pygame.display.update()\n if countdown:\n update_rect = pygame.Rect(500,350,100,100)\n countdown = False\n for i in range(3,0,-1):\n pygame.draw.rect(screen,(0,0,0),update_rect)\n screen.blit(countdown_font.render(str(i),False,BLUE), (500,350))\n pygame.display.update(update_rect)\n pygame.time.delay(1000)\n #print(clock.get_fps())\n clock.tick(25)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import socket
from threading import Thread
from ast import literal_eval
# --- Shared server state (read/written by the handler threads below) ---
clients = {}     # socket -> display name of each connected client
addresses = {}   # socket -> (host, port) tuple reported by accept()
host = '127.0.0.1'
port = 5678
active = []      # roster of {'Address': ..., 'Name': ...} dicts, broadcast to clients
addr = (host, port)
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # TCP listening socket
server.bind(addr)
groups = []      # NOTE(review): appears unused in this file -- confirm before removing
def broadcast(msg, prefix=""):  # prefix is for name identification.
	"""Send *msg* (bytes), preceded by the UTF-8 encoded *prefix*, to every client."""
	encoded_prefix = bytes(prefix, "utf8")
	for client_sock in clients:
		client_sock.send(encoded_prefix + msg)
def broadcast_file(msg):
	"""Relay raw bytes (e.g. a binary file chunk) verbatim to every connected client."""
	for client_sock in clients:
		client_sock.send(msg)
def private_message(address, message):
	"""Deliver *message* privately to the client whose socket repr contains *address*.

	The sender names a peer by the address string shown in the active-user
	roster; we match it against ``str(socket)``.  If no connected client
	matches (e.g. the peer disconnected in the meantime) the message is
	silently dropped.  Previously this raised IndexError, which the caller's
	bare ``except`` then handled by relaying the private text to EVERYONE
	as file data.
	"""
	tagged = '<private>' + message
	# next() with a default avoids the IndexError that list(...)[0] raised
	receiver = next((sock for sock in clients if address in str(sock)), None)
	if receiver is not None:
		receiver.send(bytes(tagged, 'utf-8'))
def accept_incoming_connections():
	"""Accept loop: register each new connection and hand it to a worker thread."""
	while True:
		conn, conn_addr = server.accept()
		print("%s:%s has connected." % (conn_addr[0], conn_addr[1]))
		addresses[conn] = conn_addr
		worker = Thread(target=handle_client, args=(conn,))
		worker.start()
def handle_client(client): # Takes client socket as argument.
	"""Handles a single client connection.

	The first recv() is taken as the client's display name.  Each later
	message is dispatched as follows:
	- contains both '(' and ')': private message; everything up to the
	  first ')' is the target address string, the next segment is the body.
	- the literal "{quit}": disconnect, update the roster, and stop.
	- any other UTF-8 text: broadcast to everyone as a global message.
	- payloads that do NOT decode as UTF-8 hit the inner ``except`` and are
	  relayed verbatim as binary file data.
	"""
	name = client.recv(2048).decode("utf8")
	welcome = 'Welcome %s! Enter {quit} to exit.' % name
	try:
		client.send(bytes(welcome, "utf8"))
		msg = "%s: has joined the chat!" % name
		broadcast(bytes(msg, "utf8"))
		clients[client] = name
		# register this client and push the updated active-user roster to all
		temp_client = {'Address':addresses[client],'Name':clients[client]}
		active.append(temp_client)
		broadcast(bytes(str(active),'utf-8'))
		while True:
			msg = client.recv(2048)
			try:
				# NOTE(review): any ordinary chat text containing both '('
				# and ')' is also routed down this private-message branch --
				# confirm this addressing scheme is intended.
				if '(' in msg.decode('utf-8') and ')' in msg.decode('utf-8'):
					temp = msg.decode('utf-8').split(')')
					address = temp[0] + ')'
					private_message(address,temp[1])
				elif msg != bytes("{quit}", "utf8"):
					broadcast(msg, "<global>" + name + ": ")
					print(client)
				else:
					# client asked to quit: close, drop from roster, notify rest
					#client.send(bytes("{quit}", "utf8"))
					client.close()
					active.remove({'Address':addresses[client],'Name':clients[client]})
					del clients[client]
					broadcast(bytes("%s has left the chat." % name, "utf8"))
					broadcast(bytes(str(active),'utf-8'))
					break
			except:
				# decode failed (or the private branch raised): treat payload
				# as opaque binary data and relay it unchanged to everyone
				print(msg)
				broadcast_file(msg)
	except Exception as e:
		print(e)
if __name__ == "__main__":
	server.listen(5) # Listens for 5 connections at max.
	print("Waiting for connection...")
	# Run the accept loop on its own thread; join() then blocks forever
	# because accept_incoming_connections() loops until the process dies.
	accept_clients_thread = Thread(target=accept_incoming_connections)
	accept_clients_thread.start() # Starts the infinite loop.
	accept_clients_thread.join()
	server.close()
|
normal
|
{
"blob_id": "9f02313b6f91f83e3a8b4af8d9447b1d8f3558f6",
"index": 4430,
"step-1": "<mask token>\n\n\ndef broadcast(msg, prefix=''):\n \"\"\"Broadcasts a message to all the clients.\"\"\"\n for sock in clients:\n sock.send(bytes(prefix, 'utf8') + msg)\n\n\ndef broadcast_file(msg):\n for sock in clients:\n sock.send(msg)\n\n\ndef private_message(address, message):\n message = '<private>' + message\n receiver = list(filter(lambda x: address in str(x), clients))[0]\n receiver.send(bytes(message, 'utf-8'))\n\n\ndef accept_incoming_connections():\n \"\"\"Sets up handling for incoming clients.\"\"\"\n while True:\n client, client_address = server.accept()\n print(str(client_address[0]) + ':' + str(client_address[1]) +\n ' has connected.')\n addresses[client] = client_address\n Thread(target=handle_client, args=(client,)).start()\n\n\ndef handle_client(client):\n \"\"\"Handles a single client connection.\"\"\"\n name = client.recv(2048).decode('utf8')\n welcome = 'Welcome %s! Enter {quit} to exit.' % name\n try:\n client.send(bytes(welcome, 'utf8'))\n msg = '%s: has joined the chat!' % name\n broadcast(bytes(msg, 'utf8'))\n clients[client] = name\n temp_client = {'Address': addresses[client], 'Name': clients[client]}\n active.append(temp_client)\n broadcast(bytes(str(active), 'utf-8'))\n while True:\n msg = client.recv(2048)\n try:\n if '(' in msg.decode('utf-8') and ')' in msg.decode('utf-8'):\n temp = msg.decode('utf-8').split(')')\n address = temp[0] + ')'\n private_message(address, temp[1])\n elif msg != bytes('{quit}', 'utf8'):\n broadcast(msg, '<global>' + name + ': ')\n print(client)\n else:\n client.close()\n active.remove({'Address': addresses[client], 'Name':\n clients[client]})\n del clients[client]\n broadcast(bytes('%s has left the chat.' % name, 'utf8'))\n broadcast(bytes(str(active), 'utf-8'))\n break\n except:\n print(msg)\n broadcast_file(msg)\n except Exception as e:\n print(e)\n\n\n<mask token>\n",
"step-2": "<mask token>\nserver.bind(addr)\n<mask token>\n\n\ndef broadcast(msg, prefix=''):\n \"\"\"Broadcasts a message to all the clients.\"\"\"\n for sock in clients:\n sock.send(bytes(prefix, 'utf8') + msg)\n\n\ndef broadcast_file(msg):\n for sock in clients:\n sock.send(msg)\n\n\ndef private_message(address, message):\n message = '<private>' + message\n receiver = list(filter(lambda x: address in str(x), clients))[0]\n receiver.send(bytes(message, 'utf-8'))\n\n\ndef accept_incoming_connections():\n \"\"\"Sets up handling for incoming clients.\"\"\"\n while True:\n client, client_address = server.accept()\n print(str(client_address[0]) + ':' + str(client_address[1]) +\n ' has connected.')\n addresses[client] = client_address\n Thread(target=handle_client, args=(client,)).start()\n\n\ndef handle_client(client):\n \"\"\"Handles a single client connection.\"\"\"\n name = client.recv(2048).decode('utf8')\n welcome = 'Welcome %s! Enter {quit} to exit.' % name\n try:\n client.send(bytes(welcome, 'utf8'))\n msg = '%s: has joined the chat!' % name\n broadcast(bytes(msg, 'utf8'))\n clients[client] = name\n temp_client = {'Address': addresses[client], 'Name': clients[client]}\n active.append(temp_client)\n broadcast(bytes(str(active), 'utf-8'))\n while True:\n msg = client.recv(2048)\n try:\n if '(' in msg.decode('utf-8') and ')' in msg.decode('utf-8'):\n temp = msg.decode('utf-8').split(')')\n address = temp[0] + ')'\n private_message(address, temp[1])\n elif msg != bytes('{quit}', 'utf8'):\n broadcast(msg, '<global>' + name + ': ')\n print(client)\n else:\n client.close()\n active.remove({'Address': addresses[client], 'Name':\n clients[client]})\n del clients[client]\n broadcast(bytes('%s has left the chat.' 
% name, 'utf8'))\n broadcast(bytes(str(active), 'utf-8'))\n break\n except:\n print(msg)\n broadcast_file(msg)\n except Exception as e:\n print(e)\n\n\nif __name__ == '__main__':\n server.listen(5)\n print('Waiting for connection...')\n accept_clients_thread = Thread(target=accept_incoming_connections)\n accept_clients_thread.start()\n accept_clients_thread.join()\n server.close()\n",
"step-3": "<mask token>\nclients = {}\naddresses = {}\nhost = '127.0.0.1'\nport = 5678\nactive = []\naddr = host, port\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.bind(addr)\ngroups = []\n\n\ndef broadcast(msg, prefix=''):\n \"\"\"Broadcasts a message to all the clients.\"\"\"\n for sock in clients:\n sock.send(bytes(prefix, 'utf8') + msg)\n\n\ndef broadcast_file(msg):\n for sock in clients:\n sock.send(msg)\n\n\ndef private_message(address, message):\n message = '<private>' + message\n receiver = list(filter(lambda x: address in str(x), clients))[0]\n receiver.send(bytes(message, 'utf-8'))\n\n\ndef accept_incoming_connections():\n \"\"\"Sets up handling for incoming clients.\"\"\"\n while True:\n client, client_address = server.accept()\n print(str(client_address[0]) + ':' + str(client_address[1]) +\n ' has connected.')\n addresses[client] = client_address\n Thread(target=handle_client, args=(client,)).start()\n\n\ndef handle_client(client):\n \"\"\"Handles a single client connection.\"\"\"\n name = client.recv(2048).decode('utf8')\n welcome = 'Welcome %s! Enter {quit} to exit.' % name\n try:\n client.send(bytes(welcome, 'utf8'))\n msg = '%s: has joined the chat!' % name\n broadcast(bytes(msg, 'utf8'))\n clients[client] = name\n temp_client = {'Address': addresses[client], 'Name': clients[client]}\n active.append(temp_client)\n broadcast(bytes(str(active), 'utf-8'))\n while True:\n msg = client.recv(2048)\n try:\n if '(' in msg.decode('utf-8') and ')' in msg.decode('utf-8'):\n temp = msg.decode('utf-8').split(')')\n address = temp[0] + ')'\n private_message(address, temp[1])\n elif msg != bytes('{quit}', 'utf8'):\n broadcast(msg, '<global>' + name + ': ')\n print(client)\n else:\n client.close()\n active.remove({'Address': addresses[client], 'Name':\n clients[client]})\n del clients[client]\n broadcast(bytes('%s has left the chat.' 
% name, 'utf8'))\n broadcast(bytes(str(active), 'utf-8'))\n break\n except:\n print(msg)\n broadcast_file(msg)\n except Exception as e:\n print(e)\n\n\nif __name__ == '__main__':\n server.listen(5)\n print('Waiting for connection...')\n accept_clients_thread = Thread(target=accept_incoming_connections)\n accept_clients_thread.start()\n accept_clients_thread.join()\n server.close()\n",
"step-4": "import socket\nfrom threading import Thread\nfrom ast import literal_eval\nclients = {}\naddresses = {}\nhost = '127.0.0.1'\nport = 5678\nactive = []\naddr = host, port\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.bind(addr)\ngroups = []\n\n\ndef broadcast(msg, prefix=''):\n \"\"\"Broadcasts a message to all the clients.\"\"\"\n for sock in clients:\n sock.send(bytes(prefix, 'utf8') + msg)\n\n\ndef broadcast_file(msg):\n for sock in clients:\n sock.send(msg)\n\n\ndef private_message(address, message):\n message = '<private>' + message\n receiver = list(filter(lambda x: address in str(x), clients))[0]\n receiver.send(bytes(message, 'utf-8'))\n\n\ndef accept_incoming_connections():\n \"\"\"Sets up handling for incoming clients.\"\"\"\n while True:\n client, client_address = server.accept()\n print(str(client_address[0]) + ':' + str(client_address[1]) +\n ' has connected.')\n addresses[client] = client_address\n Thread(target=handle_client, args=(client,)).start()\n\n\ndef handle_client(client):\n \"\"\"Handles a single client connection.\"\"\"\n name = client.recv(2048).decode('utf8')\n welcome = 'Welcome %s! Enter {quit} to exit.' % name\n try:\n client.send(bytes(welcome, 'utf8'))\n msg = '%s: has joined the chat!' % name\n broadcast(bytes(msg, 'utf8'))\n clients[client] = name\n temp_client = {'Address': addresses[client], 'Name': clients[client]}\n active.append(temp_client)\n broadcast(bytes(str(active), 'utf-8'))\n while True:\n msg = client.recv(2048)\n try:\n if '(' in msg.decode('utf-8') and ')' in msg.decode('utf-8'):\n temp = msg.decode('utf-8').split(')')\n address = temp[0] + ')'\n private_message(address, temp[1])\n elif msg != bytes('{quit}', 'utf8'):\n broadcast(msg, '<global>' + name + ': ')\n print(client)\n else:\n client.close()\n active.remove({'Address': addresses[client], 'Name':\n clients[client]})\n del clients[client]\n broadcast(bytes('%s has left the chat.' 
% name, 'utf8'))\n broadcast(bytes(str(active), 'utf-8'))\n break\n except:\n print(msg)\n broadcast_file(msg)\n except Exception as e:\n print(e)\n\n\nif __name__ == '__main__':\n server.listen(5)\n print('Waiting for connection...')\n accept_clients_thread = Thread(target=accept_incoming_connections)\n accept_clients_thread.start()\n accept_clients_thread.join()\n server.close()\n",
"step-5": "import socket\nfrom threading import Thread\nfrom ast import literal_eval\n\nclients = {}\naddresses = {}\nhost = '127.0.0.1'\nport = 5678\nactive = []\naddr = (host, port)\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nserver.bind(addr)\ngroups = []\n\ndef broadcast(msg, prefix=\"\"): # prefix is for name identification.\n\t\"\"\"Broadcasts a message to all the clients.\"\"\"\n\tfor sock in clients:\n\t\tsock.send(bytes(prefix, \"utf8\")+msg)\n\ndef broadcast_file(msg):\n\tfor sock in clients:\n\t\tsock.send(msg)\n\ndef private_message(address,message):\n\tmessage = '<private>' + message\n\treceiver = list(filter(lambda x: address in str(x), clients))[0]\n\treceiver.send(bytes(message,'utf-8'))\n\ndef accept_incoming_connections():\n\t\"\"\"Sets up handling for incoming clients.\"\"\"\n\twhile True:\n\t\tclient, client_address = server.accept()\n\t\tprint(str(client_address[0]) + \":\" + str(client_address[1]) + \" has connected.\")\n\t\taddresses[client] = client_address\n\t\tThread(target=handle_client, args=(client,)).start()\n\ndef handle_client(client): # Takes client socket as argument.\n\t\"\"\"Handles a single client connection.\"\"\"\n\tname = client.recv(2048).decode(\"utf8\")\n\twelcome = 'Welcome %s! Enter {quit} to exit.' 
% name\n\ttry:\n\t\tclient.send(bytes(welcome, \"utf8\"))\n\t\tmsg = \"%s: has joined the chat!\" % name\n\t\tbroadcast(bytes(msg, \"utf8\"))\n\t\tclients[client] = name\n\t\ttemp_client = {'Address':addresses[client],'Name':clients[client]}\n\t\tactive.append(temp_client)\n\t\tbroadcast(bytes(str(active),'utf-8'))\n\t\twhile True:\n\t\t\tmsg = client.recv(2048)\n\t\t\ttry:\n\t\t\t\tif '(' in msg.decode('utf-8') and ')' in msg.decode('utf-8'):\n\t\t\t\t\ttemp = msg.decode('utf-8').split(')')\n\t\t\t\t\taddress = temp[0] + ')'\n\t\t\t\t\tprivate_message(address,temp[1])\n\t\t\t\telif msg != bytes(\"{quit}\", \"utf8\"):\n\t\t\t\t\tbroadcast(msg, \"<global>\" + name + \": \")\n\t\t\t\t\tprint(client)\n\t\t\t\telse:\n\t\t\t\t\t#client.send(bytes(\"{quit}\", \"utf8\"))\n\t\t\t\t\tclient.close()\n\t\t\t\t\tactive.remove({'Address':addresses[client],'Name':clients[client]})\n\t\t\t\t\tdel clients[client]\n\t\t\t\t\tbroadcast(bytes(\"%s has left the chat.\" % name, \"utf8\"))\n\t\t\t\t\tbroadcast(bytes(str(active),'utf-8'))\n\t\t\t\t\tbreak\n\t\t\texcept:\n\t\t\t\tprint(msg)\n\t\t\t\tbroadcast_file(msg)\n\texcept Exception as e:\n\t\tprint(e)\n\nif __name__ == \"__main__\":\n\tserver.listen(5) # Listens for 5 connections at max.\n\tprint(\"Waiting for connection...\")\n\taccept_clients_thread = Thread(target=accept_incoming_connections)\n\taccept_clients_thread.start() # Starts the infinite loop.\n\taccept_clients_thread.join()\n\tserver.close()\n\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
#!/usr/bin/env python
#python
import os
import math
import sys
import time
import re
import cPickle
import random
#eman
try:
import EMAN
except:
print "EMAN module did not get imported"
#scipy
import numpy
#appion
from appionlib import appionScript
from appionlib import appiondata
from appionlib import apDisplay
from appionlib import apStack
from appionlib import apRecon
from appionlib import apEMAN
from appionlib import apSymmetry
from appionlib import apFile
#=====================
#=====================
class satAverageScript(appionScript.AppionScript):
#=====================
def makeEvenOddClasses(self, listfile, outputstack, classdata, maskrad):
f=open(listfile,'r')
f.readline()
lines = f.readlines()
f.close()
randstr = str(int(random.random()*10e5))
evenfile = self.rootname+"-even.lst"
evenf = open(evenfile,'w')
oddfile = self.rootname+"-odd.lst"
oddf = open(oddfile,'w')
evenf.write("#LST\n")
oddf.write("#LST\n")
neven=0
nodd=0
for i in range(0, len(lines)):
if i%2:
nodd+=1
oddf.write(lines[i])
else:
neven+=1
evenf.write(lines[i])
evenf.close()
oddf.close()
if neven>0:
self.makeClassAverages(evenfile, self.params['evenstack'], classdata, maskrad)
if nodd>0:
self.makeClassAverages(oddfile, self.params['oddstack'], classdata, maskrad)
apFile.removeFile(evenfile)
apFile.removeFile(oddfile)
#=====================
def getParticleInfo(self, reconid, iteration):
"""
Get all particle data for given recon and iteration
"""
t0 = time.time()
cachefile = os.path.join(self.params['rundir'],
"refineparticledata-r"+str(reconid)+"-i"+str(iteration)+".cache")
if os.path.isfile(cachefile):
apDisplay.printColor("loading refineparticledata from cache file", "cyan")
f = open(cachefile, 'r')
refineparticledata = cPickle.load(f)
f.close()
else:
refinerundata = appiondata.ApRefineRunData.direct_query(reconid)
if not refinerundata:
apDisplay.printError("Could not find refinerundata for reconrun id="+str(reconid))
refineq = appiondata.ApRefineIterData()
refineq['refineRun'] = refinerundata
refineq['iteration'] = iteration
refinedata = refineq.query(results=1)
if not refinedata:
apDisplay.printError("Could not find refinedata for reconrun id="
+str(reconid)+" iter="+str(iteration))
refinepartq=appiondata.ApRefineParticleData()
refinepartq['refineIter']=refinedata[0]
apDisplay.printMsg("querying particles on "+time.asctime())
refineparticledata = refinepartq.query()
apDisplay.printMsg("saving refineparticledata to cache file")
f = open(cachefile, 'w')
cPickle.dump(refineparticledata, f)
f.close()
apDisplay.printMsg("received "+str(len(refineparticledata))+" particles in "+apDisplay.timeString(time.time()-t0))
return refineparticledata
#=====================
def procKeepList(self):
"""
Removes particles by reading a list of particle numbers generated externally.
Requirements:
the input file has one particle per line
the first piece of data is the particle number from the db
"""
keeplist = []
f = open(self.params['keeplist'], 'r')
lines = f.readlines()
f.close()
for n in lines:
words = n.split()
keeplist.append(int(words[0])+1)
return keeplist
#=====================
def makeClassAverages(self, classlist, outputstack, classdata, maskrad):
#align images in class
#print classlist
images = EMAN.readImages(classlist, -1, -1, 0)
for image in images:
image.rotateAndTranslate()
if image.isFlipped():
image.hFlip()
#make class average
avg = EMAN.EMData()
avg.makeMedian(images)
#write class average
e = EMAN.Euler()
alt = classdata['euler1']*math.pi/180
az = classdata['euler2']*math.pi/180
phi = 0.0 #classdata['euler3']*math.pi/180
e.setAngle(alt, az, phi)
avg.setRAlign(e)
avg.setNImg(len(images))
avg.applyMask(maskrad, 0)
avg.writeImage(outputstack,-1)
#=====================
def determineClasses(self, particles):
"""
Takes refineparticledata and returns a dictionary of classes
"""
apDisplay.printMsg("sorting refineparticledata into classes")
t0 = time.time()
classes={}
class_stats={}
quality=numpy.zeros(len(particles))
for partnum in range(len(particles)):
quality[partnum] = particles[partnum]['quality_factor']
key = ("%.3f_%.3f"%(particles[partnum]['euler1'], particles[partnum]['euler2']))
if key not in classes.keys():
classes[key]={}
classes[key]['particles']=[]
classes[key]['euler1'] = particles[partnum]['euler1']
classes[key]['euler2'] = particles[partnum]['euler2']
#classes have no inplane rotation
classes[key]['euler3'] = 0.0 #particles[partnum]['euler3']
classes[key]['particles'].append(particles[partnum])
class_stats['meanquality']=quality.mean()
class_stats['stdquality']=quality.std()
class_stats['max']=quality.max()
class_stats['min']=quality.min()
apDisplay.printMsg("sorted %d particles into %d classes"%(len(particles), len(classes)))
### print stats
print "-- quality factor stats --"
print ("mean/std :: "+str(round(class_stats['meanquality'],2))+" +/- "
+str(round(class_stats['stdquality'],2)))
print ("min/max :: "+str(round(class_stats['min'],2))+" <> "
+str(round(class_stats['max'],2)))
apDisplay.printMsg("finished sorting in "+apDisplay.timeString(time.time()-t0))
return classes, class_stats
#=====================
def getClassData(self, reconid, iternum):
t0 = time.time()
cachefile = os.path.join(self.params['rundir'],
"partclassdata-r"+str(reconid)+"-i"+str(iternum)+".cache")
if os.path.isfile(cachefile):
apDisplay.printColor("loading particle class data from cache file", "cyan")
f = open(cachefile, 'r')
classes = cPickle.load(f)
f.close()
else:
apDisplay.printMsg("determine particle class data from database")
particles = self.getParticleInfo(reconid, iternum)
classes, cstats = self.determineClasses(particles)
f = open(cachefile, 'w')
apDisplay.printMsg("saving particle class data to cache file")
cPickle.dump(classes, f)
f.close()
apDisplay.printMsg("received "+str(len(classes))+" classes in "+apDisplay.timeString(time.time()-t0))
return classes
#######################################################
#### ITEMS BELOW CAN BE SPECIFIED IN A NEW PROGRAM ####
#######################################################
#=====================
	def setupParserOptions(self):
		"""Register the command-line options for this script.

		reconid, mask and iter are later validated as mandatory in
		checkConflicts; eotest toggles the even/odd resolution test.
		"""
		self.parser.set_usage("Usage: %prog --reconid=<DEF_id> --iter=<iter> --mask=<radius>\n\t "
			+"[ --stackname=<name> "
			+" --avgjump=<avg> --sigma=<sigma> --eotest ]")
		self.parser.add_option("-r", "--reconid", dest="reconid", type="int",
			help="Reconstruction run id", metavar="INT")
		self.parser.add_option("-m", "--mask", dest="mask", type="int",
			help="Mask radius in pixels", metavar="INT")
		self.parser.add_option("-i", "--iter", dest="iter", type="int",
			help="Final eulers applied to particles will come from this iteration", metavar="INT")
		self.parser.add_option("--stackname", dest="stackname", default="goodavgs.hed",
			help="Name of the stack to write the averages", metavar="file.hed")
		self.parser.add_option("--keep-list", dest="keeplist",
			help="Keep particles in the specified text file, EMAN style 0,1,...", metavar="TEXT")
		self.parser.add_option("--eotest", dest="eotest", default=False,
			action="store_true", help="Perform even/odd test")
#=====================
def checkConflicts(self):
if self.params['reconid'] is None:
apDisplay.printError("enter a reconstruction ID from the database")
if self.params['mask'] is None:
apDisplay.printError("enter a mask radius")
if self.params['iter'] is None:
apDisplay.printError("enter an iteration for the final Eulers")
if self.params['keeplist'] is None:
apDisplay.printError("enter an keep list file")
self.params['keeplist'] = os.path.abspath(self.params['keeplist'])
if not os.path.isfile(self.params['keeplist']):
apDisplay.printError("could not find list file")
self.params['stackid'] = apStack.getStackIdFromRecon(self.params['reconid'])
if self.params['stackname'][-4:] != ".hed":
s = os.path.splitext(self.params['stackname'])[0]
s += ".hed"
self.params['stackname'] = s
apDisplay.printMsg("Stack name: "+self.params['stackname'])
self.params['symmetry'] = apSymmetry.getSymmetryFromReconRunId(self.params['reconid'])
self.params['symmname'] = self.params['symmetry']['eman_name']
#=====================
def setRunDir(self):
refdata = appiondata.ApRefineRunData.direct_query(self.params['reconid'])
if not refdata:
apDisplay.printError("reconid "+str(self.params['reconid'])+" does not exist in the database")
refpath = refdata['path']['path']
rundir = os.path.join(refpath, "../../satEuler/sat-recon%d/volumes"%(self.params['reconid']))
self.params['rundir'] = os.path.abspath(rundir)
#=====================
	def start(self):
		"""Build class averages from kept particles and reconstruct 3D volumes.

		Workflow: remove stale output stacks, fetch the particle classes,
		write a per-class EMAN .lst file of kept particles, average each
		class into the output stack, then run EMAN make3d/proc3d on the
		result. With --eotest, also builds even/odd half stacks and volumes
		and runs an FSC-based even/odd resolution test.
		"""
		self.rootname = self.params['stackname'].split(".")[0]
		self.params['outputstack'] = os.path.join(self.params['rundir'], self.params['stackname'])
		# remove any stacks left over from a previous run
		if os.path.isfile(self.params['outputstack']):
			apFile.removeStack(self.params['outputstack'])
		if self.params['eotest'] is True:
			self.params['evenstack'] = os.path.splitext(self.params['outputstack'])[0]+'.even.hed'
			if os.path.isfile(self.params['evenstack']):
				apFile.removeStack(self.params['evenstack'])
			self.params['oddstack'] = os.path.splitext(self.params['outputstack'])[0]+'.odd.hed'
			if os.path.isfile(self.params['oddstack']):
				apFile.removeStack(self.params['oddstack'])
		classes = self.getClassData(self.params['reconid'], self.params['iter'])
		stackid = apStack.getStackIdFromRecon(self.params['reconid'])
		stackdata = apStack.getOnlyStackData(stackid)
		stackpath = os.path.join(stackdata['path']['path'], stackdata['name'])
		classkeys = classes.keys()
		classkeys.sort()
		classnum=0
		keeplist = self.procKeepList()
		finallist = []
		apDisplay.printMsg("Processing "+str(len(classes))+" classes")
		#loop through classes
		for key in classkeys:
			classnum+=1
			if classnum%10 == 1:
				# backspaces overwrite the progress counter in place on stderr
				sys.stderr.write("\b\b\b\b\b\b\b\b\b\b\b\b\b\b")
				sys.stderr.write(str(classnum)+" of "+(str(len(classkeys))))
			# loop through particles in class
			classfile = self.rootname+"-class.lst"
			classf = open(classfile, 'w')
			classf.write('#LST\n')
			nptcls=0
			for ptcl in classes[key]['particles']:
				# translate DB into EMAN
				# (particleNumber - 1: DB numbering appears 1-based vs EMAN 0-based)
				partnum = ptcl['particle']['particleNumber'] - 1
				if partnum in keeplist:
					if ptcl['mirror']:
						mirror=1
					else:
						mirror=0
					rot = ptcl['euler3']*math.pi/180.0
					classf.write(
						"%d\t%s\t%f,\t%f,%f,%f,%d\n" %
						(partnum, stackpath, ptcl['quality_factor'],
						rot, ptcl['shiftx'], ptcl['shifty'], mirror))
					nptcls+=1
					finallist.append(partnum)
			classf.close()
			# skip classes with no kept particles
			if nptcls<1:
				continue
			self.makeClassAverages(classfile, self.params['outputstack'], classes[key], self.params['mask'])
			if self.params['eotest'] is True:
				self.makeEvenOddClasses(classfile, self.params['outputstack'], classes[key], self.params['mask'])
			apFile.removeFile(classfile)
		sys.stderr.write("\n")
		# write the sorted list of all kept particle numbers
		finalfilename = self.rootname+"-keep.lst"
		finalf = open(finalfilename, 'w')
		finallist.sort()
		for partnum in finallist:
			finalf.write('%d\n' % (partnum,) )
		finalf.close()
		stackstr = str(stackdata.dbid)
		reconstr = str(self.params['reconid'])
		### recon 3d volumes
		# NOTE(review): pixel size (apix=1.63), pad=240, hard=50 and the
		# 160.0 box value below are hard-coded -- confirm for other datasets
		threedname = os.path.join(self.params['rundir'], self.rootname+"."+str(self.params['iter'])+"a.mrc")
		emancmd = ( "make3d "+self.params['outputstack']+" out="
			+threedname+" hard=50 sym="+self.params['symmname']+" pad=240 mask="+str(self.params['mask'])+"; echo ''" )
		#print emancmd
		apEMAN.executeEmanCmd(emancmd, verbose=False, showcmd=True, logfile=self.rootname+"-eman.log")
		threednameb = os.path.join(self.params['rundir'], self.rootname+"."+str(self.params['iter'])+"b.mrc")
		emancmd = ( "proc3d "+threedname+" "+threednameb
			+" apix=1.63 norm=0,1 lp=8 origin=0,0,0 mask="+str(self.params['mask'])+"; echo '' " )
		apEMAN.executeEmanCmd(emancmd, verbose=False, showcmd=True, logfile=self.rootname+"-eman.log")
		if self.params['eotest'] is True:
			# even
			evenname = os.path.join(self.params['rundir'], self.rootname+"-even."+str(self.params['iter'])+"a.mrc")
			if os.path.isfile(self.params['evenstack']):
				evenemancmd = ( "make3d "+self.params['evenstack']+" out="
					+evenname+" hard=50 sym="+self.params['symmname']+" pad=240 mask="+str(self.params['mask'])+"; echo ''" )
				#print evenemancmd
				apEMAN.executeEmanCmd(evenemancmd, verbose=False, showcmd=True, logfile=self.rootname+"-eveneman.log")
			else:
				apDisplay.printWarning("file "+self.params['evenstack']+" does not exist")
			# odd
			oddname = os.path.join(self.params['rundir'], self.rootname+"-odd."+str(self.params['iter'])+"a.mrc")
			if os.path.isfile(self.params['oddstack']):
				oddemancmd = ( "make3d "+self.params['oddstack']+" out="
					+oddname+" hard=50 sym="+self.params['symmname']+" pad=240 mask="+str(self.params['mask'])+"; echo ''" )
				#print oddemancmd
				apEMAN.executeEmanCmd(oddemancmd, verbose=False, showcmd=True, logfile=self.rootname+"-oddeman.log")
			else:
				apDisplay.printWarning("file "+self.params['oddstack']+" does not exist")
			#eotest: FSC between the odd and even half-volumes
			fscout = os.path.join(self.params['rundir'], self.rootname+"-fsc.eotest")
			if os.path.isfile(oddname) and os.path.isfile(evenname):
				eotestcmd = "proc3d "+oddname+" "+evenname+" fsc="+fscout
				apEMAN.executeEmanCmd(eotestcmd, verbose=True, showcmd=True)
			else:
				apDisplay.printWarning("could not perform eotest")
			if os.path.isfile(fscout):
				# convert the FSC curve into a resolution estimate and log it
				res = apRecon.getResolutionFromFSCFile(fscout, 160.0, 1.63)
				apDisplay.printColor( ("resolution: %.5f" % (res)), "cyan")
				resfile = self.rootname+"-res.txt"
				f = open(resfile, 'a')
				f.write("[ %s ]\nresolution: %.5f\n" % (time.asctime(), res))
				f.close()
#=====================
#=====================
if __name__ == '__main__':
	# run the script: parse/validate options, do the work, then clean up
	satavg = satAverageScript()
	satavg.start()
	satavg.close()
|
normal
|
{
"blob_id": "49887a3914fa0021a03d89721aa47cded95d54f6",
"index": 9605,
"step-1": "#!/usr/bin/env python\n\n#python\nimport os\nimport math\nimport sys\nimport time\nimport re\nimport cPickle\nimport random\n#eman\ntry:\n\timport EMAN\nexcept:\n\tprint \"EMAN module did not get imported\"\n#scipy\nimport numpy\n#appion\nfrom appionlib import appionScript\nfrom appionlib import appiondata\nfrom appionlib import apDisplay\nfrom appionlib import apStack\nfrom appionlib import apRecon\nfrom appionlib import apEMAN\nfrom appionlib import apSymmetry\nfrom appionlib import apFile\n\n\n\n#=====================\n#=====================\nclass satAverageScript(appionScript.AppionScript):\n\n\t#=====================\n\tdef makeEvenOddClasses(self, listfile, outputstack, classdata, maskrad):\n\t\tf=open(listfile,'r')\n\t\tf.readline()\n\t\tlines = f.readlines()\n\t\tf.close()\n\t\trandstr = str(int(random.random()*10e5))\n\t\tevenfile = self.rootname+\"-even.lst\"\n\t\tevenf = open(evenfile,'w')\n\t\toddfile = self.rootname+\"-odd.lst\"\n\t\toddf = open(oddfile,'w')\n\t\tevenf.write(\"#LST\\n\")\n\t\toddf.write(\"#LST\\n\")\n\t\tneven=0\n\t\tnodd=0\n\t\tfor i in range(0, len(lines)):\n\t\t\tif i%2:\n\t\t\t\tnodd+=1\n\t\t\t\toddf.write(lines[i])\n\t\t\telse:\n\t\t\t\tneven+=1\n\t\t\t\tevenf.write(lines[i])\n\t\tevenf.close()\n\t\toddf.close()\n\n\t\tif neven>0:\n\t\t\tself.makeClassAverages(evenfile, self.params['evenstack'], classdata, maskrad)\n\t\tif nodd>0:\n\t\t\tself.makeClassAverages(oddfile, self.params['oddstack'], classdata, maskrad)\n\t\tapFile.removeFile(evenfile)\n\t\tapFile.removeFile(oddfile)\n\n\t#=====================\n\tdef getParticleInfo(self, reconid, iteration):\n\t\t\"\"\"\n\t\tGet all particle data for given recon and iteration\n\t\t\"\"\"\n\t\tt0 = time.time()\n\t\tcachefile = os.path.join(self.params['rundir'],\n\t\t\t\"refineparticledata-r\"+str(reconid)+\"-i\"+str(iteration)+\".cache\")\n\t\tif os.path.isfile(cachefile):\n\t\t\tapDisplay.printColor(\"loading refineparticledata from cache file\", \"cyan\")\n\t\t\tf = 
open(cachefile, 'r')\n\t\t\trefineparticledata = cPickle.load(f)\n\t\t\tf.close()\n\t\telse:\n\t\t\trefinerundata = appiondata.ApRefineRunData.direct_query(reconid)\n\t\t\tif not refinerundata:\n\t\t\t\tapDisplay.printError(\"Could not find refinerundata for reconrun id=\"+str(reconid))\n\n\t\t\trefineq = appiondata.ApRefineIterData()\n\t\t\trefineq['refineRun'] = refinerundata\n\t\t\trefineq['iteration'] = iteration\n\t\t\trefinedata = refineq.query(results=1)\n\n\t\t\tif not refinedata:\n\t\t\t\tapDisplay.printError(\"Could not find refinedata for reconrun id=\"\n\t\t\t\t\t+str(reconid)+\" iter=\"+str(iteration))\n\n\t\t\trefinepartq=appiondata.ApRefineParticleData()\n\t\t\trefinepartq['refineIter']=refinedata[0]\n\n\t\t\tapDisplay.printMsg(\"querying particles on \"+time.asctime())\n\t\t\trefineparticledata = refinepartq.query()\n\t\t\tapDisplay.printMsg(\"saving refineparticledata to cache file\")\n\t\t\tf = open(cachefile, 'w')\n\t\t\tcPickle.dump(refineparticledata, f)\n\t\t\tf.close()\n\n\t\tapDisplay.printMsg(\"received \"+str(len(refineparticledata))+\" particles in \"+apDisplay.timeString(time.time()-t0))\n\t\treturn refineparticledata\n\n\t#=====================\n\tdef procKeepList(self):\n\t\t\"\"\"\n\t\tRemoves particles by reading a list of particle numbers generated externally.\n\n\t\tRequirements:\n\t\t\tthe input file has one particle per line\n\t\t\tthe first piece of data is the particle number from the db\n\t\t\"\"\"\n\t\tkeeplist = []\n\t\tf = open(self.params['keeplist'], 'r')\n\t\tlines = f.readlines()\n\t\tf.close()\n\t\tfor n in lines:\n\t\t\twords = n.split()\n\t\t\tkeeplist.append(int(words[0])+1)\n\t\treturn keeplist\n\n\t#=====================\n\tdef makeClassAverages(self, classlist, outputstack, classdata, maskrad):\n\t\t#align images in class\n\t\t#print classlist\n\t\timages = EMAN.readImages(classlist, -1, -1, 0)\n\t\tfor image in images:\n\t\t\timage.rotateAndTranslate()\n\t\t\tif 
image.isFlipped():\n\t\t\t\timage.hFlip()\n\n\t\t#make class average\n\t\tavg = EMAN.EMData()\n\t\tavg.makeMedian(images)\n\n\t\t#write class average\n\t\te = EMAN.Euler()\n\t\talt = classdata['euler1']*math.pi/180\n\t\taz = classdata['euler2']*math.pi/180\n\t\tphi = 0.0 #classdata['euler3']*math.pi/180\n\t\te.setAngle(alt, az, phi)\n\t\tavg.setRAlign(e)\n\t\tavg.setNImg(len(images))\n\t\tavg.applyMask(maskrad, 0)\n\n\t\tavg.writeImage(outputstack,-1)\n\n\t#=====================\n\tdef determineClasses(self, particles):\n\t\t\"\"\"\n\t\tTakes refineparticledata and returns a dictionary of classes\n\t\t\"\"\"\n\t\tapDisplay.printMsg(\"sorting refineparticledata into classes\")\n\t\tt0 = time.time()\n\t\tclasses={}\n\t\tclass_stats={}\n\t\tquality=numpy.zeros(len(particles))\n\t\tfor partnum in range(len(particles)):\n\t\t\tquality[partnum] = particles[partnum]['quality_factor']\n\t\t\tkey = (\"%.3f_%.3f\"%(particles[partnum]['euler1'], particles[partnum]['euler2']))\n\t\t\tif key not in classes.keys():\n\t\t\t\tclasses[key]={}\n\t\t\t\tclasses[key]['particles']=[]\n\t\t\t\tclasses[key]['euler1'] = particles[partnum]['euler1']\n\t\t\t\tclasses[key]['euler2'] = particles[partnum]['euler2']\n\t\t\t\t#classes have no inplane rotation\n\t\t\t\tclasses[key]['euler3'] = 0.0 #particles[partnum]['euler3']\n\t\t\tclasses[key]['particles'].append(particles[partnum])\n\t\tclass_stats['meanquality']=quality.mean()\n\t\tclass_stats['stdquality']=quality.std()\n\t\tclass_stats['max']=quality.max()\n\t\tclass_stats['min']=quality.min()\n\t\tapDisplay.printMsg(\"sorted %d particles into %d classes\"%(len(particles), len(classes)))\n\t\t### print stats\n\t\tprint \"-- quality factor stats --\"\n\t\tprint (\"mean/std :: \"+str(round(class_stats['meanquality'],2))+\" +/- \"\n\t\t\t+str(round(class_stats['stdquality'],2)))\n\t\tprint (\"min/max :: \"+str(round(class_stats['min'],2))+\" <> \"\n\t\t\t+str(round(class_stats['max'],2)))\n\t\tapDisplay.printMsg(\"finished sorting in 
\"+apDisplay.timeString(time.time()-t0))\n\t\treturn classes, class_stats\n\n\t#=====================\n\tdef getClassData(self, reconid, iternum):\n\t\tt0 = time.time()\n\t\tcachefile = os.path.join(self.params['rundir'],\n\t\t\t\"partclassdata-r\"+str(reconid)+\"-i\"+str(iternum)+\".cache\")\n\t\tif os.path.isfile(cachefile):\n\t\t\tapDisplay.printColor(\"loading particle class data from cache file\", \"cyan\")\n\t\t\tf = open(cachefile, 'r')\n\t\t\tclasses = cPickle.load(f)\n\t\t\tf.close()\n\t\telse:\n\t\t\tapDisplay.printMsg(\"determine particle class data from database\")\n\t\t\tparticles = self.getParticleInfo(reconid, iternum)\n\t\t\tclasses, cstats = self.determineClasses(particles)\n\t\t\tf = open(cachefile, 'w')\n\t\t\tapDisplay.printMsg(\"saving particle class data to cache file\")\n\t\t\tcPickle.dump(classes, f)\n\t\t\tf.close()\n\t\tapDisplay.printMsg(\"received \"+str(len(classes))+\" classes in \"+apDisplay.timeString(time.time()-t0))\n\t\treturn classes\n\n\n\t#######################################################\n\t#### ITEMS BELOW CAN BE SPECIFIED IN A NEW PROGRAM ####\n\t#######################################################\n\n\t#=====================\n\tdef setupParserOptions(self):\n\t\tself.parser.set_usage(\"Usage: %prog --reconid=<DEF_id> --iter=<iter> --mask=<radius>\\n\\t \"\n\t\t\t+\"[ --stackname=<name> \"\n\t\t\t+\" --avgjump=<avg> --sigma=<sigma> --eotest ]\")\n\t\tself.parser.add_option(\"-r\", \"--reconid\", dest=\"reconid\", type=\"int\",\n\t\t\thelp=\"Reconstruction run id\", metavar=\"INT\")\n\t\tself.parser.add_option(\"-m\", \"--mask\", dest=\"mask\", type=\"int\",\n\t\t\thelp=\"Mask radius in pixels\", metavar=\"INT\")\n\t\tself.parser.add_option(\"-i\", \"--iter\", dest=\"iter\", type=\"int\",\n\t\t\thelp=\"Final eulers applied to particles will come from this iteration\", metavar=\"INT\")\n\t\tself.parser.add_option(\"--stackname\", dest=\"stackname\", default=\"goodavgs.hed\",\n\t\t\thelp=\"Name of the stack to write the 
averages\", metavar=\"file.hed\")\n\t\tself.parser.add_option(\"--keep-list\", dest=\"keeplist\",\n\t\t\thelp=\"Keep particles in the specified text file, EMAN style 0,1,...\", metavar=\"TEXT\")\n\t\tself.parser.add_option(\"--eotest\", dest=\"eotest\", default=False,\n\t\t\taction=\"store_true\", help=\"Perform even/odd test\")\n\n\t#=====================\n\tdef checkConflicts(self):\n\t\tif self.params['reconid'] is None:\n\t\t\tapDisplay.printError(\"enter a reconstruction ID from the database\")\n\t\tif self.params['mask'] is None:\n\t\t\tapDisplay.printError(\"enter a mask radius\")\n\t\tif self.params['iter'] is None:\n\t\t\tapDisplay.printError(\"enter an iteration for the final Eulers\")\n\t\tif self.params['keeplist'] is None:\n\t\t\tapDisplay.printError(\"enter an keep list file\")\n\t\tself.params['keeplist'] = os.path.abspath(self.params['keeplist'])\n\t\tif not os.path.isfile(self.params['keeplist']):\n\t\t\tapDisplay.printError(\"could not find list file\")\n\t\tself.params['stackid'] = apStack.getStackIdFromRecon(self.params['reconid'])\n\t\tif self.params['stackname'][-4:] != \".hed\":\n\t\t\ts = os.path.splitext(self.params['stackname'])[0]\n\t\t\ts += \".hed\"\n\t\t\tself.params['stackname'] = s\n\t\tapDisplay.printMsg(\"Stack name: \"+self.params['stackname'])\n\t\tself.params['symmetry'] = apSymmetry.getSymmetryFromReconRunId(self.params['reconid'])\n\t\tself.params['symmname'] = self.params['symmetry']['eman_name']\n\n\t#=====================\n\tdef setRunDir(self):\n\t\trefdata = appiondata.ApRefineRunData.direct_query(self.params['reconid'])\n\t\tif not refdata:\n\t\t\tapDisplay.printError(\"reconid \"+str(self.params['reconid'])+\" does not exist in the database\")\n\t\trefpath = refdata['path']['path']\n\t\trundir = os.path.join(refpath, \"../../satEuler/sat-recon%d/volumes\"%(self.params['reconid']))\n\t\tself.params['rundir'] = os.path.abspath(rundir)\n\n\t#=====================\n\tdef start(self):\n\t\tself.rootname = 
self.params['stackname'].split(\".\")[0]\n\t\tself.params['outputstack'] = os.path.join(self.params['rundir'], self.params['stackname'])\n\n\t\tif os.path.isfile(self.params['outputstack']):\n\t\t\tapFile.removeStack(self.params['outputstack'])\n\t\tif self.params['eotest'] is True:\n\t\t\tself.params['evenstack'] = os.path.splitext(self.params['outputstack'])[0]+'.even.hed'\n\t\t\tif os.path.isfile(self.params['evenstack']):\n\t\t\t\tapFile.removeStack(self.params['evenstack'])\n\t\t\tself.params['oddstack'] = os.path.splitext(self.params['outputstack'])[0]+'.odd.hed'\n\t\t\tif os.path.isfile(self.params['oddstack']):\n\t\t\t\tapFile.removeStack(self.params['oddstack'])\n\n\t\tclasses = self.getClassData(self.params['reconid'], self.params['iter'])\n\t\tstackid = apStack.getStackIdFromRecon(self.params['reconid'])\n\t\tstackdata = apStack.getOnlyStackData(stackid)\n\t\tstackpath = os.path.join(stackdata['path']['path'], stackdata['name'])\n\n\t\tclasskeys = classes.keys()\n\t\tclasskeys.sort()\n\n\t\tclassnum=0\n\t\tkeeplist = self.procKeepList()\n\t\tfinallist = []\n\t\tapDisplay.printMsg(\"Processing \"+str(len(classes))+\" classes\")\n\t\t#loop through classes\n\t\tfor key in classkeys:\n\t\t\tclassnum+=1\n\t\t\tif classnum%10 == 1:\n\t\t\t\tsys.stderr.write(\"\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\\b\")\n\t\t\t\tsys.stderr.write(str(classnum)+\" of \"+(str(len(classkeys))))\n\n\t\t\t# loop through particles in class\n\t\t\tclassfile = self.rootname+\"-class.lst\"\n\t\t\tclassf = open(classfile, 'w')\n\t\t\tclassf.write('#LST\\n')\n\t\t\tnptcls=0\n\t\t\tfor ptcl in classes[key]['particles']:\n\t\t\t\t# translate DB into EMAN\n\t\t\t\tpartnum = ptcl['particle']['particleNumber'] - 1\n\t\t\t\tif partnum in keeplist:\n\t\t\t\t\tif ptcl['mirror']:\n\t\t\t\t\t\tmirror=1\n\t\t\t\t\telse:\n\t\t\t\t\t\tmirror=0\n\t\t\t\t\trot = ptcl['euler3']*math.pi/180.0\n\t\t\t\t\tclassf.write(\n\t\t\t\t\t\t\"%d\\t%s\\t%f,\\t%f,%f,%f,%d\\n\" %\n\t\t\t\t\t\t(partnum, stackpath, 
ptcl['quality_factor'],\n\t\t\t\t\t\trot, ptcl['shiftx'], ptcl['shifty'], mirror))\n\t\t\t\t\tnptcls+=1\n\t\t\t\t\tfinallist.append(partnum)\n\t\t\tclassf.close()\n\n\t\t\tif nptcls<1:\n\t\t\t\tcontinue\n\t\t\tself.makeClassAverages(classfile, self.params['outputstack'], classes[key], self.params['mask'])\n\t\t\tif self.params['eotest'] is True:\n\t\t\t\tself.makeEvenOddClasses(classfile, self.params['outputstack'], classes[key], self.params['mask'])\n\n\t\t\tapFile.removeFile(classfile)\n\n\t\tsys.stderr.write(\"\\n\")\n\t\tfinalfilename = self.rootname+\"-keep.lst\"\n\t\tfinalf = open(finalfilename, 'w')\n\t\tfinallist.sort()\n\t\tfor partnum in finallist:\n\t\t\tfinalf.write('%d\\n' % (partnum,) )\n\t\tfinalf.close()\n\t\tstackstr = str(stackdata.dbid)\n\t\treconstr = str(self.params['reconid'])\n\n\t\t### recon 3d volumes\n\t\tthreedname = os.path.join(self.params['rundir'], self.rootname+\".\"+str(self.params['iter'])+\"a.mrc\")\n\t\temancmd = ( \"make3d \"+self.params['outputstack']+\" out=\"\n\t\t\t+threedname+\" hard=50 sym=\"+self.params['symmname']+\" pad=240 mask=\"+str(self.params['mask'])+\"; echo ''\" )\n\t\t#print emancmd\n\t\tapEMAN.executeEmanCmd(emancmd, verbose=False, showcmd=True, logfile=self.rootname+\"-eman.log\")\n\t\tthreednameb = os.path.join(self.params['rundir'], self.rootname+\".\"+str(self.params['iter'])+\"b.mrc\")\n\t\temancmd = ( \"proc3d \"+threedname+\" \"+threednameb\n\t\t\t+\" apix=1.63 norm=0,1 lp=8 origin=0,0,0 mask=\"+str(self.params['mask'])+\"; echo '' \" )\n\t\tapEMAN.executeEmanCmd(emancmd, verbose=False, showcmd=True, logfile=self.rootname+\"-eman.log\")\n\t\tif self.params['eotest'] is True:\n\t\t\t# even\n\t\t\tevenname = os.path.join(self.params['rundir'], self.rootname+\"-even.\"+str(self.params['iter'])+\"a.mrc\")\n\t\t\tif os.path.isfile(self.params['evenstack']):\n\t\t\t\tevenemancmd = ( \"make3d \"+self.params['evenstack']+\" out=\"\n\t\t\t\t\t+evenname+\" hard=50 sym=\"+self.params['symmname']+\" pad=240 
mask=\"+str(self.params['mask'])+\"; echo ''\" )\n\t\t\t\t#print evenemancmd\n\t\t\t\tapEMAN.executeEmanCmd(evenemancmd, verbose=False, showcmd=True, logfile=self.rootname+\"-eveneman.log\")\n\t\t\telse:\n\t\t\t\tapDisplay.printWarning(\"file \"+self.params['evenstack']+\" does not exist\")\n\n\t\t\t# odd\n\t\t\toddname = os.path.join(self.params['rundir'], self.rootname+\"-odd.\"+str(self.params['iter'])+\"a.mrc\")\n\t\t\tif os.path.isfile(self.params['oddstack']):\n\t\t\t\toddemancmd = ( \"make3d \"+self.params['oddstack']+\" out=\"\n\t\t\t\t\t+oddname+\" hard=50 sym=\"+self.params['symmname']+\" pad=240 mask=\"+str(self.params['mask'])+\"; echo ''\" )\n\t\t\t\t#print oddemancmd\n\t\t\t\tapEMAN.executeEmanCmd(oddemancmd, verbose=False, showcmd=True, logfile=self.rootname+\"-oddeman.log\")\n\t\t\telse:\n\t\t\t\tapDisplay.printWarning(\"file \"+self.params['oddstack']+\" does not exist\")\n\n\t\t\t#eotest\n\t\t\tfscout = os.path.join(self.params['rundir'], self.rootname+\"-fsc.eotest\")\n\t\t\tif os.path.isfile(oddname) and os.path.isfile(evenname):\n\t\t\t\teotestcmd = \"proc3d \"+oddname+\" \"+evenname+\" fsc=\"+fscout\n\t\t\t\tapEMAN.executeEmanCmd(eotestcmd, verbose=True, showcmd=True)\n\t\t\telse:\n\t\t\t\tapDisplay.printWarning(\"could not perform eotest\")\n\n\t\t\tif os.path.isfile(fscout):\n\t\t\t\tres = apRecon.getResolutionFromFSCFile(fscout, 160.0, 1.63)\n\t\t\t\tapDisplay.printColor( (\"resolution: %.5f\" % (res)), \"cyan\")\n\t\t\t\tresfile = self.rootname+\"-res.txt\"\n\t\t\t\tf = open(resfile, 'a')\n\t\t\t\tf.write(\"[ %s ]\\nresolution: %.5f\\n\" % (time.asctime(), res))\n\t\t\t\tf.close()\n\n#=====================\n#=====================\nif __name__ == '__main__':\n\tsatavg = satAverageScript()\n\tsatavg.start()\n\tsatavg.close()\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import inspect
import json
import os
import re
import urllib.error
import urllib.request
from functools import wraps

from ..errors import NotFoundError
class API:
    """Holds endpoint URLs, client version, and the brawler id map."""

    def __init__(self, base_url, version=1):
        """Build endpoint URLs and fetch brawler constants.

        base_url -- override for the API root; when falsy the public
                    starlist.pro API is used
        version  -- API version used when building the default base URL
        """
        self.BASE = base_url or 'https://api.starlist.pro/v{}'.format(version)
        self.PROFILE = self.BASE + '/player'
        self.CLUB = self.BASE + '/club'
        self.LEADERBOARD = self.BASE + '/leaderboards'
        self.EVENTS = self.BASE + '/events'
        self.MISC = self.BASE + '/misc'
        self.BATTLELOG = self.PROFILE + '/battlelog'
        self.CLUB_SEARCH = self.CLUB + '/search'
        self.CONSTANTS = 'https://fourjr.herokuapp.com/bs/constants/'

        # Read the package version string out of the package __init__.py.
        path = os.path.join(os.path.dirname(__file__), os.path.pardir)
        with open(os.path.join(path, '__init__.py')) as f:
            self.VERSION = re.search(r'^__version__ = [\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE).group(1)

        # Fetch the brawler constants; fall back to an empty map when the
        # request fails or the payload is not valid JSON (ValueError also
        # covers json.JSONDecodeError, which the original code let escape).
        try:
            data = json.loads(urllib.request.urlopen(self.CONSTANTS).read())
        except (TypeError, ValueError, urllib.error.HTTPError, urllib.error.URLError):
            self.BRAWLERS = {}
        else:
            if data:
                # Map brawler name -> scId string with a '0' inserted after
                # the first two digits.
                self.BRAWLERS = {b['tID'].lower(): str(b['scId'])[:2] + '0' + str(b['scId'])[2:] for b in data['characters'] if b['tID']}
            else:
                self.BRAWLERS = {}
def bstag(tag):
    """Normalise a player/club tag and validate its characters.

    Strips a leading '#', upper-cases, and maps 'O' to '0'. Raises
    NotFoundError (404) when the tag has fewer than 3 characters or
    contains characters outside the tag alphabet.
    """
    valid_chars = '0289PYLQGRJCUV'
    normalised = tag.strip('#').upper().replace('O', '0')
    if len(normalised) < 3:
        raise NotFoundError('Tag less than 3 characters.', 404)
    bad = [ch for ch in normalised if ch not in valid_chars]
    if bad:
        raise NotFoundError(bad, 404)
    return normalised
def typecasted(func):
    """Decorator that runs each argument through its parameter annotation.

    Positional arguments are converted by calling the matching parameter's
    annotation on them; keyword arguments are passed as (key, value) pairs
    to the annotation of the keyword parameter, which must return a new
    (key, value) pair. Unannotated parameters pass through unchanged.
    Source: https://github.com/cgrok/clashroyale/blob/master/clashroyale/official_api/utils.py#L11
    """
    params = inspect.signature(func).parameters.items()

    @wraps(func)
    def wrapper(*args, **kwargs):
        pending = list(args)
        conv_args = []
        conv_kwargs = {}
        for _, param in params:
            convert = param.annotation
            if convert is inspect._empty:
                # no annotation: identity conversion
                def convert(value):
                    return value
            if param.kind is param.POSITIONAL_OR_KEYWORD:
                if pending:
                    conv_args.append(convert(pending.pop(0)))
            elif param.kind is param.VAR_POSITIONAL:
                conv_args.extend(convert(item) for item in pending)
            else:
                for key, value in kwargs.items():
                    new_key, new_value = convert(key, value)
                    conv_kwargs[new_key] = new_value
        return func(*conv_args, **conv_kwargs)

    return wrapper
|
normal
|
{
"blob_id": "3f3db7e8813f49fe0265e110236b6dc4fed6cd1b",
"index": 7214,
"step-1": "<mask token>\n\n\nclass API:\n\n def __init__(self, base_url, version=1):\n self.BASE = base_url or 'https://api.starlist.pro/v{}'.format(version)\n self.PROFILE = self.BASE + '/player'\n self.CLUB = self.BASE + '/club'\n self.LEADERBOARD = self.BASE + '/leaderboards'\n self.EVENTS = self.BASE + '/events'\n self.MISC = self.BASE + '/misc'\n self.BATTLELOG = self.PROFILE + '/battlelog'\n self.CLUB_SEARCH = self.CLUB + '/search'\n self.CONSTANTS = 'https://fourjr.herokuapp.com/bs/constants/'\n path = os.path.join(os.path.dirname(__file__), os.path.pardir)\n with open(os.path.join(path, '__init__.py')) as f:\n self.VERSION = re.search('^__version__ = [\\\\\\'\"]([^\\\\\\'\"]*)[\\\\\\'\"]'\n , f.read(), re.MULTILINE).group(1)\n try:\n data = json.loads(urllib.request.urlopen(self.CONSTANTS).read())\n except (TypeError, urllib.error.HTTPError, urllib.error.URLError):\n self.BRAWLERS = {}\n else:\n if data:\n self.BRAWLERS = {b['tID'].lower(): (str(b['scId'])[:2] +\n '0' + str(b['scId'])[2:]) for b in data['characters'] if\n b['tID']}\n else:\n self.BRAWLERS = {}\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass API:\n\n def __init__(self, base_url, version=1):\n self.BASE = base_url or 'https://api.starlist.pro/v{}'.format(version)\n self.PROFILE = self.BASE + '/player'\n self.CLUB = self.BASE + '/club'\n self.LEADERBOARD = self.BASE + '/leaderboards'\n self.EVENTS = self.BASE + '/events'\n self.MISC = self.BASE + '/misc'\n self.BATTLELOG = self.PROFILE + '/battlelog'\n self.CLUB_SEARCH = self.CLUB + '/search'\n self.CONSTANTS = 'https://fourjr.herokuapp.com/bs/constants/'\n path = os.path.join(os.path.dirname(__file__), os.path.pardir)\n with open(os.path.join(path, '__init__.py')) as f:\n self.VERSION = re.search('^__version__ = [\\\\\\'\"]([^\\\\\\'\"]*)[\\\\\\'\"]'\n , f.read(), re.MULTILINE).group(1)\n try:\n data = json.loads(urllib.request.urlopen(self.CONSTANTS).read())\n except (TypeError, urllib.error.HTTPError, urllib.error.URLError):\n self.BRAWLERS = {}\n else:\n if data:\n self.BRAWLERS = {b['tID'].lower(): (str(b['scId'])[:2] +\n '0' + str(b['scId'])[2:]) for b in data['characters'] if\n b['tID']}\n else:\n self.BRAWLERS = {}\n\n\n<mask token>\n\n\ndef typecasted(func):\n \"\"\"Decorator that converts arguments via annotations.\n Source: https://github.com/cgrok/clashroyale/blob/master/clashroyale/official_api/utils.py#L11\"\"\"\n signature = inspect.signature(func).parameters.items()\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n args = list(args)\n new_args = []\n new_kwargs = {}\n for _, param in signature:\n converter = param.annotation\n if converter is inspect._empty:\n converter = lambda a: a\n if param.kind is param.POSITIONAL_OR_KEYWORD:\n if args:\n to_conv = args.pop(0)\n new_args.append(converter(to_conv))\n elif param.kind is param.VAR_POSITIONAL:\n for a in args:\n new_args.append(converter(a))\n else:\n for k, v in kwargs.items():\n nk, nv = converter(k, v)\n new_kwargs[nk] = nv\n return func(*new_args, **new_kwargs)\n return wrapper\n",
"step-3": "<mask token>\n\n\nclass API:\n\n def __init__(self, base_url, version=1):\n self.BASE = base_url or 'https://api.starlist.pro/v{}'.format(version)\n self.PROFILE = self.BASE + '/player'\n self.CLUB = self.BASE + '/club'\n self.LEADERBOARD = self.BASE + '/leaderboards'\n self.EVENTS = self.BASE + '/events'\n self.MISC = self.BASE + '/misc'\n self.BATTLELOG = self.PROFILE + '/battlelog'\n self.CLUB_SEARCH = self.CLUB + '/search'\n self.CONSTANTS = 'https://fourjr.herokuapp.com/bs/constants/'\n path = os.path.join(os.path.dirname(__file__), os.path.pardir)\n with open(os.path.join(path, '__init__.py')) as f:\n self.VERSION = re.search('^__version__ = [\\\\\\'\"]([^\\\\\\'\"]*)[\\\\\\'\"]'\n , f.read(), re.MULTILINE).group(1)\n try:\n data = json.loads(urllib.request.urlopen(self.CONSTANTS).read())\n except (TypeError, urllib.error.HTTPError, urllib.error.URLError):\n self.BRAWLERS = {}\n else:\n if data:\n self.BRAWLERS = {b['tID'].lower(): (str(b['scId'])[:2] +\n '0' + str(b['scId'])[2:]) for b in data['characters'] if\n b['tID']}\n else:\n self.BRAWLERS = {}\n\n\ndef bstag(tag):\n tag = tag.strip('#').upper().replace('O', '0')\n allowed = '0289PYLQGRJCUV'\n if len(tag) < 3:\n raise NotFoundError('Tag less than 3 characters.', 404)\n invalid = [c for c in tag if c not in allowed]\n if invalid:\n raise NotFoundError(invalid, 404)\n return tag\n\n\ndef typecasted(func):\n \"\"\"Decorator that converts arguments via annotations.\n Source: https://github.com/cgrok/clashroyale/blob/master/clashroyale/official_api/utils.py#L11\"\"\"\n signature = inspect.signature(func).parameters.items()\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n args = list(args)\n new_args = []\n new_kwargs = {}\n for _, param in signature:\n converter = param.annotation\n if converter is inspect._empty:\n converter = lambda a: a\n if param.kind is param.POSITIONAL_OR_KEYWORD:\n if args:\n to_conv = args.pop(0)\n new_args.append(converter(to_conv))\n elif param.kind is 
param.VAR_POSITIONAL:\n for a in args:\n new_args.append(converter(a))\n else:\n for k, v in kwargs.items():\n nk, nv = converter(k, v)\n new_kwargs[nk] = nv\n return func(*new_args, **new_kwargs)\n return wrapper\n",
"step-4": "import inspect\nimport json\nimport os\nimport re\nimport urllib.request\nfrom functools import wraps\nfrom ..errors import NotFoundError\n\n\nclass API:\n\n def __init__(self, base_url, version=1):\n self.BASE = base_url or 'https://api.starlist.pro/v{}'.format(version)\n self.PROFILE = self.BASE + '/player'\n self.CLUB = self.BASE + '/club'\n self.LEADERBOARD = self.BASE + '/leaderboards'\n self.EVENTS = self.BASE + '/events'\n self.MISC = self.BASE + '/misc'\n self.BATTLELOG = self.PROFILE + '/battlelog'\n self.CLUB_SEARCH = self.CLUB + '/search'\n self.CONSTANTS = 'https://fourjr.herokuapp.com/bs/constants/'\n path = os.path.join(os.path.dirname(__file__), os.path.pardir)\n with open(os.path.join(path, '__init__.py')) as f:\n self.VERSION = re.search('^__version__ = [\\\\\\'\"]([^\\\\\\'\"]*)[\\\\\\'\"]'\n , f.read(), re.MULTILINE).group(1)\n try:\n data = json.loads(urllib.request.urlopen(self.CONSTANTS).read())\n except (TypeError, urllib.error.HTTPError, urllib.error.URLError):\n self.BRAWLERS = {}\n else:\n if data:\n self.BRAWLERS = {b['tID'].lower(): (str(b['scId'])[:2] +\n '0' + str(b['scId'])[2:]) for b in data['characters'] if\n b['tID']}\n else:\n self.BRAWLERS = {}\n\n\ndef bstag(tag):\n tag = tag.strip('#').upper().replace('O', '0')\n allowed = '0289PYLQGRJCUV'\n if len(tag) < 3:\n raise NotFoundError('Tag less than 3 characters.', 404)\n invalid = [c for c in tag if c not in allowed]\n if invalid:\n raise NotFoundError(invalid, 404)\n return tag\n\n\ndef typecasted(func):\n \"\"\"Decorator that converts arguments via annotations.\n Source: https://github.com/cgrok/clashroyale/blob/master/clashroyale/official_api/utils.py#L11\"\"\"\n signature = inspect.signature(func).parameters.items()\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n args = list(args)\n new_args = []\n new_kwargs = {}\n for _, param in signature:\n converter = param.annotation\n if converter is inspect._empty:\n converter = lambda a: a\n if param.kind is 
param.POSITIONAL_OR_KEYWORD:\n if args:\n to_conv = args.pop(0)\n new_args.append(converter(to_conv))\n elif param.kind is param.VAR_POSITIONAL:\n for a in args:\n new_args.append(converter(a))\n else:\n for k, v in kwargs.items():\n nk, nv = converter(k, v)\n new_kwargs[nk] = nv\n return func(*new_args, **new_kwargs)\n return wrapper\n",
"step-5": "import inspect\nimport json\nimport os\nimport re\nimport urllib.request\nfrom functools import wraps\n\nfrom ..errors import NotFoundError\n\n\nclass API:\n def __init__(self, base_url, version=1):\n self.BASE = base_url or 'https://api.starlist.pro/v{}'.format(version)\n self.PROFILE = self.BASE + '/player'\n self.CLUB = self.BASE + '/club'\n self.LEADERBOARD = self.BASE + '/leaderboards'\n self.EVENTS = self.BASE + '/events'\n self.MISC = self.BASE + '/misc'\n self.BATTLELOG = self.PROFILE + '/battlelog'\n self.CLUB_SEARCH = self.CLUB + '/search'\n self.CONSTANTS = 'https://fourjr.herokuapp.com/bs/constants/'\n # self.BRAWLERS = [\n # 'shelly', 'nita', 'colt', 'bull', 'jessie', # league reward 0-500\n # 'brock', 'dynamike', 'bo', 'tick', '8-bit' # league reward 1000+\n # 'el primo', 'barley', 'poco', 'rosa', # rare\n # 'rico', 'penny', 'darryl', 'carl', # super rare\n # 'frank', 'pam', 'piper', 'bibi', # epic\n # 'mortis', 'tara', 'gene', # mythic\n # 'spike', 'crow', 'leon' # legendary\n # ]\n\n path = os.path.join(os.path.dirname(__file__), os.path.pardir)\n with open(os.path.join(path, '__init__.py')) as f:\n self.VERSION = re.search(r'^__version__ = [\\'\"]([^\\'\"]*)[\\'\"]', f.read(), re.MULTILINE).group(1)\n\n try:\n data = json.loads(urllib.request.urlopen(self.CONSTANTS).read())\n except (TypeError, urllib.error.HTTPError, urllib.error.URLError):\n self.BRAWLERS = {}\n else:\n if data:\n self.BRAWLERS = {b['tID'].lower(): str(b['scId'])[:2] + '0' + str(b['scId'])[2:] for b in data['characters'] if b['tID']}\n else:\n self.BRAWLERS = {}\n\n\ndef bstag(tag):\n tag = tag.strip('#').upper().replace('O', '0')\n allowed = '0289PYLQGRJCUV'\n if len(tag) < 3:\n raise NotFoundError('Tag less than 3 characters.', 404)\n invalid = [c for c in tag if c not in allowed]\n if invalid:\n raise NotFoundError(invalid, 404)\n return tag\n\ndef typecasted(func):\n '''Decorator that converts arguments via annotations.\n Source: 
https://github.com/cgrok/clashroyale/blob/master/clashroyale/official_api/utils.py#L11'''\n signature = inspect.signature(func).parameters.items()\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n args = list(args)\n new_args = []\n new_kwargs = {}\n for _, param in signature:\n converter = param.annotation\n if converter is inspect._empty:\n converter = lambda a: a # do nothing\n if param.kind is param.POSITIONAL_OR_KEYWORD:\n if args:\n to_conv = args.pop(0)\n new_args.append(converter(to_conv))\n elif param.kind is param.VAR_POSITIONAL:\n for a in args:\n new_args.append(converter(a))\n else:\n for k, v in kwargs.items():\n nk, nv = converter(k, v)\n new_kwargs[nk] = nv\n return func(*new_args, **new_kwargs)\n return wrapper\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# Generated by Django 2.0.4 on 2018-06-09 05:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Migration 0005 for the ``lists`` app.

    * orders ``Todo`` rows newest-first via ``-created_at``, and
    * adds a ``content`` text field with an empty-string default.
    """

    dependencies = [('lists', '0004_auto_20180608_1835')]

    operations = [
        migrations.AlterModelOptions(
            name='todo', options={'ordering': ('-created_at',)}),
        migrations.AddField(
            model_name='todo', name='content',
            field=models.TextField(default='', max_length=500)),
    ]
|
normal
|
{
"blob_id": "b27913d2cd29f174d79652af6da2846e397373fc",
"index": 1549,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('lists', '0004_auto_20180608_1835')]\n operations = [migrations.AlterModelOptions(name='todo', options={\n 'ordering': ('-created_at',)}), migrations.AddField(model_name=\n 'todo', name='content', field=models.TextField(default='',\n max_length=500))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('lists', '0004_auto_20180608_1835')]\n operations = [migrations.AlterModelOptions(name='todo', options={\n 'ordering': ('-created_at',)}), migrations.AddField(model_name=\n 'todo', name='content', field=models.TextField(default='',\n max_length=500))]\n",
"step-5": "# Generated by Django 2.0.4 on 2018-06-09 05:09\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('lists', '0004_auto_20180608_1835'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='todo',\n options={'ordering': ('-created_at',)},\n ),\n migrations.AddField(\n model_name='todo',\n name='content',\n field=models.TextField(default='', max_length=500),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Jython/Walk_comprehension.py
import os


def _mentions_interpreter(path):
    """Return True if the text of the file at *path* contains 'PythonInterpreter'."""
    # The original did open(path).read() inline, which never closes the
    # file handle; the context manager closes it deterministically.
    with open(path) as fp:
        return "PythonInterpreter" in fp.read()


# All .java files under the current directory that mention PythonInterpreter.
restFiles = [os.path.join(dirpath, fname)
             for dirpath, _dirnames, filenames in os.walk(".")
             for fname in filenames
             if fname.endswith(".java")
             and _mentions_interpreter(os.path.join(dirpath, fname))]
for r in restFiles:
    print(r)
|
normal
|
{
"blob_id": "61085eecc8fd0b70bc11e5a85c3958ba3b905eaf",
"index": 3118,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor r in restFiles:\n print(r)\n",
"step-3": "<mask token>\nrestFiles = [os.path.join(d[0], f) for d in os.walk('.') for f in d[2] if f\n .endswith('.java') and 'PythonInterpreter' in open(os.path.join(d[0], f\n )).read()]\nfor r in restFiles:\n print(r)\n",
"step-4": "import os\nrestFiles = [os.path.join(d[0], f) for d in os.walk('.') for f in d[2] if f\n .endswith('.java') and 'PythonInterpreter' in open(os.path.join(d[0], f\n )).read()]\nfor r in restFiles:\n print(r)\n",
"step-5": "# Jython/Walk_comprehension.py\nimport os\nrestFiles = [os.path.join(d[0], f) for d in os.walk(\".\")\n for f in d[2] if f.endswith(\".java\") and \n \"PythonInterpreter\" in open(os.path.join(d[0], f)).read()]\nfor r in restFiles:\n print(r)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy as np

# Read the sample adjacency matrix from file.
# Format of datapri.txt: first line is n (number of vertices); the rest
# are whitespace-separated integers giving the n*n matrix row by row.
with open('datapri.txt', 'r') as the_file:
    n = int(the_file.readline())
    temp = [int(val) for val in the_file.read().split()]

# Build the matrix directly instead of allocating np.random.rand(n, n)
# and overwriting every cell.  dtype=float matches the original array's
# dtype, so the weights stored below stay floats as before.
arr = np.array(temp, dtype=float).reshape(n, n)

# ke[i]   : list of vertices adjacent to vertex i
# trongso[i]: the corresponding edge weights (parallel to ke[i])
ke = [[] for _ in range(n)]
trongso = [[] for _ in range(n)]
for i in range(n):
    for j in range(n):
        if arr[i, j] != 0:
            ke[i].append(j)
            trongso[i].append(arr[i, j])
print(trongso[1])
|
normal
|
{
"blob_id": "aa801bc8398cdf69a15d04188dd8429e4624150e",
"index": 5574,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor val in theFile.read().split():\n temp.append(int(val))\ntheFile.close()\n<mask token>\nfor i in range(n):\n for j in range(n):\n arr[i, j] = temp[k]\n k = k + 1\n<mask token>\nfor i in range(n):\n ke.append([])\n<mask token>\nfor i in range(n):\n trongso.append([])\nfor i in range(n):\n for j in range(n):\n if arr[i, j] != 0:\n ke[i].append(j)\n trongso[i].append(arr[i, j])\nprint(trongso[1])\n",
"step-3": "<mask token>\ntheFile = open('datapri.txt', 'r')\ntemp = []\nn = int(theFile.readline().format())\nfor val in theFile.read().split():\n temp.append(int(val))\ntheFile.close()\narr = np.random.rand(n, n)\nk = 0\nfor i in range(n):\n for j in range(n):\n arr[i, j] = temp[k]\n k = k + 1\nke = []\nfor i in range(n):\n ke.append([])\ntrongso = []\nfor i in range(n):\n trongso.append([])\nfor i in range(n):\n for j in range(n):\n if arr[i, j] != 0:\n ke[i].append(j)\n trongso[i].append(arr[i, j])\nprint(trongso[1])\n",
"step-4": "import numpy as np\ntheFile = open('datapri.txt', 'r')\ntemp = []\nn = int(theFile.readline().format())\nfor val in theFile.read().split():\n temp.append(int(val))\ntheFile.close()\narr = np.random.rand(n, n)\nk = 0\nfor i in range(n):\n for j in range(n):\n arr[i, j] = temp[k]\n k = k + 1\nke = []\nfor i in range(n):\n ke.append([])\ntrongso = []\nfor i in range(n):\n trongso.append([])\nfor i in range(n):\n for j in range(n):\n if arr[i, j] != 0:\n ke[i].append(j)\n trongso[i].append(arr[i, j])\nprint(trongso[1])\n",
"step-5": "import numpy as np\n#read data from file\n#read data from file\n\ntheFile = open('datapri.txt','r')\ntemp = []\n#n la so phan tu cua mang mau\nn = int(theFile.readline().format())\nfor val in theFile.read().split():\n temp.append(int(val))\ntheFile.close()\n\narr = np.random.rand(n,n)\nk = 0\nfor i in range(n):\n for j in range(n):\n arr[i,j] = temp[k]\n k = k+1\n# print(arr)\n#tao 1 mang de chua ma tran cac dinh ke\nke = []\nfor i in range(n):\n ke.append([])\ntrongso = []\nfor i in range(n):\n trongso.append([])\n#dua cac dinh vao mang ke\nfor i in range(n):\n for j in range(n):\n if(arr[i,j] != 0):\n ke[i].append(j)\n trongso[i].append(arr[i,j])\nprint(trongso[1])\n# available = [False for i in range(n)]\n# vertex = [0 for i in range(n)]\n#\n# def CorlorGraph():\n# #khoi tao dinh dau tien duoc to mau dau tien\n# vertex[0] = 0\n#\n# #khoi tao cac dinh con lai chua duoc to mau\n# for i in range(1,n):\n# vertex[i] = -1\n#\n# #to mau cac dinh con lai\n# for i in range(1,n):\n# for j in (ke[i]):\n# if(vertex[j] != -1):\n# available[vertex[j]] = True\n#\n# crz = 0\n# for k in range(n):\n# if (available[k] == False):\n# break\n# crz = crz + 1\n# vertex[i] = crz\n# for j in (ke[i]):\n# if (vertex[j] != -1):\n# available[vertex[j]] = False\n# for i in range(n):\n# print(\"ke\",i,\"-\",ke[i])\n# CorlorGraph()\n# print(\"Cac dinh da duoc to mau: \")\n# for i in range(n):\n# print(i,vertex[i])\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import datetime
from flask import request
from flask_babel import _
from markupsafe import escape
from app import app
from app.data_access.audit_log_controller import create_audit_log_confirmation_entry
from app.data_access.user_controller import user_exists, create_user
from app.data_access.user_controller_errors import UserAlreadyExistsError
from app.elster_client import elster_client
from app.elster_client.elster_errors import ElsterProcessNotSuccessful
from app.forms.flows.multistep_flow import MultiStepFlow
from app.forms.steps.unlock_code_request_steps import UnlockCodeRequestInputStep, UnlockCodeRequestSuccessStep, \
UnlockCodeRequestFailureStep
class UnlockCodeRequestMultiStepFlow(MultiStepFlow):
    """Multi-step form flow for requesting an Elster unlock code.

    Steps: input form -> success or failure.  On a valid POST of the
    input step, each user confirmation is written to the audit log, an
    unlock code is requested from Elster, and the user is registered;
    any Elster failure or pre-existing user routes to the failure step.
    """

    # Pre-filled form data for the input step (used in debug/testing mode
    # by the MultiStepFlow machinery — assumed; confirm in MultiStepFlow).
    _DEBUG_DATA = (
        UnlockCodeRequestInputStep,
        {
            'idnr': '04452397687',
            'dob': datetime.date(1985, 1, 1),
            'registration_confirm_data_privacy': True,
            'registration_confirm_terms_of_service': True,
            'registration_confirm_incomes': True,
            'registration_confirm_e_data': True,
        }
    )

    def __init__(self, endpoint):
        """Initialise the flow with its three steps mounted at *endpoint*."""
        super(UnlockCodeRequestMultiStepFlow, self).__init__(
            title=_('form.auth-request.title'),
            steps=[
                UnlockCodeRequestInputStep,
                UnlockCodeRequestFailureStep,
                UnlockCodeRequestSuccessStep
            ],
            endpoint=endpoint,
        )

    # TODO: Use inheritance to clean up this method
    def _handle_specifics_for_step(self, step, render_info, stored_data):
        """Apply per-step behaviour on top of the generic step handling.

        :param step: the current step instance being rendered/processed.
        :param render_info: render metadata (next/prev URLs, form, labels).
        :param stored_data: accumulated form data for the whole flow.
        :returns: the (possibly modified) ``(render_info, stored_data)`` pair.
        """
        render_info, stored_data = super(UnlockCodeRequestMultiStepFlow, self)._handle_specifics_for_step(step, render_info, stored_data)

        if isinstance(step, UnlockCodeRequestInputStep):
            render_info.additional_info['next_button_label'] = _('form.register')
            if request.method == 'POST' and render_info.form.validate():
                # Persist each confirmation checkbox to the audit log before
                # attempting registration.
                create_audit_log_confirmation_entry('Confirmed registration data privacy', request.remote_addr,
                                                    stored_data['idnr'], 'registration_confirm_data_privacy',
                                                    stored_data['registration_confirm_data_privacy'])
                create_audit_log_confirmation_entry('Confirmed registration terms of service', request.remote_addr,
                                                    stored_data['idnr'], 'registration_confirm_terms_of_service',
                                                    stored_data['registration_confirm_terms_of_service'])
                create_audit_log_confirmation_entry('Confirmed registration incomes', request.remote_addr,
                                                    stored_data['idnr'], 'registration_confirm_incomes',
                                                    stored_data['registration_confirm_incomes'])
                create_audit_log_confirmation_entry('Confirmed registration edata', request.remote_addr,
                                                    stored_data['idnr'], 'registration_confirm_e_data',
                                                    stored_data['registration_confirm_e_data'])
                try:
                    self._register_user(stored_data)
                    # prevent going to failure page as in normal flow
                    render_info.next_url = self.url_for_step(UnlockCodeRequestSuccessStep.name)
                except (UserAlreadyExistsError, ElsterProcessNotSuccessful):
                    app.logger.info("Could not request unlock code for user", exc_info=True)
                    pass  # go to failure step
        elif isinstance(step, UnlockCodeRequestFailureStep):
            # Dead end: no next step from the failure page.
            render_info.next_url = None
        elif isinstance(step, UnlockCodeRequestSuccessStep):
            render_info.prev_url = self.url_for_step(UnlockCodeRequestInputStep.name)

        return render_info, stored_data

    @staticmethod
    def _register_user(request_form):
        """
        This method requests an unlock code with Elster for not registered users. If successful
        the users will be registered.

        :param request_form: The form attribute of the request. It should contain an idnr and a dob element.
        :raises UserAlreadyExistsError: if a user with this idnr is already registered.
        """
        idnr = request_form['idnr']

        if user_exists(idnr):
            raise UserAlreadyExistsError(idnr)

        # Only create the local user after Elster accepted the request, so a
        # failed Elster call leaves no dangling registration.
        response = elster_client.send_unlock_code_request_with_elster(request_form, request.remote_addr)
        request_id = escape(response['elster_request_id'])

        create_user(idnr, request_form['dob'].strftime("%d.%m.%Y"), request_id)
|
normal
|
{
"blob_id": "cddb16a305f74eb1a3f2854208f8508c4a7a8953",
"index": 649,
"step-1": "<mask token>\n\n\nclass UnlockCodeRequestMultiStepFlow(MultiStepFlow):\n <mask token>\n\n def __init__(self, endpoint):\n super(UnlockCodeRequestMultiStepFlow, self).__init__(title=_(\n 'form.auth-request.title'), steps=[UnlockCodeRequestInputStep,\n UnlockCodeRequestFailureStep, UnlockCodeRequestSuccessStep],\n endpoint=endpoint)\n <mask token>\n\n @staticmethod\n def _register_user(request_form):\n \"\"\"\n This method requests an unlock code with Elster for not registered users. If successful\n the users will be registered.\n\n :param request_form: The form attribute of the request. It should contain an idnr and a dob element.\n \"\"\"\n idnr = request_form['idnr']\n if user_exists(idnr):\n raise UserAlreadyExistsError(idnr)\n response = elster_client.send_unlock_code_request_with_elster(\n request_form, request.remote_addr)\n request_id = escape(response['elster_request_id'])\n create_user(idnr, request_form['dob'].strftime('%d.%m.%Y'), request_id)\n",
"step-2": "<mask token>\n\n\nclass UnlockCodeRequestMultiStepFlow(MultiStepFlow):\n <mask token>\n\n def __init__(self, endpoint):\n super(UnlockCodeRequestMultiStepFlow, self).__init__(title=_(\n 'form.auth-request.title'), steps=[UnlockCodeRequestInputStep,\n UnlockCodeRequestFailureStep, UnlockCodeRequestSuccessStep],\n endpoint=endpoint)\n\n def _handle_specifics_for_step(self, step, render_info, stored_data):\n render_info, stored_data = super(UnlockCodeRequestMultiStepFlow, self\n )._handle_specifics_for_step(step, render_info, stored_data)\n if isinstance(step, UnlockCodeRequestInputStep):\n render_info.additional_info['next_button_label'] = _(\n 'form.register')\n if request.method == 'POST' and render_info.form.validate():\n create_audit_log_confirmation_entry(\n 'Confirmed registration data privacy', request.\n remote_addr, stored_data['idnr'],\n 'registration_confirm_data_privacy', stored_data[\n 'registration_confirm_data_privacy'])\n create_audit_log_confirmation_entry(\n 'Confirmed registration terms of service', request.\n remote_addr, stored_data['idnr'],\n 'registration_confirm_terms_of_service', stored_data[\n 'registration_confirm_terms_of_service'])\n create_audit_log_confirmation_entry(\n 'Confirmed registration incomes', request.remote_addr,\n stored_data['idnr'], 'registration_confirm_incomes',\n stored_data['registration_confirm_incomes'])\n create_audit_log_confirmation_entry(\n 'Confirmed registration edata', request.remote_addr,\n stored_data['idnr'], 'registration_confirm_e_data',\n stored_data['registration_confirm_e_data'])\n try:\n self._register_user(stored_data)\n render_info.next_url = self.url_for_step(\n UnlockCodeRequestSuccessStep.name)\n except (UserAlreadyExistsError, ElsterProcessNotSuccessful):\n app.logger.info('Could not request unlock code for user',\n exc_info=True)\n pass\n elif isinstance(step, UnlockCodeRequestFailureStep):\n render_info.next_url = None\n elif isinstance(step, UnlockCodeRequestSuccessStep):\n 
render_info.prev_url = self.url_for_step(UnlockCodeRequestInputStep\n .name)\n return render_info, stored_data\n\n @staticmethod\n def _register_user(request_form):\n \"\"\"\n This method requests an unlock code with Elster for not registered users. If successful\n the users will be registered.\n\n :param request_form: The form attribute of the request. It should contain an idnr and a dob element.\n \"\"\"\n idnr = request_form['idnr']\n if user_exists(idnr):\n raise UserAlreadyExistsError(idnr)\n response = elster_client.send_unlock_code_request_with_elster(\n request_form, request.remote_addr)\n request_id = escape(response['elster_request_id'])\n create_user(idnr, request_form['dob'].strftime('%d.%m.%Y'), request_id)\n",
"step-3": "<mask token>\n\n\nclass UnlockCodeRequestMultiStepFlow(MultiStepFlow):\n _DEBUG_DATA = UnlockCodeRequestInputStep, {'idnr': '04452397687', 'dob':\n datetime.date(1985, 1, 1), 'registration_confirm_data_privacy': \n True, 'registration_confirm_terms_of_service': True,\n 'registration_confirm_incomes': True, 'registration_confirm_e_data':\n True}\n\n def __init__(self, endpoint):\n super(UnlockCodeRequestMultiStepFlow, self).__init__(title=_(\n 'form.auth-request.title'), steps=[UnlockCodeRequestInputStep,\n UnlockCodeRequestFailureStep, UnlockCodeRequestSuccessStep],\n endpoint=endpoint)\n\n def _handle_specifics_for_step(self, step, render_info, stored_data):\n render_info, stored_data = super(UnlockCodeRequestMultiStepFlow, self\n )._handle_specifics_for_step(step, render_info, stored_data)\n if isinstance(step, UnlockCodeRequestInputStep):\n render_info.additional_info['next_button_label'] = _(\n 'form.register')\n if request.method == 'POST' and render_info.form.validate():\n create_audit_log_confirmation_entry(\n 'Confirmed registration data privacy', request.\n remote_addr, stored_data['idnr'],\n 'registration_confirm_data_privacy', stored_data[\n 'registration_confirm_data_privacy'])\n create_audit_log_confirmation_entry(\n 'Confirmed registration terms of service', request.\n remote_addr, stored_data['idnr'],\n 'registration_confirm_terms_of_service', stored_data[\n 'registration_confirm_terms_of_service'])\n create_audit_log_confirmation_entry(\n 'Confirmed registration incomes', request.remote_addr,\n stored_data['idnr'], 'registration_confirm_incomes',\n stored_data['registration_confirm_incomes'])\n create_audit_log_confirmation_entry(\n 'Confirmed registration edata', request.remote_addr,\n stored_data['idnr'], 'registration_confirm_e_data',\n stored_data['registration_confirm_e_data'])\n try:\n self._register_user(stored_data)\n render_info.next_url = self.url_for_step(\n UnlockCodeRequestSuccessStep.name)\n except (UserAlreadyExistsError, 
ElsterProcessNotSuccessful):\n app.logger.info('Could not request unlock code for user',\n exc_info=True)\n pass\n elif isinstance(step, UnlockCodeRequestFailureStep):\n render_info.next_url = None\n elif isinstance(step, UnlockCodeRequestSuccessStep):\n render_info.prev_url = self.url_for_step(UnlockCodeRequestInputStep\n .name)\n return render_info, stored_data\n\n @staticmethod\n def _register_user(request_form):\n \"\"\"\n This method requests an unlock code with Elster for not registered users. If successful\n the users will be registered.\n\n :param request_form: The form attribute of the request. It should contain an idnr and a dob element.\n \"\"\"\n idnr = request_form['idnr']\n if user_exists(idnr):\n raise UserAlreadyExistsError(idnr)\n response = elster_client.send_unlock_code_request_with_elster(\n request_form, request.remote_addr)\n request_id = escape(response['elster_request_id'])\n create_user(idnr, request_form['dob'].strftime('%d.%m.%Y'), request_id)\n",
"step-4": "import datetime\nfrom flask import request\nfrom flask_babel import _\nfrom markupsafe import escape\nfrom app import app\nfrom app.data_access.audit_log_controller import create_audit_log_confirmation_entry\nfrom app.data_access.user_controller import user_exists, create_user\nfrom app.data_access.user_controller_errors import UserAlreadyExistsError\nfrom app.elster_client import elster_client\nfrom app.elster_client.elster_errors import ElsterProcessNotSuccessful\nfrom app.forms.flows.multistep_flow import MultiStepFlow\nfrom app.forms.steps.unlock_code_request_steps import UnlockCodeRequestInputStep, UnlockCodeRequestSuccessStep, UnlockCodeRequestFailureStep\n\n\nclass UnlockCodeRequestMultiStepFlow(MultiStepFlow):\n _DEBUG_DATA = UnlockCodeRequestInputStep, {'idnr': '04452397687', 'dob':\n datetime.date(1985, 1, 1), 'registration_confirm_data_privacy': \n True, 'registration_confirm_terms_of_service': True,\n 'registration_confirm_incomes': True, 'registration_confirm_e_data':\n True}\n\n def __init__(self, endpoint):\n super(UnlockCodeRequestMultiStepFlow, self).__init__(title=_(\n 'form.auth-request.title'), steps=[UnlockCodeRequestInputStep,\n UnlockCodeRequestFailureStep, UnlockCodeRequestSuccessStep],\n endpoint=endpoint)\n\n def _handle_specifics_for_step(self, step, render_info, stored_data):\n render_info, stored_data = super(UnlockCodeRequestMultiStepFlow, self\n )._handle_specifics_for_step(step, render_info, stored_data)\n if isinstance(step, UnlockCodeRequestInputStep):\n render_info.additional_info['next_button_label'] = _(\n 'form.register')\n if request.method == 'POST' and render_info.form.validate():\n create_audit_log_confirmation_entry(\n 'Confirmed registration data privacy', request.\n remote_addr, stored_data['idnr'],\n 'registration_confirm_data_privacy', stored_data[\n 'registration_confirm_data_privacy'])\n create_audit_log_confirmation_entry(\n 'Confirmed registration terms of service', request.\n remote_addr, 
stored_data['idnr'],\n 'registration_confirm_terms_of_service', stored_data[\n 'registration_confirm_terms_of_service'])\n create_audit_log_confirmation_entry(\n 'Confirmed registration incomes', request.remote_addr,\n stored_data['idnr'], 'registration_confirm_incomes',\n stored_data['registration_confirm_incomes'])\n create_audit_log_confirmation_entry(\n 'Confirmed registration edata', request.remote_addr,\n stored_data['idnr'], 'registration_confirm_e_data',\n stored_data['registration_confirm_e_data'])\n try:\n self._register_user(stored_data)\n render_info.next_url = self.url_for_step(\n UnlockCodeRequestSuccessStep.name)\n except (UserAlreadyExistsError, ElsterProcessNotSuccessful):\n app.logger.info('Could not request unlock code for user',\n exc_info=True)\n pass\n elif isinstance(step, UnlockCodeRequestFailureStep):\n render_info.next_url = None\n elif isinstance(step, UnlockCodeRequestSuccessStep):\n render_info.prev_url = self.url_for_step(UnlockCodeRequestInputStep\n .name)\n return render_info, stored_data\n\n @staticmethod\n def _register_user(request_form):\n \"\"\"\n This method requests an unlock code with Elster for not registered users. If successful\n the users will be registered.\n\n :param request_form: The form attribute of the request. It should contain an idnr and a dob element.\n \"\"\"\n idnr = request_form['idnr']\n if user_exists(idnr):\n raise UserAlreadyExistsError(idnr)\n response = elster_client.send_unlock_code_request_with_elster(\n request_form, request.remote_addr)\n request_id = escape(response['elster_request_id'])\n create_user(idnr, request_form['dob'].strftime('%d.%m.%Y'), request_id)\n",
"step-5": "import datetime\n\nfrom flask import request\nfrom flask_babel import _\nfrom markupsafe import escape\n\nfrom app import app\nfrom app.data_access.audit_log_controller import create_audit_log_confirmation_entry\nfrom app.data_access.user_controller import user_exists, create_user\nfrom app.data_access.user_controller_errors import UserAlreadyExistsError\nfrom app.elster_client import elster_client\nfrom app.elster_client.elster_errors import ElsterProcessNotSuccessful\nfrom app.forms.flows.multistep_flow import MultiStepFlow\nfrom app.forms.steps.unlock_code_request_steps import UnlockCodeRequestInputStep, UnlockCodeRequestSuccessStep, \\\n UnlockCodeRequestFailureStep\n\n\nclass UnlockCodeRequestMultiStepFlow(MultiStepFlow):\n\n _DEBUG_DATA = (\n UnlockCodeRequestInputStep,\n {\n 'idnr': '04452397687',\n 'dob': datetime.date(1985, 1, 1),\n 'registration_confirm_data_privacy': True,\n 'registration_confirm_terms_of_service': True,\n 'registration_confirm_incomes': True,\n 'registration_confirm_e_data': True,\n }\n )\n\n def __init__(self, endpoint):\n super(UnlockCodeRequestMultiStepFlow, self).__init__(\n title=_('form.auth-request.title'),\n steps=[\n UnlockCodeRequestInputStep,\n UnlockCodeRequestFailureStep,\n UnlockCodeRequestSuccessStep\n ],\n endpoint=endpoint,\n )\n\n # TODO: Use inheritance to clean up this method\n def _handle_specifics_for_step(self, step, render_info, stored_data):\n render_info, stored_data = super(UnlockCodeRequestMultiStepFlow, self)._handle_specifics_for_step(step, render_info, stored_data)\n\n if isinstance(step, UnlockCodeRequestInputStep):\n render_info.additional_info['next_button_label'] = _('form.register')\n if request.method == 'POST' and render_info.form.validate():\n create_audit_log_confirmation_entry('Confirmed registration data privacy', request.remote_addr,\n stored_data['idnr'], 'registration_confirm_data_privacy',\n stored_data['registration_confirm_data_privacy'])\n 
create_audit_log_confirmation_entry('Confirmed registration terms of service', request.remote_addr,\n stored_data['idnr'], 'registration_confirm_terms_of_service',\n stored_data['registration_confirm_terms_of_service'])\n create_audit_log_confirmation_entry('Confirmed registration incomes', request.remote_addr,\n stored_data['idnr'], 'registration_confirm_incomes',\n stored_data['registration_confirm_incomes'])\n create_audit_log_confirmation_entry('Confirmed registration edata', request.remote_addr,\n stored_data['idnr'], 'registration_confirm_e_data',\n stored_data['registration_confirm_e_data'])\n try:\n self._register_user(stored_data)\n # prevent going to failure page as in normal flow\n render_info.next_url = self.url_for_step(UnlockCodeRequestSuccessStep.name)\n except (UserAlreadyExistsError, ElsterProcessNotSuccessful):\n app.logger.info(\"Could not request unlock code for user\", exc_info=True)\n pass # go to failure step\n elif isinstance(step, UnlockCodeRequestFailureStep):\n render_info.next_url = None\n elif isinstance(step, UnlockCodeRequestSuccessStep):\n render_info.prev_url = self.url_for_step(UnlockCodeRequestInputStep.name)\n\n return render_info, stored_data\n\n @staticmethod\n def _register_user(request_form):\n \"\"\"\n This method requests an unlock code with Elster for not registered users. If successful\n the users will be registered.\n\n :param request_form: The form attribute of the request. It should contain an idnr and a dob element.\n \"\"\"\n idnr = request_form['idnr']\n\n if user_exists(idnr):\n raise UserAlreadyExistsError(idnr)\n\n response = elster_client.send_unlock_code_request_with_elster(request_form, request.remote_addr)\n request_id = escape(response['elster_request_id'])\n\n create_user(idnr, request_form['dob'].strftime(\"%d.%m.%Y\"), request_id)\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from django.db import models
class Course(models.Model):
    """Database model for a course entry.

    Fields:
        cid: course identifier string (max 100 chars) — presumably an
            external/course-catalogue id; confirm against the data source.
        title: course title (max 500 chars).
        link: link to the course (max 300 chars) — stored as a plain
            CharField, so no URL validation is applied here.
    """

    cid = models.CharField(max_length=100)
    title = models.CharField(max_length=500)
    link = models.CharField(max_length=300)
|
normal
|
{
"blob_id": "226fc85dc8b6d549fddef0ca43ad629875ac0717",
"index": 3080,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Course(models.Model):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Course(models.Model):\n cid = models.CharField(max_length=100)\n title = models.CharField(max_length=500)\n link = models.CharField(max_length=300)\n",
"step-4": "from django.db import models\n\n\nclass Course(models.Model):\n cid = models.CharField(max_length=100)\n title = models.CharField(max_length=500)\n link = models.CharField(max_length=300)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
from dkfileutils.path import Path
def line_endings(fname):
    """Return the set of line terminators used in the file *fname*.

    :param fname: path to the file to inspect (read in binary mode).
    :return: a set containing any of ``b'\\r'``, ``b'\\n'``, ``b'\\r\\n'``
        that terminate at least one line; a final line with no terminator
        contributes nothing.
    """
    # Use a context manager so the handle is closed deterministically
    # (the original leaked the open() handle).
    with open(fname, 'rb') as fp:
        endings = {line[-2:] for line in fp.readlines()}
    res = set()
    for e in endings:
        # Check the two-byte terminator first; a lone b'\r' can only occur
        # when the file has classic-Mac endings (readlines splits on \n).
        if e.endswith(b'\r\n'):
            res.add(b'\r\n')
        elif e.endswith(b'\r'):
            res.add(b'\r')
        elif e.endswith(b'\n'):
            res.add(b'\n')
    return res
def chomp(s):
    """Strip a single trailing line terminator (CRLF, CR, or LF) from *s*.

    Removes at most one terminator; anything else is returned unchanged.
    """
    tail2, tail1 = s[-2:], s[-1:]
    if tail2 == b'\r\n':
        return s[:-2]
    if tail1 in (b'\r', b'\n'):
        return s[:-1]
    return s
def fix_line_endings(fname, eol=b'\n'):
    """Rewrite *fname* in place so that every line ends with *eol*.

    Each existing terminator (CR, LF, or CRLF) is replaced; a final line
    without a terminator gains one.

    :param fname: path to the file to normalise.
    :param eol: replacement terminator as bytes (default LF).
    """
    # Read with a context manager so the read handle is closed
    # (the original leaked the open() used for reading).
    with open(fname, 'rb') as fp:
        lines = [chomp(line) for line in fp.readlines()]
    with open(fname, 'wb') as fp:
        for line in lines:
            fp.write(line + eol)
def copy(ctx, source, dest, force=False):
    """Copy ``source`` to ``dest``, which can be a file or directory.

    Shells out to the platform copy command through ``ctx.run`` after
    normalising both paths.  If the paths are already equal, nothing is
    executed.

    :param ctx: invoke-style context providing ``.run(cmd)``.
    :param force: overwrite without prompting (``/Y`` on Windows,
        ``--force`` elsewhere).
    :return: the (normalised) destination path.
    """
    if source == dest:
        return dest

    src = os.path.normcase(os.path.normpath(str(source)))
    dst = os.path.normcase(os.path.normpath(str(dest)))

    if sys.platform == 'win32':
        flags = " /Y" if force else ""
        ctx.run('copy {} {} {}'.format(flags, src, dst))
    else:  # pragma: nocover
        flags = " --force" if force else ""
        ctx.run('cp {} {} {}'.format(flags, src, dst))
    return dst
def concat(ctx, dest, *sources, **kw):
    """Concatenate ``sources`` into ``dest`` and normalise its line endings.

    Creates ``dest``'s directory if needed, appends each source file's
    contents in order (each followed by a newline), then rewrites the
    result with uniform LF endings.

    :param ctx: invoke-style context (unused here, kept for API symmetry
        with ``copy``).
    :param dest: path of the output file.
    :param sources: paths of the input files, concatenated in order.
    :keyword force: accepted for backward compatibility but unused.
    :return: the path to ``dest``.
    """
    kw.pop('force', False)  # accepted but unused; kept for API compatibility
    placement = Path(dest).dirname()
    placement.makedirs()
    with open(dest, 'w') as out:
        print("Opened:", dest, "for writing.")
        for s in sources:
            with open(s, 'r') as inp:
                print(" appending:", s)
                out.writelines(inp.readlines())
                out.write('\n')
    fix_line_endings(dest)
    return dest
|
normal
|
{
"blob_id": "be279fe44b0d52c9d473e08d8b9c28d5b6386b45",
"index": 5184,
"step-1": "<mask token>\n\n\ndef line_endings(fname):\n \"\"\"Return all line endings in the file.\n \"\"\"\n _endings = {line[-2:] for line in open(fname, 'rb').readlines()}\n res = set()\n for e in _endings:\n if e.endswith(b'\\r'):\n res.add(b'\\r')\n elif e.endswith(b'\\r\\n'):\n res.add(b'\\r\\n')\n elif e.endswith(b'\\n'):\n res.add(b'\\n')\n return res\n\n\n<mask token>\n\n\ndef fix_line_endings(fname, eol=b'\\n'):\n \"\"\"Change all line endings to ``eol``.\n \"\"\"\n lines = [chomp(line) for line in open(fname, 'rb').readlines()]\n with open(fname, 'wb') as fp:\n for line in lines:\n fp.write(line + eol)\n\n\n<mask token>\n\n\ndef concat(ctx, dest, *sources, **kw):\n force = kw.pop('force', False)\n placement = Path(dest).dirname()\n placement.makedirs()\n with open(dest, 'w') as out:\n print('Opened:', dest, 'for writing.')\n for s in sources:\n with open(s, 'r') as inp:\n print(' appending:', s)\n out.writelines(inp.readlines())\n out.write('\\n')\n fix_line_endings(dest)\n return dest\n",
"step-2": "<mask token>\n\n\ndef line_endings(fname):\n \"\"\"Return all line endings in the file.\n \"\"\"\n _endings = {line[-2:] for line in open(fname, 'rb').readlines()}\n res = set()\n for e in _endings:\n if e.endswith(b'\\r'):\n res.add(b'\\r')\n elif e.endswith(b'\\r\\n'):\n res.add(b'\\r\\n')\n elif e.endswith(b'\\n'):\n res.add(b'\\n')\n return res\n\n\ndef chomp(s):\n \"\"\"Remove line terminator if it exists.\n \"\"\"\n if s[-2:] == b'\\r\\n':\n return s[:-2]\n if s[-1:] == b'\\r' or s[-1:] == b'\\n':\n return s[:-1]\n return s\n\n\ndef fix_line_endings(fname, eol=b'\\n'):\n \"\"\"Change all line endings to ``eol``.\n \"\"\"\n lines = [chomp(line) for line in open(fname, 'rb').readlines()]\n with open(fname, 'wb') as fp:\n for line in lines:\n fp.write(line + eol)\n\n\n<mask token>\n\n\ndef concat(ctx, dest, *sources, **kw):\n force = kw.pop('force', False)\n placement = Path(dest).dirname()\n placement.makedirs()\n with open(dest, 'w') as out:\n print('Opened:', dest, 'for writing.')\n for s in sources:\n with open(s, 'r') as inp:\n print(' appending:', s)\n out.writelines(inp.readlines())\n out.write('\\n')\n fix_line_endings(dest)\n return dest\n",
"step-3": "<mask token>\n\n\ndef line_endings(fname):\n \"\"\"Return all line endings in the file.\n \"\"\"\n _endings = {line[-2:] for line in open(fname, 'rb').readlines()}\n res = set()\n for e in _endings:\n if e.endswith(b'\\r'):\n res.add(b'\\r')\n elif e.endswith(b'\\r\\n'):\n res.add(b'\\r\\n')\n elif e.endswith(b'\\n'):\n res.add(b'\\n')\n return res\n\n\ndef chomp(s):\n \"\"\"Remove line terminator if it exists.\n \"\"\"\n if s[-2:] == b'\\r\\n':\n return s[:-2]\n if s[-1:] == b'\\r' or s[-1:] == b'\\n':\n return s[:-1]\n return s\n\n\ndef fix_line_endings(fname, eol=b'\\n'):\n \"\"\"Change all line endings to ``eol``.\n \"\"\"\n lines = [chomp(line) for line in open(fname, 'rb').readlines()]\n with open(fname, 'wb') as fp:\n for line in lines:\n fp.write(line + eol)\n\n\ndef copy(ctx, source, dest, force=False):\n \"\"\"Copy ``source`` to ``dest``, which can be a file or directory.\n \"\"\"\n if source == dest:\n return dest\n source = os.path.normcase(os.path.normpath(str(source)))\n dest = os.path.normcase(os.path.normpath(str(dest)))\n flags = ''\n if sys.platform == 'win32':\n if force:\n flags += ' /Y'\n ctx.run('copy {flags} {source} {dest}'.format(**locals()))\n else:\n if force:\n flags += ' --force'\n ctx.run('cp {flags} {source} {dest}'.format(**locals()))\n return dest\n\n\ndef concat(ctx, dest, *sources, **kw):\n force = kw.pop('force', False)\n placement = Path(dest).dirname()\n placement.makedirs()\n with open(dest, 'w') as out:\n print('Opened:', dest, 'for writing.')\n for s in sources:\n with open(s, 'r') as inp:\n print(' appending:', s)\n out.writelines(inp.readlines())\n out.write('\\n')\n fix_line_endings(dest)\n return dest\n",
"step-4": "from __future__ import print_function\nimport os\nimport sys\nfrom dkfileutils.path import Path\n\n\ndef line_endings(fname):\n \"\"\"Return all line endings in the file.\n \"\"\"\n _endings = {line[-2:] for line in open(fname, 'rb').readlines()}\n res = set()\n for e in _endings:\n if e.endswith(b'\\r'):\n res.add(b'\\r')\n elif e.endswith(b'\\r\\n'):\n res.add(b'\\r\\n')\n elif e.endswith(b'\\n'):\n res.add(b'\\n')\n return res\n\n\ndef chomp(s):\n \"\"\"Remove line terminator if it exists.\n \"\"\"\n if s[-2:] == b'\\r\\n':\n return s[:-2]\n if s[-1:] == b'\\r' or s[-1:] == b'\\n':\n return s[:-1]\n return s\n\n\ndef fix_line_endings(fname, eol=b'\\n'):\n \"\"\"Change all line endings to ``eol``.\n \"\"\"\n lines = [chomp(line) for line in open(fname, 'rb').readlines()]\n with open(fname, 'wb') as fp:\n for line in lines:\n fp.write(line + eol)\n\n\ndef copy(ctx, source, dest, force=False):\n \"\"\"Copy ``source`` to ``dest``, which can be a file or directory.\n \"\"\"\n if source == dest:\n return dest\n source = os.path.normcase(os.path.normpath(str(source)))\n dest = os.path.normcase(os.path.normpath(str(dest)))\n flags = ''\n if sys.platform == 'win32':\n if force:\n flags += ' /Y'\n ctx.run('copy {flags} {source} {dest}'.format(**locals()))\n else:\n if force:\n flags += ' --force'\n ctx.run('cp {flags} {source} {dest}'.format(**locals()))\n return dest\n\n\ndef concat(ctx, dest, *sources, **kw):\n force = kw.pop('force', False)\n placement = Path(dest).dirname()\n placement.makedirs()\n with open(dest, 'w') as out:\n print('Opened:', dest, 'for writing.')\n for s in sources:\n with open(s, 'r') as inp:\n print(' appending:', s)\n out.writelines(inp.readlines())\n out.write('\\n')\n fix_line_endings(dest)\n return dest\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nimport os\nimport sys\n\nfrom dkfileutils.path import Path\n\n\ndef line_endings(fname):\n \"\"\"Return all line endings in the file.\n \"\"\"\n _endings = {line[-2:] for line in open(fname, 'rb').readlines()}\n res = set()\n for e in _endings:\n if e.endswith(b'\\r'):\n res.add(b'\\r')\n elif e.endswith(b'\\r\\n'):\n res.add(b'\\r\\n')\n elif e.endswith(b'\\n'):\n res.add(b'\\n')\n return res\n\n\ndef chomp(s):\n \"\"\"Remove line terminator if it exists.\n \"\"\"\n if s[-2:] == b'\\r\\n':\n return s[:-2]\n if s[-1:] == b'\\r' or s[-1:] == b'\\n':\n return s[:-1]\n return s\n\n\ndef fix_line_endings(fname, eol=b'\\n'):\n \"\"\"Change all line endings to ``eol``.\n \"\"\"\n lines = [chomp(line) for line in open(fname, 'rb').readlines()]\n with open(fname, 'wb') as fp:\n for line in lines:\n fp.write(line + eol)\n\n\ndef copy(ctx, source, dest, force=False):\n \"\"\"Copy ``source`` to ``dest``, which can be a file or directory.\n \"\"\"\n # print \"COPY:\", locals()\n # print \"COPY:\", ctx.force, ctx.verbose\n if source == dest:\n return dest\n\n source = os.path.normcase(os.path.normpath(str(source)))\n dest = os.path.normcase(os.path.normpath(str(dest)))\n flags = \"\"\n if sys.platform == 'win32':\n if force:\n flags += \" /Y\"\n # print 'copy {flags} {source} {dest}'.format(**locals())\n ctx.run('copy {flags} {source} {dest}'.format(**locals()))\n else: # pragma: nocover\n if force:\n flags += \" --force\"\n ctx.run('cp {flags} {source} {dest}'.format(**locals()))\n return dest\n\n\ndef concat(ctx, dest, *sources, **kw):\n force = kw.pop('force', False) # noqa\n placement = Path(dest).dirname()\n placement.makedirs()\n\n with open(dest, 'w') as out:\n print(\"Opened:\", dest, \"for writing.\")\n for s in sources:\n with open(s, 'r') as inp:\n print(\" appending:\", s)\n out.writelines(inp.readlines())\n out.write('\\n')\n\n # flags = \"\"\n # if sys.platform == 'win32':\n # if force:\n # 
flags += \" /Y\"\n # source = '+'.join(sources)\n # source = source.replace('/', '\\\\')\n # ctx.run('copy {flags} {source} {dest}'.format(**locals()))\n # else: # pragma: nocover\n # if force:\n # pass\n # # flags += \" --force\"\n # source = ' '.join(sources)\n # # print 'cat {flags} {source} > {dest}'.format(**locals())\n # ctx.run('cat {flags} {source} > {dest}'.format(**locals()))\n\n fix_line_endings(dest)\n # if len(line_endings(dest)) > 1:\n # fix_line_endings(dest)\n\n return dest\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import graph as Graph
def BFS(graph: Graph.Graph, start, end):
    """Breadth-first search from ``start``; stops early when ``end`` is reached.

    Returns a ``parent`` dict mapping each visited vertex to its
    predecessor (``start`` maps to ``None``), usable to reconstruct the
    shortest path in an unweighted graph.
    """
    # deque gives O(1) popleft; the original list.pop(0) was O(n) per dequeue.
    from collections import deque

    visited = {start}
    parent = {start: None}
    queue = deque([start])
    while queue:
        current = queue.popleft()
        if current == end:
            break
        for v in graph.neighbors(current):
            if v not in visited:
                queue.append(v)
                visited.add(v)
                parent[v] = current
    return parent
|
normal
|
{
"blob_id": "5c5f00084f37837b749e1fbb52a18d515e09ba06",
"index": 773,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef BFS(graph: Graph.Graph, start, end):\n visited = set()\n parent = dict()\n parent[start] = None\n queue = []\n queue.append(start)\n visited.add(start)\n while queue:\n current = queue.pop(0)\n if current == end:\n break\n for v in graph.neighbors(current):\n if v not in visited:\n queue.append(v)\n visited.add(v)\n parent[v] = current\n return parent\n",
"step-3": "import graph as Graph\n\n\ndef BFS(graph: Graph.Graph, start, end):\n visited = set()\n parent = dict()\n parent[start] = None\n queue = []\n queue.append(start)\n visited.add(start)\n while queue:\n current = queue.pop(0)\n if current == end:\n break\n for v in graph.neighbors(current):\n if v not in visited:\n queue.append(v)\n visited.add(v)\n parent[v] = current\n return parent\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
"""@brief the routes for Flask application
"""
import hashlib
import json
import time
import requests
from flask import render_template, url_for
from soco import SoCo
from app import app
app.config.from_pyfile("settings.py")
sonos = SoCo(app.config["SPEAKER_IP"])
def gen_sig():
    """Build the Rovi request signature: MD5 of api key + secret + timestamp."""
    payload = "{}{}{}".format(
        app.config["ROVI_API_KEY"],
        app.config["ROVI_SHARED_SECRET"],
        repr(int(time.time())),
    )
    return hashlib.md5(payload.encode("utf-8")).hexdigest()
def get_track_image(artist, album):
    """Look up the front cover image URL for *album* by *artist* via Rovi.

    Falls back to the bundled blank image when API credentials are not
    configured, the HTTP request fails, or the response carries no image.
    """
    blank_image = url_for("static", filename="img/blank.jpg")
    if "ROVI_SHARED_SECRET" not in app.config:
        return blank_image
    if "ROVI_API_KEY" not in app.config:
        return blank_image

    headers = {"Accept-Encoding": "gzip"}
    # Let requests build and URL-encode the query string.  The previous
    # hand-concatenated URL broke for artist/album names containing
    # spaces, '&' or non-ASCII characters (and sent a stray leading
    # space in the "name" parameter).
    params = {
        "apikey": app.config["ROVI_API_KEY"],
        "sig": gen_sig(),
        "name": album,
        "performername": artist,
        "include": "images",
        "size": "1",
    }
    req = requests.get(
        "http://api.rovicorp.com/recognition/v2.1/music/match/album",
        params=params,
        headers=headers,
        timeout=30,
    )

    if req.status_code != requests.codes.ok:
        return blank_image

    result = json.loads(req.content)
    try:
        return result["matchResponse"]["results"][0]["album"]["images"][0]["front"][3]["url"]
    except (KeyError, IndexError):
        # Match found but response shape lacks the expected image entry.
        return blank_image
def current_track():
    """Fetch current track info from Sonos, truncating long display fields."""
    track = sonos.get_current_track_info()
    for field in ("title", "artist"):
        track[field] = track[field][:30]
    return track
@app.route("/play")
def play():
"""@brief the play function """
sonos.play()
return "Ok"
@app.route("/pause")
def pause():
"""@brief the pause function """
sonos.pause()
return "Ok"
@app.route("/following")
def following():
"""@brief the following function """
sonos.next()
return "Ok"
@app.route("/previous")
def previous():
"""@brief the previous function """
sonos.previous()
return "Ok"
@app.route("/volume")
def volume():
"""@brief get the actual volume """
vol = sonos.volume
return vol
@app.route("/volume_up")
def volume_up():
"""@brief the volume up function """
sonos.set_relative_volume(10)
return "Ok"
@app.route("/volume_down")
def volume_down():
"""@brief the volume down function """
sonos.set_relative_volume(-10)
return "Ok"
@app.route("/volume_mute")
def volume_mute():
"""@brief the mute function """
sonos.mute = True
return "Ok"
@app.route("/volume_unmute")
def volume_unmute():
"""@brief the unmute function """
sonos.mute = False
return "Ok"
@app.route("/track_01")
def track_01():
"""@brief switch to new track """
sonos.play_uri('http://mp3stream1.apasf.apa.at:8000', title='FM4.ORF.AT', force_radio=True)
return "Ok"
@app.route("/track_02")
def track_02():
"""@brief switch to new track """
sonos.play_uri('http://streams.radiopsr.de/psr-live/mp3-192/mediaplayer', title='Radio PSR Live', force_radio=True)
return "Ok"
@app.route("/track_03")
def track_03():
"""@brief switch to new track """
sonos.play_uri('http://nrj.de/sachsen', title='Energy Sachsen', force_radio=True)
return "Ok"
@app.route("/track_04")
def track_04():
"""@brief switch to new track """
sonos.play_uri('http://stream.sunshine-live.de/live/mp3-192', title='Sunshine Live', force_radio=True)
return "Ok"
@app.route("/info-light")
def info_light():
"""@brief the info-light function """
track = current_track()
return json.dumps(track)
@app.route("/info")
def info():
"""@brief the info function """
track = current_track()
track["image"] = get_track_image(track["artist"], track["album"])
transport = sonos.get_current_transport_info()
track["playing"] = transport["current_transport_state"] != "STOPPED"
track["mute"] = sonos.mute
return json.dumps(track)
@app.route("/")
@app.route('/index')
def index():
"""@brief the index function """
track = current_track()
track["image"] = get_track_image(track["artist"], track["album"])
return render_template("index.html", track=track)
|
normal
|
{
"blob_id": "86f33895e9ae0e026d7d6e40e611796b2dc2c713",
"index": 8394,
"step-1": "<mask token>\n\n\ndef gen_sig():\n \"\"\"@brief return the MD5 checksum \"\"\"\n return hashlib.md5((app.config['ROVI_API_KEY'] + app.config[\n 'ROVI_SHARED_SECRET'] + repr(int(time.time()))).encode('utf-8')\n ).hexdigest()\n\n\ndef get_track_image(artist, album):\n \"\"\"@brief get the track image from Rovi \"\"\"\n blank_image = url_for('static', filename='img/blank.jpg')\n if 'ROVI_SHARED_SECRET' not in app.config:\n return blank_image\n if 'ROVI_API_KEY' not in app.config:\n return blank_image\n headers = {'Accept-Encoding': 'gzip'}\n req = requests.get(\n 'http://api.rovicorp.com/recognition/v2.1/music/match/album?apikey=' +\n app.config['ROVI_API_KEY'] + '&sig=' + gen_sig() + '&name= ' +\n album + '&performername=' + artist + '&include=images&size=1',\n headers=headers, timeout=30)\n if req.status_code != requests.codes.ok:\n return blank_image\n result = json.loads(req.content)\n try:\n return result['matchResponse']['results'][0]['album']['images'][0][\n 'front'][3]['url']\n except (KeyError, IndexError):\n return blank_image\n\n\ndef current_track():\n \"\"\"@brief get the current track information from Sonos \"\"\"\n track = sonos.get_current_track_info()\n track['title'] = track['title'][:30]\n track['artist'] = track['artist'][:30]\n return track\n\n\[email protected]('/play')\ndef play():\n \"\"\"@brief the play function \"\"\"\n sonos.play()\n return 'Ok'\n\n\[email protected]('/pause')\ndef pause():\n \"\"\"@brief the pause function \"\"\"\n sonos.pause()\n return 'Ok'\n\n\[email protected]('/following')\ndef following():\n \"\"\"@brief the following function \"\"\"\n sonos.next()\n return 'Ok'\n\n\[email protected]('/previous')\ndef previous():\n \"\"\"@brief the previous function \"\"\"\n sonos.previous()\n return 'Ok'\n\n\[email protected]('/volume')\ndef volume():\n \"\"\"@brief get the actual volume \"\"\"\n vol = sonos.volume\n return vol\n\n\[email protected]('/volume_up')\ndef volume_up():\n \"\"\"@brief the volume up function 
\"\"\"\n sonos.set_relative_volume(10)\n return 'Ok'\n\n\[email protected]('/volume_down')\ndef volume_down():\n \"\"\"@brief the volume down function \"\"\"\n sonos.set_relative_volume(-10)\n return 'Ok'\n\n\[email protected]('/volume_mute')\ndef volume_mute():\n \"\"\"@brief the mute function \"\"\"\n sonos.mute = True\n return 'Ok'\n\n\[email protected]('/volume_unmute')\ndef volume_unmute():\n \"\"\"@brief the unmute function \"\"\"\n sonos.mute = False\n return 'Ok'\n\n\n<mask token>\n\n\[email protected]('/track_02')\ndef track_02():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://streams.radiopsr.de/psr-live/mp3-192/mediaplayer',\n title='Radio PSR Live', force_radio=True)\n return 'Ok'\n\n\n<mask token>\n\n\[email protected]('/track_04')\ndef track_04():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://stream.sunshine-live.de/live/mp3-192', title=\n 'Sunshine Live', force_radio=True)\n return 'Ok'\n\n\[email protected]('/info-light')\ndef info_light():\n \"\"\"@brief the info-light function \"\"\"\n track = current_track()\n return json.dumps(track)\n\n\[email protected]('/info')\ndef info():\n \"\"\"@brief the info function \"\"\"\n track = current_track()\n track['image'] = get_track_image(track['artist'], track['album'])\n transport = sonos.get_current_transport_info()\n track['playing'] = transport['current_transport_state'] != 'STOPPED'\n track['mute'] = sonos.mute\n return json.dumps(track)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef gen_sig():\n \"\"\"@brief return the MD5 checksum \"\"\"\n return hashlib.md5((app.config['ROVI_API_KEY'] + app.config[\n 'ROVI_SHARED_SECRET'] + repr(int(time.time()))).encode('utf-8')\n ).hexdigest()\n\n\ndef get_track_image(artist, album):\n \"\"\"@brief get the track image from Rovi \"\"\"\n blank_image = url_for('static', filename='img/blank.jpg')\n if 'ROVI_SHARED_SECRET' not in app.config:\n return blank_image\n if 'ROVI_API_KEY' not in app.config:\n return blank_image\n headers = {'Accept-Encoding': 'gzip'}\n req = requests.get(\n 'http://api.rovicorp.com/recognition/v2.1/music/match/album?apikey=' +\n app.config['ROVI_API_KEY'] + '&sig=' + gen_sig() + '&name= ' +\n album + '&performername=' + artist + '&include=images&size=1',\n headers=headers, timeout=30)\n if req.status_code != requests.codes.ok:\n return blank_image\n result = json.loads(req.content)\n try:\n return result['matchResponse']['results'][0]['album']['images'][0][\n 'front'][3]['url']\n except (KeyError, IndexError):\n return blank_image\n\n\ndef current_track():\n \"\"\"@brief get the current track information from Sonos \"\"\"\n track = sonos.get_current_track_info()\n track['title'] = track['title'][:30]\n track['artist'] = track['artist'][:30]\n return track\n\n\[email protected]('/play')\ndef play():\n \"\"\"@brief the play function \"\"\"\n sonos.play()\n return 'Ok'\n\n\[email protected]('/pause')\ndef pause():\n \"\"\"@brief the pause function \"\"\"\n sonos.pause()\n return 'Ok'\n\n\[email protected]('/following')\ndef following():\n \"\"\"@brief the following function \"\"\"\n sonos.next()\n return 'Ok'\n\n\[email protected]('/previous')\ndef previous():\n \"\"\"@brief the previous function \"\"\"\n sonos.previous()\n return 'Ok'\n\n\[email protected]('/volume')\ndef volume():\n \"\"\"@brief get the actual volume \"\"\"\n vol = sonos.volume\n return vol\n\n\[email protected]('/volume_up')\ndef volume_up():\n \"\"\"@brief the volume up function 
\"\"\"\n sonos.set_relative_volume(10)\n return 'Ok'\n\n\[email protected]('/volume_down')\ndef volume_down():\n \"\"\"@brief the volume down function \"\"\"\n sonos.set_relative_volume(-10)\n return 'Ok'\n\n\[email protected]('/volume_mute')\ndef volume_mute():\n \"\"\"@brief the mute function \"\"\"\n sonos.mute = True\n return 'Ok'\n\n\[email protected]('/volume_unmute')\ndef volume_unmute():\n \"\"\"@brief the unmute function \"\"\"\n sonos.mute = False\n return 'Ok'\n\n\[email protected]('/track_01')\ndef track_01():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://mp3stream1.apasf.apa.at:8000', title=\n 'FM4.ORF.AT', force_radio=True)\n return 'Ok'\n\n\[email protected]('/track_02')\ndef track_02():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://streams.radiopsr.de/psr-live/mp3-192/mediaplayer',\n title='Radio PSR Live', force_radio=True)\n return 'Ok'\n\n\n<mask token>\n\n\[email protected]('/track_04')\ndef track_04():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://stream.sunshine-live.de/live/mp3-192', title=\n 'Sunshine Live', force_radio=True)\n return 'Ok'\n\n\[email protected]('/info-light')\ndef info_light():\n \"\"\"@brief the info-light function \"\"\"\n track = current_track()\n return json.dumps(track)\n\n\[email protected]('/info')\ndef info():\n \"\"\"@brief the info function \"\"\"\n track = current_track()\n track['image'] = get_track_image(track['artist'], track['album'])\n transport = sonos.get_current_transport_info()\n track['playing'] = transport['current_transport_state'] != 'STOPPED'\n track['mute'] = sonos.mute\n return json.dumps(track)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef gen_sig():\n \"\"\"@brief return the MD5 checksum \"\"\"\n return hashlib.md5((app.config['ROVI_API_KEY'] + app.config[\n 'ROVI_SHARED_SECRET'] + repr(int(time.time()))).encode('utf-8')\n ).hexdigest()\n\n\ndef get_track_image(artist, album):\n \"\"\"@brief get the track image from Rovi \"\"\"\n blank_image = url_for('static', filename='img/blank.jpg')\n if 'ROVI_SHARED_SECRET' not in app.config:\n return blank_image\n if 'ROVI_API_KEY' not in app.config:\n return blank_image\n headers = {'Accept-Encoding': 'gzip'}\n req = requests.get(\n 'http://api.rovicorp.com/recognition/v2.1/music/match/album?apikey=' +\n app.config['ROVI_API_KEY'] + '&sig=' + gen_sig() + '&name= ' +\n album + '&performername=' + artist + '&include=images&size=1',\n headers=headers, timeout=30)\n if req.status_code != requests.codes.ok:\n return blank_image\n result = json.loads(req.content)\n try:\n return result['matchResponse']['results'][0]['album']['images'][0][\n 'front'][3]['url']\n except (KeyError, IndexError):\n return blank_image\n\n\ndef current_track():\n \"\"\"@brief get the current track information from Sonos \"\"\"\n track = sonos.get_current_track_info()\n track['title'] = track['title'][:30]\n track['artist'] = track['artist'][:30]\n return track\n\n\[email protected]('/play')\ndef play():\n \"\"\"@brief the play function \"\"\"\n sonos.play()\n return 'Ok'\n\n\[email protected]('/pause')\ndef pause():\n \"\"\"@brief the pause function \"\"\"\n sonos.pause()\n return 'Ok'\n\n\[email protected]('/following')\ndef following():\n \"\"\"@brief the following function \"\"\"\n sonos.next()\n return 'Ok'\n\n\[email protected]('/previous')\ndef previous():\n \"\"\"@brief the previous function \"\"\"\n sonos.previous()\n return 'Ok'\n\n\[email protected]('/volume')\ndef volume():\n \"\"\"@brief get the actual volume \"\"\"\n vol = sonos.volume\n return vol\n\n\[email protected]('/volume_up')\ndef volume_up():\n \"\"\"@brief the volume up function 
\"\"\"\n sonos.set_relative_volume(10)\n return 'Ok'\n\n\[email protected]('/volume_down')\ndef volume_down():\n \"\"\"@brief the volume down function \"\"\"\n sonos.set_relative_volume(-10)\n return 'Ok'\n\n\[email protected]('/volume_mute')\ndef volume_mute():\n \"\"\"@brief the mute function \"\"\"\n sonos.mute = True\n return 'Ok'\n\n\[email protected]('/volume_unmute')\ndef volume_unmute():\n \"\"\"@brief the unmute function \"\"\"\n sonos.mute = False\n return 'Ok'\n\n\[email protected]('/track_01')\ndef track_01():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://mp3stream1.apasf.apa.at:8000', title=\n 'FM4.ORF.AT', force_radio=True)\n return 'Ok'\n\n\[email protected]('/track_02')\ndef track_02():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://streams.radiopsr.de/psr-live/mp3-192/mediaplayer',\n title='Radio PSR Live', force_radio=True)\n return 'Ok'\n\n\[email protected]('/track_03')\ndef track_03():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://nrj.de/sachsen', title='Energy Sachsen',\n force_radio=True)\n return 'Ok'\n\n\[email protected]('/track_04')\ndef track_04():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://stream.sunshine-live.de/live/mp3-192', title=\n 'Sunshine Live', force_radio=True)\n return 'Ok'\n\n\[email protected]('/info-light')\ndef info_light():\n \"\"\"@brief the info-light function \"\"\"\n track = current_track()\n return json.dumps(track)\n\n\[email protected]('/info')\ndef info():\n \"\"\"@brief the info function \"\"\"\n track = current_track()\n track['image'] = get_track_image(track['artist'], track['album'])\n transport = sonos.get_current_transport_info()\n track['playing'] = transport['current_transport_state'] != 'STOPPED'\n track['mute'] = sonos.mute\n return json.dumps(track)\n\n\n<mask token>\n",
"step-4": "<mask token>\napp.config.from_pyfile('settings.py')\n<mask token>\n\n\ndef gen_sig():\n \"\"\"@brief return the MD5 checksum \"\"\"\n return hashlib.md5((app.config['ROVI_API_KEY'] + app.config[\n 'ROVI_SHARED_SECRET'] + repr(int(time.time()))).encode('utf-8')\n ).hexdigest()\n\n\ndef get_track_image(artist, album):\n \"\"\"@brief get the track image from Rovi \"\"\"\n blank_image = url_for('static', filename='img/blank.jpg')\n if 'ROVI_SHARED_SECRET' not in app.config:\n return blank_image\n if 'ROVI_API_KEY' not in app.config:\n return blank_image\n headers = {'Accept-Encoding': 'gzip'}\n req = requests.get(\n 'http://api.rovicorp.com/recognition/v2.1/music/match/album?apikey=' +\n app.config['ROVI_API_KEY'] + '&sig=' + gen_sig() + '&name= ' +\n album + '&performername=' + artist + '&include=images&size=1',\n headers=headers, timeout=30)\n if req.status_code != requests.codes.ok:\n return blank_image\n result = json.loads(req.content)\n try:\n return result['matchResponse']['results'][0]['album']['images'][0][\n 'front'][3]['url']\n except (KeyError, IndexError):\n return blank_image\n\n\ndef current_track():\n \"\"\"@brief get the current track information from Sonos \"\"\"\n track = sonos.get_current_track_info()\n track['title'] = track['title'][:30]\n track['artist'] = track['artist'][:30]\n return track\n\n\[email protected]('/play')\ndef play():\n \"\"\"@brief the play function \"\"\"\n sonos.play()\n return 'Ok'\n\n\[email protected]('/pause')\ndef pause():\n \"\"\"@brief the pause function \"\"\"\n sonos.pause()\n return 'Ok'\n\n\[email protected]('/following')\ndef following():\n \"\"\"@brief the following function \"\"\"\n sonos.next()\n return 'Ok'\n\n\[email protected]('/previous')\ndef previous():\n \"\"\"@brief the previous function \"\"\"\n sonos.previous()\n return 'Ok'\n\n\[email protected]('/volume')\ndef volume():\n \"\"\"@brief get the actual volume \"\"\"\n vol = sonos.volume\n return vol\n\n\[email protected]('/volume_up')\ndef 
volume_up():\n \"\"\"@brief the volume up function \"\"\"\n sonos.set_relative_volume(10)\n return 'Ok'\n\n\[email protected]('/volume_down')\ndef volume_down():\n \"\"\"@brief the volume down function \"\"\"\n sonos.set_relative_volume(-10)\n return 'Ok'\n\n\[email protected]('/volume_mute')\ndef volume_mute():\n \"\"\"@brief the mute function \"\"\"\n sonos.mute = True\n return 'Ok'\n\n\[email protected]('/volume_unmute')\ndef volume_unmute():\n \"\"\"@brief the unmute function \"\"\"\n sonos.mute = False\n return 'Ok'\n\n\[email protected]('/track_01')\ndef track_01():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://mp3stream1.apasf.apa.at:8000', title=\n 'FM4.ORF.AT', force_radio=True)\n return 'Ok'\n\n\[email protected]('/track_02')\ndef track_02():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://streams.radiopsr.de/psr-live/mp3-192/mediaplayer',\n title='Radio PSR Live', force_radio=True)\n return 'Ok'\n\n\[email protected]('/track_03')\ndef track_03():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://nrj.de/sachsen', title='Energy Sachsen',\n force_radio=True)\n return 'Ok'\n\n\[email protected]('/track_04')\ndef track_04():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://stream.sunshine-live.de/live/mp3-192', title=\n 'Sunshine Live', force_radio=True)\n return 'Ok'\n\n\[email protected]('/info-light')\ndef info_light():\n \"\"\"@brief the info-light function \"\"\"\n track = current_track()\n return json.dumps(track)\n\n\[email protected]('/info')\ndef info():\n \"\"\"@brief the info function \"\"\"\n track = current_track()\n track['image'] = get_track_image(track['artist'], track['album'])\n transport = sonos.get_current_transport_info()\n track['playing'] = transport['current_transport_state'] != 'STOPPED'\n track['mute'] = sonos.mute\n return json.dumps(track)\n\n\[email protected]('/')\[email protected]('/index')\ndef index():\n \"\"\"@brief the index function \"\"\"\n track = 
current_track()\n track['image'] = get_track_image(track['artist'], track['album'])\n return render_template('index.html', track=track)\n",
"step-5": "\"\"\"@brief the routes for Flask application\n\"\"\"\nimport hashlib\nimport json\nimport time\n\nimport requests\nfrom flask import render_template, url_for\nfrom soco import SoCo\nfrom app import app\n\napp.config.from_pyfile(\"settings.py\")\nsonos = SoCo(app.config[\"SPEAKER_IP\"])\n\n\ndef gen_sig():\n \"\"\"@brief return the MD5 checksum \"\"\"\n return hashlib.md5(\n (\n app.config[\"ROVI_API_KEY\"]\n + app.config[\"ROVI_SHARED_SECRET\"]\n + repr(int(time.time()))\n ).encode(\"utf-8\")\n ).hexdigest()\n\n\ndef get_track_image(artist, album):\n \"\"\"@brief get the track image from Rovi \"\"\"\n blank_image = url_for(\"static\", filename=\"img/blank.jpg\")\n if \"ROVI_SHARED_SECRET\" not in app.config:\n return blank_image\n if \"ROVI_API_KEY\" not in app.config:\n return blank_image\n\n headers = {\"Accept-Encoding\": \"gzip\"}\n req = requests.get(\n \"http://api.rovicorp.com/recognition/v2.1/music/match/album?apikey=\"\n + app.config[\"ROVI_API_KEY\"]\n + \"&sig=\"\n + gen_sig()\n + \"&name= \"\n + album\n + \"&performername=\"\n + artist\n + \"&include=images&size=1\",\n headers=headers,\n timeout=30,\n )\n\n if req.status_code != requests.codes.ok:\n return blank_image\n\n result = json.loads(req.content)\n try:\n return result[\"matchResponse\"][\"results\"][0][\"album\"][\"images\"][0][\"front\"][3][\"url\"]\n except (KeyError, IndexError):\n return blank_image\n\n\ndef current_track():\n \"\"\"@brief get the current track information from Sonos \"\"\"\n track = sonos.get_current_track_info()\n track[\"title\"] = track[\"title\"][:30]\n track[\"artist\"] = track[\"artist\"][:30]\n return track\n\n\[email protected](\"/play\")\ndef play():\n \"\"\"@brief the play function \"\"\"\n sonos.play()\n return \"Ok\"\n\n\[email protected](\"/pause\")\ndef pause():\n \"\"\"@brief the pause function \"\"\"\n sonos.pause()\n return \"Ok\"\n\n\[email protected](\"/following\")\ndef following():\n \"\"\"@brief the following function \"\"\"\n 
sonos.next()\n return \"Ok\"\n\n\[email protected](\"/previous\")\ndef previous():\n \"\"\"@brief the previous function \"\"\"\n sonos.previous()\n return \"Ok\"\n\n\[email protected](\"/volume\")\ndef volume():\n \"\"\"@brief get the actual volume \"\"\"\n vol = sonos.volume\n return vol\n\n\[email protected](\"/volume_up\")\ndef volume_up():\n \"\"\"@brief the volume up function \"\"\"\n sonos.set_relative_volume(10)\n return \"Ok\"\n\n\[email protected](\"/volume_down\")\ndef volume_down():\n \"\"\"@brief the volume down function \"\"\"\n sonos.set_relative_volume(-10)\n return \"Ok\"\n\n\[email protected](\"/volume_mute\")\ndef volume_mute():\n \"\"\"@brief the mute function \"\"\"\n sonos.mute = True\n return \"Ok\"\n\n\[email protected](\"/volume_unmute\")\ndef volume_unmute():\n \"\"\"@brief the unmute function \"\"\"\n sonos.mute = False\n return \"Ok\"\n\n\[email protected](\"/track_01\")\ndef track_01():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://mp3stream1.apasf.apa.at:8000', title='FM4.ORF.AT', force_radio=True)\n return \"Ok\"\n\n\[email protected](\"/track_02\")\ndef track_02():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://streams.radiopsr.de/psr-live/mp3-192/mediaplayer', title='Radio PSR Live', force_radio=True)\n return \"Ok\"\n\n\[email protected](\"/track_03\")\ndef track_03():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://nrj.de/sachsen', title='Energy Sachsen', force_radio=True)\n return \"Ok\"\n\n\[email protected](\"/track_04\")\ndef track_04():\n \"\"\"@brief switch to new track \"\"\"\n sonos.play_uri('http://stream.sunshine-live.de/live/mp3-192', title='Sunshine Live', force_radio=True)\n return \"Ok\"\n\n\[email protected](\"/info-light\")\ndef info_light():\n \"\"\"@brief the info-light function \"\"\"\n track = current_track()\n return json.dumps(track)\n\n\[email protected](\"/info\")\ndef info():\n \"\"\"@brief the info function \"\"\"\n track = current_track()\n 
track[\"image\"] = get_track_image(track[\"artist\"], track[\"album\"])\n transport = sonos.get_current_transport_info()\n track[\"playing\"] = transport[\"current_transport_state\"] != \"STOPPED\"\n track[\"mute\"] = sonos.mute\n return json.dumps(track)\n\n\[email protected](\"/\")\[email protected]('/index')\ndef index():\n \"\"\"@brief the index function \"\"\"\n track = current_track()\n track[\"image\"] = get_track_image(track[\"artist\"], track[\"album\"])\n return render_template(\"index.html\", track=track)\n",
"step-ids": [
16,
17,
18,
20,
23
]
}
|
[
16,
17,
18,
20,
23
] |
import math

# Task 1: compare the lengths of two phrases.
long_phrase = 'Насколько проще было бы писать программы, если бы не заказчики'
short_phrase = '640Кб должно хватить для любых задач. Билл Гейтс (по легенде)'


def compare(long, short):
    """Print True if *long* is strictly longer than *short*."""
    print(len(long) > len(short))


compare(long_phrase, short_phrase)

# Task 2.1: count how often each letter occurs in the text.
text = 'Если программист в 9-00 утра на работе, значит, он там и ночевал'
d = dict()
for letter in text:
    if letter not in d:
        d[letter] = 1
    else:
        # BUG FIX: the original used a second `if letter in d:` here, so a
        # freshly inserted letter was immediately incremented again and every
        # count came out one too high.
        d[letter] += 1
result = 'В строке text {} букв "а" и {} букв "и"'.format(d['а'], d['и'])
print(result)

# Task 2.2: which of the two letters occurs more often (remove each letter
# and compare the remaining lengths).
if len(text.replace('и', '')) < len(text.replace('а', '')):
    print('В строке больше букв "и"')
else:
    print('В строке больше букв "а"')

# Task 3: convert a file size from bytes to (decimal) megabytes.
byte = 213680000
megabyte = byte / (10 ** 6)
print('Объем файла равен {}Mb'.format(megabyte))

# Task 4: sine of pi/6 radians (30 degrees) is 0.5.
sin = math.sin(math.pi / 6)
print(sin)

# Task 5 (answer): fractional numbers cannot be represented exactly in
# binary, so values are rounded and operations such as 0.1 + 0.2 give
# slightly inexact results.


def exchange(a, b):
    """Swap two numbers using arithmetic only (no temp variable) and print."""
    b = b - a
    a = a + b
    b = a - b
    print('a=', a, 'b=', b)


exchange(120, 1)

# Task 6: interpret the decimal digits of `num` as a binary number.
num = 10011
st = str(num)
l = len(st) - 1  # exponent of the most significant digit
print(l)
new_num = list()
# Each digit contributes digit * 2 ** (position from the right).
for k, digit in enumerate(st):
    value = int(digit) * (2 ** (l - k))
    print(value)
    new_num.append(value)
result = sum(new_num)
print(result)
|
normal
|
{
"blob_id": "f29637cd670524baebac6549962a1c50fc1b91c6",
"index": 6835,
"step-1": "<mask token>\n\n\ndef compare(long, short):\n print(len(long) > len(short))\n\n\n<mask token>\n\n\ndef exchange(a, b):\n b = b - a\n a = a + b\n b = a - b\n print('a=', a, 'b=', b)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef compare(long, short):\n print(len(long) > len(short))\n\n\ncompare(long_phrase, short_phrase)\n<mask token>\nfor letter in text:\n if letter not in d:\n d[letter] = 1\n if letter in d:\n d[letter] += 1\n<mask token>\nprint(result)\nif len(text.replace('и', '')) < len(text.replace('а', '')):\n print('В строке больше букв \"и\"')\nelse:\n print('В строке больше букв \"а\"')\n<mask token>\nprint('Объем файла равен {}Mb'.format(megabyte))\n<mask token>\nprint(sin)\n<mask token>\n\n\ndef exchange(a, b):\n b = b - a\n a = a + b\n b = a - b\n print('a=', a, 'b=', b)\n\n\nexchange(120, 1)\n<mask token>\nst.split()\n<mask token>\nprint(l)\n<mask token>\nfor i in st:\n k = k + 1\n i = int(i) * 2 ** (l - k)\n print(i)\n new_num.append(i)\n<mask token>\nprint(result)\n",
"step-3": "<mask token>\nlong_phrase = 'Насколько проще было бы писать программы, если бы не заказчики'\nshort_phrase = '640Кб должно хватить для любых задач. Билл Гейтс (по легенде)'\n\n\ndef compare(long, short):\n print(len(long) > len(short))\n\n\ncompare(long_phrase, short_phrase)\ntext = 'Если программист в 9-00 утра на работе, значит, он там и ночевал'\nd = dict()\nfor letter in text:\n if letter not in d:\n d[letter] = 1\n if letter in d:\n d[letter] += 1\nresult = 'В строке text {} букв \"а\" и {} букв \"и\"'.format(d['а'], d['и'])\nprint(result)\nif len(text.replace('и', '')) < len(text.replace('а', '')):\n print('В строке больше букв \"и\"')\nelse:\n print('В строке больше букв \"а\"')\nbyte = 213680000\nmegabyte = byte / 10 ** 6\nprint('Объем файла равен {}Mb'.format(megabyte))\nsin = math.sin(math.pi / 6)\nprint(sin)\n<mask token>\n\n\ndef exchange(a, b):\n b = b - a\n a = a + b\n b = a - b\n print('a=', a, 'b=', b)\n\n\nexchange(120, 1)\nnum = 10011\nst = str(num)\nst.split()\nl = len(st) - 1\nprint(l)\nnew_num = list()\nk = -1\nfor i in st:\n k = k + 1\n i = int(i) * 2 ** (l - k)\n print(i)\n new_num.append(i)\nresult = sum(new_num)\nprint(result)\n",
"step-4": "import math\nlong_phrase = 'Насколько проще было бы писать программы, если бы не заказчики'\nshort_phrase = '640Кб должно хватить для любых задач. Билл Гейтс (по легенде)'\n\n\ndef compare(long, short):\n print(len(long) > len(short))\n\n\ncompare(long_phrase, short_phrase)\ntext = 'Если программист в 9-00 утра на работе, значит, он там и ночевал'\nd = dict()\nfor letter in text:\n if letter not in d:\n d[letter] = 1\n if letter in d:\n d[letter] += 1\nresult = 'В строке text {} букв \"а\" и {} букв \"и\"'.format(d['а'], d['и'])\nprint(result)\nif len(text.replace('и', '')) < len(text.replace('а', '')):\n print('В строке больше букв \"и\"')\nelse:\n print('В строке больше букв \"а\"')\nbyte = 213680000\nmegabyte = byte / 10 ** 6\nprint('Объем файла равен {}Mb'.format(megabyte))\nsin = math.sin(math.pi / 6)\nprint(sin)\n<mask token>\n\n\ndef exchange(a, b):\n b = b - a\n a = a + b\n b = a - b\n print('a=', a, 'b=', b)\n\n\nexchange(120, 1)\nnum = 10011\nst = str(num)\nst.split()\nl = len(st) - 1\nprint(l)\nnew_num = list()\nk = -1\nfor i in st:\n k = k + 1\n i = int(i) * 2 ** (l - k)\n print(i)\n new_num.append(i)\nresult = sum(new_num)\nprint(result)\n",
"step-5": "import math\n\n# 1\nlong_phrase = 'Насколько проще было бы писать программы, если бы не заказчики'\nshort_phrase = '640Кб должно хватить для любых задач. Билл Гейтс (по легенде)'\ndef compare (long, short):\n\tprint(len(long)>len(short))\n\ncompare(long_phrase, short_phrase)\n\n# 2.1\ntext = 'Если программист в 9-00 утра на работе, значит, он там и ночевал'\nd=dict()\nfor letter in text:\n if letter not in d:\n d[letter]=1\n if letter in d:\n d[letter]+=1\nresult='В строке text {} букв \"а\" и {} букв \"и\"'.format(d['а'], d['и'])\nprint(result)\n\n# 2.2\n\nif len(text.replace('и','')) < len(text.replace('а','')):\n print('В строке больше букв \"и\"')\nelse:\n print('В строке больше букв \"а\"')\n\n# 3\nbyte=213680000\nmegabyte=byte/(10**6)\nprint('Объем файла равен {}Mb'.format(megabyte))\n\n# 4\nsin=math.sin(math.pi/6)\nprint(sin)\n\n''' 5 дробные числа не могут быть представлены в точности в бинарном виде,\nпоэтому значения округляются, и такие операции,\nкак 0.1+0.2, дают неточный результат '''\n\n\n# 5\ndef exchange (a, b):\n b=b-a\n a=a+b\n b=a-b\n print('a=',a,'b=',b)\nexchange(120,1)\n\n# 6\n# разбиваем число на элементы, получаем спискок\nnum=10011\nst=str(num)\nst.split()\nl=len(st)-1\nprint(l)\n# создаем новый список куда добавим вычисляемые значения\nnew_num=list()\n# вычисляем каждый элемент (умножение на 2 в степени номера эл-та)\nk=-1\nfor i in st:\n k=k+1\n i=int(i)*(2**(l-k))\n print(i)\n new_num.append(i)\nresult=sum(new_num)\nprint(result)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# coding=utf-8
"""
@Author: Freshield
@Contact: [email protected]
@File: a1_test_call.py
@Time: 2021-01-20 17:40
@Last_update: 2021-01-20 17:40
@Desc: None
@==============================================@
@ _____ _ _ _ _ @
@ | __|___ ___ ___| |_|_|___| |_| | @
@ | __| _| -_|_ -| | | -_| | . | @
@ |__| |_| |___|___|_|_|_|___|_|___| @
@ Freshield @
@==============================================@
"""
import requests
import json
# Baidu search demo.  The first URL is immediately overwritten; only the
# second one (with the wd= query already encoded) is requested.
url = 'https://www.baidu.com'
url = 'http://www.baidu.com/s?wd=python'
r = requests.get(url)
# r.url shows the final URL after any redirects / encoding.
print(r.url)
print(r.text)
# Same search, but letting requests build the query string from `params`.
url = 'http://www.baidu.com/s'
params = {'wd': 'python'}
r = requests.get(url, params=params)
print(r.text)
print(r.url)
# POST a JSON-encoded string as the raw body.
# NOTE(review): no Content-Type header is set, so the server does not see
# this as JSON; `requests.post(url, json=data)` would be the idiomatic way.
data = {'key1': 'value1', 'key2': 'value2'}
data = json.dumps(data)
r = requests.post('https://www.baidu.com', data=data)
print(r.text)
print(r)
|
normal
|
{
"blob_id": "325770130473153d092d3058587e9666625e12d0",
"index": 5670,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(r.url)\nprint(r.text)\n<mask token>\nprint(r.text)\nprint(r.url)\n<mask token>\nprint(r.text)\nprint(r)\n",
"step-3": "<mask token>\nurl = 'https://www.baidu.com'\nurl = 'http://www.baidu.com/s?wd=python'\nr = requests.get(url)\nprint(r.url)\nprint(r.text)\nurl = 'http://www.baidu.com/s'\nparams = {'wd': 'python'}\nr = requests.get(url, params=params)\nprint(r.text)\nprint(r.url)\ndata = {'key1': 'value1', 'key2': 'value2'}\ndata = json.dumps(data)\nr = requests.post('https://www.baidu.com', data=data)\nprint(r.text)\nprint(r)\n",
"step-4": "<mask token>\nimport requests\nimport json\nurl = 'https://www.baidu.com'\nurl = 'http://www.baidu.com/s?wd=python'\nr = requests.get(url)\nprint(r.url)\nprint(r.text)\nurl = 'http://www.baidu.com/s'\nparams = {'wd': 'python'}\nr = requests.get(url, params=params)\nprint(r.text)\nprint(r.url)\ndata = {'key1': 'value1', 'key2': 'value2'}\ndata = json.dumps(data)\nr = requests.post('https://www.baidu.com', data=data)\nprint(r.text)\nprint(r)\n",
"step-5": "# coding=utf-8\n\"\"\"\n@Author: Freshield\n@Contact: [email protected]\n@File: a1_test_call.py\n@Time: 2021-01-20 17:40\n@Last_update: 2021-01-20 17:40\n@Desc: None\n@==============================================@\n@ _____ _ _ _ _ @\n@ | __|___ ___ ___| |_|_|___| |_| | @\n@ | __| _| -_|_ -| | | -_| | . | @\n@ |__| |_| |___|___|_|_|_|___|_|___| @\n@ Freshield @\n@==============================================@\n\"\"\"\nimport requests\nimport json\n\nurl = 'https://www.baidu.com'\nurl = 'http://www.baidu.com/s?wd=python'\n\nr = requests.get(url)\n\nprint(r.url)\nprint(r.text)\n\nurl = 'http://www.baidu.com/s'\nparams = {'wd': 'python'}\nr = requests.get(url, params=params)\nprint(r.text)\nprint(r.url)\n\ndata = {'key1': 'value1', 'key2': 'value2'}\ndata = json.dumps(data)\nr = requests.post('https://www.baidu.com', data=data)\nprint(r.text)\nprint(r)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def odds():
    """Yield the odd positive integers 1, 3, 5, ... forever."""
    current = -1
    while True:
        current += 2
        yield current
def pi_series():
    """Yield successive partial sums of the Leibniz series for pi.

    The series is 4/1 - 4/3 + 4/5 - 4/7 + ...; one approximation is
    produced per term, alternating above and below pi.
    """
    terms = odds()
    total = 0
    sign = 1
    while True:
        total += sign * (4 / next(terms))
        sign = -sign
        yield total
# Drive the generator: each iteration prints the next partial sum.
approx_pi = pi_series()
# The higher the range used here, the closer the final value gets to pi.
for x in range(10000):
    print(next(approx_pi))
|
normal
|
{
"blob_id": "26ef7de89e2e38c419310cc66a33d5dc0575fc0d",
"index": 5012,
"step-1": "def odds():\n n = 1\n while True:\n yield n\n n += 2\n\n\n<mask token>\n",
"step-2": "def odds():\n n = 1\n while True:\n yield n\n n += 2\n\n\ndef pi_series():\n odd_nums = odds()\n approximation = 0\n while True:\n approximation += 4 / next(odd_nums)\n yield approximation\n approximation -= 4 / next(odd_nums)\n yield approximation\n\n\n<mask token>\n",
"step-3": "def odds():\n n = 1\n while True:\n yield n\n n += 2\n\n\ndef pi_series():\n odd_nums = odds()\n approximation = 0\n while True:\n approximation += 4 / next(odd_nums)\n yield approximation\n approximation -= 4 / next(odd_nums)\n yield approximation\n\n\n<mask token>\nfor x in range(10000):\n print(next(approx_pi))\n",
"step-4": "def odds():\n n = 1\n while True:\n yield n\n n += 2\n\n\ndef pi_series():\n odd_nums = odds()\n approximation = 0\n while True:\n approximation += 4 / next(odd_nums)\n yield approximation\n approximation -= 4 / next(odd_nums)\n yield approximation\n\n\napprox_pi = pi_series()\nfor x in range(10000):\n print(next(approx_pi))\n",
"step-5": "# Generates an infinite series of odd numbers\ndef odds():\n n = 1\n while True:\n yield n\n n += 2\n\n\ndef pi_series():\n odd_nums = odds()\n approximation = 0\n while True:\n approximation += (4 / next(odd_nums))\n yield approximation\n approximation -= (4 / next(odd_nums))\n yield approximation\n\napprox_pi = pi_series()\n\n# The higher the range used here the closer to an acurate approximation of PI.\nfor x in range(10000):\n print(next(approx_pi))\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
a=input("Please enter the elements with spaces between them:").split()
n=len(a)
for i in range(n):
a[i]=int(a[i])
for i in range(n-1):
for j in range(n-i-1):
if a[j]>a[j+1]:
a[j],a[j+1]=a[j+1],a[j]
print("Sortes array :",a)
|
normal
|
{
"blob_id": "5c2a6802e89314c25f0264bbe2bc7ed2689a255a",
"index": 782,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(n):\n a[i] = int(a[i])\nfor i in range(n - 1):\n for j in range(n - i - 1):\n if a[j] > a[j + 1]:\n a[j], a[j + 1] = a[j + 1], a[j]\nprint('Sortes array :', a)\n",
"step-3": "a = input('Please enter the elements with spaces between them:').split()\nn = len(a)\nfor i in range(n):\n a[i] = int(a[i])\nfor i in range(n - 1):\n for j in range(n - i - 1):\n if a[j] > a[j + 1]:\n a[j], a[j + 1] = a[j + 1], a[j]\nprint('Sortes array :', a)\n",
"step-4": "a=input(\"Please enter the elements with spaces between them:\").split()\nn=len(a)\nfor i in range(n):\n a[i]=int(a[i])\nfor i in range(n-1):\n for j in range(n-i-1):\n if a[j]>a[j+1]:\n a[j],a[j+1]=a[j+1],a[j]\nprint(\"Sortes array :\",a)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
class Solution(object):
    """Two Sum II: two-pointer scan over a sorted input array."""

    def twoSum(self, numbers, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: List[int]
        """
        lo, hi = 0, len(numbers) - 1
        while lo < hi:
            pair_sum = numbers[lo] + numbers[hi]
            if pair_sum == target:
                # The problem wants 1-based positions.
                return [lo + 1, hi + 1]
            if pair_sum < target:
                lo += 1
            else:
                hi -= 1
|
normal
|
{
"blob_id": "51b3beee8659bccee0fbb64b80fdce18b693674b",
"index": 9481,
"step-1": "<mask token>\n",
"step-2": "class Solution(object):\n <mask token>\n",
"step-3": "class Solution(object):\n\n def twoSum(self, numbers, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n idx1 = 0\n idx2 = len(numbers) - 1\n while idx1 < idx2:\n left = numbers[idx1]\n right = numbers[idx2]\n if left + right < target:\n idx1 += 1\n elif left + right > target:\n idx2 -= 1\n else:\n return [idx1 + 1, idx2 + 1]\n",
"step-4": "class Solution(object):\n def twoSum(self, numbers, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: List[int]\n \"\"\"\n idx1 = 0\n idx2 = len(numbers)-1\n while(idx1<idx2): # can also use a for-loop: for num in numbers: \n left = numbers[idx1]\n right = numbers[idx2]\n if (left + right) < target:\n idx1 += 1\n elif (left + right) > target:\n idx2 -= 1\n else:\n return [idx1+1,idx2+1]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""updateimage URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from updateapp.views import jsonre_data,jsonView,JsonView2,SerializeView,Serializeall
from updateapp.api import views,urls
from django.conf.urls import url,include
# URL routing table for the update API.
# BUG FIX: the original listed r'^serialize/$' twice; Django only ever
# matches the first entry, so the duplicate was dead and has been removed.
urlpatterns = [
    url(r'^$', jsonre_data),
    url(r'^serialize/$', SerializeView.as_view()),
    url(r'^all/$', Serializeall.as_view()),
    url(r'^cbv1/$', jsonView.as_view()),
    url(r'^cbv2/$', JsonView2.as_view()),
    # Delegates everything under api/updates/ to the app-level urlconf.
    url(r'^api/updates/', include('updateapp.api.urls')),
    path('admin/', admin.site.urls),
]
|
normal
|
{
"blob_id": "b3b5f7eeb81e10a51eb0322bc5278d33ee5f8e97",
"index": 9222,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [url('^$', jsonre_data), url('^serialize/$', SerializeView.\n as_view()), url('^serialize/$', SerializeView.as_view()), url('^all/$',\n Serializeall.as_view()), url('^cbv1/$', jsonView.as_view()), url(\n '^cbv2/$', JsonView2.as_view()), url('^api/updates/', include(\n 'updateapp.api.urls')), path('admin/', admin.site.urls)]\n",
"step-3": "<mask token>\nfrom django.contrib import admin\nfrom django.urls import path\nfrom updateapp.views import jsonre_data, jsonView, JsonView2, SerializeView, Serializeall\nfrom updateapp.api import views, urls\nfrom django.conf.urls import url, include\nurlpatterns = [url('^$', jsonre_data), url('^serialize/$', SerializeView.\n as_view()), url('^serialize/$', SerializeView.as_view()), url('^all/$',\n Serializeall.as_view()), url('^cbv1/$', jsonView.as_view()), url(\n '^cbv2/$', JsonView2.as_view()), url('^api/updates/', include(\n 'updateapp.api.urls')), path('admin/', admin.site.urls)]\n",
"step-4": "\"\"\"updateimage URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom updateapp.views import jsonre_data,jsonView,JsonView2,SerializeView,Serializeall\nfrom updateapp.api import views,urls\nfrom django.conf.urls import url,include\n\nurlpatterns = [\n url(r'^$',jsonre_data),\n \n url(r'^serialize/$',SerializeView.as_view()),\n \n url(r'^serialize/$',SerializeView.as_view()),\n \n url(r'^all/$',Serializeall.as_view()),\n url(r'^cbv1/$',jsonView.as_view()),\n url(r'^cbv2/$',JsonView2.as_view()),\n url(r'^api/updates/',include('updateapp.api.urls')),\n\n path('admin/', admin.site.urls),\n\n]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import sys
import time
import numpy as np
import vii
import cnn
from cnn._utils import (FLOAT_DTYPE,
_multi_convolve_image,
_opencl_multi_convolve_image,
_relu_max_pool_image,
_opencl_relu_max_pool_image)
# OpenCL work-group sizes (x, y, z) passed to the device kernels below.
GROUPS = 25, 20, 1
def subsample(x, pool_size):
    """Keep every `pool_size`-th element along the first two axes of `x`.

    The extents are first truncated down to a multiple of `pool_size`, so
    ragged borders are dropped.  Fixes the original TODO ("make it work
    with pool size > 2"): the slice stride was hard-coded to 2, which was
    only correct for pool_size == 2.
    """
    dx, dy = [int(p) for p in pool_size * (np.array(x.shape[0:2]) // pool_size)]
    return x[:dx:pool_size, :dy:pool_size]
def probe_time(func):
    """Decorator: print the wall-clock duration of every call to *func*."""
    from functools import wraps  # local import keeps the module header untouched

    @wraps(func)  # preserve __name__/__doc__ so introspection still works
    def wrapper(*args, **kwargs):
        t0 = time.time()
        res = func(*args, **kwargs)
        dt = time.time() - t0
        print('Time (%s): %f' % (func.__name__, dt))
        return res
    return wrapper
# Timed wrapper around the CPU convolution kernel from cnn._utils.
@probe_time
def cpu_multi_convolve_image(*args):
    return _multi_convolve_image(*args)
# Timed wrapper around the CPU ReLU + max-pool kernel.
@probe_time
def cpu_relu_max_pool_image(*args):
    return _relu_max_pool_image(*args)
# Timed wrapper around the OpenCL convolution kernel.
@probe_time
def opencl_multi_convolve_image(*args):
    return _opencl_multi_convolve_image(*args)
# Timed wrapper around the OpenCL ReLU + max-pool kernel.
@probe_time
def opencl_relu_max_pool_image(*args):
    return _opencl_relu_max_pool_image(*args)
###########################################################################
# Defaults; each may be overridden positionally on the command line:
#   argv[1] image file, argv[2] model file, argv[3] OpenCL device index.
fimg = 'pizza.png'
fmod = 'feb2.h5'
device = 0
brute_force = False
if len(sys.argv) > 1:
    fimg = sys.argv[1]
    if len(sys.argv) > 2:
        fmod = sys.argv[2]
        if len(sys.argv) > 3:
            # BUG FIX: a negative index used to be replaced with None, which
            # made the later `device < 0` CPU-fallback checks raise TypeError
            # on Python 3.  Keep it negative: the dispatchers below treat
            # device < 0 as "run on the CPU".
            device = int(sys.argv[3])
img = vii.load_image(fimg)
classif = cnn.load_image_classifier(fmod)
def multi_convolve_image(data, kernel, bias, dil_x, dil_y):
    """Dispatch the multi-channel convolution to OpenCL or the CPU.

    A non-negative module-level `device` selects the OpenCL path with
    the configured work-group sizes; otherwise the CPU kernel runs.
    """
    if device >= 0:
        return opencl_multi_convolve_image(data, kernel, bias, dil_x, dil_y,
                                           device, *GROUPS[0:2])
    return cpu_multi_convolve_image(data, kernel, bias, dil_x, dil_y)
def relu_max_pool_image(data, size_x, size_y, dil_x, dil_y):
    """Dispatch ReLU + max-pooling to OpenCL or the CPU.

    Mirrors multi_convolve_image: device >= 0 means the OpenCL kernel,
    anything else falls back to the CPU implementation.
    """
    if device >= 0:
        return opencl_relu_max_pool_image(data, size_x, size_y, dil_x, dil_y,
                                          device, *GROUPS)
    return cpu_relu_max_pool_image(data, size_x, size_y, dil_x, dil_y)
###########################################################################
print('CNN test')
# Pick a random crop position so each run exercises a different patch.
x = np.random.randint(img.dims[0] - classif.image_size[0] + 1)
y = np.random.randint(img.dims[1] - classif.image_size[1] + 1)
# Crop to the classifier's input size and normalize 8-bit pixels to [0, 1].
data = img.get_data().astype(FLOAT_DTYPE)[x:(x + classif.image_size[0]), y:(y + classif.image_size[1])] / 255
# Reference output from the library's own forward pass.
gold = classif.run(data)
# Re-run the forward pass by hand using the low-level kernels.
flow = data
for i in range(len(classif.conv_filters)):
    kernel, bias = classif.get_weights(i)
    # Convolve, then trim a one-pixel border (NOTE(review): assumes 3x3
    # convolution kernels — confirm against the model definition).
    flow = multi_convolve_image(flow, kernel, bias, 1, 1)[1:-1, 1:-1, :]
    # ReLU + max-pool, then keep every other row/column.
    flow = subsample(relu_max_pool_image(flow, classif.pool_size, classif.pool_size, 1, 1), 2)
flow = flow.flatten()
# Dense layers: affine transform, with ReLU on every layer except the last.
for i in range(len(classif.conv_filters), len(classif.layers)):
    kernel, bias = classif.get_weights(i)
    flow = np.sum(kernel * np.expand_dims(flow, 1), 0) + bias
    if i < (len(classif.layers) - 1):
        flow = np.maximum(flow, 0)
silver = cnn.softmax(flow)
# The two implementations should agree to floating-point precision.
print('error = %f' % np.max(np.abs(gold - silver)))
|
normal
|
{
"blob_id": "8ec257d5dfe84e363e3c3aa5adee3470c20d1765",
"index": 5866,
"step-1": "<mask token>\n\n\n@probe_time\ndef opencl_multi_convolve_image(*args):\n return _opencl_multi_convolve_image(*args)\n\n\n<mask token>\n\n\ndef multi_convolve_image(data, kernel, bias, dil_x, dil_y):\n if device < 0:\n return cpu_multi_convolve_image(data, kernel, bias, dil_x, dil_y)\n else:\n return opencl_multi_convolve_image(data, kernel, bias, dil_x, dil_y,\n device, *GROUPS[0:2])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef subsample(x, pool_size):\n dx, dy = [int(p) for p in pool_size * (np.array(x.shape[0:2]) // pool_size)\n ]\n return x[:dx:2, :dy:2]\n\n\n<mask token>\n\n\n@probe_time\ndef cpu_multi_convolve_image(*args):\n return _multi_convolve_image(*args)\n\n\n<mask token>\n\n\n@probe_time\ndef opencl_multi_convolve_image(*args):\n return _opencl_multi_convolve_image(*args)\n\n\n@probe_time\ndef opencl_relu_max_pool_image(*args):\n return _opencl_relu_max_pool_image(*args)\n\n\n<mask token>\n\n\ndef multi_convolve_image(data, kernel, bias, dil_x, dil_y):\n if device < 0:\n return cpu_multi_convolve_image(data, kernel, bias, dil_x, dil_y)\n else:\n return opencl_multi_convolve_image(data, kernel, bias, dil_x, dil_y,\n device, *GROUPS[0:2])\n\n\ndef relu_max_pool_image(data, size_x, size_y, dil_x, dil_y):\n if device < 0:\n return cpu_relu_max_pool_image(data, size_x, size_y, dil_x, dil_y)\n else:\n return opencl_relu_max_pool_image(data, size_x, size_y, dil_x,\n dil_y, device, *GROUPS)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef subsample(x, pool_size):\n dx, dy = [int(p) for p in pool_size * (np.array(x.shape[0:2]) // pool_size)\n ]\n return x[:dx:2, :dy:2]\n\n\n<mask token>\n\n\n@probe_time\ndef cpu_multi_convolve_image(*args):\n return _multi_convolve_image(*args)\n\n\n@probe_time\ndef cpu_relu_max_pool_image(*args):\n return _relu_max_pool_image(*args)\n\n\n@probe_time\ndef opencl_multi_convolve_image(*args):\n return _opencl_multi_convolve_image(*args)\n\n\n@probe_time\ndef opencl_relu_max_pool_image(*args):\n return _opencl_relu_max_pool_image(*args)\n\n\n<mask token>\n\n\ndef multi_convolve_image(data, kernel, bias, dil_x, dil_y):\n if device < 0:\n return cpu_multi_convolve_image(data, kernel, bias, dil_x, dil_y)\n else:\n return opencl_multi_convolve_image(data, kernel, bias, dil_x, dil_y,\n device, *GROUPS[0:2])\n\n\ndef relu_max_pool_image(data, size_x, size_y, dil_x, dil_y):\n if device < 0:\n return cpu_relu_max_pool_image(data, size_x, size_y, dil_x, dil_y)\n else:\n return opencl_relu_max_pool_image(data, size_x, size_y, dil_x,\n dil_y, device, *GROUPS)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef subsample(x, pool_size):\n dx, dy = [int(p) for p in pool_size * (np.array(x.shape[0:2]) // pool_size)\n ]\n return x[:dx:2, :dy:2]\n\n\ndef probe_time(func):\n\n def wrapper(*args, **kwargs):\n t0 = time.time()\n res = func(*args, **kwargs)\n dt = time.time() - t0\n print('Time (%s): %f' % (func.__name__, dt))\n return res\n return wrapper\n\n\n@probe_time\ndef cpu_multi_convolve_image(*args):\n return _multi_convolve_image(*args)\n\n\n@probe_time\ndef cpu_relu_max_pool_image(*args):\n return _relu_max_pool_image(*args)\n\n\n@probe_time\ndef opencl_multi_convolve_image(*args):\n return _opencl_multi_convolve_image(*args)\n\n\n@probe_time\ndef opencl_relu_max_pool_image(*args):\n return _opencl_relu_max_pool_image(*args)\n\n\n<mask token>\nif len(sys.argv) > 1:\n fimg = sys.argv[1]\n if len(sys.argv) > 2:\n fmod = sys.argv[2]\n if len(sys.argv) > 3:\n device = int(sys.argv[3])\n if device < 0:\n device = None\n<mask token>\n\n\ndef multi_convolve_image(data, kernel, bias, dil_x, dil_y):\n if device < 0:\n return cpu_multi_convolve_image(data, kernel, bias, dil_x, dil_y)\n else:\n return opencl_multi_convolve_image(data, kernel, bias, dil_x, dil_y,\n device, *GROUPS[0:2])\n\n\ndef relu_max_pool_image(data, size_x, size_y, dil_x, dil_y):\n if device < 0:\n return cpu_relu_max_pool_image(data, size_x, size_y, dil_x, dil_y)\n else:\n return opencl_relu_max_pool_image(data, size_x, size_y, dil_x,\n dil_y, device, *GROUPS)\n\n\nprint('CNN test')\n<mask token>\nfor i in range(len(classif.conv_filters)):\n kernel, bias = classif.get_weights(i)\n flow = multi_convolve_image(flow, kernel, bias, 1, 1)[1:-1, 1:-1, :]\n flow = subsample(relu_max_pool_image(flow, classif.pool_size, classif.\n pool_size, 1, 1), 2)\n<mask token>\nfor i in range(len(classif.conv_filters), len(classif.layers)):\n kernel, bias = classif.get_weights(i)\n flow = np.sum(kernel * np.expand_dims(flow, 1), 0) + bias\n if i < len(classif.layers) - 1:\n flow = 
np.maximum(flow, 0)\n<mask token>\nprint('error = %f' % np.max(np.abs(gold - silver)))\n",
"step-5": "import sys\nimport time\nimport numpy as np\n\nimport vii\n\nimport cnn\nfrom cnn._utils import (FLOAT_DTYPE,\n _multi_convolve_image,\n _opencl_multi_convolve_image,\n _relu_max_pool_image,\n _opencl_relu_max_pool_image)\n\n\nGROUPS = 25, 20, 1\n\ndef subsample(x, pool_size):\n # Make sure it works with pool size > 2 !!!!\n dx, dy = [int(p) for p in pool_size * (np.array(x.shape[0:2]) // pool_size)]\n return x[:dx:2, :dy:2]\n\n\ndef probe_time(func):\n def wrapper(*args, **kwargs):\n t0 = time.time()\n res = func(*args, **kwargs)\n dt = time.time() - t0\n print('Time (%s): %f' % (func.__name__, dt))\n return res\n return wrapper\n\n\n\n@probe_time\ndef cpu_multi_convolve_image(*args):\n return _multi_convolve_image(*args)\n\n\n@probe_time\ndef cpu_relu_max_pool_image(*args):\n return _relu_max_pool_image(*args)\n\n\n@probe_time\ndef opencl_multi_convolve_image(*args):\n return _opencl_multi_convolve_image(*args)\n\n\n@probe_time\ndef opencl_relu_max_pool_image(*args):\n return _opencl_relu_max_pool_image(*args)\n\n\n###########################################################################\n\nfimg = 'pizza.png'\nfmod = 'feb2.h5'\ndevice = 0\nbrute_force = False\nif len(sys.argv) > 1:\n fimg = sys.argv[1]\n if len(sys.argv) > 2:\n fmod = sys.argv[2]\n if len(sys.argv) > 3:\n device = int(sys.argv[3])\n if device < 0:\n device = None\nimg = vii.load_image(fimg)\nclassif = cnn.load_image_classifier(fmod)\n\n\ndef multi_convolve_image(data, kernel, bias, dil_x, dil_y):\n if device < 0:\n return cpu_multi_convolve_image(data, kernel, bias, dil_x, dil_y)\n else:\n return opencl_multi_convolve_image(data, kernel, bias, dil_x, dil_y, device, *(GROUPS[0:2]))\n\n\ndef relu_max_pool_image(data, size_x, size_y, dil_x, dil_y):\n if device < 0:\n return cpu_relu_max_pool_image(data, size_x, size_y, dil_x, dil_y)\n else:\n return opencl_relu_max_pool_image(data, size_x, size_y, dil_x, dil_y, device, 
*GROUPS)\n\n\n###########################################################################\n \nprint('CNN test')\n\nx = np.random.randint(img.dims[0] - classif.image_size[0] + 1)\ny = np.random.randint(img.dims[1] - classif.image_size[1] + 1)\n\ndata = img.get_data().astype(FLOAT_DTYPE)[x:(x + classif.image_size[0]), y:(y + classif.image_size[1])] / 255\ngold = classif.run(data)\n\nflow = data\nfor i in range(len(classif.conv_filters)):\n kernel, bias = classif.get_weights(i)\n flow = multi_convolve_image(flow, kernel, bias, 1, 1)[1:-1, 1:-1, :]\n flow = subsample(relu_max_pool_image(flow, classif.pool_size, classif.pool_size, 1, 1), 2)\nflow = flow.flatten()\n\nfor i in range(len(classif.conv_filters), len(classif.layers)):\n kernel, bias = classif.get_weights(i)\n flow = np.sum(kernel * np.expand_dims(flow, 1), 0) + bias\n if i < (len(classif.layers) - 1):\n flow = np.maximum(flow, 0)\n\nsilver = cnn.softmax(flow)\n\nprint('error = %f' % np.max(np.abs(gold - silver))) \n",
"step-ids": [
2,
6,
7,
9,
12
]
}
|
[
2,
6,
7,
9,
12
] |
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 9 20:06:32 2020
@author: Supriyo
"""
import networkx as nx
import matplotlib.pyplot as plt
# Build the graphs: a complete graph on 10 nodes and a G(n, p) random
# graph with edge probability 0.5.  (The original also created an empty
# nx.Graph() that was immediately overwritten, and carried a block of
# commented-out scratch code; both removed.)
g = nx.complete_graph(10)
h = nx.gnp_random_graph(10, 0.5)  # 0.5 is the edge probability

# Draw both graphs into the same matplotlib figure and display it.
nx.draw(g)
nx.draw(h)
plt.show()

# Persist the complete graph in GEXF format (readable by e.g. Gephi).
nx.write_gexf(g, "test.gexf")
|
normal
|
{
"blob_id": "3bfa9d42e3fd61cf6b7ffaac687f66c2f4bc073e",
"index": 3906,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nnx.draw(g)\nnx.draw(h)\nplt.show()\nnx.write_gexf(g, 'test.gexf')\n",
"step-3": "<mask token>\ng = nx.Graph()\ng = nx.complete_graph(10)\nh = nx.gnp_random_graph(10, 0.5)\nnx.draw(g)\nnx.draw(h)\nplt.show()\nnx.write_gexf(g, 'test.gexf')\n",
"step-4": "<mask token>\nimport networkx as nx\nimport matplotlib.pyplot as plt\ng = nx.Graph()\ng = nx.complete_graph(10)\nh = nx.gnp_random_graph(10, 0.5)\nnx.draw(g)\nnx.draw(h)\nplt.show()\nnx.write_gexf(g, 'test.gexf')\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Nov 9 20:06:32 2020\r\n\r\n@author: Supriyo\r\n\"\"\"\r\n\r\nimport networkx as nx\r\nimport matplotlib.pyplot as plt \r\n\r\ng=nx.Graph()\r\n\r\n\r\n#l=[1,2,3]\r\n\r\n# g.add_node(1)\r\n# g.add_node(2)\r\n# g.add_node(3)\r\n # g.add_nodes_from(l)\r\n \r\n # g.add_edge(1,2)\r\n # g.add_edge(2,3)\r\n # g.add_edge(3,1)\r\n\r\n# print(g.nodes())\r\n# print(g.edges())\r\n\r\ng=nx.complete_graph(10)\r\n\r\nh=nx.gnp_random_graph(10,0.5)#0.55 is the probability\r\nnx.draw(g)\r\nnx.draw(h)\r\nplt.show()\r\n\r\nnx.write_gexf(g,\"test.gexf\")",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-13 02:06
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
    """Initial migration: create the ``Assistants`` table."""

    initial = True

    # ``stores.Store`` must already exist for the foreign key below.
    dependencies = [('stores', '0001_initial')]

    operations = [
        migrations.CreateModel(
            name='Assistants',
            fields=[
                # UUID primary key generated client-side rather than an
                # auto-increment integer id.
                ('id',
                 models.UUIDField(default=uuid.uuid4, editable=False,
                                  primary_key=True, serialize=False)),
                ('name_assistants', models.CharField(max_length=255)),
                ('phone_assistants', models.IntegerField()),
                ('email_assistants', models.EmailField(max_length=254)),
                ('address_assistants', models.TextField()),
                # Refreshed on every save (auto_now).
                ('timestamp', models.DateField(auto_now=True)),
                # Reverse accessor from Store is ``store.assistants``.
                ('fkstore',
                 models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
                                   related_name='assistants',
                                   to='stores.Store')),
            ],
        ),
    ]
|
normal
|
{
"blob_id": "e95de58828c63dc8ae24efff314665a308f6ce0c",
"index": 983,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [('stores', '0001_initial')]\n operations = [migrations.CreateModel(name='Assistants', fields=[('id',\n models.UUIDField(default=uuid.uuid4, editable=False, primary_key=\n True, serialize=False)), ('name_assistants', models.CharField(\n max_length=255)), ('phone_assistants', models.IntegerField()), (\n 'email_assistants', models.EmailField(max_length=254)), (\n 'address_assistants', models.TextField()), ('timestamp', models.\n DateField(auto_now=True)), ('fkstore', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, related_name='assistants', to=\n 'stores.Store'))])]\n",
"step-4": "from __future__ import unicode_literals\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport uuid\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [('stores', '0001_initial')]\n operations = [migrations.CreateModel(name='Assistants', fields=[('id',\n models.UUIDField(default=uuid.uuid4, editable=False, primary_key=\n True, serialize=False)), ('name_assistants', models.CharField(\n max_length=255)), ('phone_assistants', models.IntegerField()), (\n 'email_assistants', models.EmailField(max_length=254)), (\n 'address_assistants', models.TextField()), ('timestamp', models.\n DateField(auto_now=True)), ('fkstore', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, related_name='assistants', to=\n 'stores.Store'))])]\n",
"step-5": "# -*- coding: utf-8 -*-\n# Generated by Django 1.11.7 on 2017-12-13 02:06\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('stores', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Assistants',\n fields=[\n ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),\n ('name_assistants', models.CharField(max_length=255)),\n ('phone_assistants', models.IntegerField()),\n ('email_assistants', models.EmailField(max_length=254)),\n ('address_assistants', models.TextField()),\n ('timestamp', models.DateField(auto_now=True)),\n ('fkstore', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='assistants', to='stores.Store')),\n ],\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def degenerate_verdict(sides):
    """Return 'YES' when the largest of the given side lengths equals the
    sum of the two smallest (a degenerate, zero-area triangle), else 'NO'.

    ``sides`` is any iterable of integers; only the three smallest values
    participate in the check, matching the original script's behaviour.
    """
    ordered = sorted(sides)
    return "YES" if ordered[0] + ordered[1] == ordered[2] else "NO"


def main():
    """Read a case count, then one whitespace-separated line per case."""
    for _ in range(int(input())):
        print(degenerate_verdict(map(int, input().split())))


# Guard so importing this module does not block on stdin.
if __name__ == "__main__":
    main()
|
normal
|
{
"blob_id": "d1200006b8d7a18b11b01eff4fbf38d9dfd8958e",
"index": 5758,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile t:\n x = list(map(int, input().split()))\n x.sort()\n if x[0] + x[1] == x[2]:\n print('YES')\n else:\n print('NO')\n t -= 1\n",
"step-3": "t = int(input())\nwhile t:\n x = list(map(int, input().split()))\n x.sort()\n if x[0] + x[1] == x[2]:\n print('YES')\n else:\n print('NO')\n t -= 1\n",
"step-4": "t = int(input())\r\nwhile t:\r\n\tx = list(map(int, input().split()))\r\n\tx.sort()\r\n\tif(x[0]+x[1]==x[2]):\r\n\t\tprint(\"YES\")\r\n\telse:\r\n\t\tprint(\"NO\")\r\n\tt-=1",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from pprint import pprint
from collections import Counter
from copy import deepcopy
class Sudoku():
    """9x9 Sudoku solver driven by candidate elimination.

    ``grid`` is a list of nine 9-element rows where 0 marks an empty
    cell.  ``sub_grid`` mirrors it with a list of candidate digits per
    cell; ``perform`` repeatedly narrows those lists until a fixed point.
    """

    def __init__(self, grid):
        """Store the puzzle and build the initial candidate grid."""
        self.grid = grid
        self.sub_grid = self.create_sub_grid(self.grid)

    def create_sub_grid(self, grid):
        """Return the 9x9 grid of candidate lists.

        Solved cells become one-element lists; empty cells get every
        digit not already present in their row, column or 3x3 box.
        """
        sub_grid = []
        for i in range(9):
            sub = []
            for j in range(9):
                if grid[i][j] == 0:
                    sub.append(self.missing_numbers(i, j))
                else:
                    sub.append([grid[i][j]])
            sub_grid.append(sub)
        del sub
        return sub_grid

    def missing_numbers(self, row, column):
        """Return the digits that could legally fill (row, column)."""
        rrow, ccolumn = self.row_and_column(self.grid, row, column)
        cell = self.cell_3by3(row, column)
        missing_num = list({i for i in range(1, 10)} - set(rrow + ccolumn + cell))
        return missing_num

    def cell_3by3(self, row, column):
        """Return the nine values of the 3x3 box containing (row, column)."""
        cell = []
        a = row // 3
        b = column // 3
        for i in range(9):
            for j in range(9):
                if i // 3 == a and j // 3 == b:
                    # Bug fix: read self.grid, not the module-level ``grid``
                    # global, so the class works for any puzzle instance.
                    cell.append(self.grid[i][j])
        return cell

    def row_and_column(self, grid, row, column):
        """Return (row list, column list) of ``grid`` at the given indices."""
        r = grid[row]
        c = []
        for j in range(9):
            c.append(grid[j][column])
        return r, c

    def step_1(self, sub_grid, num):
        """Hidden-singles pass over row ``num`` and column ``num``.

        A digit appearing in exactly one candidate list of a row (or
        column) must go in that cell, so the list collapses to it.
        Returns the refined sub grid.
        """
        row, column = self.row_and_column(sub_grid, num, num)
        # For rows
        row_flatten = sum(row, [])
        single_values = [i for i, j in Counter(row_flatten).items() if j == 1]
        for i in range(len(sub_grid)):
            for j in single_values:
                if j in sub_grid[num][i] and len(sub_grid[num][i]) != 1:
                    sub_grid[num][i] = [j]
        # For columns
        column_flatten = sum(column, [])
        column_single_values = [i for i, j in Counter(column_flatten).items() if j == 1]
        for i in range(len(sub_grid)):
            for j in column_single_values:
                if j in sub_grid[i][num] and len(sub_grid[i][num]) != 1:
                    sub_grid[i][num] = [j]
        return sub_grid

    def step_2(self, sub_grid, num):
        """Naked-singles pass over row ``num`` and column ``num``.

        Digits already fixed somewhere in a row (or column) are removed
        from the other candidate lists of that row (or column).
        Returns the refined sub grid.
        """
        row, column = self.row_and_column(sub_grid, num, num)
        # For rows
        single_value_list = []
        for i in range(len(row)):
            if len(sub_grid[num][i]) == 1:
                single_value_list.append(sub_grid[num][i])
        single_value_list_flatten = sum(single_value_list, [])
        for i in range(len(sub_grid)):
            if len(sub_grid[num][i]) != 1:
                for j in single_value_list_flatten:
                    if j in sub_grid[num][i]:
                        sub_grid[num][i].remove(j)
        # For columns
        single_value_list = []
        for i in range(len(column)):
            if len(sub_grid[i][num]) == 1:
                single_value_list.append(sub_grid[i][num])
        single_value_list_flatten = sum(single_value_list, [])
        for i in range(len(sub_grid)):
            if len(sub_grid[i][num]) != 1:
                for j in single_value_list_flatten:
                    if j in sub_grid[i][num]:
                        sub_grid[i][num].remove(j)
        return sub_grid

    def step_3(self, sub_grid, num):
        # Placeholder for a future 3x3-box elimination pass.
        pass

    def perform(self):
        """Run step_1 and step_2 over every row/column index until the
        candidate grid stops changing (fixed point)."""
        temp = []
        while self.sub_grid != temp:
            temp = deepcopy(self.sub_grid)
            # Bug fix: iterate over self.grid, not the module-level
            # ``grid`` global.
            for i in range(len(self.grid)):
                self.sub_grid = self.step_1(self.sub_grid, i)
                self.sub_grid = self.step_2(self.sub_grid, i)

    def solve(self):
        """Propagate constraints, then print the candidate grid."""
        self.perform()
        for i in range(9):
            for j in range(9):
                print(self.sub_grid[i][j], end=' ')
            print()
# grid = [
# [0,3,0,0,1,0,0,6,0],
# [7,5,0,0,3,0,0,4,8],
# [0,0,6,9,8,4,3,0,0],
# [0,0,3,0,0,0,8,0,0],
# [9,1,2,0,0,0,6,7,4],
# [0,0,4,0,0,0,5,0,0],
# [0,0,1,6,7,5,2,0,0],
# [6,8,0,0,9,0,0,1,5],
# [0,9,0,0,4,0,0,3,0]
# ]
# grid = [
# [6,0,0,1,0,8,2,0,3],
# [0,2,0,0,4,0,0,9,0],
# [8,0,3,0,0,5,4,0,0],
# [5,0,4,6,0,7,0,0,9],
# [0,3,0,0,0,0,0,5,0],
# [7,0,0,8,0,3,1,0,2],
# [0,0,1,7,0,0,9,0,6],
# [0,8,0,0,3,0,0,2,0],
# [3,0,2,9,0,4,0,0,5]
# ]
# Puzzle to solve: 0 marks an unknown cell.  The name ``grid`` is kept at
# module level because the solver references it.
grid = [
    [8, 0, 6, 0, 0, 0, 4, 0, 9],
    [0, 0, 0, 0, 0, 0, 0, 0, 0],
    [0, 9, 2, 0, 0, 0, 5, 0, 8],
    [0, 0, 9, 0, 7, 1, 3, 0, 0],
    [5, 0, 8, 0, 0, 0, 0, 2, 0],
    [0, 0, 4, 0, 5, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 7, 9, 1, 0],
    [0, 0, 0, 9, 0, 0, 0, 0, 7],
    [0, 7, 0, 0, 0, 3, 0, 0, 4],
]

solver = Sudoku(grid)
solver.solve()
|
normal
|
{
"blob_id": "4032503bba8a1dd273015d503f52b6ea2d932d1d",
"index": 3564,
"step-1": "<mask token>\n\n\nclass Sudoku:\n\n def __init__(self, grid):\n \"\"\"\n Initializes the grid\n \"\"\"\n self.grid = grid\n self.sub_grid = self.create_sub_grid(self.grid)\n\n def create_sub_grid(self, grid):\n \"\"\" \n Creates a Sub grid, containing the possible numbers within a cell\n Returns a Sub grid\n \"\"\"\n sub_grid = []\n for i in range(9):\n sub = []\n for j in range(9):\n if grid[i][j] == 0:\n sub.append(self.missing_numbers(i, j))\n else:\n sub.append([grid[i][j]])\n sub_grid.append(sub)\n del sub\n return sub_grid\n\n def missing_numbers(self, row, column):\n \"\"\"\n Returs the possible set of numbers of a particular row and column\n \"\"\"\n rrow, ccolumn = self.row_and_column(self.grid, row, column)\n cell = self.cell_3by3(row, column)\n missing_num = list({i for i in range(1, 10)} - set(rrow + ccolumn +\n cell))\n return missing_num\n\n def cell_3by3(self, row, column):\n \"\"\"\n Returns grid of 3 X 3\n \"\"\"\n cell = []\n a = row // 3\n b = column // 3\n for i in range(9):\n for j in range(9):\n if i // 3 == a and j // 3 == b:\n cell.append(grid[i][j])\n return cell\n\n def row_and_column(self, grid, row, column):\n \"\"\"\n Returns rows and columns\n \"\"\"\n r = grid[row]\n c = []\n for j in range(9):\n c.append(grid[j][column])\n return r, c\n\n def step_1(self, sub_grid, num):\n \"\"\"\n Reducing a list of clues to a single value based on row and column elimination\n Returns a refined sub grid\n \"\"\"\n row, column = self.row_and_column(sub_grid, num, num)\n row_flatten = sum(row, [])\n single_values = [i for i, j in Counter(row_flatten).items() if j == 1]\n for i in range(len(sub_grid)):\n for j in single_values:\n if j in sub_grid[num][i] and len(sub_grid[num][i]) != 1:\n sub_grid[num][i] = [j]\n column_flatten = sum(column, [])\n column_single_values = [i for i, j in Counter(column_flatten).items\n () if j == 1]\n for i in range(len(sub_grid)):\n for j in column_single_values:\n if j in sub_grid[i][num] and 
len(sub_grid[i][num]) != 1:\n sub_grid[i][num] = [j]\n return sub_grid\n <mask token>\n\n def step_3(self, sub_grid, num):\n pass\n\n def perform(self):\n \"\"\"\n Performs the step_1 and step_2 untill the Sub grid is solved\n Returns None\n \"\"\"\n temp = []\n while self.sub_grid != temp:\n temp = deepcopy(self.sub_grid)\n for i in range(len(grid)):\n self.sub_grid = self.step_1(self.sub_grid, i)\n self.sub_grid = self.step_2(self.sub_grid, i)\n\n def solve(self):\n \"\"\"\n Solves the Sub grid and prints the sub grid\n Returns None\n \"\"\"\n self.perform()\n for i in range(9):\n for j in range(9):\n print(self.sub_grid[i][j], end=' ')\n print()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Sudoku:\n\n def __init__(self, grid):\n \"\"\"\n Initializes the grid\n \"\"\"\n self.grid = grid\n self.sub_grid = self.create_sub_grid(self.grid)\n\n def create_sub_grid(self, grid):\n \"\"\" \n Creates a Sub grid, containing the possible numbers within a cell\n Returns a Sub grid\n \"\"\"\n sub_grid = []\n for i in range(9):\n sub = []\n for j in range(9):\n if grid[i][j] == 0:\n sub.append(self.missing_numbers(i, j))\n else:\n sub.append([grid[i][j]])\n sub_grid.append(sub)\n del sub\n return sub_grid\n\n def missing_numbers(self, row, column):\n \"\"\"\n Returs the possible set of numbers of a particular row and column\n \"\"\"\n rrow, ccolumn = self.row_and_column(self.grid, row, column)\n cell = self.cell_3by3(row, column)\n missing_num = list({i for i in range(1, 10)} - set(rrow + ccolumn +\n cell))\n return missing_num\n\n def cell_3by3(self, row, column):\n \"\"\"\n Returns grid of 3 X 3\n \"\"\"\n cell = []\n a = row // 3\n b = column // 3\n for i in range(9):\n for j in range(9):\n if i // 3 == a and j // 3 == b:\n cell.append(grid[i][j])\n return cell\n\n def row_and_column(self, grid, row, column):\n \"\"\"\n Returns rows and columns\n \"\"\"\n r = grid[row]\n c = []\n for j in range(9):\n c.append(grid[j][column])\n return r, c\n\n def step_1(self, sub_grid, num):\n \"\"\"\n Reducing a list of clues to a single value based on row and column elimination\n Returns a refined sub grid\n \"\"\"\n row, column = self.row_and_column(sub_grid, num, num)\n row_flatten = sum(row, [])\n single_values = [i for i, j in Counter(row_flatten).items() if j == 1]\n for i in range(len(sub_grid)):\n for j in single_values:\n if j in sub_grid[num][i] and len(sub_grid[num][i]) != 1:\n sub_grid[num][i] = [j]\n column_flatten = sum(column, [])\n column_single_values = [i for i, j in Counter(column_flatten).items\n () if j == 1]\n for i in range(len(sub_grid)):\n for j in column_single_values:\n if j in sub_grid[i][num] and 
len(sub_grid[i][num]) != 1:\n sub_grid[i][num] = [j]\n return sub_grid\n\n def step_2(self, sub_grid, num):\n \"\"\"\n Removes a number 'n' that fits at its correct position from other lists corresponding its row and column\n Returns refined sub grid\n \"\"\"\n row, column = self.row_and_column(sub_grid, num, num)\n single_value_list = []\n for i in range(len(row)):\n if len(sub_grid[num][i]) == 1:\n single_value_list.append(sub_grid[num][i])\n single_value_list_flatten = sum(single_value_list, [])\n for i in range(len(sub_grid)):\n if len(sub_grid[num][i]) != 1:\n for j in single_value_list_flatten:\n if j in sub_grid[num][i]:\n sub_grid[num][i].remove(j)\n single_value_list = []\n for i in range(len(column)):\n if len(sub_grid[i][num]) == 1:\n single_value_list.append(sub_grid[i][num])\n single_value_list_flatten = sum(single_value_list, [])\n for i in range(len(sub_grid)):\n if len(sub_grid[i][num]) != 1:\n for j in single_value_list_flatten:\n if j in sub_grid[i][num]:\n sub_grid[i][num].remove(j)\n return sub_grid\n\n def step_3(self, sub_grid, num):\n pass\n\n def perform(self):\n \"\"\"\n Performs the step_1 and step_2 untill the Sub grid is solved\n Returns None\n \"\"\"\n temp = []\n while self.sub_grid != temp:\n temp = deepcopy(self.sub_grid)\n for i in range(len(grid)):\n self.sub_grid = self.step_1(self.sub_grid, i)\n self.sub_grid = self.step_2(self.sub_grid, i)\n\n def solve(self):\n \"\"\"\n Solves the Sub grid and prints the sub grid\n Returns None\n \"\"\"\n self.perform()\n for i in range(9):\n for j in range(9):\n print(self.sub_grid[i][j], end=' ')\n print()\n\n\n<mask token>\nmat.solve()\n",
"step-3": "<mask token>\n\n\nclass Sudoku:\n\n def __init__(self, grid):\n \"\"\"\n Initializes the grid\n \"\"\"\n self.grid = grid\n self.sub_grid = self.create_sub_grid(self.grid)\n\n def create_sub_grid(self, grid):\n \"\"\" \n Creates a Sub grid, containing the possible numbers within a cell\n Returns a Sub grid\n \"\"\"\n sub_grid = []\n for i in range(9):\n sub = []\n for j in range(9):\n if grid[i][j] == 0:\n sub.append(self.missing_numbers(i, j))\n else:\n sub.append([grid[i][j]])\n sub_grid.append(sub)\n del sub\n return sub_grid\n\n def missing_numbers(self, row, column):\n \"\"\"\n Returs the possible set of numbers of a particular row and column\n \"\"\"\n rrow, ccolumn = self.row_and_column(self.grid, row, column)\n cell = self.cell_3by3(row, column)\n missing_num = list({i for i in range(1, 10)} - set(rrow + ccolumn +\n cell))\n return missing_num\n\n def cell_3by3(self, row, column):\n \"\"\"\n Returns grid of 3 X 3\n \"\"\"\n cell = []\n a = row // 3\n b = column // 3\n for i in range(9):\n for j in range(9):\n if i // 3 == a and j // 3 == b:\n cell.append(grid[i][j])\n return cell\n\n def row_and_column(self, grid, row, column):\n \"\"\"\n Returns rows and columns\n \"\"\"\n r = grid[row]\n c = []\n for j in range(9):\n c.append(grid[j][column])\n return r, c\n\n def step_1(self, sub_grid, num):\n \"\"\"\n Reducing a list of clues to a single value based on row and column elimination\n Returns a refined sub grid\n \"\"\"\n row, column = self.row_and_column(sub_grid, num, num)\n row_flatten = sum(row, [])\n single_values = [i for i, j in Counter(row_flatten).items() if j == 1]\n for i in range(len(sub_grid)):\n for j in single_values:\n if j in sub_grid[num][i] and len(sub_grid[num][i]) != 1:\n sub_grid[num][i] = [j]\n column_flatten = sum(column, [])\n column_single_values = [i for i, j in Counter(column_flatten).items\n () if j == 1]\n for i in range(len(sub_grid)):\n for j in column_single_values:\n if j in sub_grid[i][num] and 
len(sub_grid[i][num]) != 1:\n sub_grid[i][num] = [j]\n return sub_grid\n\n def step_2(self, sub_grid, num):\n \"\"\"\n Removes a number 'n' that fits at its correct position from other lists corresponding its row and column\n Returns refined sub grid\n \"\"\"\n row, column = self.row_and_column(sub_grid, num, num)\n single_value_list = []\n for i in range(len(row)):\n if len(sub_grid[num][i]) == 1:\n single_value_list.append(sub_grid[num][i])\n single_value_list_flatten = sum(single_value_list, [])\n for i in range(len(sub_grid)):\n if len(sub_grid[num][i]) != 1:\n for j in single_value_list_flatten:\n if j in sub_grid[num][i]:\n sub_grid[num][i].remove(j)\n single_value_list = []\n for i in range(len(column)):\n if len(sub_grid[i][num]) == 1:\n single_value_list.append(sub_grid[i][num])\n single_value_list_flatten = sum(single_value_list, [])\n for i in range(len(sub_grid)):\n if len(sub_grid[i][num]) != 1:\n for j in single_value_list_flatten:\n if j in sub_grid[i][num]:\n sub_grid[i][num].remove(j)\n return sub_grid\n\n def step_3(self, sub_grid, num):\n pass\n\n def perform(self):\n \"\"\"\n Performs the step_1 and step_2 untill the Sub grid is solved\n Returns None\n \"\"\"\n temp = []\n while self.sub_grid != temp:\n temp = deepcopy(self.sub_grid)\n for i in range(len(grid)):\n self.sub_grid = self.step_1(self.sub_grid, i)\n self.sub_grid = self.step_2(self.sub_grid, i)\n\n def solve(self):\n \"\"\"\n Solves the Sub grid and prints the sub grid\n Returns None\n \"\"\"\n self.perform()\n for i in range(9):\n for j in range(9):\n print(self.sub_grid[i][j], end=' ')\n print()\n\n\ngrid = [[8, 0, 6, 0, 0, 0, 4, 0, 9], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 9, 2,\n 0, 0, 0, 5, 0, 8], [0, 0, 9, 0, 7, 1, 3, 0, 0], [5, 0, 8, 0, 0, 0, 0, 2,\n 0], [0, 0, 4, 0, 5, 0, 0, 0, 0], [0, 0, 0, 0, 0, 7, 9, 1, 0], [0, 0, 0,\n 9, 0, 0, 0, 0, 7], [0, 7, 0, 0, 0, 3, 0, 0, 4]]\nmat = Sudoku(grid)\nmat.solve()\n",
"step-4": "from pprint import pprint\nfrom collections import Counter\nfrom copy import deepcopy\n\n\nclass Sudoku:\n\n def __init__(self, grid):\n \"\"\"\n Initializes the grid\n \"\"\"\n self.grid = grid\n self.sub_grid = self.create_sub_grid(self.grid)\n\n def create_sub_grid(self, grid):\n \"\"\" \n Creates a Sub grid, containing the possible numbers within a cell\n Returns a Sub grid\n \"\"\"\n sub_grid = []\n for i in range(9):\n sub = []\n for j in range(9):\n if grid[i][j] == 0:\n sub.append(self.missing_numbers(i, j))\n else:\n sub.append([grid[i][j]])\n sub_grid.append(sub)\n del sub\n return sub_grid\n\n def missing_numbers(self, row, column):\n \"\"\"\n Returs the possible set of numbers of a particular row and column\n \"\"\"\n rrow, ccolumn = self.row_and_column(self.grid, row, column)\n cell = self.cell_3by3(row, column)\n missing_num = list({i for i in range(1, 10)} - set(rrow + ccolumn +\n cell))\n return missing_num\n\n def cell_3by3(self, row, column):\n \"\"\"\n Returns grid of 3 X 3\n \"\"\"\n cell = []\n a = row // 3\n b = column // 3\n for i in range(9):\n for j in range(9):\n if i // 3 == a and j // 3 == b:\n cell.append(grid[i][j])\n return cell\n\n def row_and_column(self, grid, row, column):\n \"\"\"\n Returns rows and columns\n \"\"\"\n r = grid[row]\n c = []\n for j in range(9):\n c.append(grid[j][column])\n return r, c\n\n def step_1(self, sub_grid, num):\n \"\"\"\n Reducing a list of clues to a single value based on row and column elimination\n Returns a refined sub grid\n \"\"\"\n row, column = self.row_and_column(sub_grid, num, num)\n row_flatten = sum(row, [])\n single_values = [i for i, j in Counter(row_flatten).items() if j == 1]\n for i in range(len(sub_grid)):\n for j in single_values:\n if j in sub_grid[num][i] and len(sub_grid[num][i]) != 1:\n sub_grid[num][i] = [j]\n column_flatten = sum(column, [])\n column_single_values = [i for i, j in Counter(column_flatten).items\n () if j == 1]\n for i in range(len(sub_grid)):\n for j 
in column_single_values:\n if j in sub_grid[i][num] and len(sub_grid[i][num]) != 1:\n sub_grid[i][num] = [j]\n return sub_grid\n\n def step_2(self, sub_grid, num):\n \"\"\"\n Removes a number 'n' that fits at its correct position from other lists corresponding its row and column\n Returns refined sub grid\n \"\"\"\n row, column = self.row_and_column(sub_grid, num, num)\n single_value_list = []\n for i in range(len(row)):\n if len(sub_grid[num][i]) == 1:\n single_value_list.append(sub_grid[num][i])\n single_value_list_flatten = sum(single_value_list, [])\n for i in range(len(sub_grid)):\n if len(sub_grid[num][i]) != 1:\n for j in single_value_list_flatten:\n if j in sub_grid[num][i]:\n sub_grid[num][i].remove(j)\n single_value_list = []\n for i in range(len(column)):\n if len(sub_grid[i][num]) == 1:\n single_value_list.append(sub_grid[i][num])\n single_value_list_flatten = sum(single_value_list, [])\n for i in range(len(sub_grid)):\n if len(sub_grid[i][num]) != 1:\n for j in single_value_list_flatten:\n if j in sub_grid[i][num]:\n sub_grid[i][num].remove(j)\n return sub_grid\n\n def step_3(self, sub_grid, num):\n pass\n\n def perform(self):\n \"\"\"\n Performs the step_1 and step_2 untill the Sub grid is solved\n Returns None\n \"\"\"\n temp = []\n while self.sub_grid != temp:\n temp = deepcopy(self.sub_grid)\n for i in range(len(grid)):\n self.sub_grid = self.step_1(self.sub_grid, i)\n self.sub_grid = self.step_2(self.sub_grid, i)\n\n def solve(self):\n \"\"\"\n Solves the Sub grid and prints the sub grid\n Returns None\n \"\"\"\n self.perform()\n for i in range(9):\n for j in range(9):\n print(self.sub_grid[i][j], end=' ')\n print()\n\n\ngrid = [[8, 0, 6, 0, 0, 0, 4, 0, 9], [0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 9, 2,\n 0, 0, 0, 5, 0, 8], [0, 0, 9, 0, 7, 1, 3, 0, 0], [5, 0, 8, 0, 0, 0, 0, 2,\n 0], [0, 0, 4, 0, 5, 0, 0, 0, 0], [0, 0, 0, 0, 0, 7, 9, 1, 0], [0, 0, 0,\n 9, 0, 0, 0, 0, 7], [0, 7, 0, 0, 0, 3, 0, 0, 4]]\nmat = Sudoku(grid)\nmat.solve()\n",
"step-5": "\r\n\r\n\r\nfrom pprint import pprint\r\nfrom collections import Counter\r\nfrom copy import deepcopy\r\n\r\n\r\nclass Sudoku():\r\n def __init__(self, grid):\r\n '''\r\n Initializes the grid\r\n '''\r\n self.grid = grid\r\n self.sub_grid = self.create_sub_grid(self.grid)\r\n\r\n def create_sub_grid(self, grid):\r\n ''' \r\n Creates a Sub grid, containing the possible numbers within a cell\r\n Returns a Sub grid\r\n '''\r\n sub_grid = []\r\n for i in range(9):\r\n sub = []\r\n for j in range(9):\r\n if grid[i][j] == 0:\r\n sub.append(self.missing_numbers(i,j))\r\n else:\r\n sub.append([grid[i][j]])\r\n sub_grid.append(sub)\r\n del sub\r\n return sub_grid\r\n\r\n\r\n def missing_numbers(self, row, column):\r\n '''\r\n Returs the possible set of numbers of a particular row and column\r\n '''\r\n\r\n rrow, ccolumn = self.row_and_column(self.grid, row, column)\r\n cell = self.cell_3by3(row, column)\r\n \r\n missing_num = list({i for i in range(1, 10)} - set(rrow + ccolumn + cell))\r\n return missing_num\r\n\r\n\r\n\r\n def cell_3by3(self, row, column):\r\n '''\r\n Returns grid of 3 X 3\r\n '''\r\n\r\n cell = []\r\n a = row // 3\r\n b = column // 3\r\n for i in range(9):\r\n for j in range(9):\r\n if i // 3 == a and j // 3 == b : \r\n cell.append(grid[i][j])\r\n return cell\r\n\r\n def row_and_column(self, grid, row, column): \r\n '''\r\n Returns rows and columns\r\n '''\r\n r = grid[row]\r\n c = []\r\n for j in range(9):\r\n c.append(grid[j][column])\r\n return r, c\r\n\r\n\r\n\r\n\r\n def step_1(self, sub_grid, num):\r\n '''\r\n Reducing a list of clues to a single value based on row and column elimination\r\n Returns a refined sub grid\r\n '''\r\n\r\n\r\n row,column = self.row_and_column(sub_grid,num,num)\r\n\r\n row_flatten = sum(row,[])\r\n single_values = [i for i,j in Counter(row_flatten).items() if j == 1 ]\r\n\r\n # For Rows\r\n for i in range(len(sub_grid)):\r\n for j in single_values:\r\n if j in sub_grid[num][i] and len(sub_grid[num][i]) != 1:\r\n 
sub_grid[num][i] = [j] \r\n\r\n # For Columns\r\n column_flatten = sum(column, [])\r\n column_single_values = [i for i,j in Counter(column_flatten).items() if j == 1 ]\r\n for i in range(len(sub_grid)):\r\n for j in column_single_values:\r\n if j in sub_grid[i][num] and len(sub_grid[i][num]) != 1:\r\n sub_grid[i][num] = [j]\r\n\r\n\r\n\r\n return sub_grid\r\n\r\n def step_2(self, sub_grid, num):\r\n '''\r\n Removes a number 'n' that fits at its correct position from other lists corresponding its row and column\r\n Returns refined sub grid\r\n '''\r\n\r\n row,column = self.row_and_column(sub_grid,num,num)\r\n\r\n # For Rows\r\n single_value_list = []\r\n for i in range(len(row)):\r\n if len(sub_grid[num][i]) == 1:\r\n single_value_list.append(sub_grid[num][i])\r\n single_value_list_flatten = sum(single_value_list, [])\r\n\r\n for i in range(len(sub_grid)):\r\n if len(sub_grid[num][i]) != 1: \r\n for j in single_value_list_flatten:\r\n if j in sub_grid[num][i]:\r\n sub_grid[num][i].remove(j)\r\n\r\n # For Columns\r\n single_value_list = []\r\n for i in range(len(column)):\r\n if len(sub_grid[i][num]) == 1:\r\n single_value_list.append(sub_grid[i][num])\r\n single_value_list_flatten = sum(single_value_list, [])\r\n\r\n for i in range(len(sub_grid)):\r\n if len(sub_grid[i][num]) != 1: \r\n for j in single_value_list_flatten:\r\n if j in sub_grid[i][num]:\r\n sub_grid[i][num].remove(j)\r\n\r\n return sub_grid\r\n\r\n def step_3(self, sub_grid, num):\r\n pass\r\n\r\n \r\n\r\n\r\n def perform(self):\r\n '''\r\n Performs the step_1 and step_2 untill the Sub grid is solved\r\n Returns None\r\n '''\r\n\r\n temp = []\r\n while self.sub_grid != temp: \r\n temp = deepcopy(self.sub_grid) \r\n for i in range(len(grid)):\r\n self.sub_grid = self.step_1(self.sub_grid, i)\r\n self.sub_grid = self.step_2(self.sub_grid, i)\r\n\r\n\r\n def solve(self):\r\n '''\r\n Solves the Sub grid and prints the sub grid\r\n Returns None\r\n '''\r\n\r\n self.perform()\r\n for i in range(9):\r\n for 
j in range(9):\r\n print(self.sub_grid[i][j], end=' ')\r\n print()\r\n\r\n\r\n# grid = [\r\n# [0,3,0,0,1,0,0,6,0],\r\n# [7,5,0,0,3,0,0,4,8],\r\n# [0,0,6,9,8,4,3,0,0],\r\n# [0,0,3,0,0,0,8,0,0],\r\n# [9,1,2,0,0,0,6,7,4],\r\n# [0,0,4,0,0,0,5,0,0],\r\n# [0,0,1,6,7,5,2,0,0],\r\n# [6,8,0,0,9,0,0,1,5],\r\n# [0,9,0,0,4,0,0,3,0]\r\n# ]\r\n\r\n# grid = [\r\n# [6,0,0,1,0,8,2,0,3],\r\n# [0,2,0,0,4,0,0,9,0],\r\n# [8,0,3,0,0,5,4,0,0],\r\n# [5,0,4,6,0,7,0,0,9],\r\n# [0,3,0,0,0,0,0,5,0],\r\n# [7,0,0,8,0,3,1,0,2],\r\n# [0,0,1,7,0,0,9,0,6],\r\n# [0,8,0,0,3,0,0,2,0],\r\n# [3,0,2,9,0,4,0,0,5]\r\n# ]\r\ngrid = [\r\n [8,0,6,0,0,0,4,0,9],\r\n [0,0,0,0,0,0,0,0,0],\r\n [0,9,2,0,0,0,5,0,8],\r\n [0,0,9,0,7,1,3,0,0],\r\n [5,0,8,0,0,0,0,2,0],\r\n [0,0,4,0,5,0,0,0,0],\r\n [0,0,0,0,0,7,9,1,0],\r\n [0,0,0,9,0,0,0,0,7],\r\n [0,7,0,0,0,3,0,0,4],\r\n]\r\n\r\nmat = Sudoku(grid)\r\nmat.solve()\r\n",
"step-ids": [
10,
12,
13,
14,
15
]
}
|
[
10,
12,
13,
14,
15
] |
def parse_sequence(seq):
    """Split a comma-separated string into its fields.

    Returns a ``(list, tuple)`` pair of the same fields, matching the two
    representations the original script printed.
    """
    items = seq.split(',')
    return items, tuple(items)


def main():
    """Prompt for a comma-separated sequence and print both forms."""
    items, pair = parse_sequence(input('write a sequence of numbers: '))
    print(items)
    print(pair)


# Guard so importing this module does not block on stdin.
if __name__ == '__main__':
    main()
|
normal
|
{
"blob_id": "be867d600f5f267986368f5573006f63004dbf9e",
"index": 5094,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(seq.split(','))\nprint(tuple(seq.split(',')))\n",
"step-3": "seq = input('write a sequence of numbers: ')\nprint(seq.split(','))\nprint(tuple(seq.split(',')))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from django.shortcuts import render
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from .models import Document, Organization, UserProfile, Shop
#from .forms import DocUploadForm, ShopEditForm
from django.shortcuts import render_to_response, get_object_or_404
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login
from django.shortcuts import get_object_or_404
from django.contrib.auth.decorators import login_required
from django.forms import ModelForm
from django.utils.translation import ugettext_lazy as _
from django import forms
from .models import *
class DocUploadForm(forms.ModelForm):
    """Upload/edit form for ``Document``.

    Ownership and visibility fields are excluded because the view fills
    them in before saving.
    """

    # Explicit multi-select across every tag (overrides the generated field).
    tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.all())

    class Meta:
        model = Document
        exclude = [
            'organization',
            'private_user',
            'is_public',
            'is_user_private',
            'display',
        ]
class ShopForm(forms.Form):
    """Registration form for a shop together with its login credentials."""

    shopName = forms.CharField(max_length=100)
    email = forms.EmailField(
        widget=forms.TextInput(attrs={'class': 'mandatory', 'placeholder': 'Email'}),
        label=_(u'email address'),
        required=False,
    )
    address = forms.CharField(widget=forms.Textarea())
    pincode = forms.IntegerField()
    nearest_college = forms.CharField(max_length=200, required=False)
    nearest_town = forms.CharField(max_length=200, required=False)
    telephone = forms.CharField(max_length=14)
    longitude = forms.DecimalField(max_digits=11, decimal_places=7)
    latitude = forms.DecimalField(max_digits=11, decimal_places=7)
    username = forms.CharField(
        widget=forms.TextInput(attrs={'class': 'mandatory', 'placeholder': 'User Name'}),
        label=_(u'Username'),
    )
    password = forms.CharField(
        widget=forms.PasswordInput(attrs={'class': 'mandatory', 'placeholder': 'Password'},
                                   render_value=False),
        label=_(u'Password'),
    )
    password1 = forms.CharField(
        widget=forms.PasswordInput(attrs={'class': 'mandatory', 'placeholder': ' Password Again'},
                                   render_value=False),
        label=_(u'Password Again'),
    )
    services = forms.ModelMultipleChoiceField(queryset=Service.objects.all())

    def clean(self):
        """Reject the form when the two password entries disagree.

        Raised as a non-field error (it spans two fields), so it lands
        in ``non_field_errors()``.
        """
        data = self.cleaned_data
        if 'password1' in data and 'password' in data and data['password1'] != data['password']:
            raise forms.ValidationError(_(u'You must type the same password each time'))
        return data
class ShopEditForm(forms.ModelForm):
    """Edit form for ``Shop``.

    Coordinates and the activation flag are excluded — they are not
    user-editable through this form.
    """

    class Meta:
        model = Shop
        exclude = ['latitude', 'longitude', 'is_active']
@login_required
def indexEmp(request, shopid=None):
    """Render the employee index page.

    Bug fix: the original referenced ``shopid`` without defining it,
    raising ``NameError`` on every request.  It is now an optional URL
    parameter (default ``None``), which keeps the old signature working.
    """
    context = {'shop': shopid}
    return render(request, 'index.html', context)
@login_required
def docUpload(request):
    """Handle document uploads for organization owners and employees.

    GET renders the role-specific upload form; POST binds the form,
    attaches the caller's organization, marks the document public, and
    redirects to the role-specific document list.

    NOTE(review): the posted form is saved without an ``is_valid()``
    check, ``org`` is undefined when userType is neither 1 nor 2, and a
    POST from such a user falls through returning None — confirm the
    intended handling of these paths.
    """
    user = UserProfile.objects.get(user=request.user)
    if(request.method=='POST'):
        # Resolve the organization from the caller's role:
        # owners (userType 1) own it, employees (userType 2) belong to it.
        if(user.userType == 1 ):
            org = Organization.objects.get(owner = request.user)
        elif(user.userType == 2):
            org = Organization.objects.get(employee = request.user)
        data = DocUploadForm(request.POST,request.FILES)
        # commit=False so organization/visibility can be set before the
        # row is written; save_m2m() then persists the tags relation.
        new_doc = data.save(commit=False)
        new_doc.organization = org
        new_doc.is_public = True
        new_doc.save()
        data.save_m2m()
        if(user.userType == 1 ):
            return HttpResponseRedirect(reverse('documentListOwner'))
        elif(user.userType == 2):
            return HttpResponseRedirect(reverse('documentListEmp'))
    else:
        form = DocUploadForm()
        if(user.userType == 1 ):
            context = { "docUploadForm" : form}
            return render(request,'printo_app/docUpload-owner.html',context)
        if(user.userType == 2 ):
            # Employees also see their shop's printing rate on the form.
            shopRate = Shop.objects.get(employee=request.user).rate
            context = { "docUploadForm" : form,"rate":shopRate }
            return render(request,'printo_app/docUpload-emp.html',context)
@login_required
def docList(request):
    """List the public documents of the caller's organization.

    Owners (userType 1) and employees (userType 2) resolve their
    organization differently and get different templates; employees
    additionally see the newest uploads first.
    """
    profile = UserProfile.objects.get(user=request.user)
    if profile.userType == 1:
        org = Organization.objects.get(owner=request.user)
        docs = Document.objects.filter(is_public=True).filter(organization=org)
        return render(request, 'printo_app/docList-owner.html', {"docs": docs})
    elif profile.userType == 2:
        org = Organization.objects.get(employee=request.user)
        docs = (Document.objects.filter(is_public=True)
                .filter(organization=org)
                .order_by('-uploadedDate'))
        return render(request, 'printo_app/docList-emp.html', {"docs": docs})
@login_required
def docListOwner(request):
    """Owner-only listing of the organization's public documents.

    NOTE(review): a ``pass`` stub with the same name appears later in this
    module and shadows this implementation at import time — confirm which
    definition should survive.
    """
    profile = UserProfile.objects.get(user=request.user)
    if profile.userType == 1:
        org = Organization.objects.get(owner=request.user)
        docs = Document.objects.filter(is_public=True).filter(organization=org)
        return render(request, 'printo_app/docList-owner.html', {"docs": docs})
@login_required
def docDetail(request, docid):
    """Show one document together with a pre-populated edit form."""
    doc = Document.objects.get(id=docid)
    edit_form = DocUploadForm(instance=doc)
    return render(request, 'printo_app/docDetail.html',
                  {"docEditForm": edit_form, "doc": doc})
@login_required
def docEditSave(request, docid):
    """Persist posted edits to an existing document, then redirect.

    Bug fixes: the original called ``save()`` without ``is_valid()``
    (which raises ``ValueError`` on bad input) and built a ``context``
    dict that was never used.  Invalid input now silently skips the save;
    the redirect target is unchanged.
    """
    current_doc = Document.objects.get(id=docid)
    form = DocUploadForm(request.POST, request.FILES, instance=current_doc)
    if form.is_valid():
        form.save()
    return HttpResponseRedirect(reverse('documentList'))
@login_required
def shopProfile(request, shopid=None):
    """Render the shop-profile page for the logged-in employee.

    Owners (userType 1) currently get an empty context — that branch is a
    placeholder.  NOTE(review): the edit form is unbound; presumably it
    should be ``ShopEditForm(instance=shop)`` — confirm against the
    template.
    """
    context = {}
    profile = UserProfile.objects.get(user=request.user)
    if profile.userType == 1:
        pass  # owner flow not implemented yet
    elif profile.userType == 2:
        shop = Shop.objects.get(employee=request.user)
        context = {'shopForm': ShopEditForm(), 'details': shop}
    return render(request, 'printo_app/shopProfile.html', context)
@login_required
def shopEditSave(request):
    """Apply posted edits to the calling employee's shop, then redirect.

    Bug fix: the original called ``save()`` without ``is_valid()``, which
    raises on invalid input.  Invalid input now skips the save and still
    redirects back to the profile page.
    """
    shop = Shop.objects.get(employee=request.user)
    form = ShopEditForm(request.POST, instance=shop)
    if form.is_valid():
        form.save()
    return HttpResponseRedirect(reverse('shopProfile'))
@login_required
def indexEmp(request, shopid=None):
    """Entry point after login: always forwards to the order list.

    Bug fix: the original computed an ``is_owner`` flag and a ``context``
    dict that were discarded before the unconditional redirect; that dead
    code is removed.  The profile lookup is kept so a missing profile
    still surfaces as ``DoesNotExist``, as before.

    NOTE(review): this redefines an earlier ``indexEmp`` in this module —
    confirm which definition is intended.
    """
    UserProfile.objects.get(user=request.user)
    return HttpResponseRedirect(reverse('orderList'))
@login_required
def orderList(request, shopid=None):
    """Dashboard of the employee's shop orders with per-state counts."""
    shop = Shop.objects.get(employee=request.user)
    orders = Order.objects.filter(shop=shop)
    context = {
        "orders": orders,
        "new_count": orders.filter(is_new=True).count(),
        "pending_count": orders.filter(is_accepted=True).count(),
        "completed_count": orders.filter(is_printed=True).count(),
        "delivered_count": orders.filter(is_delivered=True).count(),
    }
    return render(request, 'printo_app/ordersList.html', context)
@login_required
def shopList(request):
    """List every shop belonging to the caller's organization."""
    org = Organization.objects.get(owner=request.user)
    context = {'shops': Shop.objects.filter(owner=org)}
    return render(request, 'printo_app/shopList.html', context)
@login_required
def shopCreate(request):
    """Create a shop plus its employee login account (owners only).

    Bug fixes versus the original:
    * removed a leftover ``ipdb.set_trace()`` debugger breakpoint that
      froze every POST;
    * an invalid POST now re-renders the bound form instead of falling
      off the end and returning ``None``;
    * the ``services`` M2M is assigned via ``set()`` after ``save()``
      (direct attribute assignment to a saved instance's M2M was removed
      in modern Django).
    """
    uprofile = get_object_or_404(UserProfile, user=request.user)
    if uprofile.userType != 1:
        return HttpResponse("You don't have permission")
    if request.method == 'POST':
        form = ShopForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data.get("username", None)
            password = form.cleaned_data.get("password", None)
            telephone = form.cleaned_data.get("telephone", None)
            # The shop account reuses the owner's e-mail address.
            email = request.user.email
            if username is not None:
                user = User.objects.create_user(username=username,
                                                email=email,
                                                password=password)
                userprofile = UserProfile()
                userprofile.user = user
                userprofile.userType = 2  # 2 == shop employee
                if telephone is not None:
                    userprofile.telephone = telephone
                userprofile.save()
                shopprofile = Shop()
                shopprofile.employee = user
                shopprofile.owner = Organization.objects.get(owner=request.user)
                shopprofile.email = email
                shopprofile.shopName = form.cleaned_data.get("shopName", None)
                shopprofile.pincode = form.cleaned_data.get("pincode", None)
                shopprofile.address = form.cleaned_data.get("address", None)
                shopprofile.latitude = form.cleaned_data.get("latitude", None)
                shopprofile.longitude = form.cleaned_data.get("longitude", None)
                shopprofile.telephone = form.cleaned_data.get("telephone", None)
                shopprofile.save()
                # M2M can only be set once the instance has a primary key.
                shopprofile.services.set(form.cleaned_data["services"])
                return HttpResponseRedirect(reverse('shopList'))
        # Invalid POST: fall through and re-render the bound form.
        shopform = form
    else:
        shopform = ShopForm()
    userform = 'this form is to be deleted'
    context = {'shopCreateForm': shopform, 'userForm': userform}
    return render(request, 'printo_app/shopCreate.html', context)
@login_required
def index(request):
    """Dispatch to the owner or employee landing page by profile type."""
    profile = UserProfile.objects.get(user=request.user)
    if profile.userType == 1:
        return HttpResponseRedirect(reverse('OwnerMain'))
    if profile.userType == 2:
        return HttpResponseRedirect(reverse('EmployeeMain'))
    return None
class RegistrationForm(forms.Form):
    """Owner sign-up form; the e-mail address doubles as the username."""
    email = forms.EmailField(widget=forms.TextInput(attrs={'class': 'mandatory', 'placeholder': 'Email'}),
                             label=_(u'email address'))
    password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'mandatory', 'placeholder': 'Password'}, render_value=False),
                               label=_(u'Password'))
    password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'mandatory', 'placeholder': ' Password Again'}, render_value=False),
                                label=_(u'Password Again'))
    mobile = forms.CharField(max_length=14)
    def clean(self):
        """
        Verify that the values entered into the two password fields
        match. Note that an error here will end up in
        ``non_field_errors()`` because it doesn't apply to a single
        field.
        """
        if 'password1' in self.cleaned_data and 'password' in self.cleaned_data:
            if self.cleaned_data['password1'] != self.cleaned_data['password']:
                raise forms.ValidationError(_(u'You must type the same password each time'))
        return self.cleaned_data
    def clean_email(self):
        # Users are created with username == e-mail (see index_main),
        # so uniqueness is checked against the username column.
        if 'email' in self.cleaned_data:
            try:
                user = User.objects.get(username= self.cleaned_data["email"])
                raise forms.ValidationError(_(u'Already Email Address is registered'))
            except User.DoesNotExist:
                pass
        return self.cleaned_data["email"]
def index_main(request):
    """Landing page: authenticated users go to 'main'; others see sign-up.

    A valid registration creates a ``User`` (username == e-mail), its
    owner ``UserProfile`` and an empty ``Organization``.  An invalid POST
    re-renders the bound form with its errors.

    Idiom fix: dropped the redundant ``== True`` comparison.
    NOTE(review): ``is_authenticated`` is called here (pre-Django-1.10
    style); on modern Django it is a property — confirm the project's
    Django version.
    """
    if request.user.is_authenticated():
        return HttpResponseRedirect(reverse("main"))
    if request.method == "POST":
        form = RegistrationForm(request.POST)
        if form.is_valid():
            u = User.objects.create_user(
                form.cleaned_data["email"],
                form.cleaned_data["email"],
                form.cleaned_data["password"],
            )
            # TODO: send a verification e-mail (left pending in original).
            profile = UserProfile()
            profile.user = u
            profile.userType = 1  # 1 == organization owner
            profile.mobile = form.cleaned_data["mobile"]
            profile.save()
            org = Organization()
            org.owner = u
            org.save()
            return HttpResponse("Thanks")
    else:
        form = RegistrationForm()
    return render(request, 'index_main.html', context={"form": form})
def docListOwner(request):
    # TODO: unimplemented stub.
    # NOTE(review): this redefinition SHADOWS the fully implemented
    # ``docListOwner`` earlier in this module at import time — almost
    # certainly a bug; confirm which definition should survive.
    pass
def docUploadOwner(request):
    # TODO: unimplemented stub; no callers visible in this chunk.
    pass
@login_required
def indexOwner(request):
    """Render the owner landing page (no context data yet)."""
    return render(request, 'ownerMain.html', {})
# ====================================
# DATA PROVIDERS
# ====================================
import json
from django.core import serializers
def get_universitys(request):
    """JSON endpoint: map each university name to a (name, pk) pair."""
    payload = {u.name: (u.name, u.pk) for u in University.objects.all()}
    return HttpResponse(json.dumps(payload), content_type="application/json")
def get_publishers(request):
    """JSON endpoint: map each publisher name to a (name, pk) pair."""
    payload = {p.name: (p.name, p.pk) for p in Publisher.objects.all()}
    return HttpResponse(json.dumps(payload), content_type="application/json")
def get_courses(request):
    """JSON endpoint: map each course name to a (name, pk) pair."""
    payload = {c.name: (c.name, c.pk) for c in Course.objects.all()}
    return HttpResponse(json.dumps(payload), content_type="application/json")
def get_topics(request):
    """JSON endpoint: map each topic name to a (name, pk) pair."""
    payload = {t.name: (t.name, t.pk) for t in Topic.objects.all()}
    return HttpResponse(json.dumps(payload), content_type="application/json")
def get_tags(request):
    """JSON endpoint: map each tag name to a (name, id) pair."""
    payload = {t.name: (t.name, t.id) for t in Tag.objects.all()}
    return HttpResponse(json.dumps(payload), content_type="application/json")
def get_services(request):
    """JSON endpoint: map each service name to a (name, id) pair."""
    payload = {s.name: (s.name, s.id) for s in Service.objects.all()}
    return HttpResponse(json.dumps(payload), content_type="application/json")
def get_colleges(request):
    """JSON endpoint: map each college name to stringified (lat, lng)."""
    payload = {c.name: (str(c.latitude), str(c.longitude))
               for c in College.objects.all()}
    return HttpResponse(json.dumps(payload), content_type="application/json")
def get_cities(request):
    """Return every City as JSON: ``{name: [latitude, longitude]}``.

    Coordinates are serialized as strings to avoid float rounding of the
    underlying decimal fields. ``request`` is unused.
    """
    payload = {
        city.name: (str(city.latitude), str(city.longitude))
        for city in City.objects.all()
    }
    return HttpResponse(json.dumps(payload), content_type="application/json")
|
normal
|
{
"blob_id": "d2c5d306591216e100b5bd8e8822b24fd137d092",
"index": 9208,
"step-1": "<mask token>\n\n\nclass DocUploadForm(forms.ModelForm):\n tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.all())\n\n\n class Meta:\n model = Document\n exclude = ['organization', 'private_user', 'is_public',\n 'is_user_private', 'display']\n\n\nclass ShopForm(forms.Form):\n shopName = forms.CharField(max_length=100)\n email = forms.EmailField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'Email'}), label=_(u'email address'),\n required=False)\n address = forms.CharField(widget=forms.Textarea())\n pincode = forms.IntegerField()\n nearest_college = forms.CharField(max_length=200, required=False)\n nearest_town = forms.CharField(max_length=200, required=False)\n telephone = forms.CharField(max_length=14)\n longitude = forms.DecimalField(max_digits=11, decimal_places=7)\n latitude = forms.DecimalField(max_digits=11, decimal_places=7)\n username = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'User Name'}), label=_(u'Username'))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': 'Password'}, render_value=False), label\n =_(u'Password'))\n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': ' Password Again'}, render_value=False),\n label=_(u'Password Again'))\n services = forms.ModelMultipleChoiceField(queryset=Service.objects.all())\n\n def clean(self):\n \"\"\"\n Verifiy that the values entered into the two password fields\n match. 
Note that an error here will end up in\n ``non_field_errors()`` because it doesn't apply to a single\n field.\n \n \"\"\"\n if ('password1' in self.cleaned_data and 'password' in self.\n cleaned_data):\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(\n u'You must type the same password each time'))\n return self.cleaned_data\n\n\nclass ShopEditForm(forms.ModelForm):\n\n\n class Meta:\n model = Shop\n exclude = ['latitude', 'longitude', 'is_active']\n\n\n<mask token>\n\n\n@login_required\ndef docUpload(request):\n user = UserProfile.objects.get(user=request.user)\n if request.method == 'POST':\n if user.userType == 1:\n org = Organization.objects.get(owner=request.user)\n elif user.userType == 2:\n org = Organization.objects.get(employee=request.user)\n data = DocUploadForm(request.POST, request.FILES)\n new_doc = data.save(commit=False)\n new_doc.organization = org\n new_doc.is_public = True\n new_doc.save()\n data.save_m2m()\n if user.userType == 1:\n return HttpResponseRedirect(reverse('documentListOwner'))\n elif user.userType == 2:\n return HttpResponseRedirect(reverse('documentListEmp'))\n else:\n form = DocUploadForm()\n if user.userType == 1:\n context = {'docUploadForm': form}\n return render(request, 'printo_app/docUpload-owner.html', context)\n if user.userType == 2:\n shopRate = Shop.objects.get(employee=request.user).rate\n context = {'docUploadForm': form, 'rate': shopRate}\n return render(request, 'printo_app/docUpload-emp.html', context)\n\n\n@login_required\ndef docList(request):\n user = UserProfile.objects.get(user=request.user)\n if user.userType == 1:\n org = Organization.objects.get(owner=request.user)\n docList = Document.objects.filter(is_public=True).filter(organization\n =org)\n context = {'docs': docList}\n return render(request, 'printo_app/docList-owner.html', context)\n elif user.userType == 2:\n org = Organization.objects.get(employee=request.user)\n docList = 
Document.objects.filter(is_public=True).filter(organization=org\n ).order_by('-uploadedDate')\n context = {'docs': docList}\n return render(request, 'printo_app/docList-emp.html', context)\n\n\n<mask token>\n\n\n@login_required\ndef docDetail(request, docid):\n docDetail = Document.objects.get(id=docid)\n form = DocUploadForm(instance=docDetail)\n context = {'docEditForm': form, 'doc': docDetail}\n return render(request, 'printo_app/docDetail.html', context)\n\n\n@login_required\ndef docEditSave(request, docid):\n currentDoc = Document.objects.get(id=docid)\n docDetail = DocUploadForm(request.POST, request.FILES, instance=currentDoc)\n docDetail.save()\n context = {'msg': docDetail}\n return HttpResponseRedirect(reverse('documentList'))\n\n\n<mask token>\n\n\n@login_required\ndef shopEditSave(request):\n shop = Shop.objects.get(employee=request.user)\n shopForm = ShopEditForm(request.POST, instance=shop)\n shopForm.save()\n return HttpResponseRedirect(reverse('shopProfile'))\n\n\n<mask token>\n\n\n@login_required\ndef orderList(request, shopid=None):\n shop = Shop.objects.get(employee=request.user)\n orderList = Order.objects.filter(shop=shop)\n new_count = orderList.filter(is_new=True).count()\n pending_count = orderList.filter(is_accepted=True).count()\n completed_count = orderList.filter(is_printed=True).count()\n delivered_count = orderList.filter(is_delivered=True).count()\n context = {'orders': orderList, 'new_count': new_count, 'pending_count':\n pending_count, 'completed_count': completed_count,\n 'delivered_count': delivered_count}\n return render(request, 'printo_app/ordersList.html', context)\n\n\n<mask token>\n\n\n@login_required\ndef shopCreate(request):\n uprofile = get_object_or_404(UserProfile, user=request.user)\n if uprofile.userType == 1:\n pass\n else:\n return HttpResponse(\"You don't have permission\")\n if request.method == 'POST':\n form = ShopForm(request.POST)\n import ipdb\n ipdb.set_trace()\n if form.is_valid():\n username = 
form.cleaned_data.get('username', None)\n password = form.cleaned_data.get('password', None)\n telephone = form.cleaned_data.get('telephone', None)\n email = request.user.email\n if username != None:\n user = User.objects.create_user(username=username, email=\n email, password=password)\n userprofile = UserProfile()\n userprofile.user = user\n userprofile.userType = 2\n if telephone != None:\n userprofile.telephone = telephone\n userprofile.save()\n shopprofile = Shop()\n shopprofile.employee = user\n shopprofile.owner = Organization.objects.get(owner=request.user)\n shopprofile.email = email\n shopprofile.shopName = form.cleaned_data.get('shopName', None)\n shopprofile.pincode = form.cleaned_data.get('pincode', None)\n shopprofile.address = form.cleaned_data.get('address', None)\n shopprofile.latitude = form.cleaned_data.get('latitude', None)\n shopprofile.longitude = form.cleaned_data.get('longitude', None)\n shopprofile.telephone = form.cleaned_data.get('telephone', None)\n shopprofile.save()\n shopprofile.services = form.cleaned_data.get('services', None)\n return HttpResponseRedirect(reverse('shopList'))\n else:\n userform = 'this form is to be deleted'\n shopform = ShopForm()\n context = {'shopCreateForm': shopform, 'userForm': userform}\n return render(request, 'printo_app/shopCreate.html', context)\n\n\n<mask token>\n\n\nclass RegistrationForm(forms.Form):\n email = forms.EmailField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'Email'}), label=_(u'email address'))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': 'Password'}, render_value=False), label\n =_(u'Password'))\n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': ' Password Again'}, render_value=False),\n label=_(u'Password Again'))\n mobile = forms.CharField(max_length=14)\n\n def clean(self):\n \"\"\"\n Verifiy that the values entered into the two password fields\n match. 
Note that an error here will end up in\n ``non_field_errors()`` because it doesn't apply to a single\n field.\n \n \"\"\"\n if ('password1' in self.cleaned_data and 'password' in self.\n cleaned_data):\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(\n u'You must type the same password each time'))\n return self.cleaned_data\n\n def clean_email(self):\n if 'email' in self.cleaned_data:\n try:\n user = User.objects.get(username=self.cleaned_data['email'])\n raise forms.ValidationError(_(\n u'Already Email Address is registered'))\n except User.DoesNotExist:\n pass\n return self.cleaned_data['email']\n\n\ndef index_main(request):\n if request.user.is_authenticated() == True:\n return HttpResponseRedirect(reverse('main'))\n else:\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n u = User.objects.create_user(form.cleaned_data['email'],\n form.cleaned_data['email'], form.cleaned_data['password'])\n profile = UserProfile()\n profile.user = u\n profile.userType = 1\n profile.mobile = form.cleaned_data['mobile']\n profile.save()\n org = Organization()\n org.owner = u\n org.save()\n return HttpResponse('Thanks')\n else:\n form = RegistrationForm()\n return render(request, 'index_main.html', context={'form': form})\n\n\ndef docListOwner(request):\n pass\n\n\n<mask token>\n\n\ndef get_universitys(request):\n p = {}\n for c in University.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\ndef get_publishers(request):\n p = {}\n for c in Publisher.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\n<mask token>\n\n\ndef get_topics(request):\n p = {}\n for c in Topic.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\n<mask token>\n\n\ndef get_cities(request):\n p = {}\n for c in 
City.objects.all():\n p[c.name] = str(c.latitude), str(c.longitude)\n return HttpResponse(json.dumps(p), content_type='application/json')\n",
"step-2": "<mask token>\n\n\nclass DocUploadForm(forms.ModelForm):\n tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.all())\n\n\n class Meta:\n model = Document\n exclude = ['organization', 'private_user', 'is_public',\n 'is_user_private', 'display']\n\n\nclass ShopForm(forms.Form):\n shopName = forms.CharField(max_length=100)\n email = forms.EmailField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'Email'}), label=_(u'email address'),\n required=False)\n address = forms.CharField(widget=forms.Textarea())\n pincode = forms.IntegerField()\n nearest_college = forms.CharField(max_length=200, required=False)\n nearest_town = forms.CharField(max_length=200, required=False)\n telephone = forms.CharField(max_length=14)\n longitude = forms.DecimalField(max_digits=11, decimal_places=7)\n latitude = forms.DecimalField(max_digits=11, decimal_places=7)\n username = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'User Name'}), label=_(u'Username'))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': 'Password'}, render_value=False), label\n =_(u'Password'))\n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': ' Password Again'}, render_value=False),\n label=_(u'Password Again'))\n services = forms.ModelMultipleChoiceField(queryset=Service.objects.all())\n\n def clean(self):\n \"\"\"\n Verifiy that the values entered into the two password fields\n match. 
Note that an error here will end up in\n ``non_field_errors()`` because it doesn't apply to a single\n field.\n \n \"\"\"\n if ('password1' in self.cleaned_data and 'password' in self.\n cleaned_data):\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(\n u'You must type the same password each time'))\n return self.cleaned_data\n\n\nclass ShopEditForm(forms.ModelForm):\n\n\n class Meta:\n model = Shop\n exclude = ['latitude', 'longitude', 'is_active']\n\n\n<mask token>\n\n\n@login_required\ndef docUpload(request):\n user = UserProfile.objects.get(user=request.user)\n if request.method == 'POST':\n if user.userType == 1:\n org = Organization.objects.get(owner=request.user)\n elif user.userType == 2:\n org = Organization.objects.get(employee=request.user)\n data = DocUploadForm(request.POST, request.FILES)\n new_doc = data.save(commit=False)\n new_doc.organization = org\n new_doc.is_public = True\n new_doc.save()\n data.save_m2m()\n if user.userType == 1:\n return HttpResponseRedirect(reverse('documentListOwner'))\n elif user.userType == 2:\n return HttpResponseRedirect(reverse('documentListEmp'))\n else:\n form = DocUploadForm()\n if user.userType == 1:\n context = {'docUploadForm': form}\n return render(request, 'printo_app/docUpload-owner.html', context)\n if user.userType == 2:\n shopRate = Shop.objects.get(employee=request.user).rate\n context = {'docUploadForm': form, 'rate': shopRate}\n return render(request, 'printo_app/docUpload-emp.html', context)\n\n\n@login_required\ndef docList(request):\n user = UserProfile.objects.get(user=request.user)\n if user.userType == 1:\n org = Organization.objects.get(owner=request.user)\n docList = Document.objects.filter(is_public=True).filter(organization\n =org)\n context = {'docs': docList}\n return render(request, 'printo_app/docList-owner.html', context)\n elif user.userType == 2:\n org = Organization.objects.get(employee=request.user)\n docList = 
Document.objects.filter(is_public=True).filter(organization=org\n ).order_by('-uploadedDate')\n context = {'docs': docList}\n return render(request, 'printo_app/docList-emp.html', context)\n\n\n<mask token>\n\n\n@login_required\ndef docDetail(request, docid):\n docDetail = Document.objects.get(id=docid)\n form = DocUploadForm(instance=docDetail)\n context = {'docEditForm': form, 'doc': docDetail}\n return render(request, 'printo_app/docDetail.html', context)\n\n\n@login_required\ndef docEditSave(request, docid):\n currentDoc = Document.objects.get(id=docid)\n docDetail = DocUploadForm(request.POST, request.FILES, instance=currentDoc)\n docDetail.save()\n context = {'msg': docDetail}\n return HttpResponseRedirect(reverse('documentList'))\n\n\n<mask token>\n\n\n@login_required\ndef shopEditSave(request):\n shop = Shop.objects.get(employee=request.user)\n shopForm = ShopEditForm(request.POST, instance=shop)\n shopForm.save()\n return HttpResponseRedirect(reverse('shopProfile'))\n\n\n<mask token>\n\n\n@login_required\ndef orderList(request, shopid=None):\n shop = Shop.objects.get(employee=request.user)\n orderList = Order.objects.filter(shop=shop)\n new_count = orderList.filter(is_new=True).count()\n pending_count = orderList.filter(is_accepted=True).count()\n completed_count = orderList.filter(is_printed=True).count()\n delivered_count = orderList.filter(is_delivered=True).count()\n context = {'orders': orderList, 'new_count': new_count, 'pending_count':\n pending_count, 'completed_count': completed_count,\n 'delivered_count': delivered_count}\n return render(request, 'printo_app/ordersList.html', context)\n\n\n@login_required\ndef shopList(request):\n org = Organization.objects.get(owner=request.user)\n shops = Shop.objects.filter(owner=org)\n context = {'shops': shops}\n return render(request, 'printo_app/shopList.html', context)\n\n\n@login_required\ndef shopCreate(request):\n uprofile = get_object_or_404(UserProfile, user=request.user)\n if uprofile.userType == 
1:\n pass\n else:\n return HttpResponse(\"You don't have permission\")\n if request.method == 'POST':\n form = ShopForm(request.POST)\n import ipdb\n ipdb.set_trace()\n if form.is_valid():\n username = form.cleaned_data.get('username', None)\n password = form.cleaned_data.get('password', None)\n telephone = form.cleaned_data.get('telephone', None)\n email = request.user.email\n if username != None:\n user = User.objects.create_user(username=username, email=\n email, password=password)\n userprofile = UserProfile()\n userprofile.user = user\n userprofile.userType = 2\n if telephone != None:\n userprofile.telephone = telephone\n userprofile.save()\n shopprofile = Shop()\n shopprofile.employee = user\n shopprofile.owner = Organization.objects.get(owner=request.user)\n shopprofile.email = email\n shopprofile.shopName = form.cleaned_data.get('shopName', None)\n shopprofile.pincode = form.cleaned_data.get('pincode', None)\n shopprofile.address = form.cleaned_data.get('address', None)\n shopprofile.latitude = form.cleaned_data.get('latitude', None)\n shopprofile.longitude = form.cleaned_data.get('longitude', None)\n shopprofile.telephone = form.cleaned_data.get('telephone', None)\n shopprofile.save()\n shopprofile.services = form.cleaned_data.get('services', None)\n return HttpResponseRedirect(reverse('shopList'))\n else:\n userform = 'this form is to be deleted'\n shopform = ShopForm()\n context = {'shopCreateForm': shopform, 'userForm': userform}\n return render(request, 'printo_app/shopCreate.html', context)\n\n\n<mask token>\n\n\nclass RegistrationForm(forms.Form):\n email = forms.EmailField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'Email'}), label=_(u'email address'))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': 'Password'}, render_value=False), label\n =_(u'Password'))\n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': ' Password 
Again'}, render_value=False),\n label=_(u'Password Again'))\n mobile = forms.CharField(max_length=14)\n\n def clean(self):\n \"\"\"\n Verifiy that the values entered into the two password fields\n match. Note that an error here will end up in\n ``non_field_errors()`` because it doesn't apply to a single\n field.\n \n \"\"\"\n if ('password1' in self.cleaned_data and 'password' in self.\n cleaned_data):\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(\n u'You must type the same password each time'))\n return self.cleaned_data\n\n def clean_email(self):\n if 'email' in self.cleaned_data:\n try:\n user = User.objects.get(username=self.cleaned_data['email'])\n raise forms.ValidationError(_(\n u'Already Email Address is registered'))\n except User.DoesNotExist:\n pass\n return self.cleaned_data['email']\n\n\ndef index_main(request):\n if request.user.is_authenticated() == True:\n return HttpResponseRedirect(reverse('main'))\n else:\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n u = User.objects.create_user(form.cleaned_data['email'],\n form.cleaned_data['email'], form.cleaned_data['password'])\n profile = UserProfile()\n profile.user = u\n profile.userType = 1\n profile.mobile = form.cleaned_data['mobile']\n profile.save()\n org = Organization()\n org.owner = u\n org.save()\n return HttpResponse('Thanks')\n else:\n form = RegistrationForm()\n return render(request, 'index_main.html', context={'form': form})\n\n\ndef docListOwner(request):\n pass\n\n\n<mask token>\n\n\ndef get_universitys(request):\n p = {}\n for c in University.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\ndef get_publishers(request):\n p = {}\n for c in Publisher.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\n<mask token>\n\n\ndef get_topics(request):\n p = {}\n 
for c in Topic.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\n<mask token>\n\n\ndef get_cities(request):\n p = {}\n for c in City.objects.all():\n p[c.name] = str(c.latitude), str(c.longitude)\n return HttpResponse(json.dumps(p), content_type='application/json')\n",
"step-3": "<mask token>\n\n\nclass DocUploadForm(forms.ModelForm):\n tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.all())\n\n\n class Meta:\n model = Document\n exclude = ['organization', 'private_user', 'is_public',\n 'is_user_private', 'display']\n\n\nclass ShopForm(forms.Form):\n shopName = forms.CharField(max_length=100)\n email = forms.EmailField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'Email'}), label=_(u'email address'),\n required=False)\n address = forms.CharField(widget=forms.Textarea())\n pincode = forms.IntegerField()\n nearest_college = forms.CharField(max_length=200, required=False)\n nearest_town = forms.CharField(max_length=200, required=False)\n telephone = forms.CharField(max_length=14)\n longitude = forms.DecimalField(max_digits=11, decimal_places=7)\n latitude = forms.DecimalField(max_digits=11, decimal_places=7)\n username = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'User Name'}), label=_(u'Username'))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': 'Password'}, render_value=False), label\n =_(u'Password'))\n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': ' Password Again'}, render_value=False),\n label=_(u'Password Again'))\n services = forms.ModelMultipleChoiceField(queryset=Service.objects.all())\n\n def clean(self):\n \"\"\"\n Verifiy that the values entered into the two password fields\n match. 
Note that an error here will end up in\n ``non_field_errors()`` because it doesn't apply to a single\n field.\n \n \"\"\"\n if ('password1' in self.cleaned_data and 'password' in self.\n cleaned_data):\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(\n u'You must type the same password each time'))\n return self.cleaned_data\n\n\nclass ShopEditForm(forms.ModelForm):\n\n\n class Meta:\n model = Shop\n exclude = ['latitude', 'longitude', 'is_active']\n\n\n@login_required\ndef indexEmp(request):\n context = {'shop': shopid}\n return render(request, 'index.html', context)\n\n\n@login_required\ndef docUpload(request):\n user = UserProfile.objects.get(user=request.user)\n if request.method == 'POST':\n if user.userType == 1:\n org = Organization.objects.get(owner=request.user)\n elif user.userType == 2:\n org = Organization.objects.get(employee=request.user)\n data = DocUploadForm(request.POST, request.FILES)\n new_doc = data.save(commit=False)\n new_doc.organization = org\n new_doc.is_public = True\n new_doc.save()\n data.save_m2m()\n if user.userType == 1:\n return HttpResponseRedirect(reverse('documentListOwner'))\n elif user.userType == 2:\n return HttpResponseRedirect(reverse('documentListEmp'))\n else:\n form = DocUploadForm()\n if user.userType == 1:\n context = {'docUploadForm': form}\n return render(request, 'printo_app/docUpload-owner.html', context)\n if user.userType == 2:\n shopRate = Shop.objects.get(employee=request.user).rate\n context = {'docUploadForm': form, 'rate': shopRate}\n return render(request, 'printo_app/docUpload-emp.html', context)\n\n\n@login_required\ndef docList(request):\n user = UserProfile.objects.get(user=request.user)\n if user.userType == 1:\n org = Organization.objects.get(owner=request.user)\n docList = Document.objects.filter(is_public=True).filter(organization\n =org)\n context = {'docs': docList}\n return render(request, 'printo_app/docList-owner.html', context)\n elif 
user.userType == 2:\n org = Organization.objects.get(employee=request.user)\n docList = Document.objects.filter(is_public=True).filter(organization=org\n ).order_by('-uploadedDate')\n context = {'docs': docList}\n return render(request, 'printo_app/docList-emp.html', context)\n\n\n<mask token>\n\n\n@login_required\ndef docDetail(request, docid):\n docDetail = Document.objects.get(id=docid)\n form = DocUploadForm(instance=docDetail)\n context = {'docEditForm': form, 'doc': docDetail}\n return render(request, 'printo_app/docDetail.html', context)\n\n\n@login_required\ndef docEditSave(request, docid):\n currentDoc = Document.objects.get(id=docid)\n docDetail = DocUploadForm(request.POST, request.FILES, instance=currentDoc)\n docDetail.save()\n context = {'msg': docDetail}\n return HttpResponseRedirect(reverse('documentList'))\n\n\n@login_required\ndef shopProfile(request, shopid=None):\n context = {}\n user = UserProfile.objects.get(user=request.user)\n if user.userType == 1:\n pass\n elif user.userType == 2:\n shop = Shop.objects.get(employee=request.user)\n shopForm = ShopEditForm()\n context = {'shopForm': shopForm, 'details': shop}\n return render(request, 'printo_app/shopProfile.html', context)\n\n\n@login_required\ndef shopEditSave(request):\n shop = Shop.objects.get(employee=request.user)\n shopForm = ShopEditForm(request.POST, instance=shop)\n shopForm.save()\n return HttpResponseRedirect(reverse('shopProfile'))\n\n\n@login_required\ndef indexEmp(request, shopid=None):\n user = UserProfile.objects.get(user=request.user)\n is_owner = False\n if user.userType == 1:\n is_owner = True\n elif user.userType == 2:\n is_owner = False\n context = {'is_owner': is_owner}\n return HttpResponseRedirect(reverse('orderList'))\n\n\n@login_required\ndef orderList(request, shopid=None):\n shop = Shop.objects.get(employee=request.user)\n orderList = Order.objects.filter(shop=shop)\n new_count = orderList.filter(is_new=True).count()\n pending_count = 
orderList.filter(is_accepted=True).count()\n completed_count = orderList.filter(is_printed=True).count()\n delivered_count = orderList.filter(is_delivered=True).count()\n context = {'orders': orderList, 'new_count': new_count, 'pending_count':\n pending_count, 'completed_count': completed_count,\n 'delivered_count': delivered_count}\n return render(request, 'printo_app/ordersList.html', context)\n\n\n@login_required\ndef shopList(request):\n org = Organization.objects.get(owner=request.user)\n shops = Shop.objects.filter(owner=org)\n context = {'shops': shops}\n return render(request, 'printo_app/shopList.html', context)\n\n\n@login_required\ndef shopCreate(request):\n uprofile = get_object_or_404(UserProfile, user=request.user)\n if uprofile.userType == 1:\n pass\n else:\n return HttpResponse(\"You don't have permission\")\n if request.method == 'POST':\n form = ShopForm(request.POST)\n import ipdb\n ipdb.set_trace()\n if form.is_valid():\n username = form.cleaned_data.get('username', None)\n password = form.cleaned_data.get('password', None)\n telephone = form.cleaned_data.get('telephone', None)\n email = request.user.email\n if username != None:\n user = User.objects.create_user(username=username, email=\n email, password=password)\n userprofile = UserProfile()\n userprofile.user = user\n userprofile.userType = 2\n if telephone != None:\n userprofile.telephone = telephone\n userprofile.save()\n shopprofile = Shop()\n shopprofile.employee = user\n shopprofile.owner = Organization.objects.get(owner=request.user)\n shopprofile.email = email\n shopprofile.shopName = form.cleaned_data.get('shopName', None)\n shopprofile.pincode = form.cleaned_data.get('pincode', None)\n shopprofile.address = form.cleaned_data.get('address', None)\n shopprofile.latitude = form.cleaned_data.get('latitude', None)\n shopprofile.longitude = form.cleaned_data.get('longitude', None)\n shopprofile.telephone = form.cleaned_data.get('telephone', None)\n shopprofile.save()\n 
shopprofile.services = form.cleaned_data.get('services', None)\n return HttpResponseRedirect(reverse('shopList'))\n else:\n userform = 'this form is to be deleted'\n shopform = ShopForm()\n context = {'shopCreateForm': shopform, 'userForm': userform}\n return render(request, 'printo_app/shopCreate.html', context)\n\n\n@login_required\ndef index(request):\n user = UserProfile.objects.get(user=request.user)\n if user.userType == 1:\n return HttpResponseRedirect(reverse('OwnerMain'))\n elif user.userType == 2:\n return HttpResponseRedirect(reverse('EmployeeMain'))\n return None\n\n\nclass RegistrationForm(forms.Form):\n email = forms.EmailField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'Email'}), label=_(u'email address'))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': 'Password'}, render_value=False), label\n =_(u'Password'))\n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': ' Password Again'}, render_value=False),\n label=_(u'Password Again'))\n mobile = forms.CharField(max_length=14)\n\n def clean(self):\n \"\"\"\n Verifiy that the values entered into the two password fields\n match. 
Note that an error here will end up in\n ``non_field_errors()`` because it doesn't apply to a single\n field.\n \n \"\"\"\n if ('password1' in self.cleaned_data and 'password' in self.\n cleaned_data):\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(\n u'You must type the same password each time'))\n return self.cleaned_data\n\n def clean_email(self):\n if 'email' in self.cleaned_data:\n try:\n user = User.objects.get(username=self.cleaned_data['email'])\n raise forms.ValidationError(_(\n u'Already Email Address is registered'))\n except User.DoesNotExist:\n pass\n return self.cleaned_data['email']\n\n\ndef index_main(request):\n if request.user.is_authenticated() == True:\n return HttpResponseRedirect(reverse('main'))\n else:\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n u = User.objects.create_user(form.cleaned_data['email'],\n form.cleaned_data['email'], form.cleaned_data['password'])\n profile = UserProfile()\n profile.user = u\n profile.userType = 1\n profile.mobile = form.cleaned_data['mobile']\n profile.save()\n org = Organization()\n org.owner = u\n org.save()\n return HttpResponse('Thanks')\n else:\n form = RegistrationForm()\n return render(request, 'index_main.html', context={'form': form})\n\n\ndef docListOwner(request):\n pass\n\n\ndef docUploadOwner(request):\n pass\n\n\n@login_required\ndef indexOwner(request):\n context = {}\n return render(request, 'ownerMain.html', context)\n\n\n<mask token>\n\n\ndef get_universitys(request):\n p = {}\n for c in University.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\ndef get_publishers(request):\n p = {}\n for c in Publisher.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\ndef get_courses(request):\n p = {}\n for c in Course.objects.all():\n p[c.name] = c.name, c.pk\n return 
HttpResponse(json.dumps(p), content_type='application/json')\n\n\ndef get_topics(request):\n p = {}\n for c in Topic.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\n<mask token>\n\n\ndef get_services(request):\n p = {}\n for c in Service.objects.all():\n p[c.name] = c.name, c.id\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\n<mask token>\n\n\ndef get_cities(request):\n p = {}\n for c in City.objects.all():\n p[c.name] = str(c.latitude), str(c.longitude)\n return HttpResponse(json.dumps(p), content_type='application/json')\n",
"step-4": "<mask token>\n\n\nclass DocUploadForm(forms.ModelForm):\n tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.all())\n\n\n class Meta:\n model = Document\n exclude = ['organization', 'private_user', 'is_public',\n 'is_user_private', 'display']\n\n\nclass ShopForm(forms.Form):\n shopName = forms.CharField(max_length=100)\n email = forms.EmailField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'Email'}), label=_(u'email address'),\n required=False)\n address = forms.CharField(widget=forms.Textarea())\n pincode = forms.IntegerField()\n nearest_college = forms.CharField(max_length=200, required=False)\n nearest_town = forms.CharField(max_length=200, required=False)\n telephone = forms.CharField(max_length=14)\n longitude = forms.DecimalField(max_digits=11, decimal_places=7)\n latitude = forms.DecimalField(max_digits=11, decimal_places=7)\n username = forms.CharField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'User Name'}), label=_(u'Username'))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': 'Password'}, render_value=False), label\n =_(u'Password'))\n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': ' Password Again'}, render_value=False),\n label=_(u'Password Again'))\n services = forms.ModelMultipleChoiceField(queryset=Service.objects.all())\n\n def clean(self):\n \"\"\"\n Verifiy that the values entered into the two password fields\n match. 
Note that an error here will end up in\n ``non_field_errors()`` because it doesn't apply to a single\n field.\n \n \"\"\"\n if ('password1' in self.cleaned_data and 'password' in self.\n cleaned_data):\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(\n u'You must type the same password each time'))\n return self.cleaned_data\n\n\nclass ShopEditForm(forms.ModelForm):\n\n\n class Meta:\n model = Shop\n exclude = ['latitude', 'longitude', 'is_active']\n\n\n@login_required\ndef indexEmp(request):\n context = {'shop': shopid}\n return render(request, 'index.html', context)\n\n\n@login_required\ndef docUpload(request):\n user = UserProfile.objects.get(user=request.user)\n if request.method == 'POST':\n if user.userType == 1:\n org = Organization.objects.get(owner=request.user)\n elif user.userType == 2:\n org = Organization.objects.get(employee=request.user)\n data = DocUploadForm(request.POST, request.FILES)\n new_doc = data.save(commit=False)\n new_doc.organization = org\n new_doc.is_public = True\n new_doc.save()\n data.save_m2m()\n if user.userType == 1:\n return HttpResponseRedirect(reverse('documentListOwner'))\n elif user.userType == 2:\n return HttpResponseRedirect(reverse('documentListEmp'))\n else:\n form = DocUploadForm()\n if user.userType == 1:\n context = {'docUploadForm': form}\n return render(request, 'printo_app/docUpload-owner.html', context)\n if user.userType == 2:\n shopRate = Shop.objects.get(employee=request.user).rate\n context = {'docUploadForm': form, 'rate': shopRate}\n return render(request, 'printo_app/docUpload-emp.html', context)\n\n\n@login_required\ndef docList(request):\n user = UserProfile.objects.get(user=request.user)\n if user.userType == 1:\n org = Organization.objects.get(owner=request.user)\n docList = Document.objects.filter(is_public=True).filter(organization\n =org)\n context = {'docs': docList}\n return render(request, 'printo_app/docList-owner.html', context)\n elif 
user.userType == 2:\n org = Organization.objects.get(employee=request.user)\n docList = Document.objects.filter(is_public=True).filter(organization=org\n ).order_by('-uploadedDate')\n context = {'docs': docList}\n return render(request, 'printo_app/docList-emp.html', context)\n\n\n@login_required\ndef docListOwner(request):\n user = UserProfile.objects.get(user=request.user)\n if user.userType == 1:\n org = Organization.objects.get(owner=request.user)\n docList = Document.objects.filter(is_public=True).filter(organization\n =org)\n context = {'docs': docList}\n return render(request, 'printo_app/docList-owner.html', context)\n\n\n@login_required\ndef docDetail(request, docid):\n docDetail = Document.objects.get(id=docid)\n form = DocUploadForm(instance=docDetail)\n context = {'docEditForm': form, 'doc': docDetail}\n return render(request, 'printo_app/docDetail.html', context)\n\n\n@login_required\ndef docEditSave(request, docid):\n currentDoc = Document.objects.get(id=docid)\n docDetail = DocUploadForm(request.POST, request.FILES, instance=currentDoc)\n docDetail.save()\n context = {'msg': docDetail}\n return HttpResponseRedirect(reverse('documentList'))\n\n\n@login_required\ndef shopProfile(request, shopid=None):\n context = {}\n user = UserProfile.objects.get(user=request.user)\n if user.userType == 1:\n pass\n elif user.userType == 2:\n shop = Shop.objects.get(employee=request.user)\n shopForm = ShopEditForm()\n context = {'shopForm': shopForm, 'details': shop}\n return render(request, 'printo_app/shopProfile.html', context)\n\n\n@login_required\ndef shopEditSave(request):\n shop = Shop.objects.get(employee=request.user)\n shopForm = ShopEditForm(request.POST, instance=shop)\n shopForm.save()\n return HttpResponseRedirect(reverse('shopProfile'))\n\n\n@login_required\ndef indexEmp(request, shopid=None):\n user = UserProfile.objects.get(user=request.user)\n is_owner = False\n if user.userType == 1:\n is_owner = True\n elif user.userType == 2:\n is_owner = False\n 
context = {'is_owner': is_owner}\n return HttpResponseRedirect(reverse('orderList'))\n\n\n@login_required\ndef orderList(request, shopid=None):\n shop = Shop.objects.get(employee=request.user)\n orderList = Order.objects.filter(shop=shop)\n new_count = orderList.filter(is_new=True).count()\n pending_count = orderList.filter(is_accepted=True).count()\n completed_count = orderList.filter(is_printed=True).count()\n delivered_count = orderList.filter(is_delivered=True).count()\n context = {'orders': orderList, 'new_count': new_count, 'pending_count':\n pending_count, 'completed_count': completed_count,\n 'delivered_count': delivered_count}\n return render(request, 'printo_app/ordersList.html', context)\n\n\n@login_required\ndef shopList(request):\n org = Organization.objects.get(owner=request.user)\n shops = Shop.objects.filter(owner=org)\n context = {'shops': shops}\n return render(request, 'printo_app/shopList.html', context)\n\n\n@login_required\ndef shopCreate(request):\n uprofile = get_object_or_404(UserProfile, user=request.user)\n if uprofile.userType == 1:\n pass\n else:\n return HttpResponse(\"You don't have permission\")\n if request.method == 'POST':\n form = ShopForm(request.POST)\n import ipdb\n ipdb.set_trace()\n if form.is_valid():\n username = form.cleaned_data.get('username', None)\n password = form.cleaned_data.get('password', None)\n telephone = form.cleaned_data.get('telephone', None)\n email = request.user.email\n if username != None:\n user = User.objects.create_user(username=username, email=\n email, password=password)\n userprofile = UserProfile()\n userprofile.user = user\n userprofile.userType = 2\n if telephone != None:\n userprofile.telephone = telephone\n userprofile.save()\n shopprofile = Shop()\n shopprofile.employee = user\n shopprofile.owner = Organization.objects.get(owner=request.user)\n shopprofile.email = email\n shopprofile.shopName = form.cleaned_data.get('shopName', None)\n shopprofile.pincode = form.cleaned_data.get('pincode', 
None)\n shopprofile.address = form.cleaned_data.get('address', None)\n shopprofile.latitude = form.cleaned_data.get('latitude', None)\n shopprofile.longitude = form.cleaned_data.get('longitude', None)\n shopprofile.telephone = form.cleaned_data.get('telephone', None)\n shopprofile.save()\n shopprofile.services = form.cleaned_data.get('services', None)\n return HttpResponseRedirect(reverse('shopList'))\n else:\n userform = 'this form is to be deleted'\n shopform = ShopForm()\n context = {'shopCreateForm': shopform, 'userForm': userform}\n return render(request, 'printo_app/shopCreate.html', context)\n\n\n@login_required\ndef index(request):\n user = UserProfile.objects.get(user=request.user)\n if user.userType == 1:\n return HttpResponseRedirect(reverse('OwnerMain'))\n elif user.userType == 2:\n return HttpResponseRedirect(reverse('EmployeeMain'))\n return None\n\n\nclass RegistrationForm(forms.Form):\n email = forms.EmailField(widget=forms.TextInput(attrs={'class':\n 'mandatory', 'placeholder': 'Email'}), label=_(u'email address'))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': 'Password'}, render_value=False), label\n =_(u'Password'))\n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class':\n 'mandatory', 'placeholder': ' Password Again'}, render_value=False),\n label=_(u'Password Again'))\n mobile = forms.CharField(max_length=14)\n\n def clean(self):\n \"\"\"\n Verifiy that the values entered into the two password fields\n match. 
Note that an error here will end up in\n ``non_field_errors()`` because it doesn't apply to a single\n field.\n \n \"\"\"\n if ('password1' in self.cleaned_data and 'password' in self.\n cleaned_data):\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(\n u'You must type the same password each time'))\n return self.cleaned_data\n\n def clean_email(self):\n if 'email' in self.cleaned_data:\n try:\n user = User.objects.get(username=self.cleaned_data['email'])\n raise forms.ValidationError(_(\n u'Already Email Address is registered'))\n except User.DoesNotExist:\n pass\n return self.cleaned_data['email']\n\n\ndef index_main(request):\n if request.user.is_authenticated() == True:\n return HttpResponseRedirect(reverse('main'))\n else:\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n u = User.objects.create_user(form.cleaned_data['email'],\n form.cleaned_data['email'], form.cleaned_data['password'])\n profile = UserProfile()\n profile.user = u\n profile.userType = 1\n profile.mobile = form.cleaned_data['mobile']\n profile.save()\n org = Organization()\n org.owner = u\n org.save()\n return HttpResponse('Thanks')\n else:\n form = RegistrationForm()\n return render(request, 'index_main.html', context={'form': form})\n\n\ndef docListOwner(request):\n pass\n\n\ndef docUploadOwner(request):\n pass\n\n\n@login_required\ndef indexOwner(request):\n context = {}\n return render(request, 'ownerMain.html', context)\n\n\n<mask token>\n\n\ndef get_universitys(request):\n p = {}\n for c in University.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\ndef get_publishers(request):\n p = {}\n for c in Publisher.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\ndef get_courses(request):\n p = {}\n for c in Course.objects.all():\n p[c.name] = c.name, c.pk\n return 
HttpResponse(json.dumps(p), content_type='application/json')\n\n\ndef get_topics(request):\n p = {}\n for c in Topic.objects.all():\n p[c.name] = c.name, c.pk\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\n<mask token>\n\n\ndef get_services(request):\n p = {}\n for c in Service.objects.all():\n p[c.name] = c.name, c.id\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\ndef get_colleges(request):\n p = {}\n for c in College.objects.all():\n p[c.name] = str(c.latitude), str(c.longitude)\n return HttpResponse(json.dumps(p), content_type='application/json')\n\n\ndef get_cities(request):\n p = {}\n for c in City.objects.all():\n p[c.name] = str(c.latitude), str(c.longitude)\n return HttpResponse(json.dumps(p), content_type='application/json')\n",
"step-5": "from django.shortcuts import render\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom .models import Document, Organization, UserProfile, Shop\n#from .forms import DocUploadForm, ShopEditForm\nfrom django.shortcuts import render_to_response, get_object_or_404\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth import authenticate, login\nfrom django.shortcuts import get_object_or_404\n\nfrom django.contrib.auth.decorators import login_required\nfrom django.forms import ModelForm\nfrom django.utils.translation import ugettext_lazy as _\nfrom django import forms\nfrom .models import *\n\nclass DocUploadForm(forms.ModelForm):\n tags = forms.ModelMultipleChoiceField(queryset=Tag.objects.all())\n class Meta:\n model = Document\n # widgets = {'tags' : autocomplete_light.MultipleChoiceWidget('TagAutocomplete')}\n # autocomplete_fields = ('tags','topic','university',)\n exclude = ['organization','private_user','is_public','is_user_private','display']\n\nclass ShopForm(forms.Form):\n shopName = forms.CharField(max_length=100)\n email = forms.EmailField(widget=forms.TextInput(attrs={'class': 'mandatory', 'placeholder': 'Email'}),\n label=_(u'email address'), required=False)\n \n address = forms.CharField(widget= forms.Textarea())\n pincode = forms.IntegerField()\n \n nearest_college = forms.CharField(max_length=200, required=False)\n \n nearest_town = forms.CharField(max_length=200, required=False)\n \n telephone = forms.CharField(max_length=14)\n \n longitude = forms.DecimalField(max_digits=11, decimal_places=7)\n latitude = forms.DecimalField(max_digits=11, decimal_places=7)\n username = forms.CharField(widget=forms.TextInput(attrs={'class': 'mandatory', 'placeholder': 'User Name'}),\n label=_(u'Username'))\n password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'mandatory', 'placeholder': 'Password'}, render_value=False),\n label=_(u'Password'))\n \n 
password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'mandatory', 'placeholder': ' Password Again'}, render_value=False),\n label=_(u'Password Again'))\n services = forms.ModelMultipleChoiceField(queryset=Service.objects.all())\n \n def clean(self):\n \"\"\"\n Verifiy that the values entered into the two password fields\n match. Note that an error here will end up in\n ``non_field_errors()`` because it doesn't apply to a single\n field.\n \n \"\"\"\n if 'password1' in self.cleaned_data and 'password' in self.cleaned_data:\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(u'You must type the same password each time'))\n return self.cleaned_data \n\n\n # def clean_email(self):\n # if 'email' in self.cleaned_data:\n\n # try:\n # user = User.objects.get(username= self.cleaned_data[\"username\"])\n # raise forms.ValidationError(_(u'Already this Username is Registered'))\n \n # except User.DoesNotExist:\n \n # pass\n # return self.cleaned_data[\"email\"]\n\nclass ShopEditForm(forms.ModelForm):\n class Meta:\n model = Shop\n exclude = ['latitude','longitude','is_active']\n\n@login_required\ndef indexEmp(request):\n context = {'shop':shopid}\n return render(request,'index.html',context)\n\n@login_required\ndef docUpload(request):\n user = UserProfile.objects.get(user=request.user)\n if(request.method=='POST'):\n # import ipdb; ipdb.set_trace();\n \n if(user.userType == 1 ):\n org = Organization.objects.get(owner = request.user)\n elif(user.userType == 2):\n org = Organization.objects.get(employee = request.user)\n\n data = DocUploadForm(request.POST,request.FILES)\n new_doc = data.save(commit=False)\n new_doc.organization = org\n new_doc.is_public = True\n new_doc.save()\n data.save_m2m() \n if(user.userType == 1 ):\n return HttpResponseRedirect(reverse('documentListOwner'))\n elif(user.userType == 2):\n return HttpResponseRedirect(reverse('documentListEmp'))\n else:\n form = DocUploadForm()\n 
if(user.userType == 1 ):\n context = { \"docUploadForm\" : form}\n return render(request,'printo_app/docUpload-owner.html',context)\n if(user.userType == 2 ):\n shopRate = Shop.objects.get(employee=request.user).rate\n context = { \"docUploadForm\" : form,\"rate\":shopRate }\n return render(request,'printo_app/docUpload-emp.html',context)\n\n@login_required\ndef docList(request):\n user = UserProfile.objects.get(user=request.user)\n if(user.userType == 1 ):\n org = Organization.objects.get(owner = request.user)\n docList = Document.objects.filter(is_public=True).filter(organization=org)\n context = {\"docs\":docList}\n return render(request,'printo_app/docList-owner.html',context)\n elif(user.userType == 2):\n org = Organization.objects.get(employee = request.user)\n docList = Document.objects.filter(is_public=True).filter(organization=org).order_by('-uploadedDate')\n \n context = {\"docs\":docList}\n return render(request,'printo_app/docList-emp.html',context)\n\n@login_required\ndef docListOwner(request):\n user = UserProfile.objects.get(user=request.user)\n if(user.userType == 1 ):\n org = Organization.objects.get(owner = request.user)\n docList = Document.objects.filter(is_public=True).filter(organization=org)\n context = {\"docs\":docList}\n return render(request,'printo_app/docList-owner.html',context)\n\n@login_required\ndef docDetail(request,docid):\n docDetail = Document.objects.get(id=docid)\n form = DocUploadForm(instance = docDetail)\n context = {\"docEditForm\":form,\"doc\":docDetail}\n return render(request,'printo_app/docDetail.html',context)\n\n@login_required\ndef docEditSave(request,docid):\n currentDoc = Document.objects.get(id=docid)\n docDetail = DocUploadForm(request.POST,request.FILES,instance=currentDoc)\n docDetail.save() \n context = { \"msg\":docDetail }\n return HttpResponseRedirect(reverse('documentList'))\n\n@login_required\ndef shopProfile(request,shopid=None):\n context = {}\n user = UserProfile.objects.get(user=request.user)\n 
if(user.userType == 1):\n pass\n elif(user.userType == 2):\n shop = Shop.objects.get(employee=request.user)\n shopForm = ShopEditForm()\n context = {'shopForm':shopForm,'details':shop}\n return render(request,'printo_app/shopProfile.html',context)\n\n@login_required\ndef shopEditSave(request):\n shop = Shop.objects.get(employee=request.user)\n shopForm = ShopEditForm(request.POST,instance=shop)\n shopForm.save()\n return HttpResponseRedirect(reverse('shopProfile'))\n\n@login_required\ndef indexEmp(request,shopid=None):\n user = UserProfile.objects.get(user=request.user)\n is_owner = False\n if(user.userType == 1):\n is_owner = True\n elif(user.userType == 2):\n is_owner = False\n context = {'is_owner':is_owner}\n return HttpResponseRedirect(reverse('orderList'))\n\n@login_required\ndef orderList(request,shopid=None):\n shop = Shop.objects.get(employee = request.user)\n orderList = Order.objects.filter(shop=shop)\n new_count = orderList.filter(is_new=True).count()\n pending_count = orderList.filter(is_accepted=True).count()\n completed_count = orderList.filter(is_printed=True).count()\n delivered_count = orderList.filter(is_delivered=True).count()\n context = {\"orders\":orderList,\"new_count\":new_count,\"pending_count\":pending_count,\"completed_count\":completed_count,\"delivered_count\":delivered_count}\n return render(request,'printo_app/ordersList.html',context)\n\n@login_required\ndef shopList(request):\n org = Organization.objects.get(owner = request.user)\n shops = Shop.objects.filter(owner = org )\n context={'shops' : shops}\n return render(request,'printo_app/shopList.html',context)\n\n@login_required\ndef shopCreate(request):\n uprofile =get_object_or_404(UserProfile, user=request.user)\n if uprofile.userType==1:\n pass\n else:\n return HttpResponse(\"You don't have permission\")\n \n if(request.method=='POST'):\n form = ShopForm(request.POST)\n import ipdb; ipdb.set_trace()\n if(form.is_valid()):\n username = form.cleaned_data.get(\"username\", None)\n 
password = form.cleaned_data.get(\"password\", None)\n telephone = form.cleaned_data.get(\"telephone\", None)\n email = request.user.email\n # email = form.cleaned_data.get(\"email\", None)\n # if email == None:\n # email = request.user.email\n if username != None:\n user = User.objects.create_user(username=username,email=email, password=password)\n \n userprofile = UserProfile()\n userprofile.user = user\n userprofile.userType = 2\n if telephone !=None:\n userprofile.telephone = telephone \n userprofile.save()\n \n # shop = Shop()\n shopprofile = Shop()\n shopprofile.employee = user\n shopprofile.owner = Organization.objects.get(owner = request.user)\n shopprofile.email = email\n shopprofile.shopName = form.cleaned_data.get(\"shopName\", None)\n shopprofile.pincode = form.cleaned_data.get(\"pincode\",None)\n shopprofile.address = form.cleaned_data.get(\"address\",None)\n shopprofile.latitude = form.cleaned_data.get(\"latitude\",None)\n shopprofile.longitude = form.cleaned_data.get(\"longitude\",None)\n shopprofile.telephone = form.cleaned_data.get(\"telephone\",None)\n \n shopprofile.save()\n shopprofile.services = form.cleaned_data.get(\"services\",None)\n # shop.save_m2m()\n\n return HttpResponseRedirect(reverse('shopList'))\n else:\n userform = 'this form is to be deleted'\n\n shopform = ShopForm()\n context = { 'shopCreateForm' : shopform, 'userForm' : userform }\n return render(request,'printo_app/shopCreate.html',context)\n\n@login_required\ndef index(request):\n user = UserProfile.objects.get(user=request.user)\n if(user.userType == 1):\n return HttpResponseRedirect(reverse('OwnerMain'))\n elif(user.userType == 2):\n return HttpResponseRedirect(reverse('EmployeeMain'))\n return None\n\nclass RegistrationForm(forms.Form):\n \n \n \n email = forms.EmailField(widget=forms.TextInput(attrs={'class': 'mandatory', 'placeholder': 'Email'}),\n label=_(u'email address'))\n \n password = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'mandatory', 
'placeholder': 'Password'}, render_value=False),\n label=_(u'Password'))\n \n password1 = forms.CharField(widget=forms.PasswordInput(attrs={'class': 'mandatory', 'placeholder': ' Password Again'}, render_value=False),\n label=_(u'Password Again'))\n \n mobile = forms.CharField(max_length=14)\n \n def clean(self):\n \"\"\"\n Verifiy that the values entered into the two password fields\n match. Note that an error here will end up in\n ``non_field_errors()`` because it doesn't apply to a single\n field.\n \n \"\"\"\n if 'password1' in self.cleaned_data and 'password' in self.cleaned_data:\n if self.cleaned_data['password1'] != self.cleaned_data['password']:\n raise forms.ValidationError(_(u'You must type the same password each time'))\n return self.cleaned_data \n\n\n def clean_email(self):\n if 'email' in self.cleaned_data:\n \n try:\n user = User.objects.get(username= self.cleaned_data[\"email\"])\n raise forms.ValidationError(_(u'Already Email Address is registered'))\n \n except User.DoesNotExist:\n pass\n return self.cleaned_data[\"email\"]\n\ndef index_main(request):\n if request.user.is_authenticated()==True:\n return HttpResponseRedirect(reverse(\"main\"))\n else:\n if request.method==\"POST\":\n form= RegistrationForm(request.POST)\n if form.is_valid():\n u = User.objects.create_user(form.cleaned_data[\"email\"], form.cleaned_data[\"email\"], form.cleaned_data[\"password\"],)\n # Send a mail with verification code\n profile = UserProfile()\n profile.user =u\n profile.userType =1\n profile.mobile = form.cleaned_data[\"mobile\"]\n profile.save()\n \n org= Organization()\n org.owner = u\n org.save()\n return HttpResponse(\"Thanks\") \n else:\n form =RegistrationForm()\n return render( request, 'index_main.html', context={\"form\":form},)\n\n \ndef docListOwner(request):\n pass\ndef docUploadOwner(request):\n pass\n\n@login_required\ndef indexOwner(request):\n context = {}\n return render(request,'ownerMain.html',context)\n\n# 
====================================\n# DATA PROVIDERS\n# ====================================\nimport json\nfrom django.core import serializers\n\ndef get_universitys(request):\n p={}\n # import ipdb; ipdb.set_trace()\n for c in University.objects.all():\n p[c.name] = (c.name,c.pk)\n return HttpResponse(json.dumps(p), content_type=\"application/json\")\n\ndef get_publishers(request):\n p={}\n # import ipdb; ipdb.set_tra ce()\n for c in Publisher.objects.all():\n p[c.name] = (c.name,c.pk)\n return HttpResponse(json.dumps(p), content_type=\"application/json\")\n\ndef get_courses(request):\n p={}\n # import ipdb; ipdb.set_tra ce()\n for c in Course.objects.all():\n p[c.name] = (c.name,c.pk)\n return HttpResponse(json.dumps(p), content_type=\"application/json\")\n\ndef get_topics(request):\n p={}\n # import ipdb; ipdb.set_tra ce()\n for c in Topic.objects.all():\n p[c.name] = (c.name,c.pk)\n return HttpResponse(json.dumps(p), content_type=\"application/json\")\n\ndef get_tags(request):\n p={}\n # import ipdb; ipdb.set_tra ce()\n for c in Tag.objects.all():\n p[c.name] = (c.name,c.id)\n return HttpResponse(json.dumps(p), content_type=\"application/json\")\n\ndef get_services(request):\n p={}\n # import ipdb; ipdb.set_trace()\n for c in Service.objects.all():\n p[c.name] = (c.name,c.id)\n return HttpResponse(json.dumps(p), content_type=\"application/json\")\n\ndef get_colleges(request):\n p={}\n for c in College.objects.all():\n p[c.name] =(str(c.latitude), str(c.longitude))\n return HttpResponse(json.dumps(p), content_type=\"application/json\")\n\ndef get_cities(request):\n p={}\n for c in City.objects.all():\n p[c.name] =(str(c.latitude), str(c.longitude))\n return HttpResponse(json.dumps(p), content_type=\"application/json\")\n",
"step-ids": [
23,
24,
32,
34,
37
]
}
|
[
23,
24,
32,
34,
37
] |
#!/usr/bin/env python
# -*- coding:utf-8 _*-
"""
Demo: counting element occurrences in a list with collections.Counter.

@author:tom_tao626
@license: Apache Licence
@file: 17.列表中的元素统计.py
@time: 2020/12/09
@contact: [email protected]
@site: xxxx.suizhu.net
@software: PyCharm
"""
# collections.Counter()
from collections import Counter
list1 = ['a', 'b', 'b', 'c', 'd', 'e', 'a', 'b', 'e']
count = Counter(list1)
print(count)
# Counter({'b': 3, 'a': 2, 'e': 2, 'c': 1, 'd': 1})   ('b' occurs 3 times)
print(count['b'])
# 3
# The element that appears most often
print(count.most_common(1))
# [('b', 3)]
print(count.items())
# dict_items([('a', 2), ('b', 3), ('c', 1), ('d', 1), ('e', 2)])
|
normal
|
{
"blob_id": "f2c592a0ea38d800510323a1001c646cdbecefff",
"index": 3009,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(count)\nprint(count['b'])\nprint(count.most_common(1))\nprint(count.items())\n",
"step-3": "<mask token>\nlist1 = ['a', 'b', 'b', 'c', 'd', 'e', 'a', 'b', 'e']\ncount = Counter(list1)\nprint(count)\nprint(count['b'])\nprint(count.most_common(1))\nprint(count.items())\n",
"step-4": "<mask token>\nfrom collections import Counter\nlist1 = ['a', 'b', 'b', 'c', 'd', 'e', 'a', 'b', 'e']\ncount = Counter(list1)\nprint(count)\nprint(count['b'])\nprint(count.most_common(1))\nprint(count.items())\n",
"step-5": "#!/usr/bin/env python \n# -*- coding:utf-8 _*-\n\"\"\" \n@author:tom_tao626 \n@license: Apache Licence \n@file: 17.列表中的元素统计.py \n@time: 2020/12/09\n@contact: [email protected]\n@site: xxxx.suizhu.net\n@software: PyCharm \n\"\"\"\n\n# collections.Counter()\n\nfrom collections import Counter\nlist1 = ['a', 'b', 'b', 'c', 'd', 'e', 'a', 'b', 'e']\ncount = Counter(list1)\nprint(count)\n# Counter({'a': 2, 'b': 2, 'e': 2, 'c': 1, 'd': 1})\nprint(count['b'])\n# 3\n# 出现次数最多的元素\nprint(count.most_common(1))\n# [('b', 3)]\nprint(count.items())\n# dict_items([('a', 2), ('b', 3), ('c', 1), ('d', 1), ('e', 2)])\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django import forms
from . import models
class PhotoForm(forms.Form):
    """Upload form exposing a single required ``image`` file field."""
    image = forms.ImageField()
|
normal
|
{
"blob_id": "3983f8dfb9c7b7e664af05857a0f6fe380154424",
"index": 3684,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass PhotoForm(forms.Form):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass PhotoForm(forms.Form):\n image = forms.ImageField()\n",
"step-4": "from django import forms\nfrom . import models\n\n\nclass PhotoForm(forms.Form):\n image = forms.ImageField()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy as np
from random import randint
def combinacaoDeEmbralhamento(qtdeLinhas):
    """Return ``[[a, b]]`` — two distinct random row indices for a matrix
    with ``qtdeLinhas`` rows.

    Both indices are drawn uniformly from ``range(qtdeLinhas)``.  The
    original upper bound was off by one (``random.randint`` is inclusive
    at both ends), so it could produce an out-of-range row index.

    Raises:
        ValueError: if ``qtdeLinhas`` < 2 — two distinct indices are
            impossible and the retry loop would never terminate.
    """
    if qtdeLinhas < 2:
        raise ValueError("qtdeLinhas must be >= 2 to pick two distinct rows")
    while True:
        a = randint(0, qtdeLinhas - 1)
        b = randint(0, qtdeLinhas - 1)
        if a != b:
            return [[a, b]]
def embaralhaMatriz(x):
    """Shuffle the rows of matrix ``x`` in place via random pairwise swaps.

    Performs one swap of two distinct randomly chosen rows per row of
    ``x``.  Matrices with fewer than two rows are left untouched.

    The original body referenced an undefined global ``qtdeLinhas``
    (NameError), discarded the generated index pair, and always swapped
    rows 0 and 1.
    """
    n = x.shape[0]
    if n < 2:
        return
    for _ in range(n):
        # Draw two distinct valid row indices.
        while True:
            a = randint(0, n - 1)
            b = randint(0, n - 1)
            if a != b:
                break
        x[[a, b]] = x[[b, a]]
# MAIN: build a 5x2 zero matrix and print it with its shape.
# NOTE(review): embaralhaMatriz is defined above but never invoked here.
A = np.zeros(shape=(5,2))
print(A)
print("-------------")
print(A.shape)
|
normal
|
{
"blob_id": "28ed494939d0928bf3ad4f07f58186374e925426",
"index": 7024,
"step-1": "<mask token>\n\n\ndef combinacaoDeEmbralhamento(qtdeLinhas):\n while True:\n a = randint(0, qtdeLinhas)\n b = randint(0, qtdeLinhas)\n if a == b:\n continue\n else:\n break\n resp = [[a, b]]\n return resp\n\n\ndef embaralhaMatriz(x):\n for i in range(qtdeLinhas):\n print(i)\n combinacaoDeEmbralhamento(x.shape[0])\n x[[1, 0]] = x[[0, 1]]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef combinacaoDeEmbralhamento(qtdeLinhas):\n while True:\n a = randint(0, qtdeLinhas)\n b = randint(0, qtdeLinhas)\n if a == b:\n continue\n else:\n break\n resp = [[a, b]]\n return resp\n\n\ndef embaralhaMatriz(x):\n for i in range(qtdeLinhas):\n print(i)\n combinacaoDeEmbralhamento(x.shape[0])\n x[[1, 0]] = x[[0, 1]]\n\n\n<mask token>\nprint(A)\nprint('-------------')\nprint(A.shape)\n",
"step-3": "<mask token>\n\n\ndef combinacaoDeEmbralhamento(qtdeLinhas):\n while True:\n a = randint(0, qtdeLinhas)\n b = randint(0, qtdeLinhas)\n if a == b:\n continue\n else:\n break\n resp = [[a, b]]\n return resp\n\n\ndef embaralhaMatriz(x):\n for i in range(qtdeLinhas):\n print(i)\n combinacaoDeEmbralhamento(x.shape[0])\n x[[1, 0]] = x[[0, 1]]\n\n\nA = np.zeros(shape=(5, 2))\nprint(A)\nprint('-------------')\nprint(A.shape)\n",
"step-4": "import numpy as np\nfrom random import randint\n\n\ndef combinacaoDeEmbralhamento(qtdeLinhas):\n while True:\n a = randint(0, qtdeLinhas)\n b = randint(0, qtdeLinhas)\n if a == b:\n continue\n else:\n break\n resp = [[a, b]]\n return resp\n\n\ndef embaralhaMatriz(x):\n for i in range(qtdeLinhas):\n print(i)\n combinacaoDeEmbralhamento(x.shape[0])\n x[[1, 0]] = x[[0, 1]]\n\n\nA = np.zeros(shape=(5, 2))\nprint(A)\nprint('-------------')\nprint(A.shape)\n",
"step-5": "import numpy as np\nfrom random import randint\n\ndef combinacaoDeEmbralhamento(qtdeLinhas):\n\twhile True:\n\t\ta = randint(0,qtdeLinhas)\n\t\tb = randint(0,qtdeLinhas)\n\t\tif a == b :\n\t\t\tcontinue\n\t\telse:\n\t\t\tbreak\n\tresp = [[a,b]]\n\treturn resp\n\n\t\n\ndef embaralhaMatriz(x):\n\tfor i in range(qtdeLinhas):\n\t\tprint(i)\n\tcombinacaoDeEmbralhamento(x.shape[0])\n\tx[[1,0]] = x[[0,1]]\n\n\n# MAIN\n\nA = np.zeros(shape=(5,2))\n\nprint(A)\nprint(\"-------------\")\nprint(A.shape)",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import sys
import warnings

# Emit a warning on legacy Python 2 interpreters; proceed normally on 3.x.
if sys.version_info[0] >= 3:
    print('Normal continuation')
else:
    warnings.warn("At least Python 3.0 is required to run this program", RuntimeWarning)
|
normal
|
{
"blob_id": "a6d5552fa0648fcf9484a1498e4132eb80ecfc86",
"index": 2304,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif sys.version_info[0] < 3:\n warnings.warn('At least Python 3.0 is required to run this program',\n RuntimeWarning)\nelse:\n print('Normal continuation')\n",
"step-3": "import sys, warnings\nif sys.version_info[0] < 3:\n warnings.warn('At least Python 3.0 is required to run this program',\n RuntimeWarning)\nelse:\n print('Normal continuation')\n",
"step-4": "import sys, warnings\nif sys.version_info[0] < 3:\n warnings.warn(\"At least Python 3.0 is required to run this program\", RuntimeWarning)\nelse:\n print('Normal continuation')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import configparser
import shutil
def get_imagemagick_path():
    """Return the path of ImageMagick's ``convert`` command.

    Prefers the ``convert`` entry in the ``[commands]`` section of
    ``settings/settings.ini``; falls back to looking up ``convert`` on
    PATH (which may yield ``None`` if ImageMagick is not installed).
    """
    config = configparser.ConfigParser()
    config.read("settings/settings.ini")
    # fallback= also covers a missing [commands] section; the original
    # dict-style lookup raised KeyError in that case instead of
    # returning the PATH fallback.
    return config.get('commands', 'convert', fallback=shutil.which("convert"))
# try:
# except KeyError:
# EXIV2_PATH = shutil.which("exiv2")
|
normal
|
{
"blob_id": "5fa9c9908d4aea507cf0ca8287a6b8e5b391470a",
"index": 9297,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_imagemagick_path():\n config = configparser.ConfigParser()\n config.read('settings/settings.ini')\n return config['commands'].get('convert', shutil.which('convert'))\n",
"step-3": "import configparser\nimport shutil\n\n\ndef get_imagemagick_path():\n config = configparser.ConfigParser()\n config.read('settings/settings.ini')\n return config['commands'].get('convert', shutil.which('convert'))\n",
"step-4": "import configparser\nimport shutil\n\n\ndef get_imagemagick_path():\n config = configparser.ConfigParser()\n config.read(\"settings/settings.ini\")\n return config['commands'].get('convert', shutil.which(\"convert\"))\n\n\n# try:\n# except KeyError:\n# EXIV2_PATH = shutil.which(\"exiv2\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden, HttpResponseServerError
from django.shortcuts import render
from django.template import RequestContext
import json
import datetime
import os
import re
from cog.views.utils import getQueryDict
from cog.plugins.esgf.security import esgfDatabaseManager
import traceback
import json
# Code used for react components
# Get directories for static files
package_dir = os.path.dirname(os.path.abspath(__file__))
static_dir = os.path.dirname(package_dir)
js_dir = os.path.join(static_dir,"static/cog/cog-react/js/")
css_dir = os.path.join(static_dir,"static/cog/cog-react/css/")
# Get static list
js_files = os.listdir(js_dir)
css_files = os.listdir(css_dir)
js_files = list(map(lambda f: "cog/cog-react/js/" + f, js_files))
css_files = list(map(lambda f: "cog/cog-react/css/" + f, css_files))
# Separate source and map files
map_files = []
js_only = []
for f in js_files:
if f.endswith(".map"):
map_files.append(f)
else:
js_only.append(f)
css_only = []
for f in css_files:
if f.endswith(".map"):
map_files.append(f)
else:
css_only.append(f)
# These files are used by Django 'subscribe.html' page, to renders front-end.
react_files = {
'css': css_only,
'js': js_only,
'map': map_files
}
# Example data that subscriptions front-end could receive from back-end
test_data = {
"post_url": "/subscription/",
"user_info": {"first":"John","last":"Doe","hobbies":"Programming.","send_emails_to":"This place."},
"activities": {"method":["email"],"weekly":["CMIP"],"monthly":["CMIP6"]},
"experiments": {"method":["popup"],"daily":["test", "experiment 2"],"weekly":["test2"]},
}
# To pass data to front-end, use react-props and pass it a dictionary with key-value pairs
react_props = test_data
def lookup_and_render(request):
try:
dbres = esgfDatabaseManager.lookupUserSubscriptions(request.user)
except Exception as e:
# log error
error_cond = str(e)
print(traceback.print_exc())
return render(request, 'cog/subscription/subscribe_done.html', {'email': request.user.email, 'error': "An Error Has Occurred While Processing Your Request. <p> {}".format(error_cond)})
return render(request, 'cog/subscription/subscribe_list.html', {'dbres': dbres})
def delete_subscription(request):
res = request.POST.get('subscription_id', None)
try:
if res == "ALL":
dbres = esgfDatabaseManager.deleteAllUserSubscriptions(
request.user)
else:
dbres = esgfDatabaseManager.deleteUserSubscriptionById(res)
except Exception as e:
# log error
error_cond = str(e)
return render(request, 'cog/subscription/subscribe_done.html', {'error': "An Error Has Occurred While Processing Your Request. <p> {}".format(error_cond)})
return render(request, 'cog/subscription/subs_delete_done.html')
def temp_print(request, var_name, method="POST"):
print(request.POST)
if request.method == "POST":
data = json.loads(request.body)
else:
data = request.GET.copy()
if(data):
try:
print("{} {}: {}".format(method, var_name, data[var_name]))
except KeyError:
print("Key error: {}".format(data))
else:
print("{} {}: None".format(method, var_name))
@login_required
def subscribe(request):
# Contains the data from the front-end POST requests
if request.method == "POST":
# Get data from the POST request received from front-end
data = json.loads(request.body)
# Example obtaining data
if data:
for key in data.keys():
print("{}: {}".format(key, data[key]))
# Example response sent back to front-end
test = {"status": "All good!","data": data}
return HttpResponse(json.dumps(test),content_type='application/json')
if request.method == 'GET':
if request.GET.get('action') == "modify":
return lookup_and_render(request)
else:
return render(request, 'cog/subscription/subscribe.html', {'react_files': react_files, 'react_props': react_props})
elif request.POST.get('action') == "delete":
return delete_subscription(request)
else:
period = request.POST.get("period", -1)
if period == -1:
return render(request, 'cog/subscription/subscribe_done.html', {'email': request.user.email, 'error': "Invalid period"})
subs_count = 0
error_cond = ""
keyarr = []
valarr = []
for i in range(1, 4):
keystr = 'subscription_key{}'.format(i)
keyres = request.POST.get(keystr, '')
valstr = 'subscription_value{}'.format(i)
valres = request.POST.get(valstr, '')
if len(keyres) < 2 or len(valres) < 2:
continue
keyarr.append(keyres)
valarr.append(valres)
subs_count = subs_count + 1
if subs_count > 0:
try:
esgfDatabaseManager.addUserSubscription(
request.user, period, keyarr, valarr)
except Exception as e:
# log error
error_cond = str(e)
return render(request, 'cog/subscription/subscribe_done.html', {'email': request.user.email, 'error': "An Error Has Occurred While Processing Your Request. <p> {}".format(error_cond), })
return render(request, 'cog/subscription/subscribe_done.html', {'email': request.user.email, 'count': subs_count})
else:
return render(request, 'cog/subscription/subscribe.html', {'react_files': react_files, 'react_props': react_props})
|
normal
|
{
"blob_id": "d583661accce8c058f3e6b8568a09b4be1e58e4e",
"index": 4877,
"step-1": "<mask token>\n\n\ndef lookup_and_render(request):\n try:\n dbres = esgfDatabaseManager.lookupUserSubscriptions(request.user)\n except Exception as e:\n error_cond = str(e)\n print(traceback.print_exc())\n return render(request, 'cog/subscription/subscribe_done.html', {\n 'email': request.user.email, 'error':\n 'An Error Has Occurred While Processing Your Request. <p> {}'.\n format(error_cond)})\n return render(request, 'cog/subscription/subscribe_list.html', {'dbres':\n dbres})\n\n\ndef delete_subscription(request):\n res = request.POST.get('subscription_id', None)\n try:\n if res == 'ALL':\n dbres = esgfDatabaseManager.deleteAllUserSubscriptions(request.user\n )\n else:\n dbres = esgfDatabaseManager.deleteUserSubscriptionById(res)\n except Exception as e:\n error_cond = str(e)\n return render(request, 'cog/subscription/subscribe_done.html', {\n 'error':\n 'An Error Has Occurred While Processing Your Request. <p> {}'.\n format(error_cond)})\n return render(request, 'cog/subscription/subs_delete_done.html')\n\n\ndef temp_print(request, var_name, method='POST'):\n print(request.POST)\n if request.method == 'POST':\n data = json.loads(request.body)\n else:\n data = request.GET.copy()\n if data:\n try:\n print('{} {}: {}'.format(method, var_name, data[var_name]))\n except KeyError:\n print('Key error: {}'.format(data))\n else:\n print('{} {}: None'.format(method, var_name))\n\n\n@login_required\ndef subscribe(request):\n if request.method == 'POST':\n data = json.loads(request.body)\n if data:\n for key in data.keys():\n print('{}: {}'.format(key, data[key]))\n test = {'status': 'All good!', 'data': data}\n return HttpResponse(json.dumps(test), content_type='application/json')\n if request.method == 'GET':\n if request.GET.get('action') == 'modify':\n return lookup_and_render(request)\n else:\n return render(request, 'cog/subscription/subscribe.html', {\n 'react_files': react_files, 'react_props': react_props})\n elif request.POST.get('action') == 
'delete':\n return delete_subscription(request)\n else:\n period = request.POST.get('period', -1)\n if period == -1:\n return render(request, 'cog/subscription/subscribe_done.html',\n {'email': request.user.email, 'error': 'Invalid period'})\n subs_count = 0\n error_cond = ''\n keyarr = []\n valarr = []\n for i in range(1, 4):\n keystr = 'subscription_key{}'.format(i)\n keyres = request.POST.get(keystr, '')\n valstr = 'subscription_value{}'.format(i)\n valres = request.POST.get(valstr, '')\n if len(keyres) < 2 or len(valres) < 2:\n continue\n keyarr.append(keyres)\n valarr.append(valres)\n subs_count = subs_count + 1\n if subs_count > 0:\n try:\n esgfDatabaseManager.addUserSubscription(request.user,\n period, keyarr, valarr)\n except Exception as e:\n error_cond = str(e)\n return render(request,\n 'cog/subscription/subscribe_done.html', {'email':\n request.user.email, 'error':\n 'An Error Has Occurred While Processing Your Request. <p> {}'\n .format(error_cond)})\n return render(request, 'cog/subscription/subscribe_done.html',\n {'email': request.user.email, 'count': subs_count})\n else:\n return render(request, 'cog/subscription/subscribe.html', {\n 'react_files': react_files, 'react_props': react_props})\n",
"step-2": "<mask token>\nfor f in js_files:\n if f.endswith('.map'):\n map_files.append(f)\n else:\n js_only.append(f)\n<mask token>\nfor f in css_files:\n if f.endswith('.map'):\n map_files.append(f)\n else:\n css_only.append(f)\n<mask token>\n\n\ndef lookup_and_render(request):\n try:\n dbres = esgfDatabaseManager.lookupUserSubscriptions(request.user)\n except Exception as e:\n error_cond = str(e)\n print(traceback.print_exc())\n return render(request, 'cog/subscription/subscribe_done.html', {\n 'email': request.user.email, 'error':\n 'An Error Has Occurred While Processing Your Request. <p> {}'.\n format(error_cond)})\n return render(request, 'cog/subscription/subscribe_list.html', {'dbres':\n dbres})\n\n\ndef delete_subscription(request):\n res = request.POST.get('subscription_id', None)\n try:\n if res == 'ALL':\n dbres = esgfDatabaseManager.deleteAllUserSubscriptions(request.user\n )\n else:\n dbres = esgfDatabaseManager.deleteUserSubscriptionById(res)\n except Exception as e:\n error_cond = str(e)\n return render(request, 'cog/subscription/subscribe_done.html', {\n 'error':\n 'An Error Has Occurred While Processing Your Request. 
<p> {}'.\n format(error_cond)})\n return render(request, 'cog/subscription/subs_delete_done.html')\n\n\ndef temp_print(request, var_name, method='POST'):\n print(request.POST)\n if request.method == 'POST':\n data = json.loads(request.body)\n else:\n data = request.GET.copy()\n if data:\n try:\n print('{} {}: {}'.format(method, var_name, data[var_name]))\n except KeyError:\n print('Key error: {}'.format(data))\n else:\n print('{} {}: None'.format(method, var_name))\n\n\n@login_required\ndef subscribe(request):\n if request.method == 'POST':\n data = json.loads(request.body)\n if data:\n for key in data.keys():\n print('{}: {}'.format(key, data[key]))\n test = {'status': 'All good!', 'data': data}\n return HttpResponse(json.dumps(test), content_type='application/json')\n if request.method == 'GET':\n if request.GET.get('action') == 'modify':\n return lookup_and_render(request)\n else:\n return render(request, 'cog/subscription/subscribe.html', {\n 'react_files': react_files, 'react_props': react_props})\n elif request.POST.get('action') == 'delete':\n return delete_subscription(request)\n else:\n period = request.POST.get('period', -1)\n if period == -1:\n return render(request, 'cog/subscription/subscribe_done.html',\n {'email': request.user.email, 'error': 'Invalid period'})\n subs_count = 0\n error_cond = ''\n keyarr = []\n valarr = []\n for i in range(1, 4):\n keystr = 'subscription_key{}'.format(i)\n keyres = request.POST.get(keystr, '')\n valstr = 'subscription_value{}'.format(i)\n valres = request.POST.get(valstr, '')\n if len(keyres) < 2 or len(valres) < 2:\n continue\n keyarr.append(keyres)\n valarr.append(valres)\n subs_count = subs_count + 1\n if subs_count > 0:\n try:\n esgfDatabaseManager.addUserSubscription(request.user,\n period, keyarr, valarr)\n except Exception as e:\n error_cond = str(e)\n return render(request,\n 'cog/subscription/subscribe_done.html', {'email':\n request.user.email, 'error':\n 'An Error Has Occurred While Processing Your 
Request. <p> {}'\n .format(error_cond)})\n return render(request, 'cog/subscription/subscribe_done.html',\n {'email': request.user.email, 'count': subs_count})\n else:\n return render(request, 'cog/subscription/subscribe.html', {\n 'react_files': react_files, 'react_props': react_props})\n",
"step-3": "<mask token>\npackage_dir = os.path.dirname(os.path.abspath(__file__))\nstatic_dir = os.path.dirname(package_dir)\njs_dir = os.path.join(static_dir, 'static/cog/cog-react/js/')\ncss_dir = os.path.join(static_dir, 'static/cog/cog-react/css/')\njs_files = os.listdir(js_dir)\ncss_files = os.listdir(css_dir)\njs_files = list(map(lambda f: 'cog/cog-react/js/' + f, js_files))\ncss_files = list(map(lambda f: 'cog/cog-react/css/' + f, css_files))\nmap_files = []\njs_only = []\nfor f in js_files:\n if f.endswith('.map'):\n map_files.append(f)\n else:\n js_only.append(f)\ncss_only = []\nfor f in css_files:\n if f.endswith('.map'):\n map_files.append(f)\n else:\n css_only.append(f)\nreact_files = {'css': css_only, 'js': js_only, 'map': map_files}\ntest_data = {'post_url': '/subscription/', 'user_info': {'first': 'John',\n 'last': 'Doe', 'hobbies': 'Programming.', 'send_emails_to':\n 'This place.'}, 'activities': {'method': ['email'], 'weekly': ['CMIP'],\n 'monthly': ['CMIP6']}, 'experiments': {'method': ['popup'], 'daily': [\n 'test', 'experiment 2'], 'weekly': ['test2']}}\nreact_props = test_data\n\n\ndef lookup_and_render(request):\n try:\n dbres = esgfDatabaseManager.lookupUserSubscriptions(request.user)\n except Exception as e:\n error_cond = str(e)\n print(traceback.print_exc())\n return render(request, 'cog/subscription/subscribe_done.html', {\n 'email': request.user.email, 'error':\n 'An Error Has Occurred While Processing Your Request. 
<p> {}'.\n format(error_cond)})\n return render(request, 'cog/subscription/subscribe_list.html', {'dbres':\n dbres})\n\n\ndef delete_subscription(request):\n res = request.POST.get('subscription_id', None)\n try:\n if res == 'ALL':\n dbres = esgfDatabaseManager.deleteAllUserSubscriptions(request.user\n )\n else:\n dbres = esgfDatabaseManager.deleteUserSubscriptionById(res)\n except Exception as e:\n error_cond = str(e)\n return render(request, 'cog/subscription/subscribe_done.html', {\n 'error':\n 'An Error Has Occurred While Processing Your Request. <p> {}'.\n format(error_cond)})\n return render(request, 'cog/subscription/subs_delete_done.html')\n\n\ndef temp_print(request, var_name, method='POST'):\n print(request.POST)\n if request.method == 'POST':\n data = json.loads(request.body)\n else:\n data = request.GET.copy()\n if data:\n try:\n print('{} {}: {}'.format(method, var_name, data[var_name]))\n except KeyError:\n print('Key error: {}'.format(data))\n else:\n print('{} {}: None'.format(method, var_name))\n\n\n@login_required\ndef subscribe(request):\n if request.method == 'POST':\n data = json.loads(request.body)\n if data:\n for key in data.keys():\n print('{}: {}'.format(key, data[key]))\n test = {'status': 'All good!', 'data': data}\n return HttpResponse(json.dumps(test), content_type='application/json')\n if request.method == 'GET':\n if request.GET.get('action') == 'modify':\n return lookup_and_render(request)\n else:\n return render(request, 'cog/subscription/subscribe.html', {\n 'react_files': react_files, 'react_props': react_props})\n elif request.POST.get('action') == 'delete':\n return delete_subscription(request)\n else:\n period = request.POST.get('period', -1)\n if period == -1:\n return render(request, 'cog/subscription/subscribe_done.html',\n {'email': request.user.email, 'error': 'Invalid period'})\n subs_count = 0\n error_cond = ''\n keyarr = []\n valarr = []\n for i in range(1, 4):\n keystr = 'subscription_key{}'.format(i)\n keyres = 
request.POST.get(keystr, '')\n valstr = 'subscription_value{}'.format(i)\n valres = request.POST.get(valstr, '')\n if len(keyres) < 2 or len(valres) < 2:\n continue\n keyarr.append(keyres)\n valarr.append(valres)\n subs_count = subs_count + 1\n if subs_count > 0:\n try:\n esgfDatabaseManager.addUserSubscription(request.user,\n period, keyarr, valarr)\n except Exception as e:\n error_cond = str(e)\n return render(request,\n 'cog/subscription/subscribe_done.html', {'email':\n request.user.email, 'error':\n 'An Error Has Occurred While Processing Your Request. <p> {}'\n .format(error_cond)})\n return render(request, 'cog/subscription/subscribe_done.html',\n {'email': request.user.email, 'count': subs_count})\n else:\n return render(request, 'cog/subscription/subscribe.html', {\n 'react_files': react_files, 'react_props': react_props})\n",
"step-4": "from django.core.urlresolvers import reverse\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden, HttpResponseServerError\nfrom django.shortcuts import render\nfrom django.template import RequestContext\nimport json\nimport datetime\nimport os\nimport re\nfrom cog.views.utils import getQueryDict\nfrom cog.plugins.esgf.security import esgfDatabaseManager\nimport traceback\nimport json\npackage_dir = os.path.dirname(os.path.abspath(__file__))\nstatic_dir = os.path.dirname(package_dir)\njs_dir = os.path.join(static_dir, 'static/cog/cog-react/js/')\ncss_dir = os.path.join(static_dir, 'static/cog/cog-react/css/')\njs_files = os.listdir(js_dir)\ncss_files = os.listdir(css_dir)\njs_files = list(map(lambda f: 'cog/cog-react/js/' + f, js_files))\ncss_files = list(map(lambda f: 'cog/cog-react/css/' + f, css_files))\nmap_files = []\njs_only = []\nfor f in js_files:\n if f.endswith('.map'):\n map_files.append(f)\n else:\n js_only.append(f)\ncss_only = []\nfor f in css_files:\n if f.endswith('.map'):\n map_files.append(f)\n else:\n css_only.append(f)\nreact_files = {'css': css_only, 'js': js_only, 'map': map_files}\ntest_data = {'post_url': '/subscription/', 'user_info': {'first': 'John',\n 'last': 'Doe', 'hobbies': 'Programming.', 'send_emails_to':\n 'This place.'}, 'activities': {'method': ['email'], 'weekly': ['CMIP'],\n 'monthly': ['CMIP6']}, 'experiments': {'method': ['popup'], 'daily': [\n 'test', 'experiment 2'], 'weekly': ['test2']}}\nreact_props = test_data\n\n\ndef lookup_and_render(request):\n try:\n dbres = esgfDatabaseManager.lookupUserSubscriptions(request.user)\n except Exception as e:\n error_cond = str(e)\n print(traceback.print_exc())\n return render(request, 'cog/subscription/subscribe_done.html', {\n 'email': request.user.email, 'error':\n 'An Error Has Occurred While Processing Your Request. 
<p> {}'.\n format(error_cond)})\n return render(request, 'cog/subscription/subscribe_list.html', {'dbres':\n dbres})\n\n\ndef delete_subscription(request):\n res = request.POST.get('subscription_id', None)\n try:\n if res == 'ALL':\n dbres = esgfDatabaseManager.deleteAllUserSubscriptions(request.user\n )\n else:\n dbres = esgfDatabaseManager.deleteUserSubscriptionById(res)\n except Exception as e:\n error_cond = str(e)\n return render(request, 'cog/subscription/subscribe_done.html', {\n 'error':\n 'An Error Has Occurred While Processing Your Request. <p> {}'.\n format(error_cond)})\n return render(request, 'cog/subscription/subs_delete_done.html')\n\n\ndef temp_print(request, var_name, method='POST'):\n print(request.POST)\n if request.method == 'POST':\n data = json.loads(request.body)\n else:\n data = request.GET.copy()\n if data:\n try:\n print('{} {}: {}'.format(method, var_name, data[var_name]))\n except KeyError:\n print('Key error: {}'.format(data))\n else:\n print('{} {}: None'.format(method, var_name))\n\n\n@login_required\ndef subscribe(request):\n if request.method == 'POST':\n data = json.loads(request.body)\n if data:\n for key in data.keys():\n print('{}: {}'.format(key, data[key]))\n test = {'status': 'All good!', 'data': data}\n return HttpResponse(json.dumps(test), content_type='application/json')\n if request.method == 'GET':\n if request.GET.get('action') == 'modify':\n return lookup_and_render(request)\n else:\n return render(request, 'cog/subscription/subscribe.html', {\n 'react_files': react_files, 'react_props': react_props})\n elif request.POST.get('action') == 'delete':\n return delete_subscription(request)\n else:\n period = request.POST.get('period', -1)\n if period == -1:\n return render(request, 'cog/subscription/subscribe_done.html',\n {'email': request.user.email, 'error': 'Invalid period'})\n subs_count = 0\n error_cond = ''\n keyarr = []\n valarr = []\n for i in range(1, 4):\n keystr = 'subscription_key{}'.format(i)\n keyres = 
request.POST.get(keystr, '')\n valstr = 'subscription_value{}'.format(i)\n valres = request.POST.get(valstr, '')\n if len(keyres) < 2 or len(valres) < 2:\n continue\n keyarr.append(keyres)\n valarr.append(valres)\n subs_count = subs_count + 1\n if subs_count > 0:\n try:\n esgfDatabaseManager.addUserSubscription(request.user,\n period, keyarr, valarr)\n except Exception as e:\n error_cond = str(e)\n return render(request,\n 'cog/subscription/subscribe_done.html', {'email':\n request.user.email, 'error':\n 'An Error Has Occurred While Processing Your Request. <p> {}'\n .format(error_cond)})\n return render(request, 'cog/subscription/subscribe_done.html',\n {'email': request.user.email, 'count': subs_count})\n else:\n return render(request, 'cog/subscription/subscribe.html', {\n 'react_files': react_files, 'react_props': react_props})\n",
"step-5": "\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse, HttpResponseRedirect, HttpResponseForbidden, HttpResponseServerError\nfrom django.shortcuts import render\nfrom django.template import RequestContext\nimport json\n\nimport datetime\n\nimport os\nimport re\nfrom cog.views.utils import getQueryDict\n\nfrom cog.plugins.esgf.security import esgfDatabaseManager\n\nimport traceback\nimport json\n\n# Code used for react components\n\n# Get directories for static files\npackage_dir = os.path.dirname(os.path.abspath(__file__))\nstatic_dir = os.path.dirname(package_dir)\njs_dir = os.path.join(static_dir,\"static/cog/cog-react/js/\")\ncss_dir = os.path.join(static_dir,\"static/cog/cog-react/css/\")\n\n# Get static list\njs_files = os.listdir(js_dir)\ncss_files = os.listdir(css_dir)\njs_files = list(map(lambda f: \"cog/cog-react/js/\" + f, js_files))\ncss_files = list(map(lambda f: \"cog/cog-react/css/\" + f, css_files))\n\n# Separate source and map files\nmap_files = []\njs_only = []\nfor f in js_files:\n if f.endswith(\".map\"):\n map_files.append(f)\n else:\n js_only.append(f)\ncss_only = []\nfor f in css_files:\n if f.endswith(\".map\"):\n map_files.append(f)\n else:\n css_only.append(f)\n\n# These files are used by Django 'subscribe.html' page, to renders front-end.\nreact_files = {\n 'css': css_only,\n 'js': js_only,\n 'map': map_files\n}\n\n# Example data that subscriptions front-end could receive from back-end\ntest_data = {\n \"post_url\": \"/subscription/\",\n \"user_info\": {\"first\":\"John\",\"last\":\"Doe\",\"hobbies\":\"Programming.\",\"send_emails_to\":\"This place.\"},\n \"activities\": {\"method\":[\"email\"],\"weekly\":[\"CMIP\"],\"monthly\":[\"CMIP6\"]},\n \"experiments\": {\"method\":[\"popup\"],\"daily\":[\"test\", \"experiment 2\"],\"weekly\":[\"test2\"]},\n}\n\n# To pass data to front-end, use react-props and pass it a dictionary with key-value 
pairs\nreact_props = test_data\n\ndef lookup_and_render(request):\n\n try:\n dbres = esgfDatabaseManager.lookupUserSubscriptions(request.user)\n except Exception as e:\n # log error\n error_cond = str(e)\n print(traceback.print_exc())\n return render(request, 'cog/subscription/subscribe_done.html', {'email': request.user.email, 'error': \"An Error Has Occurred While Processing Your Request. <p> {}\".format(error_cond)})\n\n return render(request, 'cog/subscription/subscribe_list.html', {'dbres': dbres})\n\n\ndef delete_subscription(request):\n res = request.POST.get('subscription_id', None)\n try:\n if res == \"ALL\":\n dbres = esgfDatabaseManager.deleteAllUserSubscriptions(\n request.user)\n else:\n dbres = esgfDatabaseManager.deleteUserSubscriptionById(res)\n except Exception as e:\n # log error\n error_cond = str(e)\n return render(request, 'cog/subscription/subscribe_done.html', {'error': \"An Error Has Occurred While Processing Your Request. <p> {}\".format(error_cond)})\n\n return render(request, 'cog/subscription/subs_delete_done.html')\n\ndef temp_print(request, var_name, method=\"POST\"):\n print(request.POST)\n if request.method == \"POST\":\n data = json.loads(request.body)\n else:\n data = request.GET.copy()\n \n if(data):\n try:\n print(\"{} {}: {}\".format(method, var_name, data[var_name]))\n except KeyError:\n print(\"Key error: {}\".format(data))\n else:\n print(\"{} {}: None\".format(method, var_name))\n\n@login_required\ndef subscribe(request):\n\n # Contains the data from the front-end POST requests\n if request.method == \"POST\":\n\n # Get data from the POST request received from front-end\n data = json.loads(request.body)\n\n # Example obtaining data\n if data:\n for key in data.keys():\n print(\"{}: {}\".format(key, data[key]))\n\n # Example response sent back to front-end\n test = {\"status\": \"All good!\",\"data\": data}\n return HttpResponse(json.dumps(test),content_type='application/json')\n\n if request.method == 'GET':\n if 
request.GET.get('action') == \"modify\":\n return lookup_and_render(request)\n else:\n return render(request, 'cog/subscription/subscribe.html', {'react_files': react_files, 'react_props': react_props})\n elif request.POST.get('action') == \"delete\":\n return delete_subscription(request)\n else:\n period = request.POST.get(\"period\", -1)\n if period == -1:\n return render(request, 'cog/subscription/subscribe_done.html', {'email': request.user.email, 'error': \"Invalid period\"})\n\n subs_count = 0\n error_cond = \"\"\n keyarr = []\n valarr = []\n for i in range(1, 4):\n\n keystr = 'subscription_key{}'.format(i)\n keyres = request.POST.get(keystr, '')\n\n valstr = 'subscription_value{}'.format(i)\n valres = request.POST.get(valstr, '')\n\n if len(keyres) < 2 or len(valres) < 2:\n continue\n\n keyarr.append(keyres)\n valarr.append(valres)\n\n subs_count = subs_count + 1\n\n if subs_count > 0:\n\n try:\n\n esgfDatabaseManager.addUserSubscription(\n request.user, period, keyarr, valarr)\n\n except Exception as e:\n # log error\n error_cond = str(e)\n return render(request, 'cog/subscription/subscribe_done.html', {'email': request.user.email, 'error': \"An Error Has Occurred While Processing Your Request. <p> {}\".format(error_cond), })\n\n return render(request, 'cog/subscription/subscribe_done.html', {'email': request.user.email, 'count': subs_count})\n else:\n return render(request, 'cog/subscription/subscribe.html', {'react_files': react_files, 'react_props': react_props})\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
from flask import request, json, Response, Blueprint
from ..models.DriverModel import DriverModel, DriverSchema
driver_api = Blueprint('drivers', __name__)
driver_schema = DriverSchema()
@driver_api.route('/', methods=['POST'])
def create():
req_data = request.get_json()
data, error = driver_schema.load(req_data)
if error:
return custom_response(error, 400)
driver_in_db = DriverModel.get_driver_by_name(data.get('name'))
if driver_in_db:
return custom_response({'Error': 'Driver already exist.'}, 400)
driver = DriverModel(data)
driver.save()
response = driver_schema.dump(driver).data
return custom_response(response, 201)
@driver_api.route('/<int:driver_id>', methods=['GET'])
def get(driver_id):
driver = DriverModel.get_one_driver(driver_id)
if not driver:
return custom_response({'Error': 'Driver not found.'}, 404)
response = driver_schema.dump(driver).data
return custom_response(response, 200)
@driver_api.route('/<int:driver_id>', methods=['PUT'])
def update(driver_id):
req_data = request.get_json()
data, error = driver_schema.load(req_data, partial=True)
if error:
return custom_response({'Error': 'Driver not found.'}, 400)
driver = DriverModel.get_one_driver(driver_id)
driver.update(data)
response = driver_schema.dump(driver).data
return custom_response(response, 200)
@driver_api.route('/<int:driver_id>', methods=['DELETE'])
def delete(driver_id):
driver = DriverModel.get_one_driver(driver_id)
if not driver:
return custom_response({'Error': 'Driver not found.'}, 400)
driver.delete()
return custom_response({'Sucess': 'Driver deleted with sucess!'}, 200)
@driver_api.route('/list_not_loaded', methods=['GET'])
def list_truck_not_loaded():
driver = DriverModel.truck_not_loaded()
response = driver_schema.dump(driver, many=True).data
return custom_response(response, 200)
@driver_api.route('/list_trucks_owned', methods=['GET'])
def list_truck_owned():
driver = DriverModel.truck_owned()
response = driver_schema.dump(driver, many=True).data
return custom_response(response, 200)
def custom_response(response, status_code):
    """Wrap *response* as a JSON Flask Response with the given status code."""
    body = json.dumps(response)
    return Response(response=body, status=status_code, mimetype="application/json")
|
normal
|
{
"blob_id": "ee7820d50b5020a787fbaf012480e8c70bc0ee41",
"index": 1690,
"step-1": "<mask token>\n\n\n@driver_api.route('/<int:driver_id>', methods=['PUT'])\ndef update(driver_id):\n req_data = request.get_json()\n data, error = driver_schema.load(req_data, partial=True)\n if error:\n return custom_response({'Error': 'Driver not found.'}, 400)\n driver = DriverModel.get_one_driver(driver_id)\n driver.update(data)\n response = driver_schema.dump(driver).data\n return custom_response(response, 200)\n\n\n<mask token>\n\n\n@driver_api.route('/list_not_loaded', methods=['GET'])\ndef list_truck_not_loaded():\n driver = DriverModel.truck_not_loaded()\n response = driver_schema.dump(driver, many=True).data\n return custom_response(response, 200)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@driver_api.route('/', methods=['POST'])\ndef create():\n req_data = request.get_json()\n data, error = driver_schema.load(req_data)\n if error:\n return custom_response(error, 400)\n driver_in_db = DriverModel.get_driver_by_name(data.get('name'))\n if driver_in_db:\n return custom_response({'Error': 'Driver already exist.'}, 400)\n driver = DriverModel(data)\n driver.save()\n response = driver_schema.dump(driver).data\n return custom_response(response, 201)\n\n\n<mask token>\n\n\n@driver_api.route('/<int:driver_id>', methods=['PUT'])\ndef update(driver_id):\n req_data = request.get_json()\n data, error = driver_schema.load(req_data, partial=True)\n if error:\n return custom_response({'Error': 'Driver not found.'}, 400)\n driver = DriverModel.get_one_driver(driver_id)\n driver.update(data)\n response = driver_schema.dump(driver).data\n return custom_response(response, 200)\n\n\n@driver_api.route('/<int:driver_id>', methods=['DELETE'])\ndef delete(driver_id):\n driver = DriverModel.get_one_driver(driver_id)\n if not driver:\n return custom_response({'Error': 'Driver not found.'}, 400)\n driver.delete()\n return custom_response({'Sucess': 'Driver deleted with sucess!'}, 200)\n\n\n@driver_api.route('/list_not_loaded', methods=['GET'])\ndef list_truck_not_loaded():\n driver = DriverModel.truck_not_loaded()\n response = driver_schema.dump(driver, many=True).data\n return custom_response(response, 200)\n\n\n@driver_api.route('/list_trucks_owned', methods=['GET'])\ndef list_truck_owned():\n driver = DriverModel.truck_owned()\n response = driver_schema.dump(driver, many=True).data\n return custom_response(response, 200)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@driver_api.route('/', methods=['POST'])\ndef create():\n req_data = request.get_json()\n data, error = driver_schema.load(req_data)\n if error:\n return custom_response(error, 400)\n driver_in_db = DriverModel.get_driver_by_name(data.get('name'))\n if driver_in_db:\n return custom_response({'Error': 'Driver already exist.'}, 400)\n driver = DriverModel(data)\n driver.save()\n response = driver_schema.dump(driver).data\n return custom_response(response, 201)\n\n\n@driver_api.route('/<int:driver_id>', methods=['GET'])\ndef get(driver_id):\n driver = DriverModel.get_one_driver(driver_id)\n if not driver:\n return custom_response({'Error': 'Driver not found.'}, 404)\n response = driver_schema.dump(driver).data\n return custom_response(response, 200)\n\n\n@driver_api.route('/<int:driver_id>', methods=['PUT'])\ndef update(driver_id):\n req_data = request.get_json()\n data, error = driver_schema.load(req_data, partial=True)\n if error:\n return custom_response({'Error': 'Driver not found.'}, 400)\n driver = DriverModel.get_one_driver(driver_id)\n driver.update(data)\n response = driver_schema.dump(driver).data\n return custom_response(response, 200)\n\n\n@driver_api.route('/<int:driver_id>', methods=['DELETE'])\ndef delete(driver_id):\n driver = DriverModel.get_one_driver(driver_id)\n if not driver:\n return custom_response({'Error': 'Driver not found.'}, 400)\n driver.delete()\n return custom_response({'Sucess': 'Driver deleted with sucess!'}, 200)\n\n\n@driver_api.route('/list_not_loaded', methods=['GET'])\ndef list_truck_not_loaded():\n driver = DriverModel.truck_not_loaded()\n response = driver_schema.dump(driver, many=True).data\n return custom_response(response, 200)\n\n\n@driver_api.route('/list_trucks_owned', methods=['GET'])\ndef list_truck_owned():\n driver = DriverModel.truck_owned()\n response = driver_schema.dump(driver, many=True).data\n return custom_response(response, 200)\n\n\ndef custom_response(response, status_code):\n 
return Response(mimetype='application/json', response=json.dumps(\n response), status=status_code)\n",
"step-4": "from flask import request, json, Response, Blueprint\nfrom ..models.DriverModel import DriverModel, DriverSchema\ndriver_api = Blueprint('drivers', __name__)\ndriver_schema = DriverSchema()\n\n\n@driver_api.route('/', methods=['POST'])\ndef create():\n req_data = request.get_json()\n data, error = driver_schema.load(req_data)\n if error:\n return custom_response(error, 400)\n driver_in_db = DriverModel.get_driver_by_name(data.get('name'))\n if driver_in_db:\n return custom_response({'Error': 'Driver already exist.'}, 400)\n driver = DriverModel(data)\n driver.save()\n response = driver_schema.dump(driver).data\n return custom_response(response, 201)\n\n\n@driver_api.route('/<int:driver_id>', methods=['GET'])\ndef get(driver_id):\n driver = DriverModel.get_one_driver(driver_id)\n if not driver:\n return custom_response({'Error': 'Driver not found.'}, 404)\n response = driver_schema.dump(driver).data\n return custom_response(response, 200)\n\n\n@driver_api.route('/<int:driver_id>', methods=['PUT'])\ndef update(driver_id):\n req_data = request.get_json()\n data, error = driver_schema.load(req_data, partial=True)\n if error:\n return custom_response({'Error': 'Driver not found.'}, 400)\n driver = DriverModel.get_one_driver(driver_id)\n driver.update(data)\n response = driver_schema.dump(driver).data\n return custom_response(response, 200)\n\n\n@driver_api.route('/<int:driver_id>', methods=['DELETE'])\ndef delete(driver_id):\n driver = DriverModel.get_one_driver(driver_id)\n if not driver:\n return custom_response({'Error': 'Driver not found.'}, 400)\n driver.delete()\n return custom_response({'Sucess': 'Driver deleted with sucess!'}, 200)\n\n\n@driver_api.route('/list_not_loaded', methods=['GET'])\ndef list_truck_not_loaded():\n driver = DriverModel.truck_not_loaded()\n response = driver_schema.dump(driver, many=True).data\n return custom_response(response, 200)\n\n\n@driver_api.route('/list_trucks_owned', methods=['GET'])\ndef list_truck_owned():\n driver = 
DriverModel.truck_owned()\n response = driver_schema.dump(driver, many=True).data\n return custom_response(response, 200)\n\n\ndef custom_response(response, status_code):\n return Response(mimetype='application/json', response=json.dumps(\n response), status=status_code)\n",
"step-5": "from flask import request, json, Response, Blueprint\nfrom ..models.DriverModel import DriverModel, DriverSchema\n\ndriver_api = Blueprint('drivers', __name__)\ndriver_schema = DriverSchema()\n\n\n@driver_api.route('/', methods=['POST'])\ndef create():\n req_data = request.get_json()\n data, error = driver_schema.load(req_data)\n\n if error:\n return custom_response(error, 400)\n\n driver_in_db = DriverModel.get_driver_by_name(data.get('name'))\n if driver_in_db:\n return custom_response({'Error': 'Driver already exist.'}, 400)\n\n driver = DriverModel(data)\n driver.save()\n\n response = driver_schema.dump(driver).data\n return custom_response(response, 201)\n\n\n@driver_api.route('/<int:driver_id>', methods=['GET'])\ndef get(driver_id):\n driver = DriverModel.get_one_driver(driver_id)\n if not driver:\n return custom_response({'Error': 'Driver not found.'}, 404)\n\n response = driver_schema.dump(driver).data\n return custom_response(response, 200)\n\n\n@driver_api.route('/<int:driver_id>', methods=['PUT'])\ndef update(driver_id):\n req_data = request.get_json()\n data, error = driver_schema.load(req_data, partial=True)\n if error:\n return custom_response({'Error': 'Driver not found.'}, 400)\n\n driver = DriverModel.get_one_driver(driver_id)\n driver.update(data)\n\n response = driver_schema.dump(driver).data\n return custom_response(response, 200)\n\n\n@driver_api.route('/<int:driver_id>', methods=['DELETE'])\ndef delete(driver_id):\n driver = DriverModel.get_one_driver(driver_id)\n if not driver:\n return custom_response({'Error': 'Driver not found.'}, 400)\n\n driver.delete()\n return custom_response({'Sucess': 'Driver deleted with sucess!'}, 200)\n\n\n@driver_api.route('/list_not_loaded', methods=['GET'])\ndef list_truck_not_loaded():\n driver = DriverModel.truck_not_loaded()\n\n response = driver_schema.dump(driver, many=True).data\n return custom_response(response, 200)\n\n\n@driver_api.route('/list_trucks_owned', methods=['GET'])\ndef 
list_truck_owned():\n driver = DriverModel.truck_owned()\n\n response = driver_schema.dump(driver, many=True).data\n return custom_response(response, 200)\n\n\ndef custom_response(response, status_code):\n return Response(\n mimetype=\"application/json\",\n response=json.dumps(response),\n status=status_code\n )\n",
"step-ids": [
2,
5,
7,
9,
10
]
}
|
[
2,
5,
7,
9,
10
] |
#-*- coding: utf8 -*-
#Programa: 04-palindromo
#Objetivo:Un Numero Palindromo es aquel numero que se lee igual, de izquierda a derecha y viceversa
#El palindromo mas grande que se pued obtener por el producto de dos numeos de dos digitos
# es: 9009 que es igual a 91x99.
#Encuentre el palindromo mas grande que se pueda encontrar por el producto de numeo de tres digitos.
#Recomendacion: tratar de hacerlo con el ejemplo siempre.
#Autor: Fernando Martinez
#Fecha: 28 enero de 2020
def obtener_palindromo(valor):
    """
    Return True if `valor` reads the same forwards and backwards.

    :param valor: number (or any value with a stable str() form) to test
    :return: True for a palindrome, False otherwise

    Fixes over the original: it returned None (not False) for
    non-palindromes, and compared a reversed list against an alias of the
    same list; comparing the string with its reverse is equivalent and
    far simpler.
    """
    texto = str(valor)
    return texto == texto[::-1]
def multiplicaciones():
    """
    Return the largest palindrome that is a product of two 3-digit numbers.

    Checks every product a*b with 100 <= a <= b <= 999. Starting the inner
    loop at `a` halves the work of the original version -- multiplication is
    commutative, so a*b and b*a are the same product -- without changing the
    result (906609). The cheap `> mayor` comparison runs before the string
    palindrome test to skip most candidates early.
    """
    mayor = 0
    for primer_numero in range(100, 1000):
        for segundo_numero in range(primer_numero, 1000):
            producto = primer_numero * segundo_numero
            if producto > mayor and obtener_palindromo(producto):
                mayor = producto
    return mayor
# Entry point: print the largest 3-digit-product palindrome when run as a script.
if __name__ == "__main__":
    print(multiplicaciones())
|
normal
|
{
"blob_id": "45f9d5ac0fa7d9259c1d53b92c030559f3bfda89",
"index": 7161,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef multiplicaciones():\n \"\"\"\n Funcion se encargara de crear las multiplicaciones entre 999 y 100\n\n mediante dos ciclos for.\n \"\"\"\n ultimo_palindromo = 0\n total = 0\n for primer_numero in range(100, 1000):\n for segundo_numero in range(100, 1000):\n total = primer_numero * segundo_numero\n if obtener_palindromo(total):\n if ultimo_palindromo < total:\n ultimo_palindromo = total\n return ultimo_palindromo\n\n\n<mask token>\n",
"step-3": "def obtener_palindromo(valor):\n \"\"\"\n Funcion que verifica si un numero es palindromo\n \n \"\"\"\n palindromo = list(str(valor))\n palindromo_final = palindromo\n if palindromo[::-1] == palindromo_final:\n return True\n\n\ndef multiplicaciones():\n \"\"\"\n Funcion se encargara de crear las multiplicaciones entre 999 y 100\n\n mediante dos ciclos for.\n \"\"\"\n ultimo_palindromo = 0\n total = 0\n for primer_numero in range(100, 1000):\n for segundo_numero in range(100, 1000):\n total = primer_numero * segundo_numero\n if obtener_palindromo(total):\n if ultimo_palindromo < total:\n ultimo_palindromo = total\n return ultimo_palindromo\n\n\n<mask token>\n",
"step-4": "def obtener_palindromo(valor):\n \"\"\"\n Funcion que verifica si un numero es palindromo\n \n \"\"\"\n palindromo = list(str(valor))\n palindromo_final = palindromo\n if palindromo[::-1] == palindromo_final:\n return True\n\n\ndef multiplicaciones():\n \"\"\"\n Funcion se encargara de crear las multiplicaciones entre 999 y 100\n\n mediante dos ciclos for.\n \"\"\"\n ultimo_palindromo = 0\n total = 0\n for primer_numero in range(100, 1000):\n for segundo_numero in range(100, 1000):\n total = primer_numero * segundo_numero\n if obtener_palindromo(total):\n if ultimo_palindromo < total:\n ultimo_palindromo = total\n return ultimo_palindromo\n\n\nif __name__ == '__main__':\n print(multiplicaciones())\n",
"step-5": "#-*- coding: utf8 -*-\n#Programa: 04-palindromo\n#Objetivo:Un Numero Palindromo es aquel numero que se lee igual, de izquierda a derecha y viceversa\n #El palindromo mas grande que se pued obtener por el producto de dos numeos de dos digitos \n # es: 9009 que es igual a 91x99.\n #Encuentre el palindromo mas grande que se pueda encontrar por el producto de numeo de tres digitos.\n\n #Recomendacion: tratar de hacerlo con el ejemplo siempre.\n#Autor: Fernando Martinez\n#Fecha: 28 enero de 2020\n\ndef obtener_palindromo(valor):\n \"\"\"\n Funcion que verifica si un numero es palindromo\n \n \"\"\"\n \n #Luego de convertirlo a str, los vamos a insertar en una lista para luego verificar\n palindromo = list(str(valor))\n #lo insertamos en una nueva lista\n palindromo_final = palindromo\n \n #Luego aplicaremos la verificacion para comprobar si es un palindromo\n if palindromo [:: -1] == palindromo_final:\n return True\n\n\n \n #print('El numero es un palindromo')\n\ndef multiplicaciones(): #906609 tiene que darme\n \"\"\"\n Funcion se encargara de crear las multiplicaciones entre 999 y 100\n\n mediante dos ciclos for.\n \"\"\"\n ultimo_palindromo = 0\n total = 0\n for primer_numero in range(100, 1000):\n for segundo_numero in range(100, 1000):\n #total se encarga de hacer la multiplicacion entre los numeros\n total = primer_numero * segundo_numero\n # llamamos a la funcion que verifica si la multiplicacion que envia es un palindromo\n if obtener_palindromo(total):\n #luego de verificar que la multiplicacion era palindromo pasamos a evaluarla hasta llegar al ultimo palindromo\n #entre 100 y 1000\n if ultimo_palindromo < total:\n ultimo_palindromo = total\n return ultimo_palindromo\n\n#Llamamos a la funcion\n\nif __name__ == \"__main__\":\n print(multiplicaciones())\n\n\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import requests
from requests.adapters import HTTPAdapter
from requests.exceptions import ConnectionError, Timeout, RequestException
# import from `requests` because Jarvis / some platforms still have old urllib3
from requests.packages.urllib3.util.retry import Retry
def retryable_session(retries=3, backoff_factor=0.5, status_forcelist=(500, 502, 504, 520), session=None):
    """Return a requests Session that retries transient failures.

    Pattern from https://www.peterbe.com/plog/best-practice-with-retries-with-requests
    ('Retry-After' headers on 413/503/529 responses are respected by urllib3
    by default).
    """
    session = session or requests.Session()
    retry_policy = Retry(
        total=retries,
        read=retries,
        connect=retries,
        backoff_factor=backoff_factor,
        status_forcelist=status_forcelist,
    )
    adapter = HTTPAdapter(max_retries=retry_policy)
    for scheme in ('http://', 'https://'):
        session.mount(scheme, adapter)
    return session
class Getter(object):
    """Callable wrapper around a retrying HTTP session.

    Performs GETs, transparently re-authenticating once on a 401 via the
    supplied ``login`` callback, and normalizes transport errors into
    :class:`GetterError`. Calls return the Response, or None for 404s and
    None results.
    """

    def __init__(self, contenttype=None, login=lambda: False, session=None):
        self.session = session or retryable_session()
        self.login = login
        if contenttype:
            self.session.headers['Accept'] = contenttype

    def __call__(self, url, **kwargs):
        """GET ``url``; raise GetterError for timeouts, connection and HTTP errors."""
        try:
            return self._inner_call(url, **kwargs)
        except (Timeout, ConnectionError, RequestException) as ex:
            response = getattr(ex, 'response', None)
            message = response.reason if response is not None else type(ex).__name__
            raise GetterError(message, ex, not isinstance(ex, RequestException))

    def _inner_call(self, url, **kwargs):
        kwargs.setdefault('timeout', 20)
        result = self.session.get(url, **kwargs)
        if result is None:
            return
        # One re-authentication attempt on 401, then re-issue the request.
        if result.status_code == 401 and self.login():
            result = self.session.get(url, **kwargs)
            if result is None:
                return
        if result.status_code == 404:
            return
        result.raise_for_status()
        return result
class GetterError(Exception):
    """Error raised by Getter, carrying the original cause and its HTTP context.

    ``connection_error`` is True when the failure happened below the HTTP
    layer (timeout / connection problem) rather than as an HTTP error response.
    """

    def __init__(self, message, cause, connection_error):
        super(GetterError, self).__init__()
        self.message = message
        self.cause = cause
        self.connection_error = connection_error
        # Triggering request/response, when the cause exposes them; else None.
        self.request = getattr(cause, 'request', None)
        self.response = getattr(cause, 'response', None)
|
normal
|
{
"blob_id": "603708c830dadb6f1a3e5de00536d558f448b5fb",
"index": 1352,
"step-1": "<mask token>\n\n\nclass Getter(object):\n <mask token>\n\n def __call__(self, url, **kwargs):\n try:\n return self._inner_call(url, **kwargs)\n except (Timeout, ConnectionError, RequestException) as ex:\n message = ex.response.reason if getattr(ex, 'response', None\n ) is not None else type(ex).__name__\n raise GetterError(message, ex, not isinstance(ex, RequestException)\n )\n\n def _inner_call(self, url, **kwargs):\n if 'timeout' not in kwargs:\n kwargs['timeout'] = 20\n result = self.session.get(url, **kwargs)\n if result is None:\n return\n if result.status_code == 401:\n if self.login():\n result = self.session.get(url, **kwargs)\n if result is None:\n return\n if result.status_code == 404:\n return\n result.raise_for_status()\n return result\n\n\nclass GetterError(Exception):\n\n def __init__(self, message, cause, connection_error):\n super(GetterError, self).__init__()\n self.message = message\n self.cause = cause\n self.connection_error = connection_error\n self.request = getattr(cause, 'request', None)\n self.response = getattr(cause, 'response', None)\n",
"step-2": "<mask token>\n\n\nclass Getter(object):\n\n def __init__(self, contenttype=None, login=lambda : False, session=None):\n self.session = session or retryable_session()\n self.login = login\n if contenttype:\n self.session.headers['Accept'] = contenttype\n\n def __call__(self, url, **kwargs):\n try:\n return self._inner_call(url, **kwargs)\n except (Timeout, ConnectionError, RequestException) as ex:\n message = ex.response.reason if getattr(ex, 'response', None\n ) is not None else type(ex).__name__\n raise GetterError(message, ex, not isinstance(ex, RequestException)\n )\n\n def _inner_call(self, url, **kwargs):\n if 'timeout' not in kwargs:\n kwargs['timeout'] = 20\n result = self.session.get(url, **kwargs)\n if result is None:\n return\n if result.status_code == 401:\n if self.login():\n result = self.session.get(url, **kwargs)\n if result is None:\n return\n if result.status_code == 404:\n return\n result.raise_for_status()\n return result\n\n\nclass GetterError(Exception):\n\n def __init__(self, message, cause, connection_error):\n super(GetterError, self).__init__()\n self.message = message\n self.cause = cause\n self.connection_error = connection_error\n self.request = getattr(cause, 'request', None)\n self.response = getattr(cause, 'response', None)\n",
"step-3": "<mask token>\n\n\ndef retryable_session(retries=3, backoff_factor=0.5, status_forcelist=(500,\n 502, 504, 520), session=None):\n session = session or requests.Session()\n retry = Retry(total=retries, read=retries, connect=retries,\n backoff_factor=backoff_factor, status_forcelist=status_forcelist)\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n session.mount('https://', adapter)\n return session\n\n\nclass Getter(object):\n\n def __init__(self, contenttype=None, login=lambda : False, session=None):\n self.session = session or retryable_session()\n self.login = login\n if contenttype:\n self.session.headers['Accept'] = contenttype\n\n def __call__(self, url, **kwargs):\n try:\n return self._inner_call(url, **kwargs)\n except (Timeout, ConnectionError, RequestException) as ex:\n message = ex.response.reason if getattr(ex, 'response', None\n ) is not None else type(ex).__name__\n raise GetterError(message, ex, not isinstance(ex, RequestException)\n )\n\n def _inner_call(self, url, **kwargs):\n if 'timeout' not in kwargs:\n kwargs['timeout'] = 20\n result = self.session.get(url, **kwargs)\n if result is None:\n return\n if result.status_code == 401:\n if self.login():\n result = self.session.get(url, **kwargs)\n if result is None:\n return\n if result.status_code == 404:\n return\n result.raise_for_status()\n return result\n\n\nclass GetterError(Exception):\n\n def __init__(self, message, cause, connection_error):\n super(GetterError, self).__init__()\n self.message = message\n self.cause = cause\n self.connection_error = connection_error\n self.request = getattr(cause, 'request', None)\n self.response = getattr(cause, 'response', None)\n",
"step-4": "import requests\nfrom requests.adapters import HTTPAdapter\nfrom requests.exceptions import ConnectionError, Timeout, RequestException\nfrom requests.packages.urllib3.util.retry import Retry\n\n\ndef retryable_session(retries=3, backoff_factor=0.5, status_forcelist=(500,\n 502, 504, 520), session=None):\n session = session or requests.Session()\n retry = Retry(total=retries, read=retries, connect=retries,\n backoff_factor=backoff_factor, status_forcelist=status_forcelist)\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n session.mount('https://', adapter)\n return session\n\n\nclass Getter(object):\n\n def __init__(self, contenttype=None, login=lambda : False, session=None):\n self.session = session or retryable_session()\n self.login = login\n if contenttype:\n self.session.headers['Accept'] = contenttype\n\n def __call__(self, url, **kwargs):\n try:\n return self._inner_call(url, **kwargs)\n except (Timeout, ConnectionError, RequestException) as ex:\n message = ex.response.reason if getattr(ex, 'response', None\n ) is not None else type(ex).__name__\n raise GetterError(message, ex, not isinstance(ex, RequestException)\n )\n\n def _inner_call(self, url, **kwargs):\n if 'timeout' not in kwargs:\n kwargs['timeout'] = 20\n result = self.session.get(url, **kwargs)\n if result is None:\n return\n if result.status_code == 401:\n if self.login():\n result = self.session.get(url, **kwargs)\n if result is None:\n return\n if result.status_code == 404:\n return\n result.raise_for_status()\n return result\n\n\nclass GetterError(Exception):\n\n def __init__(self, message, cause, connection_error):\n super(GetterError, self).__init__()\n self.message = message\n self.cause = cause\n self.connection_error = connection_error\n self.request = getattr(cause, 'request', None)\n self.response = getattr(cause, 'response', None)\n",
"step-5": "import requests\nfrom requests.adapters import HTTPAdapter\nfrom requests.exceptions import ConnectionError, Timeout, RequestException\n# import from `requests` because Jarvis / some platforms still have old urllib3\nfrom requests.packages.urllib3.util.retry import Retry\n\ndef retryable_session(retries=3, backoff_factor=0.5, status_forcelist=(500, 502, 504, 520), session=None):\n # from https://www.peterbe.com/plog/best-practice-with-retries-with-requests\n session = session or requests.Session()\n # 'Retry-After' 413/503/529 headers are respected by default\n retry = Retry(total=retries, read=retries, connect=retries,\n backoff_factor=backoff_factor, status_forcelist=status_forcelist)\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n session.mount('https://', adapter)\n return session\n\nclass Getter(object):\n def __init__(self, contenttype=None, login=lambda: False, session=None):\n self.session = session or retryable_session()\n self.login = login\n if contenttype:\n self.session.headers['Accept'] = contenttype\n\n def __call__(self, url, **kwargs):\n try:\n return self._inner_call(url, **kwargs)\n except (Timeout, ConnectionError, RequestException) as ex:\n message = ex.response.reason if getattr(ex, 'response', None) is not None else type(ex).__name__\n raise GetterError(message, ex, not isinstance(ex, RequestException))\n\n def _inner_call(self, url, **kwargs):\n if 'timeout' not in kwargs:\n kwargs['timeout'] = 20\n result = self.session.get(url, **kwargs)\n if result is None:\n return\n if result.status_code == 401:\n if self.login():\n result = self.session.get(url, **kwargs)\n if result is None:\n return\n\n if result.status_code == 404:\n return\n result.raise_for_status()\n return result\n\nclass GetterError(Exception):\n def __init__(self, message, cause, connection_error):\n super(GetterError, self).__init__()\n self.message = message\n self.cause = cause\n self.connection_error = connection_error\n 
self.request = getattr(cause, 'request', None)\n self.response = getattr(cause, 'response', None)\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
#-*-coding:utf-8-*-
from Classify import get_train_data
import sys
'''
获取训练集数据
'''
get_train_data(sys.argv[1], sys.argv[2])
|
normal
|
{
"blob_id": "513aff6cf29bbce55e2382943767a9a21df2e98e",
"index": 5080,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nget_train_data(sys.argv[1], sys.argv[2])\n",
"step-3": "from Classify import get_train_data\nimport sys\n<mask token>\nget_train_data(sys.argv[1], sys.argv[2])\n",
"step-4": "#-*-coding:utf-8-*-\nfrom Classify import get_train_data\nimport sys\n'''\n 获取训练集数据\n'''\nget_train_data(sys.argv[1], sys.argv[2])",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
'''
=======================================================================
AutoTest Team Source File.
Copyright(C), Changyou.com
-----------------------------------------------------------------------
Created: 2017/3/2 by ChengLongLong
-----------------------------------------------------------------------
Description:
-----------------------------------------------------------------------
History:
2017/3/2
=======================================================================
'''
|
normal
|
{
"blob_id": "38f7c529cd0a8d85de266c6a932e6c8342aee273",
"index": 4969,
"step-1": "<mask token>\n",
"step-2": "# -*- coding: utf-8 -*-\n'''\n=======================================================================\nAutoTest Team Source File.\nCopyright(C), Changyou.com\n-----------------------------------------------------------------------\nCreated: 2017/3/2 by ChengLongLong\n-----------------------------------------------------------------------\nDescription: \n-----------------------------------------------------------------------\nHistory: \n2017/3/2 \n=======================================================================\n'''",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
import os
import logging
from flask import Flask
from flask_orator import Orator
from flask_jwt_extended import JWTManager
from dotenv import load_dotenv
load_dotenv(verbose=True)


def _env_flag(name):
    """Interpret an environment variable as a boolean flag.

    Bug fix: the original used ``bool(os.getenv(...))``, which is True for
    ANY non-empty string -- including "false" and "0". Only a small set of
    affirmative values now enables a flag; unset/empty stays False.
    """
    return (os.getenv(name) or '').strip().lower() in ('1', 'true', 'yes', 'on')


app = Flask(__name__)
app.secret_key = os.getenv('SECRET_KEY')
app.config['JSON_SORT_KEYS'] = False
# Orator ORM connection settings, all driven by the .env file.
app.config['ORATOR_DATABASES'] = {
    'default': 'mysql',
    'mysql': {
        'driver': 'mysql',
        'host': os.getenv('DB_HOST'),
        'database': os.getenv('DB_NAME'),
        'user': os.getenv('DB_USER'),
        'password': os.getenv('DB_PASSWORD'),
        'prefix': '',
        'log_queries': _env_flag('LOG_QUERIES')
    }
}

app.config['JWT_SECRET_KEY'] = os.getenv('JWT_SECRET_KEY')  # Change this!
app.config['JWT_TOKEN_LOCATION'] = ['headers']  # headers', 'cookies', 'query_string', 'json'

db = Orator(app)
jwt = JWTManager(app)

if _env_flag('IS_DEV'):
    # Development only: echo every SQL query with its elapsed time to stderr.
    logger = logging.getLogger('orator.connection.queries')
    logger.setLevel(logging.DEBUG)

    formatter = logging.Formatter(
        '%(elapsed_time)sms %(query)s'
    )

    handler = logging.StreamHandler()
    handler.setFormatter(formatter)

    logger.addHandler(handler)
@app.route('/')
def index():
    """Smoke-test route: echo the configured DB host."""
    host = os.getenv('DB_HOST')
    return host
|
normal
|
{
"blob_id": "f20e2227821c43de17c116d8c11233eda53ab631",
"index": 9967,
"step-1": "<mask token>\n\n\[email protected]('/')\ndef index():\n return os.getenv('DB_HOST')\n",
"step-2": "<mask token>\nload_dotenv(verbose=True)\n<mask token>\nif bool(os.getenv('IS_DEV')):\n logger = logging.getLogger('orator.connection.queries')\n logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(elapsed_time)sms %(query)s')\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n\[email protected]('/')\ndef index():\n return os.getenv('DB_HOST')\n",
"step-3": "<mask token>\nload_dotenv(verbose=True)\napp = Flask(__name__)\napp.secret_key = os.getenv('SECRET_KEY')\napp.config['JSON_SORT_KEYS'] = False\napp.config['ORATOR_DATABASES'] = {'default': 'mysql', 'mysql': {'driver':\n 'mysql', 'host': os.getenv('DB_HOST'), 'database': os.getenv('DB_NAME'),\n 'user': os.getenv('DB_USER'), 'password': os.getenv('DB_PASSWORD'),\n 'prefix': '', 'log_queries': bool(os.getenv('LOG_QUERIES'))}}\napp.config['JWT_SECRET_KEY'] = os.getenv('JWT_SECRET_KEY')\napp.config['JWT_TOKEN_LOCATION'] = ['headers']\ndb = Orator(app)\njwt = JWTManager(app)\nif bool(os.getenv('IS_DEV')):\n logger = logging.getLogger('orator.connection.queries')\n logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(elapsed_time)sms %(query)s')\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n\[email protected]('/')\ndef index():\n return os.getenv('DB_HOST')\n",
"step-4": "import os\nimport logging\nfrom flask import Flask\nfrom flask_orator import Orator\nfrom flask_jwt_extended import JWTManager\nfrom dotenv import load_dotenv\nload_dotenv(verbose=True)\napp = Flask(__name__)\napp.secret_key = os.getenv('SECRET_KEY')\napp.config['JSON_SORT_KEYS'] = False\napp.config['ORATOR_DATABASES'] = {'default': 'mysql', 'mysql': {'driver':\n 'mysql', 'host': os.getenv('DB_HOST'), 'database': os.getenv('DB_NAME'),\n 'user': os.getenv('DB_USER'), 'password': os.getenv('DB_PASSWORD'),\n 'prefix': '', 'log_queries': bool(os.getenv('LOG_QUERIES'))}}\napp.config['JWT_SECRET_KEY'] = os.getenv('JWT_SECRET_KEY')\napp.config['JWT_TOKEN_LOCATION'] = ['headers']\ndb = Orator(app)\njwt = JWTManager(app)\nif bool(os.getenv('IS_DEV')):\n logger = logging.getLogger('orator.connection.queries')\n logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(elapsed_time)sms %(query)s')\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n\n\[email protected]('/')\ndef index():\n return os.getenv('DB_HOST')\n",
"step-5": "import os\nimport logging\nfrom flask import Flask\nfrom flask_orator import Orator\nfrom flask_jwt_extended import JWTManager\nfrom dotenv import load_dotenv\n\n\nload_dotenv(verbose=True)\n\napp = Flask(__name__)\napp.secret_key = os.getenv('SECRET_KEY')\napp.config['JSON_SORT_KEYS'] = False\napp.config['ORATOR_DATABASES'] = {\n 'default': 'mysql',\n 'mysql': {\n 'driver': 'mysql',\n 'host': os.getenv('DB_HOST'),\n 'database': os.getenv('DB_NAME'),\n 'user': os.getenv('DB_USER'),\n 'password': os.getenv('DB_PASSWORD'),\n 'prefix': '',\n 'log_queries': bool(os.getenv('LOG_QUERIES'))\n }\n}\n\napp.config['JWT_SECRET_KEY'] = os.getenv('JWT_SECRET_KEY') # Change this!\napp.config['JWT_TOKEN_LOCATION'] = ['headers'] # headers', 'cookies', 'query_string', 'json'\n\ndb = Orator(app)\njwt = JWTManager(app)\n\nif bool(os.getenv('IS_DEV')):\n\n logger = logging.getLogger('orator.connection.queries')\n logger.setLevel(logging.DEBUG)\n\n formatter = logging.Formatter(\n '%(elapsed_time)sms %(query)s'\n )\n\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n\n logger.addHandler(handler)\n\n\[email protected]('/')\ndef index():\n return os.getenv('DB_HOST')\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import abc
import math
import random
from typing import Union, Tuple
import numpy as np
from scipy import stats
from . import Rectangle, Line, Point, Shape
# Names exported by `from <module> import *`
__all__ = ['get_critical_angle', 'Paddle', 'Ball', 'Snell', 'Canvas']
# Small nudge distance used to move the ball just past a surface after an interaction so the same
# intersection is not re-detected on the follow-up sub-step (used in Canvas._finish_step_ball)
EPSILON = 1e-7
def get_critical_angle(s0: float, s1: float) -> Union[float, None]:
    """
    Compute the critical angle for a ball crossing from a medium with speed `s0` into one with
    speed `s1`.

    A critical angle only exists when the destination medium is faster (`s0 < s1`); at incidence
    angles beyond it the ball undergoes total internal reflection instead of refracting.

    :param s0: speed of the initial medium
    :param s1: speed of the final medium
    :return: the critical angle in radians, or None when no critical angle exists
    """
    return math.asin(s0 / s1) if s0 < s1 else None
class Paddle(Rectangle):
    def __init__(self, height: float, width: float, speed: float, side: str, max_angle: float, visibility: str):
        """
        A player paddle anchored to one side of the field.

        :param height: vertical extent of the paddle
        :param width: horizontal extent (only matters for rendering)
        :param speed: distance moved per `up`/`down` call
        :param side: which side of the field the paddle sits on ('left' or 'right')
        :param max_angle: largest deflection angle the paddle can impart on the ball
        :param visibility: rendering mode, see `Shape.visibility`
        """
        super().__init__(height=height, width=width, visibility=visibility, render_value=255)

        assert side in ['left', 'right'], f"side must be 'left' or 'right', not {side}"
        assert 0 <= max_angle <= math.pi / 2, f"max angle must be between 0 and pi/2, not {max_angle}"

        self.side = side
        self.speed = speed
        self.max_angle = max_angle

    def up(self):
        """Move the paddle one step towards the top of the field."""
        self.y = self.y + self.speed

    def down(self):
        """Move the paddle one step towards the bottom of the field."""
        self.y = self.y - self.speed

    def _get_edges(self) -> Tuple[Line]:
        """
        Return only the field-facing edge; the ball can never reach the other three edges first.
        """
        if self.side == 'right':
            field_x = self.left_bound
        elif self.side == 'left':
            field_x = self.right_bound
        else:
            return None
        return (Line((field_x, self.bottom_bound), (field_x, self.top_bound)),)

    def get_fraction_of_paddle(self, point: Point):
        """
        Normalized offset of a hit point from the paddle center, clamped to [-0.5, 0.5].

        :param point: the point where the ball hit the paddle
        :return: signed fraction of the paddle height in [-0.5, 0.5]
        """
        offset = (point.y - self.y) / self.height
        return min(max(offset, -0.5), 0.5)
class Ball(Rectangle):
    def __init__(self, size: float, max_initial_angle: float, visibility: str, has_volume: bool = False):
        """
        The game ball.

        :param size: edge length used when rendering the ball
        :param max_initial_angle: widest angle (from horizontal) the ball may be served at
        :param visibility: rendering mode, see `Shape.visibility`
        :param has_volume: if True the ball collides as an area, otherwise as a single point
        """
        super().__init__(width=size, height=size, visibility=visibility, render_value=255)
        self.max_initial_angle = max_initial_angle
        self.reset(self.pos, direction='left')
        self.has_volume = has_volume

    def reset(self, position: Union[Tuple[float, float], Point], direction: str = 'right'):
        """
        Re-serve the ball from `position`, heading towards `direction` with a random deflection.

        :param position: point to serve from
        :param direction: 'left' or 'right'; which half of the field the ball heads towards
        :raises ValueError: if `direction` is not 'left' or 'right'
        """
        if direction not in ('left', 'right'):
            raise ValueError(f"direction must be 'left' or 'right', not {direction}")
        deflection = (2 * random.random() - 1) * self.max_initial_angle
        self._angle = deflection if direction == 'right' else math.pi - deflection
        self.pos = position

    @property
    def angle(self):
        """
        Direction of travel, measured from the rightward horizontal (radians).
        """
        return self._angle

    @angle.setter
    def angle(self, value):
        # keep the stored angle normalized to [0, 2*pi)
        self._angle = value % (2 * math.pi)

    @property
    def unit_velocity(self) -> Point:
        """Unit-length velocity vector for the current angle."""
        return Point(math.cos(self.angle), math.sin(self.angle))

    @unit_velocity.setter
    def unit_velocity(self, value: Union[Tuple[float, float], Point]):
        """
        Point the ball along the direction of `value`; the magnitude is ignored.

        :param value: an (x, y) tuple or a Point
        """
        if isinstance(value, tuple):
            value = Point(*value)
        assert isinstance(value, Point), f"value must be a point, not {type(value)}"
        self.angle = value.angle

    def get_velocity(self, speed: Union[float, int]):
        """Velocity vector scaled to `speed`."""
        return self.unit_velocity * speed
class Snell(Rectangle):
    def __init__(self, width, height, speed, change_rate, visibility):
        """
        A rectangular region in which the ball travels at a different speed.

        :param width: width of the layer
        :param height: height of the layer
        :param speed: initial ball speed inside the layer
        :param change_rate: standard deviation of the per-step random walk of the speed (0 disables it)
        :param visibility: rendering mode, see `Shape.visibility`
        """
        assert change_rate >= 0, "Snell `change_rate` must be non-negative"
        super().__init__(width=width, height=height, visibility=visibility, render_value=(235, 76, 52))

        self.speed = speed
        self._initial_speed = speed
        self.change_rate = change_rate

    def step(self):
        """
        Advance the layer speed by one step of a bounded Gaussian random walk.

        The increment is drawn from N(0, `change_rate`) and the result is clamped to
        [0.5 * initial_speed, 2.0 * initial_speed]. A zero `change_rate` leaves the speed untouched.
        """
        if self.change_rate != 0:
            self.speed += stats.norm(loc=0, scale=self.change_rate).rvs()
            lower = 0.5 * self._initial_speed
            upper = 2.0 * self._initial_speed
            self.speed = min(max(self.speed, lower), upper)
class TrajectoryBase(abc.ABC):
    """
    Base class describing the swept path of a shape moving with a fixed velocity, plus bookkeeping
    about the first object that path intersects.
    """

    def __init__(self, shape: Union[Point, Line, Rectangle], velocity: Point):
        self.shape = shape
        self.velocity = velocity
        self._reference = None
        # intersection bookkeeping, populated by `set_intersection`
        self.intersection = None
        self.intersected_trajectory = None
        self.intersected_object = None
        self.intersected_edge = None
        self.remaining_speed = None

    def set_intersection(self, point: Point, trajectory_line: Line, obj: Shape, edge: Line):
        """
        Record that this trajectory first intersects `obj` along `edge` at `point`.

        :param point: where the intersection happened
        :param trajectory_line: which corner trajectory intersected
        :param obj: the object that was hit
        :param edge: the edge of `obj` that was hit
        """
        assert isinstance(obj, Shape), f"type Shape expected, not {type(obj)}"
        assert isinstance(point, Point), f"type Point expected, not {type(point)}"
        assert isinstance(edge, Line), f"type Line expected, not {type(edge)}"

        self.intersected_object = obj
        self.intersected_edge = edge
        self.intersection = point
        self.intersected_trajectory = trajectory_line
        # distance left to travel after the hit
        self.remaining_speed = point.l2_distance(trajectory_line.end)

    def get_center_at_intersection(self) -> Point:
        """
        Get the new center of `self.shape` given that it moved along `intersected_trajectory` to
        `intersection`.

        :return: new center point
        """
        travelled = self.intersection - self.intersected_trajectory.start
        return self._reference + travelled

    @property
    def corners(self) -> Tuple[Line, ...]:
        """All corner trajectories, clockwise from the top left."""
        return (self.top_left, self.top_right, self.bottom_right, self.bottom_left)

    @property
    @abc.abstractmethod
    def center(self) -> Line: ...

    @property
    @abc.abstractmethod
    def top_right(self) -> Line: ...

    @property
    @abc.abstractmethod
    def top_left(self) -> Line: ...

    @property
    @abc.abstractmethod
    def bottom_right(self) -> Line: ...

    @property
    @abc.abstractmethod
    def bottom_left(self) -> Line: ...
class TrajectoryRectangle(TrajectoryBase):
    """
    Trajectory of a rectangle: one swept line per corner plus one for the center.
    """

    def __init__(self, shape: Rectangle, velocity: Point):
        super(TrajectoryRectangle, self).__init__(shape, velocity)
        assert isinstance(shape, Rectangle)
        self._reference = self.shape.pos

    def _ray(self, start: Point) -> Line:
        # swept line of a single point carried along this trajectory's velocity
        return Line(start, start + self.velocity)

    @property
    def center(self) -> Line:
        """Swept line of the rectangle's center."""
        return self._ray(self.shape.pos)

    @property
    def top_right(self) -> Line:
        """Swept line of the top right corner."""
        return self._ray(Point(self.shape.right_bound, self.shape.top_bound))

    @property
    def top_left(self) -> Line:
        """Swept line of the top left corner."""
        return self._ray(Point(self.shape.left_bound, self.shape.top_bound))

    @property
    def bottom_right(self) -> Line:
        """Swept line of the bottom right corner."""
        return self._ray(Point(self.shape.right_bound, self.shape.bottom_bound))

    @property
    def bottom_left(self) -> Line:
        """Swept line of the bottom left corner."""
        return self._ray(Point(self.shape.left_bound, self.shape.bottom_bound))
class TrajectoryLine(TrajectoryRectangle):
    """
    Trajectory of a line segment, handled by wrapping the segment in its axis-aligned bounding box
    and reusing the rectangle trajectory logic.
    """

    # noinspection PyTypeChecker
    # noinspection PyUnresolvedReferences
    def __init__(self, shape: Line, velocity: Point):
        super(TrajectoryLine, self).__init__(shape, velocity)
        assert isinstance(shape, Line)
        self._reference = self.shape.start

        # replace the line with its axis-aligned bounding box, centered on the segment midpoint
        start, end = self.shape.start, self.shape.end
        bounding_box = Rectangle(height=abs(start.y - end.y), width=abs(start.x - end.x))
        bounding_box.pos = Point((start.x + end.x) / 2, (start.y + end.y) / 2)
        self.shape = bounding_box
class TrajectoryPoint(TrajectoryBase):
    """
    Trajectory of a single point: every corner property collapses to the same swept line.
    """

    def __init__(self, shape: Point, velocity: Point):
        super(TrajectoryPoint, self).__init__(shape, velocity)
        assert isinstance(shape, Point)
        self._reference = self.shape

    @property
    def _path(self) -> Line:
        # the one and only swept line of the point
        return Line(self.shape, self.shape + self.velocity)

    @property
    def corners(self) -> Tuple[Line, ...]:
        return self._path,

    @property
    def center(self) -> Line:
        return self._path

    @property
    def top_right(self) -> Line:
        return self._path

    @property
    def top_left(self) -> Line:
        return self._path

    @property
    def bottom_right(self) -> Line:
        return self._path

    @property
    def bottom_left(self) -> Line:
        return self._path
class Trajectory(object):
    """
    Factory that instantiates the trajectory implementation matching the shape's type.
    """

    def __new__(cls, shape: Shape, velocity: Point):
        # order matters: check the most specific shape types first
        dispatch = ((Point, TrajectoryPoint), (Line, TrajectoryLine), (Rectangle, TrajectoryRectangle))
        for shape_type, trajectory_cls in dispatch:
            if isinstance(shape, shape_type):
                return trajectory_cls(shape, velocity)
        raise NotImplementedError(f"No implementation of Trajectory for input shape of type {type(shape)}")
class Canvas(Rectangle):
    """
    The playing field. Owns every sprite (paddles, ball, snell layer), advances the simulation one
    action at a time, renders the scene, and keeps score. The canvas itself doubles as the outer
    boundary: the ball bounces off its top/bottom edges and scores at its left/right edges.
    """

    action_meanings = {0: 'NOOP',
                       1: 'UP',
                       2: 'DOWN', }
    actions = {k: v for v, k in action_meanings.items()}  # inverse map: name -> action code

    def __init__(self, paddle_l: Paddle, paddle_r: Paddle, ball: Ball, snell: Snell, ball_speed: int, height: int,
                 width: int, their_update_probability: float, refract: bool, uniform_speed: bool):
        """
        :param paddle_l: the opponent's paddle (left side)
        :param paddle_r: our paddle (right side, moved by `step`'s action)
        :param ball: the ball sprite
        :param snell: the snell layer sprite
        :param ball_speed: default ball speed outside the snell layer
        :param height: canvas height
        :param width: canvas width
        :param their_update_probability: probability per step that the opponent's paddle tracks the ball
        :param refract: whether the ball refracts when crossing the snell boundary
        :param uniform_speed: if True the ball moves at `ball_speed` everywhere, ignoring the snell layer
        """
        super().__init__(height=height, width=width, visibility='none', render_value=0)
        self.pos = self.width / 2, self.height / 2

        assert isinstance(their_update_probability, (float, int)), \
            f"their_update_probability must be numeric, not {type(their_update_probability)}"
        assert 0 <= their_update_probability <= 1, f"{their_update_probability} outside allowed bounds [0, 1]"

        self.their_update_probability = their_update_probability
        self.default_ball_speed = ball_speed

        # Initialize objects. Render priority follows list order: later sprites obscure earlier ones.
        self.snell = snell
        self.ball = ball
        self.paddle_l = paddle_l
        self.paddle_r = paddle_r
        self.sprites = [self, snell, paddle_l, paddle_r, ball]

        self.uniform_speed = uniform_speed
        self.refract = refract
        self.we_scored = False
        self.they_scored = False

        # score
        self.our_score = 0
        self.their_score = 0

    def register_sprite(self, sprite: Shape):
        """
        Add a sprite to the render/collision list, just below the ball so the ball stays on top.
        """
        assert issubclass(type(sprite), Shape), f"sprite must be subclassed from Shape"
        # noinspection PyTypeChecker
        self.sprites.insert(-1, sprite)  # insert before ball

    @property
    def left_bound(self):
        return 0

    @property
    def right_bound(self):
        return self.width

    @property
    def top_bound(self):
        return self.height

    @property
    def bottom_bound(self):
        return 0

    # noinspection PyMethodOverriding
    def to_numpy(self) -> Tuple[np.ndarray, np.ndarray]:
        """
        Performs masked rendering of objects in `self.sprites`. Priority is determined by the ordering of the list,
        earlier objects will be obscured by later ones.

        :return: (state, rendering)
        """
        state = self._zero_rgb_image(round(self.height), round(self.width))
        rendering = self._zero_rgb_image(round(self.height), round(self.width))

        for sprite in self.sprites[1:]:  # skip self
            sprite_state, sprite_rendering = sprite.to_numpy(self.height, self.width)
            state[sprite_state != 0] = sprite_state[sprite_state != 0]
            rendering[sprite_rendering != 0] = sprite_rendering[sprite_rendering != 0]
        return state, rendering

    def score(self, who):
        """
        Increment the score and reset the ball.

        :param who: 'we' or 'they'
        :return: reward (+1 if we scored, -1 if they scored)
        """
        if who == 'they':
            reward = -1
            self.their_score += 1
        elif who == 'we':
            reward = 1
            self.our_score += 1
        else:
            raise ValueError(f"who must be 'we' or 'they', not {who}")

        self._reset_ball()
        return reward

    def step(self, action):
        """
        Advance the game one frame: move our paddle per `action`, maybe move the opponent's paddle,
        step the ball (resolving any interactions), then step the snell layer's random walk.

        :param action: action code, see `action_meanings`
        :return: reward accumulated this step
        """
        self._move_our_paddle(action)
        self._step_their_paddle()
        reward = self._step_ball()
        self._step_snell()
        return reward

    def get_state_size(self) -> Tuple[int, int]:
        """
        Return the tuple (height, width) of the canvas dimensions
        """
        return self.height, self.width

    def _step_snell(self) -> None:
        """
        Step the snell layer
        """
        self.snell.step()

    def _reset_ball(self):
        """Re-serve the ball from the center of the canvas."""
        self.ball.reset((self.width / 2, self.height / 2))

    def _move_our_paddle(self, action) -> None:
        """
        Move our paddle according to the provided action, clamped to the canvas bounds.

        :param action: the action code
        """
        if not isinstance(action, int):
            action = action.item()  # pops the item if the action is a single tensor
        assert action in self.action_meanings, f"{action} is not a valid action"
        if action == self.actions['UP']:
            if self.paddle_r.top_bound < self.top_bound:
                self.paddle_r.up()
        elif action == self.actions['DOWN']:
            if self.paddle_r.bottom_bound > self.bottom_bound:
                self.paddle_r.down()

    def _step_ball(self, speed: Union[float, int] = None):
        """
        Move the ball to the next position according to the speed of the layer it is in. If the
        trajectory hits something, dispatch to the interaction handler (which recursively continues
        the step with the remaining speed).

        :param speed: used to continue the trajectory of a ball that interacted with an object
        :return: reward accumulated during this (sub-)step
        """
        trajectory = self._get_trajectory(speed)
        self._get_first_intersection(trajectory)

        reward = 0
        if trajectory.intersection is None:  # No intersection: just complete the move
            self.ball.pos = trajectory.center.end
        else:
            reward = self._interaction_dispatcher(trajectory)

        return reward

    def _get_trajectory(self, speed) -> TrajectoryBase:
        """
        Get the ball's trajectory, either as an area (ball with volume) or a single point.

        :param speed: the speed of the starting medium; None means "look it up from the position"
        :return: trajectory object
        """
        if speed is None:
            speed = self._get_ball_speed()
        if self.ball.has_volume:
            trajectory = Trajectory(self.ball, self.ball.get_velocity(speed))
        else:
            trajectory = Trajectory(self.ball.pos, self.ball.get_velocity(speed))
        return trajectory

    def _interaction_dispatcher(self, trajectory: TrajectoryBase):
        """
        Dispatch to the handler matching the intersected object and propagate its reward.

        :param trajectory: the trajectory of the ball
        :return: reward accumulated while finishing the step (the continuation can score)
        """
        reward = 0
        obj = trajectory.intersected_object
        if obj is self:  # border interaction
            reward = self._interact_border(trajectory)
        elif isinstance(obj, Paddle):  # paddle interaction
            # bug fix: the reward returned by the continued trajectory was previously discarded
            reward = self._interact_paddle(trajectory)
        elif isinstance(obj, Snell):
            # bug fix: the reward returned by the continued trajectory was previously discarded
            reward = self._refract(trajectory)

        return reward

    def _interact_paddle(self, trajectory: TrajectoryBase) -> float:
        """
        Bounce the ball off a paddle; the exit angle scales with how far from the paddle center it
        was struck.

        :return: reward from finishing the remainder of the step
        """
        paddle = trajectory.intersected_object
        paddle_fraction = paddle.get_fraction_of_paddle(trajectory.get_center_at_intersection())
        angle = paddle_fraction * paddle.max_angle
        # a rightward-moving ball hit the right paddle, so mirror the exit angle to point left
        angle = math.pi - angle if self.ball.unit_velocity.x > 0 else angle

        self.ball.angle = angle
        reward = self._finish_step_ball(trajectory)
        return reward

    def _refract(self, trajectory: TrajectoryBase):
        """
        Handle the ball crossing a snell-layer boundary: refract by Snell's law (or totally
        internally reflect past the critical angle) when refraction is enabled, otherwise pass
        straight through.

        :return: reward from finishing the remainder of the step
        """
        edge = trajectory.intersected_edge
        if self.refract:
            s0, s1 = self._get_start_and_end_speed(trajectory)
            angle = edge.angle_to_normal(trajectory.center)

            if self._exceeds_critical_angle(angle, s0, s1):
                # TODO: reflect to arbitrary angle (non-vertical interface)
                # bug fix: propagate the reflection's reward instead of returning None
                return self._reflect(Point(-1, 1), trajectory)

            new_angle = math.asin(s1 / s0 * math.sin(angle))  # Snell's law
            boundary_angle, new_angle = self._adjust_refraction_to_boundary_angle(edge, new_angle)
            new_angle = self._adjust_refraction_to_direction_of_incidence(boundary_angle, new_angle, trajectory)

            self.ball.angle = new_angle
        return self._finish_step_ball(trajectory)

    @staticmethod
    def _exceeds_critical_angle(angle: float, s0: float, s1: float) -> bool:
        """
        Test if the angle exceeds the critical angle.

        :param angle: the angle to the normal of the boundary
        :param s0: the speed of the original medium
        :param s1: the speed of the next medium
        :return: True if the angle exceeds the critical angle
        """
        if s1 > s0:  # if the second speed is faster, there is a critical angle
            critical_angle = get_critical_angle(s0, s1)
            if abs(angle) >= critical_angle:
                return True
        return False

    @staticmethod
    def _adjust_refraction_to_direction_of_incidence(boundary_angle: float, new_angle: float,
                                                     trajectory: TrajectoryBase) -> float:
        """
        If the direction of incidence was from the right of the boundary, reflect `new_angle`,
        otherwise return `new_angle` without modification.

        :param boundary_angle: must be in the first or fourth quadrant
        :param new_angle: the angle to be (possibly) reflected
        :param trajectory: the trajectory of the incoming ball in global coordinates
        :return: the (possibly) reflected `new_angle`
        """
        angle = trajectory.center.angle
        assert -math.pi / 2 <= boundary_angle <= math.pi / 2, "boundary_angle should be in first or fourth quadrant"
        # noinspection PyChainedComparisons
        if boundary_angle >= 0 and boundary_angle < angle % (2 * math.pi) < boundary_angle + math.pi:
            new_angle = math.pi - new_angle
        elif (boundary_angle < 0 and
              boundary_angle % (2 * math.pi) + math.pi < angle % (2 * math.pi) < boundary_angle % (
                      2 * math.pi)):
            new_angle = math.pi - new_angle
        return new_angle

    @staticmethod
    def _adjust_refraction_to_boundary_angle(boundary: Line, new_angle: float) -> Tuple[float, float]:
        """
        Compute the rotation of `new_angle` back to global coordinates. Assume incidence from the
        left side of the boundary.

        :param boundary: the boundary `primitives.Line` object
        :param new_angle: the refracted angle normal to the boundary
        :return: (boundary angle folded into the first/fourth quadrant, new angle in global coordinates)
        """
        # TODO: verify this works with a non-vertical interface
        boundary_angle = boundary.angle % (2 * math.pi)
        if 0 <= boundary_angle < math.pi / 2:  # in the first quadrant
            boundary_angle = boundary_angle
            new_angle = boundary_angle - math.pi / 2 + new_angle
        elif math.pi / 2 <= boundary_angle < math.pi:  # in the second quadrant
            boundary_angle = math.pi - boundary_angle
            new_angle = math.pi / 2 - boundary_angle + new_angle
        elif math.pi <= boundary_angle < 3 * math.pi / 2:  # in the third quadrant
            boundary_angle = math.pi - boundary_angle
            new_angle = boundary_angle - math.pi / 2 + new_angle
        # bug fix: the fourth-quadrant bound was written `2 * math.pi / 3` (120 degrees); behavior
        # was unchanged only because earlier branches consumed that range, but the intent is 3*pi/2
        elif 3 * math.pi / 2 <= boundary_angle < 2 * math.pi:  # in the fourth quadrant
            boundary_angle = 2 * math.pi - boundary_angle
            new_angle = math.pi / 2 - boundary_angle - new_angle
        else:
            raise ValueError(f'Unexpected angle {boundary_angle}')
        return boundary_angle, new_angle

    def _get_start_and_end_speed(self, trajectory: TrajectoryBase) -> Tuple[float, float]:
        """
        Get the speed at the start of the trajectory and the speed at the end of the trajectory.

        :param trajectory: the ball's trajectory
        :return: (initial speed, final speed)
        """
        snell = trajectory.intersected_object
        # todo: detect if start is in some other snell layer
        if snell.is_in(trajectory.center.start):
            # leaving the layer
            s0 = snell.speed
            s1 = self.default_ball_speed
        else:
            # entering the layer
            s0 = self.default_ball_speed
            s1 = snell.speed
        return s0, s1

    def _interact_border(self, trajectory: TrajectoryBase) -> float:
        """
        Handle the ball reaching the canvas boundary: bounce off top/bottom, score at left/right.

        :return: reward (bug fix: the reward from a wall bounce's continued step is now propagated
            instead of being discarded)
        """
        edge = trajectory.intersected_edge
        if edge == self.top_edge or edge == self.bot_edge:
            reward = self._reflect(Point(1, -1), trajectory)
        elif edge == self.left_edge:
            reward = self.score('we')
        elif edge == self.right_edge:
            reward = self.score('they')
        else:
            raise ValueError(f'invalid edge, {edge}')

        return reward

    def _reflect(self, direction: Point, trajectory: TrajectoryBase):
        """
        Multiplies the velocity of the ball by `direction`, then continues the path of the ball
        with the speed remaining after the interaction.

        :param direction: velocity multiplier
        :param trajectory: the original trajectory of the ball
        :return: reward from finishing the remainder of the step
        """
        self.ball.unit_velocity *= direction
        return self._finish_step_ball(trajectory)

    def _finish_step_ball(self, trajectory: TrajectoryBase):
        """
        Finish the remainder of the trajectory after any interactions.

        :param trajectory: the original trajectory
        :return: reward
        """
        point = trajectory.get_center_at_intersection()
        # nudge the ball slightly off the surface so the same intersection is not re-detected
        self.ball.pos = point + self.ball.unit_velocity * EPSILON
        return self._step_ball(trajectory.remaining_speed)

    def _get_first_intersection(self, trajectory: TrajectoryBase):
        """
        Find the first point at which the trajectory interacted with an object, recording it on
        `trajectory` via `set_intersection` (nearest hit wins).

        :param trajectory: the trajectory of the object
        """
        for trajectory_line in trajectory.corners:
            for o in self.sprites:
                if not isinstance(o, Ball):  # the ball does not collide with itself
                    intersection_result = o.get_intersection(trajectory_line)
                    if intersection_result is not None:
                        edge, point = intersection_result
                        if trajectory.intersection is None:
                            trajectory.set_intersection(point, trajectory_line, o, edge)
                        elif point == trajectory.intersection and trajectory_line == trajectory.intersected_trajectory:
                            raise NotImplementedError("overlapping parallel edges not implemented")
                        elif (point.l2_distance(trajectory_line.start) <
                              trajectory.intersection.l2_distance(trajectory.intersected_trajectory.start)):
                            # this hit is closer than the previously recorded one
                            trajectory.set_intersection(point, trajectory_line, o, edge)

    def _get_ball_speed(self) -> float:
        """
        Speed of the ball at its current position: the snell layer's speed when inside it
        (unless `uniform_speed` is set), otherwise the default speed.
        """
        if self.uniform_speed:
            return self.default_ball_speed
        else:
            if self.ball.is_overlapping(self.snell):
                return self.snell.speed
            else:
                return self.default_ball_speed

    def _step_their_paddle(self):
        """
        Move the opponents paddle. Override this in a subclass to change the behavior.
        """
        if random.random() < self.their_update_probability:
            if self.paddle_l.y < self.ball.y:
                if self.paddle_l.top_bound < self.top_bound:
                    self.paddle_l.up()
            else:
                if self.paddle_l.bottom_bound > self.bottom_bound:
                    self.paddle_l.down()
|
normal
|
{
"blob_id": "42d2be7544d2afb9580841422ae35e1a5621df52",
"index": 6459,
"step-1": "<mask token>\n\n\nclass TrajectoryRectangle(TrajectoryBase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def top_left(self) ->Line:\n \"\"\"\n Line representing the trajectory of the point on the top left corner of the rectangle\n \"\"\"\n start = Point(self.shape.left_bound, self.shape.top_bound)\n return Line(start, start + self.velocity)\n <mask token>\n <mask token>\n\n\nclass TrajectoryLine(TrajectoryRectangle):\n \"\"\"\n Create a bounding box around the line and compute the trajectory as if it were a rectangle.\n \"\"\"\n\n def __init__(self, shape: Line, velocity: Point):\n super(TrajectoryLine, self).__init__(shape, velocity)\n assert isinstance(shape, Line)\n self._reference = self.shape.start\n height = abs(self.shape.start.y - self.shape.end.y)\n width = abs(self.shape.start.x - self.shape.end.x)\n center = Point((self.shape.start.x + self.shape.end.x) / 2, (self.\n shape.start.y + self.shape.end.y) / 2)\n self.shape = Rectangle(height=height, width=width)\n self.shape.pos = center\n\n\nclass TrajectoryPoint(TrajectoryBase):\n\n def __init__(self, shape: Point, velocity: Point):\n super(TrajectoryPoint, self).__init__(shape, velocity)\n assert isinstance(shape, Point)\n self._reference = self.shape\n\n @property\n def corners(self) ->Tuple[Line, ...]:\n return self._trajectory,\n\n @property\n def _trajectory(self) ->Line:\n return Line(self.shape, self.shape + self.velocity)\n\n @property\n def center(self) ->Line:\n return self._trajectory\n\n @property\n def top_right(self) ->Line:\n return self._trajectory\n\n @property\n def top_left(self) ->Line:\n return self._trajectory\n\n @property\n def bottom_right(self) ->Line:\n return self._trajectory\n\n @property\n def bottom_left(self) ->Line:\n return self._trajectory\n\n\nclass Trajectory(object):\n\n def __new__(cls, shape: Shape, velocity: Point):\n if isinstance(shape, Point):\n return TrajectoryPoint(shape, velocity)\n elif isinstance(shape, Line):\n 
return TrajectoryLine(shape, velocity)\n elif isinstance(shape, Rectangle):\n return TrajectoryRectangle(shape, velocity)\n else:\n raise NotImplementedError(\n f'No implementation of Trajectory for input shape of type {type(shape)}'\n )\n\n\nclass Canvas(Rectangle):\n action_meanings = {(0): 'NOOP', (1): 'UP', (2): 'DOWN'}\n actions = {k: v for v, k in action_meanings.items()}\n\n def __init__(self, paddle_l: Paddle, paddle_r: Paddle, ball: Ball,\n snell: Snell, ball_speed: int, height: int, width: int,\n their_update_probability: float, refract: bool, uniform_speed: bool):\n super().__init__(height=height, width=width, visibility='none',\n render_value=0)\n self.pos = self.width / 2, self.height / 2\n assert isinstance(their_update_probability, (float, int)\n ), f'their_update_probability must be numeric, not {type(their_update_probability)}'\n assert 0 <= their_update_probability <= 1, f'{their_update_probability} outside allowed bounds [0, 1]'\n self.their_update_probability = their_update_probability\n self.default_ball_speed = ball_speed\n self.snell = snell\n self.ball = ball\n self.paddle_l = paddle_l\n self.paddle_r = paddle_r\n self.sprites = [self, snell, paddle_l, paddle_r, ball]\n self.uniform_speed = uniform_speed\n self.refract = refract\n self.we_scored = False\n self.they_scored = False\n self.our_score = 0\n self.their_score = 0\n\n def register_sprite(self, sprite: Shape):\n assert issubclass(type(sprite), Shape\n ), f'sprite must be subclassed from Shape'\n self.sprites.insert(-1, sprite)\n\n @property\n def left_bound(self):\n return 0\n\n @property\n def right_bound(self):\n return self.width\n\n @property\n def top_bound(self):\n return self.height\n\n @property\n def bottom_bound(self):\n return 0\n\n def to_numpy(self) ->Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Performs masked rendering of objects in `self.sprites`. 
Priority is determined by the ordering of the list,\n earlier objects will be obscured by later ones.\n\n :return: (state, rendering)\n \"\"\"\n state = self._zero_rgb_image(round(self.height), round(self.width))\n rendering = self._zero_rgb_image(round(self.height), round(self.width))\n for sprite in self.sprites[1:]:\n sprite_state, sprite_rendering = sprite.to_numpy(self.height,\n self.width)\n state[sprite_state != 0] = sprite_state[sprite_state != 0]\n rendering[sprite_rendering != 0] = sprite_rendering[\n sprite_rendering != 0]\n return state, rendering\n\n def score(self, who):\n \"\"\"\n Increment the score and reset the ball\n\n :param who: 'we' or 'they'\n :return: reward\n \"\"\"\n if who == 'they':\n reward = -1\n self.their_score += 1\n elif who == 'we':\n reward = 1\n self.our_score += 1\n else:\n raise ValueError(f\"who must be 'we' or 'they', not {who}\")\n self._reset_ball()\n return reward\n\n def step(self, action):\n self._move_our_paddle(action)\n self._step_their_paddle()\n reward = self._step_ball()\n self._step_snell()\n return reward\n\n def get_state_size(self) ->Tuple[int, int]:\n \"\"\"\n Return the tuple (height, width) of the canvas dimensions\n \"\"\"\n return self.height, self.width\n\n def _step_snell(self) ->None:\n \"\"\"\n Step the snell layer\n \"\"\"\n self.snell.step()\n\n def _reset_ball(self):\n self.ball.reset((self.width / 2, self.height / 2))\n\n def _move_our_paddle(self, action) ->None:\n \"\"\"\n Move our paddle according to the provided action\n\n :param action: the action code\n \"\"\"\n if not isinstance(action, int):\n action = action.item()\n assert action in [a for a in self.action_meanings.keys()\n ], f'{action} is not a valid action'\n if action == self.actions['UP']:\n if self.paddle_r.top_bound < self.top_bound:\n self.paddle_r.up()\n elif action == self.actions['DOWN']:\n if self.paddle_r.bottom_bound > self.bottom_bound:\n self.paddle_r.down()\n\n def _step_ball(self, speed: Union[float, int]=None):\n 
\"\"\"\n Move the ball to the next position according to the speed of the layer it is in.\n\n :param speed: used to continue the trajectory of a ball that interacted with an object\n \"\"\"\n trajectory = self._get_trajectory(speed)\n self._get_first_intersection(trajectory)\n reward = 0\n if trajectory.intersection is None:\n self.ball.pos = trajectory.center.end\n else:\n reward = self._interaction_dispatcher(trajectory)\n return reward\n\n def _get_trajectory(self, speed) ->TrajectoryBase:\n \"\"\"\n Get the ball's trajectory\n\n :param speed: The speed of the starting medium\n :return: trajectory `Line`\n \"\"\"\n if speed is None:\n speed = self._get_ball_speed()\n if self.ball.has_volume:\n trajectory = Trajectory(self.ball, self.ball.get_velocity(speed))\n else:\n trajectory = Trajectory(self.ball.pos, self.ball.get_velocity(\n speed))\n return trajectory\n\n def _interaction_dispatcher(self, trajectory: TrajectoryBase):\n \"\"\"\n Dispatch data to the appropriate method based on the interaction `obj`.\n\n :param trajectory: the trajectory of the ball\n \"\"\"\n reward = 0\n obj = trajectory.intersected_object\n if obj is self:\n reward = self._interact_border(trajectory)\n elif isinstance(obj, Paddle):\n self._interact_paddle(trajectory)\n elif isinstance(obj, Snell):\n self._refract(trajectory)\n return reward\n\n def _interact_paddle(self, trajectory: TrajectoryBase) ->float:\n paddle = trajectory.intersected_object\n paddle_fraction = paddle.get_fraction_of_paddle(trajectory.\n get_center_at_intersection())\n angle = paddle_fraction * paddle.max_angle\n angle = math.pi - angle if self.ball.unit_velocity.x > 0 else angle\n self.ball.angle = angle\n reward = self._finish_step_ball(trajectory)\n return reward\n\n def _refract(self, trajectory: TrajectoryBase):\n edge = trajectory.intersected_edge\n if self.refract:\n s0, s1 = self._get_start_and_end_speed(trajectory)\n angle = edge.angle_to_normal(trajectory.center)\n if self._exceeds_critical_angle(angle, 
s0, s1):\n self._reflect(Point(-1, 1), trajectory)\n return\n new_angle = math.asin(s1 / s0 * math.sin(angle))\n boundary_angle, new_angle = (self.\n _adjust_refraction_to_boundary_angle(edge, new_angle))\n new_angle = self._adjust_refraction_to_direction_of_incidence(\n boundary_angle, new_angle, trajectory)\n self.ball.angle = new_angle\n return self._finish_step_ball(trajectory)\n\n @staticmethod\n def _exceeds_critical_angle(angle: float, s0: float, s1: float) ->bool:\n \"\"\"\n Test if the angle exceeds the critical angle\n\n :param angle: The angle to the normal of the boundary\n :param s0: The speed of the original medium\n :param s1: The speed of the next medium\n :return: True if the angle exceeds the critical angle\n \"\"\"\n if s1 > s0:\n critical_angle = get_critical_angle(s0, s1)\n if abs(angle) >= critical_angle:\n return True\n return False\n\n @staticmethod\n def _adjust_refraction_to_direction_of_incidence(boundary_angle: float,\n new_angle: float, trajectory: TrajectoryBase) ->float:\n \"\"\"\n If the direction of incidence was from the right of the boundary, reflect `new_angle`, otherwise, return\n `new_angle` without modification.\n\n :param boundary_angle: must be in the first or fourth quadrant\n :param new_angle: The angle to be reflected in the return\n :param trajectory: The angle of the incoming ball in global coordinates\n :return: The (possibly) reflected `new_angle`\n \"\"\"\n angle = trajectory.center.angle\n assert -math.pi / 2 <= boundary_angle <= math.pi / 2, 'boundary_angle should be in first or fourth quadrant'\n if boundary_angle >= 0 and boundary_angle < angle % (2 * math.pi\n ) < boundary_angle + math.pi:\n new_angle = math.pi - new_angle\n elif boundary_angle < 0 and boundary_angle % (2 * math.pi\n ) + math.pi < angle % (2 * math.pi) < boundary_angle % (2 * math.pi\n ):\n new_angle = math.pi - new_angle\n return new_angle\n\n @staticmethod\n def _adjust_refraction_to_boundary_angle(boundary: Line, new_angle: float\n ) 
->Tuple[float, float]:\n \"\"\"\n Compute the rotation of `new_angle` back to global coordinates. Assume incidence from the left side of the\n boundary.\n\n :param boundary: The boundary `primitives.Line` object\n :param new_angle: The refracted angle normal to the boundary\n :return: The new angle in global coordinates\n \"\"\"\n boundary_angle = boundary.angle % (2 * math.pi)\n if 0 <= boundary_angle < math.pi / 2:\n boundary_angle = boundary_angle\n new_angle = boundary_angle - math.pi / 2 + new_angle\n elif math.pi / 2 <= boundary_angle < math.pi:\n boundary_angle = math.pi - boundary_angle\n new_angle = math.pi / 2 - boundary_angle + new_angle\n elif math.pi <= boundary_angle < 3 * math.pi / 2:\n boundary_angle = math.pi - boundary_angle\n new_angle = boundary_angle - math.pi / 2 + new_angle\n elif 2 * math.pi / 3 <= boundary_angle < 2 * math.pi:\n boundary_angle = 2 * math.pi - boundary_angle\n new_angle = math.pi / 2 - boundary_angle - new_angle\n else:\n raise ValueError(f'Unexpected angle {boundary_angle}')\n return boundary_angle, new_angle\n\n def _get_start_and_end_speed(self, trajectory: TrajectoryBase) ->Tuple[\n float, float]:\n \"\"\"\n Get the speed at the start of the trajectory and the speed at the end of the trajectory.\n\n :param trajectory: The trajectory `primitives.Line` object\n :return: (initial speed, final speed)\n \"\"\"\n snell = trajectory.intersected_object\n if snell.is_in(trajectory.center.start):\n s0 = snell.speed\n s1 = self.default_ball_speed\n else:\n s0 = self.default_ball_speed\n s1 = snell.speed\n return s0, s1\n\n def _interact_border(self, trajectory: TrajectoryBase) ->float:\n reward = 0.0\n edge = trajectory.intersected_edge\n if edge == self.top_edge or edge == self.bot_edge:\n self._reflect(Point(1, -1), trajectory)\n elif edge == self.left_edge:\n reward = self.score('we')\n elif edge == self.right_edge:\n reward = self.score('they')\n else:\n raise ValueError(f'invalid edge, {edge}')\n return reward\n\n def 
_reflect(self, direction: Point, trajectory: TrajectoryBase):\n \"\"\"\n Multiplies the velocity of the ball by `direction`, continues the path of the ball by calculating the remaining\n speed using trajectory and point.\n\n :param direction: velocity multiplier\n :param trajectory: The original trajectory of the ball\n \"\"\"\n self.ball.unit_velocity *= direction\n return self._finish_step_ball(trajectory)\n\n def _finish_step_ball(self, trajectory: TrajectoryBase):\n \"\"\"\n Finish the remainder of the trajectory after any interactions.\n\n :param trajectory: The original trajectory\n :return: reward\n \"\"\"\n point = trajectory.get_center_at_intersection()\n self.ball.pos = point + self.ball.unit_velocity * EPSILON\n return self._step_ball(trajectory.remaining_speed)\n\n def _get_first_intersection(self, trajectory: TrajectoryBase):\n \"\"\"\n Find the first point at which the trajectory interacted with an object.\n\n :param trajectory: the trajectory of the object\n :return: (shape object interacted with, point of interaction, line object interacted with)\n \"\"\"\n for trajectory_line in trajectory.corners:\n for o in self.sprites:\n if not isinstance(o, Ball):\n intersection_result = o.get_intersection(trajectory_line)\n if intersection_result is not None:\n edge, point = intersection_result\n if trajectory.intersection is None:\n trajectory.set_intersection(point,\n trajectory_line, o, edge)\n elif point == trajectory.intersection and trajectory_line == trajectory.intersected_trajectory:\n raise NotImplementedError(\n 'overlapping parallel edges not implemented')\n elif point.l2_distance(trajectory_line.start\n ) < trajectory.intersection.l2_distance(trajectory\n .intersected_trajectory.start):\n trajectory.set_intersection(point,\n trajectory_line, o, edge)\n\n def _get_ball_speed(self) ->float:\n if self.uniform_speed:\n return self.default_ball_speed\n elif self.ball.is_overlapping(self.snell):\n return self.snell.speed\n else:\n return 
self.default_ball_speed\n\n def _step_their_paddle(self):\n \"\"\"\n Move the opponents paddle. Override this in a subclass to change the behavior.\n \"\"\"\n if random.random() < self.their_update_probability:\n if self.paddle_l.y < self.ball.y:\n if self.paddle_l.top_bound < self.top_bound:\n self.paddle_l.up()\n elif self.paddle_l.bottom_bound > self.bottom_bound:\n self.paddle_l.down()\n",
"step-2": "<mask token>\n\n\nclass TrajectoryRectangle(TrajectoryBase):\n <mask token>\n\n def __init__(self, shape: Rectangle, velocity: Point):\n super(TrajectoryRectangle, self).__init__(shape, velocity)\n assert isinstance(shape, Rectangle)\n self._reference = self.shape.pos\n\n @property\n def center(self) ->Line:\n \"\"\"\n Line representing the trajectory of the center of the rectangle\n \"\"\"\n return Line(self.shape.pos, self.shape.pos + self.velocity)\n\n @property\n def top_right(self) ->Line:\n \"\"\"\n Line representing the trajectory of the point on the top right corner of the rectangle\n \"\"\"\n start = Point(self.shape.right_bound, self.shape.top_bound)\n return Line(start, start + self.velocity)\n\n @property\n def top_left(self) ->Line:\n \"\"\"\n Line representing the trajectory of the point on the top left corner of the rectangle\n \"\"\"\n start = Point(self.shape.left_bound, self.shape.top_bound)\n return Line(start, start + self.velocity)\n\n @property\n def bottom_right(self) ->Line:\n \"\"\"\n Line representing the trajectory of the point on the bottom right corner of the rectangle\n \"\"\"\n start = Point(self.shape.right_bound, self.shape.bottom_bound)\n return Line(start, start + self.velocity)\n\n @property\n def bottom_left(self) ->Line:\n \"\"\"\n Line representing the trajectory of the point on the bottom left corner of the rectangle\n \"\"\"\n start = Point(self.shape.left_bound, self.shape.bottom_bound)\n return Line(start, start + self.velocity)\n\n\nclass TrajectoryLine(TrajectoryRectangle):\n \"\"\"\n Create a bounding box around the line and compute the trajectory as if it were a rectangle.\n \"\"\"\n\n def __init__(self, shape: Line, velocity: Point):\n super(TrajectoryLine, self).__init__(shape, velocity)\n assert isinstance(shape, Line)\n self._reference = self.shape.start\n height = abs(self.shape.start.y - self.shape.end.y)\n width = abs(self.shape.start.x - self.shape.end.x)\n center = Point((self.shape.start.x + 
self.shape.end.x) / 2, (self.\n shape.start.y + self.shape.end.y) / 2)\n self.shape = Rectangle(height=height, width=width)\n self.shape.pos = center\n\n\nclass TrajectoryPoint(TrajectoryBase):\n\n def __init__(self, shape: Point, velocity: Point):\n super(TrajectoryPoint, self).__init__(shape, velocity)\n assert isinstance(shape, Point)\n self._reference = self.shape\n\n @property\n def corners(self) ->Tuple[Line, ...]:\n return self._trajectory,\n\n @property\n def _trajectory(self) ->Line:\n return Line(self.shape, self.shape + self.velocity)\n\n @property\n def center(self) ->Line:\n return self._trajectory\n\n @property\n def top_right(self) ->Line:\n return self._trajectory\n\n @property\n def top_left(self) ->Line:\n return self._trajectory\n\n @property\n def bottom_right(self) ->Line:\n return self._trajectory\n\n @property\n def bottom_left(self) ->Line:\n return self._trajectory\n\n\nclass Trajectory(object):\n\n def __new__(cls, shape: Shape, velocity: Point):\n if isinstance(shape, Point):\n return TrajectoryPoint(shape, velocity)\n elif isinstance(shape, Line):\n return TrajectoryLine(shape, velocity)\n elif isinstance(shape, Rectangle):\n return TrajectoryRectangle(shape, velocity)\n else:\n raise NotImplementedError(\n f'No implementation of Trajectory for input shape of type {type(shape)}'\n )\n\n\nclass Canvas(Rectangle):\n action_meanings = {(0): 'NOOP', (1): 'UP', (2): 'DOWN'}\n actions = {k: v for v, k in action_meanings.items()}\n\n def __init__(self, paddle_l: Paddle, paddle_r: Paddle, ball: Ball,\n snell: Snell, ball_speed: int, height: int, width: int,\n their_update_probability: float, refract: bool, uniform_speed: bool):\n super().__init__(height=height, width=width, visibility='none',\n render_value=0)\n self.pos = self.width / 2, self.height / 2\n assert isinstance(their_update_probability, (float, int)\n ), f'their_update_probability must be numeric, not {type(their_update_probability)}'\n assert 0 <= their_update_probability <= 1, 
f'{their_update_probability} outside allowed bounds [0, 1]'\n self.their_update_probability = their_update_probability\n self.default_ball_speed = ball_speed\n self.snell = snell\n self.ball = ball\n self.paddle_l = paddle_l\n self.paddle_r = paddle_r\n self.sprites = [self, snell, paddle_l, paddle_r, ball]\n self.uniform_speed = uniform_speed\n self.refract = refract\n self.we_scored = False\n self.they_scored = False\n self.our_score = 0\n self.their_score = 0\n\n def register_sprite(self, sprite: Shape):\n assert issubclass(type(sprite), Shape\n ), f'sprite must be subclassed from Shape'\n self.sprites.insert(-1, sprite)\n\n @property\n def left_bound(self):\n return 0\n\n @property\n def right_bound(self):\n return self.width\n\n @property\n def top_bound(self):\n return self.height\n\n @property\n def bottom_bound(self):\n return 0\n\n def to_numpy(self) ->Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Performs masked rendering of objects in `self.sprites`. Priority is determined by the ordering of the list,\n earlier objects will be obscured by later ones.\n\n :return: (state, rendering)\n \"\"\"\n state = self._zero_rgb_image(round(self.height), round(self.width))\n rendering = self._zero_rgb_image(round(self.height), round(self.width))\n for sprite in self.sprites[1:]:\n sprite_state, sprite_rendering = sprite.to_numpy(self.height,\n self.width)\n state[sprite_state != 0] = sprite_state[sprite_state != 0]\n rendering[sprite_rendering != 0] = sprite_rendering[\n sprite_rendering != 0]\n return state, rendering\n\n def score(self, who):\n \"\"\"\n Increment the score and reset the ball\n\n :param who: 'we' or 'they'\n :return: reward\n \"\"\"\n if who == 'they':\n reward = -1\n self.their_score += 1\n elif who == 'we':\n reward = 1\n self.our_score += 1\n else:\n raise ValueError(f\"who must be 'we' or 'they', not {who}\")\n self._reset_ball()\n return reward\n\n def step(self, action):\n self._move_our_paddle(action)\n self._step_their_paddle()\n reward = 
self._step_ball()\n self._step_snell()\n return reward\n\n def get_state_size(self) ->Tuple[int, int]:\n \"\"\"\n Return the tuple (height, width) of the canvas dimensions\n \"\"\"\n return self.height, self.width\n\n def _step_snell(self) ->None:\n \"\"\"\n Step the snell layer\n \"\"\"\n self.snell.step()\n\n def _reset_ball(self):\n self.ball.reset((self.width / 2, self.height / 2))\n\n def _move_our_paddle(self, action) ->None:\n \"\"\"\n Move our paddle according to the provided action\n\n :param action: the action code\n \"\"\"\n if not isinstance(action, int):\n action = action.item()\n assert action in [a for a in self.action_meanings.keys()\n ], f'{action} is not a valid action'\n if action == self.actions['UP']:\n if self.paddle_r.top_bound < self.top_bound:\n self.paddle_r.up()\n elif action == self.actions['DOWN']:\n if self.paddle_r.bottom_bound > self.bottom_bound:\n self.paddle_r.down()\n\n def _step_ball(self, speed: Union[float, int]=None):\n \"\"\"\n Move the ball to the next position according to the speed of the layer it is in.\n\n :param speed: used to continue the trajectory of a ball that interacted with an object\n \"\"\"\n trajectory = self._get_trajectory(speed)\n self._get_first_intersection(trajectory)\n reward = 0\n if trajectory.intersection is None:\n self.ball.pos = trajectory.center.end\n else:\n reward = self._interaction_dispatcher(trajectory)\n return reward\n\n def _get_trajectory(self, speed) ->TrajectoryBase:\n \"\"\"\n Get the ball's trajectory\n\n :param speed: The speed of the starting medium\n :return: trajectory `Line`\n \"\"\"\n if speed is None:\n speed = self._get_ball_speed()\n if self.ball.has_volume:\n trajectory = Trajectory(self.ball, self.ball.get_velocity(speed))\n else:\n trajectory = Trajectory(self.ball.pos, self.ball.get_velocity(\n speed))\n return trajectory\n\n def _interaction_dispatcher(self, trajectory: TrajectoryBase):\n \"\"\"\n Dispatch data to the appropriate method based on the interaction 
`obj`.\n\n :param trajectory: the trajectory of the ball\n \"\"\"\n reward = 0\n obj = trajectory.intersected_object\n if obj is self:\n reward = self._interact_border(trajectory)\n elif isinstance(obj, Paddle):\n self._interact_paddle(trajectory)\n elif isinstance(obj, Snell):\n self._refract(trajectory)\n return reward\n\n def _interact_paddle(self, trajectory: TrajectoryBase) ->float:\n paddle = trajectory.intersected_object\n paddle_fraction = paddle.get_fraction_of_paddle(trajectory.\n get_center_at_intersection())\n angle = paddle_fraction * paddle.max_angle\n angle = math.pi - angle if self.ball.unit_velocity.x > 0 else angle\n self.ball.angle = angle\n reward = self._finish_step_ball(trajectory)\n return reward\n\n def _refract(self, trajectory: TrajectoryBase):\n edge = trajectory.intersected_edge\n if self.refract:\n s0, s1 = self._get_start_and_end_speed(trajectory)\n angle = edge.angle_to_normal(trajectory.center)\n if self._exceeds_critical_angle(angle, s0, s1):\n self._reflect(Point(-1, 1), trajectory)\n return\n new_angle = math.asin(s1 / s0 * math.sin(angle))\n boundary_angle, new_angle = (self.\n _adjust_refraction_to_boundary_angle(edge, new_angle))\n new_angle = self._adjust_refraction_to_direction_of_incidence(\n boundary_angle, new_angle, trajectory)\n self.ball.angle = new_angle\n return self._finish_step_ball(trajectory)\n\n @staticmethod\n def _exceeds_critical_angle(angle: float, s0: float, s1: float) ->bool:\n \"\"\"\n Test if the angle exceeds the critical angle\n\n :param angle: The angle to the normal of the boundary\n :param s0: The speed of the original medium\n :param s1: The speed of the next medium\n :return: True if the angle exceeds the critical angle\n \"\"\"\n if s1 > s0:\n critical_angle = get_critical_angle(s0, s1)\n if abs(angle) >= critical_angle:\n return True\n return False\n\n @staticmethod\n def _adjust_refraction_to_direction_of_incidence(boundary_angle: float,\n new_angle: float, trajectory: TrajectoryBase) ->float:\n 
\"\"\"\n If the direction of incidence was from the right of the boundary, reflect `new_angle`, otherwise, return\n `new_angle` without modification.\n\n :param boundary_angle: must be in the first or fourth quadrant\n :param new_angle: The angle to be reflected in the return\n :param trajectory: The angle of the incoming ball in global coordinates\n :return: The (possibly) reflected `new_angle`\n \"\"\"\n angle = trajectory.center.angle\n assert -math.pi / 2 <= boundary_angle <= math.pi / 2, 'boundary_angle should be in first or fourth quadrant'\n if boundary_angle >= 0 and boundary_angle < angle % (2 * math.pi\n ) < boundary_angle + math.pi:\n new_angle = math.pi - new_angle\n elif boundary_angle < 0 and boundary_angle % (2 * math.pi\n ) + math.pi < angle % (2 * math.pi) < boundary_angle % (2 * math.pi\n ):\n new_angle = math.pi - new_angle\n return new_angle\n\n @staticmethod\n def _adjust_refraction_to_boundary_angle(boundary: Line, new_angle: float\n ) ->Tuple[float, float]:\n \"\"\"\n Compute the rotation of `new_angle` back to global coordinates. 
Assume incidence from the left side of the\n boundary.\n\n :param boundary: The boundary `primitives.Line` object\n :param new_angle: The refracted angle normal to the boundary\n :return: The new angle in global coordinates\n \"\"\"\n boundary_angle = boundary.angle % (2 * math.pi)\n if 0 <= boundary_angle < math.pi / 2:\n boundary_angle = boundary_angle\n new_angle = boundary_angle - math.pi / 2 + new_angle\n elif math.pi / 2 <= boundary_angle < math.pi:\n boundary_angle = math.pi - boundary_angle\n new_angle = math.pi / 2 - boundary_angle + new_angle\n elif math.pi <= boundary_angle < 3 * math.pi / 2:\n boundary_angle = math.pi - boundary_angle\n new_angle = boundary_angle - math.pi / 2 + new_angle\n elif 2 * math.pi / 3 <= boundary_angle < 2 * math.pi:\n boundary_angle = 2 * math.pi - boundary_angle\n new_angle = math.pi / 2 - boundary_angle - new_angle\n else:\n raise ValueError(f'Unexpected angle {boundary_angle}')\n return boundary_angle, new_angle\n\n def _get_start_and_end_speed(self, trajectory: TrajectoryBase) ->Tuple[\n float, float]:\n \"\"\"\n Get the speed at the start of the trajectory and the speed at the end of the trajectory.\n\n :param trajectory: The trajectory `primitives.Line` object\n :return: (initial speed, final speed)\n \"\"\"\n snell = trajectory.intersected_object\n if snell.is_in(trajectory.center.start):\n s0 = snell.speed\n s1 = self.default_ball_speed\n else:\n s0 = self.default_ball_speed\n s1 = snell.speed\n return s0, s1\n\n def _interact_border(self, trajectory: TrajectoryBase) ->float:\n reward = 0.0\n edge = trajectory.intersected_edge\n if edge == self.top_edge or edge == self.bot_edge:\n self._reflect(Point(1, -1), trajectory)\n elif edge == self.left_edge:\n reward = self.score('we')\n elif edge == self.right_edge:\n reward = self.score('they')\n else:\n raise ValueError(f'invalid edge, {edge}')\n return reward\n\n def _reflect(self, direction: Point, trajectory: TrajectoryBase):\n \"\"\"\n Multiplies the velocity of the 
ball by `direction`, continues the path of the ball by calculating the remaining\n speed using trajectory and point.\n\n :param direction: velocity multiplier\n :param trajectory: The original trajectory of the ball\n \"\"\"\n self.ball.unit_velocity *= direction\n return self._finish_step_ball(trajectory)\n\n def _finish_step_ball(self, trajectory: TrajectoryBase):\n \"\"\"\n Finish the remainder of the trajectory after any interactions.\n\n :param trajectory: The original trajectory\n :return: reward\n \"\"\"\n point = trajectory.get_center_at_intersection()\n self.ball.pos = point + self.ball.unit_velocity * EPSILON\n return self._step_ball(trajectory.remaining_speed)\n\n def _get_first_intersection(self, trajectory: TrajectoryBase):\n \"\"\"\n Find the first point at which the trajectory interacted with an object.\n\n :param trajectory: the trajectory of the object\n :return: (shape object interacted with, point of interaction, line object interacted with)\n \"\"\"\n for trajectory_line in trajectory.corners:\n for o in self.sprites:\n if not isinstance(o, Ball):\n intersection_result = o.get_intersection(trajectory_line)\n if intersection_result is not None:\n edge, point = intersection_result\n if trajectory.intersection is None:\n trajectory.set_intersection(point,\n trajectory_line, o, edge)\n elif point == trajectory.intersection and trajectory_line == trajectory.intersected_trajectory:\n raise NotImplementedError(\n 'overlapping parallel edges not implemented')\n elif point.l2_distance(trajectory_line.start\n ) < trajectory.intersection.l2_distance(trajectory\n .intersected_trajectory.start):\n trajectory.set_intersection(point,\n trajectory_line, o, edge)\n\n def _get_ball_speed(self) ->float:\n if self.uniform_speed:\n return self.default_ball_speed\n elif self.ball.is_overlapping(self.snell):\n return self.snell.speed\n else:\n return self.default_ball_speed\n\n def _step_their_paddle(self):\n \"\"\"\n Move the opponents paddle. 
Override this in a subclass to change the behavior.\n \"\"\"\n if random.random() < self.their_update_probability:\n if self.paddle_l.y < self.ball.y:\n if self.paddle_l.top_bound < self.top_bound:\n self.paddle_l.up()\n elif self.paddle_l.bottom_bound > self.bottom_bound:\n self.paddle_l.down()\n",
"step-3": "<mask token>\n\n\nclass Paddle(Rectangle):\n <mask token>\n\n def up(self):\n self.y += self.speed\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Ball(Rectangle):\n\n def __init__(self, size: float, max_initial_angle: float, visibility:\n str, has_volume: bool=False):\n \"\"\"\n Ball object\n\n :param has_volume:\n :param size: The size to render the ball\n :param max_initial_angle: The maximum angle the ball can start with\n :param visibility: How to render the ball. See `Shape.visibility`\n :param has_volume: determines whether the ball interacts as a point or as an area\n \"\"\"\n super().__init__(width=size, height=size, visibility=visibility,\n render_value=255)\n self.max_initial_angle = max_initial_angle\n self.reset(self.pos, direction='left')\n self.has_volume = has_volume\n\n def reset(self, position: Union[Tuple[float, float], Point], direction:\n str='right'):\n if direction == 'right':\n self._angle = (2 * random.random() - 1) * self.max_initial_angle\n elif direction == 'left':\n self._angle = math.pi - (2 * random.random() - 1\n ) * self.max_initial_angle\n else:\n raise ValueError(\n f\"direction must be 'left' or 'right', not {direction}\")\n self.pos = position\n\n @property\n def angle(self):\n \"\"\"\n Angle with respect to the right horizontal\n \"\"\"\n return self._angle\n\n @angle.setter\n def angle(self, value):\n self._angle = value % (2 * math.pi)\n\n @property\n def unit_velocity(self) ->Point:\n x = math.cos(self.angle)\n y = math.sin(self.angle)\n return Point(x, y)\n\n @unit_velocity.setter\n def unit_velocity(self, value: Union[Tuple[float, float], Point]):\n \"\"\"\n Sets the angle parameter give a set of (x, y) coordinates.\n\n :param value: (x, y)\n \"\"\"\n if isinstance(value, tuple):\n value = Point(*value)\n assert isinstance(value, Point\n ), f'value must be a point, not {type(value)}'\n self.angle = value.angle\n\n def get_velocity(self, speed: Union[float, int]):\n return self.unit_velocity * 
speed\n\n\nclass Snell(Rectangle):\n\n def __init__(self, width, height, speed, change_rate, visibility):\n \"\"\"\n Rectangular area with a different ball speed.\n\n :param width: The width of the layer\n :param height: The height of the layer\n :param change_rate: Rate at which the ball speed changes, the standard deviation of the change on each step.\n :param visibility: Whether and how to render the layer. See `Shape.visibility`\n \"\"\"\n assert change_rate >= 0, 'Snell `change_rate` must be non-negative'\n super().__init__(width=width, height=height, visibility=visibility,\n render_value=(235, 76, 52))\n self.speed = speed\n self._initial_speed = speed\n self.change_rate = change_rate\n\n def step(self):\n \"\"\"\n Step the Snell speed using a bounded Gaussian random walk.\n\n - step with mean 0, standard deviation `self.speed`\n - Clip the speed at `0.5 * self._initial_speed <= self.speed <= 2.0 * self._initial_speed`\n \"\"\"\n if self.change_rate != 0:\n self.speed += stats.norm(loc=0, scale=self.change_rate).rvs()\n if self.speed < 0.5 * self._initial_speed:\n self.speed = 0.5 * self._initial_speed\n if self.speed > 2.0 * self._initial_speed:\n self.speed = 2.0 * self._initial_speed\n else:\n pass\n\n\nclass TrajectoryBase(abc.ABC):\n\n def __init__(self, shape: Union[Point, Line, Rectangle], velocity: Point):\n self.shape = shape\n self.velocity = velocity\n self._reference = None\n self.intersection = None\n self.intersected_trajectory = None\n self.intersected_object = None\n self.intersected_edge = None\n self.remaining_speed = None\n\n def set_intersection(self, point: Point, trajectory_line: Line, obj:\n Shape, edge: Line):\n assert isinstance(obj, Shape), f'type Shape expected, not {type(obj)}'\n assert isinstance(point, Point\n ), f'type Point expected, not {type(point)}'\n assert isinstance(edge, Line), f'type Line expected, not {type(edge)}'\n self.intersection = point\n self.intersected_trajectory = trajectory_line\n self.remaining_speed = 
point.l2_distance(trajectory_line.end)\n self.intersected_object = obj\n self.intersected_edge = edge\n\n def get_center_at_intersection(self) ->Point:\n \"\"\"\n Get the new center of `self.shape` given that it moved along `intersected_trajectory` to `intersection`\n\n :return: new center point\n \"\"\"\n return self._reference + (self.intersection - self.\n intersected_trajectory.start)\n\n @property\n def corners(self) ->Tuple[Line, ...]:\n return (self.top_left, self.top_right, self.bottom_right, self.\n bottom_left)\n\n @property\n @abc.abstractmethod\n def center(self) ->Line:\n ...\n\n @property\n @abc.abstractmethod\n def top_right(self) ->Line:\n ...\n\n @property\n @abc.abstractmethod\n def top_left(self) ->Line:\n ...\n\n @property\n @abc.abstractmethod\n def bottom_right(self) ->Line:\n ...\n\n @property\n @abc.abstractmethod\n def bottom_left(self) ->Line:\n ...\n\n\nclass TrajectoryRectangle(TrajectoryBase):\n \"\"\"\n Compute the trajectory of each corner of the rectangle\n \"\"\"\n\n def __init__(self, shape: Rectangle, velocity: Point):\n super(TrajectoryRectangle, self).__init__(shape, velocity)\n assert isinstance(shape, Rectangle)\n self._reference = self.shape.pos\n\n @property\n def center(self) ->Line:\n \"\"\"\n Line representing the trajectory of the center of the rectangle\n \"\"\"\n return Line(self.shape.pos, self.shape.pos + self.velocity)\n\n @property\n def top_right(self) ->Line:\n \"\"\"\n Line representing the trajectory of the point on the top right corner of the rectangle\n \"\"\"\n start = Point(self.shape.right_bound, self.shape.top_bound)\n return Line(start, start + self.velocity)\n\n @property\n def top_left(self) ->Line:\n \"\"\"\n Line representing the trajectory of the point on the top left corner of the rectangle\n \"\"\"\n start = Point(self.shape.left_bound, self.shape.top_bound)\n return Line(start, start + self.velocity)\n\n @property\n def bottom_right(self) ->Line:\n \"\"\"\n Line representing the trajectory of the 
point on the bottom right corner of the rectangle\n \"\"\"\n start = Point(self.shape.right_bound, self.shape.bottom_bound)\n return Line(start, start + self.velocity)\n\n @property\n def bottom_left(self) ->Line:\n \"\"\"\n Line representing the trajectory of the point on the bottom left corner of the rectangle\n \"\"\"\n start = Point(self.shape.left_bound, self.shape.bottom_bound)\n return Line(start, start + self.velocity)\n\n\nclass TrajectoryLine(TrajectoryRectangle):\n \"\"\"\n Create a bounding box around the line and compute the trajectory as if it were a rectangle.\n \"\"\"\n\n def __init__(self, shape: Line, velocity: Point):\n super(TrajectoryLine, self).__init__(shape, velocity)\n assert isinstance(shape, Line)\n self._reference = self.shape.start\n height = abs(self.shape.start.y - self.shape.end.y)\n width = abs(self.shape.start.x - self.shape.end.x)\n center = Point((self.shape.start.x + self.shape.end.x) / 2, (self.\n shape.start.y + self.shape.end.y) / 2)\n self.shape = Rectangle(height=height, width=width)\n self.shape.pos = center\n\n\nclass TrajectoryPoint(TrajectoryBase):\n\n def __init__(self, shape: Point, velocity: Point):\n super(TrajectoryPoint, self).__init__(shape, velocity)\n assert isinstance(shape, Point)\n self._reference = self.shape\n\n @property\n def corners(self) ->Tuple[Line, ...]:\n return self._trajectory,\n\n @property\n def _trajectory(self) ->Line:\n return Line(self.shape, self.shape + self.velocity)\n\n @property\n def center(self) ->Line:\n return self._trajectory\n\n @property\n def top_right(self) ->Line:\n return self._trajectory\n\n @property\n def top_left(self) ->Line:\n return self._trajectory\n\n @property\n def bottom_right(self) ->Line:\n return self._trajectory\n\n @property\n def bottom_left(self) ->Line:\n return self._trajectory\n\n\nclass Trajectory(object):\n\n def __new__(cls, shape: Shape, velocity: Point):\n if isinstance(shape, Point):\n return TrajectoryPoint(shape, velocity)\n elif 
isinstance(shape, Line):\n return TrajectoryLine(shape, velocity)\n elif isinstance(shape, Rectangle):\n return TrajectoryRectangle(shape, velocity)\n else:\n raise NotImplementedError(\n f'No implementation of Trajectory for input shape of type {type(shape)}'\n )\n\n\nclass Canvas(Rectangle):\n action_meanings = {(0): 'NOOP', (1): 'UP', (2): 'DOWN'}\n actions = {k: v for v, k in action_meanings.items()}\n\n def __init__(self, paddle_l: Paddle, paddle_r: Paddle, ball: Ball,\n snell: Snell, ball_speed: int, height: int, width: int,\n their_update_probability: float, refract: bool, uniform_speed: bool):\n super().__init__(height=height, width=width, visibility='none',\n render_value=0)\n self.pos = self.width / 2, self.height / 2\n assert isinstance(their_update_probability, (float, int)\n ), f'their_update_probability must be numeric, not {type(their_update_probability)}'\n assert 0 <= their_update_probability <= 1, f'{their_update_probability} outside allowed bounds [0, 1]'\n self.their_update_probability = their_update_probability\n self.default_ball_speed = ball_speed\n self.snell = snell\n self.ball = ball\n self.paddle_l = paddle_l\n self.paddle_r = paddle_r\n self.sprites = [self, snell, paddle_l, paddle_r, ball]\n self.uniform_speed = uniform_speed\n self.refract = refract\n self.we_scored = False\n self.they_scored = False\n self.our_score = 0\n self.their_score = 0\n\n def register_sprite(self, sprite: Shape):\n assert issubclass(type(sprite), Shape\n ), f'sprite must be subclassed from Shape'\n self.sprites.insert(-1, sprite)\n\n @property\n def left_bound(self):\n return 0\n\n @property\n def right_bound(self):\n return self.width\n\n @property\n def top_bound(self):\n return self.height\n\n @property\n def bottom_bound(self):\n return 0\n\n def to_numpy(self) ->Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Performs masked rendering of objects in `self.sprites`. 
Priority is determined by the ordering of the list,\n earlier objects will be obscured by later ones.\n\n :return: (state, rendering)\n \"\"\"\n state = self._zero_rgb_image(round(self.height), round(self.width))\n rendering = self._zero_rgb_image(round(self.height), round(self.width))\n for sprite in self.sprites[1:]:\n sprite_state, sprite_rendering = sprite.to_numpy(self.height,\n self.width)\n state[sprite_state != 0] = sprite_state[sprite_state != 0]\n rendering[sprite_rendering != 0] = sprite_rendering[\n sprite_rendering != 0]\n return state, rendering\n\n def score(self, who):\n \"\"\"\n Increment the score and reset the ball\n\n :param who: 'we' or 'they'\n :return: reward\n \"\"\"\n if who == 'they':\n reward = -1\n self.their_score += 1\n elif who == 'we':\n reward = 1\n self.our_score += 1\n else:\n raise ValueError(f\"who must be 'we' or 'they', not {who}\")\n self._reset_ball()\n return reward\n\n def step(self, action):\n self._move_our_paddle(action)\n self._step_their_paddle()\n reward = self._step_ball()\n self._step_snell()\n return reward\n\n def get_state_size(self) ->Tuple[int, int]:\n \"\"\"\n Return the tuple (height, width) of the canvas dimensions\n \"\"\"\n return self.height, self.width\n\n def _step_snell(self) ->None:\n \"\"\"\n Step the snell layer\n \"\"\"\n self.snell.step()\n\n def _reset_ball(self):\n self.ball.reset((self.width / 2, self.height / 2))\n\n def _move_our_paddle(self, action) ->None:\n \"\"\"\n Move our paddle according to the provided action\n\n :param action: the action code\n \"\"\"\n if not isinstance(action, int):\n action = action.item()\n assert action in [a for a in self.action_meanings.keys()\n ], f'{action} is not a valid action'\n if action == self.actions['UP']:\n if self.paddle_r.top_bound < self.top_bound:\n self.paddle_r.up()\n elif action == self.actions['DOWN']:\n if self.paddle_r.bottom_bound > self.bottom_bound:\n self.paddle_r.down()\n\n def _step_ball(self, speed: Union[float, int]=None):\n 
\"\"\"\n Move the ball to the next position according to the speed of the layer it is in.\n\n :param speed: used to continue the trajectory of a ball that interacted with an object\n \"\"\"\n trajectory = self._get_trajectory(speed)\n self._get_first_intersection(trajectory)\n reward = 0\n if trajectory.intersection is None:\n self.ball.pos = trajectory.center.end\n else:\n reward = self._interaction_dispatcher(trajectory)\n return reward\n\n def _get_trajectory(self, speed) ->TrajectoryBase:\n \"\"\"\n Get the ball's trajectory\n\n :param speed: The speed of the starting medium\n :return: trajectory `Line`\n \"\"\"\n if speed is None:\n speed = self._get_ball_speed()\n if self.ball.has_volume:\n trajectory = Trajectory(self.ball, self.ball.get_velocity(speed))\n else:\n trajectory = Trajectory(self.ball.pos, self.ball.get_velocity(\n speed))\n return trajectory\n\n def _interaction_dispatcher(self, trajectory: TrajectoryBase):\n \"\"\"\n Dispatch data to the appropriate method based on the interaction `obj`.\n\n :param trajectory: the trajectory of the ball\n \"\"\"\n reward = 0\n obj = trajectory.intersected_object\n if obj is self:\n reward = self._interact_border(trajectory)\n elif isinstance(obj, Paddle):\n self._interact_paddle(trajectory)\n elif isinstance(obj, Snell):\n self._refract(trajectory)\n return reward\n\n def _interact_paddle(self, trajectory: TrajectoryBase) ->float:\n paddle = trajectory.intersected_object\n paddle_fraction = paddle.get_fraction_of_paddle(trajectory.\n get_center_at_intersection())\n angle = paddle_fraction * paddle.max_angle\n angle = math.pi - angle if self.ball.unit_velocity.x > 0 else angle\n self.ball.angle = angle\n reward = self._finish_step_ball(trajectory)\n return reward\n\n def _refract(self, trajectory: TrajectoryBase):\n edge = trajectory.intersected_edge\n if self.refract:\n s0, s1 = self._get_start_and_end_speed(trajectory)\n angle = edge.angle_to_normal(trajectory.center)\n if self._exceeds_critical_angle(angle, 
s0, s1):\n self._reflect(Point(-1, 1), trajectory)\n return\n new_angle = math.asin(s1 / s0 * math.sin(angle))\n boundary_angle, new_angle = (self.\n _adjust_refraction_to_boundary_angle(edge, new_angle))\n new_angle = self._adjust_refraction_to_direction_of_incidence(\n boundary_angle, new_angle, trajectory)\n self.ball.angle = new_angle\n return self._finish_step_ball(trajectory)\n\n @staticmethod\n def _exceeds_critical_angle(angle: float, s0: float, s1: float) ->bool:\n \"\"\"\n Test if the angle exceeds the critical angle\n\n :param angle: The angle to the normal of the boundary\n :param s0: The speed of the original medium\n :param s1: The speed of the next medium\n :return: True if the angle exceeds the critical angle\n \"\"\"\n if s1 > s0:\n critical_angle = get_critical_angle(s0, s1)\n if abs(angle) >= critical_angle:\n return True\n return False\n\n @staticmethod\n def _adjust_refraction_to_direction_of_incidence(boundary_angle: float,\n new_angle: float, trajectory: TrajectoryBase) ->float:\n \"\"\"\n If the direction of incidence was from the right of the boundary, reflect `new_angle`, otherwise, return\n `new_angle` without modification.\n\n :param boundary_angle: must be in the first or fourth quadrant\n :param new_angle: The angle to be reflected in the return\n :param trajectory: The angle of the incoming ball in global coordinates\n :return: The (possibly) reflected `new_angle`\n \"\"\"\n angle = trajectory.center.angle\n assert -math.pi / 2 <= boundary_angle <= math.pi / 2, 'boundary_angle should be in first or fourth quadrant'\n if boundary_angle >= 0 and boundary_angle < angle % (2 * math.pi\n ) < boundary_angle + math.pi:\n new_angle = math.pi - new_angle\n elif boundary_angle < 0 and boundary_angle % (2 * math.pi\n ) + math.pi < angle % (2 * math.pi) < boundary_angle % (2 * math.pi\n ):\n new_angle = math.pi - new_angle\n return new_angle\n\n @staticmethod\n def _adjust_refraction_to_boundary_angle(boundary: Line, new_angle: float\n ) 
->Tuple[float, float]:\n \"\"\"\n Compute the rotation of `new_angle` back to global coordinates. Assume incidence from the left side of the\n boundary.\n\n :param boundary: The boundary `primitives.Line` object\n :param new_angle: The refracted angle normal to the boundary\n :return: The new angle in global coordinates\n \"\"\"\n boundary_angle = boundary.angle % (2 * math.pi)\n if 0 <= boundary_angle < math.pi / 2:\n boundary_angle = boundary_angle\n new_angle = boundary_angle - math.pi / 2 + new_angle\n elif math.pi / 2 <= boundary_angle < math.pi:\n boundary_angle = math.pi - boundary_angle\n new_angle = math.pi / 2 - boundary_angle + new_angle\n elif math.pi <= boundary_angle < 3 * math.pi / 2:\n boundary_angle = math.pi - boundary_angle\n new_angle = boundary_angle - math.pi / 2 + new_angle\n elif 2 * math.pi / 3 <= boundary_angle < 2 * math.pi:\n boundary_angle = 2 * math.pi - boundary_angle\n new_angle = math.pi / 2 - boundary_angle - new_angle\n else:\n raise ValueError(f'Unexpected angle {boundary_angle}')\n return boundary_angle, new_angle\n\n def _get_start_and_end_speed(self, trajectory: TrajectoryBase) ->Tuple[\n float, float]:\n \"\"\"\n Get the speed at the start of the trajectory and the speed at the end of the trajectory.\n\n :param trajectory: The trajectory `primitives.Line` object\n :return: (initial speed, final speed)\n \"\"\"\n snell = trajectory.intersected_object\n if snell.is_in(trajectory.center.start):\n s0 = snell.speed\n s1 = self.default_ball_speed\n else:\n s0 = self.default_ball_speed\n s1 = snell.speed\n return s0, s1\n\n def _interact_border(self, trajectory: TrajectoryBase) ->float:\n reward = 0.0\n edge = trajectory.intersected_edge\n if edge == self.top_edge or edge == self.bot_edge:\n self._reflect(Point(1, -1), trajectory)\n elif edge == self.left_edge:\n reward = self.score('we')\n elif edge == self.right_edge:\n reward = self.score('they')\n else:\n raise ValueError(f'invalid edge, {edge}')\n return reward\n\n def 
_reflect(self, direction: Point, trajectory: TrajectoryBase):\n \"\"\"\n Multiplies the velocity of the ball by `direction`, continues the path of the ball by calculating the remaining\n speed using trajectory and point.\n\n :param direction: velocity multiplier\n :param trajectory: The original trajectory of the ball\n \"\"\"\n self.ball.unit_velocity *= direction\n return self._finish_step_ball(trajectory)\n\n def _finish_step_ball(self, trajectory: TrajectoryBase):\n \"\"\"\n Finish the remainder of the trajectory after any interactions.\n\n :param trajectory: The original trajectory\n :return: reward\n \"\"\"\n point = trajectory.get_center_at_intersection()\n self.ball.pos = point + self.ball.unit_velocity * EPSILON\n return self._step_ball(trajectory.remaining_speed)\n\n def _get_first_intersection(self, trajectory: TrajectoryBase):\n \"\"\"\n Find the first point at which the trajectory interacted with an object.\n\n :param trajectory: the trajectory of the object\n :return: (shape object interacted with, point of interaction, line object interacted with)\n \"\"\"\n for trajectory_line in trajectory.corners:\n for o in self.sprites:\n if not isinstance(o, Ball):\n intersection_result = o.get_intersection(trajectory_line)\n if intersection_result is not None:\n edge, point = intersection_result\n if trajectory.intersection is None:\n trajectory.set_intersection(point,\n trajectory_line, o, edge)\n elif point == trajectory.intersection and trajectory_line == trajectory.intersected_trajectory:\n raise NotImplementedError(\n 'overlapping parallel edges not implemented')\n elif point.l2_distance(trajectory_line.start\n ) < trajectory.intersection.l2_distance(trajectory\n .intersected_trajectory.start):\n trajectory.set_intersection(point,\n trajectory_line, o, edge)\n\n def _get_ball_speed(self) ->float:\n if self.uniform_speed:\n return self.default_ball_speed\n elif self.ball.is_overlapping(self.snell):\n return self.snell.speed\n else:\n return 
self.default_ball_speed\n\n def _step_their_paddle(self):\n \"\"\"\n Move the opponents paddle. Override this in a subclass to change the behavior.\n \"\"\"\n if random.random() < self.their_update_probability:\n if self.paddle_l.y < self.ball.y:\n if self.paddle_l.top_bound < self.top_bound:\n self.paddle_l.up()\n elif self.paddle_l.bottom_bound > self.bottom_bound:\n self.paddle_l.down()\n",
"step-4": "<mask token>\n\n\nclass Paddle(Rectangle):\n <mask token>\n\n def up(self):\n self.y += self.speed\n <mask token>\n\n def _get_edges(self) ->Tuple[Line]:\n \"\"\"\n Only return the field-side edge\n \"\"\"\n if self.side == 'right':\n return Line((self.left_bound, self.bottom_bound), (self.\n left_bound, self.top_bound)),\n elif self.side == 'left':\n return Line((self.right_bound, self.bottom_bound), (self.\n right_bound, self.top_bound)),\n <mask token>\n\n\nclass Ball(Rectangle):\n\n def __init__(self, size: float, max_initial_angle: float, visibility:\n str, has_volume: bool=False):\n \"\"\"\n Ball object\n\n :param has_volume:\n :param size: The size to render the ball\n :param max_initial_angle: The maximum angle the ball can start with\n :param visibility: How to render the ball. See `Shape.visibility`\n :param has_volume: determines whether the ball interacts as a point or as an area\n \"\"\"\n super().__init__(width=size, height=size, visibility=visibility,\n render_value=255)\n self.max_initial_angle = max_initial_angle\n self.reset(self.pos, direction='left')\n self.has_volume = has_volume\n\n def reset(self, position: Union[Tuple[float, float], Point], direction:\n str='right'):\n if direction == 'right':\n self._angle = (2 * random.random() - 1) * self.max_initial_angle\n elif direction == 'left':\n self._angle = math.pi - (2 * random.random() - 1\n ) * self.max_initial_angle\n else:\n raise ValueError(\n f\"direction must be 'left' or 'right', not {direction}\")\n self.pos = position\n\n @property\n def angle(self):\n \"\"\"\n Angle with respect to the right horizontal\n \"\"\"\n return self._angle\n\n @angle.setter\n def angle(self, value):\n self._angle = value % (2 * math.pi)\n\n @property\n def unit_velocity(self) ->Point:\n x = math.cos(self.angle)\n y = math.sin(self.angle)\n return Point(x, y)\n\n @unit_velocity.setter\n def unit_velocity(self, value: Union[Tuple[float, float], Point]):\n \"\"\"\n Sets the angle parameter give a set 
of (x, y) coordinates.\n\n :param value: (x, y)\n \"\"\"\n if isinstance(value, tuple):\n value = Point(*value)\n assert isinstance(value, Point\n ), f'value must be a point, not {type(value)}'\n self.angle = value.angle\n\n def get_velocity(self, speed: Union[float, int]):\n return self.unit_velocity * speed\n\n\nclass Snell(Rectangle):\n\n def __init__(self, width, height, speed, change_rate, visibility):\n \"\"\"\n Rectangular area with a different ball speed.\n\n :param width: The width of the layer\n :param height: The height of the layer\n :param change_rate: Rate at which the ball speed changes, the standard deviation of the change on each step.\n :param visibility: Whether and how to render the layer. See `Shape.visibility`\n \"\"\"\n assert change_rate >= 0, 'Snell `change_rate` must be non-negative'\n super().__init__(width=width, height=height, visibility=visibility,\n render_value=(235, 76, 52))\n self.speed = speed\n self._initial_speed = speed\n self.change_rate = change_rate\n\n def step(self):\n \"\"\"\n Step the Snell speed using a bounded Gaussian random walk.\n\n - step with mean 0, standard deviation `self.speed`\n - Clip the speed at `0.5 * self._initial_speed <= self.speed <= 2.0 * self._initial_speed`\n \"\"\"\n if self.change_rate != 0:\n self.speed += stats.norm(loc=0, scale=self.change_rate).rvs()\n if self.speed < 0.5 * self._initial_speed:\n self.speed = 0.5 * self._initial_speed\n if self.speed > 2.0 * self._initial_speed:\n self.speed = 2.0 * self._initial_speed\n else:\n pass\n\n\nclass TrajectoryBase(abc.ABC):\n\n def __init__(self, shape: Union[Point, Line, Rectangle], velocity: Point):\n self.shape = shape\n self.velocity = velocity\n self._reference = None\n self.intersection = None\n self.intersected_trajectory = None\n self.intersected_object = None\n self.intersected_edge = None\n self.remaining_speed = None\n\n def set_intersection(self, point: Point, trajectory_line: Line, obj:\n Shape, edge: Line):\n assert isinstance(obj, 
Shape), f'type Shape expected, not {type(obj)}'\n assert isinstance(point, Point\n ), f'type Point expected, not {type(point)}'\n assert isinstance(edge, Line), f'type Line expected, not {type(edge)}'\n self.intersection = point\n self.intersected_trajectory = trajectory_line\n self.remaining_speed = point.l2_distance(trajectory_line.end)\n self.intersected_object = obj\n self.intersected_edge = edge\n\n def get_center_at_intersection(self) ->Point:\n \"\"\"\n Get the new center of `self.shape` given that it moved along `intersected_trajectory` to `intersection`\n\n :return: new center point\n \"\"\"\n return self._reference + (self.intersection - self.\n intersected_trajectory.start)\n\n @property\n def corners(self) ->Tuple[Line, ...]:\n return (self.top_left, self.top_right, self.bottom_right, self.\n bottom_left)\n\n @property\n @abc.abstractmethod\n def center(self) ->Line:\n ...\n\n @property\n @abc.abstractmethod\n def top_right(self) ->Line:\n ...\n\n @property\n @abc.abstractmethod\n def top_left(self) ->Line:\n ...\n\n @property\n @abc.abstractmethod\n def bottom_right(self) ->Line:\n ...\n\n @property\n @abc.abstractmethod\n def bottom_left(self) ->Line:\n ...\n\n\nclass TrajectoryRectangle(TrajectoryBase):\n \"\"\"\n Compute the trajectory of each corner of the rectangle\n \"\"\"\n\n def __init__(self, shape: Rectangle, velocity: Point):\n super(TrajectoryRectangle, self).__init__(shape, velocity)\n assert isinstance(shape, Rectangle)\n self._reference = self.shape.pos\n\n @property\n def center(self) ->Line:\n \"\"\"\n Line representing the trajectory of the center of the rectangle\n \"\"\"\n return Line(self.shape.pos, self.shape.pos + self.velocity)\n\n @property\n def top_right(self) ->Line:\n \"\"\"\n Line representing the trajectory of the point on the top right corner of the rectangle\n \"\"\"\n start = Point(self.shape.right_bound, self.shape.top_bound)\n return Line(start, start + self.velocity)\n\n @property\n def top_left(self) ->Line:\n 
\"\"\"\n Line representing the trajectory of the point on the top left corner of the rectangle\n \"\"\"\n start = Point(self.shape.left_bound, self.shape.top_bound)\n return Line(start, start + self.velocity)\n\n @property\n def bottom_right(self) ->Line:\n \"\"\"\n Line representing the trajectory of the point on the bottom right corner of the rectangle\n \"\"\"\n start = Point(self.shape.right_bound, self.shape.bottom_bound)\n return Line(start, start + self.velocity)\n\n @property\n def bottom_left(self) ->Line:\n \"\"\"\n Line representing the trajectory of the point on the bottom left corner of the rectangle\n \"\"\"\n start = Point(self.shape.left_bound, self.shape.bottom_bound)\n return Line(start, start + self.velocity)\n\n\nclass TrajectoryLine(TrajectoryRectangle):\n \"\"\"\n Create a bounding box around the line and compute the trajectory as if it were a rectangle.\n \"\"\"\n\n def __init__(self, shape: Line, velocity: Point):\n super(TrajectoryLine, self).__init__(shape, velocity)\n assert isinstance(shape, Line)\n self._reference = self.shape.start\n height = abs(self.shape.start.y - self.shape.end.y)\n width = abs(self.shape.start.x - self.shape.end.x)\n center = Point((self.shape.start.x + self.shape.end.x) / 2, (self.\n shape.start.y + self.shape.end.y) / 2)\n self.shape = Rectangle(height=height, width=width)\n self.shape.pos = center\n\n\nclass TrajectoryPoint(TrajectoryBase):\n\n def __init__(self, shape: Point, velocity: Point):\n super(TrajectoryPoint, self).__init__(shape, velocity)\n assert isinstance(shape, Point)\n self._reference = self.shape\n\n @property\n def corners(self) ->Tuple[Line, ...]:\n return self._trajectory,\n\n @property\n def _trajectory(self) ->Line:\n return Line(self.shape, self.shape + self.velocity)\n\n @property\n def center(self) ->Line:\n return self._trajectory\n\n @property\n def top_right(self) ->Line:\n return self._trajectory\n\n @property\n def top_left(self) ->Line:\n return self._trajectory\n\n @property\n 
def bottom_right(self) ->Line:\n return self._trajectory\n\n @property\n def bottom_left(self) ->Line:\n return self._trajectory\n\n\nclass Trajectory(object):\n\n def __new__(cls, shape: Shape, velocity: Point):\n if isinstance(shape, Point):\n return TrajectoryPoint(shape, velocity)\n elif isinstance(shape, Line):\n return TrajectoryLine(shape, velocity)\n elif isinstance(shape, Rectangle):\n return TrajectoryRectangle(shape, velocity)\n else:\n raise NotImplementedError(\n f'No implementation of Trajectory for input shape of type {type(shape)}'\n )\n\n\nclass Canvas(Rectangle):\n action_meanings = {(0): 'NOOP', (1): 'UP', (2): 'DOWN'}\n actions = {k: v for v, k in action_meanings.items()}\n\n def __init__(self, paddle_l: Paddle, paddle_r: Paddle, ball: Ball,\n snell: Snell, ball_speed: int, height: int, width: int,\n their_update_probability: float, refract: bool, uniform_speed: bool):\n super().__init__(height=height, width=width, visibility='none',\n render_value=0)\n self.pos = self.width / 2, self.height / 2\n assert isinstance(their_update_probability, (float, int)\n ), f'their_update_probability must be numeric, not {type(their_update_probability)}'\n assert 0 <= their_update_probability <= 1, f'{their_update_probability} outside allowed bounds [0, 1]'\n self.their_update_probability = their_update_probability\n self.default_ball_speed = ball_speed\n self.snell = snell\n self.ball = ball\n self.paddle_l = paddle_l\n self.paddle_r = paddle_r\n self.sprites = [self, snell, paddle_l, paddle_r, ball]\n self.uniform_speed = uniform_speed\n self.refract = refract\n self.we_scored = False\n self.they_scored = False\n self.our_score = 0\n self.their_score = 0\n\n def register_sprite(self, sprite: Shape):\n assert issubclass(type(sprite), Shape\n ), f'sprite must be subclassed from Shape'\n self.sprites.insert(-1, sprite)\n\n @property\n def left_bound(self):\n return 0\n\n @property\n def right_bound(self):\n return self.width\n\n @property\n def 
top_bound(self):\n return self.height\n\n @property\n def bottom_bound(self):\n return 0\n\n def to_numpy(self) ->Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Performs masked rendering of objects in `self.sprites`. Priority is determined by the ordering of the list,\n earlier objects will be obscured by later ones.\n\n :return: (state, rendering)\n \"\"\"\n state = self._zero_rgb_image(round(self.height), round(self.width))\n rendering = self._zero_rgb_image(round(self.height), round(self.width))\n for sprite in self.sprites[1:]:\n sprite_state, sprite_rendering = sprite.to_numpy(self.height,\n self.width)\n state[sprite_state != 0] = sprite_state[sprite_state != 0]\n rendering[sprite_rendering != 0] = sprite_rendering[\n sprite_rendering != 0]\n return state, rendering\n\n def score(self, who):\n \"\"\"\n Increment the score and reset the ball\n\n :param who: 'we' or 'they'\n :return: reward\n \"\"\"\n if who == 'they':\n reward = -1\n self.their_score += 1\n elif who == 'we':\n reward = 1\n self.our_score += 1\n else:\n raise ValueError(f\"who must be 'we' or 'they', not {who}\")\n self._reset_ball()\n return reward\n\n def step(self, action):\n self._move_our_paddle(action)\n self._step_their_paddle()\n reward = self._step_ball()\n self._step_snell()\n return reward\n\n def get_state_size(self) ->Tuple[int, int]:\n \"\"\"\n Return the tuple (height, width) of the canvas dimensions\n \"\"\"\n return self.height, self.width\n\n def _step_snell(self) ->None:\n \"\"\"\n Step the snell layer\n \"\"\"\n self.snell.step()\n\n def _reset_ball(self):\n self.ball.reset((self.width / 2, self.height / 2))\n\n def _move_our_paddle(self, action) ->None:\n \"\"\"\n Move our paddle according to the provided action\n\n :param action: the action code\n \"\"\"\n if not isinstance(action, int):\n action = action.item()\n assert action in [a for a in self.action_meanings.keys()\n ], f'{action} is not a valid action'\n if action == self.actions['UP']:\n if self.paddle_r.top_bound < 
self.top_bound:\n self.paddle_r.up()\n elif action == self.actions['DOWN']:\n if self.paddle_r.bottom_bound > self.bottom_bound:\n self.paddle_r.down()\n\n def _step_ball(self, speed: Union[float, int]=None):\n \"\"\"\n Move the ball to the next position according to the speed of the layer it is in.\n\n :param speed: used to continue the trajectory of a ball that interacted with an object\n \"\"\"\n trajectory = self._get_trajectory(speed)\n self._get_first_intersection(trajectory)\n reward = 0\n if trajectory.intersection is None:\n self.ball.pos = trajectory.center.end\n else:\n reward = self._interaction_dispatcher(trajectory)\n return reward\n\n def _get_trajectory(self, speed) ->TrajectoryBase:\n \"\"\"\n Get the ball's trajectory\n\n :param speed: The speed of the starting medium\n :return: trajectory `Line`\n \"\"\"\n if speed is None:\n speed = self._get_ball_speed()\n if self.ball.has_volume:\n trajectory = Trajectory(self.ball, self.ball.get_velocity(speed))\n else:\n trajectory = Trajectory(self.ball.pos, self.ball.get_velocity(\n speed))\n return trajectory\n\n def _interaction_dispatcher(self, trajectory: TrajectoryBase):\n \"\"\"\n Dispatch data to the appropriate method based on the interaction `obj`.\n\n :param trajectory: the trajectory of the ball\n \"\"\"\n reward = 0\n obj = trajectory.intersected_object\n if obj is self:\n reward = self._interact_border(trajectory)\n elif isinstance(obj, Paddle):\n self._interact_paddle(trajectory)\n elif isinstance(obj, Snell):\n self._refract(trajectory)\n return reward\n\n def _interact_paddle(self, trajectory: TrajectoryBase) ->float:\n paddle = trajectory.intersected_object\n paddle_fraction = paddle.get_fraction_of_paddle(trajectory.\n get_center_at_intersection())\n angle = paddle_fraction * paddle.max_angle\n angle = math.pi - angle if self.ball.unit_velocity.x > 0 else angle\n self.ball.angle = angle\n reward = self._finish_step_ball(trajectory)\n return reward\n\n def _refract(self, trajectory: 
TrajectoryBase):\n edge = trajectory.intersected_edge\n if self.refract:\n s0, s1 = self._get_start_and_end_speed(trajectory)\n angle = edge.angle_to_normal(trajectory.center)\n if self._exceeds_critical_angle(angle, s0, s1):\n self._reflect(Point(-1, 1), trajectory)\n return\n new_angle = math.asin(s1 / s0 * math.sin(angle))\n boundary_angle, new_angle = (self.\n _adjust_refraction_to_boundary_angle(edge, new_angle))\n new_angle = self._adjust_refraction_to_direction_of_incidence(\n boundary_angle, new_angle, trajectory)\n self.ball.angle = new_angle\n return self._finish_step_ball(trajectory)\n\n @staticmethod\n def _exceeds_critical_angle(angle: float, s0: float, s1: float) ->bool:\n \"\"\"\n Test if the angle exceeds the critical angle\n\n :param angle: The angle to the normal of the boundary\n :param s0: The speed of the original medium\n :param s1: The speed of the next medium\n :return: True if the angle exceeds the critical angle\n \"\"\"\n if s1 > s0:\n critical_angle = get_critical_angle(s0, s1)\n if abs(angle) >= critical_angle:\n return True\n return False\n\n @staticmethod\n def _adjust_refraction_to_direction_of_incidence(boundary_angle: float,\n new_angle: float, trajectory: TrajectoryBase) ->float:\n \"\"\"\n If the direction of incidence was from the right of the boundary, reflect `new_angle`, otherwise, return\n `new_angle` without modification.\n\n :param boundary_angle: must be in the first or fourth quadrant\n :param new_angle: The angle to be reflected in the return\n :param trajectory: The angle of the incoming ball in global coordinates\n :return: The (possibly) reflected `new_angle`\n \"\"\"\n angle = trajectory.center.angle\n assert -math.pi / 2 <= boundary_angle <= math.pi / 2, 'boundary_angle should be in first or fourth quadrant'\n if boundary_angle >= 0 and boundary_angle < angle % (2 * math.pi\n ) < boundary_angle + math.pi:\n new_angle = math.pi - new_angle\n elif boundary_angle < 0 and boundary_angle % (2 * math.pi\n ) + math.pi < 
angle % (2 * math.pi) < boundary_angle % (2 * math.pi\n ):\n new_angle = math.pi - new_angle\n return new_angle\n\n @staticmethod\n def _adjust_refraction_to_boundary_angle(boundary: Line, new_angle: float\n ) ->Tuple[float, float]:\n \"\"\"\n Compute the rotation of `new_angle` back to global coordinates. Assume incidence from the left side of the\n boundary.\n\n :param boundary: The boundary `primitives.Line` object\n :param new_angle: The refracted angle normal to the boundary\n :return: The new angle in global coordinates\n \"\"\"\n boundary_angle = boundary.angle % (2 * math.pi)\n if 0 <= boundary_angle < math.pi / 2:\n boundary_angle = boundary_angle\n new_angle = boundary_angle - math.pi / 2 + new_angle\n elif math.pi / 2 <= boundary_angle < math.pi:\n boundary_angle = math.pi - boundary_angle\n new_angle = math.pi / 2 - boundary_angle + new_angle\n elif math.pi <= boundary_angle < 3 * math.pi / 2:\n boundary_angle = math.pi - boundary_angle\n new_angle = boundary_angle - math.pi / 2 + new_angle\n elif 2 * math.pi / 3 <= boundary_angle < 2 * math.pi:\n boundary_angle = 2 * math.pi - boundary_angle\n new_angle = math.pi / 2 - boundary_angle - new_angle\n else:\n raise ValueError(f'Unexpected angle {boundary_angle}')\n return boundary_angle, new_angle\n\n def _get_start_and_end_speed(self, trajectory: TrajectoryBase) ->Tuple[\n float, float]:\n \"\"\"\n Get the speed at the start of the trajectory and the speed at the end of the trajectory.\n\n :param trajectory: The trajectory `primitives.Line` object\n :return: (initial speed, final speed)\n \"\"\"\n snell = trajectory.intersected_object\n if snell.is_in(trajectory.center.start):\n s0 = snell.speed\n s1 = self.default_ball_speed\n else:\n s0 = self.default_ball_speed\n s1 = snell.speed\n return s0, s1\n\n def _interact_border(self, trajectory: TrajectoryBase) ->float:\n reward = 0.0\n edge = trajectory.intersected_edge\n if edge == self.top_edge or edge == self.bot_edge:\n self._reflect(Point(1, -1), 
trajectory)\n elif edge == self.left_edge:\n reward = self.score('we')\n elif edge == self.right_edge:\n reward = self.score('they')\n else:\n raise ValueError(f'invalid edge, {edge}')\n return reward\n\n def _reflect(self, direction: Point, trajectory: TrajectoryBase):\n \"\"\"\n Multiplies the velocity of the ball by `direction`, continues the path of the ball by calculating the remaining\n speed using trajectory and point.\n\n :param direction: velocity multiplier\n :param trajectory: The original trajectory of the ball\n \"\"\"\n self.ball.unit_velocity *= direction\n return self._finish_step_ball(trajectory)\n\n def _finish_step_ball(self, trajectory: TrajectoryBase):\n \"\"\"\n Finish the remainder of the trajectory after any interactions.\n\n :param trajectory: The original trajectory\n :return: reward\n \"\"\"\n point = trajectory.get_center_at_intersection()\n self.ball.pos = point + self.ball.unit_velocity * EPSILON\n return self._step_ball(trajectory.remaining_speed)\n\n def _get_first_intersection(self, trajectory: TrajectoryBase):\n \"\"\"\n Find the first point at which the trajectory interacted with an object.\n\n :param trajectory: the trajectory of the object\n :return: (shape object interacted with, point of interaction, line object interacted with)\n \"\"\"\n for trajectory_line in trajectory.corners:\n for o in self.sprites:\n if not isinstance(o, Ball):\n intersection_result = o.get_intersection(trajectory_line)\n if intersection_result is not None:\n edge, point = intersection_result\n if trajectory.intersection is None:\n trajectory.set_intersection(point,\n trajectory_line, o, edge)\n elif point == trajectory.intersection and trajectory_line == trajectory.intersected_trajectory:\n raise NotImplementedError(\n 'overlapping parallel edges not implemented')\n elif point.l2_distance(trajectory_line.start\n ) < trajectory.intersection.l2_distance(trajectory\n .intersected_trajectory.start):\n trajectory.set_intersection(point,\n trajectory_line, 
o, edge)\n\n def _get_ball_speed(self) ->float:\n if self.uniform_speed:\n return self.default_ball_speed\n elif self.ball.is_overlapping(self.snell):\n return self.snell.speed\n else:\n return self.default_ball_speed\n\n def _step_their_paddle(self):\n \"\"\"\n Move the opponents paddle. Override this in a subclass to change the behavior.\n \"\"\"\n if random.random() < self.their_update_probability:\n if self.paddle_l.y < self.ball.y:\n if self.paddle_l.top_bound < self.top_bound:\n self.paddle_l.up()\n elif self.paddle_l.bottom_bound > self.bottom_bound:\n self.paddle_l.down()\n",
"step-5": "import abc\nimport math\nimport random\nfrom typing import Union, Tuple\n\nimport numpy as np\nfrom scipy import stats\n\nfrom . import Rectangle, Line, Point, Shape\n\n__all__ = ['get_critical_angle', 'Paddle', 'Ball', 'Snell', 'Canvas']\n\nEPSILON = 1e-7\n\n\ndef get_critical_angle(s0: float, s1: float) -> Union[float, None]:\n \"\"\"\n Returns the critical angle if it exists for a ball moving from a medium with velocity `s0` to a medium with\n velocity `s1`. If the critical angle does not exist, returns None.\n\n :param s0: speed of the initial medium\n :param s1: speed of the final medium\n :return: critical angle or None\n \"\"\"\n if s0 < s1:\n critical_angle = math.asin(s0 / s1)\n else:\n critical_angle = None\n return critical_angle\n\n\nclass Paddle(Rectangle):\n def __init__(self, height: float, width: float, speed: float, side: str, max_angle: float, visibility: str):\n \"\"\"\n\n :param height: The paddle height\n :param width: The paddle width (only matters for rendering)\n :param side: The side the paddle will be on ('left' or 'right')\n :param speed: The units the paddle moves in a single turn\n :param visibility: Whether and how to render the paddle. 
See `Shape.visibility`\n :param max_angle: The maximum angle at which the paddle can hit the ball\n \"\"\"\n super().__init__(height=height, width=width, visibility=visibility, render_value=255)\n assert side in ['left', 'right'], f\"side must be 'left' or 'right', not {side}\"\n assert 0 <= max_angle <= math.pi / 2, f\"max angle must be between 0 and pi/2, not {max_angle}\"\n self.side = side\n self.speed = speed\n self.max_angle = max_angle\n\n def up(self):\n self.y += self.speed\n\n def down(self):\n self.y -= self.speed\n\n def _get_edges(self) -> Tuple[Line]:\n \"\"\"\n Only return the field-side edge\n \"\"\"\n if self.side == 'right':\n return Line((self.left_bound, self.bottom_bound), (self.left_bound, self.top_bound)),\n elif self.side == 'left':\n return Line((self.right_bound, self.bottom_bound), (self.right_bound, self.top_bound)),\n\n def get_fraction_of_paddle(self, point: Point):\n \"\"\"\n Computes the fractional distance from the middle of the paddle, normalized by the paddle's height.\n Asserts if the ball was not on the paddle.\n\n :param point: the point where the ball hit the paddle\n :return: fraction of the paddle\n \"\"\"\n fraction = (point.y - self.y) / self.height\n fraction = max(min(fraction, 0.5), -0.5) # clamp to +/- 0.5\n return fraction\n\n\nclass Ball(Rectangle):\n def __init__(self, size: float, max_initial_angle: float, visibility: str, has_volume: bool = False):\n \"\"\"\n Ball object\n\n :param has_volume:\n :param size: The size to render the ball\n :param max_initial_angle: The maximum angle the ball can start with\n :param visibility: How to render the ball. 
See `Shape.visibility`\n :param has_volume: determines whether the ball interacts as a point or as an area\n \"\"\"\n super().__init__(width=size, height=size, visibility=visibility, render_value=255)\n self.max_initial_angle = max_initial_angle\n self.reset(self.pos, direction='left')\n self.has_volume = has_volume\n\n def reset(self, position: Union[Tuple[float, float], Point], direction: str = 'right'):\n if direction == 'right':\n self._angle = (2 * random.random() - 1) * self.max_initial_angle\n elif direction == 'left':\n self._angle = math.pi - (2 * random.random() - 1) * self.max_initial_angle\n else:\n raise ValueError(f\"direction must be 'left' or 'right', not {direction}\")\n self.pos = position\n\n @property\n def angle(self):\n \"\"\"\n Angle with respect to the right horizontal\n \"\"\"\n return self._angle\n\n @angle.setter\n def angle(self, value):\n self._angle = value % (2 * math.pi)\n\n @property\n def unit_velocity(self) -> Point:\n x = math.cos(self.angle)\n y = math.sin(self.angle)\n return Point(x, y)\n\n @unit_velocity.setter\n def unit_velocity(self, value: Union[Tuple[float, float], Point]):\n \"\"\"\n Sets the angle parameter give a set of (x, y) coordinates.\n\n :param value: (x, y)\n \"\"\"\n if isinstance(value, tuple):\n value = Point(*value)\n assert isinstance(value, Point), f\"value must be a point, not {type(value)}\"\n self.angle = value.angle\n\n def get_velocity(self, speed: Union[float, int]):\n return self.unit_velocity * speed\n\n\nclass Snell(Rectangle):\n def __init__(self, width, height, speed, change_rate, visibility):\n \"\"\"\n Rectangular area with a different ball speed.\n\n :param width: The width of the layer\n :param height: The height of the layer\n :param change_rate: Rate at which the ball speed changes, the standard deviation of the change on each step.\n :param visibility: Whether and how to render the layer. 
See `Shape.visibility`\n \"\"\"\n assert change_rate >= 0, \"Snell `change_rate` must be non-negative\"\n\n super().__init__(width=width, height=height, visibility=visibility, render_value=(235, 76, 52))\n self.speed = speed\n self._initial_speed = speed\n self.change_rate = change_rate\n\n def step(self):\n \"\"\"\n Step the Snell speed using a bounded Gaussian random walk.\n\n - step with mean 0, standard deviation `self.speed`\n - Clip the speed at `0.5 * self._initial_speed <= self.speed <= 2.0 * self._initial_speed`\n \"\"\"\n if self.change_rate != 0:\n self.speed += stats.norm(loc=0, scale=self.change_rate).rvs()\n\n if self.speed < 0.5 * self._initial_speed:\n self.speed = 0.5 * self._initial_speed\n if self.speed > 2.0 * self._initial_speed:\n self.speed = 2.0 * self._initial_speed\n else:\n pass\n\n\nclass TrajectoryBase(abc.ABC):\n def __init__(self, shape: Union[Point, Line, Rectangle], velocity: Point):\n self.shape = shape\n self.velocity = velocity\n\n self._reference = None\n self.intersection = None\n self.intersected_trajectory = None\n self.intersected_object = None\n self.intersected_edge = None\n self.remaining_speed = None\n\n def set_intersection(self, point: Point, trajectory_line: Line, obj: Shape, edge: Line):\n assert isinstance(obj, Shape), f\"type Shape expected, not {type(obj)}\"\n assert isinstance(point, Point), f\"type Point expected, not {type(point)}\"\n assert isinstance(edge, Line), f\"type Line expected, not {type(edge)}\"\n\n self.intersection = point\n self.intersected_trajectory = trajectory_line\n self.remaining_speed = point.l2_distance(trajectory_line.end)\n self.intersected_object = obj\n self.intersected_edge = edge\n\n def get_center_at_intersection(self) -> Point:\n \"\"\"\n Get the new center of `self.shape` given that it moved along `intersected_trajectory` to `intersection`\n\n :return: new center point\n \"\"\"\n return self._reference + (self.intersection - self.intersected_trajectory.start)\n\n @property\n def 
corners(self) -> Tuple[Line, ...]:\n return self.top_left, self.top_right, self.bottom_right, self.bottom_left\n\n @property\n @abc.abstractmethod\n def center(self) -> Line: ...\n\n @property\n @abc.abstractmethod\n def top_right(self) -> Line: ...\n\n @property\n @abc.abstractmethod\n def top_left(self) -> Line: ...\n\n @property\n @abc.abstractmethod\n def bottom_right(self) -> Line: ...\n\n @property\n @abc.abstractmethod\n def bottom_left(self) -> Line: ...\n\n\nclass TrajectoryRectangle(TrajectoryBase):\n \"\"\"\n Compute the trajectory of each corner of the rectangle\n \"\"\"\n\n def __init__(self, shape: Rectangle, velocity: Point):\n super(TrajectoryRectangle, self).__init__(shape, velocity)\n assert isinstance(shape, Rectangle)\n self._reference = self.shape.pos\n\n @property\n def center(self) -> Line:\n \"\"\"\n Line representing the trajectory of the center of the rectangle\n \"\"\"\n return Line(self.shape.pos, self.shape.pos + self.velocity)\n\n @property\n def top_right(self) -> Line:\n \"\"\"\n Line representing the trajectory of the point on the top right corner of the rectangle\n \"\"\"\n start = Point(self.shape.right_bound, self.shape.top_bound)\n return Line(start, start + self.velocity)\n\n @property\n def top_left(self) -> Line:\n \"\"\"\n Line representing the trajectory of the point on the top left corner of the rectangle\n \"\"\"\n start = Point(self.shape.left_bound, self.shape.top_bound)\n return Line(start, start + self.velocity)\n\n @property\n def bottom_right(self) -> Line:\n \"\"\"\n Line representing the trajectory of the point on the bottom right corner of the rectangle\n \"\"\"\n start = Point(self.shape.right_bound, self.shape.bottom_bound)\n return Line(start, start + self.velocity)\n\n @property\n def bottom_left(self) -> Line:\n \"\"\"\n Line representing the trajectory of the point on the bottom left corner of the rectangle\n \"\"\"\n start = Point(self.shape.left_bound, self.shape.bottom_bound)\n return Line(start, start + 
self.velocity)\n\n\nclass TrajectoryLine(TrajectoryRectangle):\n \"\"\"\n Create a bounding box around the line and compute the trajectory as if it were a rectangle.\n \"\"\"\n\n # noinspection PyTypeChecker\n # noinspection PyUnresolvedReferences\n def __init__(self, shape: Line, velocity: Point):\n super(TrajectoryLine, self).__init__(shape, velocity)\n assert isinstance(shape, Line)\n self._reference = self.shape.start\n\n height = abs(self.shape.start.y - self.shape.end.y)\n width = abs(self.shape.start.x - self.shape.end.x)\n center = Point((self.shape.start.x + self.shape.end.x) / 2,\n (self.shape.start.y + self.shape.end.y) / 2)\n self.shape = Rectangle(height=height, width=width)\n self.shape.pos = center\n\n\nclass TrajectoryPoint(TrajectoryBase):\n def __init__(self, shape: Point, velocity: Point):\n super(TrajectoryPoint, self).__init__(shape, velocity)\n assert isinstance(shape, Point)\n self._reference = self.shape\n\n @property\n def corners(self) -> Tuple[Line, ...]:\n return (self._trajectory,)\n\n @property\n def _trajectory(self) -> Line:\n return Line(self.shape, self.shape + self.velocity)\n\n @property\n def center(self) -> Line:\n return self._trajectory\n\n @property\n def top_right(self) -> Line:\n return self._trajectory\n\n @property\n def top_left(self) -> Line:\n return self._trajectory\n\n @property\n def bottom_right(self) -> Line:\n return self._trajectory\n\n @property\n def bottom_left(self) -> Line:\n return self._trajectory\n\n\nclass Trajectory(object):\n def __new__(cls, shape: Shape, velocity: Point):\n if isinstance(shape, Point):\n return TrajectoryPoint(shape, velocity)\n elif isinstance(shape, Line):\n return TrajectoryLine(shape, velocity)\n elif isinstance(shape, Rectangle):\n return TrajectoryRectangle(shape, velocity)\n else:\n raise NotImplementedError(f\"No implementation of Trajectory for input shape of type {type(shape)}\")\n\n\nclass Canvas(Rectangle):\n action_meanings = {0: 'NOOP',\n 1: 'UP',\n 2: 'DOWN', }\n 
actions = {k: v for v, k in action_meanings.items()}\n\n def __init__(self, paddle_l: Paddle, paddle_r: Paddle, ball: Ball, snell: Snell, ball_speed: int, height: int,\n width: int, their_update_probability: float, refract: bool, uniform_speed: bool):\n\n super().__init__(height=height, width=width, visibility='none', render_value=0)\n self.pos = self.width / 2, self.height / 2\n\n assert isinstance(their_update_probability, (float, int)), \\\n f\"their_update_probability must be numeric, not {type(their_update_probability)}\"\n assert 0 <= their_update_probability <= 1, f\"{their_update_probability} outside allowed bounds [0, 1]\"\n\n self.their_update_probability = their_update_probability\n self.default_ball_speed = ball_speed\n\n # Initialize objects\n self.snell = snell\n self.ball = ball\n self.paddle_l = paddle_l\n self.paddle_r = paddle_r\n self.sprites = [self, snell, paddle_l, paddle_r, ball]\n\n self.uniform_speed = uniform_speed\n self.refract = refract\n self.we_scored = False\n self.they_scored = False\n\n # score\n self.our_score = 0\n self.their_score = 0\n\n def register_sprite(self, sprite: Shape):\n assert issubclass(type(sprite), Shape), f\"sprite must be subclassed from Shape\"\n # noinspection PyTypeChecker\n self.sprites.insert(-1, sprite) # insert before ball\n\n @property\n def left_bound(self):\n return 0\n\n @property\n def right_bound(self):\n return self.width\n\n @property\n def top_bound(self):\n return self.height\n\n @property\n def bottom_bound(self):\n return 0\n\n # noinspection PyMethodOverriding\n def to_numpy(self) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Performs masked rendering of objects in `self.sprites`. 
Priority is determined by the ordering of the list,\n earlier objects will be obscured by later ones.\n\n :return: (state, rendering)\n \"\"\"\n state = self._zero_rgb_image(round(self.height), round(self.width))\n rendering = self._zero_rgb_image(round(self.height), round(self.width))\n\n for sprite in self.sprites[1:]: # skip self\n sprite_state, sprite_rendering = sprite.to_numpy(self.height, self.width)\n state[sprite_state != 0] = sprite_state[sprite_state != 0]\n rendering[sprite_rendering != 0] = sprite_rendering[sprite_rendering != 0]\n return state, rendering\n\n def score(self, who):\n \"\"\"\n Increment the score and reset the ball\n\n :param who: 'we' or 'they'\n :return: reward\n \"\"\"\n if who == 'they':\n reward = -1\n self.their_score += 1\n elif who == 'we':\n reward = 1\n self.our_score += 1\n else:\n raise ValueError(f\"who must be 'we' or 'they', not {who}\")\n\n self._reset_ball()\n return reward\n\n def step(self, action):\n self._move_our_paddle(action)\n self._step_their_paddle()\n reward = self._step_ball()\n self._step_snell()\n return reward\n\n def get_state_size(self) -> Tuple[int, int]:\n \"\"\"\n Return the tuple (height, width) of the canvas dimensions\n \"\"\"\n return self.height, self.width\n\n def _step_snell(self) -> None:\n \"\"\"\n Step the snell layer\n \"\"\"\n self.snell.step()\n\n def _reset_ball(self):\n self.ball.reset((self.width / 2, self.height / 2))\n\n def _move_our_paddle(self, action) -> None:\n \"\"\"\n Move our paddle according to the provided action\n\n :param action: the action code\n \"\"\"\n if not isinstance(action, int):\n action = action.item() # pops the item if the action is a single tensor\n assert action in [a for a in self.action_meanings.keys()], f\"{action} is not a valid action\"\n if action == self.actions['UP']:\n if self.paddle_r.top_bound < self.top_bound:\n self.paddle_r.up()\n elif action == self.actions['DOWN']:\n if self.paddle_r.bottom_bound > self.bottom_bound:\n 
self.paddle_r.down()\n\n def _step_ball(self, speed: Union[float, int] = None):\n \"\"\"\n Move the ball to the next position according to the speed of the layer it is in.\n\n :param speed: used to continue the trajectory of a ball that interacted with an object\n \"\"\"\n trajectory = self._get_trajectory(speed)\n\n self._get_first_intersection(trajectory)\n reward = 0\n if trajectory.intersection is None: # No intersection\n self.ball.pos = trajectory.center.end\n else:\n reward = self._interaction_dispatcher(trajectory)\n\n return reward\n\n def _get_trajectory(self, speed) -> TrajectoryBase:\n \"\"\"\n Get the ball's trajectory\n\n :param speed: The speed of the starting medium\n :return: trajectory `Line`\n \"\"\"\n if speed is None:\n speed = self._get_ball_speed()\n if self.ball.has_volume:\n trajectory = Trajectory(self.ball, self.ball.get_velocity(speed))\n else:\n trajectory = Trajectory(self.ball.pos, self.ball.get_velocity(speed))\n return trajectory\n\n def _interaction_dispatcher(self, trajectory: TrajectoryBase):\n \"\"\"\n Dispatch data to the appropriate method based on the interaction `obj`.\n\n :param trajectory: the trajectory of the ball\n \"\"\"\n reward = 0\n\n obj = trajectory.intersected_object\n if obj is self: # border interaction\n reward = self._interact_border(trajectory)\n elif isinstance(obj, Paddle): # paddle interaction\n self._interact_paddle(trajectory)\n elif isinstance(obj, Snell):\n self._refract(trajectory)\n\n return reward\n\n def _interact_paddle(self, trajectory: TrajectoryBase) -> float:\n paddle = trajectory.intersected_object\n paddle_fraction = paddle.get_fraction_of_paddle(trajectory.get_center_at_intersection())\n angle = paddle_fraction * paddle.max_angle\n angle = math.pi - angle if self.ball.unit_velocity.x > 0 else angle\n\n self.ball.angle = angle\n reward = self._finish_step_ball(trajectory)\n return reward\n\n def _refract(self, trajectory: TrajectoryBase):\n edge = trajectory.intersected_edge\n if 
self.refract:\n s0, s1 = self._get_start_and_end_speed(trajectory)\n\n angle = edge.angle_to_normal(trajectory.center)\n if self._exceeds_critical_angle(angle, s0, s1):\n # TODO: reflect to arbitrary angle (non-vertical interface)\n self._reflect(Point(-1, 1), trajectory)\n return\n\n new_angle = math.asin(s1 / s0 * math.sin(angle))\n\n boundary_angle, new_angle = self._adjust_refraction_to_boundary_angle(edge, new_angle)\n new_angle = self._adjust_refraction_to_direction_of_incidence(boundary_angle, new_angle, trajectory)\n self.ball.angle = new_angle\n\n return self._finish_step_ball(trajectory)\n\n @staticmethod\n def _exceeds_critical_angle(angle: float, s0: float, s1: float) -> bool:\n \"\"\"\n Test if the angle exceeds the critical angle\n\n :param angle: The angle to the normal of the boundary\n :param s0: The speed of the original medium\n :param s1: The speed of the next medium\n :return: True if the angle exceeds the critical angle\n \"\"\"\n if s1 > s0: # if the second speed is faster, there is a critical angle\n critical_angle = get_critical_angle(s0, s1)\n if abs(angle) >= critical_angle:\n return True\n return False\n\n @staticmethod\n def _adjust_refraction_to_direction_of_incidence(boundary_angle: float, new_angle: float,\n trajectory: TrajectoryBase) -> float:\n \"\"\"\n If the direction of incidence was from the right of the boundary, reflect `new_angle`, otherwise, return\n `new_angle` without modification.\n\n :param boundary_angle: must be in the first or fourth quadrant\n :param new_angle: The angle to be reflected in the return\n :param trajectory: The angle of the incoming ball in global coordinates\n :return: The (possibly) reflected `new_angle`\n \"\"\"\n angle = trajectory.center.angle\n assert -math.pi / 2 <= boundary_angle <= math.pi / 2, \"boundary_angle should be in first or fourth quadrant\"\n # noinspection PyChainedComparisons\n if boundary_angle >= 0 and boundary_angle < angle % (2 * math.pi) < boundary_angle + math.pi:\n 
new_angle = math.pi - new_angle\n elif (boundary_angle < 0 and\n boundary_angle % (2 * math.pi) + math.pi < angle % (2 * math.pi) < boundary_angle % (\n 2 * math.pi)):\n new_angle = math.pi - new_angle\n return new_angle\n\n @staticmethod\n def _adjust_refraction_to_boundary_angle(boundary: Line, new_angle: float) -> Tuple[float, float]:\n \"\"\"\n Compute the rotation of `new_angle` back to global coordinates. Assume incidence from the left side of the\n boundary.\n\n :param boundary: The boundary `primitives.Line` object\n :param new_angle: The refracted angle normal to the boundary\n :return: The new angle in global coordinates\n \"\"\"\n # TODO: verify this works with a non-vertical interface\n\n boundary_angle = boundary.angle % (2 * math.pi)\n if 0 <= boundary_angle < math.pi / 2: # in the first quadrant\n boundary_angle = boundary_angle\n new_angle = boundary_angle - math.pi / 2 + new_angle\n elif math.pi / 2 <= boundary_angle < math.pi: # in the second quadrant\n boundary_angle = math.pi - boundary_angle\n new_angle = math.pi / 2 - boundary_angle + new_angle\n elif math.pi <= boundary_angle < 3 * math.pi / 2: # in the third quadrant\n boundary_angle = math.pi - boundary_angle\n new_angle = boundary_angle - math.pi / 2 + new_angle\n elif 2 * math.pi / 3 <= boundary_angle < 2 * math.pi: # in the fourth quadrant\n boundary_angle = 2 * math.pi - boundary_angle\n new_angle = math.pi / 2 - boundary_angle - new_angle\n else:\n raise ValueError(f'Unexpected angle {boundary_angle}')\n return boundary_angle, new_angle\n\n def _get_start_and_end_speed(self, trajectory: TrajectoryBase) -> Tuple[float, float]:\n \"\"\"\n Get the speed at the start of the trajectory and the speed at the end of the trajectory.\n\n :param trajectory: The trajectory `primitives.Line` object\n :return: (initial speed, final speed)\n \"\"\"\n snell = trajectory.intersected_object\n # todo: detect if start is in some other snell layer\n if snell.is_in(trajectory.center.start):\n s0 = 
snell.speed\n s1 = self.default_ball_speed\n else:\n s0 = self.default_ball_speed\n s1 = snell.speed\n return s0, s1\n\n def _interact_border(self, trajectory: TrajectoryBase) -> float:\n reward = 0.\n edge = trajectory.intersected_edge\n\n if edge == self.top_edge or edge == self.bot_edge:\n self._reflect(Point(1, -1), trajectory)\n elif edge == self.left_edge:\n reward = self.score('we')\n elif edge == self.right_edge:\n reward = self.score('they')\n else:\n raise ValueError(f'invalid edge, {edge}')\n\n return reward\n\n def _reflect(self, direction: Point, trajectory: TrajectoryBase):\n \"\"\"\n Multiplies the velocity of the ball by `direction`, continues the path of the ball by calculating the remaining\n speed using trajectory and point.\n\n :param direction: velocity multiplier\n :param trajectory: The original trajectory of the ball\n \"\"\"\n self.ball.unit_velocity *= direction\n return self._finish_step_ball(trajectory)\n\n def _finish_step_ball(self, trajectory: TrajectoryBase):\n \"\"\"\n Finish the remainder of the trajectory after any interactions.\n\n :param trajectory: The original trajectory\n :return: reward\n \"\"\"\n point = trajectory.get_center_at_intersection()\n self.ball.pos = point + self.ball.unit_velocity * EPSILON\n return self._step_ball(trajectory.remaining_speed)\n\n def _get_first_intersection(self, trajectory: TrajectoryBase):\n \"\"\"\n Find the first point at which the trajectory interacted with an object.\n\n :param trajectory: the trajectory of the object\n :return: (shape object interacted with, point of interaction, line object interacted with)\n \"\"\"\n for trajectory_line in trajectory.corners:\n for o in self.sprites:\n if not isinstance(o, Ball):\n intersection_result = o.get_intersection(trajectory_line)\n if intersection_result is not None:\n edge, point = intersection_result\n if trajectory.intersection is None:\n trajectory.set_intersection(point, trajectory_line, o, edge)\n elif point == trajectory.intersection and 
trajectory_line == trajectory.intersected_trajectory:\n raise NotImplementedError(\"overlapping parallel edges not implemented\")\n elif (point.l2_distance(trajectory_line.start) <\n trajectory.intersection.l2_distance(trajectory.intersected_trajectory.start)):\n trajectory.set_intersection(point, trajectory_line, o, edge)\n\n def _get_ball_speed(self) -> float:\n if self.uniform_speed:\n return self.default_ball_speed\n else:\n if self.ball.is_overlapping(self.snell):\n return self.snell.speed\n else:\n return self.default_ball_speed\n\n def _step_their_paddle(self):\n \"\"\"\n Move the opponents paddle. Override this in a subclass to change the behavior.\n \"\"\"\n if random.random() < self.their_update_probability:\n if self.paddle_l.y < self.ball.y:\n if self.paddle_l.top_bound < self.top_bound:\n self.paddle_l.up()\n else:\n if self.paddle_l.bottom_bound > self.bottom_bound:\n self.paddle_l.down()\n",
"step-ids": [
46,
51,
75,
76,
83
]
}
|
[
46,
51,
75,
76,
83
] |
#import getCanditatemap() from E_18_hacksub
import operator, pdb, collections, string
ETAOIN = """ etaoinsrhldcumgyfpwb.,vk0-'x)(1j2:q"/5!?z346879%[]*=+|_;\>$#^&@<~{}`""" #order taken from https://mdickens.me/typing/theory-of-letter-frequency.html, with space added at the start, 69 characters overall
length = 128
#ETAOIN ="ETAOINSHRDLCUMWFGYPBVKJXQZ"
def getCanditatemap():
    """Return a dict mapping every character chr(0)..chr(length-1) to a zero count."""
    return {chr(code): 0 for code in range(length)}
def getLettercount(mess):
    """Count occurrences in *mess* of each character tracked by getCanditatemap().

    Characters outside the tracked range are silently ignored.
    """
    counts = getCanditatemap()
    for symbol in mess:
        if symbol in counts:
            counts[symbol] = counts[symbol] + 1
    return counts
def getFreqOrder(mess):
    """Return the tracked characters of *mess* ordered from most to least frequent.

    Ties within a frequency bucket are broken by reverse ETAOIN rank, matching
    the tie-break used by classic frequency-analysis recipes.
    """
    counts = getLettercount(mess)

    # Bucket characters by their frequency count.
    buckets = {}
    for code in range(length):
        ch = chr(code)
        buckets.setdefault(counts[ch], []).append(ch)

    # Within each bucket, order by reverse ETAOIN rank and collapse to a string.
    for freq in buckets:
        buckets[freq].sort(key=ETAOIN.find, reverse=True)
        buckets[freq] = ''.join(buckets[freq])

    # Concatenate the buckets from the highest frequency down to the lowest.
    return ''.join(buckets[freq] for freq in sorted(buckets, reverse=True))
def englishFreqMatch(message):
    """Score how closely *message*'s character frequencies match English.

    Adds one point for each of ETAOIN's 16 most common characters found among
    the message's 16 most frequent characters, and one point for each of the
    16 least common found among the message's 16 least frequent, so the score
    ranges 0..32.

    Bug fixed: the original wrote ``(ETAOIN[:16] or ETAOIN[-16:])`` — a truthy
    first operand means ``or`` always yields it, so the least-common half was
    never checked.  Fixed per the standard frequency-match recipe (top half
    against top half, bottom half against bottom half).
    """
    matchscore = 0
    # Lower-case first: only relative frequency matters, not letter case.
    freqOrder = getFreqOrder(message.lower())
    for commletter in ETAOIN[:16]:
        if commletter in freqOrder[:16]:
            matchscore += 1
    for rareletter in ETAOIN[-16:]:
        if rareletter in freqOrder[-16:]:
            matchscore += 1
    return matchscore
|
normal
|
{
"blob_id": "63a9060e9933cc37b7039833be5f071cc7bf45bf",
"index": 7873,
"step-1": "<mask token>\n\n\ndef getLettercount(mess):\n charcount = getCanditatemap()\n for char in mess:\n if char in charcount:\n charcount[char] += 1\n return charcount\n\n\n<mask token>\n\n\ndef englishFreqMatch(message):\n matchscore = 0\n freqOrder = getFreqOrder(message.lower())\n for commletter in (ETAOIN[:16] or ETAOIN[-16:]):\n if commletter in (freqOrder[:16] or freqOrder[-16:]):\n matchscore += 1\n return matchscore\n",
"step-2": "<mask token>\n\n\ndef getCanditatemap():\n return dict.fromkeys((chr(i) for i in range(length)), 0)\n\n\ndef getLettercount(mess):\n charcount = getCanditatemap()\n for char in mess:\n if char in charcount:\n charcount[char] += 1\n return charcount\n\n\n<mask token>\n\n\ndef englishFreqMatch(message):\n matchscore = 0\n freqOrder = getFreqOrder(message.lower())\n for commletter in (ETAOIN[:16] or ETAOIN[-16:]):\n if commletter in (freqOrder[:16] or freqOrder[-16:]):\n matchscore += 1\n return matchscore\n",
"step-3": "<mask token>\nETAOIN = (\n ' etaoinsrhldcumgyfpwb.,vk0-\\'x)(1j2:q\"/5!?z346879%[]*=+|_;\\\\>$#^&@<~{}`')\nlength = 128\n\n\ndef getCanditatemap():\n return dict.fromkeys((chr(i) for i in range(length)), 0)\n\n\ndef getLettercount(mess):\n charcount = getCanditatemap()\n for char in mess:\n if char in charcount:\n charcount[char] += 1\n return charcount\n\n\ndef getFreqOrder(mess):\n lettertofreq = getLettercount(mess)\n freqtochar = {}\n for i in range(length):\n i = chr(i)\n if lettertofreq[i] not in freqtochar:\n freqtochar[lettertofreq[i]] = [i]\n else:\n freqtochar[lettertofreq[i]].append(i)\n for freq in freqtochar:\n freqtochar[freq].sort(key=ETAOIN.find, reverse=True)\n freqtochar[freq] = ''.join(freqtochar[freq])\n freqpairs = collections.OrderedDict(sorted(freqtochar.items(), reverse=\n True))\n freqorder = []\n values = freqpairs.values()\n for freqpair in values:\n freqorder.append(freqpair)\n return ''.join(freqorder)\n\n\ndef englishFreqMatch(message):\n matchscore = 0\n freqOrder = getFreqOrder(message.lower())\n for commletter in (ETAOIN[:16] or ETAOIN[-16:]):\n if commletter in (freqOrder[:16] or freqOrder[-16:]):\n matchscore += 1\n return matchscore\n",
"step-4": "import operator, pdb, collections, string\nETAOIN = (\n ' etaoinsrhldcumgyfpwb.,vk0-\\'x)(1j2:q\"/5!?z346879%[]*=+|_;\\\\>$#^&@<~{}`')\nlength = 128\n\n\ndef getCanditatemap():\n return dict.fromkeys((chr(i) for i in range(length)), 0)\n\n\ndef getLettercount(mess):\n charcount = getCanditatemap()\n for char in mess:\n if char in charcount:\n charcount[char] += 1\n return charcount\n\n\ndef getFreqOrder(mess):\n lettertofreq = getLettercount(mess)\n freqtochar = {}\n for i in range(length):\n i = chr(i)\n if lettertofreq[i] not in freqtochar:\n freqtochar[lettertofreq[i]] = [i]\n else:\n freqtochar[lettertofreq[i]].append(i)\n for freq in freqtochar:\n freqtochar[freq].sort(key=ETAOIN.find, reverse=True)\n freqtochar[freq] = ''.join(freqtochar[freq])\n freqpairs = collections.OrderedDict(sorted(freqtochar.items(), reverse=\n True))\n freqorder = []\n values = freqpairs.values()\n for freqpair in values:\n freqorder.append(freqpair)\n return ''.join(freqorder)\n\n\ndef englishFreqMatch(message):\n matchscore = 0\n freqOrder = getFreqOrder(message.lower())\n for commletter in (ETAOIN[:16] or ETAOIN[-16:]):\n if commletter in (freqOrder[:16] or freqOrder[-16:]):\n matchscore += 1\n return matchscore\n",
"step-5": "#import getCanditatemap() from E_18_hacksub\nimport operator, pdb, collections, string\n\nETAOIN = \"\"\" etaoinsrhldcumgyfpwb.,vk0-'x)(1j2:q\"/5!?z346879%[]*=+|_;\\>$#^&@<~{}`\"\"\" #order taken from https://mdickens.me/typing/theory-of-letter-frequency.html, with space added at the start, 69 characters overall\nlength = 128\n#ETAOIN =\"ETAOINSHRDLCUMWFGYPBVKJXQZ\"\n\ndef getCanditatemap():\n return (dict.fromkeys((chr(i) for i in range (length)),0)) # https://stackoverflow.com/questions/2241891/how-to-initialize-a-dict-with-keys-from-a-list-and-empty-value-in-python/2241904\n\ndef getLettercount(mess):\n \n charcount = getCanditatemap()\n for char in mess:\n if char in charcount:\n charcount[char] +=1\n \n return charcount\n\ndef getFreqOrder(mess):\n\n #get a dictionary of each letter and its frequency count\n lettertofreq = getLettercount(mess)\n\n # second, make a dictionary of each frequency count to each letter(s) with that frequency\n freqtochar = {}\n for i in range(length):\n i=chr(i)\n if lettertofreq[i] not in freqtochar: # look for frequencies not present\n freqtochar[lettertofreq[i]] = [i] # add if not present, else append\n else:\n freqtochar[lettertofreq[i]].append(i)\n\n #reverse ETAOIN order, for each list of letters (per frequency)\n for freq in freqtochar:\n freqtochar[freq].sort(key=ETAOIN.find, reverse=True)\n freqtochar[freq] = ''.join(freqtochar[freq]) # convert to string\n \n # sort them in order of frequency\n #freqpairs = sorted(freqtochar.items(), key=operator.itemgetter(0), reverse=True)\n freqpairs = collections.OrderedDict(sorted(freqtochar.items(), reverse=True))\n \n # extractst the values and joins them together\n freqorder = []\n #print freqtochar\n values = freqpairs.values() # grabs the values only\n for freqpair in values:\n #print freqpair\n #pdb.set_trace() \n freqorder.append(freqpair)\n\n return ''.join(freqorder)\n\ndef englishFreqMatch(message):\n \n #print message\n matchscore =0\n freqOrder = 
getFreqOrder(message.lower()) # convert to lower case as we are just looking for frequency match score, so case of the letter should not matter\n #print freqOrder\n #pdb.set_trace()\n\n for commletter in (ETAOIN[:16] or ETAOIN[-16:]):\n if commletter in (freqOrder[:16] or freqOrder[-16:]):\n matchscore +=1\n return matchscore\n",
"step-ids": [
2,
3,
5,
6,
7
]
}
|
[
2,
3,
5,
6,
7
] |
from random import shuffle, choice
from typing import Dict, List, Tuple
# Major-scale degree (1..7) -> semitone offset from the root (0..11).
note_to_midi: Dict[int, int] = {
    1: 0,
    2: 2,
    3: 4,
    4: 5,
    5: 7,
    6: 9,
    7: 11,
}

# Inverse of note_to_midi: diatonic semitone offset -> scale degree.
# Chromatic offsets (1, 3, 6, 8, 10) are deliberately absent.
midi_to_note: Dict[int, int] = {
    0: 1,
    2: 2,
    4: 3,
    5: 4,
    7: 5,
    9: 6,
    11: 7,
}
class Note:
    """A diatonic scale degree normalised to the range 1..7.

    Equality, ordering and hashing compare the degree number only, so e.g.
    ``Note(8) == Note(1)``.
    """

    num: int

    @classmethod
    def choice(cls, *args: int):
        """Build a Note from one of *args*, chosen uniformly at random."""
        return Note(choice(args))

    @classmethod
    def from_midi(cls, midi: int, root: int):
        """Build a Note from a MIDI pitch relative to *root*.

        Raises ValueError when the reduced pitch is not a diatonic degree.
        NOTE(review): reduces with ``midi % root`` — presumably intended as a
        root/octave reduction; confirm against callers.
        """
        degree = midi_to_note.get(midi % root)
        if isinstance(degree, int):
            return cls(degree)
        raise ValueError()

    def __init__(self, num: int):
        # Wrap any integer into 1..7 (1-based modular arithmetic).
        self.num = (num - 1) % 7 + 1

    def __int__(self):
        return self.num

    def __repr__(self):
        return str(self.num)

    def __str__(self):
        return f'Note: {self.num}'

    def __hash__(self):
        return hash(self.num)

    def _distance(self, other):
        # Signed degree difference; only Note operands are comparable.
        if not isinstance(other, Note):
            raise TypeError()
        return self.num - other.num

    def __eq__(self, other):
        return self._distance(other) == 0

    def __lt__(self, other):
        return self._distance(other) < 0

    def __le__(self, other):
        return self._distance(other) <= 0

    def __gt__(self, other):
        return self._distance(other) > 0

    def __ge__(self, other):
        return self._distance(other) >= 0

    def _get_interval(self, interval: int):
        # Both directions from this degree; a zero interval collapses to one note.
        below = Note(self.num - interval)
        above = Note(self.num + interval)
        return {below, above}

    def get_unison(self):
        """The degree itself (interval of 0 steps)."""
        return self._get_interval(0)

    def get_second(self):
        """Degrees one diatonic step away."""
        return self._get_interval(1)

    def get_thirds(self):
        """Degrees two diatonic steps away."""
        return self._get_interval(2)

    def get_forth(self):
        """Degrees three diatonic steps away."""
        return self._get_interval(3)

    def get_fifth(self):
        """Degrees four diatonic steps away."""
        return self._get_interval(4)

    def get_sixth(self):
        """Degrees five diatonic steps away."""
        return self._get_interval(5)

    def get_seventh(self):
        """Degrees six diatonic steps away."""
        return self._get_interval(6)

    def inv(self):
        # NOTE(review): mirrors the degree as 6 - num (not 9 - num); confirm
        # this is the intended inversion axis.
        return Note(6 - self.num)

    def get_next_possible_notes(self, /, leap=True):
        """Neighbouring degrees — steps, plus thirds when *leap* — shuffled."""
        offsets = [-1, 1]
        if leap:
            offsets += [-2, 2]
        candidates = [Note(self.num + off) for off in offsets]
        shuffle(candidates)
        return candidates

    def __sub__(self, other) -> int:
        """Absolute diatonic distance (0..3), wrapping around the octave."""
        gap = abs(self._distance(other))
        return 7 - gap if gap > 3 else gap

    def get_all_possible_midi(self, root: int) -> List[int]:
        """All MIDI pitches (0..127) of this degree across every octave."""
        base = self.convert_to_midi(root)
        assert base >= 0
        # Reduce to the lowest non-negative octave, then step up by octaves.
        return list(range(base % 12, 128, 12))

    def convert_to_midi(self, root: int) -> int:
        """MIDI pitch of this degree in the octave anchored at *root*."""
        return root + note_to_midi[self.num]
# Degrees usable as inversion targets.
# NOTE(review): presumably the degrees considered valid axes/results for
# melodic inversion — confirm against callers.
INVERSE_POSSIBLE_NOTE = {
    Note(2), Note(3), Note(4),
}
def choose_from_inverse_possible_note():
    """Pick one Note uniformly at random from INVERSE_POSSIBLE_NOTE."""
    candidates = list(INVERSE_POSSIBLE_NOTE)
    return choice(candidates)
# Every diatonic scale degree (1..7); Note.__init__ normalisation guarantees
# the set has exactly seven distinct members.
ALL_NOTES = {
    Note(1), Note(2), Note(3), Note(4), Note(5), Note(6), Note(7),
}
def choose_from_all_notes():
    """Pick one Note uniformly at random from ALL_NOTES."""
    pool = list(ALL_NOTES)
    return choice(pool)
def fill_in_thirds() -> Tuple[Note, Note]:
    """Return a random starting Note paired with a random note a third away."""
    start = choice(list(ALL_NOTES))
    partner = choice(list(start.get_thirds()))
    return start, partner
|
normal
|
{
"blob_id": "d70f77713abf4b35db9de72c1edbf4bf4580b2a4",
"index": 8795,
"step-1": "<mask token>\n\n\nclass Note:\n num: int\n\n @classmethod\n def choice(cls, *args: int):\n return Note(choice(args))\n\n @classmethod\n def from_midi(cls, midi: int, root: int):\n note = midi_to_note.get(midi % root)\n if isinstance(note, int):\n return cls(note)\n raise ValueError()\n\n def __init__(self, num: int):\n while num > 7:\n num -= 7\n while num <= 0:\n num += 7\n self.num = num\n <mask token>\n\n def __repr__(self):\n return str(self.num)\n\n def __str__(self):\n return f'Note: {self.num}'\n\n def __hash__(self):\n return hash(self.num)\n\n def _distance(self, other):\n if isinstance(other, Note):\n return self.num - other.num\n raise TypeError()\n\n def __eq__(self, other):\n return self._distance(other) == 0\n\n def __lt__(self, other):\n return self._distance(other) < 0\n\n def __le__(self, other):\n return self._distance(other) <= 0\n\n def __gt__(self, other):\n return self._distance(other) > 0\n\n def __ge__(self, other):\n return self._distance(other) >= 0\n\n def _get_interval(self, interval: int):\n return {Note(self.num - interval), Note(self.num + interval)}\n\n def get_unison(self):\n return self._get_interval(0)\n\n def get_second(self):\n return self._get_interval(1)\n <mask token>\n\n def get_forth(self):\n return self._get_interval(3)\n\n def get_fifth(self):\n return self._get_interval(4)\n\n def get_sixth(self):\n return self._get_interval(5)\n\n def get_seventh(self):\n return self._get_interval(6)\n\n def inv(self):\n return Note(6 - self.num)\n <mask token>\n\n def __sub__(self, other) ->int:\n dist = abs(self._distance(other))\n if dist > 3:\n dist = 7 - dist\n return dist\n\n def get_all_possible_midi(self, root: int) ->List[int]:\n midi = self.convert_to_midi(root)\n assert midi >= 0\n ret: List[int] = []\n while midi - 12 >= 0:\n midi -= 12\n while midi <= 127:\n ret.append(midi)\n midi += 12\n return ret\n\n def convert_to_midi(self, root: int) ->int:\n return note_to_midi[self.num] + root\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Note:\n num: int\n\n @classmethod\n def choice(cls, *args: int):\n return Note(choice(args))\n\n @classmethod\n def from_midi(cls, midi: int, root: int):\n note = midi_to_note.get(midi % root)\n if isinstance(note, int):\n return cls(note)\n raise ValueError()\n\n def __init__(self, num: int):\n while num > 7:\n num -= 7\n while num <= 0:\n num += 7\n self.num = num\n <mask token>\n\n def __repr__(self):\n return str(self.num)\n\n def __str__(self):\n return f'Note: {self.num}'\n\n def __hash__(self):\n return hash(self.num)\n\n def _distance(self, other):\n if isinstance(other, Note):\n return self.num - other.num\n raise TypeError()\n\n def __eq__(self, other):\n return self._distance(other) == 0\n\n def __lt__(self, other):\n return self._distance(other) < 0\n\n def __le__(self, other):\n return self._distance(other) <= 0\n\n def __gt__(self, other):\n return self._distance(other) > 0\n\n def __ge__(self, other):\n return self._distance(other) >= 0\n\n def _get_interval(self, interval: int):\n return {Note(self.num - interval), Note(self.num + interval)}\n\n def get_unison(self):\n return self._get_interval(0)\n\n def get_second(self):\n return self._get_interval(1)\n\n def get_thirds(self):\n return self._get_interval(2)\n\n def get_forth(self):\n return self._get_interval(3)\n\n def get_fifth(self):\n return self._get_interval(4)\n\n def get_sixth(self):\n return self._get_interval(5)\n\n def get_seventh(self):\n return self._get_interval(6)\n\n def inv(self):\n return Note(6 - self.num)\n\n def get_next_possible_notes(self, /, leap=True):\n ret = [Note(self.num - 1), Note(self.num + 1)]\n if leap:\n ret += [Note(self.num - 2), Note(self.num + 2)]\n shuffle(ret)\n return ret\n\n def __sub__(self, other) ->int:\n dist = abs(self._distance(other))\n if dist > 3:\n dist = 7 - dist\n return dist\n\n def get_all_possible_midi(self, root: int) ->List[int]:\n midi = self.convert_to_midi(root)\n assert midi >= 0\n ret: List[int] = 
[]\n while midi - 12 >= 0:\n midi -= 12\n while midi <= 127:\n ret.append(midi)\n midi += 12\n return ret\n\n def convert_to_midi(self, root: int) ->int:\n return note_to_midi[self.num] + root\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Note:\n num: int\n\n @classmethod\n def choice(cls, *args: int):\n return Note(choice(args))\n\n @classmethod\n def from_midi(cls, midi: int, root: int):\n note = midi_to_note.get(midi % root)\n if isinstance(note, int):\n return cls(note)\n raise ValueError()\n\n def __init__(self, num: int):\n while num > 7:\n num -= 7\n while num <= 0:\n num += 7\n self.num = num\n\n def __int__(self):\n return self.num\n\n def __repr__(self):\n return str(self.num)\n\n def __str__(self):\n return f'Note: {self.num}'\n\n def __hash__(self):\n return hash(self.num)\n\n def _distance(self, other):\n if isinstance(other, Note):\n return self.num - other.num\n raise TypeError()\n\n def __eq__(self, other):\n return self._distance(other) == 0\n\n def __lt__(self, other):\n return self._distance(other) < 0\n\n def __le__(self, other):\n return self._distance(other) <= 0\n\n def __gt__(self, other):\n return self._distance(other) > 0\n\n def __ge__(self, other):\n return self._distance(other) >= 0\n\n def _get_interval(self, interval: int):\n return {Note(self.num - interval), Note(self.num + interval)}\n\n def get_unison(self):\n return self._get_interval(0)\n\n def get_second(self):\n return self._get_interval(1)\n\n def get_thirds(self):\n return self._get_interval(2)\n\n def get_forth(self):\n return self._get_interval(3)\n\n def get_fifth(self):\n return self._get_interval(4)\n\n def get_sixth(self):\n return self._get_interval(5)\n\n def get_seventh(self):\n return self._get_interval(6)\n\n def inv(self):\n return Note(6 - self.num)\n\n def get_next_possible_notes(self, /, leap=True):\n ret = [Note(self.num - 1), Note(self.num + 1)]\n if leap:\n ret += [Note(self.num - 2), Note(self.num + 2)]\n shuffle(ret)\n return ret\n\n def __sub__(self, other) ->int:\n dist = abs(self._distance(other))\n if dist > 3:\n dist = 7 - dist\n return dist\n\n def get_all_possible_midi(self, root: int) ->List[int]:\n midi = self.convert_to_midi(root)\n assert midi 
>= 0\n ret: List[int] = []\n while midi - 12 >= 0:\n midi -= 12\n while midi <= 127:\n ret.append(midi)\n midi += 12\n return ret\n\n def convert_to_midi(self, root: int) ->int:\n return note_to_midi[self.num] + root\n\n\n<mask token>\n\n\ndef choose_from_all_notes():\n return choice(list(ALL_NOTES))\n\n\n<mask token>\n",
"step-4": "from random import shuffle, choice\nfrom typing import Dict, List, Tuple\nnote_to_midi: Dict[int, int] = {(1): 0, (2): 2, (3): 4, (4): 5, (5): 7, (6):\n 9, (7): 11}\nmidi_to_note: Dict[int, int] = {(0): 1, (2): 2, (4): 3, (5): 4, (7): 5, (9):\n 6, (11): 7}\n\n\nclass Note:\n num: int\n\n @classmethod\n def choice(cls, *args: int):\n return Note(choice(args))\n\n @classmethod\n def from_midi(cls, midi: int, root: int):\n note = midi_to_note.get(midi % root)\n if isinstance(note, int):\n return cls(note)\n raise ValueError()\n\n def __init__(self, num: int):\n while num > 7:\n num -= 7\n while num <= 0:\n num += 7\n self.num = num\n\n def __int__(self):\n return self.num\n\n def __repr__(self):\n return str(self.num)\n\n def __str__(self):\n return f'Note: {self.num}'\n\n def __hash__(self):\n return hash(self.num)\n\n def _distance(self, other):\n if isinstance(other, Note):\n return self.num - other.num\n raise TypeError()\n\n def __eq__(self, other):\n return self._distance(other) == 0\n\n def __lt__(self, other):\n return self._distance(other) < 0\n\n def __le__(self, other):\n return self._distance(other) <= 0\n\n def __gt__(self, other):\n return self._distance(other) > 0\n\n def __ge__(self, other):\n return self._distance(other) >= 0\n\n def _get_interval(self, interval: int):\n return {Note(self.num - interval), Note(self.num + interval)}\n\n def get_unison(self):\n return self._get_interval(0)\n\n def get_second(self):\n return self._get_interval(1)\n\n def get_thirds(self):\n return self._get_interval(2)\n\n def get_forth(self):\n return self._get_interval(3)\n\n def get_fifth(self):\n return self._get_interval(4)\n\n def get_sixth(self):\n return self._get_interval(5)\n\n def get_seventh(self):\n return self._get_interval(6)\n\n def inv(self):\n return Note(6 - self.num)\n\n def get_next_possible_notes(self, /, leap=True):\n ret = [Note(self.num - 1), Note(self.num + 1)]\n if leap:\n ret += [Note(self.num - 2), Note(self.num + 2)]\n 
shuffle(ret)\n return ret\n\n def __sub__(self, other) ->int:\n dist = abs(self._distance(other))\n if dist > 3:\n dist = 7 - dist\n return dist\n\n def get_all_possible_midi(self, root: int) ->List[int]:\n midi = self.convert_to_midi(root)\n assert midi >= 0\n ret: List[int] = []\n while midi - 12 >= 0:\n midi -= 12\n while midi <= 127:\n ret.append(midi)\n midi += 12\n return ret\n\n def convert_to_midi(self, root: int) ->int:\n return note_to_midi[self.num] + root\n\n\nINVERSE_POSSIBLE_NOTE = {Note(2), Note(3), Note(4)}\n\n\ndef choose_from_inverse_possible_note():\n return choice(list(INVERSE_POSSIBLE_NOTE))\n\n\nALL_NOTES = {Note(1), Note(2), Note(3), Note(4), Note(5), Note(6), Note(7)}\n\n\ndef choose_from_all_notes():\n return choice(list(ALL_NOTES))\n\n\ndef fill_in_thirds() ->Tuple[Note, Note]:\n first = choice(list(ALL_NOTES))\n second = choice(list(first.get_thirds()))\n return first, second\n",
"step-5": "from random import shuffle, choice\nfrom typing import Dict, List, Tuple\n\nnote_to_midi: Dict[int, int] = {\n 1: 0,\n 2: 2,\n 3: 4,\n 4: 5,\n 5: 7,\n 6: 9,\n 7: 11,\n}\n\nmidi_to_note: Dict[int, int] = {\n 0: 1,\n 2: 2,\n 4: 3,\n 5: 4,\n 7: 5,\n 9: 6,\n 11: 7,\n}\n\n\nclass Note:\n num: int\n\n @classmethod\n def choice(cls, *args: int):\n return Note(choice(args))\n\n @classmethod\n def from_midi(cls, midi: int, root: int):\n note = midi_to_note.get(midi % root)\n if isinstance(note, int):\n return cls(note)\n raise ValueError()\n\n def __init__(self, num: int):\n while num > 7:\n num -= 7\n while num <= 0:\n num += 7\n self.num = num\n\n def __int__(self):\n return self.num\n\n def __repr__(self):\n return str(self.num)\n\n def __str__(self):\n return f'Note: {self.num}'\n\n def __hash__(self):\n return hash(self.num)\n\n def _distance(self, other):\n if isinstance(other, Note):\n return self.num - other.num\n raise TypeError()\n\n def __eq__(self, other):\n return self._distance(other) == 0\n\n def __lt__(self, other):\n return self._distance(other) < 0\n\n def __le__(self, other):\n return self._distance(other) <= 0\n\n def __gt__(self, other):\n return self._distance(other) > 0\n\n def __ge__(self, other):\n return self._distance(other) >= 0\n\n def _get_interval(self, interval: int):\n return {Note(self.num - interval), Note(self.num + interval)}\n\n def get_unison(self):\n return self._get_interval(0)\n\n def get_second(self):\n return self._get_interval(1)\n\n def get_thirds(self):\n return self._get_interval(2)\n\n def get_forth(self):\n return self._get_interval(3)\n\n def get_fifth(self):\n return self._get_interval(4)\n\n def get_sixth(self):\n return self._get_interval(5)\n\n def get_seventh(self):\n return self._get_interval(6)\n\n def inv(self):\n return Note(6 - self.num)\n\n def get_next_possible_notes(self, /, leap=True):\n ret = [Note(self.num - 1), Note(self.num + 1)]\n if leap:\n ret += [Note(self.num - 2), Note(self.num + 2)]\n 
shuffle(ret)\n return ret\n\n def __sub__(self, other) -> int:\n dist = abs(self._distance(other))\n if dist > 3:\n dist = 7 - dist\n return dist\n\n def get_all_possible_midi(self, root: int) -> List[int]:\n midi = self.convert_to_midi(root)\n assert midi >= 0\n ret: List[int] = []\n while midi - 12 >= 0:\n midi -= 12\n while midi <= 127:\n ret.append(midi)\n midi += 12\n return ret\n\n def convert_to_midi(self, root: int) -> int:\n return note_to_midi[self.num] + root\n\n\nINVERSE_POSSIBLE_NOTE = {\n Note(2), Note(3), Note(4),\n}\n\n\ndef choose_from_inverse_possible_note():\n return choice(list(INVERSE_POSSIBLE_NOTE))\n\n\nALL_NOTES = {\n Note(1), Note(2), Note(3), Note(4), Note(5), Note(6), Note(7),\n}\n\n\ndef choose_from_all_notes():\n return choice(list(ALL_NOTES))\n\n\ndef fill_in_thirds() -> Tuple[Note, Note]:\n first = choice(list(ALL_NOTES))\n second = choice(list(first.get_thirds()))\n return first, second\n",
"step-ids": [
24,
26,
28,
33,
34
]
}
|
[
24,
26,
28,
33,
34
] |
#! /usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'raek'

# NOTE(review): these look like service client identifiers (UUIDs) for the
# web and app consumers — confirm they are not secrets before committing.
web = '910d59f0-30bd-495b-a54c-bf5addc81a8a'
app = '21ec74fb-e941-43be-8772-a2f8dc6ccc4f'
|
normal
|
{
"blob_id": "cce645073ba117b9e297dfccf5a39710b0c6cd14",
"index": 8479,
"step-1": "<mask token>\n",
"step-2": "__author__ = 'raek'\nweb = '910d59f0-30bd-495b-a54c-bf5addc81a8a'\napp = '21ec74fb-e941-43be-8772-a2f8dc6ccc4f'\n",
"step-3": "#! /usr/bin/python\n# -*- coding: utf-8 -*-\n__author__ = 'raek'\n\nweb = '910d59f0-30bd-495b-a54c-bf5addc81a8a'\napp = '21ec74fb-e941-43be-8772-a2f8dc6ccc4f'",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015-2016 Applatix, Inc. All rights reserved.
#
'''
cAdvisor CLI. Used by axstats temporarily before moving to Heapster
'''
import requests
import logging
import time
logger = logging.getLogger(__name__)
CHECK_LIVELINESS_INTERVAL = 5
CONNECTION_TIMEOUT = 5
class AXCadvisorClient(object):
    """Thin client for a node's cAdvisor v2.0 REST API (Kubernetes port 4194)."""

    def __init__(self, ip):
        # Number of stat samples requested per stats call.
        self._wait_interval = 60

        # Using Kubernetes default cadvisor port
        self._url_prefix = "http://{ip}:{port}/api/v2.0/".format(ip=ip, port=4194)
        self.wait_for_cadvisor_up()

    def wait_for_cadvisor_up(self):
        """
        Poll cadvisor endpoint till there is a response.
        Note it was calling /api/v2.0/version before, but this api in Kubernetes returns empty string
        :return:
        """
        ping = None
        while ping is None:
            try:
                ping = requests.get(self._url_prefix, timeout=CONNECTION_TIMEOUT)
            except requests.exceptions.RequestException as e:
                # Bug fix: a down cadvisor makes requests.get() raise
                # (ConnectionError/Timeout) rather than return None, so the
                # original loop crashed on the first failure instead of
                # waiting. Treat any request failure as "not up yet" and
                # retry, mirroring the handling in _get_response.
                logger.debug("Unable to connect to cadvisor %s (%s). Will sleep for %s sec",
                             self._url_prefix, e, CHECK_LIVELINESS_INTERVAL)
                time.sleep(CHECK_LIVELINESS_INTERVAL)
        logger.info("cAdvisor client is up for endpoint %s", self._url_prefix)

    def get_machine_info(self):
        """Return the node's machine spec (/machine) as parsed JSON, or None on failure."""
        url = self._url_prefix + "machine"
        return self._get_response(url)

    def get_spec_info(self):
        """Return specs for all containers recursively (/spec), or None on failure."""
        url = self._url_prefix + "spec"
        data = {
            "recursive": "true"
        }
        return self._get_response(url, data)

    def get_events(self, event_start):
        """Return all container events since *event_start* (/events), or None on failure."""
        url = self._url_prefix + "events"
        data = {
            "all_events": "true",
            "subcontainers": "true",
            "start_time": event_start
        }
        return self._get_response(url, data)

    def get_docker_stats(self):
        """Return the last ``_wait_interval`` stat samples per container (/stats)."""
        url = self._url_prefix + "stats"
        data = {
            "recursive": "true",
            "count": str(self._wait_interval)
        }
        return self._get_response(url, data)

    @staticmethod
    def _get_response(url, params=None):
        """GET *url* and return parsed JSON on HTTP 200; None on any failure."""
        out = None
        try:
            response = requests.get(url=url, params=params, timeout=CONNECTION_TIMEOUT)
            if response.status_code == requests.codes.ok:
                out = response.json()
        except requests.exceptions.RequestException as e:
            logger.error('Unexpected exception occurred during request: %s', e)
        return out
|
normal
|
{
"blob_id": "87f672919f6019e549508b239c798301d5f549bd",
"index": 7667,
"step-1": "<mask token>\n\n\nclass AXCadvisorClient(object):\n\n def __init__(self, ip):\n self._wait_interval = 60\n self._url_prefix = 'http://{ip}:{port}/api/v2.0/'.format(ip=ip,\n port=4194)\n self.wait_for_cadvisor_up()\n\n def wait_for_cadvisor_up(self):\n \"\"\"\n Poll cadvisor endpoint till there is a response.\n Note it was calling /api/v2.0/version before, but this api in Kubernetes returns empty string\n :param url:\n :return:\n \"\"\"\n ping = None\n while ping is None:\n ping = requests.get(self._url_prefix, timeout=CONNECTION_TIMEOUT)\n if ping is None:\n logger.debug(\n 'Unable to connect to cadvisor %s. Will sleep for %s sec',\n self._url_prefix, CHECK_LIVELINESS_INTERVAL)\n time.sleep(CHECK_LIVELINESS_INTERVAL)\n logger.info('cAdvisor client is up for endpoint %s', self._url_prefix)\n <mask token>\n\n def get_spec_info(self):\n url = self._url_prefix + 'spec'\n data = {'recursive': 'true'}\n return self._get_response(url, data)\n\n def get_events(self, event_start):\n url = self._url_prefix + 'events'\n data = {'all_events': 'true', 'subcontainers': 'true', 'start_time':\n event_start}\n return self._get_response(url, data)\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass AXCadvisorClient(object):\n\n def __init__(self, ip):\n self._wait_interval = 60\n self._url_prefix = 'http://{ip}:{port}/api/v2.0/'.format(ip=ip,\n port=4194)\n self.wait_for_cadvisor_up()\n\n def wait_for_cadvisor_up(self):\n \"\"\"\n Poll cadvisor endpoint till there is a response.\n Note it was calling /api/v2.0/version before, but this api in Kubernetes returns empty string\n :param url:\n :return:\n \"\"\"\n ping = None\n while ping is None:\n ping = requests.get(self._url_prefix, timeout=CONNECTION_TIMEOUT)\n if ping is None:\n logger.debug(\n 'Unable to connect to cadvisor %s. Will sleep for %s sec',\n self._url_prefix, CHECK_LIVELINESS_INTERVAL)\n time.sleep(CHECK_LIVELINESS_INTERVAL)\n logger.info('cAdvisor client is up for endpoint %s', self._url_prefix)\n\n def get_machine_info(self):\n url = self._url_prefix + 'machine'\n return self._get_response(url)\n\n def get_spec_info(self):\n url = self._url_prefix + 'spec'\n data = {'recursive': 'true'}\n return self._get_response(url, data)\n\n def get_events(self, event_start):\n url = self._url_prefix + 'events'\n data = {'all_events': 'true', 'subcontainers': 'true', 'start_time':\n event_start}\n return self._get_response(url, data)\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass AXCadvisorClient(object):\n\n def __init__(self, ip):\n self._wait_interval = 60\n self._url_prefix = 'http://{ip}:{port}/api/v2.0/'.format(ip=ip,\n port=4194)\n self.wait_for_cadvisor_up()\n\n def wait_for_cadvisor_up(self):\n \"\"\"\n Poll cadvisor endpoint till there is a response.\n Note it was calling /api/v2.0/version before, but this api in Kubernetes returns empty string\n :param url:\n :return:\n \"\"\"\n ping = None\n while ping is None:\n ping = requests.get(self._url_prefix, timeout=CONNECTION_TIMEOUT)\n if ping is None:\n logger.debug(\n 'Unable to connect to cadvisor %s. Will sleep for %s sec',\n self._url_prefix, CHECK_LIVELINESS_INTERVAL)\n time.sleep(CHECK_LIVELINESS_INTERVAL)\n logger.info('cAdvisor client is up for endpoint %s', self._url_prefix)\n\n def get_machine_info(self):\n url = self._url_prefix + 'machine'\n return self._get_response(url)\n\n def get_spec_info(self):\n url = self._url_prefix + 'spec'\n data = {'recursive': 'true'}\n return self._get_response(url, data)\n\n def get_events(self, event_start):\n url = self._url_prefix + 'events'\n data = {'all_events': 'true', 'subcontainers': 'true', 'start_time':\n event_start}\n return self._get_response(url, data)\n\n def get_docker_stats(self):\n url = self._url_prefix + 'stats'\n data = {'recursive': 'true', 'count': str(self._wait_interval)}\n return self._get_response(url, data)\n <mask token>\n",
"step-4": "<mask token>\n\n\nclass AXCadvisorClient(object):\n\n def __init__(self, ip):\n self._wait_interval = 60\n self._url_prefix = 'http://{ip}:{port}/api/v2.0/'.format(ip=ip,\n port=4194)\n self.wait_for_cadvisor_up()\n\n def wait_for_cadvisor_up(self):\n \"\"\"\n Poll cadvisor endpoint till there is a response.\n Note it was calling /api/v2.0/version before, but this api in Kubernetes returns empty string\n :param url:\n :return:\n \"\"\"\n ping = None\n while ping is None:\n ping = requests.get(self._url_prefix, timeout=CONNECTION_TIMEOUT)\n if ping is None:\n logger.debug(\n 'Unable to connect to cadvisor %s. Will sleep for %s sec',\n self._url_prefix, CHECK_LIVELINESS_INTERVAL)\n time.sleep(CHECK_LIVELINESS_INTERVAL)\n logger.info('cAdvisor client is up for endpoint %s', self._url_prefix)\n\n def get_machine_info(self):\n url = self._url_prefix + 'machine'\n return self._get_response(url)\n\n def get_spec_info(self):\n url = self._url_prefix + 'spec'\n data = {'recursive': 'true'}\n return self._get_response(url, data)\n\n def get_events(self, event_start):\n url = self._url_prefix + 'events'\n data = {'all_events': 'true', 'subcontainers': 'true', 'start_time':\n event_start}\n return self._get_response(url, data)\n\n def get_docker_stats(self):\n url = self._url_prefix + 'stats'\n data = {'recursive': 'true', 'count': str(self._wait_interval)}\n return self._get_response(url, data)\n\n @staticmethod\n def _get_response(url, params=None):\n out = None\n try:\n response = requests.get(url=url, params=params, timeout=\n CONNECTION_TIMEOUT)\n if response.status_code == requests.codes.ok:\n out = response.json()\n except requests.exceptions.RequestException as e:\n logger.error('Unexpected exception occurred during request: %s', e)\n return out\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2015-2016 Applatix, Inc. All rights reserved.\n#\n\n'''\ncAdvisor CLI. Used by axstats temporarily before moving to Heapster\n'''\n\nimport requests\nimport logging\nimport time\n\nlogger = logging.getLogger(__name__)\nCHECK_LIVELINESS_INTERVAL = 5\nCONNECTION_TIMEOUT = 5\n\n\nclass AXCadvisorClient(object):\n def __init__(self, ip):\n self._wait_interval = 60\n\n # Using Kubernetes default cadvisor port\n self._url_prefix = \"http://{ip}:{port}/api/v2.0/\".format(ip=ip, port=4194)\n self.wait_for_cadvisor_up()\n\n def wait_for_cadvisor_up(self):\n \"\"\"\n Poll cadvisor endpoint till there is a response.\n Note it was calling /api/v2.0/version before, but this api in Kubernetes returns empty string\n :param url:\n :return:\n \"\"\"\n ping = None\n while ping is None:\n ping = requests.get(self._url_prefix, timeout=CONNECTION_TIMEOUT)\n if ping is None:\n logger.debug(\"Unable to connect to cadvisor %s. Will sleep for %s sec\",\n self._url_prefix, CHECK_LIVELINESS_INTERVAL)\n time.sleep(CHECK_LIVELINESS_INTERVAL)\n logger.info(\"cAdvisor client is up for endpoint %s\", self._url_prefix)\n\n def get_machine_info(self):\n url = self._url_prefix + \"machine\"\n return self._get_response(url)\n\n def get_spec_info(self):\n url = self._url_prefix + \"spec\"\n data = {\n \"recursive\": \"true\"\n }\n return self._get_response(url, data)\n\n def get_events(self, event_start):\n url = self._url_prefix + \"events\"\n data = {\n \"all_events\": \"true\",\n \"subcontainers\": \"true\",\n \"start_time\": event_start\n }\n return self._get_response(url, data)\n\n def get_docker_stats(self):\n url = self._url_prefix + \"stats\"\n data = {\n \"recursive\": \"true\",\n \"count\": str(self._wait_interval)\n }\n return self._get_response(url, data)\n\n @staticmethod\n def _get_response(url, params=None):\n out = None\n try:\n response = requests.get(url=url, params=params, timeout=CONNECTION_TIMEOUT)\n if 
response.status_code == requests.codes.ok:\n out = response.json()\n except requests.exceptions.RequestException as e:\n logger.error('Unexpected exception occurred during request: %s', e)\n return out\n",
"step-ids": [
5,
6,
7,
8,
11
]
}
|
[
5,
6,
7,
8,
11
] |
import pytest
import responses
from auctioneer import constants, controllers, entities
from common.http import UnExpectedResult
def test_keywordbid_rule_init(kwb_rule, account):
    """A freshly created rule exposes scaled display values and expected defaults."""
    # Display accessors convert stored units: micro-units for bids, basis points for percentages.
    assert kwb_rule.get_max_bid_display() == kwb_rule.max_bid * 1_000_000
    assert kwb_rule.get_bid_increase_percentage_display() == kwb_rule.bid_increase_percentage / 100
    assert kwb_rule.get_target_bid_diff_display() == kwb_rule.target_bid_diff / 100
    # The rule is bound to the fixture account object itself, not a copy.
    assert kwb_rule.account is account
    assert kwb_rule.target_values == [1, 2, 3]
    # The chosen target type must be one of the declared human-readable labels.
    target_labels = [choice[1] for choice in constants.KEYWORD_BID_TARGET_TYPES]
    assert kwb_rule.get_target_type_display() in target_labels
def test_make_keywordbid_rule(kwb_rule):
    """Lookup by primary key returns the stored rule; a missing pk yields None."""
    fetched = controllers.keyword_bid_rule.get_keywordbid_rule(kwb_rule.id)
    assert fetched == kwb_rule
    assert fetched.account == kwb_rule.account
    # pk 0 never exists, so the controller falls back to None instead of raising.
    missing = controllers.keyword_bid_rule.get_keywordbid_rule(0)
    assert missing is None
def test_map_keywordbid_rule(kwb_rule, account):
    """Mapping a model to its entity copies every field, resolving display values and FK ids."""
    entity = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)
    assert isinstance(entity, entities.KeywordBidRule)
    # Foreign keys are flattened to their primary-key value on the entity.
    assert entity.account == account.id
    skipped = ('id', 'title')
    for field in kwb_rule._meta.fields:
        if field.name in skipped:
            continue
        model_value = getattr(kwb_rule, field.name)
        entity_value = getattr(entity, field.name)
        if hasattr(model_value, 'pk'):
            # Related objects map to their id.
            assert entity_value == model_value.id
        else:
            try:
                # Choice fields map through their human-readable display value;
                # getattr raises AttributeError when no display accessor exists.
                assert entity_value == getattr(kwb_rule, f'get_{field.name}_display')()
            except AttributeError:
                assert entity_value == model_value
def test_get_keyword_bids(yd_gateway, keyword_bids):
    """get_keyword_bids yields bids lazily and raises UnExpectedResult on an HTTP error."""
    url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'
    criteria = {"CampaignIds": []}
    with responses.RequestsMock() as mock:
        # Three sequential gateway calls: success, HTTP 404, success again.
        mock.add(method='POST', url=url, status=200, json=keyword_bids)
        mock.add(method='POST', url=url, status=404)
        mock.add(method='POST', url=url, status=200, json=keyword_bids)

        bids = controllers.keyword_bids.get_keyword_bids(yd_gateway, selection_criteria=criteria)
        first, second = next(bids), next(bids)
        assert first.keyword_id == 13102117581
        assert second.keyword_id == 13102117582

        failing = controllers.keyword_bids.get_keyword_bids(yd_gateway, selection_criteria=criteria)
        with pytest.raises(UnExpectedResult):
            next(failing)

        bids = controllers.keyword_bids.get_keyword_bids(yd_gateway, selection_criteria=criteria)
        assert type(next(bids).as_dict()) is dict
def test_set_keyword_bids(yd_gateway, keyword_bids, keyword_bids_w_warnings):
    """Setting bids forwards the mapped entities and yields one result per response bid."""
    url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'
    mapped = controllers.keyword_bids.map_keyword_bids(keyword_bids['result']['KeywordBids'])
    with responses.RequestsMock() as mock:
        # Gateway answers with a payload that includes per-bid warnings.
        mock.add(method='POST', url=url, status=200, json=keyword_bids_w_warnings)
        results = list(controllers.keyword_bids.set_keyword_bids(yd_gateway, mapped))
        assert len(results) == 1514
def test_calculate_keyword_bids(yd_gateway, kwb_rule, keyword_bids, keyword_bids_w_warnings):
    """Full pipeline (fetch -> recalculate per rule -> set); API-level errors surface as UnExpectedResult."""
    url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'
    rule_entity = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)
    criteria = {"CampaignIds": []}
    with responses.RequestsMock() as mock:
        mock.add(method='POST', url=url, status=200, json=keyword_bids)
        mock.add(method='POST', url=url, status=200, json=keyword_bids_w_warnings)
        # Third call returns an in-band API error despite HTTP 200.
        error_body = {'error': {'error_code': 0, 'error_message': 'oops!'}}
        mock.add(method='POST', url=url, status=200, json=error_body)

        result = controllers.keyword_bids.calculate_keyword_bids(
            yd_gateway, rule_entity, selection_criteria=criteria
        )
        assert len(result) == 1514

        with pytest.raises(UnExpectedResult):
            controllers.keyword_bids.calculate_keyword_bids(
                yd_gateway, rule_entity, selection_criteria=criteria
            )
|
normal
|
{
"blob_id": "e0435b0b34fc011e7330ab8882865131f7f78882",
"index": 922,
"step-1": "<mask token>\n\n\ndef test_keywordbid_rule_init(kwb_rule, account):\n assert kwb_rule.get_max_bid_display() == kwb_rule.max_bid * 1000000\n assert kwb_rule.get_bid_increase_percentage_display(\n ) == kwb_rule.bid_increase_percentage / 100\n assert kwb_rule.get_target_bid_diff_display(\n ) == kwb_rule.target_bid_diff / 100\n assert kwb_rule.account is account\n assert kwb_rule.target_values == [1, 2, 3]\n assert kwb_rule.get_target_type_display() in map(lambda t: t[1],\n constants.KEYWORD_BID_TARGET_TYPES)\n\n\n<mask token>\n\n\ndef test_map_keywordbid_rule(kwb_rule, account):\n kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)\n assert isinstance(kwb_ent, entities.KeywordBidRule)\n assert kwb_ent.account == account.id\n for f in kwb_rule._meta.fields:\n if f.name in ('id', 'title'):\n continue\n model_attr = getattr(kwb_rule, f.name)\n ent_attr = getattr(kwb_ent, f.name)\n if not hasattr(model_attr, 'pk'):\n try:\n assert ent_attr == getattr(kwb_rule, f'get_{f.name}_display')()\n except AttributeError:\n assert ent_attr == model_attr\n else:\n assert ent_attr == model_attr.id\n\n\n<mask token>\n\n\ndef test_calculate_keyword_bids(yd_gateway, kwb_rule, keyword_bids,\n keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=keyword_bids)\n mock.add(method='POST', url=url, status=200, json=\n keyword_bids_w_warnings)\n mock.add(method='POST', url=url, status=200, json={'error': {\n 'error_code': 0, 'error_message': 'oops!'}})\n result = controllers.keyword_bids.calculate_keyword_bids(yd_gateway,\n kwb_ent, selection_criteria={'CampaignIds': []})\n assert len(result) == 1514\n with pytest.raises(UnExpectedResult):\n controllers.keyword_bids.calculate_keyword_bids(yd_gateway,\n kwb_ent, selection_criteria={'CampaignIds': []})\n",
"step-2": "<mask token>\n\n\ndef test_keywordbid_rule_init(kwb_rule, account):\n assert kwb_rule.get_max_bid_display() == kwb_rule.max_bid * 1000000\n assert kwb_rule.get_bid_increase_percentage_display(\n ) == kwb_rule.bid_increase_percentage / 100\n assert kwb_rule.get_target_bid_diff_display(\n ) == kwb_rule.target_bid_diff / 100\n assert kwb_rule.account is account\n assert kwb_rule.target_values == [1, 2, 3]\n assert kwb_rule.get_target_type_display() in map(lambda t: t[1],\n constants.KEYWORD_BID_TARGET_TYPES)\n\n\n<mask token>\n\n\ndef test_map_keywordbid_rule(kwb_rule, account):\n kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)\n assert isinstance(kwb_ent, entities.KeywordBidRule)\n assert kwb_ent.account == account.id\n for f in kwb_rule._meta.fields:\n if f.name in ('id', 'title'):\n continue\n model_attr = getattr(kwb_rule, f.name)\n ent_attr = getattr(kwb_ent, f.name)\n if not hasattr(model_attr, 'pk'):\n try:\n assert ent_attr == getattr(kwb_rule, f'get_{f.name}_display')()\n except AttributeError:\n assert ent_attr == model_attr\n else:\n assert ent_attr == model_attr.id\n\n\n<mask token>\n\n\ndef test_set_keyword_bids(yd_gateway, keyword_bids, keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb = controllers.keyword_bids.map_keyword_bids(keyword_bids['result'][\n 'KeywordBids'])\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=\n keyword_bids_w_warnings)\n response = controllers.keyword_bids.set_keyword_bids(yd_gateway, kwb)\n assert len(list(response)) == 1514\n\n\ndef test_calculate_keyword_bids(yd_gateway, kwb_rule, keyword_bids,\n keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=keyword_bids)\n 
mock.add(method='POST', url=url, status=200, json=\n keyword_bids_w_warnings)\n mock.add(method='POST', url=url, status=200, json={'error': {\n 'error_code': 0, 'error_message': 'oops!'}})\n result = controllers.keyword_bids.calculate_keyword_bids(yd_gateway,\n kwb_ent, selection_criteria={'CampaignIds': []})\n assert len(result) == 1514\n with pytest.raises(UnExpectedResult):\n controllers.keyword_bids.calculate_keyword_bids(yd_gateway,\n kwb_ent, selection_criteria={'CampaignIds': []})\n",
"step-3": "<mask token>\n\n\ndef test_keywordbid_rule_init(kwb_rule, account):\n assert kwb_rule.get_max_bid_display() == kwb_rule.max_bid * 1000000\n assert kwb_rule.get_bid_increase_percentage_display(\n ) == kwb_rule.bid_increase_percentage / 100\n assert kwb_rule.get_target_bid_diff_display(\n ) == kwb_rule.target_bid_diff / 100\n assert kwb_rule.account is account\n assert kwb_rule.target_values == [1, 2, 3]\n assert kwb_rule.get_target_type_display() in map(lambda t: t[1],\n constants.KEYWORD_BID_TARGET_TYPES)\n\n\ndef test_make_keywordbid_rule(kwb_rule):\n kw_bid_rule = controllers.keyword_bid_rule.get_keywordbid_rule(kwb_rule.id)\n assert kwb_rule == kw_bid_rule\n assert kw_bid_rule.account == kwb_rule.account\n not_found_kwb_rule = controllers.keyword_bid_rule.get_keywordbid_rule(0)\n assert not_found_kwb_rule is None\n\n\ndef test_map_keywordbid_rule(kwb_rule, account):\n kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)\n assert isinstance(kwb_ent, entities.KeywordBidRule)\n assert kwb_ent.account == account.id\n for f in kwb_rule._meta.fields:\n if f.name in ('id', 'title'):\n continue\n model_attr = getattr(kwb_rule, f.name)\n ent_attr = getattr(kwb_ent, f.name)\n if not hasattr(model_attr, 'pk'):\n try:\n assert ent_attr == getattr(kwb_rule, f'get_{f.name}_display')()\n except AttributeError:\n assert ent_attr == model_attr\n else:\n assert ent_attr == model_attr.id\n\n\ndef test_get_keyword_bids(yd_gateway, keyword_bids):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n data = keyword_bids\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=data)\n mock.add(method='POST', url=url, status=404)\n mock.add(method='POST', url=url, status=200, json=data)\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,\n selection_criteria={'CampaignIds': []})\n assert next(kwb).keyword_id == 13102117581\n assert next(kwb).keyword_id == 13102117582\n kwb = 
controllers.keyword_bids.get_keyword_bids(yd_gateway,\n selection_criteria={'CampaignIds': []})\n with pytest.raises(UnExpectedResult):\n next(kwb)\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,\n selection_criteria={'CampaignIds': []})\n assert type(next(kwb).as_dict()) is dict\n\n\ndef test_set_keyword_bids(yd_gateway, keyword_bids, keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb = controllers.keyword_bids.map_keyword_bids(keyword_bids['result'][\n 'KeywordBids'])\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=\n keyword_bids_w_warnings)\n response = controllers.keyword_bids.set_keyword_bids(yd_gateway, kwb)\n assert len(list(response)) == 1514\n\n\ndef test_calculate_keyword_bids(yd_gateway, kwb_rule, keyword_bids,\n keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=keyword_bids)\n mock.add(method='POST', url=url, status=200, json=\n keyword_bids_w_warnings)\n mock.add(method='POST', url=url, status=200, json={'error': {\n 'error_code': 0, 'error_message': 'oops!'}})\n result = controllers.keyword_bids.calculate_keyword_bids(yd_gateway,\n kwb_ent, selection_criteria={'CampaignIds': []})\n assert len(result) == 1514\n with pytest.raises(UnExpectedResult):\n controllers.keyword_bids.calculate_keyword_bids(yd_gateway,\n kwb_ent, selection_criteria={'CampaignIds': []})\n",
"step-4": "import pytest\nimport responses\nfrom auctioneer import constants, controllers, entities\nfrom common.http import UnExpectedResult\n\n\ndef test_keywordbid_rule_init(kwb_rule, account):\n assert kwb_rule.get_max_bid_display() == kwb_rule.max_bid * 1000000\n assert kwb_rule.get_bid_increase_percentage_display(\n ) == kwb_rule.bid_increase_percentage / 100\n assert kwb_rule.get_target_bid_diff_display(\n ) == kwb_rule.target_bid_diff / 100\n assert kwb_rule.account is account\n assert kwb_rule.target_values == [1, 2, 3]\n assert kwb_rule.get_target_type_display() in map(lambda t: t[1],\n constants.KEYWORD_BID_TARGET_TYPES)\n\n\ndef test_make_keywordbid_rule(kwb_rule):\n kw_bid_rule = controllers.keyword_bid_rule.get_keywordbid_rule(kwb_rule.id)\n assert kwb_rule == kw_bid_rule\n assert kw_bid_rule.account == kwb_rule.account\n not_found_kwb_rule = controllers.keyword_bid_rule.get_keywordbid_rule(0)\n assert not_found_kwb_rule is None\n\n\ndef test_map_keywordbid_rule(kwb_rule, account):\n kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)\n assert isinstance(kwb_ent, entities.KeywordBidRule)\n assert kwb_ent.account == account.id\n for f in kwb_rule._meta.fields:\n if f.name in ('id', 'title'):\n continue\n model_attr = getattr(kwb_rule, f.name)\n ent_attr = getattr(kwb_ent, f.name)\n if not hasattr(model_attr, 'pk'):\n try:\n assert ent_attr == getattr(kwb_rule, f'get_{f.name}_display')()\n except AttributeError:\n assert ent_attr == model_attr\n else:\n assert ent_attr == model_attr.id\n\n\ndef test_get_keyword_bids(yd_gateway, keyword_bids):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n data = keyword_bids\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=data)\n mock.add(method='POST', url=url, status=404)\n mock.add(method='POST', url=url, status=200, json=data)\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,\n selection_criteria={'CampaignIds': 
[]})\n assert next(kwb).keyword_id == 13102117581\n assert next(kwb).keyword_id == 13102117582\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,\n selection_criteria={'CampaignIds': []})\n with pytest.raises(UnExpectedResult):\n next(kwb)\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,\n selection_criteria={'CampaignIds': []})\n assert type(next(kwb).as_dict()) is dict\n\n\ndef test_set_keyword_bids(yd_gateway, keyword_bids, keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb = controllers.keyword_bids.map_keyword_bids(keyword_bids['result'][\n 'KeywordBids'])\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=\n keyword_bids_w_warnings)\n response = controllers.keyword_bids.set_keyword_bids(yd_gateway, kwb)\n assert len(list(response)) == 1514\n\n\ndef test_calculate_keyword_bids(yd_gateway, kwb_rule, keyword_bids,\n keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=keyword_bids)\n mock.add(method='POST', url=url, status=200, json=\n keyword_bids_w_warnings)\n mock.add(method='POST', url=url, status=200, json={'error': {\n 'error_code': 0, 'error_message': 'oops!'}})\n result = controllers.keyword_bids.calculate_keyword_bids(yd_gateway,\n kwb_ent, selection_criteria={'CampaignIds': []})\n assert len(result) == 1514\n with pytest.raises(UnExpectedResult):\n controllers.keyword_bids.calculate_keyword_bids(yd_gateway,\n kwb_ent, selection_criteria={'CampaignIds': []})\n",
"step-5": "import pytest\nimport responses\n\nfrom auctioneer import constants, controllers, entities\nfrom common.http import UnExpectedResult\n\n\ndef test_keywordbid_rule_init(kwb_rule, account):\n assert kwb_rule.get_max_bid_display() == kwb_rule.max_bid * 1_000_000\n assert kwb_rule.get_bid_increase_percentage_display() == kwb_rule.bid_increase_percentage / 100\n assert kwb_rule.get_target_bid_diff_display() == kwb_rule.target_bid_diff / 100\n assert kwb_rule.account is account\n assert kwb_rule.target_values == [1,2,3]\n assert kwb_rule.get_target_type_display() in map(lambda t: t[1], constants.KEYWORD_BID_TARGET_TYPES)\n\n\ndef test_make_keywordbid_rule(kwb_rule):\n kw_bid_rule = controllers.keyword_bid_rule.get_keywordbid_rule(kwb_rule.id)\n assert kwb_rule == kw_bid_rule\n assert kw_bid_rule.account == kwb_rule.account\n not_found_kwb_rule = controllers.keyword_bid_rule.get_keywordbid_rule(0)\n assert not_found_kwb_rule is None\n\n\ndef test_map_keywordbid_rule(kwb_rule, account):\n kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)\n assert isinstance(kwb_ent, entities.KeywordBidRule)\n assert kwb_ent.account == account.id\n for f in kwb_rule._meta.fields:\n if f.name in ('id', 'title') :\n continue\n model_attr = getattr(kwb_rule, f.name)\n ent_attr = getattr(kwb_ent, f.name)\n if not hasattr(model_attr, 'pk'):\n try:\n assert ent_attr == getattr(kwb_rule, f'get_{f.name}_display')()\n except AttributeError:\n assert ent_attr == model_attr\n else:\n assert ent_attr == model_attr.id\n\n\ndef test_get_keyword_bids(yd_gateway, keyword_bids):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n data = keyword_bids\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=data)\n mock.add(method='POST', url=url, status=404)\n mock.add(method='POST', url=url, status=200, json=data)\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway, selection_criteria={\"CampaignIds\": []})\n 
assert next(kwb).keyword_id == 13102117581\n assert next(kwb).keyword_id == 13102117582\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway, selection_criteria={\"CampaignIds\": []})\n with pytest.raises(UnExpectedResult):\n next(kwb)\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway, selection_criteria={\"CampaignIds\": []})\n assert type(next(kwb).as_dict()) is dict\n\n\ndef test_set_keyword_bids(yd_gateway, keyword_bids, keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb = controllers.keyword_bids.map_keyword_bids(keyword_bids['result']['KeywordBids'])\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=keyword_bids_w_warnings)\n response = controllers.keyword_bids.set_keyword_bids(yd_gateway, kwb)\n assert len(list(response)) == 1514\n\n\ndef test_calculate_keyword_bids(yd_gateway, kwb_rule, keyword_bids, keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=keyword_bids)\n mock.add(method='POST', url=url, status=200, json=keyword_bids_w_warnings)\n mock.add(method='POST', url=url, status=200, json={'error': {'error_code': 0000, 'error_message': 'oops!'}})\n result = controllers.keyword_bids.calculate_keyword_bids(yd_gateway, kwb_ent,\n selection_criteria={\"CampaignIds\": []})\n assert len(result) == 1514\n with pytest.raises(UnExpectedResult):\n controllers.keyword_bids.calculate_keyword_bids(yd_gateway, kwb_ent,\n selection_criteria={\"CampaignIds\": []})\n\n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from nemo_text_processing.text_normalization.en.graph_utils import (
NEMO_NOT_QUOTE,
NEMO_SIGMA,
NEMO_SPACE,
GraphFst,
delete_preserve_order,
)
from nemo_text_processing.text_normalization.es.graph_utils import shift_cardinal_gender, strip_cardinal_apocope
from nemo_text_processing.text_normalization.es.utils import get_abs_path
# pynini is an optional dependency: build the gender/number currency maps at
# import time when it is available, otherwise bind None sentinels so this
# module can still be imported without pynini installed.
try:
    import pynini
    from pynini.lib import pynutil

    # Two-column TSVs mapping singular currency names (input) to their
    # plural forms (output), split by grammatical gender.
    fem = pynini.string_file((get_abs_path("data/money/currency_plural_fem.tsv")))
    masc = pynini.string_file((get_abs_path("data/money/currency_plural_masc.tsv")))

    # Singular forms come from the input projection of each map.
    fem_singular = pynini.project(fem, "input")
    masc_singular = pynini.project(masc, "input")

    # Plural forms come from the output projection.
    fem_plural = pynini.project(fem, "output")
    masc_plural = pynini.project(masc, "output")

    PYNINI_AVAILABLE = True
except (ModuleNotFoundError, ImportError):
    # Fallback stubs: grammars depending on these cannot be constructed.
    fem_plural = None
    masc_plural = None
    fem_singular = None
    masc_singular = None

    PYNINI_AVAILABLE = False
class MoneyFst(GraphFst):
    """
    Finite state transducer for verbalizing money, e.g.
        money { currency_maj: "euro" integer_part: "un"} -> "un euro"
        money { currency_maj: "euro" integer_part: "un" fractional_part: "cero cero un"} -> "uno coma cero cero uno euros"
        money { integer_part: "un" currency_maj: "libra" fractional_part: "cuarenta" preserve_order: true} -> "una libra cuarenta"
        money { integer_part: "un" currency_maj: "libra" fractional_part: "cuarenta" currency_min: "peniques" preserve_order: true} -> "una libra con cuarenta peniques"
        money { fractional_part: "un" currency_min: "penique" preserve_order: true} -> "un penique"

    Args:
        decimal: GraphFst
        deterministic: if True will provide a single transduction option,
            for False multiple transduction are generated (used for audio-based normalization)
    """

    def __init__(self, decimal: GraphFst, deterministic: bool = True):
        super().__init__(name="money", kind="verbalize", deterministic=deterministic)

        def _currency(field, forms):
            """Consume a serialized `field: "..."` tag and map the quoted
            currency name through *forms* (a singular/plural, gendered map)."""
            return (
                pynutil.delete(f'{field}: "')
                + (pynini.closure(NEMO_NOT_QUOTE, 1) @ forms)
                + pynutil.delete('"')
            )

        # Major currency units, accepting both plural and singular surface forms.
        maj_masc = _currency("currency_maj", masc_plural) | _currency("currency_maj", masc_singular)
        maj_fem = _currency("currency_maj", fem_plural) | _currency("currency_maj", fem_singular)

        # Minor currency units (céntimos, peniques, ...), same gender/number handling.
        min_masc = _currency("currency_min", masc_plural) | _currency("currency_min", masc_singular)
        min_fem = _currency("currency_min", fem_plural) | _currency("currency_min", fem_singular)

        fractional_part = (
            pynutil.delete('fractional_part: "') + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('"')
        )
        integer_part = pynutil.delete('integer_part: "') + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('"')
        # Optional connector between major and minor amounts ("dos libras con/y diez").
        optional_add_and = pynini.closure(pynutil.insert(pynini.union("con ", "y ")), 0, 1)

        # *** currency_maj
        graph_integer_masc = integer_part + NEMO_SPACE + maj_masc
        # Feminine currencies need the cardinal shifted to feminine agreement ("una libra").
        graph_integer_fem = shift_cardinal_gender(integer_part) + NEMO_SPACE + maj_fem
        graph_integer = graph_integer_fem | graph_integer_masc

        # *** currency_maj + (***) | ((con) *** currency_min)
        graph_integer_with_minor_masc = (
            graph_integer_masc
            + NEMO_SPACE
            + pynini.union(
                optional_add_and + strip_cardinal_apocope(fractional_part),
                (optional_add_and + fractional_part + NEMO_SPACE + min_masc),
                (optional_add_and + shift_cardinal_gender(fractional_part) + NEMO_SPACE + min_fem),
            )  # minor currency may carry a different gender than the major one
            + delete_preserve_order
        )
        graph_integer_with_minor_fem = (
            graph_integer_fem
            + NEMO_SPACE
            + pynini.union(
                optional_add_and + shift_cardinal_gender(fractional_part),
                (optional_add_and + fractional_part + NEMO_SPACE + min_masc),
                (optional_add_and + shift_cardinal_gender(fractional_part) + NEMO_SPACE + min_fem),
            )  # minor currency may carry a different gender than the major one
            + delete_preserve_order
        )
        graph_integer_with_minor = graph_integer_with_minor_fem | graph_integer_with_minor_masc

        # *** coma *** currency_maj
        graph_decimal_masc = decimal.graph_masc + NEMO_SPACE + maj_masc
        graph_decimal_fem = decimal.graph_fem
        graph_decimal_fem |= decimal.numbers_only_quantity  # can still have "x billions" with fem currency
        graph_decimal_fem += NEMO_SPACE + maj_fem
        graph_decimal = graph_decimal_fem | graph_decimal_masc
        # When a quantity field is present, formally it's "millones/billones de ***".
        graph_decimal = (
            pynini.cdrewrite(
                pynutil.insert(" de"), 'quantity: "' + pynini.closure(NEMO_NOT_QUOTE, 1), '"', NEMO_SIGMA
            )
            @ graph_decimal
        )

        # *** currency_min (fractional amount only, e.g. "un penique")
        graph_minor_masc = fractional_part + NEMO_SPACE + min_masc + delete_preserve_order
        graph_minor_fem = shift_cardinal_gender(fractional_part) + NEMO_SPACE + min_fem + delete_preserve_order
        graph_minor = graph_minor_fem | graph_minor_masc

        graph = graph_integer | graph_integer_with_minor | graph_decimal | graph_minor

        delete_tokens = self.delete_tokens(graph)
        self.fst = delete_tokens.optimize()
|
normal
|
{
"blob_id": "dccdca65cce2959b07657636e23e7c9ab8a4f96c",
"index": 1382,
"step-1": "<mask token>\n\n\nclass MoneyFst(GraphFst):\n <mask token>\n\n def __init__(self, decimal: GraphFst, deterministic: bool=True):\n super().__init__(name='money', kind='verbalize', deterministic=\n deterministic)\n maj_singular_masc = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('\"')\n maj_singular_fem = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('\"')\n maj_plural_masc = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('\"')\n maj_plural_fem = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('\"')\n maj_masc = maj_plural_masc | maj_singular_masc\n maj_fem = maj_plural_fem | maj_singular_fem\n min_singular_masc = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('\"')\n min_singular_fem = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('\"')\n min_plural_masc = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('\"')\n min_plural_fem = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('\"')\n min_masc = min_plural_masc | min_singular_masc\n min_fem = min_plural_fem | min_singular_fem\n fractional_part = pynutil.delete('fractional_part: \"'\n ) + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('\"')\n integer_part = pynutil.delete('integer_part: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) + pynutil.delete('\"')\n optional_add_and = pynini.closure(pynutil.insert(pynini.union(\n 'con ', 'y ')), 0, 1)\n graph_integer_masc = integer_part + NEMO_SPACE + maj_masc\n graph_integer_fem = shift_cardinal_gender(integer_part\n ) + NEMO_SPACE + maj_fem\n graph_integer = graph_integer_fem | graph_integer_masc\n 
graph_integer_with_minor_masc = (graph_integer_masc + NEMO_SPACE +\n pynini.union(optional_add_and + strip_cardinal_apocope(\n fractional_part), optional_add_and + fractional_part +\n NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender\n (fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)\n graph_integer_with_minor_fem = (graph_integer_fem + NEMO_SPACE +\n pynini.union(optional_add_and + shift_cardinal_gender(\n fractional_part), optional_add_and + fractional_part +\n NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender\n (fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)\n graph_integer_with_minor = (graph_integer_with_minor_fem |\n graph_integer_with_minor_masc)\n graph_decimal_masc = decimal.graph_masc + NEMO_SPACE + maj_masc\n graph_decimal_fem = decimal.graph_fem\n graph_decimal_fem |= decimal.numbers_only_quantity\n graph_decimal_fem += NEMO_SPACE + maj_fem\n graph_decimal = graph_decimal_fem | graph_decimal_masc\n graph_decimal = pynini.cdrewrite(pynutil.insert(' de'), \n 'quantity: \"' + pynini.closure(NEMO_NOT_QUOTE, 1), '\"', NEMO_SIGMA\n ) @ graph_decimal\n graph_minor_masc = (fractional_part + NEMO_SPACE + min_masc +\n delete_preserve_order)\n graph_minor_fem = shift_cardinal_gender(fractional_part\n ) + NEMO_SPACE + min_fem + delete_preserve_order\n graph_minor = graph_minor_fem | graph_minor_masc\n graph = (graph_integer | graph_integer_with_minor | graph_decimal |\n graph_minor)\n delete_tokens = self.delete_tokens(graph)\n self.fst = delete_tokens.optimize()\n",
"step-2": "<mask token>\n\n\nclass MoneyFst(GraphFst):\n \"\"\"\n Finite state transducer for verbalizing money, e.g.\n money { currency_maj: \"euro\" integer_part: \"un\"} -> \"un euro\"\n money { currency_maj: \"euro\" integer_part: \"un\" fractional_part: \"cero cero un\"} -> \"uno coma cero cero uno euros\"\n money { integer_part: \"un\" currency_maj: \"libra\" fractional_part: \"cuarenta\" preserve_order: true} -> \"una libra cuarenta\"\n money { integer_part: \"un\" currency_maj: \"libra\" fractional_part: \"cuarenta\" currency_min: \"peniques\" preserve_order: true} -> \"una libra con cuarenta peniques\"\n money { fractional_part: \"un\" currency_min: \"penique\" preserve_order: true} -> \"un penique\"\n\n Args:\n decimal: GraphFst\n deterministic: if True will provide a single transduction option,\n for False multiple transduction are generated (used for audio-based normalization)\n \"\"\"\n\n def __init__(self, decimal: GraphFst, deterministic: bool=True):\n super().__init__(name='money', kind='verbalize', deterministic=\n deterministic)\n maj_singular_masc = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('\"')\n maj_singular_fem = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('\"')\n maj_plural_masc = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('\"')\n maj_plural_fem = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('\"')\n maj_masc = maj_plural_masc | maj_singular_masc\n maj_fem = maj_plural_fem | maj_singular_fem\n min_singular_masc = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('\"')\n min_singular_fem = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('\"')\n min_plural_masc = 
pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('\"')\n min_plural_fem = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('\"')\n min_masc = min_plural_masc | min_singular_masc\n min_fem = min_plural_fem | min_singular_fem\n fractional_part = pynutil.delete('fractional_part: \"'\n ) + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('\"')\n integer_part = pynutil.delete('integer_part: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) + pynutil.delete('\"')\n optional_add_and = pynini.closure(pynutil.insert(pynini.union(\n 'con ', 'y ')), 0, 1)\n graph_integer_masc = integer_part + NEMO_SPACE + maj_masc\n graph_integer_fem = shift_cardinal_gender(integer_part\n ) + NEMO_SPACE + maj_fem\n graph_integer = graph_integer_fem | graph_integer_masc\n graph_integer_with_minor_masc = (graph_integer_masc + NEMO_SPACE +\n pynini.union(optional_add_and + strip_cardinal_apocope(\n fractional_part), optional_add_and + fractional_part +\n NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender\n (fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)\n graph_integer_with_minor_fem = (graph_integer_fem + NEMO_SPACE +\n pynini.union(optional_add_and + shift_cardinal_gender(\n fractional_part), optional_add_and + fractional_part +\n NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender\n (fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)\n graph_integer_with_minor = (graph_integer_with_minor_fem |\n graph_integer_with_minor_masc)\n graph_decimal_masc = decimal.graph_masc + NEMO_SPACE + maj_masc\n graph_decimal_fem = decimal.graph_fem\n graph_decimal_fem |= decimal.numbers_only_quantity\n graph_decimal_fem += NEMO_SPACE + maj_fem\n graph_decimal = graph_decimal_fem | graph_decimal_masc\n graph_decimal = pynini.cdrewrite(pynutil.insert(' de'), \n 'quantity: \"' + pynini.closure(NEMO_NOT_QUOTE, 1), '\"', NEMO_SIGMA\n ) @ graph_decimal\n 
graph_minor_masc = (fractional_part + NEMO_SPACE + min_masc +\n delete_preserve_order)\n graph_minor_fem = shift_cardinal_gender(fractional_part\n ) + NEMO_SPACE + min_fem + delete_preserve_order\n graph_minor = graph_minor_fem | graph_minor_masc\n graph = (graph_integer | graph_integer_with_minor | graph_decimal |\n graph_minor)\n delete_tokens = self.delete_tokens(graph)\n self.fst = delete_tokens.optimize()\n",
"step-3": "<mask token>\ntry:\n import pynini\n from pynini.lib import pynutil\n fem = pynini.string_file(get_abs_path('data/money/currency_plural_fem.tsv')\n )\n masc = pynini.string_file(get_abs_path(\n 'data/money/currency_plural_masc.tsv'))\n fem_singular = pynini.project(fem, 'input')\n masc_singular = pynini.project(masc, 'input')\n fem_plural = pynini.project(fem, 'output')\n masc_plural = pynini.project(masc, 'output')\n PYNINI_AVAILABLE = True\nexcept (ModuleNotFoundError, ImportError):\n fem_plural = None\n masc_plural = None\n fem_singular = None\n masc_singular = None\n PYNINI_AVAILABLE = False\n\n\nclass MoneyFst(GraphFst):\n \"\"\"\n Finite state transducer for verbalizing money, e.g.\n money { currency_maj: \"euro\" integer_part: \"un\"} -> \"un euro\"\n money { currency_maj: \"euro\" integer_part: \"un\" fractional_part: \"cero cero un\"} -> \"uno coma cero cero uno euros\"\n money { integer_part: \"un\" currency_maj: \"libra\" fractional_part: \"cuarenta\" preserve_order: true} -> \"una libra cuarenta\"\n money { integer_part: \"un\" currency_maj: \"libra\" fractional_part: \"cuarenta\" currency_min: \"peniques\" preserve_order: true} -> \"una libra con cuarenta peniques\"\n money { fractional_part: \"un\" currency_min: \"penique\" preserve_order: true} -> \"un penique\"\n\n Args:\n decimal: GraphFst\n deterministic: if True will provide a single transduction option,\n for False multiple transduction are generated (used for audio-based normalization)\n \"\"\"\n\n def __init__(self, decimal: GraphFst, deterministic: bool=True):\n super().__init__(name='money', kind='verbalize', deterministic=\n deterministic)\n maj_singular_masc = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('\"')\n maj_singular_fem = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('\"')\n maj_plural_masc = pynutil.delete('currency_maj: \"') + pynini.closure(\n 
NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('\"')\n maj_plural_fem = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('\"')\n maj_masc = maj_plural_masc | maj_singular_masc\n maj_fem = maj_plural_fem | maj_singular_fem\n min_singular_masc = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('\"')\n min_singular_fem = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('\"')\n min_plural_masc = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('\"')\n min_plural_fem = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('\"')\n min_masc = min_plural_masc | min_singular_masc\n min_fem = min_plural_fem | min_singular_fem\n fractional_part = pynutil.delete('fractional_part: \"'\n ) + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('\"')\n integer_part = pynutil.delete('integer_part: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) + pynutil.delete('\"')\n optional_add_and = pynini.closure(pynutil.insert(pynini.union(\n 'con ', 'y ')), 0, 1)\n graph_integer_masc = integer_part + NEMO_SPACE + maj_masc\n graph_integer_fem = shift_cardinal_gender(integer_part\n ) + NEMO_SPACE + maj_fem\n graph_integer = graph_integer_fem | graph_integer_masc\n graph_integer_with_minor_masc = (graph_integer_masc + NEMO_SPACE +\n pynini.union(optional_add_and + strip_cardinal_apocope(\n fractional_part), optional_add_and + fractional_part +\n NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender\n (fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)\n graph_integer_with_minor_fem = (graph_integer_fem + NEMO_SPACE +\n pynini.union(optional_add_and + shift_cardinal_gender(\n fractional_part), optional_add_and + fractional_part +\n NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender\n 
(fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)\n graph_integer_with_minor = (graph_integer_with_minor_fem |\n graph_integer_with_minor_masc)\n graph_decimal_masc = decimal.graph_masc + NEMO_SPACE + maj_masc\n graph_decimal_fem = decimal.graph_fem\n graph_decimal_fem |= decimal.numbers_only_quantity\n graph_decimal_fem += NEMO_SPACE + maj_fem\n graph_decimal = graph_decimal_fem | graph_decimal_masc\n graph_decimal = pynini.cdrewrite(pynutil.insert(' de'), \n 'quantity: \"' + pynini.closure(NEMO_NOT_QUOTE, 1), '\"', NEMO_SIGMA\n ) @ graph_decimal\n graph_minor_masc = (fractional_part + NEMO_SPACE + min_masc +\n delete_preserve_order)\n graph_minor_fem = shift_cardinal_gender(fractional_part\n ) + NEMO_SPACE + min_fem + delete_preserve_order\n graph_minor = graph_minor_fem | graph_minor_masc\n graph = (graph_integer | graph_integer_with_minor | graph_decimal |\n graph_minor)\n delete_tokens = self.delete_tokens(graph)\n self.fst = delete_tokens.optimize()\n",
"step-4": "from nemo_text_processing.text_normalization.en.graph_utils import NEMO_NOT_QUOTE, NEMO_SIGMA, NEMO_SPACE, GraphFst, delete_preserve_order\nfrom nemo_text_processing.text_normalization.es.graph_utils import shift_cardinal_gender, strip_cardinal_apocope\nfrom nemo_text_processing.text_normalization.es.utils import get_abs_path\ntry:\n import pynini\n from pynini.lib import pynutil\n fem = pynini.string_file(get_abs_path('data/money/currency_plural_fem.tsv')\n )\n masc = pynini.string_file(get_abs_path(\n 'data/money/currency_plural_masc.tsv'))\n fem_singular = pynini.project(fem, 'input')\n masc_singular = pynini.project(masc, 'input')\n fem_plural = pynini.project(fem, 'output')\n masc_plural = pynini.project(masc, 'output')\n PYNINI_AVAILABLE = True\nexcept (ModuleNotFoundError, ImportError):\n fem_plural = None\n masc_plural = None\n fem_singular = None\n masc_singular = None\n PYNINI_AVAILABLE = False\n\n\nclass MoneyFst(GraphFst):\n \"\"\"\n Finite state transducer for verbalizing money, e.g.\n money { currency_maj: \"euro\" integer_part: \"un\"} -> \"un euro\"\n money { currency_maj: \"euro\" integer_part: \"un\" fractional_part: \"cero cero un\"} -> \"uno coma cero cero uno euros\"\n money { integer_part: \"un\" currency_maj: \"libra\" fractional_part: \"cuarenta\" preserve_order: true} -> \"una libra cuarenta\"\n money { integer_part: \"un\" currency_maj: \"libra\" fractional_part: \"cuarenta\" currency_min: \"peniques\" preserve_order: true} -> \"una libra con cuarenta peniques\"\n money { fractional_part: \"un\" currency_min: \"penique\" preserve_order: true} -> \"un penique\"\n\n Args:\n decimal: GraphFst\n deterministic: if True will provide a single transduction option,\n for False multiple transduction are generated (used for audio-based normalization)\n \"\"\"\n\n def __init__(self, decimal: GraphFst, deterministic: bool=True):\n super().__init__(name='money', kind='verbalize', deterministic=\n deterministic)\n maj_singular_masc = 
pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('\"')\n maj_singular_fem = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('\"')\n maj_plural_masc = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('\"')\n maj_plural_fem = pynutil.delete('currency_maj: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('\"')\n maj_masc = maj_plural_masc | maj_singular_masc\n maj_fem = maj_plural_fem | maj_singular_fem\n min_singular_masc = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_singular + pynutil.delete('\"')\n min_singular_fem = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_singular + pynutil.delete('\"')\n min_plural_masc = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ masc_plural + pynutil.delete('\"')\n min_plural_fem = pynutil.delete('currency_min: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) @ fem_plural + pynutil.delete('\"')\n min_masc = min_plural_masc | min_singular_masc\n min_fem = min_plural_fem | min_singular_fem\n fractional_part = pynutil.delete('fractional_part: \"'\n ) + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete('\"')\n integer_part = pynutil.delete('integer_part: \"') + pynini.closure(\n NEMO_NOT_QUOTE, 1) + pynutil.delete('\"')\n optional_add_and = pynini.closure(pynutil.insert(pynini.union(\n 'con ', 'y ')), 0, 1)\n graph_integer_masc = integer_part + NEMO_SPACE + maj_masc\n graph_integer_fem = shift_cardinal_gender(integer_part\n ) + NEMO_SPACE + maj_fem\n graph_integer = graph_integer_fem | graph_integer_masc\n graph_integer_with_minor_masc = (graph_integer_masc + NEMO_SPACE +\n pynini.union(optional_add_and + strip_cardinal_apocope(\n fractional_part), optional_add_and + fractional_part +\n NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender\n 
(fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)\n graph_integer_with_minor_fem = (graph_integer_fem + NEMO_SPACE +\n pynini.union(optional_add_and + shift_cardinal_gender(\n fractional_part), optional_add_and + fractional_part +\n NEMO_SPACE + min_masc, optional_add_and + shift_cardinal_gender\n (fractional_part) + NEMO_SPACE + min_fem) + delete_preserve_order)\n graph_integer_with_minor = (graph_integer_with_minor_fem |\n graph_integer_with_minor_masc)\n graph_decimal_masc = decimal.graph_masc + NEMO_SPACE + maj_masc\n graph_decimal_fem = decimal.graph_fem\n graph_decimal_fem |= decimal.numbers_only_quantity\n graph_decimal_fem += NEMO_SPACE + maj_fem\n graph_decimal = graph_decimal_fem | graph_decimal_masc\n graph_decimal = pynini.cdrewrite(pynutil.insert(' de'), \n 'quantity: \"' + pynini.closure(NEMO_NOT_QUOTE, 1), '\"', NEMO_SIGMA\n ) @ graph_decimal\n graph_minor_masc = (fractional_part + NEMO_SPACE + min_masc +\n delete_preserve_order)\n graph_minor_fem = shift_cardinal_gender(fractional_part\n ) + NEMO_SPACE + min_fem + delete_preserve_order\n graph_minor = graph_minor_fem | graph_minor_masc\n graph = (graph_integer | graph_integer_with_minor | graph_decimal |\n graph_minor)\n delete_tokens = self.delete_tokens(graph)\n self.fst = delete_tokens.optimize()\n",
"step-5": "# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom nemo_text_processing.text_normalization.en.graph_utils import (\n NEMO_NOT_QUOTE,\n NEMO_SIGMA,\n NEMO_SPACE,\n GraphFst,\n delete_preserve_order,\n)\nfrom nemo_text_processing.text_normalization.es.graph_utils import shift_cardinal_gender, strip_cardinal_apocope\nfrom nemo_text_processing.text_normalization.es.utils import get_abs_path\n\ntry:\n import pynini\n from pynini.lib import pynutil\n\n fem = pynini.string_file((get_abs_path(\"data/money/currency_plural_fem.tsv\")))\n masc = pynini.string_file((get_abs_path(\"data/money/currency_plural_masc.tsv\")))\n\n fem_singular = pynini.project(fem, \"input\")\n masc_singular = pynini.project(masc, \"input\")\n\n fem_plural = pynini.project(fem, \"output\")\n masc_plural = pynini.project(masc, \"output\")\n\n PYNINI_AVAILABLE = True\n\nexcept (ModuleNotFoundError, ImportError):\n fem_plural = None\n masc_plural = None\n\n fem_singular = None\n masc_singular = None\n\n PYNINI_AVAILABLE = False\n\n\nclass MoneyFst(GraphFst):\n \"\"\"\n Finite state transducer for verbalizing money, e.g.\n money { currency_maj: \"euro\" integer_part: \"un\"} -> \"un euro\"\n money { currency_maj: \"euro\" integer_part: \"un\" fractional_part: \"cero cero un\"} -> \"uno coma cero cero uno euros\"\n money { integer_part: \"un\" currency_maj: \"libra\" fractional_part: \"cuarenta\" preserve_order: 
true} -> \"una libra cuarenta\"\n money { integer_part: \"un\" currency_maj: \"libra\" fractional_part: \"cuarenta\" currency_min: \"peniques\" preserve_order: true} -> \"una libra con cuarenta peniques\"\n money { fractional_part: \"un\" currency_min: \"penique\" preserve_order: true} -> \"un penique\"\n\n Args:\n decimal: GraphFst\n deterministic: if True will provide a single transduction option,\n for False multiple transduction are generated (used for audio-based normalization)\n \"\"\"\n\n def __init__(self, decimal: GraphFst, deterministic: bool = True):\n super().__init__(name=\"money\", kind=\"verbalize\", deterministic=deterministic)\n\n maj_singular_masc = (\n pynutil.delete(\"currency_maj: \\\"\")\n + (pynini.closure(NEMO_NOT_QUOTE, 1) @ masc_singular)\n + pynutil.delete(\"\\\"\")\n )\n maj_singular_fem = (\n pynutil.delete(\"currency_maj: \\\"\")\n + (pynini.closure(NEMO_NOT_QUOTE, 1) @ fem_singular)\n + pynutil.delete(\"\\\"\")\n )\n\n maj_plural_masc = (\n pynutil.delete(\"currency_maj: \\\"\")\n + (pynini.closure(NEMO_NOT_QUOTE, 1) @ masc_plural)\n + pynutil.delete(\"\\\"\")\n )\n maj_plural_fem = (\n pynutil.delete(\"currency_maj: \\\"\")\n + (pynini.closure(NEMO_NOT_QUOTE, 1) @ fem_plural)\n + pynutil.delete(\"\\\"\")\n )\n\n maj_masc = maj_plural_masc | maj_singular_masc # Tagger kept quantity resolution stable\n maj_fem = maj_plural_fem | maj_singular_fem\n\n min_singular_masc = (\n pynutil.delete(\"currency_min: \\\"\")\n + (pynini.closure(NEMO_NOT_QUOTE, 1) @ masc_singular)\n + pynutil.delete(\"\\\"\")\n )\n min_singular_fem = (\n pynutil.delete(\"currency_min: \\\"\")\n + (pynini.closure(NEMO_NOT_QUOTE, 1) @ fem_singular)\n + pynutil.delete(\"\\\"\")\n )\n\n min_plural_masc = (\n pynutil.delete(\"currency_min: \\\"\")\n + (pynini.closure(NEMO_NOT_QUOTE, 1) @ masc_plural)\n + pynutil.delete(\"\\\"\")\n )\n min_plural_fem = (\n pynutil.delete(\"currency_min: \\\"\")\n + (pynini.closure(NEMO_NOT_QUOTE, 1) @ fem_plural)\n + 
pynutil.delete(\"\\\"\")\n )\n\n min_masc = min_plural_masc | min_singular_masc\n min_fem = min_plural_fem | min_singular_fem\n\n fractional_part = (\n pynutil.delete(\"fractional_part: \\\"\") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete(\"\\\"\")\n )\n\n integer_part = pynutil.delete(\"integer_part: \\\"\") + pynini.closure(NEMO_NOT_QUOTE, 1) + pynutil.delete(\"\\\"\")\n optional_add_and = pynini.closure(pynutil.insert(pynini.union(\"con \", \"y \")), 0, 1)\n\n # *** currency_maj\n graph_integer_masc = integer_part + NEMO_SPACE + maj_masc\n graph_integer_fem = shift_cardinal_gender(integer_part) + NEMO_SPACE + maj_fem\n\n graph_integer = graph_integer_fem | graph_integer_masc\n\n # *** currency_maj + (***) | ((con) *** current_min)\n graph_integer_with_minor_masc = (\n graph_integer_masc\n + NEMO_SPACE\n + pynini.union(\n optional_add_and + strip_cardinal_apocope(fractional_part),\n (optional_add_and + fractional_part + NEMO_SPACE + min_masc),\n (optional_add_and + shift_cardinal_gender(fractional_part) + NEMO_SPACE + min_fem),\n ) # Could be minor currency that is different gender\n + delete_preserve_order\n )\n\n graph_integer_with_minor_fem = (\n graph_integer_fem\n + NEMO_SPACE\n + pynini.union(\n optional_add_and + shift_cardinal_gender(fractional_part),\n (optional_add_and + fractional_part + NEMO_SPACE + min_masc),\n (optional_add_and + shift_cardinal_gender(fractional_part) + NEMO_SPACE + min_fem),\n ) # Could be minor currency that is different gender\n + delete_preserve_order\n )\n\n graph_integer_with_minor = graph_integer_with_minor_fem | graph_integer_with_minor_masc\n\n ## *** coma *** currency_maj\n graph_decimal_masc = decimal.graph_masc + NEMO_SPACE + maj_masc\n\n graph_decimal_fem = decimal.graph_fem\n graph_decimal_fem |= decimal.numbers_only_quantity # can still have \"x billions\" with fem currency\n graph_decimal_fem += NEMO_SPACE + maj_fem\n\n graph_decimal = graph_decimal_fem | graph_decimal_masc\n graph_decimal = (\n 
pynini.cdrewrite(\n pynutil.insert(\" de\"), \"quantity: \\\"\" + pynini.closure(NEMO_NOT_QUOTE, 1), \"\\\"\", NEMO_SIGMA\n )\n @ graph_decimal\n ) # formally it's millones/billones de ***\n\n # *** current_min\n graph_minor_masc = fractional_part + NEMO_SPACE + min_masc + delete_preserve_order\n graph_minor_fem = shift_cardinal_gender(fractional_part) + NEMO_SPACE + min_fem + delete_preserve_order\n\n graph_minor = graph_minor_fem | graph_minor_masc\n\n graph = graph_integer | graph_integer_with_minor | graph_decimal | graph_minor\n\n delete_tokens = self.delete_tokens(graph)\n self.fst = delete_tokens.optimize()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
"""Structured Streaming word count over a local socket source.

Reads a text stream from localhost:9999, splits each line into words,
and prints a running count of every word to the console every 8 seconds.
"""
from pyspark.sql import SparkSession
from pyspark.sql.functions import split, explode


if __name__ == "__main__":
    # Local 4-core session for the streaming job.
    session = (
        SparkSession.builder
        .appName('StructedSocketWordCount')
        .master('local[4]')
        .getOrCreate()
    )
    session.sparkContext.setLogLevel('WARN')

    # Unbounded table of lines arriving on the socket source.
    lines = (
        session.readStream
        .format('socket')
        .option('host', 'localhost')
        .option('port', 9999)
        .load()
    )

    # One row per word: split each line on spaces, then flatten the arrays.
    words = lines.select(explode(split(lines.value, ' ')).alias('word'))

    # Running aggregation over the full stream history.
    counts = words.groupBy('word').count()

    # Emit the complete result table to the console on each trigger.
    query = (
        counts.writeStream
        .outputMode('complete')
        .format('console')
        .trigger(processingTime="8 seconds")
        .start()
    )
    query.awaitTermination()
|
normal
|
{
"blob_id": "991260c268d53fbe73e9bff9990ac536ed802d7a",
"index": 6887,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n spark = SparkSession.builder.appName('StructedSocketWordCount').master(\n 'local[4]').getOrCreate()\n sc = spark.sparkContext\n sc.setLogLevel('WARN')\n lines = spark.readStream.format('socket').option('host', 'localhost'\n ).option('port', 9999).load()\n words = lines.select(explode(split(lines.value, ' ')).alias('word'))\n wordcounts = words.groupBy('word').count()\n query = wordcounts.writeStream.outputMode('complete').format('console'\n ).trigger(processingTime='8 seconds').start()\n query.awaitTermination()\n",
"step-3": "<mask token>\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import split, explode\nif __name__ == '__main__':\n spark = SparkSession.builder.appName('StructedSocketWordCount').master(\n 'local[4]').getOrCreate()\n sc = spark.sparkContext\n sc.setLogLevel('WARN')\n lines = spark.readStream.format('socket').option('host', 'localhost'\n ).option('port', 9999).load()\n words = lines.select(explode(split(lines.value, ' ')).alias('word'))\n wordcounts = words.groupBy('word').count()\n query = wordcounts.writeStream.outputMode('complete').format('console'\n ).trigger(processingTime='8 seconds').start()\n query.awaitTermination()\n",
"step-4": "'''\nAuthor: ulysses\nDate: 1970-01-01 08:00:00\nLastEditTime: 2020-08-03 15:44:57\nLastEditors: Please set LastEditors\nDescription: \n'''\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import split, explode\n\n\nif __name__ == \"__main__\":\n spark = SparkSession\\\n .builder\\\n .appName('StructedSocketWordCount')\\\n .master('local[4]')\\\n .getOrCreate()\n \n sc =spark.sparkContext\n sc.setLogLevel('WARN')\n\n # 从socket源读取stream\n lines = spark\\\n .readStream\\\n .format('socket')\\\n .option('host', 'localhost')\\\n .option('port', 9999)\\\n .load()\n \n words = lines.select(\n explode(\n split(lines.value, ' ') # 空格拆开\n ).alias('word') # 将一行列表 打开 一列数据\n )\n # word , count\n wordcounts = words.groupBy('word').count()\n \n # 输出\n query = wordcounts\\\n .writeStream\\\n .outputMode('complete')\\\n .format('console')\\\n .trigger(processingTime=\"8 seconds\")\\\n .start()\n \n query.awaitTermination()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
def _bounded_float(minimum, maximum, places):
    """Cerberus rule: required float within [minimum, maximum], coerced
    and rounded to *places* decimal places."""
    return {
        'type': 'float',
        'required': True,
        'min': minimum,
        'max': maximum,
        'coerce': (float, lambda value: round(value, places)),
    }


# Cerberus validation schema for a single monitored-location record.
user_schema = {
    # Caller-supplied identifier, normalized to lowercase.
    'id': {
        'type': 'string',
        'required': True,
        'coerce': (str, lambda value: value.lower()),
    },
    # Bounded coordinates, rounded to 5 decimal places.
    'latitude': _bounded_float(-60.0, 10, 5),
    'longitude': _bounded_float(-80.0, -30.0, 5),
    # Radius rounded to one decimal place; defaults to 10 when omitted.
    'radius': {
        'type': 'float',
        'default': 10,
        'max': 30.0,
        'min': 0.1,
        'coerce': (float, lambda value: round(value, 1)),
    },
    # Variables the record subscribes to; both are enabled by default.
    'variables': {
        'type': 'list',
        'default': ['lightning', 'precipitation'],
        'allowed': ['lightning', 'precipitation'],
    },
}
# Request-body schemas: each wraps user_schema under a 'payload' key.
# create accepts either one record or a list of records.
create_schema = {
    'payload': {
        'oneof': [
            {'type': 'list', 'schema': {'type': 'dict', 'schema': user_schema}},
            {'type': 'dict', 'schema': user_schema},
        ]
    }
}

# batch create only accepts a list of records.
batch_create_schema = {
    'payload': {
        'type': 'list',
        'schema': {'type': 'dict', 'schema': user_schema},
    }
}

# Single-record payload.
payload_schema = {
    'payload': {'type': 'dict', 'schema': user_schema}
}
# Schema of the AWS event: only pathParameters is validated, and it must
# carry the target 'uid' when present (defaults to an empty dict).
event_schema = {
    'pathParameters': {
        'type': 'dict',
        'default': {},
        'schema': {
            'uid': {'type': 'string', 'required': True},
        },
    }
}
|
normal
|
{
"blob_id": "bf41ab20b9fae9f19efdc58852e48d9b735f34c3",
"index": 1645,
"step-1": "<mask token>\n",
"step-2": "user_schema = {'id': {'type': 'string', 'required': True, 'coerce': (str, \n lambda x: x.lower())}, 'latitude': {'type': 'float', 'required': True,\n 'min': -60.0, 'max': 10, 'coerce': (float, lambda x: round(x, 5))},\n 'longitude': {'type': 'float', 'required': True, 'min': -80.0, 'max': -\n 30.0, 'coerce': (float, lambda x: round(x, 5))}, 'radius': {'type':\n 'float', 'default': 10, 'max': 30.0, 'min': 0.1, 'coerce': (float, lambda\n x: round(x, 1))}, 'variables': {'type': 'list', 'default': ['lightning',\n 'precipitation'], 'allowed': ['lightning', 'precipitation']}}\ncreate_schema = {'payload': {'oneof': [{'type': 'list', 'schema': {'type':\n 'dict', 'schema': user_schema}}, {'type': 'dict', 'schema': user_schema}]}}\nbatch_create_schema = {'payload': {'type': 'list', 'schema': {'type':\n 'dict', 'schema': user_schema}}}\npayload_schema = {'payload': {'type': 'dict', 'schema': user_schema}}\nevent_schema = {'pathParameters': {'type': 'dict', 'default': {}, 'schema':\n {'uid': {'type': 'string', 'required': True}}}}\n",
"step-3": "user_schema = { \n 'id': {\n 'type': 'string',\n 'required': True,\n 'coerce': (str, lambda x: x.lower())\n },\n 'latitude':{\n 'type': 'float',\n 'required': True,\n 'min': -60.0,\n 'max': 10,\n 'coerce': (float, lambda x: round(x, 5))\n },\n 'longitude':{\n 'type': 'float',\n 'required': True,\n 'min': -80.0,\n 'max': -30.0,\n 'coerce': (float, lambda x: round(x, 5))\n },\n 'radius':{\n 'type': 'float',\n 'default': 10,\n 'max': 30.0,\n 'min': 0.1,\n 'coerce': (float, lambda x: round(x, 1))\n },\n 'variables':{\n 'type': 'list',\n 'default': ['lightning','precipitation'],\n 'allowed': [\n 'lightning',\n 'precipitation'\n ]\n } \n}\n\ncreate_schema = {\n 'payload':{\n 'oneof':[\n {\n 'type': 'list',\n 'schema':{\n 'type': 'dict',\n 'schema': user_schema\n }\n },\n {\n 'type': 'dict',\n 'schema': user_schema\n \n }\n\n ]\n }\n}\n\nbatch_create_schema = {\n 'payload':{\n 'type': 'list',\n 'schema':{\n 'type': 'dict',\n 'schema': user_schema\n }\n }\n}\n\npayload_schema = {\n 'payload':{\n 'type': 'dict',\n 'schema': user_schema\n }\n}\n\n# Schema of AWS event\nevent_schema = {\n 'pathParameters':{\n 'type': 'dict',\n 'default': {},\n 'schema':{\n 'uid':{\n 'type': 'string',\n 'required': True,\n },\n }\n }\n}",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import copy
import numpy as np
from PySide2.QtCore import QItemSelectionModel, QObject, Signal
from PySide2.QtWidgets import (
QComboBox, QLineEdit, QSizePolicy, QTableWidgetItem
)
from hexrd.constants import chargestate
from hexrd.material import Material
from hexrd.ui.periodic_table_dialog import PeriodicTableDialog
from hexrd.ui.scientificspinbox import ScientificDoubleSpinBox
from hexrd.ui.thermal_factor_editor import ThermalFactorEditor
from hexrd.ui.ui_loader import UiLoader
from hexrd.ui.utils import block_signals
# Column indices of the atoms table defined in material_site_editor.ui.
COLUMNS = {
    'symbol': 0,
    'charge': 1,
    'occupancy': 2,
    'thermal_factor': 3
}

# Defaults applied to atoms newly added to a site.
DEFAULT_CHARGE = '0'
DEFAULT_U = Material.DFLT_U[0]

# Allowed range for the per-atom occupancy spin boxes.
OCCUPATION_MIN = 0
OCCUPATION_MAX = 1

# Allowed range for the thermal factor spin boxes (in either U or B form).
THERMAL_FACTOR_MIN = -1.e7
THERMAL_FACTOR_MAX = 1.e7

# Conversion factors between the two thermal factor representations:
# B = 8 * pi**2 * U (see the U()/B() helpers on MaterialSiteEditor).
U_TO_B = 8 * np.pi ** 2
B_TO_U = 1 / U_TO_B
class MaterialSiteEditor(QObject):
    """Editor for a single atomic site of a material.

    Loads ``material_site_editor.ui`` and presents the site's fractional
    coordinates plus a table of atoms (symbol, charge, occupancy,
    thermal factor).  All edits are written back into the ``site`` dict
    passed to the constructor, and ``site_modified`` is emitted whenever
    the site changes while in a valid state.
    """

    # Emitted after any edit that leaves the site valid
    # (see emit_site_modified_if_valid()).
    site_modified = Signal()

    def __init__(self, site, parent=None):
        """Load the UI, populate it from ``site``, and wire up signals.

        ``site`` is a dict with (at least) 'atoms' and
        'fractional_coords' entries; it is modified in place.
        """
        super().__init__(parent)

        loader = UiLoader()
        self.ui = loader.load_file('material_site_editor.ui', parent)

        self._site = site

        # Per-row cell widgets, kept in table-row order so they can be
        # zipped against self.atoms when reading values back
        # (see update_config()).
        self.charge_comboboxes = []
        self.occupancy_spinboxes = []
        self.thermal_factor_spinboxes = []

        self.update_gui()

        self.setup_connections()

    def setup_connections(self):
        """Connect UI signals to their handlers (called once)."""
        self.ui.select_atom_types.pressed.connect(self.select_atom_types)
        self.ui.thermal_factor_type.currentIndexChanged.connect(
            self.thermal_factor_type_changed)
        for w in self.site_settings_widgets:
            w.valueChanged.connect(self.update_config)
        self.ui.table.selectionModel().selectionChanged.connect(
            self.selection_changed)
        self.ui.remove_atom_type.pressed.connect(self.remove_selected_atom)
        self.ui.convert_u_to_tensors.toggled.connect(self.convert_u_to_tensors)

    def select_atom_types(self):
        """Open the periodic table dialog and apply the chosen symbols."""
        dialog = PeriodicTableDialog(self.atom_types, self.ui)
        if not dialog.exec_():
            return

        self.atom_types = dialog.selected_atoms

    @property
    def site(self):
        # The site dict currently being edited.
        return self._site

    @site.setter
    def site(self, v):
        # Replace the edited site and refresh the whole GUI from it.
        self._site = v
        self.update_gui()

    @property
    def atoms(self):
        # The site's list of atom dicts
        # (keys: 'symbol', 'charge', 'occupancy', 'U').
        return self.site['atoms']

    @property
    def total_occupancy(self):
        # Sum of all atom occupancies; must be <= 1 for the site to be valid.
        return sum(x['occupancy'] for x in self.atoms)

    @property
    def fractional_coords(self):
        # The site's fractional coordinates (mutable; edited in place).
        return self.site['fractional_coords']

    @property
    def thermal_factor_type(self):
        # Display representation selected in the combo box.
        # Anything other than 'U' or 'B' raises in U()/B()/thermal_factor().
        return self.ui.thermal_factor_type.currentText()

    def U(self, val):
        # Take a thermal factor from a spin box and convert it to U
        type = self.thermal_factor_type
        if type == 'U':
            multiplier = 1
        elif type == 'B':
            multiplier = B_TO_U
        else:
            raise Exception(f'Unknown type: {type}')

        return val * multiplier

    def B(self, val):
        # Take a thermal factor from a spin box and convert it to B
        type = self.thermal_factor_type
        if type == 'U':
            multiplier = U_TO_B
        elif type == 'B':
            multiplier = 1
        else:
            raise Exception(f'Unknown type: {type}')

        return val * multiplier

    def thermal_factor(self, atom):
        # Given an atom, return the thermal factor in either B or U
        # (atoms always store 'U'; convert for display as needed).
        type = self.thermal_factor_type
        if type == 'U':
            multiplier = 1
        elif type == 'B':
            multiplier = U_TO_B
        else:
            raise Exception(f'Unknown type: {type}')

        return atom['U'] * multiplier

    @property
    def atom_types(self):
        # Atom symbols currently on this site, in table-row order.
        return [x['symbol'] for x in self.site['atoms']]

    @atom_types.setter
    def atom_types(self, v):
        """Replace the site's atoms with the given symbols.

        U values and charges are carried over for symbols that were
        already present; occupancies are always reset to an even split.
        """
        if v == self.atom_types:
            # No changes needed...
            return

        # Reset all the occupancies
        atoms = self.atoms
        previous_u_values = {x['symbol']: x['U'] for x in atoms}
        previous_charges = {x['symbol']: x['charge'] for x in atoms}
        atoms.clear()
        for symbol in v:
            # Use previous values if available. Otherwise, use the defaults.
            atom = {
                'symbol': symbol,
                'U': previous_u_values.get(symbol, DEFAULT_U),
                'charge': previous_charges.get(symbol, DEFAULT_CHARGE),
            }
            atoms.append(atom)

        self.reset_occupancies()
        self.update_table()
        self.emit_site_modified_if_valid()

    @property
    def num_rows(self):
        # Number of rows currently in the atoms table.
        return self.ui.table.rowCount()

    @property
    def selected_row(self):
        # Index of the selected table row, or None if nothing is selected.
        selected = self.ui.table.selectionModel().selectedRows()
        return selected[0].row() if selected else None

    def select_row(self, i):
        """Programmatically select row ``i`` of the atoms table."""
        if i is None or i >= self.num_rows:
            # Out of range. Don't do anything.
            return

        # Select the row
        selection_model = self.ui.table.selectionModel()
        selection_model.clearSelection()

        model_index = selection_model.model().index(i, 0)
        command = QItemSelectionModel.Select | QItemSelectionModel.Rows
        selection_model.select(model_index, command)

    def selection_changed(self):
        # Slot for the table's selectionChanged signal.
        self.update_enable_states()

    def update_enable_states(self):
        # The last remaining atom may not be removed.
        enable_remove = self.num_rows > 1 and self.selected_row is not None
        self.ui.remove_atom_type.setEnabled(enable_remove)

    def remove_selected_atom(self):
        """Remove the atom in the selected row (via the atom_types setter)."""
        if self.selected_row is None:
            return

        atom_types = self.atom_types
        del atom_types[self.selected_row]
        self.atom_types = atom_types

    def create_symbol_label(self, v):
        # Plain table item showing the atom symbol.
        w = QTableWidgetItem(v)
        return w

    def create_charge_combobox(self, charge, symbol):
        """Create the charge-state combo box for one table row."""
        cb = QComboBox(self.ui.table)
        if charge not in chargestate[symbol]:
            raise Exception(f'Invalid charge {charge} for {symbol}')

        cb.addItems(chargestate[symbol])
        cb.setCurrentText(charge)
        cb.currentIndexChanged.connect(self.update_config)

        size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        cb.setSizePolicy(size_policy)

        self.charge_comboboxes.append(cb)
        return cb

    def create_occupancy_spinbox(self, v):
        """Create the occupancy spin box for one table row."""
        sb = ScientificDoubleSpinBox(self.ui.table)
        # Avoid emitting valueChanged on every keystroke.
        sb.setKeyboardTracking(False)
        sb.setMinimum(OCCUPATION_MIN)
        sb.setMaximum(OCCUPATION_MAX)
        sb.setValue(v)
        sb.valueChanged.connect(self.update_config)

        size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        sb.setSizePolicy(size_policy)

        self.occupancy_spinboxes.append(sb)
        return sb

    def create_thermal_factor_spinbox(self, v):
        """Create the thermal factor spin box (scalar or tensor aware)."""
        sb = ThermalFactorSpinBox(self.ui.table)
        sb.setKeyboardTracking(False)
        sb.setMinimum(THERMAL_FACTOR_MIN)
        sb.setMaximum(THERMAL_FACTOR_MAX)
        sb.setValue(v)
        sb.valueChanged.connect(self.update_config)
        sb.setToolTip('Double-click to open tensor editor')

        size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        sb.setSizePolicy(size_policy)

        self.thermal_factor_spinboxes.append(sb)
        return sb

    def clear_table(self):
        """Clear the table and drop the cached per-row widget lists."""
        self.charge_comboboxes.clear()
        self.occupancy_spinboxes.clear()
        self.thermal_factor_spinboxes.clear()
        self.ui.table.clearContents()

    def update_gui(self):
        """Refresh all widgets from the current site data."""
        # Block signals so the refresh does not trigger update_config().
        with block_signals(*self.site_settings_widgets):
            for i, w in enumerate(self.fractional_coords_widgets):
                w.setValue(self.fractional_coords[i])

            self.update_total_occupancy()
            self.update_table()
            self.reset_scalar_tensor_toggle()

    def reset_scalar_tensor_toggle(self):
        # Check the "convert to tensors" box only when every thermal
        # factor is already a tensor.
        any_scalars = any(not isinstance(w.value(), np.ndarray)
                          for w in self.thermal_factor_spinboxes)
        with block_signals(self.ui.convert_u_to_tensors):
            self.ui.convert_u_to_tensors.setChecked(not any_scalars)

    def update_table(self):
        """Rebuild the atoms table from the site data, keeping selection."""
        prev_selected = self.selected_row

        block_list = [
            self.ui.table,
            self.ui.table.selectionModel()
        ]
        with block_signals(*block_list):
            atoms = self.site['atoms']
            self.clear_table()
            self.ui.table.setRowCount(len(atoms))
            for i, atom in enumerate(atoms):
                w = self.create_symbol_label(atom['symbol'])
                self.ui.table.setItem(i, COLUMNS['symbol'], w)

                w = self.create_charge_combobox(atom['charge'], atom['symbol'])
                self.ui.table.setCellWidget(i, COLUMNS['charge'], w)

                w = self.create_occupancy_spinbox(atom['occupancy'])
                self.ui.table.setCellWidget(i, COLUMNS['occupancy'], w)

                v = self.thermal_factor(atom)
                w = self.create_thermal_factor_spinbox(v)
                self.ui.table.setCellWidget(i, COLUMNS['thermal_factor'], w)

            self.update_occupancy_validity()

            if prev_selected is not None:
                select_row = (prev_selected if prev_selected < self.num_rows
                              else self.num_rows - 1)
                self.select_row(select_row)

        # Just in case the selection actually changed...
        self.selection_changed()

    def thermal_factor_type_changed(self):
        """Handle a switch between the 'U' and 'B' representations."""
        self.update_thermal_factor_header()
        self.update_table()

        # Update the text for the tensor toggle as well
        text = f'Convert {self.thermal_factor_type} to tensors'
        self.ui.convert_u_to_tensors.setText(text)

    def update_thermal_factor_header(self):
        # Show the active representation ('U' or 'B') in the column header.
        w = self.ui.table.horizontalHeaderItem(COLUMNS['thermal_factor'])
        w.setText(self.thermal_factor_type)

    def update_config(self):
        """Write all widget values back into the site dict.

        The widget lists are in table-row order (see update_table), so
        they can be zipped directly against self.atoms.
        """
        for i, w in enumerate(self.fractional_coords_widgets):
            self.fractional_coords[i] = w.value()

        for atom, combobox in zip(self.atoms, self.charge_comboboxes):
            atom['charge'] = combobox.currentText()

        for atom, spinbox in zip(self.atoms, self.occupancy_spinboxes):
            atom['occupancy'] = spinbox.value()

        for atom, spinbox in zip(self.atoms, self.thermal_factor_spinboxes):
            # Atoms always store U; convert from the display type.
            atom['U'] = self.U(spinbox.value())

        self.update_total_occupancy()
        self.update_occupancy_validity()

        self.emit_site_modified_if_valid()

    def update_total_occupancy(self):
        self.ui.total_occupancy.setValue(self.total_occupancy)

    def reset_occupancies(self):
        # Split a total occupancy of 1.0 evenly among all atoms.
        total = 1.0
        atoms = self.atoms
        num_atoms = len(atoms)
        for atom in atoms:
            atom['occupancy'] = total / num_atoms

        self.update_total_occupancy()
        self.update_occupancy_validity()

    @property
    def site_valid(self):
        # Currently, occupancy is the only validity criterion.
        return self.occupancies_valid

    @property
    def occupancies_valid(self):
        return self.total_occupancy <= 1.0

    def update_occupancy_validity(self):
        # Highlight the total occupancy in red when it exceeds 1.
        valid = self.occupancies_valid
        color = 'white' if valid else 'red'
        msg = '' if valid else 'Sum of occupancies must be <= 1'
        self.ui.total_occupancy.setStyleSheet(f'background-color: {color}')
        self.ui.total_occupancy.setToolTip(msg)

    def emit_site_modified_if_valid(self):
        # Only notify listeners about valid site states.
        if not self.site_valid:
            return

        self.site_modified.emit()

    @property
    def fractional_coords_widgets(self):
        # Spin boxes for the x/y/z fractional coordinates, in order.
        return [
            self.ui.coords_x,
            self.ui.coords_y,
            self.ui.coords_z
        ]

    @property
    def site_settings_widgets(self):
        # All widgets whose valueChanged should trigger update_config().
        return self.fractional_coords_widgets

    def convert_u_to_tensors(self, b):
        """Convert all thermal factors to tensors (b=True) or scalars."""
        def scalar_to_tensor(spinbox):
            if isinstance(spinbox.value(), np.ndarray):
                # Already a tensor
                return

            # Diagonal tensor: first three components are the scalar,
            # off-diagonal components are zero.
            tensor = np.zeros(6, dtype=np.float64)
            tensor[:3] = spinbox.value()
            spinbox.setValue(tensor)

        def tensor_to_scalar(spinbox):
            value = spinbox.value()
            if not isinstance(value, np.ndarray):
                # Already a scalar
                return

            # Use the previous spinbox value if available
            scalar = spinbox.editor.ui.scalar_value.value()
            if (np.isclose(scalar, 0) and np.allclose(value[:3], value[0]) and
                    np.allclose(value[3:], 0)):
                # If the previous value is zero, and the tensor is diagonal,
                # use the diagonal value
                scalar = value[0]

            spinbox.setValue(scalar)

        f = scalar_to_tensor if b else tensor_to_scalar
        for spinbox in self.thermal_factor_spinboxes:
            f(spinbox)
class ThermalFactorSpinBox(ScientificDoubleSpinBox):
    """Spin box whose value may be a scalar or a 6-component tensor.

    The authoritative value lives on an attached ThermalFactorEditor
    dialog.  While a tensor is set, the widget becomes read-only and
    shows the text 'Tensor'; clicking it opens the tensor editor.
    """

    def __init__(self, parent=None):
        super().__init__(parent)
        self.editor = ThermalFactorEditor(0, parent)
        self.setLineEdit(ThermalFactorLineEdit(self, self))
        self.valueChanged.connect(self.update_editor_value)

    def value(self):
        # The editor holds the real value (scalar or tensor).
        return self.editor.value

    def setValue(self, v):
        self.editor.value = v
        tensor_mode = self.editor.is_tensor
        # In tensor mode, leave the base spin box at its current scalar
        # value (forcing a display refresh); otherwise show v directly.
        shown = super().value() if tensor_mode else v
        super().setValue(shown)
        self.valueChanged.emit(shown)
        # Tensors cannot be edited inline, so lock the line edit.
        self.setReadOnly(tensor_mode)

    def update_editor_value(self):
        # Keep the editor in sync while in scalar mode; tensor values
        # are only ever modified through the editor dialog itself.
        if self.editor.is_tensor:
            return
        self.editor.value = super().value()

    def textFromValue(self, value):
        # The base-class constructor calls this before self.editor
        # exists, hence the hasattr() guard.
        if hasattr(self, 'editor') and self.editor.is_tensor:
            return 'Tensor'
        return super().textFromValue(value)

    def open_editor(self):
        backup = copy.deepcopy(self.editor.value)
        if self.editor.exec_():
            self.setValue(self.editor.value)
        else:
            # Dialog was cancelled: restore the pre-edit value.
            self.editor.value = backup
class ThermalFactorLineEdit(QLineEdit):
    """Line edit used inside ThermalFactorSpinBox.

    A double click always opens the tensor editor.  A single click opens
    it only while the widget is read-only (i.e. a tensor is set);
    otherwise the click is handled normally for inline editing.
    """

    def __init__(self, spinbox, parent=None):
        super().__init__(parent)
        self.spinbox = spinbox

    def open_editor(self):
        # Delegate to the owning spin box, which runs the dialog.
        self.spinbox.open_editor()

    def mousePressEvent(self, event):
        if not self.isReadOnly():
            # Editable (scalar) mode: normal line-edit behavior.
            super().mousePressEvent(event)
            return
        self.open_editor()

    def mouseDoubleClickEvent(self, event):
        self.open_editor()
|
normal
|
{
"blob_id": "ec2be72f81d260c491cdc31b68b34401fb49b91e",
"index": 2660,
"step-1": "<mask token>\n\n\nclass MaterialSiteEditor(QObject):\n <mask token>\n\n def __init__(self, site, parent=None):\n super().__init__(parent)\n loader = UiLoader()\n self.ui = loader.load_file('material_site_editor.ui', parent)\n self._site = site\n self.charge_comboboxes = []\n self.occupancy_spinboxes = []\n self.thermal_factor_spinboxes = []\n self.update_gui()\n self.setup_connections()\n\n def setup_connections(self):\n self.ui.select_atom_types.pressed.connect(self.select_atom_types)\n self.ui.thermal_factor_type.currentIndexChanged.connect(self.\n thermal_factor_type_changed)\n for w in self.site_settings_widgets:\n w.valueChanged.connect(self.update_config)\n self.ui.table.selectionModel().selectionChanged.connect(self.\n selection_changed)\n self.ui.remove_atom_type.pressed.connect(self.remove_selected_atom)\n self.ui.convert_u_to_tensors.toggled.connect(self.convert_u_to_tensors)\n\n def select_atom_types(self):\n dialog = PeriodicTableDialog(self.atom_types, self.ui)\n if not dialog.exec_():\n return\n self.atom_types = dialog.selected_atoms\n\n @property\n def site(self):\n return self._site\n\n @site.setter\n def site(self, v):\n self._site = v\n self.update_gui()\n <mask token>\n <mask token>\n\n @property\n def fractional_coords(self):\n return self.site['fractional_coords']\n\n @property\n def thermal_factor_type(self):\n return self.ui.thermal_factor_type.currentText()\n\n def U(self, val):\n type = self.thermal_factor_type\n if type == 'U':\n multiplier = 1\n elif type == 'B':\n multiplier = B_TO_U\n else:\n raise Exception(f'Unknown type: {type}')\n return val * multiplier\n <mask token>\n\n def thermal_factor(self, atom):\n type = self.thermal_factor_type\n if type == 'U':\n multiplier = 1\n elif type == 'B':\n multiplier = U_TO_B\n else:\n raise Exception(f'Unknown type: {type}')\n return atom['U'] * multiplier\n <mask token>\n <mask token>\n\n @property\n def num_rows(self):\n return self.ui.table.rowCount()\n\n @property\n def 
selected_row(self):\n selected = self.ui.table.selectionModel().selectedRows()\n return selected[0].row() if selected else None\n\n def select_row(self, i):\n if i is None or i >= self.num_rows:\n return\n selection_model = self.ui.table.selectionModel()\n selection_model.clearSelection()\n model_index = selection_model.model().index(i, 0)\n command = QItemSelectionModel.Select | QItemSelectionModel.Rows\n selection_model.select(model_index, command)\n\n def selection_changed(self):\n self.update_enable_states()\n\n def update_enable_states(self):\n enable_remove = self.num_rows > 1 and self.selected_row is not None\n self.ui.remove_atom_type.setEnabled(enable_remove)\n <mask token>\n\n def create_symbol_label(self, v):\n w = QTableWidgetItem(v)\n return w\n\n def create_charge_combobox(self, charge, symbol):\n cb = QComboBox(self.ui.table)\n if charge not in chargestate[symbol]:\n raise Exception(f'Invalid charge {charge} for {symbol}')\n cb.addItems(chargestate[symbol])\n cb.setCurrentText(charge)\n cb.currentIndexChanged.connect(self.update_config)\n size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n cb.setSizePolicy(size_policy)\n self.charge_comboboxes.append(cb)\n return cb\n\n def create_occupancy_spinbox(self, v):\n sb = ScientificDoubleSpinBox(self.ui.table)\n sb.setKeyboardTracking(False)\n sb.setMinimum(OCCUPATION_MIN)\n sb.setMaximum(OCCUPATION_MAX)\n sb.setValue(v)\n sb.valueChanged.connect(self.update_config)\n size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n sb.setSizePolicy(size_policy)\n self.occupancy_spinboxes.append(sb)\n return sb\n\n def create_thermal_factor_spinbox(self, v):\n sb = ThermalFactorSpinBox(self.ui.table)\n sb.setKeyboardTracking(False)\n sb.setMinimum(THERMAL_FACTOR_MIN)\n sb.setMaximum(THERMAL_FACTOR_MAX)\n sb.setValue(v)\n sb.valueChanged.connect(self.update_config)\n sb.setToolTip('Double-click to open tensor editor')\n size_policy = QSizePolicy(QSizePolicy.Expanding, 
QSizePolicy.Expanding)\n sb.setSizePolicy(size_policy)\n self.thermal_factor_spinboxes.append(sb)\n return sb\n <mask token>\n\n def update_gui(self):\n with block_signals(*self.site_settings_widgets):\n for i, w in enumerate(self.fractional_coords_widgets):\n w.setValue(self.fractional_coords[i])\n self.update_total_occupancy()\n self.update_table()\n self.reset_scalar_tensor_toggle()\n\n def reset_scalar_tensor_toggle(self):\n any_scalars = any(not isinstance(w.value(), np.ndarray) for w in\n self.thermal_factor_spinboxes)\n with block_signals(self.ui.convert_u_to_tensors):\n self.ui.convert_u_to_tensors.setChecked(not any_scalars)\n <mask token>\n <mask token>\n <mask token>\n\n def update_config(self):\n for i, w in enumerate(self.fractional_coords_widgets):\n self.fractional_coords[i] = w.value()\n for atom, combobox in zip(self.atoms, self.charge_comboboxes):\n atom['charge'] = combobox.currentText()\n for atom, spinbox in zip(self.atoms, self.occupancy_spinboxes):\n atom['occupancy'] = spinbox.value()\n for atom, spinbox in zip(self.atoms, self.thermal_factor_spinboxes):\n atom['U'] = self.U(spinbox.value())\n self.update_total_occupancy()\n self.update_occupancy_validity()\n self.emit_site_modified_if_valid()\n <mask token>\n\n def reset_occupancies(self):\n total = 1.0\n atoms = self.atoms\n num_atoms = len(atoms)\n for atom in atoms:\n atom['occupancy'] = total / num_atoms\n self.update_total_occupancy()\n self.update_occupancy_validity()\n\n @property\n def site_valid(self):\n return self.occupancies_valid\n <mask token>\n\n def update_occupancy_validity(self):\n valid = self.occupancies_valid\n color = 'white' if valid else 'red'\n msg = '' if valid else 'Sum of occupancies must be <= 1'\n self.ui.total_occupancy.setStyleSheet(f'background-color: {color}')\n self.ui.total_occupancy.setToolTip(msg)\n <mask token>\n\n @property\n def fractional_coords_widgets(self):\n return [self.ui.coords_x, self.ui.coords_y, self.ui.coords_z]\n\n @property\n def 
site_settings_widgets(self):\n return self.fractional_coords_widgets\n <mask token>\n\n\nclass ThermalFactorSpinBox(ScientificDoubleSpinBox):\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.editor = ThermalFactorEditor(0, parent)\n self.setLineEdit(ThermalFactorLineEdit(self, self))\n self.valueChanged.connect(self.update_editor_value)\n\n def value(self):\n return self.editor.value\n\n def setValue(self, v):\n self.editor.value = v\n if self.editor.is_tensor:\n super().setValue(super().value())\n self.valueChanged.emit(super().value())\n self.setReadOnly(True)\n else:\n super().setValue(v)\n self.valueChanged.emit(v)\n self.setReadOnly(False)\n\n def update_editor_value(self):\n if not self.editor.is_tensor:\n self.editor.value = super().value()\n\n def textFromValue(self, value):\n if not hasattr(self, 'editor') or not self.editor.is_tensor:\n return super().textFromValue(value)\n return 'Tensor'\n\n def open_editor(self):\n original = copy.deepcopy(self.editor.value)\n if not self.editor.exec_():\n self.editor.value = original\n return\n self.setValue(self.editor.value)\n\n\nclass ThermalFactorLineEdit(QLineEdit):\n\n def __init__(self, spinbox, parent=None):\n super().__init__(parent)\n self.spinbox = spinbox\n\n def mousePressEvent(self, event):\n if self.isReadOnly():\n self.open_editor()\n return\n super().mousePressEvent(event)\n\n def mouseDoubleClickEvent(self, event):\n self.open_editor()\n\n def open_editor(self):\n self.spinbox.open_editor()\n",
"step-2": "<mask token>\n\n\nclass MaterialSiteEditor(QObject):\n <mask token>\n\n def __init__(self, site, parent=None):\n super().__init__(parent)\n loader = UiLoader()\n self.ui = loader.load_file('material_site_editor.ui', parent)\n self._site = site\n self.charge_comboboxes = []\n self.occupancy_spinboxes = []\n self.thermal_factor_spinboxes = []\n self.update_gui()\n self.setup_connections()\n\n def setup_connections(self):\n self.ui.select_atom_types.pressed.connect(self.select_atom_types)\n self.ui.thermal_factor_type.currentIndexChanged.connect(self.\n thermal_factor_type_changed)\n for w in self.site_settings_widgets:\n w.valueChanged.connect(self.update_config)\n self.ui.table.selectionModel().selectionChanged.connect(self.\n selection_changed)\n self.ui.remove_atom_type.pressed.connect(self.remove_selected_atom)\n self.ui.convert_u_to_tensors.toggled.connect(self.convert_u_to_tensors)\n\n def select_atom_types(self):\n dialog = PeriodicTableDialog(self.atom_types, self.ui)\n if not dialog.exec_():\n return\n self.atom_types = dialog.selected_atoms\n\n @property\n def site(self):\n return self._site\n\n @site.setter\n def site(self, v):\n self._site = v\n self.update_gui()\n <mask token>\n <mask token>\n\n @property\n def fractional_coords(self):\n return self.site['fractional_coords']\n\n @property\n def thermal_factor_type(self):\n return self.ui.thermal_factor_type.currentText()\n\n def U(self, val):\n type = self.thermal_factor_type\n if type == 'U':\n multiplier = 1\n elif type == 'B':\n multiplier = B_TO_U\n else:\n raise Exception(f'Unknown type: {type}')\n return val * multiplier\n <mask token>\n\n def thermal_factor(self, atom):\n type = self.thermal_factor_type\n if type == 'U':\n multiplier = 1\n elif type == 'B':\n multiplier = U_TO_B\n else:\n raise Exception(f'Unknown type: {type}')\n return atom['U'] * multiplier\n <mask token>\n <mask token>\n\n @property\n def num_rows(self):\n return self.ui.table.rowCount()\n\n @property\n def 
selected_row(self):\n selected = self.ui.table.selectionModel().selectedRows()\n return selected[0].row() if selected else None\n\n def select_row(self, i):\n if i is None or i >= self.num_rows:\n return\n selection_model = self.ui.table.selectionModel()\n selection_model.clearSelection()\n model_index = selection_model.model().index(i, 0)\n command = QItemSelectionModel.Select | QItemSelectionModel.Rows\n selection_model.select(model_index, command)\n\n def selection_changed(self):\n self.update_enable_states()\n\n def update_enable_states(self):\n enable_remove = self.num_rows > 1 and self.selected_row is not None\n self.ui.remove_atom_type.setEnabled(enable_remove)\n <mask token>\n\n def create_symbol_label(self, v):\n w = QTableWidgetItem(v)\n return w\n\n def create_charge_combobox(self, charge, symbol):\n cb = QComboBox(self.ui.table)\n if charge not in chargestate[symbol]:\n raise Exception(f'Invalid charge {charge} for {symbol}')\n cb.addItems(chargestate[symbol])\n cb.setCurrentText(charge)\n cb.currentIndexChanged.connect(self.update_config)\n size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n cb.setSizePolicy(size_policy)\n self.charge_comboboxes.append(cb)\n return cb\n\n def create_occupancy_spinbox(self, v):\n sb = ScientificDoubleSpinBox(self.ui.table)\n sb.setKeyboardTracking(False)\n sb.setMinimum(OCCUPATION_MIN)\n sb.setMaximum(OCCUPATION_MAX)\n sb.setValue(v)\n sb.valueChanged.connect(self.update_config)\n size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n sb.setSizePolicy(size_policy)\n self.occupancy_spinboxes.append(sb)\n return sb\n\n def create_thermal_factor_spinbox(self, v):\n sb = ThermalFactorSpinBox(self.ui.table)\n sb.setKeyboardTracking(False)\n sb.setMinimum(THERMAL_FACTOR_MIN)\n sb.setMaximum(THERMAL_FACTOR_MAX)\n sb.setValue(v)\n sb.valueChanged.connect(self.update_config)\n sb.setToolTip('Double-click to open tensor editor')\n size_policy = QSizePolicy(QSizePolicy.Expanding, 
QSizePolicy.Expanding)\n sb.setSizePolicy(size_policy)\n self.thermal_factor_spinboxes.append(sb)\n return sb\n <mask token>\n\n def update_gui(self):\n with block_signals(*self.site_settings_widgets):\n for i, w in enumerate(self.fractional_coords_widgets):\n w.setValue(self.fractional_coords[i])\n self.update_total_occupancy()\n self.update_table()\n self.reset_scalar_tensor_toggle()\n\n def reset_scalar_tensor_toggle(self):\n any_scalars = any(not isinstance(w.value(), np.ndarray) for w in\n self.thermal_factor_spinboxes)\n with block_signals(self.ui.convert_u_to_tensors):\n self.ui.convert_u_to_tensors.setChecked(not any_scalars)\n <mask token>\n <mask token>\n\n def update_thermal_factor_header(self):\n w = self.ui.table.horizontalHeaderItem(COLUMNS['thermal_factor'])\n w.setText(self.thermal_factor_type)\n\n def update_config(self):\n for i, w in enumerate(self.fractional_coords_widgets):\n self.fractional_coords[i] = w.value()\n for atom, combobox in zip(self.atoms, self.charge_comboboxes):\n atom['charge'] = combobox.currentText()\n for atom, spinbox in zip(self.atoms, self.occupancy_spinboxes):\n atom['occupancy'] = spinbox.value()\n for atom, spinbox in zip(self.atoms, self.thermal_factor_spinboxes):\n atom['U'] = self.U(spinbox.value())\n self.update_total_occupancy()\n self.update_occupancy_validity()\n self.emit_site_modified_if_valid()\n <mask token>\n\n def reset_occupancies(self):\n total = 1.0\n atoms = self.atoms\n num_atoms = len(atoms)\n for atom in atoms:\n atom['occupancy'] = total / num_atoms\n self.update_total_occupancy()\n self.update_occupancy_validity()\n\n @property\n def site_valid(self):\n return self.occupancies_valid\n <mask token>\n\n def update_occupancy_validity(self):\n valid = self.occupancies_valid\n color = 'white' if valid else 'red'\n msg = '' if valid else 'Sum of occupancies must be <= 1'\n self.ui.total_occupancy.setStyleSheet(f'background-color: {color}')\n self.ui.total_occupancy.setToolTip(msg)\n <mask token>\n\n 
@property\n def fractional_coords_widgets(self):\n return [self.ui.coords_x, self.ui.coords_y, self.ui.coords_z]\n\n @property\n def site_settings_widgets(self):\n return self.fractional_coords_widgets\n <mask token>\n\n\nclass ThermalFactorSpinBox(ScientificDoubleSpinBox):\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.editor = ThermalFactorEditor(0, parent)\n self.setLineEdit(ThermalFactorLineEdit(self, self))\n self.valueChanged.connect(self.update_editor_value)\n\n def value(self):\n return self.editor.value\n\n def setValue(self, v):\n self.editor.value = v\n if self.editor.is_tensor:\n super().setValue(super().value())\n self.valueChanged.emit(super().value())\n self.setReadOnly(True)\n else:\n super().setValue(v)\n self.valueChanged.emit(v)\n self.setReadOnly(False)\n\n def update_editor_value(self):\n if not self.editor.is_tensor:\n self.editor.value = super().value()\n\n def textFromValue(self, value):\n if not hasattr(self, 'editor') or not self.editor.is_tensor:\n return super().textFromValue(value)\n return 'Tensor'\n\n def open_editor(self):\n original = copy.deepcopy(self.editor.value)\n if not self.editor.exec_():\n self.editor.value = original\n return\n self.setValue(self.editor.value)\n\n\nclass ThermalFactorLineEdit(QLineEdit):\n\n def __init__(self, spinbox, parent=None):\n super().__init__(parent)\n self.spinbox = spinbox\n\n def mousePressEvent(self, event):\n if self.isReadOnly():\n self.open_editor()\n return\n super().mousePressEvent(event)\n\n def mouseDoubleClickEvent(self, event):\n self.open_editor()\n\n def open_editor(self):\n self.spinbox.open_editor()\n",
"step-3": "<mask token>\n\n\nclass MaterialSiteEditor(QObject):\n <mask token>\n\n def __init__(self, site, parent=None):\n super().__init__(parent)\n loader = UiLoader()\n self.ui = loader.load_file('material_site_editor.ui', parent)\n self._site = site\n self.charge_comboboxes = []\n self.occupancy_spinboxes = []\n self.thermal_factor_spinboxes = []\n self.update_gui()\n self.setup_connections()\n\n def setup_connections(self):\n self.ui.select_atom_types.pressed.connect(self.select_atom_types)\n self.ui.thermal_factor_type.currentIndexChanged.connect(self.\n thermal_factor_type_changed)\n for w in self.site_settings_widgets:\n w.valueChanged.connect(self.update_config)\n self.ui.table.selectionModel().selectionChanged.connect(self.\n selection_changed)\n self.ui.remove_atom_type.pressed.connect(self.remove_selected_atom)\n self.ui.convert_u_to_tensors.toggled.connect(self.convert_u_to_tensors)\n\n def select_atom_types(self):\n dialog = PeriodicTableDialog(self.atom_types, self.ui)\n if not dialog.exec_():\n return\n self.atom_types = dialog.selected_atoms\n\n @property\n def site(self):\n return self._site\n\n @site.setter\n def site(self, v):\n self._site = v\n self.update_gui()\n\n @property\n def atoms(self):\n return self.site['atoms']\n <mask token>\n\n @property\n def fractional_coords(self):\n return self.site['fractional_coords']\n\n @property\n def thermal_factor_type(self):\n return self.ui.thermal_factor_type.currentText()\n\n def U(self, val):\n type = self.thermal_factor_type\n if type == 'U':\n multiplier = 1\n elif type == 'B':\n multiplier = B_TO_U\n else:\n raise Exception(f'Unknown type: {type}')\n return val * multiplier\n\n def B(self, val):\n type = self.thermal_factor_type\n if type == 'U':\n multiplier = U_TO_B\n elif type == 'B':\n multiplier = 1\n else:\n raise Exception(f'Unknown type: {type}')\n return val * multiplier\n\n def thermal_factor(self, atom):\n type = self.thermal_factor_type\n if type == 'U':\n multiplier = 1\n elif type 
== 'B':\n multiplier = U_TO_B\n else:\n raise Exception(f'Unknown type: {type}')\n return atom['U'] * multiplier\n\n @property\n def atom_types(self):\n return [x['symbol'] for x in self.site['atoms']]\n\n @atom_types.setter\n def atom_types(self, v):\n if v == self.atom_types:\n return\n atoms = self.atoms\n previous_u_values = {x['symbol']: x['U'] for x in atoms}\n previous_charges = {x['symbol']: x['charge'] for x in atoms}\n atoms.clear()\n for symbol in v:\n atom = {'symbol': symbol, 'U': previous_u_values.get(symbol,\n DEFAULT_U), 'charge': previous_charges.get(symbol,\n DEFAULT_CHARGE)}\n atoms.append(atom)\n self.reset_occupancies()\n self.update_table()\n self.emit_site_modified_if_valid()\n\n @property\n def num_rows(self):\n return self.ui.table.rowCount()\n\n @property\n def selected_row(self):\n selected = self.ui.table.selectionModel().selectedRows()\n return selected[0].row() if selected else None\n\n def select_row(self, i):\n if i is None or i >= self.num_rows:\n return\n selection_model = self.ui.table.selectionModel()\n selection_model.clearSelection()\n model_index = selection_model.model().index(i, 0)\n command = QItemSelectionModel.Select | QItemSelectionModel.Rows\n selection_model.select(model_index, command)\n\n def selection_changed(self):\n self.update_enable_states()\n\n def update_enable_states(self):\n enable_remove = self.num_rows > 1 and self.selected_row is not None\n self.ui.remove_atom_type.setEnabled(enable_remove)\n <mask token>\n\n def create_symbol_label(self, v):\n w = QTableWidgetItem(v)\n return w\n\n def create_charge_combobox(self, charge, symbol):\n cb = QComboBox(self.ui.table)\n if charge not in chargestate[symbol]:\n raise Exception(f'Invalid charge {charge} for {symbol}')\n cb.addItems(chargestate[symbol])\n cb.setCurrentText(charge)\n cb.currentIndexChanged.connect(self.update_config)\n size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n cb.setSizePolicy(size_policy)\n 
self.charge_comboboxes.append(cb)\n return cb\n\n def create_occupancy_spinbox(self, v):\n sb = ScientificDoubleSpinBox(self.ui.table)\n sb.setKeyboardTracking(False)\n sb.setMinimum(OCCUPATION_MIN)\n sb.setMaximum(OCCUPATION_MAX)\n sb.setValue(v)\n sb.valueChanged.connect(self.update_config)\n size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n sb.setSizePolicy(size_policy)\n self.occupancy_spinboxes.append(sb)\n return sb\n\n def create_thermal_factor_spinbox(self, v):\n sb = ThermalFactorSpinBox(self.ui.table)\n sb.setKeyboardTracking(False)\n sb.setMinimum(THERMAL_FACTOR_MIN)\n sb.setMaximum(THERMAL_FACTOR_MAX)\n sb.setValue(v)\n sb.valueChanged.connect(self.update_config)\n sb.setToolTip('Double-click to open tensor editor')\n size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n sb.setSizePolicy(size_policy)\n self.thermal_factor_spinboxes.append(sb)\n return sb\n <mask token>\n\n def update_gui(self):\n with block_signals(*self.site_settings_widgets):\n for i, w in enumerate(self.fractional_coords_widgets):\n w.setValue(self.fractional_coords[i])\n self.update_total_occupancy()\n self.update_table()\n self.reset_scalar_tensor_toggle()\n\n def reset_scalar_tensor_toggle(self):\n any_scalars = any(not isinstance(w.value(), np.ndarray) for w in\n self.thermal_factor_spinboxes)\n with block_signals(self.ui.convert_u_to_tensors):\n self.ui.convert_u_to_tensors.setChecked(not any_scalars)\n <mask token>\n <mask token>\n\n def update_thermal_factor_header(self):\n w = self.ui.table.horizontalHeaderItem(COLUMNS['thermal_factor'])\n w.setText(self.thermal_factor_type)\n\n def update_config(self):\n for i, w in enumerate(self.fractional_coords_widgets):\n self.fractional_coords[i] = w.value()\n for atom, combobox in zip(self.atoms, self.charge_comboboxes):\n atom['charge'] = combobox.currentText()\n for atom, spinbox in zip(self.atoms, self.occupancy_spinboxes):\n atom['occupancy'] = spinbox.value()\n for atom, spinbox in 
zip(self.atoms, self.thermal_factor_spinboxes):\n atom['U'] = self.U(spinbox.value())\n self.update_total_occupancy()\n self.update_occupancy_validity()\n self.emit_site_modified_if_valid()\n\n def update_total_occupancy(self):\n self.ui.total_occupancy.setValue(self.total_occupancy)\n\n def reset_occupancies(self):\n total = 1.0\n atoms = self.atoms\n num_atoms = len(atoms)\n for atom in atoms:\n atom['occupancy'] = total / num_atoms\n self.update_total_occupancy()\n self.update_occupancy_validity()\n\n @property\n def site_valid(self):\n return self.occupancies_valid\n\n @property\n def occupancies_valid(self):\n return self.total_occupancy <= 1.0\n\n def update_occupancy_validity(self):\n valid = self.occupancies_valid\n color = 'white' if valid else 'red'\n msg = '' if valid else 'Sum of occupancies must be <= 1'\n self.ui.total_occupancy.setStyleSheet(f'background-color: {color}')\n self.ui.total_occupancy.setToolTip(msg)\n <mask token>\n\n @property\n def fractional_coords_widgets(self):\n return [self.ui.coords_x, self.ui.coords_y, self.ui.coords_z]\n\n @property\n def site_settings_widgets(self):\n return self.fractional_coords_widgets\n\n def convert_u_to_tensors(self, b):\n\n def scalar_to_tensor(spinbox):\n if isinstance(spinbox.value(), np.ndarray):\n return\n tensor = np.zeros(6, dtype=np.float64)\n tensor[:3] = spinbox.value()\n spinbox.setValue(tensor)\n\n def tensor_to_scalar(spinbox):\n value = spinbox.value()\n if not isinstance(value, np.ndarray):\n return\n scalar = spinbox.editor.ui.scalar_value.value()\n if np.isclose(scalar, 0) and np.allclose(value[:3], value[0]\n ) and np.allclose(value[3:], 0):\n scalar = value[0]\n spinbox.setValue(scalar)\n f = scalar_to_tensor if b else tensor_to_scalar\n for spinbox in self.thermal_factor_spinboxes:\n f(spinbox)\n\n\nclass ThermalFactorSpinBox(ScientificDoubleSpinBox):\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.editor = ThermalFactorEditor(0, parent)\n 
self.setLineEdit(ThermalFactorLineEdit(self, self))\n self.valueChanged.connect(self.update_editor_value)\n\n def value(self):\n return self.editor.value\n\n def setValue(self, v):\n self.editor.value = v\n if self.editor.is_tensor:\n super().setValue(super().value())\n self.valueChanged.emit(super().value())\n self.setReadOnly(True)\n else:\n super().setValue(v)\n self.valueChanged.emit(v)\n self.setReadOnly(False)\n\n def update_editor_value(self):\n if not self.editor.is_tensor:\n self.editor.value = super().value()\n\n def textFromValue(self, value):\n if not hasattr(self, 'editor') or not self.editor.is_tensor:\n return super().textFromValue(value)\n return 'Tensor'\n\n def open_editor(self):\n original = copy.deepcopy(self.editor.value)\n if not self.editor.exec_():\n self.editor.value = original\n return\n self.setValue(self.editor.value)\n\n\nclass ThermalFactorLineEdit(QLineEdit):\n\n def __init__(self, spinbox, parent=None):\n super().__init__(parent)\n self.spinbox = spinbox\n\n def mousePressEvent(self, event):\n if self.isReadOnly():\n self.open_editor()\n return\n super().mousePressEvent(event)\n\n def mouseDoubleClickEvent(self, event):\n self.open_editor()\n\n def open_editor(self):\n self.spinbox.open_editor()\n",
"step-4": "<mask token>\n\n\nclass MaterialSiteEditor(QObject):\n site_modified = Signal()\n\n def __init__(self, site, parent=None):\n super().__init__(parent)\n loader = UiLoader()\n self.ui = loader.load_file('material_site_editor.ui', parent)\n self._site = site\n self.charge_comboboxes = []\n self.occupancy_spinboxes = []\n self.thermal_factor_spinboxes = []\n self.update_gui()\n self.setup_connections()\n\n def setup_connections(self):\n self.ui.select_atom_types.pressed.connect(self.select_atom_types)\n self.ui.thermal_factor_type.currentIndexChanged.connect(self.\n thermal_factor_type_changed)\n for w in self.site_settings_widgets:\n w.valueChanged.connect(self.update_config)\n self.ui.table.selectionModel().selectionChanged.connect(self.\n selection_changed)\n self.ui.remove_atom_type.pressed.connect(self.remove_selected_atom)\n self.ui.convert_u_to_tensors.toggled.connect(self.convert_u_to_tensors)\n\n def select_atom_types(self):\n dialog = PeriodicTableDialog(self.atom_types, self.ui)\n if not dialog.exec_():\n return\n self.atom_types = dialog.selected_atoms\n\n @property\n def site(self):\n return self._site\n\n @site.setter\n def site(self, v):\n self._site = v\n self.update_gui()\n\n @property\n def atoms(self):\n return self.site['atoms']\n\n @property\n def total_occupancy(self):\n return sum(x['occupancy'] for x in self.atoms)\n\n @property\n def fractional_coords(self):\n return self.site['fractional_coords']\n\n @property\n def thermal_factor_type(self):\n return self.ui.thermal_factor_type.currentText()\n\n def U(self, val):\n type = self.thermal_factor_type\n if type == 'U':\n multiplier = 1\n elif type == 'B':\n multiplier = B_TO_U\n else:\n raise Exception(f'Unknown type: {type}')\n return val * multiplier\n\n def B(self, val):\n type = self.thermal_factor_type\n if type == 'U':\n multiplier = U_TO_B\n elif type == 'B':\n multiplier = 1\n else:\n raise Exception(f'Unknown type: {type}')\n return val * multiplier\n\n def thermal_factor(self, 
atom):\n type = self.thermal_factor_type\n if type == 'U':\n multiplier = 1\n elif type == 'B':\n multiplier = U_TO_B\n else:\n raise Exception(f'Unknown type: {type}')\n return atom['U'] * multiplier\n\n @property\n def atom_types(self):\n return [x['symbol'] for x in self.site['atoms']]\n\n @atom_types.setter\n def atom_types(self, v):\n if v == self.atom_types:\n return\n atoms = self.atoms\n previous_u_values = {x['symbol']: x['U'] for x in atoms}\n previous_charges = {x['symbol']: x['charge'] for x in atoms}\n atoms.clear()\n for symbol in v:\n atom = {'symbol': symbol, 'U': previous_u_values.get(symbol,\n DEFAULT_U), 'charge': previous_charges.get(symbol,\n DEFAULT_CHARGE)}\n atoms.append(atom)\n self.reset_occupancies()\n self.update_table()\n self.emit_site_modified_if_valid()\n\n @property\n def num_rows(self):\n return self.ui.table.rowCount()\n\n @property\n def selected_row(self):\n selected = self.ui.table.selectionModel().selectedRows()\n return selected[0].row() if selected else None\n\n def select_row(self, i):\n if i is None or i >= self.num_rows:\n return\n selection_model = self.ui.table.selectionModel()\n selection_model.clearSelection()\n model_index = selection_model.model().index(i, 0)\n command = QItemSelectionModel.Select | QItemSelectionModel.Rows\n selection_model.select(model_index, command)\n\n def selection_changed(self):\n self.update_enable_states()\n\n def update_enable_states(self):\n enable_remove = self.num_rows > 1 and self.selected_row is not None\n self.ui.remove_atom_type.setEnabled(enable_remove)\n\n def remove_selected_atom(self):\n if self.selected_row is None:\n return\n atom_types = self.atom_types\n del atom_types[self.selected_row]\n self.atom_types = atom_types\n\n def create_symbol_label(self, v):\n w = QTableWidgetItem(v)\n return w\n\n def create_charge_combobox(self, charge, symbol):\n cb = QComboBox(self.ui.table)\n if charge not in chargestate[symbol]:\n raise Exception(f'Invalid charge {charge} for {symbol}')\n 
cb.addItems(chargestate[symbol])\n cb.setCurrentText(charge)\n cb.currentIndexChanged.connect(self.update_config)\n size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n cb.setSizePolicy(size_policy)\n self.charge_comboboxes.append(cb)\n return cb\n\n def create_occupancy_spinbox(self, v):\n sb = ScientificDoubleSpinBox(self.ui.table)\n sb.setKeyboardTracking(False)\n sb.setMinimum(OCCUPATION_MIN)\n sb.setMaximum(OCCUPATION_MAX)\n sb.setValue(v)\n sb.valueChanged.connect(self.update_config)\n size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n sb.setSizePolicy(size_policy)\n self.occupancy_spinboxes.append(sb)\n return sb\n\n def create_thermal_factor_spinbox(self, v):\n sb = ThermalFactorSpinBox(self.ui.table)\n sb.setKeyboardTracking(False)\n sb.setMinimum(THERMAL_FACTOR_MIN)\n sb.setMaximum(THERMAL_FACTOR_MAX)\n sb.setValue(v)\n sb.valueChanged.connect(self.update_config)\n sb.setToolTip('Double-click to open tensor editor')\n size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n sb.setSizePolicy(size_policy)\n self.thermal_factor_spinboxes.append(sb)\n return sb\n\n def clear_table(self):\n self.charge_comboboxes.clear()\n self.occupancy_spinboxes.clear()\n self.thermal_factor_spinboxes.clear()\n self.ui.table.clearContents()\n\n def update_gui(self):\n with block_signals(*self.site_settings_widgets):\n for i, w in enumerate(self.fractional_coords_widgets):\n w.setValue(self.fractional_coords[i])\n self.update_total_occupancy()\n self.update_table()\n self.reset_scalar_tensor_toggle()\n\n def reset_scalar_tensor_toggle(self):\n any_scalars = any(not isinstance(w.value(), np.ndarray) for w in\n self.thermal_factor_spinboxes)\n with block_signals(self.ui.convert_u_to_tensors):\n self.ui.convert_u_to_tensors.setChecked(not any_scalars)\n\n def update_table(self):\n prev_selected = self.selected_row\n block_list = [self.ui.table, self.ui.table.selectionModel()]\n with block_signals(*block_list):\n atoms 
= self.site['atoms']\n self.clear_table()\n self.ui.table.setRowCount(len(atoms))\n for i, atom in enumerate(atoms):\n w = self.create_symbol_label(atom['symbol'])\n self.ui.table.setItem(i, COLUMNS['symbol'], w)\n w = self.create_charge_combobox(atom['charge'], atom['symbol'])\n self.ui.table.setCellWidget(i, COLUMNS['charge'], w)\n w = self.create_occupancy_spinbox(atom['occupancy'])\n self.ui.table.setCellWidget(i, COLUMNS['occupancy'], w)\n v = self.thermal_factor(atom)\n w = self.create_thermal_factor_spinbox(v)\n self.ui.table.setCellWidget(i, COLUMNS['thermal_factor'], w)\n self.update_occupancy_validity()\n if prev_selected is not None:\n select_row = (prev_selected if prev_selected < self.\n num_rows else self.num_rows - 1)\n self.select_row(select_row)\n self.selection_changed()\n\n def thermal_factor_type_changed(self):\n self.update_thermal_factor_header()\n self.update_table()\n text = f'Convert {self.thermal_factor_type} to tensors'\n self.ui.convert_u_to_tensors.setText(text)\n\n def update_thermal_factor_header(self):\n w = self.ui.table.horizontalHeaderItem(COLUMNS['thermal_factor'])\n w.setText(self.thermal_factor_type)\n\n def update_config(self):\n for i, w in enumerate(self.fractional_coords_widgets):\n self.fractional_coords[i] = w.value()\n for atom, combobox in zip(self.atoms, self.charge_comboboxes):\n atom['charge'] = combobox.currentText()\n for atom, spinbox in zip(self.atoms, self.occupancy_spinboxes):\n atom['occupancy'] = spinbox.value()\n for atom, spinbox in zip(self.atoms, self.thermal_factor_spinboxes):\n atom['U'] = self.U(spinbox.value())\n self.update_total_occupancy()\n self.update_occupancy_validity()\n self.emit_site_modified_if_valid()\n\n def update_total_occupancy(self):\n self.ui.total_occupancy.setValue(self.total_occupancy)\n\n def reset_occupancies(self):\n total = 1.0\n atoms = self.atoms\n num_atoms = len(atoms)\n for atom in atoms:\n atom['occupancy'] = total / num_atoms\n self.update_total_occupancy()\n 
self.update_occupancy_validity()\n\n @property\n def site_valid(self):\n return self.occupancies_valid\n\n @property\n def occupancies_valid(self):\n return self.total_occupancy <= 1.0\n\n def update_occupancy_validity(self):\n valid = self.occupancies_valid\n color = 'white' if valid else 'red'\n msg = '' if valid else 'Sum of occupancies must be <= 1'\n self.ui.total_occupancy.setStyleSheet(f'background-color: {color}')\n self.ui.total_occupancy.setToolTip(msg)\n\n def emit_site_modified_if_valid(self):\n if not self.site_valid:\n return\n self.site_modified.emit()\n\n @property\n def fractional_coords_widgets(self):\n return [self.ui.coords_x, self.ui.coords_y, self.ui.coords_z]\n\n @property\n def site_settings_widgets(self):\n return self.fractional_coords_widgets\n\n def convert_u_to_tensors(self, b):\n\n def scalar_to_tensor(spinbox):\n if isinstance(spinbox.value(), np.ndarray):\n return\n tensor = np.zeros(6, dtype=np.float64)\n tensor[:3] = spinbox.value()\n spinbox.setValue(tensor)\n\n def tensor_to_scalar(spinbox):\n value = spinbox.value()\n if not isinstance(value, np.ndarray):\n return\n scalar = spinbox.editor.ui.scalar_value.value()\n if np.isclose(scalar, 0) and np.allclose(value[:3], value[0]\n ) and np.allclose(value[3:], 0):\n scalar = value[0]\n spinbox.setValue(scalar)\n f = scalar_to_tensor if b else tensor_to_scalar\n for spinbox in self.thermal_factor_spinboxes:\n f(spinbox)\n\n\nclass ThermalFactorSpinBox(ScientificDoubleSpinBox):\n\n def __init__(self, parent=None):\n super().__init__(parent)\n self.editor = ThermalFactorEditor(0, parent)\n self.setLineEdit(ThermalFactorLineEdit(self, self))\n self.valueChanged.connect(self.update_editor_value)\n\n def value(self):\n return self.editor.value\n\n def setValue(self, v):\n self.editor.value = v\n if self.editor.is_tensor:\n super().setValue(super().value())\n self.valueChanged.emit(super().value())\n self.setReadOnly(True)\n else:\n super().setValue(v)\n self.valueChanged.emit(v)\n 
self.setReadOnly(False)\n\n def update_editor_value(self):\n if not self.editor.is_tensor:\n self.editor.value = super().value()\n\n def textFromValue(self, value):\n if not hasattr(self, 'editor') or not self.editor.is_tensor:\n return super().textFromValue(value)\n return 'Tensor'\n\n def open_editor(self):\n original = copy.deepcopy(self.editor.value)\n if not self.editor.exec_():\n self.editor.value = original\n return\n self.setValue(self.editor.value)\n\n\nclass ThermalFactorLineEdit(QLineEdit):\n\n def __init__(self, spinbox, parent=None):\n super().__init__(parent)\n self.spinbox = spinbox\n\n def mousePressEvent(self, event):\n if self.isReadOnly():\n self.open_editor()\n return\n super().mousePressEvent(event)\n\n def mouseDoubleClickEvent(self, event):\n self.open_editor()\n\n def open_editor(self):\n self.spinbox.open_editor()\n",
"step-5": "import copy\n\nimport numpy as np\n\nfrom PySide2.QtCore import QItemSelectionModel, QObject, Signal\nfrom PySide2.QtWidgets import (\n QComboBox, QLineEdit, QSizePolicy, QTableWidgetItem\n)\n\nfrom hexrd.constants import chargestate\nfrom hexrd.material import Material\n\nfrom hexrd.ui.periodic_table_dialog import PeriodicTableDialog\nfrom hexrd.ui.scientificspinbox import ScientificDoubleSpinBox\nfrom hexrd.ui.thermal_factor_editor import ThermalFactorEditor\nfrom hexrd.ui.ui_loader import UiLoader\nfrom hexrd.ui.utils import block_signals\n\n\nCOLUMNS = {\n 'symbol': 0,\n 'charge': 1,\n 'occupancy': 2,\n 'thermal_factor': 3\n}\n\nDEFAULT_CHARGE = '0'\nDEFAULT_U = Material.DFLT_U[0]\n\nOCCUPATION_MIN = 0\nOCCUPATION_MAX = 1\n\nTHERMAL_FACTOR_MIN = -1.e7\nTHERMAL_FACTOR_MAX = 1.e7\n\nU_TO_B = 8 * np.pi ** 2\nB_TO_U = 1 / U_TO_B\n\n\nclass MaterialSiteEditor(QObject):\n\n site_modified = Signal()\n\n def __init__(self, site, parent=None):\n super().__init__(parent)\n\n loader = UiLoader()\n self.ui = loader.load_file('material_site_editor.ui', parent)\n\n self._site = site\n\n self.charge_comboboxes = []\n self.occupancy_spinboxes = []\n self.thermal_factor_spinboxes = []\n\n self.update_gui()\n\n self.setup_connections()\n\n def setup_connections(self):\n self.ui.select_atom_types.pressed.connect(self.select_atom_types)\n self.ui.thermal_factor_type.currentIndexChanged.connect(\n self.thermal_factor_type_changed)\n for w in self.site_settings_widgets:\n w.valueChanged.connect(self.update_config)\n self.ui.table.selectionModel().selectionChanged.connect(\n self.selection_changed)\n self.ui.remove_atom_type.pressed.connect(self.remove_selected_atom)\n self.ui.convert_u_to_tensors.toggled.connect(self.convert_u_to_tensors)\n\n def select_atom_types(self):\n dialog = PeriodicTableDialog(self.atom_types, self.ui)\n if not dialog.exec_():\n return\n\n self.atom_types = dialog.selected_atoms\n\n @property\n def site(self):\n return self._site\n\n 
@site.setter\n def site(self, v):\n self._site = v\n self.update_gui()\n\n @property\n def atoms(self):\n return self.site['atoms']\n\n @property\n def total_occupancy(self):\n return sum(x['occupancy'] for x in self.atoms)\n\n @property\n def fractional_coords(self):\n return self.site['fractional_coords']\n\n @property\n def thermal_factor_type(self):\n return self.ui.thermal_factor_type.currentText()\n\n def U(self, val):\n # Take a thermal factor from a spin box and convert it to U\n type = self.thermal_factor_type\n if type == 'U':\n multiplier = 1\n elif type == 'B':\n multiplier = B_TO_U\n else:\n raise Exception(f'Unknown type: {type}')\n\n return val * multiplier\n\n def B(self, val):\n # Take a thermal factor from a spin box and convert it to B\n type = self.thermal_factor_type\n if type == 'U':\n multiplier = U_TO_B\n elif type == 'B':\n multiplier = 1\n else:\n raise Exception(f'Unknown type: {type}')\n\n return val * multiplier\n\n def thermal_factor(self, atom):\n # Given an atom, return the thermal factor in either B or U\n type = self.thermal_factor_type\n if type == 'U':\n multiplier = 1\n elif type == 'B':\n multiplier = U_TO_B\n else:\n raise Exception(f'Unknown type: {type}')\n\n return atom['U'] * multiplier\n\n @property\n def atom_types(self):\n return [x['symbol'] for x in self.site['atoms']]\n\n @atom_types.setter\n def atom_types(self, v):\n if v == self.atom_types:\n # No changes needed...\n return\n\n # Reset all the occupancies\n atoms = self.atoms\n previous_u_values = {x['symbol']: x['U'] for x in atoms}\n previous_charges = {x['symbol']: x['charge'] for x in atoms}\n atoms.clear()\n\n for symbol in v:\n # Use previous values if available. 
Otherwise, use the defaults.\n atom = {\n 'symbol': symbol,\n 'U': previous_u_values.get(symbol, DEFAULT_U),\n 'charge': previous_charges.get(symbol, DEFAULT_CHARGE),\n }\n atoms.append(atom)\n\n self.reset_occupancies()\n self.update_table()\n self.emit_site_modified_if_valid()\n\n @property\n def num_rows(self):\n return self.ui.table.rowCount()\n\n @property\n def selected_row(self):\n selected = self.ui.table.selectionModel().selectedRows()\n return selected[0].row() if selected else None\n\n def select_row(self, i):\n if i is None or i >= self.num_rows:\n # Out of range. Don't do anything.\n return\n\n # Select the row\n selection_model = self.ui.table.selectionModel()\n selection_model.clearSelection()\n\n model_index = selection_model.model().index(i, 0)\n command = QItemSelectionModel.Select | QItemSelectionModel.Rows\n selection_model.select(model_index, command)\n\n def selection_changed(self):\n self.update_enable_states()\n\n def update_enable_states(self):\n enable_remove = self.num_rows > 1 and self.selected_row is not None\n self.ui.remove_atom_type.setEnabled(enable_remove)\n\n def remove_selected_atom(self):\n if self.selected_row is None:\n return\n\n atom_types = self.atom_types\n del atom_types[self.selected_row]\n self.atom_types = atom_types\n\n def create_symbol_label(self, v):\n w = QTableWidgetItem(v)\n return w\n\n def create_charge_combobox(self, charge, symbol):\n cb = QComboBox(self.ui.table)\n\n if charge not in chargestate[symbol]:\n raise Exception(f'Invalid charge {charge} for {symbol}')\n\n cb.addItems(chargestate[symbol])\n cb.setCurrentText(charge)\n cb.currentIndexChanged.connect(self.update_config)\n\n size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n cb.setSizePolicy(size_policy)\n\n self.charge_comboboxes.append(cb)\n return cb\n\n def create_occupancy_spinbox(self, v):\n sb = ScientificDoubleSpinBox(self.ui.table)\n sb.setKeyboardTracking(False)\n sb.setMinimum(OCCUPATION_MIN)\n 
sb.setMaximum(OCCUPATION_MAX)\n sb.setValue(v)\n sb.valueChanged.connect(self.update_config)\n\n size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n sb.setSizePolicy(size_policy)\n\n self.occupancy_spinboxes.append(sb)\n return sb\n\n def create_thermal_factor_spinbox(self, v):\n sb = ThermalFactorSpinBox(self.ui.table)\n sb.setKeyboardTracking(False)\n sb.setMinimum(THERMAL_FACTOR_MIN)\n sb.setMaximum(THERMAL_FACTOR_MAX)\n sb.setValue(v)\n sb.valueChanged.connect(self.update_config)\n sb.setToolTip('Double-click to open tensor editor')\n\n size_policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)\n sb.setSizePolicy(size_policy)\n\n self.thermal_factor_spinboxes.append(sb)\n return sb\n\n def clear_table(self):\n self.charge_comboboxes.clear()\n self.occupancy_spinboxes.clear()\n self.thermal_factor_spinboxes.clear()\n self.ui.table.clearContents()\n\n def update_gui(self):\n with block_signals(*self.site_settings_widgets):\n for i, w in enumerate(self.fractional_coords_widgets):\n w.setValue(self.fractional_coords[i])\n\n self.update_total_occupancy()\n self.update_table()\n self.reset_scalar_tensor_toggle()\n\n def reset_scalar_tensor_toggle(self):\n any_scalars = any(not isinstance(w.value(), np.ndarray)\n for w in self.thermal_factor_spinboxes)\n\n with block_signals(self.ui.convert_u_to_tensors):\n self.ui.convert_u_to_tensors.setChecked(not any_scalars)\n\n def update_table(self):\n prev_selected = self.selected_row\n\n block_list = [\n self.ui.table,\n self.ui.table.selectionModel()\n ]\n with block_signals(*block_list):\n atoms = self.site['atoms']\n self.clear_table()\n self.ui.table.setRowCount(len(atoms))\n for i, atom in enumerate(atoms):\n w = self.create_symbol_label(atom['symbol'])\n self.ui.table.setItem(i, COLUMNS['symbol'], w)\n\n w = self.create_charge_combobox(atom['charge'], atom['symbol'])\n self.ui.table.setCellWidget(i, COLUMNS['charge'], w)\n\n w = self.create_occupancy_spinbox(atom['occupancy'])\n 
self.ui.table.setCellWidget(i, COLUMNS['occupancy'], w)\n\n v = self.thermal_factor(atom)\n w = self.create_thermal_factor_spinbox(v)\n self.ui.table.setCellWidget(i, COLUMNS['thermal_factor'], w)\n\n self.update_occupancy_validity()\n\n if prev_selected is not None:\n select_row = (prev_selected if prev_selected < self.num_rows\n else self.num_rows - 1)\n self.select_row(select_row)\n\n # Just in case the selection actually changed...\n self.selection_changed()\n\n def thermal_factor_type_changed(self):\n self.update_thermal_factor_header()\n self.update_table()\n\n # Update the text for the tensor toggle as well\n text = f'Convert {self.thermal_factor_type} to tensors'\n self.ui.convert_u_to_tensors.setText(text)\n\n def update_thermal_factor_header(self):\n w = self.ui.table.horizontalHeaderItem(COLUMNS['thermal_factor'])\n w.setText(self.thermal_factor_type)\n\n def update_config(self):\n for i, w in enumerate(self.fractional_coords_widgets):\n self.fractional_coords[i] = w.value()\n\n for atom, combobox in zip(self.atoms, self.charge_comboboxes):\n atom['charge'] = combobox.currentText()\n\n for atom, spinbox in zip(self.atoms, self.occupancy_spinboxes):\n atom['occupancy'] = spinbox.value()\n\n for atom, spinbox in zip(self.atoms, self.thermal_factor_spinboxes):\n atom['U'] = self.U(spinbox.value())\n\n self.update_total_occupancy()\n self.update_occupancy_validity()\n\n self.emit_site_modified_if_valid()\n\n def update_total_occupancy(self):\n self.ui.total_occupancy.setValue(self.total_occupancy)\n\n def reset_occupancies(self):\n total = 1.0\n atoms = self.atoms\n num_atoms = len(atoms)\n for atom in atoms:\n atom['occupancy'] = total / num_atoms\n\n self.update_total_occupancy()\n self.update_occupancy_validity()\n\n @property\n def site_valid(self):\n return self.occupancies_valid\n\n @property\n def occupancies_valid(self):\n return self.total_occupancy <= 1.0\n\n def update_occupancy_validity(self):\n valid = self.occupancies_valid\n color = 'white' if 
valid else 'red'\n msg = '' if valid else 'Sum of occupancies must be <= 1'\n\n self.ui.total_occupancy.setStyleSheet(f'background-color: {color}')\n self.ui.total_occupancy.setToolTip(msg)\n\n def emit_site_modified_if_valid(self):\n if not self.site_valid:\n return\n\n self.site_modified.emit()\n\n @property\n def fractional_coords_widgets(self):\n return [\n self.ui.coords_x,\n self.ui.coords_y,\n self.ui.coords_z\n ]\n\n @property\n def site_settings_widgets(self):\n return self.fractional_coords_widgets\n\n def convert_u_to_tensors(self, b):\n\n def scalar_to_tensor(spinbox):\n if isinstance(spinbox.value(), np.ndarray):\n # Already a tensor\n return\n\n tensor = np.zeros(6, dtype=np.float64)\n tensor[:3] = spinbox.value()\n spinbox.setValue(tensor)\n\n def tensor_to_scalar(spinbox):\n value = spinbox.value()\n if not isinstance(value, np.ndarray):\n # Already a scalar\n return\n\n # Use the previous spinbox value if available\n scalar = spinbox.editor.ui.scalar_value.value()\n if (np.isclose(scalar, 0) and np.allclose(value[:3], value[0]) and\n np.allclose(value[3:], 0)):\n # If the previous value is zero, and the tensor is diagonal,\n # use the diagonal value\n scalar = value[0]\n\n spinbox.setValue(scalar)\n\n f = scalar_to_tensor if b else tensor_to_scalar\n\n for spinbox in self.thermal_factor_spinboxes:\n f(spinbox)\n\n\nclass ThermalFactorSpinBox(ScientificDoubleSpinBox):\n def __init__(self, parent=None):\n super().__init__(parent)\n self.editor = ThermalFactorEditor(0, parent)\n self.setLineEdit(ThermalFactorLineEdit(self, self))\n self.valueChanged.connect(self.update_editor_value)\n\n def value(self):\n return self.editor.value\n\n def setValue(self, v):\n self.editor.value = v\n if self.editor.is_tensor:\n # Force an update\n super().setValue(super().value())\n self.valueChanged.emit(super().value())\n self.setReadOnly(True)\n else:\n super().setValue(v)\n self.valueChanged.emit(v)\n self.setReadOnly(False)\n\n def update_editor_value(self):\n if 
not self.editor.is_tensor:\n self.editor.value = super().value()\n\n def textFromValue(self, value):\n if not hasattr(self, 'editor') or not self.editor.is_tensor:\n return super().textFromValue(value)\n\n return 'Tensor'\n\n def open_editor(self):\n original = copy.deepcopy(self.editor.value)\n if not self.editor.exec_():\n self.editor.value = original\n return\n\n self.setValue(self.editor.value)\n\n\nclass ThermalFactorLineEdit(QLineEdit):\n def __init__(self, spinbox, parent=None):\n super().__init__(parent)\n\n self.spinbox = spinbox\n\n def mousePressEvent(self, event):\n if self.isReadOnly():\n self.open_editor()\n return\n\n super().mousePressEvent(event)\n\n def mouseDoubleClickEvent(self, event):\n self.open_editor()\n\n def open_editor(self):\n self.spinbox.open_editor()\n",
"step-ids": [
39,
40,
47,
54,
57
]
}
|
[
39,
40,
47,
54,
57
] |
import re
import gpxpy
def extract_gpx_data(gpx_file_path, attribute='elevation'):
    """Reads in a GPX file and returns a list of values
    for a specified GPX attribute.

    Parameters
    ----------
    gpx_file_path : str
        File path to the GPX file (.gpx extension).

    attribute: str
        Name of the attribute to extract. Default
        value is 'elevation'. Must match one of the
        entries in the function-defined lists.

    Returns
    -------
    data : list
        List of the extracted attribute values: floats for numeric
        attributes, datetime objects for 'time'. Empty when the
        attribute name is not recognised.
    """
    # Open GPX file in context manager and parse with gpxpy
    with open(gpx_file_path) as gpx_file:
        gpx = gpxpy.parse(gpx_file)

    # Core point attributes exposed directly by gpxpy track points
    primary_attributes = [
        "latitude",
        "longitude",
        "elevation",
        "time"
    ]

    # Attributes stored inside GPX <extensions> elements
    secondary_attributes = [
        "cadence", "distance", "altitude",
        "energy", "speed", "verticalSpeed"
    ]

    # Check if specified attribute is in main
    # GPX attributes (lat/lon/elevation/time)
    if attribute in primary_attributes:

        # gpxpy exposes these as plain attributes on each point, so
        # getattr() replaces the original per-point dict build-and-discard.
        data = [
            getattr(point, attribute)
            for track in gpx.tracks
            for segment in track.segments
            for point in segment.points
        ]

        print(f"Extracted {attribute} data.")

    # Check if specified attribute is in
    # GPX extensions (cadence/distance/altitude
    # /energy/speed/verticalSpeed)
    elif attribute in secondary_attributes:

        # Extension tags are matched by substring because they commonly
        # carry an XML namespace prefix (e.g. '{ns}cadence').
        # NOTE(review): confirm against the producing device's schema.
        pattern = re.compile(f"^.*{attribute}.*$")

        data = [
            float(extension.text)
            for track in gpx.tracks
            for segment in track.segments
            for point in segment.points
            for extension in point.extensions
            if pattern.match(extension.tag)
        ]

        print(f"Extracted {attribute} data.")

    else:
        data = []
        # Message fixed: the original listing was missing commas
        # ("cadence distance").
        print("Invalid attribute. Must be one of the following: "
              "latitude, longitude, elevation, time, cadence, "
              "distance, altitude, energy, speed, verticalSpeed.")

    # List of attribute values
    return data
|
normal
|
{
"blob_id": "cc6d18785eff0406ff7f38f18f15476375e31b76",
"index": 9254,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef extract_gpx_data(gpx_file_path, attribute='elevation'):\n \"\"\"Reads in a GPX file and returns a list of values\n for a specified GPX attribute.\n\n Parameters\n ----------\n gpx_file_path : str\n File path to the GPX file (.gpx extension).\n\n attribute: str\n Name of the attribute to extract. Default\n value is 'elevation'. Must match one of the\n entries in the function-defined list.\n\n Returns\n -------\n data : list\n List containing float values of the extracted\n GPX attributes.\n \"\"\"\n with open(gpx_file_path) as gpx_file:\n gpx = gpxpy.parse(gpx_file)\n primary_attributes = ['latitude', 'longitude', 'elevation', 'time']\n secondary_attributes = ['cadence', 'distance', 'altitude', 'energy',\n 'speed', 'verticalSpeed']\n if attribute in primary_attributes:\n data = [{'latitude': point.latitude, 'longitude': point.longitude,\n 'elevation': point.elevation, 'time': point.time}.get(attribute\n ) for track in gpx.tracks for segment in track.segments for\n point in segment.points]\n print(f'Extracted {attribute} data.')\n elif attribute in secondary_attributes:\n pattern = re.compile(f'^.*{attribute}.*$')\n data = [float(extension.text) for track in gpx.tracks for segment in\n track.segments for point in segment.points for extension in\n point.extensions if pattern.match(extension.tag)]\n print(f'Extracted {attribute} data.')\n else:\n data = []\n print(\n 'Invalid attribute. Must be one of the following: latitude, longitude, elevation, time, cadence distance, altitude, energy, speed, verticalSpeed.'\n )\n return data\n",
"step-3": "import re\nimport gpxpy\n\n\ndef extract_gpx_data(gpx_file_path, attribute='elevation'):\n \"\"\"Reads in a GPX file and returns a list of values\n for a specified GPX attribute.\n\n Parameters\n ----------\n gpx_file_path : str\n File path to the GPX file (.gpx extension).\n\n attribute: str\n Name of the attribute to extract. Default\n value is 'elevation'. Must match one of the\n entries in the function-defined list.\n\n Returns\n -------\n data : list\n List containing float values of the extracted\n GPX attributes.\n \"\"\"\n with open(gpx_file_path) as gpx_file:\n gpx = gpxpy.parse(gpx_file)\n primary_attributes = ['latitude', 'longitude', 'elevation', 'time']\n secondary_attributes = ['cadence', 'distance', 'altitude', 'energy',\n 'speed', 'verticalSpeed']\n if attribute in primary_attributes:\n data = [{'latitude': point.latitude, 'longitude': point.longitude,\n 'elevation': point.elevation, 'time': point.time}.get(attribute\n ) for track in gpx.tracks for segment in track.segments for\n point in segment.points]\n print(f'Extracted {attribute} data.')\n elif attribute in secondary_attributes:\n pattern = re.compile(f'^.*{attribute}.*$')\n data = [float(extension.text) for track in gpx.tracks for segment in\n track.segments for point in segment.points for extension in\n point.extensions if pattern.match(extension.tag)]\n print(f'Extracted {attribute} data.')\n else:\n data = []\n print(\n 'Invalid attribute. Must be one of the following: latitude, longitude, elevation, time, cadence distance, altitude, energy, speed, verticalSpeed.'\n )\n return data\n",
"step-4": "import re\nimport gpxpy\n\n\ndef extract_gpx_data(gpx_file_path, attribute='elevation'):\n \"\"\"Reads in a GPX file and returns a list of values\n for a specified GPX attribute.\n\n Parameters\n ----------\n gpx_file_path : str\n File path to the GPX file (.gpx extension).\n\n attribute: str\n Name of the attribute to extract. Default\n value is 'elevation'. Must match one of the\n entries in the function-defined list.\n\n Returns\n -------\n data : list\n List containing float values of the extracted\n GPX attributes.\n \"\"\"\n # Open GPX file in context manager and parse with gpxpy\n with open(gpx_file_path) as gpx_file:\n gpx = gpxpy.parse(gpx_file)\n\n # Define GPX main attributes\n primary_attributes = [\n \"latitude\",\n \"longitude\",\n \"elevation\",\n \"time\"\n ]\n\n # Define GPX extension attributes\n secondary_attributes = [\n \"cadence\", \"distance\", \"altitude\",\n \"energy\", \"speed\", \"verticalSpeed\"\n ]\n\n # Check if specified attribute is in main\n # GPX attributes (lat/lon/elevation/time)\n if attribute in primary_attributes:\n\n # Create list of values for attribute\n data = [{\n \"latitude\": point.latitude,\n \"longitude\": point.longitude,\n \"elevation\": point.elevation,\n \"time\": point.time\n }.get(attribute)\n for track in gpx.tracks\n for segment in track.segments\n for point in segment.points\n ]\n\n print(f\"Extracted {attribute} data.\")\n\n # Check if specified attribute is in\n # GPX extensions (cadence/distance/altitude\n # /energy/speed/verticalSpeed)\n elif attribute in secondary_attributes:\n\n # Define pattern for attribute to match on\n pattern = re.compile(f\"^.*{attribute}.*$\")\n\n # Create list of values for attribute\n data = [\n float(extension.text)\n for track in gpx.tracks\n for segment in track.segments\n for point in segment.points\n for extension in point.extensions\n if pattern.match(extension.tag)\n ]\n\n print(f\"Extracted {attribute} data.\")\n\n else:\n data = []\n print(\"Invalid 
attribute. Must be one of the following: \"\n \"latitude, longitude, elevation, time, cadence \"\n \"distance, altitude, energy, speed, verticalSpeed.\")\n\n # List of attribute values\n return data\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from odoo import models, fields, api
class Aceptar_letras_wizard(models.TransientModel):
    """Transient wizard that moves the selected letters of exchange to
    the "ACE" state (presumably 'accepted' — confirm against
    letra_cambio.letra's state definition)."""
    _name = 'aceptar_letras_wizard'
    _description = "Aceptar letras"

    def _get_letras(self):
        # Default for letra_ids: preselect the records the wizard was
        # launched from; Odoo passes them in the context as 'active_ids'.
        if self.env.context and self.env.context.get('active_ids'):
            return self.env.context.get('active_ids')
        return []
    # Letters shown in the wizard, prefilled from the current selection.
    letra_ids = fields.Many2many('letra_cambio.letra', default=_get_letras, string='Letras')

    @api.multi
    def aceptar_letras(self):
        """Change the state of every selected letter to "ACE"."""
        # NOTE(review): .get('active_ids', []) already defaults to a list,
        # so the trailing `or []` only matters for other falsy values.
        active_ids = self.env.context.get('active_ids', []) or []
        records = self.env['letra_cambio.letra'].browse(active_ids)
        self.env['letra_cambio.letra'].cambiar_estado_all(records, "ACE")
|
normal
|
{
"blob_id": "4ad3390f8f2c92f35acde507be7a7b713af997f2",
"index": 5092,
"step-1": "<mask token>\n\n\nclass Aceptar_letras_wizard(models.TransientModel):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @api.multi\n def aceptar_letras(self):\n active_ids = self.env.context.get('active_ids', []) or []\n records = self.env['letra_cambio.letra'].browse(active_ids)\n self.env['letra_cambio.letra'].cambiar_estado_all(records, 'ACE')\n",
"step-2": "<mask token>\n\n\nclass Aceptar_letras_wizard(models.TransientModel):\n <mask token>\n <mask token>\n\n def _get_letras(self):\n if self.env.context and self.env.context.get('active_ids'):\n return self.env.context.get('active_ids')\n return []\n <mask token>\n\n @api.multi\n def aceptar_letras(self):\n active_ids = self.env.context.get('active_ids', []) or []\n records = self.env['letra_cambio.letra'].browse(active_ids)\n self.env['letra_cambio.letra'].cambiar_estado_all(records, 'ACE')\n",
"step-3": "<mask token>\n\n\nclass Aceptar_letras_wizard(models.TransientModel):\n _name = 'aceptar_letras_wizard'\n _description = 'Aceptar letras'\n\n def _get_letras(self):\n if self.env.context and self.env.context.get('active_ids'):\n return self.env.context.get('active_ids')\n return []\n letra_ids = fields.Many2many('letra_cambio.letra', default=_get_letras,\n string='Letras')\n\n @api.multi\n def aceptar_letras(self):\n active_ids = self.env.context.get('active_ids', []) or []\n records = self.env['letra_cambio.letra'].browse(active_ids)\n self.env['letra_cambio.letra'].cambiar_estado_all(records, 'ACE')\n",
"step-4": "from odoo import models, fields, api\n\n\nclass Aceptar_letras_wizard(models.TransientModel):\n _name = 'aceptar_letras_wizard'\n _description = 'Aceptar letras'\n\n def _get_letras(self):\n if self.env.context and self.env.context.get('active_ids'):\n return self.env.context.get('active_ids')\n return []\n letra_ids = fields.Many2many('letra_cambio.letra', default=_get_letras,\n string='Letras')\n\n @api.multi\n def aceptar_letras(self):\n active_ids = self.env.context.get('active_ids', []) or []\n records = self.env['letra_cambio.letra'].browse(active_ids)\n self.env['letra_cambio.letra'].cambiar_estado_all(records, 'ACE')\n",
"step-5": "from odoo import models, fields, api\n\n\nclass Aceptar_letras_wizard(models.TransientModel):\n _name = 'aceptar_letras_wizard'\n _description = \"Aceptar letras\"\n\n def _get_letras(self):\n if self.env.context and self.env.context.get('active_ids'):\n return self.env.context.get('active_ids')\n return []\n\n\n letra_ids = fields.Many2many('letra_cambio.letra', default=_get_letras, string='Letras')\n\n @api.multi\n def aceptar_letras(self):\n active_ids = self.env.context.get('active_ids', []) or []\n records = self.env['letra_cambio.letra'].browse(active_ids)\n self.env['letra_cambio.letra'].cambiar_estado_all(records, \"ACE\")\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# https://www.acmicpc.net/problem/3584
# Nearest common ancestor: for each test case, read a rooted tree given as
# parent->child edges and print the lowest common ancestor of two nodes.
import sys, collections
input = sys.stdin.readline  # NOTE: shadows the builtin input() for fast reads
N = int(input())  # number of test cases
for _ in range(N):
    n = int(input())  # number of nodes, labelled 1..n
    arr = collections.defaultdict(list)  # adjacency: parent -> children
    parent = [i for i in range(n + 1)]  # parent[i] == i until an edge sets it
    for i in range(n - 1):
        a, b = map(int, input().split())
        arr[a].append(b)
        parent[b] = a
    node_1, node_2 = map(int, input().split())
    # Roots are nodes that never appeared as a child (parent[i] == i);
    # a proper tree has exactly one, so the loop below runs once.
    p = [i for i, e in enumerate(parent) if i > 0 and i == e]
    def bfs(p, goal):
        # Return the list of nodes on the path from root `p` down to `goal`.
        queue = collections.deque()
        queue.append([p, [p]])  # each entry: [current node, path so far]
        discoverd = [False] * (n + 1)  # (sic) visited flags per node
        while queue:
            m, r = queue.popleft()
            if not discoverd[m]:
                discoverd[m] = True
                if m == goal:
                    return r
                for i in arr[m]:
                    queue.append([i, r + [i]])
    for i in p:
        a = bfs(i, node_1)  # root -> node_1 path
        b = bfs(i, node_2)  # root -> node_2 path
        # The LCA is the deepest node shared by both root paths, i.e. the
        # last position where the two paths still agree.
        result = 0
        for aa, bb in zip(a,b):
            if aa==bb:
                result = aa
        print(result)
|
normal
|
{
"blob_id": "d60a2d4c819f701e8e439b8839415aa2838df185",
"index": 6415,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor _ in range(N):\n n = int(input())\n arr = collections.defaultdict(list)\n parent = [i for i in range(n + 1)]\n for i in range(n - 1):\n a, b = map(int, input().split())\n arr[a].append(b)\n parent[b] = a\n node_1, node_2 = map(int, input().split())\n p = [i for i, e in enumerate(parent) if i > 0 and i == e]\n\n def bfs(p, goal):\n queue = collections.deque()\n queue.append([p, [p]])\n discoverd = [False] * (n + 1)\n while queue:\n m, r = queue.popleft()\n if not discoverd[m]:\n discoverd[m] = True\n if m == goal:\n return r\n for i in arr[m]:\n queue.append([i, r + [i]])\n for i in p:\n a = bfs(i, node_1)\n b = bfs(i, node_2)\n result = 0\n for aa, bb in zip(a, b):\n if aa == bb:\n result = aa\n print(result)\n",
"step-3": "<mask token>\ninput = sys.stdin.readline\nN = int(input())\nfor _ in range(N):\n n = int(input())\n arr = collections.defaultdict(list)\n parent = [i for i in range(n + 1)]\n for i in range(n - 1):\n a, b = map(int, input().split())\n arr[a].append(b)\n parent[b] = a\n node_1, node_2 = map(int, input().split())\n p = [i for i, e in enumerate(parent) if i > 0 and i == e]\n\n def bfs(p, goal):\n queue = collections.deque()\n queue.append([p, [p]])\n discoverd = [False] * (n + 1)\n while queue:\n m, r = queue.popleft()\n if not discoverd[m]:\n discoverd[m] = True\n if m == goal:\n return r\n for i in arr[m]:\n queue.append([i, r + [i]])\n for i in p:\n a = bfs(i, node_1)\n b = bfs(i, node_2)\n result = 0\n for aa, bb in zip(a, b):\n if aa == bb:\n result = aa\n print(result)\n",
"step-4": "import sys, collections\ninput = sys.stdin.readline\nN = int(input())\nfor _ in range(N):\n n = int(input())\n arr = collections.defaultdict(list)\n parent = [i for i in range(n + 1)]\n for i in range(n - 1):\n a, b = map(int, input().split())\n arr[a].append(b)\n parent[b] = a\n node_1, node_2 = map(int, input().split())\n p = [i for i, e in enumerate(parent) if i > 0 and i == e]\n\n def bfs(p, goal):\n queue = collections.deque()\n queue.append([p, [p]])\n discoverd = [False] * (n + 1)\n while queue:\n m, r = queue.popleft()\n if not discoverd[m]:\n discoverd[m] = True\n if m == goal:\n return r\n for i in arr[m]:\n queue.append([i, r + [i]])\n for i in p:\n a = bfs(i, node_1)\n b = bfs(i, node_2)\n result = 0\n for aa, bb in zip(a, b):\n if aa == bb:\n result = aa\n print(result)\n",
"step-5": "# https://www.acmicpc.net/problem/3584\nimport sys, collections\ninput = sys.stdin.readline\nN = int(input())\nfor _ in range(N):\n n = int(input())\n arr = collections.defaultdict(list)\n parent = [i for i in range(n + 1)]\n for i in range(n - 1):\n a, b = map(int, input().split())\n arr[a].append(b)\n parent[b] = a\n node_1, node_2 = map(int, input().split())\n p = [i for i, e in enumerate(parent) if i > 0 and i == e]\n\n\n def bfs(p, goal):\n queue = collections.deque()\n queue.append([p, [p]])\n discoverd = [False] * (n + 1)\n while queue:\n m, r = queue.popleft()\n if not discoverd[m]:\n discoverd[m] = True\n if m == goal:\n return r\n for i in arr[m]:\n queue.append([i, r + [i]])\n\n for i in p:\n a = bfs(i, node_1)\n b = bfs(i, node_2)\n result = 0\n for aa, bb in zip(a,b):\n if aa==bb:\n result = aa\n print(result)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 29 20:33:32 2013
@author: ste
"""
# Convert a graph input file from adjacency-list format, where each line is
#   vertex adjacent adjacent adjacent ...
# to edge-list format, where each line is
#   tail head

edges = []
with open("/Users/ste/Desktop/Ste/Python/AlgorithmsCourse/KargerMinCut.txt") as v_list_file:
    for line in v_list_file:
        # Comprehension instead of map(): on Python 3 map() returns a lazy,
        # non-subscriptable iterator, which broke node[0] / node[1:].
        # On Python 2 the result is identical.
        node = [int(token) for token in line.split()]
        # First token is the vertex; every remaining token is a neighbour.
        for adjacent in node[1:]:
            edges.append([node[0], adjacent])

with open("/Users/ste/Desktop/Ste/C++/Programs/AlgorithmCourse/GraphSearch/KargerMinCut(edges).txt", "w+") as outfile:
    for edge in edges:
        outfile.write(str(edge[0]) + ' ' + str(edge[1]) + '\n')
|
normal
|
{
"blob_id": "1b7b94a0331e2462f83f4f77bcfaefbeefdf24f4",
"index": 3754,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('/Users/ste/Desktop/Ste/Python/AlgorithmsCourse/KargerMinCut.txt'\n ) as v_list_file:\n for line in v_list_file:\n node = map(int, line.split())\n for adjacent in node[1:]:\n edges.append([node[0], adjacent])\nwith open(\n '/Users/ste/Desktop/Ste/C++/Programs/AlgorithmCourse/GraphSearch/KargerMinCut(edges).txt'\n , 'w+') as outfile:\n for edge in edges:\n outfile.write(str(edge[0]) + ' ' + str(edge[1]) + '\\n')\n",
"step-3": "<mask token>\nedges = []\nwith open('/Users/ste/Desktop/Ste/Python/AlgorithmsCourse/KargerMinCut.txt'\n ) as v_list_file:\n for line in v_list_file:\n node = map(int, line.split())\n for adjacent in node[1:]:\n edges.append([node[0], adjacent])\nwith open(\n '/Users/ste/Desktop/Ste/C++/Programs/AlgorithmCourse/GraphSearch/KargerMinCut(edges).txt'\n , 'w+') as outfile:\n for edge in edges:\n outfile.write(str(edge[0]) + ' ' + str(edge[1]) + '\\n')\n",
"step-4": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jul 29 20:33:32 2013\n\n@author: ste\n\"\"\"\n\n#Convert input file for graph from adjacency list version, where each line is\n#vertex adjacent adjacent adjacent ...\n#to edge representation where each line is\n#tail head\n\nedges=[]\nwith open(\"/Users/ste/Desktop/Ste/Python/AlgorithmsCourse/KargerMinCut.txt\") as v_list_file:\n for line in v_list_file:\n node=map(int, line.split())\n for adjacent in node[1:]:\n edges.append([node[0], adjacent])\n\nwith open(\"/Users/ste/Desktop/Ste/C++/Programs/AlgorithmCourse/GraphSearch/KargerMinCut(edges).txt\", \"w+\") as outfile:\n for edge in edges:\n outfile.write(str(edge[0])+' '+str(edge[1])+'\\n')\n ",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#-*- coding: UTF-8 -*-
#Author Motuii
'''
* ┏┓ ┏┓
* ┏┛┻━━━┛┻┓
* ┃ ┃
* ┃ ━ ┃
* ┃ ┳┛ ┗┳ ┃
* ┃ ┃
* ┃ ┻ ┃
* ┃ ┃
* ┗━┓ ┏━┛
* ┃ ┃ 神兽保佑
* ┃ ┃ 代码无BUG!
* ┃ ┗━━━┓
* ┃ ┣┓
* ┃ ┏┛
* ┗┓┓┏━┳┓┏┛
* ┃┫┫ ┃┫┫
* ┗┻┛ ┗┻┛
*
'''
# Print the first n rows of Pascal's triangle (Python 2 `print` statements).
n = 10
# Alternative 2-D implementation, kept for reference:
# arr = [[1]*i for i in range(1,n+1)]
# for i in range(len(arr)):
# 	for j in range(len(arr[i])):
# 		if (j!=0 and j!=len(arr[i-1])):
# 			arr[i][j] = arr[i-1][j-1] + arr[i-1][j]
# 	print ' '.join(map(lambda x:str(x),arr[i]))
# In-place 1-D version: `an` holds the current row. Updating from right to
# left (j = i-1 down to 1) lets each cell read the previous row's values
# before they are overwritten; the two end cells stay 1.
an = [1]*n
for i in range(n):
	for j in range(i-1,0,-1):
		an[j] = an[j]+an[j-1]
	# Row i is the first i+1 entries, printed as a Python list.
	print an[0:i+1]
	#print "\t".join(map(lambda x:str(x),an[0:i+1]))
|
normal
|
{
"blob_id": "131caf50cc8682cf180168a1b136b1dcdd70fa76",
"index": 6837,
"step-1": "#-*- coding: UTF-8 -*-\n#Author Motuii\n'''\n * ┏┓ ┏┓ \n * ┏┛┻━━━┛┻┓ \n * ┃ ┃ \n * ┃ ━ ┃ \n * ┃ ┳┛ ┗┳ ┃ \n * ┃ ┃ \n * ┃ ┻ ┃ \n * ┃ ┃ \n * ┗━┓ ┏━┛ \n * ┃ ┃ 神兽保佑 \n * ┃ ┃ 代码无BUG! \n * ┃ ┗━━━┓ \n * ┃ ┣┓ \n * ┃ ┏┛ \n * ┗┓┓┏━┳┓┏┛ \n * ┃┫┫ ┃┫┫ \n * ┗┻┛ ┗┻┛ \n * \n '''\n\nn = 10\n# arr = [[1]*i for i in range(1,n+1)]\n# for i in range(len(arr)):\n# \tfor j in range(len(arr[i])):\n# \t\tif (j!=0 and j!=len(arr[i-1])):\n# \t\t\tarr[i][j] = arr[i-1][j-1] + arr[i-1][j]\n# \tprint ' '.join(map(lambda x:str(x),arr[i]))\n\nan = [1]*n\nfor i in range(n):\n\tfor j in range(i-1,0,-1):\n\t\tan[j] = an[j]+an[j-1]\n\n\tprint an[0:i+1]\n\t#print \"\\t\".join(map(lambda x:str(x),an[0:i+1]))\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import asyncio
import secrets
import pytest
from libp2p.host.ping import ID, PING_LENGTH
from libp2p.tools.factories import pair_of_connected_hosts
@pytest.mark.asyncio
async def test_ping_once():
    """A single ping payload sent over the ping protocol is echoed back
    unchanged by the remote host."""
    async with pair_of_connected_hosts() as (host_a, host_b):
        ping_stream = await host_b.new_stream(host_a.get_id(), (ID,))
        payload = secrets.token_bytes(PING_LENGTH)
        await ping_stream.write(payload)
        echoed = await ping_stream.read(PING_LENGTH)
        assert echoed == payload
        await ping_stream.close()
SOME_PING_COUNT = 3


@pytest.mark.asyncio
async def test_ping_several():
    """Several consecutive pings on one stream are each echoed back
    unchanged, mirroring a peer that pings on a periodic interval."""
    async with pair_of_connected_hosts() as (host_a, host_b):
        ping_stream = await host_b.new_stream(host_a.get_id(), (ID,))
        for _round in range(SOME_PING_COUNT):
            payload = secrets.token_bytes(PING_LENGTH)
            await ping_stream.write(payload)
            assert await ping_stream.read(PING_LENGTH) == payload
            # Yield control between rounds; a real peer would sleep for its
            # ping interval here, but zero delay is fine for the test.
            await asyncio.sleep(0)
        await ping_stream.close()
|
normal
|
{
"blob_id": "0233b46da3b9351f110ffc7f8622ca8f9ee9944d",
"index": 3000,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]\nasync def test_ping_once():\n async with pair_of_connected_hosts() as (host_a, host_b):\n stream = await host_b.new_stream(host_a.get_id(), (ID,))\n some_ping = secrets.token_bytes(PING_LENGTH)\n await stream.write(some_ping)\n some_pong = await stream.read(PING_LENGTH)\n assert some_ping == some_pong\n await stream.close()\n\n\n<mask token>\n\n\[email protected]\nasync def test_ping_several():\n async with pair_of_connected_hosts() as (host_a, host_b):\n stream = await host_b.new_stream(host_a.get_id(), (ID,))\n for _ in range(SOME_PING_COUNT):\n some_ping = secrets.token_bytes(PING_LENGTH)\n await stream.write(some_ping)\n some_pong = await stream.read(PING_LENGTH)\n assert some_ping == some_pong\n await asyncio.sleep(0)\n await stream.close()\n",
"step-3": "<mask token>\n\n\[email protected]\nasync def test_ping_once():\n async with pair_of_connected_hosts() as (host_a, host_b):\n stream = await host_b.new_stream(host_a.get_id(), (ID,))\n some_ping = secrets.token_bytes(PING_LENGTH)\n await stream.write(some_ping)\n some_pong = await stream.read(PING_LENGTH)\n assert some_ping == some_pong\n await stream.close()\n\n\nSOME_PING_COUNT = 3\n\n\[email protected]\nasync def test_ping_several():\n async with pair_of_connected_hosts() as (host_a, host_b):\n stream = await host_b.new_stream(host_a.get_id(), (ID,))\n for _ in range(SOME_PING_COUNT):\n some_ping = secrets.token_bytes(PING_LENGTH)\n await stream.write(some_ping)\n some_pong = await stream.read(PING_LENGTH)\n assert some_ping == some_pong\n await asyncio.sleep(0)\n await stream.close()\n",
"step-4": "import asyncio\nimport secrets\nimport pytest\nfrom libp2p.host.ping import ID, PING_LENGTH\nfrom libp2p.tools.factories import pair_of_connected_hosts\n\n\[email protected]\nasync def test_ping_once():\n async with pair_of_connected_hosts() as (host_a, host_b):\n stream = await host_b.new_stream(host_a.get_id(), (ID,))\n some_ping = secrets.token_bytes(PING_LENGTH)\n await stream.write(some_ping)\n some_pong = await stream.read(PING_LENGTH)\n assert some_ping == some_pong\n await stream.close()\n\n\nSOME_PING_COUNT = 3\n\n\[email protected]\nasync def test_ping_several():\n async with pair_of_connected_hosts() as (host_a, host_b):\n stream = await host_b.new_stream(host_a.get_id(), (ID,))\n for _ in range(SOME_PING_COUNT):\n some_ping = secrets.token_bytes(PING_LENGTH)\n await stream.write(some_ping)\n some_pong = await stream.read(PING_LENGTH)\n assert some_ping == some_pong\n await asyncio.sleep(0)\n await stream.close()\n",
"step-5": "import asyncio\nimport secrets\n\nimport pytest\n\nfrom libp2p.host.ping import ID, PING_LENGTH\nfrom libp2p.tools.factories import pair_of_connected_hosts\n\n\[email protected]\nasync def test_ping_once():\n async with pair_of_connected_hosts() as (host_a, host_b):\n stream = await host_b.new_stream(host_a.get_id(), (ID,))\n some_ping = secrets.token_bytes(PING_LENGTH)\n await stream.write(some_ping)\n some_pong = await stream.read(PING_LENGTH)\n assert some_ping == some_pong\n await stream.close()\n\n\nSOME_PING_COUNT = 3\n\n\[email protected]\nasync def test_ping_several():\n async with pair_of_connected_hosts() as (host_a, host_b):\n stream = await host_b.new_stream(host_a.get_id(), (ID,))\n for _ in range(SOME_PING_COUNT):\n some_ping = secrets.token_bytes(PING_LENGTH)\n await stream.write(some_ping)\n some_pong = await stream.read(PING_LENGTH)\n assert some_ping == some_pong\n # NOTE: simulate some time to sleep to mirror a real\n # world usage where a peer sends pings on some periodic interval\n # NOTE: this interval can be `0` for this test.\n await asyncio.sleep(0)\n await stream.close()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 *-*
import MySQLdb
# Open the database connection used by every function below.
# NOTE(review): MySQLdb.connect's positional order is (host, user, passwd, db);
# here 'ABarbara' is passed as user and 'root' as passwd — confirm the
# credentials are not swapped.
conn = MySQLdb.connect('localhost', 'ABarbara', 'root', '1dawabarbara')
def crearTabla(query):
    """Execute a DDL statement (e.g. CREATE TABLE) on the open connection."""
    cur = conn.cursor()  # cursor carries the statement to the server
    cur.execute(query)
    cur.close()  # release the cursor once the statement has run
def insertarEmpleados():
cursor= conn.cursor()
for x in range(2):
try:
nombre = raw_input('Nombre: ')
apellido = raw_input('Apellido: ')
sueldoBase = comprobarSueldo(float(raw_input ('Sueldo base: ')))
hijos = (int(raw_input('Número de hijos: ')))
sueldoFinal = calcularImponible(sueldoBase, hijos)
insert = (("INSERT INTO EMPLEADOS VALUES('%s', '%s', '%f', '%d', '%f')" ) % (nombre, apellido, sueldoBase, hijos, sueldoFinal))
cursor.execute(insert)
except ValueError:
print "Error, tipo de dato incorrecto"
except Exception:
print "Error"
cursor.close()
def comprobarSueldo(sueldoBase):
    """Enforce the minimum wage: any base salary below 600 is raised to 600."""
    return 600 if sueldoBase < 600 else sueldoBase
def calcularImponible(sueldo, hijos):
    """Final salary: base plus a 5% bonus per child (no children, no bonus)."""
    if hijos <= 0:
        return sueldo
    # Same expression shape as the original to keep float results identical.
    return sueldo + ((0.05 * sueldo) * hijos)
# Script entry point: create the table, read two employees from stdin,
# then persist the inserts and disconnect.
crearTabla("CREATE TABLE EMPLEADOS (nombre varchar(100), apellido varchar(100), sueldo_base Decimal, hijos int, sueldo_final Decimal)")
insertarEmpleados()
conn.commit()  # flush the INSERTs done by insertarEmpleados()
conn.close()
|
normal
|
{
"blob_id": "8a2b7376369513ce403a2542fb8c6d5826b2169b",
"index": 9949,
"step-1": "# -*- coding: utf-8 *-*\nimport MySQLdb \n\nconn = MySQLdb.connect('localhost', 'ABarbara', 'root', '1dawabarbara') # Abro la conexión \n\ndef crearTabla(query): # Le paso la cadena que realizará el create como parámetro.\n\tcursor = conn.cursor() #En un cursor (de la conexión) almaceno lo que quiero enviar a la base de datos.\n\tcursor.execute(query) #Ejecuto la orden\n\tcursor.close() # Una vez utilizado, cierro mi cursor.\n\ndef insertarEmpleados():\n\tcursor= conn.cursor()\n\tfor x in range(2):\n\t\ttry:\n\t\t\tnombre = raw_input('Nombre: ')\n\t\t\tapellido = raw_input('Apellido: ')\n\t\t\tsueldoBase = comprobarSueldo(float(raw_input ('Sueldo base: ')))\n\t\t\thijos = (int(raw_input('Número de hijos: ')))\n\t\t\tsueldoFinal = calcularImponible(sueldoBase, hijos)\n\t\t\tinsert = ((\"INSERT INTO EMPLEADOS VALUES('%s', '%s', '%f', '%d', '%f')\" ) % (nombre, apellido, sueldoBase, hijos, sueldoFinal))\n\n\t\t\tcursor.execute(insert) \n\n\t\texcept ValueError:\n\t\t\tprint \"Error, tipo de dato incorrecto\"\n\t\texcept Exception:\n\t\t\tprint \"Error\"\n\tcursor.close()\n\ndef comprobarSueldo(sueldoBase):\n\tif sueldoBase<600:\n\t\tsueldoBase=600\n\treturn sueldoBase\n\ndef calcularImponible(sueldo, hijos):\n\tif hijos>0:\n\t\tsueldoFinal= sueldo+((0.05*sueldo)*hijos)\n\telse:\n\t\tsueldoFinal= sueldo\n\treturn sueldoFinal\n\ncrearTabla(\"CREATE TABLE EMPLEADOS (nombre varchar(100), apellido varchar(100), sueldo_base Decimal, hijos int, sueldo_final Decimal)\")\ninsertarEmpleados()\nconn.commit() \nconn.close()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import preprocessing
import tokenization
import vectorspacemodel
import pickle
import collections
import os
import math
import operator
from itertools import islice
def take(n, iterable):
    """Return the first *n* items of *iterable* as a list."""
    head = islice(iterable, n)
    return [item for item in head]
# Resolve the 'links' pickle relative to the current working directory.
directory = os.getcwd()
links_path = os.path.join(directory, 'links')
# Getting Index from pickle dump
# Keep calling load() until EOF so the last pickled object in the file wins.
with open("D_INDEXED_FILE/index", 'rb') as f:
    while True:
        try:
            index = pickle.load(f)
        except EOFError:
            break
inv_index = index  # presumably the inverted index produced by the indexer
# Getting Document vectors from pickle dump
dv = {}  # maps vector file name -> document vector
vec_files = [file for file in os.listdir("D_INDEXED_FILE/vectors/.") if file.endswith("vector")]
# x = index, y = filename
for x, y in enumerate(vec_files):
    # Open all of the token lists
    with open("D_INDEXED_FILE/vectors/" + y, 'rb') as ff:
        while True:
            try:
                vector = pickle.load(ff)
            except EOFError:
                break
        # NOTE(review): only the last pickled object per file survives; each
        # vector appears to be [[weight, term_position], ...] — confirm
        # against the indexer that wrote these files.
        dv[y] = vector
# By here you will get all document vectors in dv variable
#print("Document vectors are: ", dv)
query = input("Enter the query: ")
query_vector = []  # will hold [idf_weight, term_position] pairs for the query
idf,terms = vectorspacemodel.get_idf(inv_index)
# Sort terms alphabetically — presumably so term positions match the ones
# used when the document vectors were built.
od = collections.OrderedDict(sorted(idf.items()))
#print("idf is: ", idf)
#print("terms are: ", terms)
processed_query = preprocessing.parse_query(query.lower())
#print("processed query is: ", processed_query)
tokenized_query = tokenization.query_tokenization(processed_query)
#print("tokenized query is: ", tokenized_query)
# This code makes the query vector and normalizes it
# For each term (in sorted order) that occurs in the query, append its idf
# weight together with the term's position x.
for x,y in enumerate((od.items())):
    for i in tokenized_query.split():
        if i == y[0]:
            #print(y[1])
            # A term seen a second time has its weight doubled.
            # NOTE(review): a third occurrence no longer matches [y[1],x]
            # and appends a duplicate entry — likely unintended; confirm.
            if [y[1],x] in query_vector:
                query_vector.remove([y[1], x])
                query_vector.append([y[1]+y[1],x])
            else:
                query_vector.append([y[1],x])
#print("Unnormalized query vector is: ", query_vector)
# Normalizing here
# Divide by the Euclidean norm so the dot product below acts as a cosine.
weight = 0.0
for i in range(len(query_vector)):
    weight = weight + (query_vector[i][0] ** 2)
weight = math.sqrt(weight)
# print("weight is: ", weight)
for i in range(len(query_vector)):
    query_vector[i][0] = query_vector[i][0] / weight
#print("the Normalized query vector is: ", query_vector)
# Calculate Similarity between query vector and all document vectors
# The early break on di < dj assumes each dv[k] is sorted ascending by term
# position — TODO confirm the indexer guarantees this.
similarity = {}
for k in dv.keys():
    sim = float(0)
    for i in range(len(query_vector)):
        di = query_vector[i][1]
        #import pdb; pdb.set_trace()
        for j in range(len(dv[k])):
            dj = dv[k][j][1]
            if di == dj:
                # Matching term: accumulate the product of the two weights.
                mul = query_vector[i][0] * dv[k][j][0]
                sim += mul
                #print (mul)
                break
            elif di < dj:
                # Positions past di cannot match — stop scanning this doc.
                break
    similarity[k] = sim
    #print("document vector is: ", dv[k])
    #print("query vector is: ", v1)
    #print ("similarity is: ", sim)
    #print(sim)
#print("cosine similarity is: ", similarity)
# Rank documents by similarity, highest first, and keep the best seven.
sorted_x = sorted(similarity.items(), key=operator.itemgetter(1), reverse=True)
#print("Sorted Cosine Similarity",sorted_x)
top_7 = take(7, sorted_x)
#print("Top 7 documents are: ", top_7)
# Getting the links file to match file with link
with open(links_path, 'rb') as f:
    while True:
        try:
            web_links = pickle.load(f)
        except EOFError:
            break
#print("All the web links are: ", web_links)
#print("Top 10 documents are:\n ", ("\n".join(str(x[0][0:-7]) for x in top_5)).strip())
print("Our Search Results are: ")
# Map each top-scoring vector file back to its web link and print it.
for x in top_7:
    #print("".join(str(x[0][0:-7])))
    if x[1] == float(0):
        # Results are sorted descending, so a zero score ends the listing.
        print("No relevant documents found!")
        break
    else:
        for j in web_links.keys():
            # Compare base names: presumably strips a 7-char ".vector"
            # suffix from x[0] and a 5-char ".html" suffix from j — verify.
            if "".join(str(x[0][0:-7])) == j[0:-5]:
                print(repr(web_links[j]).strip('\''))
# print("Total document vectors are: ", len(dv))
# print("Total unique terms for index are: ", len(inv_index))
# print("Total unique terms from terms are: ", len(terms))
# print("Toal unique terms from idf are: ", len(idf))
|
normal
|
{
"blob_id": "1630a3d0becac195feee95a1c3b23568612a48d2",
"index": 3194,
"step-1": "<mask token>\n\n\ndef take(n, iterable):\n return list(islice(iterable, n))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef take(n, iterable):\n return list(islice(iterable, n))\n\n\n<mask token>\nwith open('D_INDEXED_FILE/index', 'rb') as f:\n while True:\n try:\n index = pickle.load(f)\n except EOFError:\n break\n<mask token>\nfor x, y in enumerate(vec_files):\n with open('D_INDEXED_FILE/vectors/' + y, 'rb') as ff:\n while True:\n try:\n vector = pickle.load(ff)\n except EOFError:\n break\n dv[y] = vector\n<mask token>\nfor x, y in enumerate(od.items()):\n for i in tokenized_query.split():\n if i == y[0]:\n if [y[1], x] in query_vector:\n query_vector.remove([y[1], x])\n query_vector.append([y[1] + y[1], x])\n else:\n query_vector.append([y[1], x])\n<mask token>\nfor i in range(len(query_vector)):\n weight = weight + query_vector[i][0] ** 2\n<mask token>\nfor i in range(len(query_vector)):\n query_vector[i][0] = query_vector[i][0] / weight\n<mask token>\nfor k in dv.keys():\n sim = float(0)\n for i in range(len(query_vector)):\n di = query_vector[i][1]\n for j in range(len(dv[k])):\n dj = dv[k][j][1]\n if di == dj:\n mul = query_vector[i][0] * dv[k][j][0]\n sim += mul\n break\n elif di < dj:\n break\n similarity[k] = sim\n<mask token>\nwith open(links_path, 'rb') as f:\n while True:\n try:\n web_links = pickle.load(f)\n except EOFError:\n break\nprint('Our Search Results are: ')\nfor x in top_7:\n if x[1] == float(0):\n print('No relevant documents found!')\n break\n else:\n for j in web_links.keys():\n if ''.join(str(x[0][0:-7])) == j[0:-5]:\n print(repr(web_links[j]).strip(\"'\"))\n",
"step-3": "<mask token>\n\n\ndef take(n, iterable):\n return list(islice(iterable, n))\n\n\ndirectory = os.getcwd()\nlinks_path = os.path.join(directory, 'links')\nwith open('D_INDEXED_FILE/index', 'rb') as f:\n while True:\n try:\n index = pickle.load(f)\n except EOFError:\n break\ninv_index = index\ndv = {}\nvec_files = [file for file in os.listdir('D_INDEXED_FILE/vectors/.') if\n file.endswith('vector')]\nfor x, y in enumerate(vec_files):\n with open('D_INDEXED_FILE/vectors/' + y, 'rb') as ff:\n while True:\n try:\n vector = pickle.load(ff)\n except EOFError:\n break\n dv[y] = vector\nquery = input('Enter the query: ')\nquery_vector = []\nidf, terms = vectorspacemodel.get_idf(inv_index)\nod = collections.OrderedDict(sorted(idf.items()))\nprocessed_query = preprocessing.parse_query(query.lower())\ntokenized_query = tokenization.query_tokenization(processed_query)\nfor x, y in enumerate(od.items()):\n for i in tokenized_query.split():\n if i == y[0]:\n if [y[1], x] in query_vector:\n query_vector.remove([y[1], x])\n query_vector.append([y[1] + y[1], x])\n else:\n query_vector.append([y[1], x])\nweight = 0.0\nfor i in range(len(query_vector)):\n weight = weight + query_vector[i][0] ** 2\nweight = math.sqrt(weight)\nfor i in range(len(query_vector)):\n query_vector[i][0] = query_vector[i][0] / weight\nsimilarity = {}\nfor k in dv.keys():\n sim = float(0)\n for i in range(len(query_vector)):\n di = query_vector[i][1]\n for j in range(len(dv[k])):\n dj = dv[k][j][1]\n if di == dj:\n mul = query_vector[i][0] * dv[k][j][0]\n sim += mul\n break\n elif di < dj:\n break\n similarity[k] = sim\nsorted_x = sorted(similarity.items(), key=operator.itemgetter(1), reverse=True)\ntop_7 = take(7, sorted_x)\nwith open(links_path, 'rb') as f:\n while True:\n try:\n web_links = pickle.load(f)\n except EOFError:\n break\nprint('Our Search Results are: ')\nfor x in top_7:\n if x[1] == float(0):\n print('No relevant documents found!')\n break\n else:\n for j in web_links.keys():\n if 
''.join(str(x[0][0:-7])) == j[0:-5]:\n print(repr(web_links[j]).strip(\"'\"))\n",
"step-4": "import preprocessing\nimport tokenization\nimport vectorspacemodel\nimport pickle\nimport collections\nimport os\nimport math\nimport operator\nfrom itertools import islice\n\n\ndef take(n, iterable):\n return list(islice(iterable, n))\n\n\ndirectory = os.getcwd()\nlinks_path = os.path.join(directory, 'links')\nwith open('D_INDEXED_FILE/index', 'rb') as f:\n while True:\n try:\n index = pickle.load(f)\n except EOFError:\n break\ninv_index = index\ndv = {}\nvec_files = [file for file in os.listdir('D_INDEXED_FILE/vectors/.') if\n file.endswith('vector')]\nfor x, y in enumerate(vec_files):\n with open('D_INDEXED_FILE/vectors/' + y, 'rb') as ff:\n while True:\n try:\n vector = pickle.load(ff)\n except EOFError:\n break\n dv[y] = vector\nquery = input('Enter the query: ')\nquery_vector = []\nidf, terms = vectorspacemodel.get_idf(inv_index)\nod = collections.OrderedDict(sorted(idf.items()))\nprocessed_query = preprocessing.parse_query(query.lower())\ntokenized_query = tokenization.query_tokenization(processed_query)\nfor x, y in enumerate(od.items()):\n for i in tokenized_query.split():\n if i == y[0]:\n if [y[1], x] in query_vector:\n query_vector.remove([y[1], x])\n query_vector.append([y[1] + y[1], x])\n else:\n query_vector.append([y[1], x])\nweight = 0.0\nfor i in range(len(query_vector)):\n weight = weight + query_vector[i][0] ** 2\nweight = math.sqrt(weight)\nfor i in range(len(query_vector)):\n query_vector[i][0] = query_vector[i][0] / weight\nsimilarity = {}\nfor k in dv.keys():\n sim = float(0)\n for i in range(len(query_vector)):\n di = query_vector[i][1]\n for j in range(len(dv[k])):\n dj = dv[k][j][1]\n if di == dj:\n mul = query_vector[i][0] * dv[k][j][0]\n sim += mul\n break\n elif di < dj:\n break\n similarity[k] = sim\nsorted_x = sorted(similarity.items(), key=operator.itemgetter(1), reverse=True)\ntop_7 = take(7, sorted_x)\nwith open(links_path, 'rb') as f:\n while True:\n try:\n web_links = pickle.load(f)\n except EOFError:\n 
break\nprint('Our Search Results are: ')\nfor x in top_7:\n if x[1] == float(0):\n print('No relevant documents found!')\n break\n else:\n for j in web_links.keys():\n if ''.join(str(x[0][0:-7])) == j[0:-5]:\n print(repr(web_links[j]).strip(\"'\"))\n",
"step-5": "import preprocessing\r\nimport tokenization\r\nimport vectorspacemodel\r\nimport pickle\r\nimport collections\r\nimport os\r\nimport math\r\nimport operator\r\nfrom itertools import islice\r\n\r\ndef take(n, iterable):\r\n # \"Return first n items of the iterable as a list\"\r\n return list(islice(iterable, n))\r\n\r\ndirectory = os.getcwd()\r\nlinks_path = os.path.join(directory, 'links')\r\n\r\n# Getting Index from pickle dump\r\nwith open(\"D_INDEXED_FILE/index\", 'rb') as f:\r\n while True:\r\n try:\r\n index = pickle.load(f)\r\n except EOFError:\r\n break\r\ninv_index = index\r\n\r\n# Getting Document vectors from pickle dump\r\ndv = {}\r\nvec_files = [file for file in os.listdir(\"D_INDEXED_FILE/vectors/.\") if file.endswith(\"vector\")]\r\n# x = index, y = filename\r\nfor x, y in enumerate(vec_files):\r\n # Open all of the token lists\r\n with open(\"D_INDEXED_FILE/vectors/\" + y, 'rb') as ff:\r\n while True:\r\n try:\r\n vector = pickle.load(ff)\r\n except EOFError:\r\n break\r\n dv[y] = vector\r\n# By here you will get all document vectors in dv variable\r\n\r\n#print(\"Document vectors are: \", dv)\r\n\r\nquery = input(\"Enter the query: \")\r\nquery_vector = []\r\n\r\n\r\nidf,terms = vectorspacemodel.get_idf(inv_index)\r\nod = collections.OrderedDict(sorted(idf.items()))\r\n#print(\"idf is: \", idf)\r\n#print(\"terms are: \", terms)\r\n\r\nprocessed_query = preprocessing.parse_query(query.lower())\r\n#print(\"processed query is: \", processed_query)\r\ntokenized_query = tokenization.query_tokenization(processed_query)\r\n#print(\"tokenized query is: \", tokenized_query)\r\n\r\n# This code makes the query vector and normalizes it\r\nfor x,y in enumerate((od.items())):\r\n for i in tokenized_query.split():\r\n if i == y[0]:\r\n #print(y[1])\r\n if [y[1],x] in query_vector:\r\n query_vector.remove([y[1], x])\r\n query_vector.append([y[1]+y[1],x])\r\n else:\r\n query_vector.append([y[1],x])\r\n\r\n#print(\"Unnormalized query vector is: \", 
query_vector)\r\n\r\n# Normalizing here\r\nweight = 0.0\r\nfor i in range(len(query_vector)):\r\n weight = weight + (query_vector[i][0] ** 2)\r\nweight = math.sqrt(weight)\r\n# print(\"weight is: \", weight)\r\nfor i in range(len(query_vector)):\r\n query_vector[i][0] = query_vector[i][0] / weight\r\n\r\n#print(\"the Normalized query vector is: \", query_vector)\r\n\r\n# Calculate Similarity between query vector and all document vectors\r\nsimilarity = {}\r\nfor k in dv.keys():\r\n sim = float(0)\r\n for i in range(len(query_vector)):\r\n di = query_vector[i][1]\r\n #import pdb; pdb.set_trace()\r\n for j in range(len(dv[k])):\r\n dj = dv[k][j][1]\r\n if di == dj:\r\n mul = query_vector[i][0] * dv[k][j][0]\r\n sim += mul\r\n #print (mul)\r\n break\r\n elif di < dj:\r\n break\r\n similarity[k] = sim\r\n #print(\"document vector is: \", dv[k])\r\n #print(\"query vector is: \", v1)\r\n #print (\"similarity is: \", sim)\r\n #print(sim)\r\n\r\n#print(\"cosine similarity is: \", similarity)\r\n\r\nsorted_x = sorted(similarity.items(), key=operator.itemgetter(1), reverse=True)\r\n#print(\"Sorted Cosine Similarity\",sorted_x)\r\ntop_7 = take(7, sorted_x)\r\n#print(\"Top 7 documents are: \", top_7)\r\n\r\n# Getting the links file to match file with link\r\nwith open(links_path, 'rb') as f:\r\n while True:\r\n try:\r\n web_links = pickle.load(f)\r\n except EOFError:\r\n break\r\n#print(\"All the web links are: \", web_links)\r\n\r\n\r\n\r\n#print(\"Top 10 documents are:\\n \", (\"\\n\".join(str(x[0][0:-7]) for x in top_5)).strip())\r\nprint(\"Our Search Results are: \")\r\nfor x in top_7:\r\n #print(\"\".join(str(x[0][0:-7])))\r\n if x[1] == float(0):\r\n print(\"No relevant documents found!\")\r\n break\r\n else:\r\n for j in web_links.keys():\r\n if \"\".join(str(x[0][0:-7])) == j[0:-5]:\r\n print(repr(web_links[j]).strip('\\''))\r\n\r\n# print(\"Total document vectors are: \", len(dv))\r\n# print(\"Total unique terms for index are: \", len(inv_index))\r\n# print(\"Total 
unique terms from terms are: \", len(terms))\r\n# print(\"Toal unique terms from idf are: \", len(idf))",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# coding=utf-8
# __author__ = 'lyl'
import json
import csv
import sys
# Python 2 hack: force the default codec to UTF-8 so implicit str<->unicode
# conversions of the scraped data do not raise UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding('utf-8')
def read_json(filename):
    """Read a JSON file and return the parsed Python object.

    :param filename: path of the JSON file
    :return: the parsed content, e.g. [{}, {}, {}, ...]
    """
    # Context manager closes the handle deterministically; the original
    # `open(filename).read()` left the file open until garbage collection.
    with open(filename) as f:
        return json.load(f)
def write_csv(filename, data_list):
    """Write a list of dicts [{}, {}, ...] to *filename* as CSV.

    The header row is taken from the keys of the first dict; every dict is
    expected to share the same keys.

    :param filename: output CSV file name
    :param data_list: list of dicts, e.g. [{}, {}, ...]
    :return: None
    """
    # Guard: an empty list has no first row to derive the header from
    # (the original raised IndexError); still create/truncate the file.
    if not data_list:
        open(filename, 'w').close()
        return
    with open(filename, 'w') as f:
        dict_writer = csv.DictWriter(f, data_list[0].keys())
        dict_writer.writeheader()
        dict_writer.writerows(data_list)
def write_csv2(filename, content_list):
    """Like write_csv: dump a list of dicts to *filename* as CSV.

    Header comes from the first dict's keys; one row per dict's values.
    """
    with open(filename, 'w') as out:
        writer = csv.writer(out)
        writer.writerow(content_list[0].keys())
        for record in content_list:
            writer.writerow(record.values())
if __name__ == "__main__":

    # Read the scraped records from the JSON dump
    content_list = read_json('lagou_info_lin3.json')
    # Write the records out as CSV
    write_csv( "lagou_info_lin3.csv", content_list)
|
normal
|
{
"blob_id": "7531480f629c1b3d28210afac4ef84b06edcd420",
"index": 3825,
"step-1": "<mask token>\n\n\ndef write_csv(filename, data_list):\n \"\"\"\n 将python对象 [{}, {}. {}, {} ...] 写入到csv文件中\n :param filename: 生成的csv文件名\n :param data_list: [{}, {}. {}, {} ...]\n :return: None\n \"\"\"\n with open(filename, 'w') as f:\n dict_writer = csv.DictWriter(f, data_list[0].keys())\n dict_writer.writeheader()\n dict_writer.writerows(data_list)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef read_json(filename):\n \"\"\"\n 读取json格式的文件\n :param filename: json文件的文件名\n :return: [{}, {}, {}, {}, {},{} ......]\n \"\"\"\n return json.loads(open(filename).read())\n\n\ndef write_csv(filename, data_list):\n \"\"\"\n 将python对象 [{}, {}. {}, {} ...] 写入到csv文件中\n :param filename: 生成的csv文件名\n :param data_list: [{}, {}. {}, {} ...]\n :return: None\n \"\"\"\n with open(filename, 'w') as f:\n dict_writer = csv.DictWriter(f, data_list[0].keys())\n dict_writer.writeheader()\n dict_writer.writerows(data_list)\n\n\ndef write_csv2(filename, content_list):\n \"\"\"\n 与 write_csv 类似\n :param filename:\n :param content_list:\n :return:\n \"\"\"\n with open(filename, 'w') as f:\n csv_writer = csv.writer(f)\n head_list = content_list[0].keys()\n data_list = [content.values() for content in content_list]\n csv_writer.writerow(head_list)\n csv_writer.writerows(data_list)\n\n\n<mask token>\n",
"step-3": "<mask token>\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\ndef read_json(filename):\n \"\"\"\n 读取json格式的文件\n :param filename: json文件的文件名\n :return: [{}, {}, {}, {}, {},{} ......]\n \"\"\"\n return json.loads(open(filename).read())\n\n\ndef write_csv(filename, data_list):\n \"\"\"\n 将python对象 [{}, {}. {}, {} ...] 写入到csv文件中\n :param filename: 生成的csv文件名\n :param data_list: [{}, {}. {}, {} ...]\n :return: None\n \"\"\"\n with open(filename, 'w') as f:\n dict_writer = csv.DictWriter(f, data_list[0].keys())\n dict_writer.writeheader()\n dict_writer.writerows(data_list)\n\n\ndef write_csv2(filename, content_list):\n \"\"\"\n 与 write_csv 类似\n :param filename:\n :param content_list:\n :return:\n \"\"\"\n with open(filename, 'w') as f:\n csv_writer = csv.writer(f)\n head_list = content_list[0].keys()\n data_list = [content.values() for content in content_list]\n csv_writer.writerow(head_list)\n csv_writer.writerows(data_list)\n\n\nif __name__ == '__main__':\n content_list = read_json('lagou_info_lin3.json')\n write_csv('lagou_info_lin3.csv', content_list)\n",
"step-4": "import json\nimport csv\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\ndef read_json(filename):\n \"\"\"\n 读取json格式的文件\n :param filename: json文件的文件名\n :return: [{}, {}, {}, {}, {},{} ......]\n \"\"\"\n return json.loads(open(filename).read())\n\n\ndef write_csv(filename, data_list):\n \"\"\"\n 将python对象 [{}, {}. {}, {} ...] 写入到csv文件中\n :param filename: 生成的csv文件名\n :param data_list: [{}, {}. {}, {} ...]\n :return: None\n \"\"\"\n with open(filename, 'w') as f:\n dict_writer = csv.DictWriter(f, data_list[0].keys())\n dict_writer.writeheader()\n dict_writer.writerows(data_list)\n\n\ndef write_csv2(filename, content_list):\n \"\"\"\n 与 write_csv 类似\n :param filename:\n :param content_list:\n :return:\n \"\"\"\n with open(filename, 'w') as f:\n csv_writer = csv.writer(f)\n head_list = content_list[0].keys()\n data_list = [content.values() for content in content_list]\n csv_writer.writerow(head_list)\n csv_writer.writerows(data_list)\n\n\nif __name__ == '__main__':\n content_list = read_json('lagou_info_lin3.json')\n write_csv('lagou_info_lin3.csv', content_list)\n",
"step-5": "# coding=utf-8\r\n# __author__ = 'lyl'\r\n\r\nimport json\r\nimport csv\r\n\r\nimport sys\r\nreload(sys)\r\nsys.setdefaultencoding('utf-8')\r\n\r\n\r\ndef read_json(filename):\r\n \"\"\"\r\n 读取json格式的文件\r\n :param filename: json文件的文件名\r\n :return: [{}, {}, {}, {}, {},{} ......]\r\n \"\"\"\r\n return json.loads(open(filename).read())\r\n\r\ndef write_csv(filename, data_list):\r\n \"\"\"\r\n 将python对象 [{}, {}. {}, {} ...] 写入到csv文件中\r\n :param filename: 生成的csv文件名\r\n :param data_list: [{}, {}. {}, {} ...]\r\n :return: None\r\n \"\"\"\r\n with open(filename,'w') as f:\r\n dict_writer = csv.DictWriter(f, data_list[0].keys())\r\n dict_writer.writeheader()\r\n dict_writer.writerows(data_list)\r\n\r\n\r\ndef write_csv2(filename, content_list):\r\n \"\"\"\r\n 与 write_csv 类似\r\n :param filename:\r\n :param content_list:\r\n :return:\r\n \"\"\"\r\n with open(filename, 'w') as f:\r\n csv_writer = csv.writer(f)\r\n\r\n head_list = content_list[0].keys()\r\n data_list = [content.values() for content in content_list]\r\n csv_writer.writerow(head_list)\r\n csv_writer.writerows(data_list)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n # 读出json数据内容\r\n content_list = read_json('lagou_info_lin3.json')\r\n # 将数据写入到csv文件\r\n write_csv( \"lagou_info_lin3.csv\", content_list)",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
import sys
import pysolr
import requests
import logging
import json
import datetime
from urlparse import urlparse
from django.conf import settings
from django.utils.html import strip_tags
from aggregator.utils import mercator_to_llbbox
def get_date(layer):
    """
    Returns a date for Solr. A date can be detected or from metadata.
    It can be a range or a simple date in isoformat.

    :param layer: layer model instance exposing get_layer_dates() and created
    :return: (solr_date_or_None, "Detected" | "From Metadata")
    """
    date = None
    # 1 == "From Metadata" is the default provenance; 0 means "Detected".
    # Renamed from `type`, which shadowed the builtin.
    date_type = 1
    layer_dates = layer.get_layer_dates()
    if layer_dates:
        date = layer_dates[0][0]
        date_type = layer_dates[0][1]
    if date is None:
        # Fall back to the record's creation date.
        # NOTE(review): layer.created.date() is a datetime.date, which
        # get_solr_date() rejects (it only accepts datetime.datetime), so this
        # fallback always serialises to None — confirm whether that is intended.
        date = layer.created.date()
    # layer date > 2300 is invalid for sure
    # TODO put this logic in date miner
    if date.year > 2300:
        date = None
    if date_type == 0:
        date_type = "Detected"
    if date_type == 1:
        date_type = "From Metadata"
    return get_solr_date(date), date_type
def get_solr_date(pydate):
    """
    Format *pydate* as a Solr timestamp (YYYY-MM-DDThh:mm:ssZ).

    Anything that is not a datetime.datetime — including None, strings and
    plain dates — yields None.
    """
    if not isinstance(pydate, datetime.datetime):
        return None
    try:
        # Keep only the seconds-resolution prefix of the ISO form.
        return pydate.isoformat()[0:19] + 'Z'
    except Exception:
        return None
class SolrHypermap(object):
    """Thin wrapper around the Solr core used to index Hypermap layers."""
    solr_url = settings.SOLR_URL
    solr = pysolr.Solr(solr_url, timeout=60)
    logger = logging.getLogger("hypermap")
    @staticmethod
    def get_domain(url):
        """Return the hostname part of *url* ("Harvard" when localhost)."""
        urlParts = urlparse(url)
        hostname = urlParts.hostname
        if hostname == "localhost":
            return "Harvard"  # assumption
        return hostname
    @staticmethod
    def is_solr_up():
        """Query the Solr cores admin endpoint; True when a status comes back.

        NOTE(review): when 'status' is falsy the parsed dict (not False) is
        returned, and a missing 'status' key raises KeyError uncaught —
        confirm callers only truth-test the result.
        """
        solr_url = settings.SOLR_URL
        solr_url_parts = solr_url.split('/')
        admin_url = '/'.join(solr_url_parts[:-1]) + '/admin/cores'
        params = {'action': 'STATUS', 'wt': 'json'}
        try:
            req = requests.get(admin_url, params=params)
            response = json.loads(req.text)
            status = response['status']
            if status:
                response = True
        except requests.exceptions.RequestException:
            response = False
        return response
    @staticmethod
    def layer_to_solr(layer):
        """Index *layer* into Solr.

        Any pre-existing document with the same LayerId is deleted first.
        Returns (True, None) on success, (False, exc_value) on any failure.
        """
        category = None
        username = None
        try:
            # as a first thing we need to remove the existing index in case there is already one
            SolrHypermap.solr.delete(q='LayerId:%s' % layer.id)
            bbox = None
            if not layer.has_valid_bbox():
                message = 'There are not valid coordinates for layer id: %s' % layer.id
                SolrHypermap.logger.error(message)
            else:
                bbox = [float(layer.bbox_x0), float(layer.bbox_y0), float(layer.bbox_x1), float(layer.bbox_y1)]
                # ESRI codes 102113/102100 are web-mercator projections:
                # convert the bbox to lat/lon degrees before indexing.
                for proj in layer.srs.values():
                    if proj['code'] in ('102113', '102100'):
                        bbox = mercator_to_llbbox(bbox)
                minX = bbox[0]
                minY = bbox[1]
                maxX = bbox[2]
                maxY = bbox[3]
                # coords hack needed by solr: clamp to valid WGS84 bounds
                if (minX < -180):
                    minX = -180
                if (maxX > 180):
                    maxX = 180
                if (minY < -90):
                    minY = -90
                if (maxY > 90):
                    maxY = 90
                # Solr spatial syntax: ENVELOPE(minX, maxX, maxY, minY)
                wkt = "ENVELOPE({:f},{:f},{:f},{:f})".format(minX, maxX, maxY, minY)
                halfWidth = (maxX - minX) / 2.0
                halfHeight = (maxY - minY) / 2.0
                # NOTE(review): mathematically just (maxX-minX)*(maxY-minY);
                # the half-dimensions cancel out.
                area = (halfWidth * 2) * (halfHeight * 2)
            domain = SolrHypermap.get_domain(layer.service.url)
            if hasattr(layer, 'layerwm'):
                category = layer.layerwm.category
                username = layer.layerwm.username
            abstract = layer.abstract
            if abstract:
                # Strip any HTML markup from the abstract before indexing.
                abstract = strip_tags(layer.abstract)
            else:
                abstract = ''
            if layer.service.type == "WM":
                originator = username
            else:
                originator = domain
            # now we add the index
            solr_record = {
                "LayerId": str(layer.id),
                "LayerName": layer.name,
                "LayerTitle": layer.title,
                "Originator": originator,
                "ServiceId": str(layer.service.id),
                "ServiceType": layer.service.type,
                "LayerCategory": category,
                "LayerUsername": username,
                "LayerUrl": layer.url,
                "LayerReliability": layer.reliability,
                "LayerRecentReliability": layer.recent_reliability,
                "LayerLastStatus": layer.last_status,
                "Is_Public": layer.is_public,
                "Availability": "Online",
                "Location": '{"layerInfoPage": "' + layer.get_absolute_url() + '"}',
                "Abstract": abstract,
                "SrsProjectionCode": layer.srs.values_list('code', flat=True),
                "DomainName": layer.service.get_domain
            }
            # 'type' shadows the builtin here; rename when next touching this.
            solr_date, type = get_date(layer)
            if solr_date is not None:
                solr_record['LayerDate'] = solr_date
                solr_record['LayerDateType'] = type
            if bbox is not None:
                solr_record['MinX'] = minX
                solr_record['MinY'] = minY
                solr_record['MaxX'] = maxX
                solr_record['MaxY'] = maxY
                solr_record['Area'] = area
                solr_record['bbox'] = wkt
            SolrHypermap.solr.add([solr_record])
            SolrHypermap.logger.info("Solr record saved for layer with id: %s" % layer.id)
            return True, None
        except Exception:
            # Best-effort boundary: log and report the failure to the caller.
            SolrHypermap.logger.error("Error saving solr record for layer with id: %s - %s"
                                      % (layer.id, sys.exc_info()[1]))
            return False, sys.exc_info()[1]
    @staticmethod
    def clear_solr():
        """Clear all indexes in the solr core"""
        SolrHypermap.solr.delete(q='*:*')
        print 'Solr core cleared'
|
normal
|
{
"blob_id": "6eb59f62a1623f308e0eda4e616be4177a421179",
"index": 2254,
"step-1": "import sys\nimport pysolr\nimport requests\nimport logging\nimport json\nimport datetime\n\nfrom urlparse import urlparse\nfrom django.conf import settings\nfrom django.utils.html import strip_tags\n\nfrom aggregator.utils import mercator_to_llbbox\n\n\ndef get_date(layer):\n \"\"\"\n Returns a date for Solr. A date can be detected or from metadata.\n It can be a range or a simple date in isoformat.\n \"\"\"\n date = None\n type = 1\n layer_dates = layer.get_layer_dates()\n if layer_dates:\n date = layer_dates[0][0]\n type = layer_dates[0][1]\n if date is None:\n date = layer.created.date()\n # layer date > 2300 is invalid for sure\n # TODO put this logic in date miner\n if date.year > 2300:\n date = None\n if type == 0:\n type = \"Detected\"\n if type == 1:\n type = \"From Metadata\"\n return get_solr_date(date), type\n\n\ndef get_solr_date(pydate):\n \"\"\"\n Returns a date in a valid Solr format from a string.\n \"\"\"\n # check if date is valid and then set it to solr format YYYY-MM-DDThh:mm:ssZ\n try:\n if isinstance(pydate, datetime.datetime):\n solr_date = '%sZ' % pydate.isoformat()[0:19]\n return solr_date\n else:\n return None\n except Exception:\n return None\n\n\nclass SolrHypermap(object):\n\n solr_url = settings.SOLR_URL\n solr = pysolr.Solr(solr_url, timeout=60)\n logger = logging.getLogger(\"hypermap\")\n\n @staticmethod\n def get_domain(url):\n urlParts = urlparse(url)\n hostname = urlParts.hostname\n if hostname == \"localhost\":\n return \"Harvard\" # assumption\n return hostname\n\n @staticmethod\n def is_solr_up():\n solr_url = settings.SOLR_URL\n solr_url_parts = solr_url.split('/')\n admin_url = '/'.join(solr_url_parts[:-1]) + '/admin/cores'\n params = {'action': 'STATUS', 'wt': 'json'}\n try:\n req = requests.get(admin_url, params=params)\n response = json.loads(req.text)\n status = response['status']\n if status:\n response = True\n except requests.exceptions.RequestException:\n response = False\n return response\n\n 
@staticmethod\n def layer_to_solr(layer):\n category = None\n username = None\n try:\n # as a first thing we need to remove the existing index in case there is already one\n SolrHypermap.solr.delete(q='LayerId:%s' % layer.id)\n bbox = None\n if not layer.has_valid_bbox():\n message = 'There are not valid coordinates for layer id: %s' % layer.id\n SolrHypermap.logger.error(message)\n else:\n bbox = [float(layer.bbox_x0), float(layer.bbox_y0), float(layer.bbox_x1), float(layer.bbox_y1)]\n for proj in layer.srs.values():\n if proj['code'] in ('102113', '102100'):\n bbox = mercator_to_llbbox(bbox)\n minX = bbox[0]\n minY = bbox[1]\n maxX = bbox[2]\n maxY = bbox[3]\n # coords hack needed by solr\n if (minX < -180):\n minX = -180\n if (maxX > 180):\n maxX = 180\n if (minY < -90):\n minY = -90\n if (maxY > 90):\n maxY = 90\n wkt = \"ENVELOPE({:f},{:f},{:f},{:f})\".format(minX, maxX, maxY, minY)\n halfWidth = (maxX - minX) / 2.0\n halfHeight = (maxY - minY) / 2.0\n area = (halfWidth * 2) * (halfHeight * 2)\n domain = SolrHypermap.get_domain(layer.service.url)\n if hasattr(layer, 'layerwm'):\n category = layer.layerwm.category\n username = layer.layerwm.username\n abstract = layer.abstract\n if abstract:\n abstract = strip_tags(layer.abstract)\n else:\n abstract = ''\n if layer.service.type == \"WM\":\n originator = username\n else:\n originator = domain\n # now we add the index\n solr_record = {\n \"LayerId\": str(layer.id),\n \"LayerName\": layer.name,\n \"LayerTitle\": layer.title,\n \"Originator\": originator,\n \"ServiceId\": str(layer.service.id),\n \"ServiceType\": layer.service.type,\n \"LayerCategory\": category,\n \"LayerUsername\": username,\n \"LayerUrl\": layer.url,\n \"LayerReliability\": layer.reliability,\n \"LayerRecentReliability\": layer.recent_reliability,\n \"LayerLastStatus\": layer.last_status,\n \"Is_Public\": layer.is_public,\n \"Availability\": \"Online\",\n \"Location\": '{\"layerInfoPage\": \"' + layer.get_absolute_url() + '\"}',\n \"Abstract\": 
abstract,\n \"SrsProjectionCode\": layer.srs.values_list('code', flat=True),\n \"DomainName\": layer.service.get_domain\n }\n\n solr_date, type = get_date(layer)\n if solr_date is not None:\n solr_record['LayerDate'] = solr_date\n solr_record['LayerDateType'] = type\n if bbox is not None:\n solr_record['MinX'] = minX\n solr_record['MinY'] = minY\n solr_record['MaxX'] = maxX\n solr_record['MaxY'] = maxY\n solr_record['Area'] = area\n solr_record['bbox'] = wkt\n SolrHypermap.solr.add([solr_record])\n SolrHypermap.logger.info(\"Solr record saved for layer with id: %s\" % layer.id)\n return True, None\n except Exception:\n SolrHypermap.logger.error(\"Error saving solr record for layer with id: %s - %s\"\n % (layer.id, sys.exc_info()[1]))\n return False, sys.exc_info()[1]\n\n @staticmethod\n def clear_solr():\n \"\"\"Clear all indexes in the solr core\"\"\"\n SolrHypermap.solr.delete(q='*:*')\n print 'Solr core cleared'\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from rest_framework import viewsets
from .serializers import UserSerializer
from .models import UserCustom
class UserViewSet(viewsets.ModelViewSet):
    """
    API endpoint that allows users to be viewed or edited.

    Inherits the full set of DRF model-viewset actions (list, retrieve,
    create, update, partial_update, destroy) for ``UserCustom`` records.
    """
    # Base queryset; DRF re-evaluates it per request.
    queryset = UserCustom.objects.all()
    # Serializer used for both input validation and output rendering.
    serializer_class = UserSerializer
|
normal
|
{
"blob_id": "fadf16792822926cb7b7386291e52ce44693baf8",
"index": 2053,
"step-1": "<mask token>\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n <mask token>\n queryset = UserCustom.objects.all()\n serializer_class = UserSerializer\n",
"step-3": "<mask token>\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows users to be viewed or edited.\n \"\"\"\n queryset = UserCustom.objects.all()\n serializer_class = UserSerializer\n",
"step-4": "from rest_framework import viewsets\nfrom .serializers import UserSerializer\nfrom .models import UserCustom\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows users to be viewed or edited.\n \"\"\"\n queryset = UserCustom.objects.all()\n serializer_class = UserSerializer\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
def part_1() -> int:
    """Count candidates in the puzzle range accepted by check_number."""
    low, high = 382345, 843167
    return sum(
        1 for candidate in range(low, high + 1) if check_number(str(candidate))
    )
def check_number(problem_input: str) -> bool:
    """Return True if digits never decrease and some adjacent pair repeats."""
    has_pair = False
    # Walk adjacent digit pairs left-to-right.
    for left, right in zip(problem_input, problem_input[1:]):
        if int(right) < int(left):
            # Any decreasing step disqualifies the number immediately.
            return False
        if int(left) == int(right):
            has_pair = True
    return has_pair
def check_number_2(problem_input: str) -> bool:
    """Return True if digits never decrease and some digit repeats EXACTLY twice.

    A repeated digit only counts when its maximal run has length exactly 2:
    "111122" passes (the trailing "22"), "123444" fails (only a run of 3).

    Bug fixed vs. the previous index-juggling version: a standalone double
    with no digit on either side (e.g. the 2-character input "11") left the
    `triple` flag at its initial True and was wrongly rejected. Run-length
    grouping handles every position uniformly.
    """
    from itertools import groupby  # local import: keeps module deps unchanged

    digits = [int(c) for c in problem_input]
    # Reject any decreasing adjacent pair.
    if any(a > b for a, b in zip(digits, digits[1:])):
        return False
    # Accept iff some maximal run of equal digits has length exactly 2.
    return any(sum(1 for _ in group) == 2 for _, group in groupby(digits))
def part_2() -> int:
    """Count candidates in the puzzle range accepted by check_number_2."""
    matches = 0
    for candidate in range(382345, 843167 + 1):
        if check_number_2(str(candidate)):
            matches += 1
    return matches
def main():
    """Smoke-test both validators against known examples, then run both parts."""
    # Each entry prints True when the validator agrees with the expectation.
    part1_cases = [("111111", True), ("223450", False), ("123789", False)]
    for candidate, expected in part1_cases:
        print(check_number(candidate) is expected)

    print("PART 1:", part_1())  # should be 460

    part2_cases = [
        ("112233", True),
        ("123444", False),
        ("111122", True),
        ("112222", True),
        ("1112589", False),
    ]
    for candidate, expected in part2_cases:
        print(check_number_2(candidate) is expected)

    print("PART 2:", part_2())
# Run the example checks and both puzzle parts when executed as a script.
if __name__ == '__main__':
    main()
|
normal
|
{
"blob_id": "c46495eebbe796253f56b7472d5548b41c5d0bc4",
"index": 2411,
"step-1": "def part_1() ->int:\n start = 382345\n end = 843167\n total = 0\n for number in range(start, end + 1):\n if check_number(str(number)):\n total += 1\n return total\n\n\n<mask token>\n\n\ndef check_number_2(problem_input: str) ->bool:\n previous = 0\n current = 1\n triple = True\n seen_a_double = False\n length = len(problem_input)\n while current < length:\n if int(problem_input[current]) < int(problem_input[previous]):\n return False\n if int(problem_input[current]) == int(problem_input[previous]):\n if previous >= 1:\n triple = int(problem_input[previous - 1]) == int(problem_input\n [previous])\n if current < length - 1:\n triple = int(problem_input[current + 1]) == int(problem_input\n [current])\n while current < length - 1 and int(problem_input[current]\n ) == int(problem_input[current + 1]):\n current += 1\n previous += 1\n if not triple:\n seen_a_double = True\n previous += 1\n current += 1\n return seen_a_double\n\n\n<mask token>\n\n\ndef main():\n x = '111111'\n print(check_number(x) is True)\n x = '223450'\n print(check_number(x) is False)\n x = '123789'\n print(check_number(x) is False)\n print('PART 1:', part_1())\n x = '112233'\n print(check_number_2(x) is True)\n x = '123444'\n print(check_number_2(x) is False)\n x = '111122'\n print(check_number_2(x) is True)\n x = '112222'\n print(check_number_2(x) is True)\n x = '1112589'\n print(check_number_2(x) is False)\n print('PART 2:', part_2())\n\n\n<mask token>\n",
"step-2": "def part_1() ->int:\n start = 382345\n end = 843167\n total = 0\n for number in range(start, end + 1):\n if check_number(str(number)):\n total += 1\n return total\n\n\n<mask token>\n\n\ndef check_number_2(problem_input: str) ->bool:\n previous = 0\n current = 1\n triple = True\n seen_a_double = False\n length = len(problem_input)\n while current < length:\n if int(problem_input[current]) < int(problem_input[previous]):\n return False\n if int(problem_input[current]) == int(problem_input[previous]):\n if previous >= 1:\n triple = int(problem_input[previous - 1]) == int(problem_input\n [previous])\n if current < length - 1:\n triple = int(problem_input[current + 1]) == int(problem_input\n [current])\n while current < length - 1 and int(problem_input[current]\n ) == int(problem_input[current + 1]):\n current += 1\n previous += 1\n if not triple:\n seen_a_double = True\n previous += 1\n current += 1\n return seen_a_double\n\n\ndef part_2() ->int:\n start = 382345\n end = 843167\n total = 0\n for number in range(start, end + 1):\n if check_number_2(str(number)):\n total += 1\n return total\n\n\ndef main():\n x = '111111'\n print(check_number(x) is True)\n x = '223450'\n print(check_number(x) is False)\n x = '123789'\n print(check_number(x) is False)\n print('PART 1:', part_1())\n x = '112233'\n print(check_number_2(x) is True)\n x = '123444'\n print(check_number_2(x) is False)\n x = '111122'\n print(check_number_2(x) is True)\n x = '112222'\n print(check_number_2(x) is True)\n x = '1112589'\n print(check_number_2(x) is False)\n print('PART 2:', part_2())\n\n\n<mask token>\n",
"step-3": "def part_1() ->int:\n start = 382345\n end = 843167\n total = 0\n for number in range(start, end + 1):\n if check_number(str(number)):\n total += 1\n return total\n\n\ndef check_number(problem_input: str) ->bool:\n previous = 0\n double = False\n for current in range(1, len(problem_input)):\n if int(problem_input[current]) < int(problem_input[previous]):\n return False\n if int(problem_input[previous]) == int(problem_input[current]):\n double = True\n previous += 1\n return double\n\n\ndef check_number_2(problem_input: str) ->bool:\n previous = 0\n current = 1\n triple = True\n seen_a_double = False\n length = len(problem_input)\n while current < length:\n if int(problem_input[current]) < int(problem_input[previous]):\n return False\n if int(problem_input[current]) == int(problem_input[previous]):\n if previous >= 1:\n triple = int(problem_input[previous - 1]) == int(problem_input\n [previous])\n if current < length - 1:\n triple = int(problem_input[current + 1]) == int(problem_input\n [current])\n while current < length - 1 and int(problem_input[current]\n ) == int(problem_input[current + 1]):\n current += 1\n previous += 1\n if not triple:\n seen_a_double = True\n previous += 1\n current += 1\n return seen_a_double\n\n\ndef part_2() ->int:\n start = 382345\n end = 843167\n total = 0\n for number in range(start, end + 1):\n if check_number_2(str(number)):\n total += 1\n return total\n\n\ndef main():\n x = '111111'\n print(check_number(x) is True)\n x = '223450'\n print(check_number(x) is False)\n x = '123789'\n print(check_number(x) is False)\n print('PART 1:', part_1())\n x = '112233'\n print(check_number_2(x) is True)\n x = '123444'\n print(check_number_2(x) is False)\n x = '111122'\n print(check_number_2(x) is True)\n x = '112222'\n print(check_number_2(x) is True)\n x = '1112589'\n print(check_number_2(x) is False)\n print('PART 2:', part_2())\n\n\n<mask token>\n",
"step-4": "def part_1() ->int:\n start = 382345\n end = 843167\n total = 0\n for number in range(start, end + 1):\n if check_number(str(number)):\n total += 1\n return total\n\n\ndef check_number(problem_input: str) ->bool:\n previous = 0\n double = False\n for current in range(1, len(problem_input)):\n if int(problem_input[current]) < int(problem_input[previous]):\n return False\n if int(problem_input[previous]) == int(problem_input[current]):\n double = True\n previous += 1\n return double\n\n\ndef check_number_2(problem_input: str) ->bool:\n previous = 0\n current = 1\n triple = True\n seen_a_double = False\n length = len(problem_input)\n while current < length:\n if int(problem_input[current]) < int(problem_input[previous]):\n return False\n if int(problem_input[current]) == int(problem_input[previous]):\n if previous >= 1:\n triple = int(problem_input[previous - 1]) == int(problem_input\n [previous])\n if current < length - 1:\n triple = int(problem_input[current + 1]) == int(problem_input\n [current])\n while current < length - 1 and int(problem_input[current]\n ) == int(problem_input[current + 1]):\n current += 1\n previous += 1\n if not triple:\n seen_a_double = True\n previous += 1\n current += 1\n return seen_a_double\n\n\ndef part_2() ->int:\n start = 382345\n end = 843167\n total = 0\n for number in range(start, end + 1):\n if check_number_2(str(number)):\n total += 1\n return total\n\n\ndef main():\n x = '111111'\n print(check_number(x) is True)\n x = '223450'\n print(check_number(x) is False)\n x = '123789'\n print(check_number(x) is False)\n print('PART 1:', part_1())\n x = '112233'\n print(check_number_2(x) is True)\n x = '123444'\n print(check_number_2(x) is False)\n x = '111122'\n print(check_number_2(x) is True)\n x = '112222'\n print(check_number_2(x) is True)\n x = '1112589'\n print(check_number_2(x) is False)\n print('PART 2:', part_2())\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "def part_1() -> int:\n start = 382345\n end = 843167\n total = 0\n\n for number in range(start, end + 1):\n if check_number(str(number)):\n total += 1\n\n return total\n\n\ndef check_number(problem_input: str) -> bool:\n previous = 0\n double = False\n for current in range(1, len(problem_input)):\n if int(problem_input[current]) < int(problem_input[previous]):\n return False\n if int(problem_input[previous]) == int(problem_input[current]):\n double = True\n previous += 1\n\n return double\n\n\ndef check_number_2(problem_input: str) -> bool:\n previous = 0\n current = 1\n triple = True\n seen_a_double = False\n length = len(problem_input)\n while current < length:\n if int(problem_input[current]) < int(problem_input[previous]):\n return False\n if int(problem_input[current]) == int(problem_input[previous]):\n if previous >= 1:\n triple = int(problem_input[previous - 1]) == int(problem_input[previous])\n if current < length - 1:\n triple = int(problem_input[current + 1]) == int(problem_input[current])\n while current < length - 1 and int(problem_input[current]) == int(problem_input[current + 1]):\n current += 1\n previous += 1\n if not triple:\n seen_a_double = True\n\n previous += 1\n current += 1\n\n return seen_a_double\n\n\ndef part_2() -> int:\n start = 382345\n end = 843167\n total = 0\n\n for number in range(start, end + 1):\n if check_number_2(str(number)):\n total += 1\n\n return total\n\n\ndef main():\n x = \"111111\"\n print(check_number(x) is True)\n x = \"223450\"\n print(check_number(x) is False)\n x = \"123789\"\n print(check_number(x) is False)\n\n print(\"PART 1:\", part_1()) # should be 460\n\n x = \"112233\"\n print(check_number_2(x) is True)\n x = \"123444\"\n print(check_number_2(x) is False)\n x = \"111122\"\n print(check_number_2(x) is True)\n x = \"112222\"\n print(check_number_2(x) is True)\n x = \"1112589\"\n print(check_number_2(x) is False)\n\n print(\"PART 2:\", part_2())\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
######################################################################
#
# Write something here to recognize your own file
#
# Copyright: MIT License
#
######################################################################
def multiply(value, factor=5):
    """Return *value* scaled by *factor*.

    The factor defaults to 5, preserving the original hard-coded behavior
    for existing callers while allowing other scales.
    """
    return value * factor
if __name__ == "__main__":
a = [0, 1, 2, 3, 4, 5]
new_empty_list = []
print(a)
for item in a:
b = multiply(item)
new_empty_list.append(b)
print(b)
print(new_empty_list)
|
normal
|
{
"blob_id": "0778b25363d50e699edf48b92f1104ab57c03172",
"index": 2015,
"step-1": "<mask token>\n",
"step-2": "def multiply(value):\n return value * 5\n\n\n<mask token>\n",
"step-3": "def multiply(value):\n return value * 5\n\n\nif __name__ == '__main__':\n a = [0, 1, 2, 3, 4, 5]\n new_empty_list = []\n print(a)\n for item in a:\n b = multiply(item)\n new_empty_list.append(b)\n print(b)\n print(new_empty_list)\n",
"step-4": "######################################################################\n#\n# Write something here to recognize your own file\n# \n# Copyright: MIT License\n#\n######################################################################\n\ndef multiply(value):\n return value * 5\n\nif __name__ == \"__main__\":\n a = [0, 1, 2, 3, 4, 5]\n new_empty_list = []\n \n print(a)\n \n for item in a:\n b = multiply(item)\n new_empty_list.append(b)\n print(b)\n \n print(new_empty_list)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import numpy as np
import cv2
import time
from itertools import chain, compress
from collections import defaultdict, namedtuple
class FeatureMetaData(object):
    """
    Contain necessary information of a feature for easy access.

    Fields are filled incrementally: detection supplies `response` and the
    two image points; the tracker assigns `id` and maintains `lifetime`.
    """
    def __init__(self):
        self.id = None           # int: unique track id assigned by the tracker
        self.response = None     # float: detector response (corner strength)
        self.lifetime = None     # int: consecutive frames this feature survived
        self.cam0_point = None   # vec2: pixel position in the left (cam0) image
        self.cam1_point = None   # vec2: pixel position in the right (cam1) image
class FeatureMeasurement(object):
    """
    Stereo measurement of a feature.

    (u0, v0) is the undistorted pixel position in cam0 and (u1, v1) the
    matching position in cam1 (see `publish`); `id` links the measurement
    to its feature track.
    """
    def __init__(self):
        self.id = None
        self.u0 = None
        self.v0 = None
        self.u1 = None
        self.v1 = None
class ImageProcessor(object):
"""
Detect and track features in image sequences.
"""
    def __init__(self, config):
        """Set up detector, buffers, and calibration from *config*.

        `config` is a project-level object; attributes accessed here
        (fast_threshold, grid_num, camera calibration, T_imu_cam*) define
        the expected schema — confirm against the config class.
        """
        self.config = config
        # Indicate if this is the first image message.
        self.is_first_img = True
        # ID for the next new feature.
        self.next_feature_id = 0
        # Feature detector
        self.detector = cv2.FastFeatureDetector_create(self.config.fast_threshold)
        # IMU message buffer.
        self.imu_msg_buffer = []
        # Previous and current images
        self.cam0_prev_img_msg = None
        self.cam0_curr_img_msg = None
        self.cam1_curr_img_msg = None
        # Pyramids for previous and current image
        self.prev_cam0_pyramid = None
        self.curr_cam0_pyramid = None
        self.curr_cam1_pyramid = None
        # Features in the previous and current image.
        # list of lists of FeatureMetaData, one inner list per image grid cell.
        self.prev_features = [[] for _ in range(self.config.grid_num)]  # Don't use [[]] * N
        self.curr_features = [[] for _ in range(self.config.grid_num)]
        # Number of features after each outlier removal step.
        # keys: before_tracking, after_tracking, after_matching, after_ransac
        self.num_features = defaultdict(int)
        # load config
        # Camera calibration parameters
        self.cam0_resolution = config.cam0_resolution   # vec2
        self.cam0_intrinsics = config.cam0_intrinsics   # vec4
        self.cam0_distortion_model = config.cam0_distortion_model     # string
        self.cam0_distortion_coeffs = config.cam0_distortion_coeffs   # vec4
        self.cam1_resolution = config.cam1_resolution   # vec2
        self.cam1_intrinsics = config.cam1_intrinsics   # vec4
        self.cam1_distortion_model = config.cam1_distortion_model     # string
        self.cam1_distortion_coeffs = config.cam1_distortion_coeffs   # vec4
        # Take a vector from cam0 frame to the IMU frame.
        self.T_cam0_imu = np.linalg.inv(config.T_imu_cam0)
        self.R_cam0_imu = self.T_cam0_imu[:3, :3]
        self.t_cam0_imu = self.T_cam0_imu[:3, 3]
        # Take a vector from cam1 frame to the IMU frame.
        self.T_cam1_imu = np.linalg.inv(config.T_imu_cam1)
        self.R_cam1_imu = self.T_cam1_imu[:3, :3]
        self.t_cam1_imu = self.T_cam1_imu[:3, 3]
        # Frame counter used to name the per-frame feature dump files.
        self.image_id = 0
    def stereo_callback(self, stereo_msg):
        """
        Callback function for the stereo images.

        Runs the per-frame pipeline (pyramids -> track -> add -> prune),
        persists or replays features depending on config.load_features_flag,
        and returns the feature message produced by `publish`.
        """
        start = time.time()
        self.cam0_curr_img_msg = stereo_msg.cam0_msg
        self.cam1_curr_img_msg = stereo_msg.cam1_msg
        # Build the image pyramids once since they're used at multiple places.
        self.create_image_pyramids()
        # Detect features in the first frame.
        if self.is_first_img:
            if not self.config.load_features_flag:
                self.initialize_first_frame()
            self.is_first_img = False
            # Draw results.
            # self.draw_features_stereo()
        else:
            if not self.config.load_features_flag:
                # Track the feature in the previous image.
                t = time.time()
                self.track_features()
                print('___track_features:', time.time() - t)
                t = time.time()
                # Add new features into the current image.
                self.add_new_features()
                print('___add_new_features:', time.time() - t)
                t = time.time()
                self.prune_features()
                print('___prune_features:', time.time() - t)
                t = time.time()
                # Draw results.
                # self.draw_features_stereo()
                print('___draw_features_stereo:', time.time() - t)
                t = time.time()
        print('===image process elapsed:', time.time() - start, f'({stereo_msg.timestamp})')
        if not self.config.load_features_flag:
            try:
                self.save_features()
                return self.publish()
            finally:
                # Roll current state into "previous" AFTER publish has used it;
                # the finally block guarantees the hand-off even if publish raises.
                self.cam0_prev_img_msg = self.cam0_curr_img_msg
                self.prev_features = self.curr_features
                self.prev_cam0_pyramid = self.curr_cam0_pyramid
                # Initialize the current features to empty vectors.
                self.curr_features = [[] for _ in range(self.config.grid_num)]
        else:
            # Replay mode: read back features recorded in a previous run.
            self.load_features()
            return self.publish()
    def imu_callback(self, msg):
        """
        Callback function for the imu message.

        Simply buffers the message; `integrate_imu_data` later consumes and
        trims this buffer between consecutive image frames.
        """
        self.imu_msg_buffer.append(msg)
    def create_image_pyramids(self):
        """
        Create image pyramids used for KLT tracking.

        The explicit cv2.buildOpticalFlowPyramid calls are disabled (see the
        original "Seems doesn't work in python" note); the raw images are
        stored instead — presumably calcOpticalFlowPyrLK builds its own
        pyramids internally. NOTE(review): confirm this does not hurt
        tracking quality before re-enabling the commented code.
        """
        curr_cam0_img = self.cam0_curr_img_msg.image
        # self.curr_cam0_pyramid = cv2.buildOpticalFlowPyramid(
        #     curr_cam0_img, self.config.win_size, self.config.pyramid_levels,
        #     None, cv2.BORDER_REFLECT_101, cv2.BORDER_CONSTANT, False)[1]
        self.curr_cam0_pyramid = curr_cam0_img
        curr_cam1_img = self.cam1_curr_img_msg.image
        # self.curr_cam1_pyramid = cv2.buildOpticalFlowPyramid(
        #     curr_cam1_img, self.config.win_size, self.config.pyramid_levels,
        #     None, cv2.BORDER_REFLECT_101, cv2.BORDER_CONSTANT, False)[1]
        self.curr_cam1_pyramid = curr_cam1_img
    def initialize_first_frame(self):
        """
        Initialize the image processing sequence, which is basically detect
        new features on the first set of stereo images.

        Detected cam0 features are stereo-matched into cam1, bucketed into
        the image grid, and the strongest `grid_min_feature_num` per cell
        are kept as the initial tracks (lifetime 1, fresh ids).
        """
        img = self.cam0_curr_img_msg.image
        grid_height, grid_width = self.get_grid_size(img)
        # Detect new features on the first image.
        new_features = self.detector.detect(img)
        # Find the stereo matched points for the newly detected features.
        cam0_points = [kp.pt for kp in new_features]
        cam1_points, inlier_markers = self.stereo_match(cam0_points)
        cam0_inliers, cam1_inliers = [], []
        response_inliers = []
        for i, inlier in enumerate(inlier_markers):
            if not inlier:
                continue
            cam0_inliers.append(cam0_points[i])
            cam1_inliers.append(cam1_points[i])
            response_inliers.append(new_features[i].response)
        # len(cam0_inliers) < max(5, 0.1 * len(new_features))
        # Group the features into grids
        grid_new_features = [[] for _ in range(self.config.grid_num)]
        for i in range(len(cam0_inliers)):
            cam0_point = cam0_inliers[i]
            cam1_point = cam1_inliers[i]
            response = response_inliers[i]
            # Grid cell index from the cam0 pixel position (row-major).
            row = int(cam0_point[1] / grid_height)
            col = int(cam0_point[0] / grid_width)
            code = row*self.config.grid_col + col
            new_feature = FeatureMetaData()
            new_feature.response = response
            new_feature.cam0_point = cam0_point
            new_feature.cam1_point = cam1_point
            grid_new_features[code].append(new_feature)
        # Sort the new features in each grid based on its response.
        # And collect new features within each grid with high response.
        for i, new_features in enumerate(grid_new_features):
            for feature in sorted(new_features, key=lambda x:x.response,
                reverse=True)[:self.config.grid_min_feature_num]:
                self.curr_features[i].append(feature)
                self.curr_features[i][-1].id = self.next_feature_id
                self.curr_features[i][-1].lifetime = 1
                self.next_feature_id += 1
    def track_features(self):
        """
        Tracker features on the newly received stereo images.

        Pipeline: IMU-predicted LK optical flow from the previous cam0 frame,
        then stereo matching into cam1; `self.num_features` records the
        survivor count after each stage. Surviving tracks are re-bucketed
        into the grid with lifetime incremented.
        `select` is a module-level helper (defined elsewhere in this file) —
        presumably it filters a sequence by a boolean mask; confirm there.
        """
        img = self.cam0_curr_img_msg.image
        grid_height, grid_width = self.get_grid_size(img)
        # Compute a rough relative rotation which takes a vector
        # from the previous frame to the current frame.
        cam0_R_p_c, cam1_R_p_c = self.integrate_imu_data()
        # Organize the features in the previous image.
        prev_ids = []
        prev_lifetime = []
        prev_cam0_points = []
        prev_cam1_points = []
        for feature in chain.from_iterable(self.prev_features):
            prev_ids.append(feature.id)
            prev_lifetime.append(feature.lifetime)
            prev_cam0_points.append(feature.cam0_point)
            prev_cam1_points.append(feature.cam1_point)
        prev_cam0_points = np.array(prev_cam0_points, dtype=np.float32)
        # Number of the features before tracking.
        self.num_features['before_tracking'] = len(prev_cam0_points)
        # Abort tracking if there is no features in the previous frame.
        if len(prev_cam0_points) == 0:
            return
        # Track features using LK optical flow method.
        curr_cam0_points = self.predict_feature_tracking(
            prev_cam0_points, cam0_R_p_c, self.cam0_intrinsics)
        curr_cam0_points, track_inliers, _ = cv2.calcOpticalFlowPyrLK(
            self.prev_cam0_pyramid, self.curr_cam0_pyramid,
            prev_cam0_points.astype(np.float32),
            curr_cam0_points.astype(np.float32),
            **self.config.lk_params)
        # Mark those tracked points out of the image region as untracked.
        for i, point in enumerate(curr_cam0_points):
            if not track_inliers[i]:
                continue
            if (point[0] < 0 or point[0] > img.shape[1]-1 or
                point[1] < 0 or point[1] > img.shape[0]-1):
                track_inliers[i] = 0
        # Collect the tracked points.
        prev_tracked_ids = select(prev_ids, track_inliers)
        prev_tracked_lifetime = select(prev_lifetime, track_inliers)
        prev_tracked_cam0_points = select(prev_cam0_points, track_inliers)
        prev_tracked_cam1_points = select(prev_cam1_points, track_inliers)
        curr_tracked_cam0_points = select(curr_cam0_points, track_inliers)
        # Number of features left after tracking.
        self.num_features['after_tracking'] = len(curr_tracked_cam0_points)
        # Outlier removal involves three steps, which forms a close
        # loop between the previous and current frames of cam0 (left)
        # and cam1 (right). Assuming the stereo matching between the
        # previous cam0 and cam1 images are correct, the three steps are:
        #
        # prev frames cam0 ----------> cam1
        #              |                |
        #              |ransac          |ransac
        #              |   stereo match |
        # curr frames cam0 ----------> cam1
        #
        # 1) Stereo matching between current images of cam0 and cam1.
        # 2) RANSAC between previous and current images of cam0.
        # 3) RANSAC between previous and current images of cam1.
        #
        # For Step 3, tracking between the images is no longer needed.
        # The stereo matching results are directly used in the RANSAC.
        # Step 1: stereo matching.
        curr_cam1_points, match_inliers = self.stereo_match(
            curr_tracked_cam0_points)
        prev_matched_ids = select(prev_tracked_ids, match_inliers)
        prev_matched_lifetime = select(prev_tracked_lifetime, match_inliers)
        prev_matched_cam0_points = select(prev_tracked_cam0_points, match_inliers)
        prev_matched_cam1_points = select(prev_tracked_cam1_points, match_inliers)
        curr_matched_cam0_points = select(curr_tracked_cam0_points, match_inliers)
        curr_matched_cam1_points = select(curr_cam1_points, match_inliers)
        # Number of features left after stereo matching.
        self.num_features['after_matching'] = len(curr_matched_cam0_points)
        # Step 2 and 3: RANSAC on temporal image pairs of cam0 and cam1.
        # NOTE: RANSAC is currently disabled — every match is accepted below.
        # cam0_ransac_inliers = self.two_point_ransac(
        #     prev_matched_cam0_points, curr_matched_cam0_points,
        #     cam0_R_p_c, self.cam0_intrinsics,
        #     self.cam0_distortion_model, self.cam0_distortion_coeffs,
        #     self.config.ransac_threshold, 0.99)
        # cam1_ransac_inliers = self.two_point_ransac(
        #     prev_matched_cam1_points, curr_matched_cam1_points,
        #     cam1_R_p_c, self.cam1_intrinsics,
        #     self.cam1_distortion_model, self.cam1_distortion_coeffs,
        #     self.config.ransac_threshold, 0.99)
        cam0_ransac_inliers = [1] * len(prev_matched_cam0_points)
        cam1_ransac_inliers = [1] * len(prev_matched_cam1_points)
        # Number of features after ransac.
        after_ransac = 0
        for i in range(len(cam0_ransac_inliers)):
            if not (cam0_ransac_inliers[i] and cam1_ransac_inliers[i]):
                continue
            row = int(curr_matched_cam0_points[i][1] / grid_height)
            col = int(curr_matched_cam0_points[i][0] / grid_width)
            code = row * self.config.grid_col + col
            grid_new_feature = FeatureMetaData()
            grid_new_feature.id = prev_matched_ids[i]
            grid_new_feature.lifetime = prev_matched_lifetime[i] + 1
            grid_new_feature.cam0_point = curr_matched_cam0_points[i]
            grid_new_feature.cam1_point = curr_matched_cam1_points[i]
            prev_matched_lifetime[i] += 1
            self.curr_features[code].append(grid_new_feature)
            after_ransac += 1
        self.num_features['after_ransac'] = after_ransac
        # Compute the tracking rate.
        # prev_feature_num = sum([len(x) for x in self.prev_features])
        # curr_feature_num = sum([len(x) for x in self.curr_features])
    def add_new_features(self):
        """
        Detect new features on the image to ensure that the features are
        uniformly distributed on the image.

        Existing tracks are masked out (7x7 patch around each point) before
        detection; fresh detections are stereo-matched, bucketed into the
        grid, and only the strongest per cell are added as new tracks.
        """
        curr_img = self.cam0_curr_img_msg.image
        grid_height, grid_width = self.get_grid_size(curr_img)
        # Create a mask to avoid redetecting existing features.
        mask = np.ones(curr_img.shape[:2], dtype='uint8')
        for feature in chain.from_iterable(self.curr_features):
            x, y = map(int, feature.cam0_point)
            mask[y-3:y+4, x-3:x+4] = 0
        # Detect new features.
        new_features = self.detector.detect(curr_img, mask=mask)
        # Collect the new detected features based on the grid.
        # Select the ones with top response within each grid afterwards.
        new_feature_sieve = [[] for _ in range(self.config.grid_num)]
        for feature in new_features:
            row = int(feature.pt[1] / grid_height)
            col = int(feature.pt[0] / grid_width)
            code = row * self.config.grid_col + col
            new_feature_sieve[code].append(feature)
        new_features = []
        for features in new_feature_sieve:
            if len(features) > self.config.grid_max_feature_num:
                features = sorted(features, key=lambda x:x.response,
                    reverse=True)[:self.config.grid_max_feature_num]
            new_features.append(features)
        new_features = list(chain.from_iterable(new_features))
        # Find the stereo matched points for the newly detected features.
        cam0_points = [kp.pt for kp in new_features]
        cam1_points, inlier_markers = self.stereo_match(cam0_points)
        cam0_inliers, cam1_inliers, response_inliers = [], [], []
        for i, inlier in enumerate(inlier_markers):
            if not inlier:
                continue
            cam0_inliers.append(cam0_points[i])
            cam1_inliers.append(cam1_points[i])
            response_inliers.append(new_features[i].response)
        # if len(cam0_inliers) < max(5, len(new_features) * 0.1):
        # Group the features into grids
        grid_new_features = [[] for _ in range(self.config.grid_num)]
        for i in range(len(cam0_inliers)):
            cam0_point = cam0_inliers[i]
            cam1_point = cam1_inliers[i]
            response = response_inliers[i]
            row = int(cam0_point[1] / grid_height)
            col = int(cam0_point[0] / grid_width)
            code = row*self.config.grid_col + col
            new_feature = FeatureMetaData()
            new_feature.response = response
            new_feature.cam0_point = cam0_point
            new_feature.cam1_point = cam1_point
            grid_new_features[code].append(new_feature)
        # Sort the new features in each grid based on its response.
        # And collect new features within each grid with high response.
        for i, new_features in enumerate(grid_new_features):
            for feature in sorted(new_features, key=lambda x:x.response,
                reverse=True)[:self.config.grid_min_feature_num]:
                self.curr_features[i].append(feature)
                self.curr_features[i][-1].id = self.next_feature_id
                self.curr_features[i][-1].lifetime = 1
                self.next_feature_id += 1
def prune_features(self):
"""
Remove some of the features of a grid in case there are too many
features inside of that grid, which ensures the number of features
within each grid is bounded.
"""
for i, features in enumerate(self.curr_features):
# Continue if the number of features in this grid does
# not exceed the upper bound.
if len(features) <= self.config.grid_max_feature_num:
continue
self.curr_features[i] = sorted(features, key=lambda x:x.lifetime,
reverse=True)[:self.config.grid_max_feature_num]
    def load_features(self):
        """Load the current frame's features from disk (replay mode).

        NOTE(review): the filename is built by plain concatenation, so this
        assumes config.result_dir already ends with a path separator —
        confirm against how result_dir is configured.
        """
        filename = self.config.result_dir + str(self.image_id) + ".npz"
        # allow_pickle is required because FeatureMetaData objects are stored.
        self.curr_features = np.load(filename, allow_pickle=True)['arr_0']
        self.image_id += 1
    def save_features(self):
        """Persist the current frame's features to disk for later replay.

        Counterpart of `load_features`; uses the same `<result_dir><id>.npz`
        naming scheme and the same frame counter.
        """
        filename = self.config.result_dir + str(self.image_id) + ".npz"
        np.savez(filename, self.curr_features)
        self.image_id += 1
    def publish(self):
        """
        Publish the features on the current image including both the
        tracked and newly detected ones.

        Returns a namedtuple `feature_msg(timestamp, features)` where each
        entry is a FeatureMeasurement holding the undistorted cam0/cam1
        pixel coordinates of one track.
        """
        curr_ids = []
        curr_cam0_points = []
        curr_cam1_points = []
        for feature in chain.from_iterable(self.curr_features):
            curr_ids.append(feature.id)
            curr_cam0_points.append(feature.cam0_point)
            curr_cam1_points.append(feature.cam1_point)
        # Undistort with each camera's own model/coefficients.
        curr_cam0_points_undistorted = self.undistort_points(
            curr_cam0_points, self.cam0_intrinsics,
            self.cam0_distortion_model, self.cam0_distortion_coeffs)
        curr_cam1_points_undistorted = self.undistort_points(
            curr_cam1_points, self.cam1_intrinsics,
            self.cam1_distortion_model, self.cam1_distortion_coeffs)
        features = []
        for i in range(len(curr_ids)):
            fm = FeatureMeasurement()
            fm.id = curr_ids[i]
            fm.u0 = curr_cam0_points_undistorted[i][0]
            fm.v0 = curr_cam0_points_undistorted[i][1]
            fm.u1 = curr_cam1_points_undistorted[i][0]
            fm.v1 = curr_cam1_points_undistorted[i][1]
            features.append(fm)
        feature_msg = namedtuple('feature_msg', ['timestamp', 'features'])(
            self.cam0_curr_img_msg.timestamp, features)
        return feature_msg
def integrate_imu_data(self):
"""
Integrates the IMU gyro readings between the two consecutive images,
which is used for both tracking prediction and 2-point RANSAC.
Returns:
cam0_R_p_c: a rotation matrix which takes a vector from previous
cam0 frame to current cam0 frame.
cam1_R_p_c: a rotation matrix which takes a vector from previous
cam1 frame to current cam1 frame.
"""
# Find the start and the end limit within the imu msg buffer.
idx_begin = None
for i, msg in enumerate(self.imu_msg_buffer):
if msg.timestamp >= self.cam0_prev_img_msg.timestamp - 0.01:
idx_begin = i
break
idx_end = None
for i, msg in enumerate(self.imu_msg_buffer):
if msg.timestamp >= self.cam0_curr_img_msg.timestamp - 0.004:
idx_end = i
break
if idx_begin is None or idx_end is None:
return np.identity(3), np.identity(3)
# Compute the mean angular velocity in the IMU frame.
mean_ang_vel = np.zeros(3)
for i in range(idx_begin, idx_end):
mean_ang_vel += self.imu_msg_buffer[i].angular_velocity
if idx_end > idx_begin:
mean_ang_vel /= (idx_end - idx_begin)
# Transform the mean angular velocity from the IMU frame to the
# cam0 and cam1 frames.
cam0_mean_ang_vel = self.R_cam0_imu.T @ mean_ang_vel
cam1_mean_ang_vel = self.R_cam1_imu.T @ mean_ang_vel
# Compute the relative rotation.
dt = self.cam0_curr_img_msg.timestamp - self.cam0_prev_img_msg.timestamp
cam0_R_p_c = cv2.Rodrigues(cam0_mean_ang_vel * dt)[0].T
cam1_R_p_c = cv2.Rodrigues(cam1_mean_ang_vel * dt)[0].T
# Delete the useless and used imu messages.
self.imu_msg_buffer = self.imu_msg_buffer[idx_end:]
return cam0_R_p_c, cam1_R_p_c
def rescale_points(self, pts1, pts2):
"""
Arguments:
pts1: first set of points.
pts2: second set of points.
Returns:
pts1: scaled first set of points.
pts2: scaled second set of points.
scaling_factor: scaling factor
"""
scaling_factor = 0
for pt1, pt2 in zip(pts1, pts2):
scaling_factor += np.linalg.norm(pt1)
scaling_factor += np.linalg.norm(pt2)
scaling_factor = (len(pts1) + len(pts2)) / scaling_factor * np.sqrt(2)
for i in range(len(pts1)):
pts1[i] *= scaling_factor
pts2[i] *= scaling_factor
return pts1, pts2, scaling_factor
# def two_point_ransac(self, pts1, pts2, R_p_c, intrinsics,
# distortion_model, distortion_coeffs,
# inlier_error, success_probability):
# """
# Applies two point ransac algorithm to mark the inliers in the input set.
# Arguments:
# pts1: first set of points.
# pts2: second set of points.
# R_p_c: a rotation matrix takes a vector in the previous camera frame
# to the current camera frame.
# intrinsics: intrinsics of the camera.
# distortion_model: distortion model of the camera.
# distortion_coeffs: distortion coefficients.
# inlier_error: acceptable error to be considered as an inlier.
# success_probability: the required probability of success.
# Returns:
# inlier_flag: 1 for inliers and 0 for outliers.
# """
# # Check the size of input point size.
# assert len(pts1) == len(pts2), 'Sets of different size are used...'
# norm_pixel_unit = 2.0 / (intrinsics[0] + intrinsics[1])
# iter_num = int(np.ceil(np.log(1-success_probability) / np.log(1-0.7*0.7)))
# # Initially, mark all points as inliers.
# inlier_markers = [1] * len(pts1)
# # Undistort all the points.
# pts1_undistorted = self.undistort_points(pts1, intrinsics,
# distortion_model, distortion_coeffs)
# pts2_undistorted = self.undistort_points(pts2, intrinsics,
# distortion_model, distortion_coeffs)
    # # Compensate the points in the previous image with
    # # the relative rotation.
# for i, pt in enumerate(pts1_undistorted):
# pt_h = np.array([*pt, 1.0])
# pt_hc = R_p_c @ pt_h
# pts1_undistorted[i] = pt_hc[:2]
# # Normalize the points to gain numerical stability.
# pts1_undistorted, pts2_undistorted, scaling_factor = self.rescale_points(
# pts1_undistorted, pts2_undistorted)
# # Compute the difference between previous and current points,
# # which will be used frequently later.
# pts_diff = []
# for pt1, pt2 in zip(pts1_undistorted, pts2_undistorted):
# pts_diff.append(pt1 - pt2)
# # Mark the point pairs with large difference directly.
# # BTW, the mean distance of the rest of the point pairs are computed.
# mean_pt_distance = 0.0
# raw_inlier_count = 0
# for i, pt_diff in enumerate(pts_diff):
# distance = np.linalg.norm(pt_diff)
# # 25 pixel distance is a pretty large tolerance for normal motion.
# # However, to be used with aggressive motion, this tolerance should
# # be increased significantly to match the usage.
# if distance > 50.0 * norm_pixel_unit:
# inlier_markers[i] = 0
# else:
# mean_pt_distance += distance
# raw_inlier_count += 1
# mean_pt_distance /= raw_inlier_count
# # If the current number of inliers is less than 3, just mark
# # all input as outliers. This case can happen with fast
# # rotation where very few features are tracked.
# if raw_inlier_count < 3:
# return [0] * len(inlier_markers)
# # Before doing 2-point RANSAC, we have to check if the motion
# # is degenerated, meaning that there is no translation between
# # the frames, in which case, the model of the RANSAC does not work.
# # If so, the distance between the matched points will be almost 0.
# if mean_pt_distance < norm_pixel_unit:
# for i, pt_diff in enumerate(pts_diff):
# if inlier_markers[i] == 0:
# continue
# if np.linalg.norm(pt_diff) > inlier_error * norm_pixel_unit:
# inlier_markers[i] = 0
# return inlier_markers
# # In the case of general motion, the RANSAC model can be applied.
# # The three column corresponds to tx, ty, and tz respectively.
# coeff_t = []
# for i, pt_diff in enumerate(pts_diff):
# coeff_t.append(np.array([
# pt_diff[1],
# -pt_diff[0],
# pts1_undistorted[0] * pts2_undistorted[1] -
# pts1_undistorted[1] * pts2_undistorted[0]]))
# coeff_t = np.array(coeff_t)
# raw_inlier_idx = np.where(inlier_markers)[0]
# best_inlier_set = []
# best_error = 1e10
# for i in range(iter_num):
# # Randomly select two point pairs.
# # Although this is a weird way of selecting two pairs, but it
# # is able to efficiently avoid selecting repetitive pairs.
# pair_idx1 = np.random.choice(raw_inlier_idx)
# idx_diff = np.random.randint(1, len(raw_inlier_idx))
# pair_idx2 = (pair_idx1+idx_diff) % len(raw_inlier_idx)
# # Construct the model.
# coeff_t_ = np.array([coeff_t[pair_idx1], coeff_t[pair_idx2]])
# coeff_tx = coeff_t_[:, 0]
# coeff_ty = coeff_t_[:, 1]
# coeff_tz = coeff_t_[:, 2]
# coeff_l1_norm = np.linalg.norm(coeff_t_, 1, axis=0)
# base_indicator = np.argmin(coeff_l1_norm)
# if base_indicator == 0:
# A = np.array([coeff_ty, coeff_tz]).T
# solution = np.linalg.inv(A) @ (-coeff_tx)
# model = [1.0, *solution]
# elif base_indicator == 1:
# A = np.array([coeff_tx, coeff_tz]).T
# solution = np.linalg.inv(A) @ (-coeff_ty)
# model = [solution[0], 1.0, solution[1]]
# else:
# A = np.array([coeff_tx, coeff_ty]).T
# solution = np.linalg.inv(A) @ (-coeff_tz)
# model = [*solution, 1.0]
# # Find all the inliers among point pairs.
# error = coeff_t @ model
# inlier_set = []
# for i, e in enumerate(error):
# if inlier_markers[i] == 0:
# continue
# if np.abs(e) < inlier_error * norm_pixel_unit:
# inlier_set.append(i)
# # If the number of inliers is small, the current model is
# # probably wrong.
# if len(inlier_set) < 0.2 * len(pts1_undistorted):
# continue
# # Refit the model using all of the possible inliers.
# coeff_t_ = coeff_t[inlier_set]
# coeff_tx_better = coeff_t_[:, 0]
# coeff_ty_better = coeff_t_[:, 1]
# coeff_tz_better = coeff_t_[:, 2]
# if base_indicator == 0:
# A = np.array([coeff_ty_better, coeff_tz_better]).T
# solution = np.linalg.inv(A.T @ A) @ A.T @ (-coeff_tx_better)
# model_better = [1.0, *solution]
# elif base_indicator == 1:
# A = np.array([coeff_tx_better, coeff_tz_better]).T
# solution = np.linalg.inv(A.T @ A) @ A.T @ (-coeff_ty_better)
# model_better = [solution[0], 1.0, solution[1]]
# else:
# A = np.array([coeff_tx_better, coeff_ty_better]).T
# solution = np.linalg.inv(A.T @ A) @ A.T @ (-coeff_tz_better)
# model_better = [*solution, 1.0]
    # # Compute the error and update the best model if possible.
# new_error = coeff_t @ model_better
# this_error = np.mean([np.abs(new_error[i]) for i in inlier_set])
# if len(inlier_set) > best_inlier_set:
# best_error = this_error
# best_inlier_set = inlier_set
# # Fill in the markers.
# inlier_markers = [0] * len(pts1)
# for i in best_inlier_set:
# inlier_markers[i] = 1
# return inlier_markers
def get_grid_size(self, img):
"""
# Size of each grid.
"""
grid_height = int(np.ceil(img.shape[0] / self.config.grid_row))
grid_width = int(np.ceil(img.shape[1] / self.config.grid_col))
return grid_height, grid_width
def predict_feature_tracking(self, input_pts, R_p_c, intrinsics):
"""
predictFeatureTracking Compensates the rotation between consecutive
camera frames so that feature tracking would be more robust and fast.
Arguments:
input_pts: features in the previous image to be tracked.
R_p_c: a rotation matrix takes a vector in the previous camera
frame to the current camera frame. (matrix33)
intrinsics: intrinsic matrix of the camera. (vec3)
Returns:
compensated_pts: predicted locations of the features in the
current image based on the provided rotation.
"""
# Return directly if there are no input features.
if len(input_pts) == 0:
return []
# Intrinsic matrix.
K = np.array([
[intrinsics[0], 0.0, intrinsics[2]],
[0.0, intrinsics[1], intrinsics[3]],
[0.0, 0.0, 1.0]])
H = K @ R_p_c @ np.linalg.inv(K)
compensated_pts = []
for i in range(len(input_pts)):
p1 = np.array([*input_pts[i], 1.0])
p2 = H @ p1
compensated_pts.append(p2[:2] / p2[2])
return np.array(compensated_pts, dtype=np.float32)
def stereo_match(self, cam0_points):
"""
Matches features with stereo image pairs.
Arguments:
cam0_points: points in the primary image.
Returns:
cam1_points: points in the secondary image.
inlier_markers: 1 if the match is valid, 0 otherwise.
"""
cam0_points = np.array(cam0_points)
if len(cam0_points) == 0:
return []
R_cam0_cam1 = self.R_cam1_imu.T @ self.R_cam0_imu
cam0_points_undistorted = self.undistort_points(
cam0_points, self.cam0_intrinsics,
self.cam0_distortion_model, self.cam0_distortion_coeffs, R_cam0_cam1)
cam1_points = self.distort_points(
cam0_points_undistorted, self.cam1_intrinsics,
self.cam1_distortion_model, self.cam1_distortion_coeffs)
cam1_points_copy = cam1_points.copy()
# Track features using LK optical flow method.
cam0_points = cam0_points.astype(np.float32)
cam1_points = cam1_points.astype(np.float32)
cam1_points, inlier_markers, _ = cv2.calcOpticalFlowPyrLK(
self.curr_cam0_pyramid, self.curr_cam1_pyramid,
cam0_points, cam1_points, **self.config.lk_params)
cam0_points_, _, _ = cv2.calcOpticalFlowPyrLK(
self.curr_cam1_pyramid, self.curr_cam0_pyramid,
cam1_points, cam0_points.copy(), **self.config.lk_params)
err = np.linalg.norm(cam0_points - cam0_points_, axis=1)
# cam1_points_undistorted = self.undistort_points(
# cam1_points, self.cam1_intrinsics,
# self.cam1_distortion_model, self.cam1_distortion_coeffs, R_cam0_cam1)
disparity = np.abs(cam1_points_copy[:, 1] - cam1_points[:, 1])
inlier_markers = np.logical_and.reduce(
[inlier_markers.reshape(-1), err < 3, disparity < 20])
# Mark those tracked points out of the image region as untracked.
img = self.cam1_curr_img_msg.image
for i, point in enumerate(cam1_points):
if not inlier_markers[i]:
continue
if (point[0] < 0 or point[0] > img.shape[1]-1 or
point[1] < 0 or point[1] > img.shape[0]-1):
inlier_markers[i] = 0
# Compute the relative rotation between the cam0 frame and cam1 frame.
t_cam0_cam1 = self.R_cam1_imu.T @ (self.t_cam0_imu - self.t_cam1_imu)
# Compute the essential matrix.
E = skew(t_cam0_cam1) @ R_cam0_cam1
# Further remove outliers based on the known essential matrix.
cam0_points_undistorted = self.undistort_points(
cam0_points, self.cam0_intrinsics,
self.cam0_distortion_model, self.cam0_distortion_coeffs)
cam1_points_undistorted = self.undistort_points(
cam1_points, self.cam1_intrinsics,
self.cam1_distortion_model, self.cam1_distortion_coeffs)
norm_pixel_unit = 4.0 / (
self.cam0_intrinsics[0] + self.cam0_intrinsics[1] +
self.cam1_intrinsics[0] + self.cam1_intrinsics[1])
for i in range(len(cam0_points_undistorted)):
if not inlier_markers[i]:
continue
pt0 = np.array([*cam0_points_undistorted[i], 1.0])
pt1 = np.array([*cam1_points_undistorted[i], 1.0])
epipolar_line = E @ pt0
error = np.abs((pt1 * epipolar_line)[0]) / np.linalg.norm(
epipolar_line[:2])
if error > self.config.stereo_threshold * norm_pixel_unit:
inlier_markers[i] = 0
return cam1_points, inlier_markers
def undistort_points(self, pts_in, intrinsics, distortion_model,
distortion_coeffs, rectification_matrix=np.identity(3),
new_intrinsics=np.array([1, 1, 0, 0])):
"""
Arguments:
pts_in: points to be undistorted.
intrinsics: intrinsics of the camera.
distortion_model: distortion model of the camera.
distortion_coeffs: distortion coefficients.
rectification_matrix:
new_intrinsics:
Returns:
pts_out: undistorted points.
"""
if len(pts_in) == 0:
return []
pts_in = np.reshape(pts_in, (-1, 1, 2))
K = np.array([
[intrinsics[0], 0.0, intrinsics[2]],
[0.0, intrinsics[1], intrinsics[3]],
[0.0, 0.0, 1.0]])
K_new = np.array([
[new_intrinsics[0], 0.0, new_intrinsics[2]],
[0.0, new_intrinsics[1], new_intrinsics[3]],
[0.0, 0.0, 1.0]])
if distortion_model == 'equidistant':
pts_out = cv2.fisheye.undistortPoints(pts_in, K, distortion_coeffs,
rectification_matrix, K_new)
else: # default: 'radtan'
pts_out = cv2.undistortPoints(pts_in, K, distortion_coeffs, None,
rectification_matrix, K_new)
return pts_out.reshape((-1, 2))
def distort_points(self, pts_in, intrinsics, distortion_model,
distortion_coeffs):
"""
Arguments:
pts_in: points to be distorted.
intrinsics: intrinsics of the camera.
distortion_model: distortion model of the camera.
distortion_coeffs: distortion coefficients.
Returns:
pts_out: distorted points. (N, 2)
"""
if len(pts_in) == 0:
return []
K = np.array([
[intrinsics[0], 0.0, intrinsics[2]],
[0.0, intrinsics[1], intrinsics[3]],
[0.0, 0.0, 1.0]])
if distortion_model == 'equidistant':
pts_out = cv2.fisheye.distortPoints(pts_in, K, distortion_coeffs)
else: # default: 'radtan'
homogenous_pts = cv2.convertPointsToHomogeneous(pts_in)
pts_out, _ = cv2.projectPoints(homogenous_pts,
np.zeros(3), np.zeros(3), K, distortion_coeffs)
return pts_out.reshape((-1, 2))
def draw_features_stereo(self):
img0 = self.cam0_curr_img_msg.image
img1 = self.cam1_curr_img_msg.image
kps0 = []
kps1 = []
matches = []
for feature in chain.from_iterable(self.curr_features):
matches.append(cv2.DMatch(len(kps0), len(kps0), 0))
kps0.append(cv2.KeyPoint(*feature.cam0_point, 1))
kps1.append(cv2.KeyPoint(*feature.cam1_point, 1))
img = cv2.drawMatches(img0, kps0, img1, kps1, matches, None, flags=2)
cv2.imshow('stereo features', img)
cv2.waitKey(1)
def skew(vec):
    """Return the 3x3 skew-symmetric (cross-product) matrix of a 3-vector."""
    a, b, c = vec
    rows = [
        [0, -c, b],
        [c, 0, -a],
        [-b, a, 0],
    ]
    return np.array(rows)
def select(data, selectors):
    """Return the items of *data* whose paired entry in *selectors* is truthy
    (an eager analogue of itertools.compress; stops at the shorter input)."""
    kept = []
    for item, keep in zip(data, selectors):
        if keep:
            kept.append(item)
    return kept
|
normal
|
{
"blob_id": "02f196623907703255bf149db0435104d086da97",
"index": 8292,
"step-1": "<mask token>\n\n\nclass ImageProcessor(object):\n <mask token>\n\n def __init__(self, config):\n self.config = config\n self.is_first_img = True\n self.next_feature_id = 0\n self.detector = cv2.FastFeatureDetector_create(self.config.\n fast_threshold)\n self.imu_msg_buffer = []\n self.cam0_prev_img_msg = None\n self.cam0_curr_img_msg = None\n self.cam1_curr_img_msg = None\n self.prev_cam0_pyramid = None\n self.curr_cam0_pyramid = None\n self.curr_cam1_pyramid = None\n self.prev_features = [[] for _ in range(self.config.grid_num)]\n self.curr_features = [[] for _ in range(self.config.grid_num)]\n self.num_features = defaultdict(int)\n self.cam0_resolution = config.cam0_resolution\n self.cam0_intrinsics = config.cam0_intrinsics\n self.cam0_distortion_model = config.cam0_distortion_model\n self.cam0_distortion_coeffs = config.cam0_distortion_coeffs\n self.cam1_resolution = config.cam1_resolution\n self.cam1_intrinsics = config.cam1_intrinsics\n self.cam1_distortion_model = config.cam1_distortion_model\n self.cam1_distortion_coeffs = config.cam1_distortion_coeffs\n self.T_cam0_imu = np.linalg.inv(config.T_imu_cam0)\n self.R_cam0_imu = self.T_cam0_imu[:3, :3]\n self.t_cam0_imu = self.T_cam0_imu[:3, 3]\n self.T_cam1_imu = np.linalg.inv(config.T_imu_cam1)\n self.R_cam1_imu = self.T_cam1_imu[:3, :3]\n self.t_cam1_imu = self.T_cam1_imu[:3, 3]\n self.image_id = 0\n\n def stereo_callback(self, stereo_msg):\n \"\"\"\n Callback function for the stereo images.\n \"\"\"\n start = time.time()\n self.cam0_curr_img_msg = stereo_msg.cam0_msg\n self.cam1_curr_img_msg = stereo_msg.cam1_msg\n self.create_image_pyramids()\n if self.is_first_img:\n if not self.config.load_features_flag:\n self.initialize_first_frame()\n self.is_first_img = False\n elif not self.config.load_features_flag:\n t = time.time()\n self.track_features()\n print('___track_features:', time.time() - t)\n t = time.time()\n self.add_new_features()\n print('___add_new_features:', time.time() - t)\n t = 
time.time()\n self.prune_features()\n print('___prune_features:', time.time() - t)\n t = time.time()\n print('___draw_features_stereo:', time.time() - t)\n t = time.time()\n print('===image process elapsed:', time.time() - start,\n f'({stereo_msg.timestamp})')\n if not self.config.load_features_flag:\n try:\n self.save_features()\n return self.publish()\n finally:\n self.cam0_prev_img_msg = self.cam0_curr_img_msg\n self.prev_features = self.curr_features\n self.prev_cam0_pyramid = self.curr_cam0_pyramid\n self.curr_features = [[] for _ in range(self.config.grid_num)]\n else:\n self.load_features()\n return self.publish()\n\n def imu_callback(self, msg):\n \"\"\"\n Callback function for the imu message.\n \"\"\"\n self.imu_msg_buffer.append(msg)\n\n def create_image_pyramids(self):\n \"\"\"\n Create image pyramids used for KLT tracking.\n (Seems doesn't work in python)\n \"\"\"\n curr_cam0_img = self.cam0_curr_img_msg.image\n self.curr_cam0_pyramid = curr_cam0_img\n curr_cam1_img = self.cam1_curr_img_msg.image\n self.curr_cam1_pyramid = curr_cam1_img\n\n def initialize_first_frame(self):\n \"\"\"\n Initialize the image processing sequence, which is basically detect \n new features on the first set of stereo images.\n \"\"\"\n img = self.cam0_curr_img_msg.image\n grid_height, grid_width = self.get_grid_size(img)\n new_features = self.detector.detect(img)\n cam0_points = [kp.pt for kp in new_features]\n cam1_points, inlier_markers = self.stereo_match(cam0_points)\n cam0_inliers, cam1_inliers = [], []\n response_inliers = []\n for i, inlier in enumerate(inlier_markers):\n if not inlier:\n continue\n cam0_inliers.append(cam0_points[i])\n cam1_inliers.append(cam1_points[i])\n response_inliers.append(new_features[i].response)\n grid_new_features = [[] for _ in range(self.config.grid_num)]\n for i in range(len(cam0_inliers)):\n cam0_point = cam0_inliers[i]\n cam1_point = cam1_inliers[i]\n response = response_inliers[i]\n row = int(cam0_point[1] / grid_height)\n col = 
int(cam0_point[0] / grid_width)\n code = row * self.config.grid_col + col\n new_feature = FeatureMetaData()\n new_feature.response = response\n new_feature.cam0_point = cam0_point\n new_feature.cam1_point = cam1_point\n grid_new_features[code].append(new_feature)\n for i, new_features in enumerate(grid_new_features):\n for feature in sorted(new_features, key=lambda x: x.response,\n reverse=True)[:self.config.grid_min_feature_num]:\n self.curr_features[i].append(feature)\n self.curr_features[i][-1].id = self.next_feature_id\n self.curr_features[i][-1].lifetime = 1\n self.next_feature_id += 1\n\n def track_features(self):\n \"\"\"\n Tracker features on the newly received stereo images.\n \"\"\"\n img = self.cam0_curr_img_msg.image\n grid_height, grid_width = self.get_grid_size(img)\n cam0_R_p_c, cam1_R_p_c = self.integrate_imu_data()\n prev_ids = []\n prev_lifetime = []\n prev_cam0_points = []\n prev_cam1_points = []\n for feature in chain.from_iterable(self.prev_features):\n prev_ids.append(feature.id)\n prev_lifetime.append(feature.lifetime)\n prev_cam0_points.append(feature.cam0_point)\n prev_cam1_points.append(feature.cam1_point)\n prev_cam0_points = np.array(prev_cam0_points, dtype=np.float32)\n self.num_features['before_tracking'] = len(prev_cam0_points)\n if len(prev_cam0_points) == 0:\n return\n curr_cam0_points = self.predict_feature_tracking(prev_cam0_points,\n cam0_R_p_c, self.cam0_intrinsics)\n curr_cam0_points, track_inliers, _ = cv2.calcOpticalFlowPyrLK(self.\n prev_cam0_pyramid, self.curr_cam0_pyramid, prev_cam0_points.\n astype(np.float32), curr_cam0_points.astype(np.float32), **self\n .config.lk_params)\n for i, point in enumerate(curr_cam0_points):\n if not track_inliers[i]:\n continue\n if point[0] < 0 or point[0] > img.shape[1] - 1 or point[1\n ] < 0 or point[1] > img.shape[0] - 1:\n track_inliers[i] = 0\n prev_tracked_ids = select(prev_ids, track_inliers)\n prev_tracked_lifetime = select(prev_lifetime, track_inliers)\n prev_tracked_cam0_points = 
select(prev_cam0_points, track_inliers)\n prev_tracked_cam1_points = select(prev_cam1_points, track_inliers)\n curr_tracked_cam0_points = select(curr_cam0_points, track_inliers)\n self.num_features['after_tracking'] = len(curr_tracked_cam0_points)\n curr_cam1_points, match_inliers = self.stereo_match(\n curr_tracked_cam0_points)\n prev_matched_ids = select(prev_tracked_ids, match_inliers)\n prev_matched_lifetime = select(prev_tracked_lifetime, match_inliers)\n prev_matched_cam0_points = select(prev_tracked_cam0_points,\n match_inliers)\n prev_matched_cam1_points = select(prev_tracked_cam1_points,\n match_inliers)\n curr_matched_cam0_points = select(curr_tracked_cam0_points,\n match_inliers)\n curr_matched_cam1_points = select(curr_cam1_points, match_inliers)\n self.num_features['after_matching'] = len(curr_matched_cam0_points)\n cam0_ransac_inliers = [1] * len(prev_matched_cam0_points)\n cam1_ransac_inliers = [1] * len(prev_matched_cam1_points)\n after_ransac = 0\n for i in range(len(cam0_ransac_inliers)):\n if not (cam0_ransac_inliers[i] and cam1_ransac_inliers[i]):\n continue\n row = int(curr_matched_cam0_points[i][1] / grid_height)\n col = int(curr_matched_cam0_points[i][0] / grid_width)\n code = row * self.config.grid_col + col\n grid_new_feature = FeatureMetaData()\n grid_new_feature.id = prev_matched_ids[i]\n grid_new_feature.lifetime = prev_matched_lifetime[i] + 1\n grid_new_feature.cam0_point = curr_matched_cam0_points[i]\n grid_new_feature.cam1_point = curr_matched_cam1_points[i]\n prev_matched_lifetime[i] += 1\n self.curr_features[code].append(grid_new_feature)\n after_ransac += 1\n self.num_features['after_ransac'] = after_ransac\n\n def add_new_features(self):\n \"\"\"\n Detect new features on the image to ensure that the features are \n uniformly distributed on the image.\n \"\"\"\n curr_img = self.cam0_curr_img_msg.image\n grid_height, grid_width = self.get_grid_size(curr_img)\n mask = np.ones(curr_img.shape[:2], dtype='uint8')\n for feature in 
chain.from_iterable(self.curr_features):\n x, y = map(int, feature.cam0_point)\n mask[y - 3:y + 4, x - 3:x + 4] = 0\n new_features = self.detector.detect(curr_img, mask=mask)\n new_feature_sieve = [[] for _ in range(self.config.grid_num)]\n for feature in new_features:\n row = int(feature.pt[1] / grid_height)\n col = int(feature.pt[0] / grid_width)\n code = row * self.config.grid_col + col\n new_feature_sieve[code].append(feature)\n new_features = []\n for features in new_feature_sieve:\n if len(features) > self.config.grid_max_feature_num:\n features = sorted(features, key=lambda x: x.response,\n reverse=True)[:self.config.grid_max_feature_num]\n new_features.append(features)\n new_features = list(chain.from_iterable(new_features))\n cam0_points = [kp.pt for kp in new_features]\n cam1_points, inlier_markers = self.stereo_match(cam0_points)\n cam0_inliers, cam1_inliers, response_inliers = [], [], []\n for i, inlier in enumerate(inlier_markers):\n if not inlier:\n continue\n cam0_inliers.append(cam0_points[i])\n cam1_inliers.append(cam1_points[i])\n response_inliers.append(new_features[i].response)\n grid_new_features = [[] for _ in range(self.config.grid_num)]\n for i in range(len(cam0_inliers)):\n cam0_point = cam0_inliers[i]\n cam1_point = cam1_inliers[i]\n response = response_inliers[i]\n row = int(cam0_point[1] / grid_height)\n col = int(cam0_point[0] / grid_width)\n code = row * self.config.grid_col + col\n new_feature = FeatureMetaData()\n new_feature.response = response\n new_feature.cam0_point = cam0_point\n new_feature.cam1_point = cam1_point\n grid_new_features[code].append(new_feature)\n for i, new_features in enumerate(grid_new_features):\n for feature in sorted(new_features, key=lambda x: x.response,\n reverse=True)[:self.config.grid_min_feature_num]:\n self.curr_features[i].append(feature)\n self.curr_features[i][-1].id = self.next_feature_id\n self.curr_features[i][-1].lifetime = 1\n self.next_feature_id += 1\n\n def prune_features(self):\n \"\"\"\n 
Remove some of the features of a grid in case there are too many \n features inside of that grid, which ensures the number of features \n within each grid is bounded.\n \"\"\"\n for i, features in enumerate(self.curr_features):\n if len(features) <= self.config.grid_max_feature_num:\n continue\n self.curr_features[i] = sorted(features, key=lambda x: x.\n lifetime, reverse=True)[:self.config.grid_max_feature_num]\n <mask token>\n\n def save_features(self):\n filename = self.config.result_dir + str(self.image_id) + '.npz'\n np.savez(filename, self.curr_features)\n self.image_id += 1\n <mask token>\n\n def integrate_imu_data(self):\n \"\"\"\n Integrates the IMU gyro readings between the two consecutive images, \n which is used for both tracking prediction and 2-point RANSAC.\n\n Returns:\n cam0_R_p_c: a rotation matrix which takes a vector from previous \n cam0 frame to current cam0 frame.\n cam1_R_p_c: a rotation matrix which takes a vector from previous \n cam1 frame to current cam1 frame.\n \"\"\"\n idx_begin = None\n for i, msg in enumerate(self.imu_msg_buffer):\n if msg.timestamp >= self.cam0_prev_img_msg.timestamp - 0.01:\n idx_begin = i\n break\n idx_end = None\n for i, msg in enumerate(self.imu_msg_buffer):\n if msg.timestamp >= self.cam0_curr_img_msg.timestamp - 0.004:\n idx_end = i\n break\n if idx_begin is None or idx_end is None:\n return np.identity(3), np.identity(3)\n mean_ang_vel = np.zeros(3)\n for i in range(idx_begin, idx_end):\n mean_ang_vel += self.imu_msg_buffer[i].angular_velocity\n if idx_end > idx_begin:\n mean_ang_vel /= idx_end - idx_begin\n cam0_mean_ang_vel = self.R_cam0_imu.T @ mean_ang_vel\n cam1_mean_ang_vel = self.R_cam1_imu.T @ mean_ang_vel\n dt = (self.cam0_curr_img_msg.timestamp - self.cam0_prev_img_msg.\n timestamp)\n cam0_R_p_c = cv2.Rodrigues(cam0_mean_ang_vel * dt)[0].T\n cam1_R_p_c = cv2.Rodrigues(cam1_mean_ang_vel * dt)[0].T\n self.imu_msg_buffer = self.imu_msg_buffer[idx_end:]\n return cam0_R_p_c, cam1_R_p_c\n\n def 
rescale_points(self, pts1, pts2):\n \"\"\"\n Arguments:\n pts1: first set of points.\n pts2: second set of points.\n\n Returns:\n pts1: scaled first set of points.\n pts2: scaled second set of points.\n scaling_factor: scaling factor\n \"\"\"\n scaling_factor = 0\n for pt1, pt2 in zip(pts1, pts2):\n scaling_factor += np.linalg.norm(pt1)\n scaling_factor += np.linalg.norm(pt2)\n scaling_factor = (len(pts1) + len(pts2)) / scaling_factor * np.sqrt(2)\n for i in range(len(pts1)):\n pts1[i] *= scaling_factor\n pts2[i] *= scaling_factor\n return pts1, pts2, scaling_factor\n\n def get_grid_size(self, img):\n \"\"\"\n # Size of each grid.\n \"\"\"\n grid_height = int(np.ceil(img.shape[0] / self.config.grid_row))\n grid_width = int(np.ceil(img.shape[1] / self.config.grid_col))\n return grid_height, grid_width\n\n def predict_feature_tracking(self, input_pts, R_p_c, intrinsics):\n \"\"\"\n predictFeatureTracking Compensates the rotation between consecutive \n camera frames so that feature tracking would be more robust and fast.\n\n Arguments:\n input_pts: features in the previous image to be tracked.\n R_p_c: a rotation matrix takes a vector in the previous camera \n frame to the current camera frame. (matrix33)\n intrinsics: intrinsic matrix of the camera. 
(vec3)\n\n Returns:\n compensated_pts: predicted locations of the features in the \n current image based on the provided rotation.\n \"\"\"\n if len(input_pts) == 0:\n return []\n K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics\n [1], intrinsics[3]], [0.0, 0.0, 1.0]])\n H = K @ R_p_c @ np.linalg.inv(K)\n compensated_pts = []\n for i in range(len(input_pts)):\n p1 = np.array([*input_pts[i], 1.0])\n p2 = H @ p1\n compensated_pts.append(p2[:2] / p2[2])\n return np.array(compensated_pts, dtype=np.float32)\n\n def stereo_match(self, cam0_points):\n \"\"\"\n Matches features with stereo image pairs.\n\n Arguments:\n cam0_points: points in the primary image.\n\n Returns:\n cam1_points: points in the secondary image.\n inlier_markers: 1 if the match is valid, 0 otherwise.\n \"\"\"\n cam0_points = np.array(cam0_points)\n if len(cam0_points) == 0:\n return []\n R_cam0_cam1 = self.R_cam1_imu.T @ self.R_cam0_imu\n cam0_points_undistorted = self.undistort_points(cam0_points, self.\n cam0_intrinsics, self.cam0_distortion_model, self.\n cam0_distortion_coeffs, R_cam0_cam1)\n cam1_points = self.distort_points(cam0_points_undistorted, self.\n cam1_intrinsics, self.cam1_distortion_model, self.\n cam1_distortion_coeffs)\n cam1_points_copy = cam1_points.copy()\n cam0_points = cam0_points.astype(np.float32)\n cam1_points = cam1_points.astype(np.float32)\n cam1_points, inlier_markers, _ = cv2.calcOpticalFlowPyrLK(self.\n curr_cam0_pyramid, self.curr_cam1_pyramid, cam0_points,\n cam1_points, **self.config.lk_params)\n cam0_points_, _, _ = cv2.calcOpticalFlowPyrLK(self.\n curr_cam1_pyramid, self.curr_cam0_pyramid, cam1_points,\n cam0_points.copy(), **self.config.lk_params)\n err = np.linalg.norm(cam0_points - cam0_points_, axis=1)\n disparity = np.abs(cam1_points_copy[:, 1] - cam1_points[:, 1])\n inlier_markers = np.logical_and.reduce([inlier_markers.reshape(-1),\n err < 3, disparity < 20])\n img = self.cam1_curr_img_msg.image\n for i, point in 
enumerate(cam1_points):\n if not inlier_markers[i]:\n continue\n if point[0] < 0 or point[0] > img.shape[1] - 1 or point[1\n ] < 0 or point[1] > img.shape[0] - 1:\n inlier_markers[i] = 0\n t_cam0_cam1 = self.R_cam1_imu.T @ (self.t_cam0_imu - self.t_cam1_imu)\n E = skew(t_cam0_cam1) @ R_cam0_cam1\n cam0_points_undistorted = self.undistort_points(cam0_points, self.\n cam0_intrinsics, self.cam0_distortion_model, self.\n cam0_distortion_coeffs)\n cam1_points_undistorted = self.undistort_points(cam1_points, self.\n cam1_intrinsics, self.cam1_distortion_model, self.\n cam1_distortion_coeffs)\n norm_pixel_unit = 4.0 / (self.cam0_intrinsics[0] + self.\n cam0_intrinsics[1] + self.cam1_intrinsics[0] + self.\n cam1_intrinsics[1])\n for i in range(len(cam0_points_undistorted)):\n if not inlier_markers[i]:\n continue\n pt0 = np.array([*cam0_points_undistorted[i], 1.0])\n pt1 = np.array([*cam1_points_undistorted[i], 1.0])\n epipolar_line = E @ pt0\n error = np.abs((pt1 * epipolar_line)[0]) / np.linalg.norm(\n epipolar_line[:2])\n if error > self.config.stereo_threshold * norm_pixel_unit:\n inlier_markers[i] = 0\n return cam1_points, inlier_markers\n\n def undistort_points(self, pts_in, intrinsics, distortion_model,\n distortion_coeffs, rectification_matrix=np.identity(3),\n new_intrinsics=np.array([1, 1, 0, 0])):\n \"\"\"\n Arguments:\n pts_in: points to be undistorted.\n intrinsics: intrinsics of the camera.\n distortion_model: distortion model of the camera.\n distortion_coeffs: distortion coefficients.\n rectification_matrix:\n new_intrinsics:\n\n Returns:\n pts_out: undistorted points.\n \"\"\"\n if len(pts_in) == 0:\n return []\n pts_in = np.reshape(pts_in, (-1, 1, 2))\n K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics\n [1], intrinsics[3]], [0.0, 0.0, 1.0]])\n K_new = np.array([[new_intrinsics[0], 0.0, new_intrinsics[2]], [0.0,\n new_intrinsics[1], new_intrinsics[3]], [0.0, 0.0, 1.0]])\n if distortion_model == 'equidistant':\n pts_out = 
cv2.fisheye.undistortPoints(pts_in, K,\n distortion_coeffs, rectification_matrix, K_new)\n else:\n pts_out = cv2.undistortPoints(pts_in, K, distortion_coeffs,\n None, rectification_matrix, K_new)\n return pts_out.reshape((-1, 2))\n\n def distort_points(self, pts_in, intrinsics, distortion_model,\n distortion_coeffs):\n \"\"\"\n Arguments:\n pts_in: points to be distorted.\n intrinsics: intrinsics of the camera.\n distortion_model: distortion model of the camera.\n distortion_coeffs: distortion coefficients.\n\n Returns:\n pts_out: distorted points. (N, 2)\n \"\"\"\n if len(pts_in) == 0:\n return []\n K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics\n [1], intrinsics[3]], [0.0, 0.0, 1.0]])\n if distortion_model == 'equidistant':\n pts_out = cv2.fisheye.distortPoints(pts_in, K, distortion_coeffs)\n else:\n homogenous_pts = cv2.convertPointsToHomogeneous(pts_in)\n pts_out, _ = cv2.projectPoints(homogenous_pts, np.zeros(3), np.\n zeros(3), K, distortion_coeffs)\n return pts_out.reshape((-1, 2))\n\n def draw_features_stereo(self):\n img0 = self.cam0_curr_img_msg.image\n img1 = self.cam1_curr_img_msg.image\n kps0 = []\n kps1 = []\n matches = []\n for feature in chain.from_iterable(self.curr_features):\n matches.append(cv2.DMatch(len(kps0), len(kps0), 0))\n kps0.append(cv2.KeyPoint(*feature.cam0_point, 1))\n kps1.append(cv2.KeyPoint(*feature.cam1_point, 1))\n img = cv2.drawMatches(img0, kps0, img1, kps1, matches, None, flags=2)\n cv2.imshow('stereo features', img)\n cv2.waitKey(1)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ImageProcessor(object):\n <mask token>\n\n def __init__(self, config):\n self.config = config\n self.is_first_img = True\n self.next_feature_id = 0\n self.detector = cv2.FastFeatureDetector_create(self.config.\n fast_threshold)\n self.imu_msg_buffer = []\n self.cam0_prev_img_msg = None\n self.cam0_curr_img_msg = None\n self.cam1_curr_img_msg = None\n self.prev_cam0_pyramid = None\n self.curr_cam0_pyramid = None\n self.curr_cam1_pyramid = None\n self.prev_features = [[] for _ in range(self.config.grid_num)]\n self.curr_features = [[] for _ in range(self.config.grid_num)]\n self.num_features = defaultdict(int)\n self.cam0_resolution = config.cam0_resolution\n self.cam0_intrinsics = config.cam0_intrinsics\n self.cam0_distortion_model = config.cam0_distortion_model\n self.cam0_distortion_coeffs = config.cam0_distortion_coeffs\n self.cam1_resolution = config.cam1_resolution\n self.cam1_intrinsics = config.cam1_intrinsics\n self.cam1_distortion_model = config.cam1_distortion_model\n self.cam1_distortion_coeffs = config.cam1_distortion_coeffs\n self.T_cam0_imu = np.linalg.inv(config.T_imu_cam0)\n self.R_cam0_imu = self.T_cam0_imu[:3, :3]\n self.t_cam0_imu = self.T_cam0_imu[:3, 3]\n self.T_cam1_imu = np.linalg.inv(config.T_imu_cam1)\n self.R_cam1_imu = self.T_cam1_imu[:3, :3]\n self.t_cam1_imu = self.T_cam1_imu[:3, 3]\n self.image_id = 0\n\n def stereo_callback(self, stereo_msg):\n \"\"\"\n Callback function for the stereo images.\n \"\"\"\n start = time.time()\n self.cam0_curr_img_msg = stereo_msg.cam0_msg\n self.cam1_curr_img_msg = stereo_msg.cam1_msg\n self.create_image_pyramids()\n if self.is_first_img:\n if not self.config.load_features_flag:\n self.initialize_first_frame()\n self.is_first_img = False\n elif not self.config.load_features_flag:\n t = time.time()\n self.track_features()\n print('___track_features:', time.time() - t)\n t = time.time()\n self.add_new_features()\n print('___add_new_features:', time.time() - t)\n t = 
time.time()\n self.prune_features()\n print('___prune_features:', time.time() - t)\n t = time.time()\n print('___draw_features_stereo:', time.time() - t)\n t = time.time()\n print('===image process elapsed:', time.time() - start,\n f'({stereo_msg.timestamp})')\n if not self.config.load_features_flag:\n try:\n self.save_features()\n return self.publish()\n finally:\n self.cam0_prev_img_msg = self.cam0_curr_img_msg\n self.prev_features = self.curr_features\n self.prev_cam0_pyramid = self.curr_cam0_pyramid\n self.curr_features = [[] for _ in range(self.config.grid_num)]\n else:\n self.load_features()\n return self.publish()\n\n def imu_callback(self, msg):\n \"\"\"\n Callback function for the imu message.\n \"\"\"\n self.imu_msg_buffer.append(msg)\n\n def create_image_pyramids(self):\n \"\"\"\n Create image pyramids used for KLT tracking.\n (Seems doesn't work in python)\n \"\"\"\n curr_cam0_img = self.cam0_curr_img_msg.image\n self.curr_cam0_pyramid = curr_cam0_img\n curr_cam1_img = self.cam1_curr_img_msg.image\n self.curr_cam1_pyramid = curr_cam1_img\n\n def initialize_first_frame(self):\n \"\"\"\n Initialize the image processing sequence, which is basically detect \n new features on the first set of stereo images.\n \"\"\"\n img = self.cam0_curr_img_msg.image\n grid_height, grid_width = self.get_grid_size(img)\n new_features = self.detector.detect(img)\n cam0_points = [kp.pt for kp in new_features]\n cam1_points, inlier_markers = self.stereo_match(cam0_points)\n cam0_inliers, cam1_inliers = [], []\n response_inliers = []\n for i, inlier in enumerate(inlier_markers):\n if not inlier:\n continue\n cam0_inliers.append(cam0_points[i])\n cam1_inliers.append(cam1_points[i])\n response_inliers.append(new_features[i].response)\n grid_new_features = [[] for _ in range(self.config.grid_num)]\n for i in range(len(cam0_inliers)):\n cam0_point = cam0_inliers[i]\n cam1_point = cam1_inliers[i]\n response = response_inliers[i]\n row = int(cam0_point[1] / grid_height)\n col = 
int(cam0_point[0] / grid_width)\n code = row * self.config.grid_col + col\n new_feature = FeatureMetaData()\n new_feature.response = response\n new_feature.cam0_point = cam0_point\n new_feature.cam1_point = cam1_point\n grid_new_features[code].append(new_feature)\n for i, new_features in enumerate(grid_new_features):\n for feature in sorted(new_features, key=lambda x: x.response,\n reverse=True)[:self.config.grid_min_feature_num]:\n self.curr_features[i].append(feature)\n self.curr_features[i][-1].id = self.next_feature_id\n self.curr_features[i][-1].lifetime = 1\n self.next_feature_id += 1\n\n def track_features(self):\n \"\"\"\n Tracker features on the newly received stereo images.\n \"\"\"\n img = self.cam0_curr_img_msg.image\n grid_height, grid_width = self.get_grid_size(img)\n cam0_R_p_c, cam1_R_p_c = self.integrate_imu_data()\n prev_ids = []\n prev_lifetime = []\n prev_cam0_points = []\n prev_cam1_points = []\n for feature in chain.from_iterable(self.prev_features):\n prev_ids.append(feature.id)\n prev_lifetime.append(feature.lifetime)\n prev_cam0_points.append(feature.cam0_point)\n prev_cam1_points.append(feature.cam1_point)\n prev_cam0_points = np.array(prev_cam0_points, dtype=np.float32)\n self.num_features['before_tracking'] = len(prev_cam0_points)\n if len(prev_cam0_points) == 0:\n return\n curr_cam0_points = self.predict_feature_tracking(prev_cam0_points,\n cam0_R_p_c, self.cam0_intrinsics)\n curr_cam0_points, track_inliers, _ = cv2.calcOpticalFlowPyrLK(self.\n prev_cam0_pyramid, self.curr_cam0_pyramid, prev_cam0_points.\n astype(np.float32), curr_cam0_points.astype(np.float32), **self\n .config.lk_params)\n for i, point in enumerate(curr_cam0_points):\n if not track_inliers[i]:\n continue\n if point[0] < 0 or point[0] > img.shape[1] - 1 or point[1\n ] < 0 or point[1] > img.shape[0] - 1:\n track_inliers[i] = 0\n prev_tracked_ids = select(prev_ids, track_inliers)\n prev_tracked_lifetime = select(prev_lifetime, track_inliers)\n prev_tracked_cam0_points = 
select(prev_cam0_points, track_inliers)\n prev_tracked_cam1_points = select(prev_cam1_points, track_inliers)\n curr_tracked_cam0_points = select(curr_cam0_points, track_inliers)\n self.num_features['after_tracking'] = len(curr_tracked_cam0_points)\n curr_cam1_points, match_inliers = self.stereo_match(\n curr_tracked_cam0_points)\n prev_matched_ids = select(prev_tracked_ids, match_inliers)\n prev_matched_lifetime = select(prev_tracked_lifetime, match_inliers)\n prev_matched_cam0_points = select(prev_tracked_cam0_points,\n match_inliers)\n prev_matched_cam1_points = select(prev_tracked_cam1_points,\n match_inliers)\n curr_matched_cam0_points = select(curr_tracked_cam0_points,\n match_inliers)\n curr_matched_cam1_points = select(curr_cam1_points, match_inliers)\n self.num_features['after_matching'] = len(curr_matched_cam0_points)\n cam0_ransac_inliers = [1] * len(prev_matched_cam0_points)\n cam1_ransac_inliers = [1] * len(prev_matched_cam1_points)\n after_ransac = 0\n for i in range(len(cam0_ransac_inliers)):\n if not (cam0_ransac_inliers[i] and cam1_ransac_inliers[i]):\n continue\n row = int(curr_matched_cam0_points[i][1] / grid_height)\n col = int(curr_matched_cam0_points[i][0] / grid_width)\n code = row * self.config.grid_col + col\n grid_new_feature = FeatureMetaData()\n grid_new_feature.id = prev_matched_ids[i]\n grid_new_feature.lifetime = prev_matched_lifetime[i] + 1\n grid_new_feature.cam0_point = curr_matched_cam0_points[i]\n grid_new_feature.cam1_point = curr_matched_cam1_points[i]\n prev_matched_lifetime[i] += 1\n self.curr_features[code].append(grid_new_feature)\n after_ransac += 1\n self.num_features['after_ransac'] = after_ransac\n\n def add_new_features(self):\n \"\"\"\n Detect new features on the image to ensure that the features are \n uniformly distributed on the image.\n \"\"\"\n curr_img = self.cam0_curr_img_msg.image\n grid_height, grid_width = self.get_grid_size(curr_img)\n mask = np.ones(curr_img.shape[:2], dtype='uint8')\n for feature in 
chain.from_iterable(self.curr_features):\n x, y = map(int, feature.cam0_point)\n mask[y - 3:y + 4, x - 3:x + 4] = 0\n new_features = self.detector.detect(curr_img, mask=mask)\n new_feature_sieve = [[] for _ in range(self.config.grid_num)]\n for feature in new_features:\n row = int(feature.pt[1] / grid_height)\n col = int(feature.pt[0] / grid_width)\n code = row * self.config.grid_col + col\n new_feature_sieve[code].append(feature)\n new_features = []\n for features in new_feature_sieve:\n if len(features) > self.config.grid_max_feature_num:\n features = sorted(features, key=lambda x: x.response,\n reverse=True)[:self.config.grid_max_feature_num]\n new_features.append(features)\n new_features = list(chain.from_iterable(new_features))\n cam0_points = [kp.pt for kp in new_features]\n cam1_points, inlier_markers = self.stereo_match(cam0_points)\n cam0_inliers, cam1_inliers, response_inliers = [], [], []\n for i, inlier in enumerate(inlier_markers):\n if not inlier:\n continue\n cam0_inliers.append(cam0_points[i])\n cam1_inliers.append(cam1_points[i])\n response_inliers.append(new_features[i].response)\n grid_new_features = [[] for _ in range(self.config.grid_num)]\n for i in range(len(cam0_inliers)):\n cam0_point = cam0_inliers[i]\n cam1_point = cam1_inliers[i]\n response = response_inliers[i]\n row = int(cam0_point[1] / grid_height)\n col = int(cam0_point[0] / grid_width)\n code = row * self.config.grid_col + col\n new_feature = FeatureMetaData()\n new_feature.response = response\n new_feature.cam0_point = cam0_point\n new_feature.cam1_point = cam1_point\n grid_new_features[code].append(new_feature)\n for i, new_features in enumerate(grid_new_features):\n for feature in sorted(new_features, key=lambda x: x.response,\n reverse=True)[:self.config.grid_min_feature_num]:\n self.curr_features[i].append(feature)\n self.curr_features[i][-1].id = self.next_feature_id\n self.curr_features[i][-1].lifetime = 1\n self.next_feature_id += 1\n\n def prune_features(self):\n \"\"\"\n 
Remove some of the features of a grid in case there are too many \n features inside of that grid, which ensures the number of features \n within each grid is bounded.\n \"\"\"\n for i, features in enumerate(self.curr_features):\n if len(features) <= self.config.grid_max_feature_num:\n continue\n self.curr_features[i] = sorted(features, key=lambda x: x.\n lifetime, reverse=True)[:self.config.grid_max_feature_num]\n\n def load_features(self):\n filename = self.config.result_dir + str(self.image_id) + '.npz'\n self.curr_features = np.load(filename, allow_pickle=True)['arr_0']\n self.image_id += 1\n\n def save_features(self):\n filename = self.config.result_dir + str(self.image_id) + '.npz'\n np.savez(filename, self.curr_features)\n self.image_id += 1\n\n def publish(self):\n \"\"\"\n Publish the features on the current image including both the \n tracked and newly detected ones.\n \"\"\"\n curr_ids = []\n curr_cam0_points = []\n curr_cam1_points = []\n for feature in chain.from_iterable(self.curr_features):\n curr_ids.append(feature.id)\n curr_cam0_points.append(feature.cam0_point)\n curr_cam1_points.append(feature.cam1_point)\n curr_cam0_points_undistorted = self.undistort_points(curr_cam0_points,\n self.cam0_intrinsics, self.cam0_distortion_model, self.\n cam0_distortion_coeffs)\n curr_cam1_points_undistorted = self.undistort_points(curr_cam1_points,\n self.cam1_intrinsics, self.cam1_distortion_model, self.\n cam1_distortion_coeffs)\n features = []\n for i in range(len(curr_ids)):\n fm = FeatureMeasurement()\n fm.id = curr_ids[i]\n fm.u0 = curr_cam0_points_undistorted[i][0]\n fm.v0 = curr_cam0_points_undistorted[i][1]\n fm.u1 = curr_cam1_points_undistorted[i][0]\n fm.v1 = curr_cam1_points_undistorted[i][1]\n features.append(fm)\n feature_msg = namedtuple('feature_msg', ['timestamp', 'features'])(self\n .cam0_curr_img_msg.timestamp, features)\n return feature_msg\n\n def integrate_imu_data(self):\n \"\"\"\n Integrates the IMU gyro readings between the two consecutive 
images, \n which is used for both tracking prediction and 2-point RANSAC.\n\n Returns:\n cam0_R_p_c: a rotation matrix which takes a vector from previous \n cam0 frame to current cam0 frame.\n cam1_R_p_c: a rotation matrix which takes a vector from previous \n cam1 frame to current cam1 frame.\n \"\"\"\n idx_begin = None\n for i, msg in enumerate(self.imu_msg_buffer):\n if msg.timestamp >= self.cam0_prev_img_msg.timestamp - 0.01:\n idx_begin = i\n break\n idx_end = None\n for i, msg in enumerate(self.imu_msg_buffer):\n if msg.timestamp >= self.cam0_curr_img_msg.timestamp - 0.004:\n idx_end = i\n break\n if idx_begin is None or idx_end is None:\n return np.identity(3), np.identity(3)\n mean_ang_vel = np.zeros(3)\n for i in range(idx_begin, idx_end):\n mean_ang_vel += self.imu_msg_buffer[i].angular_velocity\n if idx_end > idx_begin:\n mean_ang_vel /= idx_end - idx_begin\n cam0_mean_ang_vel = self.R_cam0_imu.T @ mean_ang_vel\n cam1_mean_ang_vel = self.R_cam1_imu.T @ mean_ang_vel\n dt = (self.cam0_curr_img_msg.timestamp - self.cam0_prev_img_msg.\n timestamp)\n cam0_R_p_c = cv2.Rodrigues(cam0_mean_ang_vel * dt)[0].T\n cam1_R_p_c = cv2.Rodrigues(cam1_mean_ang_vel * dt)[0].T\n self.imu_msg_buffer = self.imu_msg_buffer[idx_end:]\n return cam0_R_p_c, cam1_R_p_c\n\n def rescale_points(self, pts1, pts2):\n \"\"\"\n Arguments:\n pts1: first set of points.\n pts2: second set of points.\n\n Returns:\n pts1: scaled first set of points.\n pts2: scaled second set of points.\n scaling_factor: scaling factor\n \"\"\"\n scaling_factor = 0\n for pt1, pt2 in zip(pts1, pts2):\n scaling_factor += np.linalg.norm(pt1)\n scaling_factor += np.linalg.norm(pt2)\n scaling_factor = (len(pts1) + len(pts2)) / scaling_factor * np.sqrt(2)\n for i in range(len(pts1)):\n pts1[i] *= scaling_factor\n pts2[i] *= scaling_factor\n return pts1, pts2, scaling_factor\n\n def get_grid_size(self, img):\n \"\"\"\n # Size of each grid.\n \"\"\"\n grid_height = int(np.ceil(img.shape[0] / self.config.grid_row))\n 
grid_width = int(np.ceil(img.shape[1] / self.config.grid_col))\n return grid_height, grid_width\n\n def predict_feature_tracking(self, input_pts, R_p_c, intrinsics):\n \"\"\"\n predictFeatureTracking Compensates the rotation between consecutive \n camera frames so that feature tracking would be more robust and fast.\n\n Arguments:\n input_pts: features in the previous image to be tracked.\n R_p_c: a rotation matrix takes a vector in the previous camera \n frame to the current camera frame. (matrix33)\n intrinsics: intrinsic matrix of the camera. (vec3)\n\n Returns:\n compensated_pts: predicted locations of the features in the \n current image based on the provided rotation.\n \"\"\"\n if len(input_pts) == 0:\n return []\n K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics\n [1], intrinsics[3]], [0.0, 0.0, 1.0]])\n H = K @ R_p_c @ np.linalg.inv(K)\n compensated_pts = []\n for i in range(len(input_pts)):\n p1 = np.array([*input_pts[i], 1.0])\n p2 = H @ p1\n compensated_pts.append(p2[:2] / p2[2])\n return np.array(compensated_pts, dtype=np.float32)\n\n def stereo_match(self, cam0_points):\n \"\"\"\n Matches features with stereo image pairs.\n\n Arguments:\n cam0_points: points in the primary image.\n\n Returns:\n cam1_points: points in the secondary image.\n inlier_markers: 1 if the match is valid, 0 otherwise.\n \"\"\"\n cam0_points = np.array(cam0_points)\n if len(cam0_points) == 0:\n return []\n R_cam0_cam1 = self.R_cam1_imu.T @ self.R_cam0_imu\n cam0_points_undistorted = self.undistort_points(cam0_points, self.\n cam0_intrinsics, self.cam0_distortion_model, self.\n cam0_distortion_coeffs, R_cam0_cam1)\n cam1_points = self.distort_points(cam0_points_undistorted, self.\n cam1_intrinsics, self.cam1_distortion_model, self.\n cam1_distortion_coeffs)\n cam1_points_copy = cam1_points.copy()\n cam0_points = cam0_points.astype(np.float32)\n cam1_points = cam1_points.astype(np.float32)\n cam1_points, inlier_markers, _ = cv2.calcOpticalFlowPyrLK(self.\n 
curr_cam0_pyramid, self.curr_cam1_pyramid, cam0_points,\n cam1_points, **self.config.lk_params)\n cam0_points_, _, _ = cv2.calcOpticalFlowPyrLK(self.\n curr_cam1_pyramid, self.curr_cam0_pyramid, cam1_points,\n cam0_points.copy(), **self.config.lk_params)\n err = np.linalg.norm(cam0_points - cam0_points_, axis=1)\n disparity = np.abs(cam1_points_copy[:, 1] - cam1_points[:, 1])\n inlier_markers = np.logical_and.reduce([inlier_markers.reshape(-1),\n err < 3, disparity < 20])\n img = self.cam1_curr_img_msg.image\n for i, point in enumerate(cam1_points):\n if not inlier_markers[i]:\n continue\n if point[0] < 0 or point[0] > img.shape[1] - 1 or point[1\n ] < 0 or point[1] > img.shape[0] - 1:\n inlier_markers[i] = 0\n t_cam0_cam1 = self.R_cam1_imu.T @ (self.t_cam0_imu - self.t_cam1_imu)\n E = skew(t_cam0_cam1) @ R_cam0_cam1\n cam0_points_undistorted = self.undistort_points(cam0_points, self.\n cam0_intrinsics, self.cam0_distortion_model, self.\n cam0_distortion_coeffs)\n cam1_points_undistorted = self.undistort_points(cam1_points, self.\n cam1_intrinsics, self.cam1_distortion_model, self.\n cam1_distortion_coeffs)\n norm_pixel_unit = 4.0 / (self.cam0_intrinsics[0] + self.\n cam0_intrinsics[1] + self.cam1_intrinsics[0] + self.\n cam1_intrinsics[1])\n for i in range(len(cam0_points_undistorted)):\n if not inlier_markers[i]:\n continue\n pt0 = np.array([*cam0_points_undistorted[i], 1.0])\n pt1 = np.array([*cam1_points_undistorted[i], 1.0])\n epipolar_line = E @ pt0\n error = np.abs((pt1 * epipolar_line)[0]) / np.linalg.norm(\n epipolar_line[:2])\n if error > self.config.stereo_threshold * norm_pixel_unit:\n inlier_markers[i] = 0\n return cam1_points, inlier_markers\n\n def undistort_points(self, pts_in, intrinsics, distortion_model,\n distortion_coeffs, rectification_matrix=np.identity(3),\n new_intrinsics=np.array([1, 1, 0, 0])):\n \"\"\"\n Arguments:\n pts_in: points to be undistorted.\n intrinsics: intrinsics of the camera.\n distortion_model: distortion model of the 
camera.\n distortion_coeffs: distortion coefficients.\n rectification_matrix:\n new_intrinsics:\n\n Returns:\n pts_out: undistorted points.\n \"\"\"\n if len(pts_in) == 0:\n return []\n pts_in = np.reshape(pts_in, (-1, 1, 2))\n K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics\n [1], intrinsics[3]], [0.0, 0.0, 1.0]])\n K_new = np.array([[new_intrinsics[0], 0.0, new_intrinsics[2]], [0.0,\n new_intrinsics[1], new_intrinsics[3]], [0.0, 0.0, 1.0]])\n if distortion_model == 'equidistant':\n pts_out = cv2.fisheye.undistortPoints(pts_in, K,\n distortion_coeffs, rectification_matrix, K_new)\n else:\n pts_out = cv2.undistortPoints(pts_in, K, distortion_coeffs,\n None, rectification_matrix, K_new)\n return pts_out.reshape((-1, 2))\n\n def distort_points(self, pts_in, intrinsics, distortion_model,\n distortion_coeffs):\n \"\"\"\n Arguments:\n pts_in: points to be distorted.\n intrinsics: intrinsics of the camera.\n distortion_model: distortion model of the camera.\n distortion_coeffs: distortion coefficients.\n\n Returns:\n pts_out: distorted points. 
(N, 2)\n \"\"\"\n if len(pts_in) == 0:\n return []\n K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics\n [1], intrinsics[3]], [0.0, 0.0, 1.0]])\n if distortion_model == 'equidistant':\n pts_out = cv2.fisheye.distortPoints(pts_in, K, distortion_coeffs)\n else:\n homogenous_pts = cv2.convertPointsToHomogeneous(pts_in)\n pts_out, _ = cv2.projectPoints(homogenous_pts, np.zeros(3), np.\n zeros(3), K, distortion_coeffs)\n return pts_out.reshape((-1, 2))\n\n def draw_features_stereo(self):\n img0 = self.cam0_curr_img_msg.image\n img1 = self.cam1_curr_img_msg.image\n kps0 = []\n kps1 = []\n matches = []\n for feature in chain.from_iterable(self.curr_features):\n matches.append(cv2.DMatch(len(kps0), len(kps0), 0))\n kps0.append(cv2.KeyPoint(*feature.cam0_point, 1))\n kps1.append(cv2.KeyPoint(*feature.cam1_point, 1))\n img = cv2.drawMatches(img0, kps0, img1, kps1, matches, None, flags=2)\n cv2.imshow('stereo features', img)\n cv2.waitKey(1)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass FeatureMeasurement(object):\n <mask token>\n\n def __init__(self):\n self.id = None\n self.u0 = None\n self.v0 = None\n self.u1 = None\n self.v1 = None\n\n\nclass ImageProcessor(object):\n \"\"\"\n Detect and track features in image sequences.\n \"\"\"\n\n def __init__(self, config):\n self.config = config\n self.is_first_img = True\n self.next_feature_id = 0\n self.detector = cv2.FastFeatureDetector_create(self.config.\n fast_threshold)\n self.imu_msg_buffer = []\n self.cam0_prev_img_msg = None\n self.cam0_curr_img_msg = None\n self.cam1_curr_img_msg = None\n self.prev_cam0_pyramid = None\n self.curr_cam0_pyramid = None\n self.curr_cam1_pyramid = None\n self.prev_features = [[] for _ in range(self.config.grid_num)]\n self.curr_features = [[] for _ in range(self.config.grid_num)]\n self.num_features = defaultdict(int)\n self.cam0_resolution = config.cam0_resolution\n self.cam0_intrinsics = config.cam0_intrinsics\n self.cam0_distortion_model = config.cam0_distortion_model\n self.cam0_distortion_coeffs = config.cam0_distortion_coeffs\n self.cam1_resolution = config.cam1_resolution\n self.cam1_intrinsics = config.cam1_intrinsics\n self.cam1_distortion_model = config.cam1_distortion_model\n self.cam1_distortion_coeffs = config.cam1_distortion_coeffs\n self.T_cam0_imu = np.linalg.inv(config.T_imu_cam0)\n self.R_cam0_imu = self.T_cam0_imu[:3, :3]\n self.t_cam0_imu = self.T_cam0_imu[:3, 3]\n self.T_cam1_imu = np.linalg.inv(config.T_imu_cam1)\n self.R_cam1_imu = self.T_cam1_imu[:3, :3]\n self.t_cam1_imu = self.T_cam1_imu[:3, 3]\n self.image_id = 0\n\n def stereo_callback(self, stereo_msg):\n \"\"\"\n Callback function for the stereo images.\n \"\"\"\n start = time.time()\n self.cam0_curr_img_msg = stereo_msg.cam0_msg\n self.cam1_curr_img_msg = stereo_msg.cam1_msg\n self.create_image_pyramids()\n if self.is_first_img:\n if not self.config.load_features_flag:\n self.initialize_first_frame()\n self.is_first_img = False\n elif not 
self.config.load_features_flag:\n t = time.time()\n self.track_features()\n print('___track_features:', time.time() - t)\n t = time.time()\n self.add_new_features()\n print('___add_new_features:', time.time() - t)\n t = time.time()\n self.prune_features()\n print('___prune_features:', time.time() - t)\n t = time.time()\n print('___draw_features_stereo:', time.time() - t)\n t = time.time()\n print('===image process elapsed:', time.time() - start,\n f'({stereo_msg.timestamp})')\n if not self.config.load_features_flag:\n try:\n self.save_features()\n return self.publish()\n finally:\n self.cam0_prev_img_msg = self.cam0_curr_img_msg\n self.prev_features = self.curr_features\n self.prev_cam0_pyramid = self.curr_cam0_pyramid\n self.curr_features = [[] for _ in range(self.config.grid_num)]\n else:\n self.load_features()\n return self.publish()\n\n def imu_callback(self, msg):\n \"\"\"\n Callback function for the imu message.\n \"\"\"\n self.imu_msg_buffer.append(msg)\n\n def create_image_pyramids(self):\n \"\"\"\n Create image pyramids used for KLT tracking.\n (Seems doesn't work in python)\n \"\"\"\n curr_cam0_img = self.cam0_curr_img_msg.image\n self.curr_cam0_pyramid = curr_cam0_img\n curr_cam1_img = self.cam1_curr_img_msg.image\n self.curr_cam1_pyramid = curr_cam1_img\n\n def initialize_first_frame(self):\n \"\"\"\n Initialize the image processing sequence, which is basically detect \n new features on the first set of stereo images.\n \"\"\"\n img = self.cam0_curr_img_msg.image\n grid_height, grid_width = self.get_grid_size(img)\n new_features = self.detector.detect(img)\n cam0_points = [kp.pt for kp in new_features]\n cam1_points, inlier_markers = self.stereo_match(cam0_points)\n cam0_inliers, cam1_inliers = [], []\n response_inliers = []\n for i, inlier in enumerate(inlier_markers):\n if not inlier:\n continue\n cam0_inliers.append(cam0_points[i])\n cam1_inliers.append(cam1_points[i])\n response_inliers.append(new_features[i].response)\n grid_new_features = [[] for 
_ in range(self.config.grid_num)]\n for i in range(len(cam0_inliers)):\n cam0_point = cam0_inliers[i]\n cam1_point = cam1_inliers[i]\n response = response_inliers[i]\n row = int(cam0_point[1] / grid_height)\n col = int(cam0_point[0] / grid_width)\n code = row * self.config.grid_col + col\n new_feature = FeatureMetaData()\n new_feature.response = response\n new_feature.cam0_point = cam0_point\n new_feature.cam1_point = cam1_point\n grid_new_features[code].append(new_feature)\n for i, new_features in enumerate(grid_new_features):\n for feature in sorted(new_features, key=lambda x: x.response,\n reverse=True)[:self.config.grid_min_feature_num]:\n self.curr_features[i].append(feature)\n self.curr_features[i][-1].id = self.next_feature_id\n self.curr_features[i][-1].lifetime = 1\n self.next_feature_id += 1\n\n def track_features(self):\n \"\"\"\n Tracker features on the newly received stereo images.\n \"\"\"\n img = self.cam0_curr_img_msg.image\n grid_height, grid_width = self.get_grid_size(img)\n cam0_R_p_c, cam1_R_p_c = self.integrate_imu_data()\n prev_ids = []\n prev_lifetime = []\n prev_cam0_points = []\n prev_cam1_points = []\n for feature in chain.from_iterable(self.prev_features):\n prev_ids.append(feature.id)\n prev_lifetime.append(feature.lifetime)\n prev_cam0_points.append(feature.cam0_point)\n prev_cam1_points.append(feature.cam1_point)\n prev_cam0_points = np.array(prev_cam0_points, dtype=np.float32)\n self.num_features['before_tracking'] = len(prev_cam0_points)\n if len(prev_cam0_points) == 0:\n return\n curr_cam0_points = self.predict_feature_tracking(prev_cam0_points,\n cam0_R_p_c, self.cam0_intrinsics)\n curr_cam0_points, track_inliers, _ = cv2.calcOpticalFlowPyrLK(self.\n prev_cam0_pyramid, self.curr_cam0_pyramid, prev_cam0_points.\n astype(np.float32), curr_cam0_points.astype(np.float32), **self\n .config.lk_params)\n for i, point in enumerate(curr_cam0_points):\n if not track_inliers[i]:\n continue\n if point[0] < 0 or point[0] > img.shape[1] - 1 or 
point[1\n ] < 0 or point[1] > img.shape[0] - 1:\n track_inliers[i] = 0\n prev_tracked_ids = select(prev_ids, track_inliers)\n prev_tracked_lifetime = select(prev_lifetime, track_inliers)\n prev_tracked_cam0_points = select(prev_cam0_points, track_inliers)\n prev_tracked_cam1_points = select(prev_cam1_points, track_inliers)\n curr_tracked_cam0_points = select(curr_cam0_points, track_inliers)\n self.num_features['after_tracking'] = len(curr_tracked_cam0_points)\n curr_cam1_points, match_inliers = self.stereo_match(\n curr_tracked_cam0_points)\n prev_matched_ids = select(prev_tracked_ids, match_inliers)\n prev_matched_lifetime = select(prev_tracked_lifetime, match_inliers)\n prev_matched_cam0_points = select(prev_tracked_cam0_points,\n match_inliers)\n prev_matched_cam1_points = select(prev_tracked_cam1_points,\n match_inliers)\n curr_matched_cam0_points = select(curr_tracked_cam0_points,\n match_inliers)\n curr_matched_cam1_points = select(curr_cam1_points, match_inliers)\n self.num_features['after_matching'] = len(curr_matched_cam0_points)\n cam0_ransac_inliers = [1] * len(prev_matched_cam0_points)\n cam1_ransac_inliers = [1] * len(prev_matched_cam1_points)\n after_ransac = 0\n for i in range(len(cam0_ransac_inliers)):\n if not (cam0_ransac_inliers[i] and cam1_ransac_inliers[i]):\n continue\n row = int(curr_matched_cam0_points[i][1] / grid_height)\n col = int(curr_matched_cam0_points[i][0] / grid_width)\n code = row * self.config.grid_col + col\n grid_new_feature = FeatureMetaData()\n grid_new_feature.id = prev_matched_ids[i]\n grid_new_feature.lifetime = prev_matched_lifetime[i] + 1\n grid_new_feature.cam0_point = curr_matched_cam0_points[i]\n grid_new_feature.cam1_point = curr_matched_cam1_points[i]\n prev_matched_lifetime[i] += 1\n self.curr_features[code].append(grid_new_feature)\n after_ransac += 1\n self.num_features['after_ransac'] = after_ransac\n\n def add_new_features(self):\n \"\"\"\n Detect new features on the image to ensure that the features are \n 
uniformly distributed on the image.\n \"\"\"\n curr_img = self.cam0_curr_img_msg.image\n grid_height, grid_width = self.get_grid_size(curr_img)\n mask = np.ones(curr_img.shape[:2], dtype='uint8')\n for feature in chain.from_iterable(self.curr_features):\n x, y = map(int, feature.cam0_point)\n mask[y - 3:y + 4, x - 3:x + 4] = 0\n new_features = self.detector.detect(curr_img, mask=mask)\n new_feature_sieve = [[] for _ in range(self.config.grid_num)]\n for feature in new_features:\n row = int(feature.pt[1] / grid_height)\n col = int(feature.pt[0] / grid_width)\n code = row * self.config.grid_col + col\n new_feature_sieve[code].append(feature)\n new_features = []\n for features in new_feature_sieve:\n if len(features) > self.config.grid_max_feature_num:\n features = sorted(features, key=lambda x: x.response,\n reverse=True)[:self.config.grid_max_feature_num]\n new_features.append(features)\n new_features = list(chain.from_iterable(new_features))\n cam0_points = [kp.pt for kp in new_features]\n cam1_points, inlier_markers = self.stereo_match(cam0_points)\n cam0_inliers, cam1_inliers, response_inliers = [], [], []\n for i, inlier in enumerate(inlier_markers):\n if not inlier:\n continue\n cam0_inliers.append(cam0_points[i])\n cam1_inliers.append(cam1_points[i])\n response_inliers.append(new_features[i].response)\n grid_new_features = [[] for _ in range(self.config.grid_num)]\n for i in range(len(cam0_inliers)):\n cam0_point = cam0_inliers[i]\n cam1_point = cam1_inliers[i]\n response = response_inliers[i]\n row = int(cam0_point[1] / grid_height)\n col = int(cam0_point[0] / grid_width)\n code = row * self.config.grid_col + col\n new_feature = FeatureMetaData()\n new_feature.response = response\n new_feature.cam0_point = cam0_point\n new_feature.cam1_point = cam1_point\n grid_new_features[code].append(new_feature)\n for i, new_features in enumerate(grid_new_features):\n for feature in sorted(new_features, key=lambda x: x.response,\n 
reverse=True)[:self.config.grid_min_feature_num]:\n self.curr_features[i].append(feature)\n self.curr_features[i][-1].id = self.next_feature_id\n self.curr_features[i][-1].lifetime = 1\n self.next_feature_id += 1\n\n def prune_features(self):\n \"\"\"\n Remove some of the features of a grid in case there are too many \n features inside of that grid, which ensures the number of features \n within each grid is bounded.\n \"\"\"\n for i, features in enumerate(self.curr_features):\n if len(features) <= self.config.grid_max_feature_num:\n continue\n self.curr_features[i] = sorted(features, key=lambda x: x.\n lifetime, reverse=True)[:self.config.grid_max_feature_num]\n\n def load_features(self):\n filename = self.config.result_dir + str(self.image_id) + '.npz'\n self.curr_features = np.load(filename, allow_pickle=True)['arr_0']\n self.image_id += 1\n\n def save_features(self):\n filename = self.config.result_dir + str(self.image_id) + '.npz'\n np.savez(filename, self.curr_features)\n self.image_id += 1\n\n def publish(self):\n \"\"\"\n Publish the features on the current image including both the \n tracked and newly detected ones.\n \"\"\"\n curr_ids = []\n curr_cam0_points = []\n curr_cam1_points = []\n for feature in chain.from_iterable(self.curr_features):\n curr_ids.append(feature.id)\n curr_cam0_points.append(feature.cam0_point)\n curr_cam1_points.append(feature.cam1_point)\n curr_cam0_points_undistorted = self.undistort_points(curr_cam0_points,\n self.cam0_intrinsics, self.cam0_distortion_model, self.\n cam0_distortion_coeffs)\n curr_cam1_points_undistorted = self.undistort_points(curr_cam1_points,\n self.cam1_intrinsics, self.cam1_distortion_model, self.\n cam1_distortion_coeffs)\n features = []\n for i in range(len(curr_ids)):\n fm = FeatureMeasurement()\n fm.id = curr_ids[i]\n fm.u0 = curr_cam0_points_undistorted[i][0]\n fm.v0 = curr_cam0_points_undistorted[i][1]\n fm.u1 = curr_cam1_points_undistorted[i][0]\n fm.v1 = curr_cam1_points_undistorted[i][1]\n 
features.append(fm)\n feature_msg = namedtuple('feature_msg', ['timestamp', 'features'])(self\n .cam0_curr_img_msg.timestamp, features)\n return feature_msg\n\n def integrate_imu_data(self):\n \"\"\"\n Integrates the IMU gyro readings between the two consecutive images, \n which is used for both tracking prediction and 2-point RANSAC.\n\n Returns:\n cam0_R_p_c: a rotation matrix which takes a vector from previous \n cam0 frame to current cam0 frame.\n cam1_R_p_c: a rotation matrix which takes a vector from previous \n cam1 frame to current cam1 frame.\n \"\"\"\n idx_begin = None\n for i, msg in enumerate(self.imu_msg_buffer):\n if msg.timestamp >= self.cam0_prev_img_msg.timestamp - 0.01:\n idx_begin = i\n break\n idx_end = None\n for i, msg in enumerate(self.imu_msg_buffer):\n if msg.timestamp >= self.cam0_curr_img_msg.timestamp - 0.004:\n idx_end = i\n break\n if idx_begin is None or idx_end is None:\n return np.identity(3), np.identity(3)\n mean_ang_vel = np.zeros(3)\n for i in range(idx_begin, idx_end):\n mean_ang_vel += self.imu_msg_buffer[i].angular_velocity\n if idx_end > idx_begin:\n mean_ang_vel /= idx_end - idx_begin\n cam0_mean_ang_vel = self.R_cam0_imu.T @ mean_ang_vel\n cam1_mean_ang_vel = self.R_cam1_imu.T @ mean_ang_vel\n dt = (self.cam0_curr_img_msg.timestamp - self.cam0_prev_img_msg.\n timestamp)\n cam0_R_p_c = cv2.Rodrigues(cam0_mean_ang_vel * dt)[0].T\n cam1_R_p_c = cv2.Rodrigues(cam1_mean_ang_vel * dt)[0].T\n self.imu_msg_buffer = self.imu_msg_buffer[idx_end:]\n return cam0_R_p_c, cam1_R_p_c\n\n def rescale_points(self, pts1, pts2):\n \"\"\"\n Arguments:\n pts1: first set of points.\n pts2: second set of points.\n\n Returns:\n pts1: scaled first set of points.\n pts2: scaled second set of points.\n scaling_factor: scaling factor\n \"\"\"\n scaling_factor = 0\n for pt1, pt2 in zip(pts1, pts2):\n scaling_factor += np.linalg.norm(pt1)\n scaling_factor += np.linalg.norm(pt2)\n scaling_factor = (len(pts1) + len(pts2)) / scaling_factor * np.sqrt(2)\n 
for i in range(len(pts1)):\n pts1[i] *= scaling_factor\n pts2[i] *= scaling_factor\n return pts1, pts2, scaling_factor\n\n def get_grid_size(self, img):\n \"\"\"\n # Size of each grid.\n \"\"\"\n grid_height = int(np.ceil(img.shape[0] / self.config.grid_row))\n grid_width = int(np.ceil(img.shape[1] / self.config.grid_col))\n return grid_height, grid_width\n\n def predict_feature_tracking(self, input_pts, R_p_c, intrinsics):\n \"\"\"\n predictFeatureTracking Compensates the rotation between consecutive \n camera frames so that feature tracking would be more robust and fast.\n\n Arguments:\n input_pts: features in the previous image to be tracked.\n R_p_c: a rotation matrix takes a vector in the previous camera \n frame to the current camera frame. (matrix33)\n intrinsics: intrinsic matrix of the camera. (vec3)\n\n Returns:\n compensated_pts: predicted locations of the features in the \n current image based on the provided rotation.\n \"\"\"\n if len(input_pts) == 0:\n return []\n K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics\n [1], intrinsics[3]], [0.0, 0.0, 1.0]])\n H = K @ R_p_c @ np.linalg.inv(K)\n compensated_pts = []\n for i in range(len(input_pts)):\n p1 = np.array([*input_pts[i], 1.0])\n p2 = H @ p1\n compensated_pts.append(p2[:2] / p2[2])\n return np.array(compensated_pts, dtype=np.float32)\n\n def stereo_match(self, cam0_points):\n \"\"\"\n Matches features with stereo image pairs.\n\n Arguments:\n cam0_points: points in the primary image.\n\n Returns:\n cam1_points: points in the secondary image.\n inlier_markers: 1 if the match is valid, 0 otherwise.\n \"\"\"\n cam0_points = np.array(cam0_points)\n if len(cam0_points) == 0:\n return []\n R_cam0_cam1 = self.R_cam1_imu.T @ self.R_cam0_imu\n cam0_points_undistorted = self.undistort_points(cam0_points, self.\n cam0_intrinsics, self.cam0_distortion_model, self.\n cam0_distortion_coeffs, R_cam0_cam1)\n cam1_points = self.distort_points(cam0_points_undistorted, self.\n cam1_intrinsics, 
self.cam1_distortion_model, self.\n cam1_distortion_coeffs)\n cam1_points_copy = cam1_points.copy()\n cam0_points = cam0_points.astype(np.float32)\n cam1_points = cam1_points.astype(np.float32)\n cam1_points, inlier_markers, _ = cv2.calcOpticalFlowPyrLK(self.\n curr_cam0_pyramid, self.curr_cam1_pyramid, cam0_points,\n cam1_points, **self.config.lk_params)\n cam0_points_, _, _ = cv2.calcOpticalFlowPyrLK(self.\n curr_cam1_pyramid, self.curr_cam0_pyramid, cam1_points,\n cam0_points.copy(), **self.config.lk_params)\n err = np.linalg.norm(cam0_points - cam0_points_, axis=1)\n disparity = np.abs(cam1_points_copy[:, 1] - cam1_points[:, 1])\n inlier_markers = np.logical_and.reduce([inlier_markers.reshape(-1),\n err < 3, disparity < 20])\n img = self.cam1_curr_img_msg.image\n for i, point in enumerate(cam1_points):\n if not inlier_markers[i]:\n continue\n if point[0] < 0 or point[0] > img.shape[1] - 1 or point[1\n ] < 0 or point[1] > img.shape[0] - 1:\n inlier_markers[i] = 0\n t_cam0_cam1 = self.R_cam1_imu.T @ (self.t_cam0_imu - self.t_cam1_imu)\n E = skew(t_cam0_cam1) @ R_cam0_cam1\n cam0_points_undistorted = self.undistort_points(cam0_points, self.\n cam0_intrinsics, self.cam0_distortion_model, self.\n cam0_distortion_coeffs)\n cam1_points_undistorted = self.undistort_points(cam1_points, self.\n cam1_intrinsics, self.cam1_distortion_model, self.\n cam1_distortion_coeffs)\n norm_pixel_unit = 4.0 / (self.cam0_intrinsics[0] + self.\n cam0_intrinsics[1] + self.cam1_intrinsics[0] + self.\n cam1_intrinsics[1])\n for i in range(len(cam0_points_undistorted)):\n if not inlier_markers[i]:\n continue\n pt0 = np.array([*cam0_points_undistorted[i], 1.0])\n pt1 = np.array([*cam1_points_undistorted[i], 1.0])\n epipolar_line = E @ pt0\n error = np.abs((pt1 * epipolar_line)[0]) / np.linalg.norm(\n epipolar_line[:2])\n if error > self.config.stereo_threshold * norm_pixel_unit:\n inlier_markers[i] = 0\n return cam1_points, inlier_markers\n\n def undistort_points(self, pts_in, intrinsics, 
distortion_model,\n distortion_coeffs, rectification_matrix=np.identity(3),\n new_intrinsics=np.array([1, 1, 0, 0])):\n \"\"\"\n Arguments:\n pts_in: points to be undistorted.\n intrinsics: intrinsics of the camera.\n distortion_model: distortion model of the camera.\n distortion_coeffs: distortion coefficients.\n rectification_matrix:\n new_intrinsics:\n\n Returns:\n pts_out: undistorted points.\n \"\"\"\n if len(pts_in) == 0:\n return []\n pts_in = np.reshape(pts_in, (-1, 1, 2))\n K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics\n [1], intrinsics[3]], [0.0, 0.0, 1.0]])\n K_new = np.array([[new_intrinsics[0], 0.0, new_intrinsics[2]], [0.0,\n new_intrinsics[1], new_intrinsics[3]], [0.0, 0.0, 1.0]])\n if distortion_model == 'equidistant':\n pts_out = cv2.fisheye.undistortPoints(pts_in, K,\n distortion_coeffs, rectification_matrix, K_new)\n else:\n pts_out = cv2.undistortPoints(pts_in, K, distortion_coeffs,\n None, rectification_matrix, K_new)\n return pts_out.reshape((-1, 2))\n\n def distort_points(self, pts_in, intrinsics, distortion_model,\n distortion_coeffs):\n \"\"\"\n Arguments:\n pts_in: points to be distorted.\n intrinsics: intrinsics of the camera.\n distortion_model: distortion model of the camera.\n distortion_coeffs: distortion coefficients.\n\n Returns:\n pts_out: distorted points. 
(N, 2)\n \"\"\"\n if len(pts_in) == 0:\n return []\n K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics\n [1], intrinsics[3]], [0.0, 0.0, 1.0]])\n if distortion_model == 'equidistant':\n pts_out = cv2.fisheye.distortPoints(pts_in, K, distortion_coeffs)\n else:\n homogenous_pts = cv2.convertPointsToHomogeneous(pts_in)\n pts_out, _ = cv2.projectPoints(homogenous_pts, np.zeros(3), np.\n zeros(3), K, distortion_coeffs)\n return pts_out.reshape((-1, 2))\n\n def draw_features_stereo(self):\n img0 = self.cam0_curr_img_msg.image\n img1 = self.cam1_curr_img_msg.image\n kps0 = []\n kps1 = []\n matches = []\n for feature in chain.from_iterable(self.curr_features):\n matches.append(cv2.DMatch(len(kps0), len(kps0), 0))\n kps0.append(cv2.KeyPoint(*feature.cam0_point, 1))\n kps1.append(cv2.KeyPoint(*feature.cam1_point, 1))\n img = cv2.drawMatches(img0, kps0, img1, kps1, matches, None, flags=2)\n cv2.imshow('stereo features', img)\n cv2.waitKey(1)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass FeatureMetaData(object):\n <mask token>\n <mask token>\n\n\nclass FeatureMeasurement(object):\n \"\"\"\n Stereo measurement of a feature.\n \"\"\"\n\n def __init__(self):\n self.id = None\n self.u0 = None\n self.v0 = None\n self.u1 = None\n self.v1 = None\n\n\nclass ImageProcessor(object):\n \"\"\"\n Detect and track features in image sequences.\n \"\"\"\n\n def __init__(self, config):\n self.config = config\n self.is_first_img = True\n self.next_feature_id = 0\n self.detector = cv2.FastFeatureDetector_create(self.config.\n fast_threshold)\n self.imu_msg_buffer = []\n self.cam0_prev_img_msg = None\n self.cam0_curr_img_msg = None\n self.cam1_curr_img_msg = None\n self.prev_cam0_pyramid = None\n self.curr_cam0_pyramid = None\n self.curr_cam1_pyramid = None\n self.prev_features = [[] for _ in range(self.config.grid_num)]\n self.curr_features = [[] for _ in range(self.config.grid_num)]\n self.num_features = defaultdict(int)\n self.cam0_resolution = config.cam0_resolution\n self.cam0_intrinsics = config.cam0_intrinsics\n self.cam0_distortion_model = config.cam0_distortion_model\n self.cam0_distortion_coeffs = config.cam0_distortion_coeffs\n self.cam1_resolution = config.cam1_resolution\n self.cam1_intrinsics = config.cam1_intrinsics\n self.cam1_distortion_model = config.cam1_distortion_model\n self.cam1_distortion_coeffs = config.cam1_distortion_coeffs\n self.T_cam0_imu = np.linalg.inv(config.T_imu_cam0)\n self.R_cam0_imu = self.T_cam0_imu[:3, :3]\n self.t_cam0_imu = self.T_cam0_imu[:3, 3]\n self.T_cam1_imu = np.linalg.inv(config.T_imu_cam1)\n self.R_cam1_imu = self.T_cam1_imu[:3, :3]\n self.t_cam1_imu = self.T_cam1_imu[:3, 3]\n self.image_id = 0\n\n def stereo_callback(self, stereo_msg):\n \"\"\"\n Callback function for the stereo images.\n \"\"\"\n start = time.time()\n self.cam0_curr_img_msg = stereo_msg.cam0_msg\n self.cam1_curr_img_msg = stereo_msg.cam1_msg\n self.create_image_pyramids()\n if self.is_first_img:\n if not 
self.config.load_features_flag:\n self.initialize_first_frame()\n self.is_first_img = False\n elif not self.config.load_features_flag:\n t = time.time()\n self.track_features()\n print('___track_features:', time.time() - t)\n t = time.time()\n self.add_new_features()\n print('___add_new_features:', time.time() - t)\n t = time.time()\n self.prune_features()\n print('___prune_features:', time.time() - t)\n t = time.time()\n print('___draw_features_stereo:', time.time() - t)\n t = time.time()\n print('===image process elapsed:', time.time() - start,\n f'({stereo_msg.timestamp})')\n if not self.config.load_features_flag:\n try:\n self.save_features()\n return self.publish()\n finally:\n self.cam0_prev_img_msg = self.cam0_curr_img_msg\n self.prev_features = self.curr_features\n self.prev_cam0_pyramid = self.curr_cam0_pyramid\n self.curr_features = [[] for _ in range(self.config.grid_num)]\n else:\n self.load_features()\n return self.publish()\n\n def imu_callback(self, msg):\n \"\"\"\n Callback function for the imu message.\n \"\"\"\n self.imu_msg_buffer.append(msg)\n\n def create_image_pyramids(self):\n \"\"\"\n Create image pyramids used for KLT tracking.\n (Seems doesn't work in python)\n \"\"\"\n curr_cam0_img = self.cam0_curr_img_msg.image\n self.curr_cam0_pyramid = curr_cam0_img\n curr_cam1_img = self.cam1_curr_img_msg.image\n self.curr_cam1_pyramid = curr_cam1_img\n\n def initialize_first_frame(self):\n \"\"\"\n Initialize the image processing sequence, which is basically detect \n new features on the first set of stereo images.\n \"\"\"\n img = self.cam0_curr_img_msg.image\n grid_height, grid_width = self.get_grid_size(img)\n new_features = self.detector.detect(img)\n cam0_points = [kp.pt for kp in new_features]\n cam1_points, inlier_markers = self.stereo_match(cam0_points)\n cam0_inliers, cam1_inliers = [], []\n response_inliers = []\n for i, inlier in enumerate(inlier_markers):\n if not inlier:\n continue\n cam0_inliers.append(cam0_points[i])\n 
cam1_inliers.append(cam1_points[i])\n response_inliers.append(new_features[i].response)\n grid_new_features = [[] for _ in range(self.config.grid_num)]\n for i in range(len(cam0_inliers)):\n cam0_point = cam0_inliers[i]\n cam1_point = cam1_inliers[i]\n response = response_inliers[i]\n row = int(cam0_point[1] / grid_height)\n col = int(cam0_point[0] / grid_width)\n code = row * self.config.grid_col + col\n new_feature = FeatureMetaData()\n new_feature.response = response\n new_feature.cam0_point = cam0_point\n new_feature.cam1_point = cam1_point\n grid_new_features[code].append(new_feature)\n for i, new_features in enumerate(grid_new_features):\n for feature in sorted(new_features, key=lambda x: x.response,\n reverse=True)[:self.config.grid_min_feature_num]:\n self.curr_features[i].append(feature)\n self.curr_features[i][-1].id = self.next_feature_id\n self.curr_features[i][-1].lifetime = 1\n self.next_feature_id += 1\n\n def track_features(self):\n \"\"\"\n Tracker features on the newly received stereo images.\n \"\"\"\n img = self.cam0_curr_img_msg.image\n grid_height, grid_width = self.get_grid_size(img)\n cam0_R_p_c, cam1_R_p_c = self.integrate_imu_data()\n prev_ids = []\n prev_lifetime = []\n prev_cam0_points = []\n prev_cam1_points = []\n for feature in chain.from_iterable(self.prev_features):\n prev_ids.append(feature.id)\n prev_lifetime.append(feature.lifetime)\n prev_cam0_points.append(feature.cam0_point)\n prev_cam1_points.append(feature.cam1_point)\n prev_cam0_points = np.array(prev_cam0_points, dtype=np.float32)\n self.num_features['before_tracking'] = len(prev_cam0_points)\n if len(prev_cam0_points) == 0:\n return\n curr_cam0_points = self.predict_feature_tracking(prev_cam0_points,\n cam0_R_p_c, self.cam0_intrinsics)\n curr_cam0_points, track_inliers, _ = cv2.calcOpticalFlowPyrLK(self.\n prev_cam0_pyramid, self.curr_cam0_pyramid, prev_cam0_points.\n astype(np.float32), curr_cam0_points.astype(np.float32), **self\n .config.lk_params)\n for i, point in 
enumerate(curr_cam0_points):\n if not track_inliers[i]:\n continue\n if point[0] < 0 or point[0] > img.shape[1] - 1 or point[1\n ] < 0 or point[1] > img.shape[0] - 1:\n track_inliers[i] = 0\n prev_tracked_ids = select(prev_ids, track_inliers)\n prev_tracked_lifetime = select(prev_lifetime, track_inliers)\n prev_tracked_cam0_points = select(prev_cam0_points, track_inliers)\n prev_tracked_cam1_points = select(prev_cam1_points, track_inliers)\n curr_tracked_cam0_points = select(curr_cam0_points, track_inliers)\n self.num_features['after_tracking'] = len(curr_tracked_cam0_points)\n curr_cam1_points, match_inliers = self.stereo_match(\n curr_tracked_cam0_points)\n prev_matched_ids = select(prev_tracked_ids, match_inliers)\n prev_matched_lifetime = select(prev_tracked_lifetime, match_inliers)\n prev_matched_cam0_points = select(prev_tracked_cam0_points,\n match_inliers)\n prev_matched_cam1_points = select(prev_tracked_cam1_points,\n match_inliers)\n curr_matched_cam0_points = select(curr_tracked_cam0_points,\n match_inliers)\n curr_matched_cam1_points = select(curr_cam1_points, match_inliers)\n self.num_features['after_matching'] = len(curr_matched_cam0_points)\n cam0_ransac_inliers = [1] * len(prev_matched_cam0_points)\n cam1_ransac_inliers = [1] * len(prev_matched_cam1_points)\n after_ransac = 0\n for i in range(len(cam0_ransac_inliers)):\n if not (cam0_ransac_inliers[i] and cam1_ransac_inliers[i]):\n continue\n row = int(curr_matched_cam0_points[i][1] / grid_height)\n col = int(curr_matched_cam0_points[i][0] / grid_width)\n code = row * self.config.grid_col + col\n grid_new_feature = FeatureMetaData()\n grid_new_feature.id = prev_matched_ids[i]\n grid_new_feature.lifetime = prev_matched_lifetime[i] + 1\n grid_new_feature.cam0_point = curr_matched_cam0_points[i]\n grid_new_feature.cam1_point = curr_matched_cam1_points[i]\n prev_matched_lifetime[i] += 1\n self.curr_features[code].append(grid_new_feature)\n after_ransac += 1\n self.num_features['after_ransac'] = 
after_ransac\n\n def add_new_features(self):\n \"\"\"\n Detect new features on the image to ensure that the features are \n uniformly distributed on the image.\n \"\"\"\n curr_img = self.cam0_curr_img_msg.image\n grid_height, grid_width = self.get_grid_size(curr_img)\n mask = np.ones(curr_img.shape[:2], dtype='uint8')\n for feature in chain.from_iterable(self.curr_features):\n x, y = map(int, feature.cam0_point)\n mask[y - 3:y + 4, x - 3:x + 4] = 0\n new_features = self.detector.detect(curr_img, mask=mask)\n new_feature_sieve = [[] for _ in range(self.config.grid_num)]\n for feature in new_features:\n row = int(feature.pt[1] / grid_height)\n col = int(feature.pt[0] / grid_width)\n code = row * self.config.grid_col + col\n new_feature_sieve[code].append(feature)\n new_features = []\n for features in new_feature_sieve:\n if len(features) > self.config.grid_max_feature_num:\n features = sorted(features, key=lambda x: x.response,\n reverse=True)[:self.config.grid_max_feature_num]\n new_features.append(features)\n new_features = list(chain.from_iterable(new_features))\n cam0_points = [kp.pt for kp in new_features]\n cam1_points, inlier_markers = self.stereo_match(cam0_points)\n cam0_inliers, cam1_inliers, response_inliers = [], [], []\n for i, inlier in enumerate(inlier_markers):\n if not inlier:\n continue\n cam0_inliers.append(cam0_points[i])\n cam1_inliers.append(cam1_points[i])\n response_inliers.append(new_features[i].response)\n grid_new_features = [[] for _ in range(self.config.grid_num)]\n for i in range(len(cam0_inliers)):\n cam0_point = cam0_inliers[i]\n cam1_point = cam1_inliers[i]\n response = response_inliers[i]\n row = int(cam0_point[1] / grid_height)\n col = int(cam0_point[0] / grid_width)\n code = row * self.config.grid_col + col\n new_feature = FeatureMetaData()\n new_feature.response = response\n new_feature.cam0_point = cam0_point\n new_feature.cam1_point = cam1_point\n grid_new_features[code].append(new_feature)\n for i, new_features in 
enumerate(grid_new_features):\n for feature in sorted(new_features, key=lambda x: x.response,\n reverse=True)[:self.config.grid_min_feature_num]:\n self.curr_features[i].append(feature)\n self.curr_features[i][-1].id = self.next_feature_id\n self.curr_features[i][-1].lifetime = 1\n self.next_feature_id += 1\n\n def prune_features(self):\n \"\"\"\n Remove some of the features of a grid in case there are too many \n features inside of that grid, which ensures the number of features \n within each grid is bounded.\n \"\"\"\n for i, features in enumerate(self.curr_features):\n if len(features) <= self.config.grid_max_feature_num:\n continue\n self.curr_features[i] = sorted(features, key=lambda x: x.\n lifetime, reverse=True)[:self.config.grid_max_feature_num]\n\n def load_features(self):\n filename = self.config.result_dir + str(self.image_id) + '.npz'\n self.curr_features = np.load(filename, allow_pickle=True)['arr_0']\n self.image_id += 1\n\n def save_features(self):\n filename = self.config.result_dir + str(self.image_id) + '.npz'\n np.savez(filename, self.curr_features)\n self.image_id += 1\n\n def publish(self):\n \"\"\"\n Publish the features on the current image including both the \n tracked and newly detected ones.\n \"\"\"\n curr_ids = []\n curr_cam0_points = []\n curr_cam1_points = []\n for feature in chain.from_iterable(self.curr_features):\n curr_ids.append(feature.id)\n curr_cam0_points.append(feature.cam0_point)\n curr_cam1_points.append(feature.cam1_point)\n curr_cam0_points_undistorted = self.undistort_points(curr_cam0_points,\n self.cam0_intrinsics, self.cam0_distortion_model, self.\n cam0_distortion_coeffs)\n curr_cam1_points_undistorted = self.undistort_points(curr_cam1_points,\n self.cam1_intrinsics, self.cam1_distortion_model, self.\n cam1_distortion_coeffs)\n features = []\n for i in range(len(curr_ids)):\n fm = FeatureMeasurement()\n fm.id = curr_ids[i]\n fm.u0 = curr_cam0_points_undistorted[i][0]\n fm.v0 = curr_cam0_points_undistorted[i][1]\n 
fm.u1 = curr_cam1_points_undistorted[i][0]\n fm.v1 = curr_cam1_points_undistorted[i][1]\n features.append(fm)\n feature_msg = namedtuple('feature_msg', ['timestamp', 'features'])(self\n .cam0_curr_img_msg.timestamp, features)\n return feature_msg\n\n def integrate_imu_data(self):\n \"\"\"\n Integrates the IMU gyro readings between the two consecutive images, \n which is used for both tracking prediction and 2-point RANSAC.\n\n Returns:\n cam0_R_p_c: a rotation matrix which takes a vector from previous \n cam0 frame to current cam0 frame.\n cam1_R_p_c: a rotation matrix which takes a vector from previous \n cam1 frame to current cam1 frame.\n \"\"\"\n idx_begin = None\n for i, msg in enumerate(self.imu_msg_buffer):\n if msg.timestamp >= self.cam0_prev_img_msg.timestamp - 0.01:\n idx_begin = i\n break\n idx_end = None\n for i, msg in enumerate(self.imu_msg_buffer):\n if msg.timestamp >= self.cam0_curr_img_msg.timestamp - 0.004:\n idx_end = i\n break\n if idx_begin is None or idx_end is None:\n return np.identity(3), np.identity(3)\n mean_ang_vel = np.zeros(3)\n for i in range(idx_begin, idx_end):\n mean_ang_vel += self.imu_msg_buffer[i].angular_velocity\n if idx_end > idx_begin:\n mean_ang_vel /= idx_end - idx_begin\n cam0_mean_ang_vel = self.R_cam0_imu.T @ mean_ang_vel\n cam1_mean_ang_vel = self.R_cam1_imu.T @ mean_ang_vel\n dt = (self.cam0_curr_img_msg.timestamp - self.cam0_prev_img_msg.\n timestamp)\n cam0_R_p_c = cv2.Rodrigues(cam0_mean_ang_vel * dt)[0].T\n cam1_R_p_c = cv2.Rodrigues(cam1_mean_ang_vel * dt)[0].T\n self.imu_msg_buffer = self.imu_msg_buffer[idx_end:]\n return cam0_R_p_c, cam1_R_p_c\n\n def rescale_points(self, pts1, pts2):\n \"\"\"\n Arguments:\n pts1: first set of points.\n pts2: second set of points.\n\n Returns:\n pts1: scaled first set of points.\n pts2: scaled second set of points.\n scaling_factor: scaling factor\n \"\"\"\n scaling_factor = 0\n for pt1, pt2 in zip(pts1, pts2):\n scaling_factor += np.linalg.norm(pt1)\n scaling_factor += 
np.linalg.norm(pt2)\n scaling_factor = (len(pts1) + len(pts2)) / scaling_factor * np.sqrt(2)\n for i in range(len(pts1)):\n pts1[i] *= scaling_factor\n pts2[i] *= scaling_factor\n return pts1, pts2, scaling_factor\n\n def get_grid_size(self, img):\n \"\"\"\n # Size of each grid.\n \"\"\"\n grid_height = int(np.ceil(img.shape[0] / self.config.grid_row))\n grid_width = int(np.ceil(img.shape[1] / self.config.grid_col))\n return grid_height, grid_width\n\n def predict_feature_tracking(self, input_pts, R_p_c, intrinsics):\n \"\"\"\n predictFeatureTracking Compensates the rotation between consecutive \n camera frames so that feature tracking would be more robust and fast.\n\n Arguments:\n input_pts: features in the previous image to be tracked.\n R_p_c: a rotation matrix takes a vector in the previous camera \n frame to the current camera frame. (matrix33)\n intrinsics: intrinsic matrix of the camera. (vec3)\n\n Returns:\n compensated_pts: predicted locations of the features in the \n current image based on the provided rotation.\n \"\"\"\n if len(input_pts) == 0:\n return []\n K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics\n [1], intrinsics[3]], [0.0, 0.0, 1.0]])\n H = K @ R_p_c @ np.linalg.inv(K)\n compensated_pts = []\n for i in range(len(input_pts)):\n p1 = np.array([*input_pts[i], 1.0])\n p2 = H @ p1\n compensated_pts.append(p2[:2] / p2[2])\n return np.array(compensated_pts, dtype=np.float32)\n\n def stereo_match(self, cam0_points):\n \"\"\"\n Matches features with stereo image pairs.\n\n Arguments:\n cam0_points: points in the primary image.\n\n Returns:\n cam1_points: points in the secondary image.\n inlier_markers: 1 if the match is valid, 0 otherwise.\n \"\"\"\n cam0_points = np.array(cam0_points)\n if len(cam0_points) == 0:\n return []\n R_cam0_cam1 = self.R_cam1_imu.T @ self.R_cam0_imu\n cam0_points_undistorted = self.undistort_points(cam0_points, self.\n cam0_intrinsics, self.cam0_distortion_model, self.\n cam0_distortion_coeffs, 
R_cam0_cam1)\n cam1_points = self.distort_points(cam0_points_undistorted, self.\n cam1_intrinsics, self.cam1_distortion_model, self.\n cam1_distortion_coeffs)\n cam1_points_copy = cam1_points.copy()\n cam0_points = cam0_points.astype(np.float32)\n cam1_points = cam1_points.astype(np.float32)\n cam1_points, inlier_markers, _ = cv2.calcOpticalFlowPyrLK(self.\n curr_cam0_pyramid, self.curr_cam1_pyramid, cam0_points,\n cam1_points, **self.config.lk_params)\n cam0_points_, _, _ = cv2.calcOpticalFlowPyrLK(self.\n curr_cam1_pyramid, self.curr_cam0_pyramid, cam1_points,\n cam0_points.copy(), **self.config.lk_params)\n err = np.linalg.norm(cam0_points - cam0_points_, axis=1)\n disparity = np.abs(cam1_points_copy[:, 1] - cam1_points[:, 1])\n inlier_markers = np.logical_and.reduce([inlier_markers.reshape(-1),\n err < 3, disparity < 20])\n img = self.cam1_curr_img_msg.image\n for i, point in enumerate(cam1_points):\n if not inlier_markers[i]:\n continue\n if point[0] < 0 or point[0] > img.shape[1] - 1 or point[1\n ] < 0 or point[1] > img.shape[0] - 1:\n inlier_markers[i] = 0\n t_cam0_cam1 = self.R_cam1_imu.T @ (self.t_cam0_imu - self.t_cam1_imu)\n E = skew(t_cam0_cam1) @ R_cam0_cam1\n cam0_points_undistorted = self.undistort_points(cam0_points, self.\n cam0_intrinsics, self.cam0_distortion_model, self.\n cam0_distortion_coeffs)\n cam1_points_undistorted = self.undistort_points(cam1_points, self.\n cam1_intrinsics, self.cam1_distortion_model, self.\n cam1_distortion_coeffs)\n norm_pixel_unit = 4.0 / (self.cam0_intrinsics[0] + self.\n cam0_intrinsics[1] + self.cam1_intrinsics[0] + self.\n cam1_intrinsics[1])\n for i in range(len(cam0_points_undistorted)):\n if not inlier_markers[i]:\n continue\n pt0 = np.array([*cam0_points_undistorted[i], 1.0])\n pt1 = np.array([*cam1_points_undistorted[i], 1.0])\n epipolar_line = E @ pt0\n error = np.abs((pt1 * epipolar_line)[0]) / np.linalg.norm(\n epipolar_line[:2])\n if error > self.config.stereo_threshold * norm_pixel_unit:\n 
inlier_markers[i] = 0\n return cam1_points, inlier_markers\n\n def undistort_points(self, pts_in, intrinsics, distortion_model,\n distortion_coeffs, rectification_matrix=np.identity(3),\n new_intrinsics=np.array([1, 1, 0, 0])):\n \"\"\"\n Arguments:\n pts_in: points to be undistorted.\n intrinsics: intrinsics of the camera.\n distortion_model: distortion model of the camera.\n distortion_coeffs: distortion coefficients.\n rectification_matrix:\n new_intrinsics:\n\n Returns:\n pts_out: undistorted points.\n \"\"\"\n if len(pts_in) == 0:\n return []\n pts_in = np.reshape(pts_in, (-1, 1, 2))\n K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics\n [1], intrinsics[3]], [0.0, 0.0, 1.0]])\n K_new = np.array([[new_intrinsics[0], 0.0, new_intrinsics[2]], [0.0,\n new_intrinsics[1], new_intrinsics[3]], [0.0, 0.0, 1.0]])\n if distortion_model == 'equidistant':\n pts_out = cv2.fisheye.undistortPoints(pts_in, K,\n distortion_coeffs, rectification_matrix, K_new)\n else:\n pts_out = cv2.undistortPoints(pts_in, K, distortion_coeffs,\n None, rectification_matrix, K_new)\n return pts_out.reshape((-1, 2))\n\n def distort_points(self, pts_in, intrinsics, distortion_model,\n distortion_coeffs):\n \"\"\"\n Arguments:\n pts_in: points to be distorted.\n intrinsics: intrinsics of the camera.\n distortion_model: distortion model of the camera.\n distortion_coeffs: distortion coefficients.\n\n Returns:\n pts_out: distorted points. 
(N, 2)\n \"\"\"\n if len(pts_in) == 0:\n return []\n K = np.array([[intrinsics[0], 0.0, intrinsics[2]], [0.0, intrinsics\n [1], intrinsics[3]], [0.0, 0.0, 1.0]])\n if distortion_model == 'equidistant':\n pts_out = cv2.fisheye.distortPoints(pts_in, K, distortion_coeffs)\n else:\n homogenous_pts = cv2.convertPointsToHomogeneous(pts_in)\n pts_out, _ = cv2.projectPoints(homogenous_pts, np.zeros(3), np.\n zeros(3), K, distortion_coeffs)\n return pts_out.reshape((-1, 2))\n\n def draw_features_stereo(self):\n img0 = self.cam0_curr_img_msg.image\n img1 = self.cam1_curr_img_msg.image\n kps0 = []\n kps1 = []\n matches = []\n for feature in chain.from_iterable(self.curr_features):\n matches.append(cv2.DMatch(len(kps0), len(kps0), 0))\n kps0.append(cv2.KeyPoint(*feature.cam0_point, 1))\n kps1.append(cv2.KeyPoint(*feature.cam1_point, 1))\n img = cv2.drawMatches(img0, kps0, img1, kps1, matches, None, flags=2)\n cv2.imshow('stereo features', img)\n cv2.waitKey(1)\n\n\n<mask token>\n",
"step-5": "import numpy as np\r\nimport cv2\r\nimport time\r\n\r\nfrom itertools import chain, compress\r\nfrom collections import defaultdict, namedtuple\r\n\r\n\r\n\r\nclass FeatureMetaData(object):\r\n \"\"\"\r\n Contain necessary information of a feature for easy access.\r\n \"\"\"\r\n def __init__(self):\r\n self.id = None # int\r\n self.response = None # float\r\n self.lifetime = None # int\r\n self.cam0_point = None # vec2\r\n self.cam1_point = None # vec2\r\n\r\n\r\nclass FeatureMeasurement(object):\r\n \"\"\"\r\n Stereo measurement of a feature.\r\n \"\"\"\r\n def __init__(self):\r\n self.id = None\r\n self.u0 = None\r\n self.v0 = None\r\n self.u1 = None\r\n self.v1 = None\r\n\r\n\r\n\r\nclass ImageProcessor(object):\r\n \"\"\"\r\n Detect and track features in image sequences.\r\n \"\"\"\r\n def __init__(self, config):\r\n self.config = config\r\n\r\n # Indicate if this is the first image message.\r\n self.is_first_img = True\r\n\r\n # ID for the next new feature.\r\n self.next_feature_id = 0\r\n\r\n # Feature detector\r\n self.detector = cv2.FastFeatureDetector_create(self.config.fast_threshold)\r\n\r\n # IMU message buffer.\r\n self.imu_msg_buffer = []\r\n\r\n # Previous and current images\r\n self.cam0_prev_img_msg = None\r\n self.cam0_curr_img_msg = None\r\n self.cam1_curr_img_msg = None\r\n\r\n # Pyramids for previous and current image\r\n self.prev_cam0_pyramid = None\r\n self.curr_cam0_pyramid = None\r\n self.curr_cam1_pyramid = None\r\n\r\n # Features in the previous and current image.\r\n # list of lists of FeatureMetaData\r\n self.prev_features = [[] for _ in range(self.config.grid_num)] # Don't use [[]] * N\r\n self.curr_features = [[] for _ in range(self.config.grid_num)]\r\n\r\n # Number of features after each outlier removal step.\r\n # keys: before_tracking, after_tracking, after_matching, after_ransac\r\n self.num_features = defaultdict(int)\r\n\r\n # load config\r\n # Camera calibration parameters\r\n self.cam0_resolution = 
config.cam0_resolution # vec2\r\n self.cam0_intrinsics = config.cam0_intrinsics # vec4\r\n self.cam0_distortion_model = config.cam0_distortion_model # string\r\n self.cam0_distortion_coeffs = config.cam0_distortion_coeffs # vec4\r\n\r\n self.cam1_resolution = config.cam1_resolution # vec2\r\n self.cam1_intrinsics = config.cam1_intrinsics # vec4\r\n self.cam1_distortion_model = config.cam1_distortion_model # string\r\n self.cam1_distortion_coeffs = config.cam1_distortion_coeffs # vec4\r\n\r\n # Take a vector from cam0 frame to the IMU frame.\r\n self.T_cam0_imu = np.linalg.inv(config.T_imu_cam0)\r\n self.R_cam0_imu = self.T_cam0_imu[:3, :3]\r\n self.t_cam0_imu = self.T_cam0_imu[:3, 3]\r\n # Take a vector from cam1 frame to the IMU frame.\r\n self.T_cam1_imu = np.linalg.inv(config.T_imu_cam1)\r\n self.R_cam1_imu = self.T_cam1_imu[:3, :3]\r\n self.t_cam1_imu = self.T_cam1_imu[:3, 3]\r\n\r\n self.image_id = 0\r\n\r\n def stereo_callback(self, stereo_msg):\r\n \"\"\"\r\n Callback function for the stereo images.\r\n \"\"\"\r\n start = time.time()\r\n self.cam0_curr_img_msg = stereo_msg.cam0_msg\r\n self.cam1_curr_img_msg = stereo_msg.cam1_msg\r\n\r\n # Build the image pyramids once since they're used at multiple places.\r\n self.create_image_pyramids()\r\n\r\n # Detect features in the first frame.\r\n if self.is_first_img:\r\n if not self.config.load_features_flag:\r\n self.initialize_first_frame()\r\n self.is_first_img = False\r\n # Draw results.\r\n # self.draw_features_stereo()\r\n else:\r\n if not self.config.load_features_flag:\r\n # Track the feature in the previous image.\r\n t = time.time()\r\n self.track_features()\r\n print('___track_features:', time.time() - t)\r\n t = time.time()\r\n\r\n # Add new features into the current image.\r\n self.add_new_features()\r\n print('___add_new_features:', time.time() - t)\r\n t = time.time()\r\n self.prune_features()\r\n print('___prune_features:', time.time() - t)\r\n t = time.time()\r\n # Draw results.\r\n # 
self.draw_features_stereo()\r\n print('___draw_features_stereo:', time.time() - t)\r\n t = time.time()\r\n\r\n print('===image process elapsed:', time.time() - start, f'({stereo_msg.timestamp})')\r\n\r\n if not self.config.load_features_flag:\r\n try:\r\n self.save_features() \r\n return self.publish()\r\n finally:\r\n self.cam0_prev_img_msg = self.cam0_curr_img_msg\r\n self.prev_features = self.curr_features\r\n self.prev_cam0_pyramid = self.curr_cam0_pyramid\r\n\r\n # Initialize the current features to empty vectors.\r\n self.curr_features = [[] for _ in range(self.config.grid_num)]\r\n else:\r\n self.load_features()\r\n return self.publish()\r\n\r\n def imu_callback(self, msg):\r\n \"\"\"\r\n Callback function for the imu message.\r\n \"\"\"\r\n self.imu_msg_buffer.append(msg)\r\n\r\n def create_image_pyramids(self):\r\n \"\"\"\r\n Create image pyramids used for KLT tracking.\r\n (Seems doesn't work in python)\r\n \"\"\"\r\n curr_cam0_img = self.cam0_curr_img_msg.image\r\n # self.curr_cam0_pyramid = cv2.buildOpticalFlowPyramid(\r\n # curr_cam0_img, self.config.win_size, self.config.pyramid_levels, \r\n # None, cv2.BORDER_REFLECT_101, cv2.BORDER_CONSTANT, False)[1]\r\n self.curr_cam0_pyramid = curr_cam0_img\r\n\r\n curr_cam1_img = self.cam1_curr_img_msg.image\r\n # self.curr_cam1_pyramid = cv2.buildOpticalFlowPyramid(\r\n # curr_cam1_img, self.config.win_size, self.config.pyramid_levels, \r\n # None, cv2.BORDER_REFLECT_101, cv2.BORDER_CONSTANT, False)[1]\r\n self.curr_cam1_pyramid = curr_cam1_img\r\n\r\n def initialize_first_frame(self):\r\n \"\"\"\r\n Initialize the image processing sequence, which is basically detect \r\n new features on the first set of stereo images.\r\n \"\"\"\r\n img = self.cam0_curr_img_msg.image\r\n grid_height, grid_width = self.get_grid_size(img)\r\n\r\n # Detect new features on the frist image.\r\n new_features = self.detector.detect(img)\r\n\r\n # Find the stereo matched points for the newly detected features.\r\n cam0_points = [kp.pt 
for kp in new_features]\r\n cam1_points, inlier_markers = self.stereo_match(cam0_points)\r\n\r\n cam0_inliers, cam1_inliers = [], []\r\n response_inliers = []\r\n for i, inlier in enumerate(inlier_markers):\r\n if not inlier:\r\n continue\r\n cam0_inliers.append(cam0_points[i])\r\n cam1_inliers.append(cam1_points[i])\r\n response_inliers.append(new_features[i].response)\r\n # len(cam0_inliers) < max(5, 0.1 * len(new_features))\r\n\r\n # Group the features into grids\r\n grid_new_features = [[] for _ in range(self.config.grid_num)]\r\n\r\n for i in range(len(cam0_inliers)):\r\n cam0_point = cam0_inliers[i]\r\n cam1_point = cam1_inliers[i]\r\n response = response_inliers[i]\r\n\r\n row = int(cam0_point[1] / grid_height)\r\n col = int(cam0_point[0] / grid_width)\r\n code = row*self.config.grid_col + col\r\n\r\n new_feature = FeatureMetaData()\r\n new_feature.response = response\r\n new_feature.cam0_point = cam0_point\r\n new_feature.cam1_point = cam1_point\r\n grid_new_features[code].append(new_feature)\r\n\r\n # Sort the new features in each grid based on its response.\r\n # And collect new features within each grid with high response.\r\n for i, new_features in enumerate(grid_new_features):\r\n for feature in sorted(new_features, key=lambda x:x.response, \r\n reverse=True)[:self.config.grid_min_feature_num]:\r\n self.curr_features[i].append(feature)\r\n self.curr_features[i][-1].id = self.next_feature_id\r\n self.curr_features[i][-1].lifetime = 1\r\n self.next_feature_id += 1\r\n\r\n def track_features(self):\r\n \"\"\"\r\n Tracker features on the newly received stereo images.\r\n \"\"\"\r\n img = self.cam0_curr_img_msg.image\r\n grid_height, grid_width = self.get_grid_size(img)\r\n\r\n # Compute a rough relative rotation which takes a vector \r\n # from the previous frame to the current frame.\r\n cam0_R_p_c, cam1_R_p_c = self.integrate_imu_data()\r\n\r\n # Organize the features in the previous image.\r\n prev_ids = []\r\n prev_lifetime = []\r\n prev_cam0_points = 
[]\r\n prev_cam1_points = []\r\n\r\n for feature in chain.from_iterable(self.prev_features):\r\n prev_ids.append(feature.id)\r\n prev_lifetime.append(feature.lifetime)\r\n prev_cam0_points.append(feature.cam0_point)\r\n prev_cam1_points.append(feature.cam1_point)\r\n prev_cam0_points = np.array(prev_cam0_points, dtype=np.float32)\r\n\r\n # Number of the features before tracking.\r\n self.num_features['before_tracking'] = len(prev_cam0_points)\r\n\r\n # Abort tracking if there is no features in the previous frame.\r\n if len(prev_cam0_points) == 0:\r\n return\r\n\r\n # Track features using LK optical flow method.\r\n curr_cam0_points = self.predict_feature_tracking(\r\n prev_cam0_points, cam0_R_p_c, self.cam0_intrinsics)\r\n\r\n curr_cam0_points, track_inliers, _ = cv2.calcOpticalFlowPyrLK(\r\n self.prev_cam0_pyramid, self.curr_cam0_pyramid,\r\n prev_cam0_points.astype(np.float32), \r\n curr_cam0_points.astype(np.float32), \r\n **self.config.lk_params)\r\n \r\n # Mark those tracked points out of the image region as untracked.\r\n for i, point in enumerate(curr_cam0_points):\r\n if not track_inliers[i]:\r\n continue\r\n if (point[0] < 0 or point[0] > img.shape[1]-1 or \r\n point[1] < 0 or point[1] > img.shape[0]-1):\r\n track_inliers[i] = 0\r\n\r\n # Collect the tracked points.\r\n prev_tracked_ids = select(prev_ids, track_inliers)\r\n prev_tracked_lifetime = select(prev_lifetime, track_inliers)\r\n prev_tracked_cam0_points = select(prev_cam0_points, track_inliers)\r\n prev_tracked_cam1_points = select(prev_cam1_points, track_inliers)\r\n curr_tracked_cam0_points = select(curr_cam0_points, track_inliers)\r\n\r\n # Number of features left after tracking.\r\n self.num_features['after_tracking'] = len(curr_tracked_cam0_points)\r\n\r\n # Outlier removal involves three steps, which forms a close\r\n # loop between the previous and current frames of cam0 (left)\r\n # and cam1 (right). 
Assuming the stereo matching between the\r\n # previous cam0 and cam1 images are correct, the three steps are:\r\n #\r\n # prev frames cam0 ----------> cam1\r\n # | |\r\n # |ransac |ransac\r\n # | stereo match |\r\n # curr frames cam0 ----------> cam1\r\n #\r\n # 1) Stereo matching between current images of cam0 and cam1.\r\n # 2) RANSAC between previous and current images of cam0.\r\n # 3) RANSAC between previous and current images of cam1.\r\n #\r\n # For Step 3, tracking between the images is no longer needed.\r\n # The stereo matching results are directly used in the RANSAC.\r\n\r\n # Step 1: stereo matching.\r\n curr_cam1_points, match_inliers = self.stereo_match(\r\n curr_tracked_cam0_points)\r\n\r\n prev_matched_ids = select(prev_tracked_ids, match_inliers)\r\n prev_matched_lifetime = select(prev_tracked_lifetime, match_inliers)\r\n prev_matched_cam0_points = select(prev_tracked_cam0_points, match_inliers)\r\n prev_matched_cam1_points = select(prev_tracked_cam1_points, match_inliers)\r\n curr_matched_cam0_points = select(curr_tracked_cam0_points, match_inliers)\r\n curr_matched_cam1_points = select(curr_cam1_points, match_inliers)\r\n\r\n # Number of features left after stereo matching.\r\n self.num_features['after_matching'] = len(curr_matched_cam0_points)\r\n\r\n # Step 2 and 3: RANSAC on temporal image pairs of cam0 and cam1.\r\n # cam0_ransac_inliers = self.two_point_ransac(\r\n # prev_matched_cam0_points, curr_matched_cam0_points,\r\n # cam0_R_p_c, self.cam0_intrinsics, \r\n # self.cam0_distortion_model, self.cam0_distortion_coeffs, \r\n # self.config.ransac_threshold, 0.99)\r\n\r\n # cam1_ransac_inliers = self.two_point_ransac(\r\n # prev_matched_cam1_points, curr_matched_cam1_points,\r\n # cam1_R_p_c, self.cam1_intrinsics, \r\n # self.cam1_distortion_model, self.cam1_distortion_coeffs, \r\n # self.config.ransac_threshold, 0.99)\r\n cam0_ransac_inliers = [1] * len(prev_matched_cam0_points)\r\n cam1_ransac_inliers = [1] * 
len(prev_matched_cam1_points)\r\n\r\n # Number of features after ransac.\r\n after_ransac = 0\r\n for i in range(len(cam0_ransac_inliers)):\r\n if not (cam0_ransac_inliers[i] and cam1_ransac_inliers[i]):\r\n continue \r\n row = int(curr_matched_cam0_points[i][1] / grid_height)\r\n col = int(curr_matched_cam0_points[i][0] / grid_width)\r\n code = row * self.config.grid_col + col\r\n\r\n grid_new_feature = FeatureMetaData()\r\n grid_new_feature.id = prev_matched_ids[i]\r\n grid_new_feature.lifetime = prev_matched_lifetime[i] + 1\r\n grid_new_feature.cam0_point = curr_matched_cam0_points[i]\r\n grid_new_feature.cam1_point = curr_matched_cam1_points[i]\r\n prev_matched_lifetime[i] += 1\r\n\r\n self.curr_features[code].append(grid_new_feature)\r\n after_ransac += 1\r\n self.num_features['after_ransac'] = after_ransac\r\n\r\n # Compute the tracking rate.\r\n # prev_feature_num = sum([len(x) for x in self.prev_features])\r\n # curr_feature_num = sum([len(x) for x in self.curr_features])\r\n \r\n\r\n def add_new_features(self):\r\n \"\"\"\r\n Detect new features on the image to ensure that the features are \r\n uniformly distributed on the image.\r\n \"\"\"\r\n curr_img = self.cam0_curr_img_msg.image\r\n grid_height, grid_width = self.get_grid_size(curr_img)\r\n\r\n # Create a mask to avoid redetecting existing features.\r\n mask = np.ones(curr_img.shape[:2], dtype='uint8')\r\n\r\n for feature in chain.from_iterable(self.curr_features):\r\n x, y = map(int, feature.cam0_point)\r\n mask[y-3:y+4, x-3:x+4] = 0\r\n\r\n # Detect new features.\r\n new_features = self.detector.detect(curr_img, mask=mask)\r\n\r\n # Collect the new detected features based on the grid.\r\n # Select the ones with top response within each grid afterwards.\r\n new_feature_sieve = [[] for _ in range(self.config.grid_num)]\r\n for feature in new_features:\r\n row = int(feature.pt[1] / grid_height)\r\n col = int(feature.pt[0] / grid_width)\r\n code = row * self.config.grid_col + col\r\n 
new_feature_sieve[code].append(feature)\r\n\r\n new_features = []\r\n for features in new_feature_sieve:\r\n if len(features) > self.config.grid_max_feature_num:\r\n features = sorted(features, key=lambda x:x.response, \r\n reverse=True)[:self.config.grid_max_feature_num]\r\n new_features.append(features)\r\n new_features = list(chain.from_iterable(new_features))\r\n\r\n # Find the stereo matched points for the newly detected features.\r\n cam0_points = [kp.pt for kp in new_features]\r\n cam1_points, inlier_markers = self.stereo_match(cam0_points)\r\n\r\n cam0_inliers, cam1_inliers, response_inliers = [], [], []\r\n for i, inlier in enumerate(inlier_markers):\r\n if not inlier:\r\n continue\r\n cam0_inliers.append(cam0_points[i])\r\n cam1_inliers.append(cam1_points[i])\r\n response_inliers.append(new_features[i].response)\r\n # if len(cam0_inliers) < max(5, len(new_features) * 0.1):\r\n\r\n # Group the features into grids\r\n grid_new_features = [[] for _ in range(self.config.grid_num)]\r\n for i in range(len(cam0_inliers)):\r\n cam0_point = cam0_inliers[i]\r\n cam1_point = cam1_inliers[i]\r\n response = response_inliers[i]\r\n\r\n row = int(cam0_point[1] / grid_height)\r\n col = int(cam0_point[0] / grid_width)\r\n code = row*self.config.grid_col + col\r\n\r\n new_feature = FeatureMetaData()\r\n new_feature.response = response\r\n new_feature.cam0_point = cam0_point\r\n new_feature.cam1_point = cam1_point\r\n grid_new_features[code].append(new_feature)\r\n\r\n # Sort the new features in each grid based on its response.\r\n # And collect new features within each grid with high response.\r\n for i, new_features in enumerate(grid_new_features):\r\n for feature in sorted(new_features, key=lambda x:x.response, \r\n reverse=True)[:self.config.grid_min_feature_num]:\r\n self.curr_features[i].append(feature)\r\n self.curr_features[i][-1].id = self.next_feature_id\r\n self.curr_features[i][-1].lifetime = 1\r\n self.next_feature_id += 1\r\n\r\n def prune_features(self):\r\n 
\"\"\"\r\n Remove some of the features of a grid in case there are too many \r\n features inside of that grid, which ensures the number of features \r\n within each grid is bounded.\r\n \"\"\"\r\n for i, features in enumerate(self.curr_features):\r\n # Continue if the number of features in this grid does\r\n # not exceed the upper bound.\r\n if len(features) <= self.config.grid_max_feature_num:\r\n continue\r\n self.curr_features[i] = sorted(features, key=lambda x:x.lifetime, \r\n reverse=True)[:self.config.grid_max_feature_num]\r\n\r\n def load_features(self):\r\n\r\n # load features \r\n filename = self.config.result_dir + str(self.image_id) + \".npz\"\r\n self.curr_features = np.load(filename, allow_pickle=True)['arr_0']\r\n self.image_id += 1 \r\n\r\n def save_features(self):\r\n \r\n # save features \r\n filename = self.config.result_dir + str(self.image_id) + \".npz\"\r\n np.savez(filename, self.curr_features)\r\n self.image_id += 1 \r\n\r\n def publish(self):\r\n \"\"\"\r\n Publish the features on the current image including both the \r\n tracked and newly detected ones.\r\n \"\"\"\r\n\r\n curr_ids = []\r\n curr_cam0_points = []\r\n curr_cam1_points = []\r\n for feature in chain.from_iterable(self.curr_features):\r\n curr_ids.append(feature.id)\r\n curr_cam0_points.append(feature.cam0_point)\r\n curr_cam1_points.append(feature.cam1_point)\r\n\r\n curr_cam0_points_undistorted = self.undistort_points(\r\n curr_cam0_points, self.cam0_intrinsics,\r\n self.cam0_distortion_model, self.cam0_distortion_coeffs)\r\n curr_cam1_points_undistorted = self.undistort_points(\r\n curr_cam1_points, self.cam1_intrinsics,\r\n self.cam1_distortion_model, self.cam1_distortion_coeffs)\r\n\r\n features = []\r\n for i in range(len(curr_ids)):\r\n fm = FeatureMeasurement()\r\n fm.id = curr_ids[i]\r\n fm.u0 = curr_cam0_points_undistorted[i][0]\r\n fm.v0 = curr_cam0_points_undistorted[i][1]\r\n fm.u1 = curr_cam1_points_undistorted[i][0]\r\n fm.v1 = 
curr_cam1_points_undistorted[i][1]\r\n features.append(fm)\r\n\r\n feature_msg = namedtuple('feature_msg', ['timestamp', 'features'])(\r\n self.cam0_curr_img_msg.timestamp, features)\r\n return feature_msg\r\n\r\n def integrate_imu_data(self):\r\n \"\"\"\r\n Integrates the IMU gyro readings between the two consecutive images, \r\n which is used for both tracking prediction and 2-point RANSAC.\r\n\r\n Returns:\r\n cam0_R_p_c: a rotation matrix which takes a vector from previous \r\n cam0 frame to current cam0 frame.\r\n cam1_R_p_c: a rotation matrix which takes a vector from previous \r\n cam1 frame to current cam1 frame.\r\n \"\"\"\r\n # Find the start and the end limit within the imu msg buffer.\r\n idx_begin = None\r\n for i, msg in enumerate(self.imu_msg_buffer):\r\n if msg.timestamp >= self.cam0_prev_img_msg.timestamp - 0.01:\r\n idx_begin = i\r\n break\r\n\r\n idx_end = None\r\n for i, msg in enumerate(self.imu_msg_buffer):\r\n if msg.timestamp >= self.cam0_curr_img_msg.timestamp - 0.004:\r\n idx_end = i\r\n break\r\n\r\n if idx_begin is None or idx_end is None:\r\n return np.identity(3), np.identity(3)\r\n\r\n # Compute the mean angular velocity in the IMU frame.\r\n mean_ang_vel = np.zeros(3)\r\n for i in range(idx_begin, idx_end):\r\n mean_ang_vel += self.imu_msg_buffer[i].angular_velocity\r\n\r\n if idx_end > idx_begin:\r\n mean_ang_vel /= (idx_end - idx_begin)\r\n\r\n # Transform the mean angular velocity from the IMU frame to the \r\n # cam0 and cam1 frames.\r\n cam0_mean_ang_vel = self.R_cam0_imu.T @ mean_ang_vel\r\n cam1_mean_ang_vel = self.R_cam1_imu.T @ mean_ang_vel\r\n\r\n # Compute the relative rotation.\r\n dt = self.cam0_curr_img_msg.timestamp - self.cam0_prev_img_msg.timestamp\r\n cam0_R_p_c = cv2.Rodrigues(cam0_mean_ang_vel * dt)[0].T\r\n cam1_R_p_c = cv2.Rodrigues(cam1_mean_ang_vel * dt)[0].T\r\n\r\n # Delete the useless and used imu messages.\r\n self.imu_msg_buffer = self.imu_msg_buffer[idx_end:]\r\n return cam0_R_p_c, cam1_R_p_c\r\n\r\n def 
rescale_points(self, pts1, pts2):\r\n \"\"\"\r\n Arguments:\r\n pts1: first set of points.\r\n pts2: second set of points.\r\n\r\n Returns:\r\n pts1: scaled first set of points.\r\n pts2: scaled second set of points.\r\n scaling_factor: scaling factor\r\n \"\"\"\r\n scaling_factor = 0\r\n for pt1, pt2 in zip(pts1, pts2):\r\n scaling_factor += np.linalg.norm(pt1)\r\n scaling_factor += np.linalg.norm(pt2)\r\n\r\n scaling_factor = (len(pts1) + len(pts2)) / scaling_factor * np.sqrt(2)\r\n\r\n for i in range(len(pts1)):\r\n pts1[i] *= scaling_factor\r\n pts2[i] *= scaling_factor\r\n\r\n return pts1, pts2, scaling_factor\r\n\r\n # def two_point_ransac(self, pts1, pts2, R_p_c, intrinsics, \r\n # distortion_model, distortion_coeffs,\r\n # inlier_error, success_probability):\r\n # \"\"\"\r\n # Applies two point ransac algorithm to mark the inliers in the input set.\r\n\r\n # Arguments:\r\n # pts1: first set of points.\r\n # pts2: second set of points.\r\n # R_p_c: a rotation matrix takes a vector in the previous camera frame \r\n # to the current camera frame.\r\n # intrinsics: intrinsics of the camera.\r\n # distortion_model: distortion model of the camera.\r\n # distortion_coeffs: distortion coefficients.\r\n # inlier_error: acceptable error to be considered as an inlier.\r\n # success_probability: the required probability of success.\r\n\r\n # Returns:\r\n # inlier_flag: 1 for inliers and 0 for outliers.\r\n # \"\"\"\r\n # # Check the size of input point size.\r\n # assert len(pts1) == len(pts2), 'Sets of different size are used...'\r\n\r\n # norm_pixel_unit = 2.0 / (intrinsics[0] + intrinsics[1])\r\n # iter_num = int(np.ceil(np.log(1-success_probability) / np.log(1-0.7*0.7)))\r\n\r\n # # Initially, mark all points as inliers.\r\n # inlier_markers = [1] * len(pts1)\r\n\r\n # # Undistort all the points.\r\n # pts1_undistorted = self.undistort_points(pts1, intrinsics, \r\n # distortion_model, distortion_coeffs)\r\n # pts2_undistorted = self.undistort_points(pts2, 
intrinsics, \r\n # distortion_model, distortion_coeffs)\r\n\r\n # # Compenstate the points in the previous image with\r\n # # the relative rotation.\r\n # for i, pt in enumerate(pts1_undistorted):\r\n # pt_h = np.array([*pt, 1.0])\r\n # pt_hc = R_p_c @ pt_h\r\n # pts1_undistorted[i] = pt_hc[:2]\r\n\r\n # # Normalize the points to gain numerical stability.\r\n # pts1_undistorted, pts2_undistorted, scaling_factor = self.rescale_points(\r\n # pts1_undistorted, pts2_undistorted)\r\n\r\n # # Compute the difference between previous and current points,\r\n # # which will be used frequently later.\r\n # pts_diff = []\r\n # for pt1, pt2 in zip(pts1_undistorted, pts2_undistorted):\r\n # pts_diff.append(pt1 - pt2)\r\n\r\n # # Mark the point pairs with large difference directly.\r\n # # BTW, the mean distance of the rest of the point pairs are computed.\r\n # mean_pt_distance = 0.0\r\n # raw_inlier_count = 0\r\n # for i, pt_diff in enumerate(pts_diff):\r\n # distance = np.linalg.norm(pt_diff)\r\n # # 25 pixel distance is a pretty large tolerance for normal motion.\r\n # # However, to be used with aggressive motion, this tolerance should\r\n # # be increased significantly to match the usage.\r\n # if distance > 50.0 * norm_pixel_unit:\r\n # inlier_markers[i] = 0\r\n # else:\r\n # mean_pt_distance += distance\r\n # raw_inlier_count += 1\r\n\r\n # mean_pt_distance /= raw_inlier_count\r\n\r\n # # If the current number of inliers is less than 3, just mark\r\n # # all input as outliers. This case can happen with fast\r\n # # rotation where very few features are tracked.\r\n # if raw_inlier_count < 3:\r\n # return [0] * len(inlier_markers)\r\n\r\n # # Before doing 2-point RANSAC, we have to check if the motion\r\n # # is degenerated, meaning that there is no translation between\r\n # # the frames, in which case, the model of the RANSAC does not work. 
\r\n # # If so, the distance between the matched points will be almost 0.\r\n # if mean_pt_distance < norm_pixel_unit:\r\n # for i, pt_diff in enumerate(pts_diff):\r\n # if inlier_markers[i] == 0:\r\n # continue\r\n # if np.linalg.norm(pt_diff) > inlier_error * norm_pixel_unit:\r\n # inlier_markers[i] = 0\r\n # return inlier_markers\r\n\r\n # # In the case of general motion, the RANSAC model can be applied.\r\n # # The three column corresponds to tx, ty, and tz respectively.\r\n # coeff_t = []\r\n # for i, pt_diff in enumerate(pts_diff):\r\n # coeff_t.append(np.array([\r\n # pt_diff[1],\r\n # -pt_diff[0],\r\n # pts1_undistorted[0] * pts2_undistorted[1] - \r\n # pts1_undistorted[1] * pts2_undistorted[0]]))\r\n # coeff_t = np.array(coeff_t)\r\n\r\n # raw_inlier_idx = np.where(inlier_markers)[0]\r\n # best_inlier_set = []\r\n # best_error = 1e10\r\n\r\n # for i in range(iter_num):\r\n # # Randomly select two point pairs.\r\n # # Although this is a weird way of selecting two pairs, but it\r\n # # is able to efficiently avoid selecting repetitive pairs.\r\n # pair_idx1 = np.random.choice(raw_inlier_idx)\r\n # idx_diff = np.random.randint(1, len(raw_inlier_idx))\r\n # pair_idx2 = (pair_idx1+idx_diff) % len(raw_inlier_idx)\r\n\r\n # # Construct the model.\r\n # coeff_t_ = np.array([coeff_t[pair_idx1], coeff_t[pair_idx2]])\r\n # coeff_tx = coeff_t_[:, 0]\r\n # coeff_ty = coeff_t_[:, 1]\r\n # coeff_tz = coeff_t_[:, 2]\r\n # coeff_l1_norm = np.linalg.norm(coeff_t_, 1, axis=0)\r\n # base_indicator = np.argmin(coeff_l1_norm)\r\n\r\n # if base_indicator == 0:\r\n # A = np.array([coeff_ty, coeff_tz]).T\r\n # solution = np.linalg.inv(A) @ (-coeff_tx)\r\n # model = [1.0, *solution]\r\n # elif base_indicator == 1:\r\n # A = np.array([coeff_tx, coeff_tz]).T\r\n # solution = np.linalg.inv(A) @ (-coeff_ty)\r\n # model = [solution[0], 1.0, solution[1]]\r\n # else:\r\n # A = np.array([coeff_tx, coeff_ty]).T\r\n # solution = np.linalg.inv(A) @ (-coeff_tz)\r\n # model = [*solution, 
1.0]\r\n\r\n # # Find all the inliers among point pairs.\r\n # error = coeff_t @ model\r\n\r\n # inlier_set = []\r\n # for i, e in enumerate(error):\r\n # if inlier_markers[i] == 0:\r\n # continue\r\n # if np.abs(e) < inlier_error * norm_pixel_unit:\r\n # inlier_set.append(i)\r\n\r\n # # If the number of inliers is small, the current model is \r\n # # probably wrong.\r\n # if len(inlier_set) < 0.2 * len(pts1_undistorted):\r\n # continue\r\n\r\n # # Refit the model using all of the possible inliers.\r\n # coeff_t_ = coeff_t[inlier_set]\r\n # coeff_tx_better = coeff_t_[:, 0]\r\n # coeff_ty_better = coeff_t_[:, 1]\r\n # coeff_tz_better = coeff_t_[:, 2]\r\n\r\n # if base_indicator == 0:\r\n # A = np.array([coeff_ty_better, coeff_tz_better]).T\r\n # solution = np.linalg.inv(A.T @ A) @ A.T @ (-coeff_tx_better)\r\n # model_better = [1.0, *solution]\r\n # elif base_indicator == 1:\r\n # A = np.array([coeff_tx_better, coeff_tz_better]).T\r\n # solution = np.linalg.inv(A.T @ A) @ A.T @ (-coeff_ty_better)\r\n # model_better = [solution[0], 1.0, solution[1]]\r\n # else:\r\n # A = np.array([coeff_tx_better, coeff_ty_better]).T\r\n # solution = np.linalg.inv(A.T @ A) @ A.T @ (-coeff_tz_better)\r\n # model_better = [*solution, 1.0]\r\n\r\n # # Compute the error and upate the best model if possible.\r\n # new_error = coeff_t @ model_better\r\n # this_error = np.mean([np.abs(new_error[i]) for i in inlier_set])\r\n\r\n # if len(inlier_set) > best_inlier_set:\r\n # best_error = this_error\r\n # best_inlier_set = inlier_set\r\n\r\n # # Fill in the markers.\r\n # inlier_markers = [0] * len(pts1)\r\n # for i in best_inlier_set:\r\n # inlier_markers[i] = 1\r\n\r\n # return inlier_markers\r\n\r\n def get_grid_size(self, img):\r\n \"\"\"\r\n # Size of each grid.\r\n \"\"\"\r\n grid_height = int(np.ceil(img.shape[0] / self.config.grid_row))\r\n grid_width = int(np.ceil(img.shape[1] / self.config.grid_col))\r\n return grid_height, grid_width\r\n\r\n def predict_feature_tracking(self, 
input_pts, R_p_c, intrinsics):\r\n \"\"\"\r\n predictFeatureTracking Compensates the rotation between consecutive \r\n camera frames so that feature tracking would be more robust and fast.\r\n\r\n Arguments:\r\n input_pts: features in the previous image to be tracked.\r\n R_p_c: a rotation matrix takes a vector in the previous camera \r\n frame to the current camera frame. (matrix33)\r\n intrinsics: intrinsic matrix of the camera. (vec3)\r\n\r\n Returns:\r\n compensated_pts: predicted locations of the features in the \r\n current image based on the provided rotation.\r\n \"\"\"\r\n # Return directly if there are no input features.\r\n if len(input_pts) == 0:\r\n return []\r\n\r\n # Intrinsic matrix.\r\n K = np.array([\r\n [intrinsics[0], 0.0, intrinsics[2]],\r\n [0.0, intrinsics[1], intrinsics[3]],\r\n [0.0, 0.0, 1.0]])\r\n H = K @ R_p_c @ np.linalg.inv(K)\r\n\r\n compensated_pts = []\r\n for i in range(len(input_pts)):\r\n p1 = np.array([*input_pts[i], 1.0])\r\n p2 = H @ p1\r\n compensated_pts.append(p2[:2] / p2[2])\r\n return np.array(compensated_pts, dtype=np.float32)\r\n\r\n def stereo_match(self, cam0_points):\r\n \"\"\"\r\n Matches features with stereo image pairs.\r\n\r\n Arguments:\r\n cam0_points: points in the primary image.\r\n\r\n Returns:\r\n cam1_points: points in the secondary image.\r\n inlier_markers: 1 if the match is valid, 0 otherwise.\r\n \"\"\"\r\n cam0_points = np.array(cam0_points)\r\n if len(cam0_points) == 0:\r\n return []\r\n\r\n R_cam0_cam1 = self.R_cam1_imu.T @ self.R_cam0_imu\r\n cam0_points_undistorted = self.undistort_points(\r\n cam0_points, self.cam0_intrinsics,\r\n self.cam0_distortion_model, self.cam0_distortion_coeffs, R_cam0_cam1)\r\n cam1_points = self.distort_points(\r\n cam0_points_undistorted, self.cam1_intrinsics,\r\n self.cam1_distortion_model, self.cam1_distortion_coeffs)\r\n cam1_points_copy = cam1_points.copy()\r\n\r\n # Track features using LK optical flow method.\r\n cam0_points = cam0_points.astype(np.float32)\r\n 
cam1_points = cam1_points.astype(np.float32)\r\n cam1_points, inlier_markers, _ = cv2.calcOpticalFlowPyrLK(\r\n self.curr_cam0_pyramid, self.curr_cam1_pyramid,\r\n cam0_points, cam1_points, **self.config.lk_params)\r\n\r\n cam0_points_, _, _ = cv2.calcOpticalFlowPyrLK(\r\n self.curr_cam1_pyramid, self.curr_cam0_pyramid, \r\n cam1_points, cam0_points.copy(), **self.config.lk_params)\r\n err = np.linalg.norm(cam0_points - cam0_points_, axis=1)\r\n\r\n # cam1_points_undistorted = self.undistort_points(\r\n # cam1_points, self.cam1_intrinsics,\r\n # self.cam1_distortion_model, self.cam1_distortion_coeffs, R_cam0_cam1)\r\n disparity = np.abs(cam1_points_copy[:, 1] - cam1_points[:, 1])\r\n \r\n\r\n \r\n inlier_markers = np.logical_and.reduce(\r\n [inlier_markers.reshape(-1), err < 3, disparity < 20])\r\n\r\n # Mark those tracked points out of the image region as untracked.\r\n img = self.cam1_curr_img_msg.image\r\n for i, point in enumerate(cam1_points):\r\n if not inlier_markers[i]:\r\n continue\r\n if (point[0] < 0 or point[0] > img.shape[1]-1 or \r\n point[1] < 0 or point[1] > img.shape[0]-1):\r\n inlier_markers[i] = 0\r\n\r\n # Compute the relative rotation between the cam0 frame and cam1 frame.\r\n t_cam0_cam1 = self.R_cam1_imu.T @ (self.t_cam0_imu - self.t_cam1_imu)\r\n # Compute the essential matrix.\r\n E = skew(t_cam0_cam1) @ R_cam0_cam1\r\n\r\n # Further remove outliers based on the known essential matrix.\r\n cam0_points_undistorted = self.undistort_points(\r\n cam0_points, self.cam0_intrinsics,\r\n self.cam0_distortion_model, self.cam0_distortion_coeffs)\r\n cam1_points_undistorted = self.undistort_points(\r\n cam1_points, self.cam1_intrinsics,\r\n self.cam1_distortion_model, self.cam1_distortion_coeffs)\r\n\r\n norm_pixel_unit = 4.0 / (\r\n self.cam0_intrinsics[0] + self.cam0_intrinsics[1] +\r\n self.cam1_intrinsics[0] + self.cam1_intrinsics[1])\r\n\r\n for i in range(len(cam0_points_undistorted)):\r\n if not inlier_markers[i]:\r\n continue\r\n pt0 = 
np.array([*cam0_points_undistorted[i], 1.0])\r\n pt1 = np.array([*cam1_points_undistorted[i], 1.0])\r\n epipolar_line = E @ pt0\r\n error = np.abs((pt1 * epipolar_line)[0]) / np.linalg.norm(\r\n epipolar_line[:2])\r\n\r\n if error > self.config.stereo_threshold * norm_pixel_unit:\r\n inlier_markers[i] = 0\r\n\r\n return cam1_points, inlier_markers\r\n\r\n def undistort_points(self, pts_in, intrinsics, distortion_model, \r\n distortion_coeffs, rectification_matrix=np.identity(3),\r\n new_intrinsics=np.array([1, 1, 0, 0])):\r\n \"\"\"\r\n Arguments:\r\n pts_in: points to be undistorted.\r\n intrinsics: intrinsics of the camera.\r\n distortion_model: distortion model of the camera.\r\n distortion_coeffs: distortion coefficients.\r\n rectification_matrix:\r\n new_intrinsics:\r\n\r\n Returns:\r\n pts_out: undistorted points.\r\n \"\"\"\r\n if len(pts_in) == 0:\r\n return []\r\n \r\n pts_in = np.reshape(pts_in, (-1, 1, 2))\r\n K = np.array([\r\n [intrinsics[0], 0.0, intrinsics[2]],\r\n [0.0, intrinsics[1], intrinsics[3]],\r\n [0.0, 0.0, 1.0]])\r\n K_new = np.array([\r\n [new_intrinsics[0], 0.0, new_intrinsics[2]],\r\n [0.0, new_intrinsics[1], new_intrinsics[3]],\r\n [0.0, 0.0, 1.0]])\r\n\r\n if distortion_model == 'equidistant':\r\n pts_out = cv2.fisheye.undistortPoints(pts_in, K, distortion_coeffs,\r\n rectification_matrix, K_new)\r\n else: # default: 'radtan'\r\n pts_out = cv2.undistortPoints(pts_in, K, distortion_coeffs, None,\r\n rectification_matrix, K_new)\r\n return pts_out.reshape((-1, 2))\r\n\r\n def distort_points(self, pts_in, intrinsics, distortion_model, \r\n distortion_coeffs):\r\n \"\"\"\r\n Arguments:\r\n pts_in: points to be distorted.\r\n intrinsics: intrinsics of the camera.\r\n distortion_model: distortion model of the camera.\r\n distortion_coeffs: distortion coefficients.\r\n\r\n Returns:\r\n pts_out: distorted points. 
(N, 2)\r\n \"\"\"\r\n if len(pts_in) == 0:\r\n return []\r\n\r\n K = np.array([\r\n [intrinsics[0], 0.0, intrinsics[2]],\r\n [0.0, intrinsics[1], intrinsics[3]],\r\n [0.0, 0.0, 1.0]])\r\n\r\n if distortion_model == 'equidistant':\r\n pts_out = cv2.fisheye.distortPoints(pts_in, K, distortion_coeffs)\r\n else: # default: 'radtan'\r\n homogenous_pts = cv2.convertPointsToHomogeneous(pts_in)\r\n pts_out, _ = cv2.projectPoints(homogenous_pts, \r\n np.zeros(3), np.zeros(3), K, distortion_coeffs)\r\n return pts_out.reshape((-1, 2))\r\n\r\n def draw_features_stereo(self):\r\n img0 = self.cam0_curr_img_msg.image\r\n img1 = self.cam1_curr_img_msg.image\r\n\r\n kps0 = []\r\n kps1 = []\r\n matches = []\r\n for feature in chain.from_iterable(self.curr_features):\r\n matches.append(cv2.DMatch(len(kps0), len(kps0), 0))\r\n kps0.append(cv2.KeyPoint(*feature.cam0_point, 1))\r\n kps1.append(cv2.KeyPoint(*feature.cam1_point, 1))\r\n\r\n img = cv2.drawMatches(img0, kps0, img1, kps1, matches, None, flags=2)\r\n cv2.imshow('stereo features', img)\r\n cv2.waitKey(1)\r\n\r\n\r\ndef skew(vec):\r\n x, y, z = vec\r\n return np.array([\r\n [0, -z, y],\r\n [z, 0, -x],\r\n [-y, x, 0]])\r\n\r\ndef select(data, selectors):\r\n return [d for d, s in zip(data, selectors) if s]\r\n\r\n\r\n",
"step-ids": [
18,
20,
23,
25,
31
]
}
|
[
18,
20,
23,
25,
31
] |
import requests
import re
import time
import os
import argparse
import json

# Scrapes CAPTCHA images from contactform7.com into a local folder:
# first every PNG embedded in the demo page, then fresh images served
# three-at-a-time by the form-feedback JSON endpoint.
url = "https://contactform7.com/captcha/"
# Spoofed desktop-browser headers plus a session cookie; sent with every request.
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.1 Safari/605.1.15',
        'Content-Type': "multipart/form-data; boundary=----WebKitFormBoundaryQgctpYC5kRiIjznW", "Connection": "keep-alive",
        "Cookie": "lang = en_US;_ga = GA1.2.765999315.1562601614;_gid = GA1.2.1701684676.1562913704;__cfduid = d695b369369d5130db03260060ed2edec1562601611"
}

ap = argparse.ArgumentParser()
ap.add_argument("-o", "--output", required=True, help="Path to save the images")
# type=int is required: without it argparse hands back a str, and the later
# "count < args['number']" comparison raises TypeError in Python 3.
ap.add_argument("-n", "--number", required=False, type=int, default=500,
                help="number of images to download")
args = vars(ap.parse_args())

s = requests.Session()
result = s.get(url, headers=headers).content.decode("UTF-8")

count = 1
# Collect every numbered PNG URL embedded in the page markup.
result = re.findall(r"src=\"(.*[0-9]{1,}\.png)\"", result)
for j in result:
    print("\033[095m Downloading image \033[00m : \033[092m {}/{} \033[00m ".format(count, args["number"]))
    print(j.encode("ascii"))
    r = s.get(j.encode("ascii"), headers=headers)
    # Zero-padded sequential filename, e.g. 00001.jpg.
    p = os.path.join(args["output"], "{}.jpg".format(str(count).zfill(5)))
    with open(p, "wb") as f:
        f.write(r.content)
    time.sleep(0.1)  # throttle so we don't hammer the server
    count += 1

# The feedback endpoint returns a JSON payload whose "captcha" dict maps the
# three known field names below to freshly generated image URLs.
url = "https://contactform7.com/wp-json/contact-form-7/v1/contact-forms/1209/feedback"
images = ["captcha-118", "captcha-170", "captcha-778"]
while count < args["number"]:
    try:
        s = requests.Session()
        result = json.loads(s.post(url, headers=headers).content.decode("UTF-8"))
        for j in range(3):
            print("\033[095m Downloading image \033[00m : \033[092m {}/{} \033[00m ".format(count, args["number"]))
            r = s.get(result["captcha"][images[j]].encode("ascii"), headers=headers)
            p = os.path.join(args["output"], "{}.jpg".format(str(count).zfill(5)))
            with open(p, "wb") as f:
                f.write(r.content)
            time.sleep(0.1)
            count += 1
    except Exception:
        # Best-effort retry loop: log, back off briefly, and try again.
        print("\033[92m Error Downloading Webpage \033[00m")
        time.sleep(1)
|
normal
|
{
"blob_id": "6990b5f34af654b4e1a39c3d73b6822fa48e4835",
"index": 9159,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nap.add_argument('-o', '--output', required=True, help='Path to save the images'\n )\nap.add_argument('-n', '--number', required=False, default=500, help=\n 'number of images to download')\n<mask token>\nfor j in result:\n print('\\x1b[095m Downloading image \\x1b[00m : \\x1b[092m {}/{} \\x1b[00m '\n .format(count, args['number']))\n print(j.encode('ascii'))\n r = s.get(j.encode('ascii'), headers=headers)\n p = os.path.sep.join([args['output'], '{}.jpg'.format(str(count).zfill(5))]\n )\n f = open(p, 'wb')\n f.write(r.content)\n f.close()\n time.sleep(0.1)\n count += 1\n<mask token>\nwhile count < args['number']:\n try:\n s = requests.Session()\n result = json.loads(s.post(url, headers=headers).content.decode(\n 'UTF-8'))\n for j in range(3):\n print(\n '\\x1b[095m Downloading image \\x1b[00m : \\x1b[092m {}/{} \\x1b[00m '\n .format(count, args['number']))\n r = s.get(result['captcha'][images[j]].encode('ascii'), headers\n =headers)\n p = os.path.sep.join([args['output'], '{}.jpg'.format(str(count\n ).zfill(5))])\n f = open(p, 'wb')\n f.write(r.content)\n f.close()\n time.sleep(0.1)\n count += 1\n except Exception:\n print('\\x1b[92m Error Downloading Webpage \\x1b[00m')\n time.sleep(1)\n",
"step-3": "<mask token>\nurl = 'https://contactform7.com/captcha/'\nheaders = {'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.1 Safari/605.1.15'\n , 'Content-Type':\n 'multipart/form-data; boundary=----WebKitFormBoundaryQgctpYC5kRiIjznW',\n 'Connection': 'keep-alive', 'Cookie':\n 'lang = en_US;_ga = GA1.2.765999315.1562601614;_gid = GA1.2.1701684676.1562913704;__cfduid = d695b369369d5130db03260060ed2edec1562601611'\n }\nap = argparse.ArgumentParser()\nap.add_argument('-o', '--output', required=True, help='Path to save the images'\n )\nap.add_argument('-n', '--number', required=False, default=500, help=\n 'number of images to download')\nargs = vars(ap.parse_args())\ns = requests.Session()\nresult = s.get(url, headers=headers).content.decode('UTF-8')\ncount = 1\nresult = re.findall('src=\"(.*[0-9]{1,}\\\\.png)\"', result)\nfor j in result:\n print('\\x1b[095m Downloading image \\x1b[00m : \\x1b[092m {}/{} \\x1b[00m '\n .format(count, args['number']))\n print(j.encode('ascii'))\n r = s.get(j.encode('ascii'), headers=headers)\n p = os.path.sep.join([args['output'], '{}.jpg'.format(str(count).zfill(5))]\n )\n f = open(p, 'wb')\n f.write(r.content)\n f.close()\n time.sleep(0.1)\n count += 1\nurl = (\n 'https://contactform7.com/wp-json/contact-form-7/v1/contact-forms/1209/feedback'\n )\nimages = ['captcha-118', 'captcha-170', 'captcha-778']\nwhile count < args['number']:\n try:\n s = requests.Session()\n result = json.loads(s.post(url, headers=headers).content.decode(\n 'UTF-8'))\n for j in range(3):\n print(\n '\\x1b[095m Downloading image \\x1b[00m : \\x1b[092m {}/{} \\x1b[00m '\n .format(count, args['number']))\n r = s.get(result['captcha'][images[j]].encode('ascii'), headers\n =headers)\n p = os.path.sep.join([args['output'], '{}.jpg'.format(str(count\n ).zfill(5))])\n f = open(p, 'wb')\n f.write(r.content)\n f.close()\n time.sleep(0.1)\n count += 1\n except Exception:\n print('\\x1b[92m Error 
Downloading Webpage \\x1b[00m')\n time.sleep(1)\n",
"step-4": "import requests\nimport re\nimport time\nimport os\nimport argparse\nimport json\nurl = 'https://contactform7.com/captcha/'\nheaders = {'User-Agent':\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.1 Safari/605.1.15'\n , 'Content-Type':\n 'multipart/form-data; boundary=----WebKitFormBoundaryQgctpYC5kRiIjznW',\n 'Connection': 'keep-alive', 'Cookie':\n 'lang = en_US;_ga = GA1.2.765999315.1562601614;_gid = GA1.2.1701684676.1562913704;__cfduid = d695b369369d5130db03260060ed2edec1562601611'\n }\nap = argparse.ArgumentParser()\nap.add_argument('-o', '--output', required=True, help='Path to save the images'\n )\nap.add_argument('-n', '--number', required=False, default=500, help=\n 'number of images to download')\nargs = vars(ap.parse_args())\ns = requests.Session()\nresult = s.get(url, headers=headers).content.decode('UTF-8')\ncount = 1\nresult = re.findall('src=\"(.*[0-9]{1,}\\\\.png)\"', result)\nfor j in result:\n print('\\x1b[095m Downloading image \\x1b[00m : \\x1b[092m {}/{} \\x1b[00m '\n .format(count, args['number']))\n print(j.encode('ascii'))\n r = s.get(j.encode('ascii'), headers=headers)\n p = os.path.sep.join([args['output'], '{}.jpg'.format(str(count).zfill(5))]\n )\n f = open(p, 'wb')\n f.write(r.content)\n f.close()\n time.sleep(0.1)\n count += 1\nurl = (\n 'https://contactform7.com/wp-json/contact-form-7/v1/contact-forms/1209/feedback'\n )\nimages = ['captcha-118', 'captcha-170', 'captcha-778']\nwhile count < args['number']:\n try:\n s = requests.Session()\n result = json.loads(s.post(url, headers=headers).content.decode(\n 'UTF-8'))\n for j in range(3):\n print(\n '\\x1b[095m Downloading image \\x1b[00m : \\x1b[092m {}/{} \\x1b[00m '\n .format(count, args['number']))\n r = s.get(result['captcha'][images[j]].encode('ascii'), headers\n =headers)\n p = os.path.sep.join([args['output'], '{}.jpg'.format(str(count\n ).zfill(5))])\n f = open(p, 'wb')\n f.write(r.content)\n f.close()\n 
time.sleep(0.1)\n count += 1\n except Exception:\n print('\\x1b[92m Error Downloading Webpage \\x1b[00m')\n time.sleep(1)\n",
"step-5": "import requests\nimport re\nimport time\nimport os\nimport argparse\nimport json\n\nurl = \"https://contactform7.com/captcha/\"\nheaders = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.1 Safari/605.1.15',\n 'Content-Type': \"multipart/form-data; boundary=----WebKitFormBoundaryQgctpYC5kRiIjznW\",\"Connection\": \"keep-alive\",\n \"Cookie\": \"lang = en_US;_ga = GA1.2.765999315.1562601614;_gid = GA1.2.1701684676.1562913704;__cfduid = d695b369369d5130db03260060ed2edec1562601611\"\n}\n\nap=argparse.ArgumentParser()\nap.add_argument(\"-o\",\"--output\",required=True,help=\"Path to save the images\")\nap.add_argument(\"-n\",\"--number\",required=False,default=500,help=\"number of images to download\")\nargs=vars(ap.parse_args())\ns=requests.Session()\nresult = s.get(url, headers=headers).content.decode(\"UTF-8\")\n\ncount =1\nresult = re.findall(\"src=\\\"(.*[0-9]{1,}\\.png)\\\"\", result)\nfor j in result:\n print(\"\\033[095m Downloading image \\033[00m : \\033[092m {}/{} \\033[00m \".format(count, args[\"number\"]))\n print(j.encode(\"ascii\"))\n r = s.get(j.encode(\"ascii\"), headers=headers)\n p = os.path.sep.join([args[\"output\"], \"{}.jpg\".format(str(count).zfill(5))])\n f = open(p, \"wb\")\n f.write(r.content)\n f.close()\n time.sleep(0.1)\n count += 1\n\nurl = \"https://contactform7.com/wp-json/contact-form-7/v1/contact-forms/1209/feedback\"\nimages=[\"captcha-118\",\"captcha-170\",\"captcha-778\"]\nwhile count<args[\"number\"]:\n try:\n s = requests.Session()\n result = json.loads(s.post(url, headers=headers).content.decode(\"UTF-8\"))\n #print(result[\"captcha\"])\n #print(result[\"captcha\"][u'captcha-118'].encode(\"ascii\"))\n\n for j in range(3):\n print(\"\\033[095m Downloading image \\033[00m : \\033[092m {}/{} \\033[00m \".format(count,args[\"number\"]))\n # print(j.encode(\"ascii\"))\n r = s.get(result[\"captcha\"][images[j]].encode(\"ascii\"), headers=headers)\n p= 
os.path.sep.join([args[\"output\"],\"{}.jpg\".format(str(count).zfill(5))])\n f=open(p,\"wb\")\n f.write(r.content)\n f.close()\n time.sleep(0.1)\n count+=1\n\n except Exception:\n print(\"\\033[92m Error Downloading Webpage \\033[00m\")\n time.sleep(1)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
'''
Given an infinite sorted array (or an array with unknown size), find if a given number ‘key’ is present in the array. Write a function to return the index of the ‘key’ if it is present in the array, otherwise return -1.
Since it is not possible to define an array with infinite (unknown) size, you will be provided with an interface ArrayReader to read elements of the array. ArrayReader.get(index) will return the number at index; if the array’s size is smaller than the index, it will return Integer.MAX_VALUE.
Example 1:
Input: [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30], key = 16
Output: 6
Explanation: The key is present at index '6' in the array.
Example 2:
Input: [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30], key = 11
Output: -1
Explanation: The key is not present in the array.
Example 3:
Input: [1, 3, 8, 10, 15], key = 15
Output: 4
Explanation: The key is present at index '4' in the array.
Example 4:
Input: [1, 3, 8, 10, 15], key = 200
Output: -1
Explanation: The key is not present in the array.
'''
import math
class ArrayReader:
    """Read-only view over a sorted array of unknown ("infinite") size.

    ``get(index)`` returns the element at *index*, or ``math.inf`` when
    the index lies past the end of the backing array — the Python
    analogue of the Integer.MAX_VALUE sentinel in the problem statement.
    """

    def __init__(self, arr):
        self.arr = arr

    def get(self, index):
        # ">=" (not ">"): index == len(arr) is already out of range; the
        # original ">" let it fall through and raise IndexError instead
        # of returning the sentinel.
        if index >= len(self.arr):
            return math.inf
        return self.arr[index]
def search_in_infinite_array(reader, key):
    """Return the index of *key* in the infinite sorted array behind
    *reader*, or -1 if it is absent.

    Grows an exponential window [low, high] (doubling its size each
    step) until reader.get(high) >= key, then binary-searches within the
    window.  O(log p) reads overall, where p is the key's position.
    """
    low, high = 0, 1
    while reader.get(high) < key:
        new_low = high + 1
        # Double the window size.  The original wrote
        # ``high = (high - low + 1) * 2`` — dropping the leading
        # ``high +`` — which shrank the window and could push high below
        # low (negative indices / crashes) for keys deep in the array.
        high = high + (high - low + 1) * 2
        low = new_low
    return binary_search_array(reader, key, low, high)


def binary_search_array(reader, key, low, high):
    """Classic binary search over *reader* within [low, high]; -1 if absent."""
    while low <= high:
        mid = (low + high) // 2
        value = reader.get(mid)  # read once per iteration, not twice
        if key == value:
            return mid
        if key > value:
            low = mid + 1
        else:
            high = mid - 1
    return -1
# Demo run: key 16 lives at index 6 of this sample array.
reader = ArrayReader([4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30])
print(search_in_infinite_array(reader, 16))
|
normal
|
{
"blob_id": "a9efa258c223460b2b79861acdde89161706ad9a",
"index": 8770,
"step-1": "<mask token>\n\n\nclass ArrayReader:\n\n def __init__(self, arr):\n self.arr = arr\n\n def get(self, index):\n if index > len(self.arr):\n return math.inf\n return self.arr[index]\n\n\n<mask token>\n\n\ndef binary_search_array(reader, key, low, high):\n while low <= high:\n mid = (low + high) // 2\n if key == reader.get(mid):\n return mid\n if key > reader.get(mid):\n low = mid + 1\n else:\n high = mid - 1\n return -1\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ArrayReader:\n\n def __init__(self, arr):\n self.arr = arr\n\n def get(self, index):\n if index > len(self.arr):\n return math.inf\n return self.arr[index]\n\n\ndef search_in_infinite_array(reader, key):\n low = 0\n high = 1\n while reader.get(high) < key:\n new_low = high + 1\n high = (high - low + 1) * 2\n low = new_low\n return binary_search_array(reader, key, low, high)\n\n\ndef binary_search_array(reader, key, low, high):\n while low <= high:\n mid = (low + high) // 2\n if key == reader.get(mid):\n return mid\n if key > reader.get(mid):\n low = mid + 1\n else:\n high = mid - 1\n return -1\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ArrayReader:\n\n def __init__(self, arr):\n self.arr = arr\n\n def get(self, index):\n if index > len(self.arr):\n return math.inf\n return self.arr[index]\n\n\ndef search_in_infinite_array(reader, key):\n low = 0\n high = 1\n while reader.get(high) < key:\n new_low = high + 1\n high = (high - low + 1) * 2\n low = new_low\n return binary_search_array(reader, key, low, high)\n\n\ndef binary_search_array(reader, key, low, high):\n while low <= high:\n mid = (low + high) // 2\n if key == reader.get(mid):\n return mid\n if key > reader.get(mid):\n low = mid + 1\n else:\n high = mid - 1\n return -1\n\n\nreader = ArrayReader([4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30])\nprint(search_in_infinite_array(reader, 16))\n",
"step-4": "<mask token>\nimport math\n\n\nclass ArrayReader:\n\n def __init__(self, arr):\n self.arr = arr\n\n def get(self, index):\n if index > len(self.arr):\n return math.inf\n return self.arr[index]\n\n\ndef search_in_infinite_array(reader, key):\n low = 0\n high = 1\n while reader.get(high) < key:\n new_low = high + 1\n high = (high - low + 1) * 2\n low = new_low\n return binary_search_array(reader, key, low, high)\n\n\ndef binary_search_array(reader, key, low, high):\n while low <= high:\n mid = (low + high) // 2\n if key == reader.get(mid):\n return mid\n if key > reader.get(mid):\n low = mid + 1\n else:\n high = mid - 1\n return -1\n\n\nreader = ArrayReader([4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30])\nprint(search_in_infinite_array(reader, 16))\n",
"step-5": "'''\nGiven an infinite sorted array (or an array with unknown size), find if a given number ‘key’ is present in the array. Write a function to return the index of the ‘key’ if it is present in the array, otherwise return -1.\n\nSince it is not possible to define an array with infinite (unknown) size, you will be provided with an interface ArrayReader to read elements of the array. ArrayReader.get(index) will return the number at index; if the array’s size is smaller than the index, it will return Integer.MAX_VALUE.\n\nExample 1:\n\nInput: [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30], key = 16\nOutput: 6\nExplanation: The key is present at index '6' in the array.\nExample 2:\n\nInput: [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30], key = 11\nOutput: -1\nExplanation: The key is not present in the array.\nExample 3:\n\nInput: [1, 3, 8, 10, 15], key = 15\nOutput: 4\nExplanation: The key is present at index '4' in the array.\nExample 4:\n\nInput: [1, 3, 8, 10, 15], key = 200\nOutput: -1\nExplanation: The key is not present in the array.\n'''\n\nimport math\n\n\nclass ArrayReader:\n def __init__(self, arr):\n self.arr = arr\n\n def get(self, index):\n if index > len(self.arr):\n return math.inf\n\n return self.arr[index]\n\n\ndef search_in_infinite_array(reader, key):\n # first find the bounds\n\n low = 0\n high = 1\n\n while reader.get(high) < key:\n new_low = high + 1\n high = (high - low + 1)*2\n low = new_low\n\n return binary_search_array(reader, key, low, high)\n\n\ndef binary_search_array(reader, key, low, high):\n\n while low <= high:\n\n mid = (low + high) // 2\n\n if key == reader.get(mid):\n return mid\n\n if key > reader.get(mid):\n low = mid + 1\n\n else:\n high = mid - 1\n\n return - 1\n\n\nreader = ArrayReader([4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30])\nprint(search_in_infinite_array(reader, 16))\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
'''
Module for interacting with the SQLite database.
'''
import sqlite3
from enum import Enum
DB_NAME = 'categories.db'
class State(Enum):
    """Outcome of a database operation; ``.value`` holds the boolean success flag."""
    ok = True
    error = False
def get_db_connection():
    """Open a connection to ``DB_NAME`` and return a cursor for it.

    The connection object is stored in the module-level ``connection``
    global so that callers can commit on it and later close it via
    ``close_db_connection``.

    Returns:
        sqlite3.Cursor on success, or ``None`` when the connection or
        cursor could not be created (an error message is printed).
    """
    global connection
    # Bug fix: initialize before connecting.  Previously, if
    # sqlite3.connect() raised, ``connection`` could be unbound and the
    # except-branch ``connection.close()`` raised NameError (or closed a
    # stale connection from an earlier call).
    connection = None
    try:
        connection = sqlite3.connect(DB_NAME)
        cursor = connection.cursor()
    except Exception:
        print("Error connection db {0}".format(DB_NAME))
        # Only close if the connection was actually established
        # (cursor() may have been what raised).
        if connection is not None:
            connection.close()
        return None
    return cursor
def close_db_connection():
    """Close the module-level database connection.

    Any failure (including the connection never having been opened) is
    reported on stdout and swallowed, so this is safe to call
    unconditionally from ``finally`` blocks, as the rest of this module does.
    """
    try:
        connection.close()
    except Exception:
        print("Error closing connection")
def create_new_category(category):
    """Create a new table named ``category`` with (word, weight) columns.

    NOTE(review): ``category`` is interpolated directly into the SQL
    statement; table names cannot be bound as query parameters, so the
    value must come from a trusted source.

    Returns:
        bool: True on success, False on failure.
    """
    state = State.ok
    try:
        cursor = get_db_connection()
        query = "CREATE TABLE {0} (word varchar(15) primary key, weight real)".format(category)
        cursor.execute(query)
    except Exception:
        state = State.error
        print("Error with creating new category")
    finally:
        close_db_connection()
    # Bug fix: return the boolean ``.value`` like every other function in
    # this module.  Returning the State member itself was always truthy
    # (Enum members are truthy regardless of their value), so errors
    # looked like success to boolean-checking callers.
    return state.value
def get_category_data(category):
    """Fetch all (word, weight) rows of the ``category`` table, heaviest first.

    Returns:
        tuple(bool, list): success flag and the fetched rows.
    """
    rows = list()
    state = State.ok
    try:
        cursor = get_db_connection()
        query = "SELECT * from {0} ORDER BY weight DESC".format(category)
        rows.extend(cursor.execute(query))
    except Exception:
        state = State.error
        print("Error with getting data from {0} category".format(category))
    finally:
        close_db_connection()
    return state.value, rows
def set_category_data(category, data):
    """Insert or update (word, weight) pairs in the ``category`` table.

    Args:
        category: name of the target table.  NOTE(review): table names
            cannot be bound as parameters, so this must be trusted.
        data: iterable of (word, weight) pairs.

    Returns:
        bool: True on success, False on failure.
    """
    state = State.ok
    try:
        cursor = get_db_connection()
        # Bug fix: the previous version formatted the word directly into
        # the statement without quoting, producing invalid SQL for any
        # textual word (and an injection risk).  Bind the values instead,
        # and build the statement once outside the loop.
        query = 'INSERT OR REPLACE INTO {0} (word, weight) VALUES(?, ?)'.format(category)
        for key, value in data:
            cursor.execute(query, (key, value))

        connection.commit()
    except Exception:
        state = State.error
        print("Error with setting data to database in {0} category".format(category))
    finally:
        close_db_connection()

    return state.value
def get_file_names_in_category(category):
    """Return all rows of the ``result`` table whose category matches.

    Args:
        category: category value to filter on.

    Returns:
        tuple(bool, list): success flag and the matching rows.
    """
    state = State.ok
    names = list()
    try:
        cursor = get_db_connection()
        # Security fix: bind the category value instead of formatting it
        # into the SQL string (prevents SQL injection through ``category``).
        query = "SELECT * FROM result WHERE category = ?"
        for row in cursor.execute(query, (category,)):
            names.append(row)
    except Exception:
        state = State.error
        print("Error with getting category file names")
    finally:
        close_db_connection()

    return state.value, names
def get_file_names():
    """Return every row from the ``result`` table.

    Returns:
        tuple(bool, list): success flag and the fetched rows.
    """
    outcome = State.ok
    names = []
    try:
        cursor = get_db_connection()
        for record in cursor.execute("SELECT * FROM result"):
            names.append(record)
    except Exception:
        outcome = State.error
        print("Error with getting category file names")
    finally:
        close_db_connection()
    return outcome.value, names
|
normal
|
{
"blob_id": "9b3c2604b428295eda16030b45cf739e714f3d00",
"index": 1614,
"step-1": "<mask token>\n\n\nclass State(Enum):\n ok = True\n error = False\n\n\n<mask token>\n\n\ndef close_db_connection():\n try:\n connection.close()\n except Exception:\n print('Error closing connection')\n\n\ndef create_new_category(category):\n state = State.ok\n try:\n cursor = get_db_connection()\n query = ('CREATE TABLE {0} (word varchar(15) primary key, weight real)'\n .format(category))\n cursor.execute(query)\n except Exception:\n state = State.error\n print('Error with creating new category')\n finally:\n close_db_connection()\n return state\n\n\ndef get_category_data(category):\n state = State.ok\n data = list()\n try:\n cursor = get_db_connection()\n query = 'SELECT * from {0} ORDER BY weight DESC'.format(category)\n for row in cursor.execute(query):\n data.append(row)\n except Exception:\n state = State.error\n print('Error with getting data from {0} category'.format(category))\n finally:\n close_db_connection()\n return state.value, data\n\n\n<mask token>\n\n\ndef get_file_names_in_category(category):\n state = State.ok\n names = list()\n try:\n cursor = get_db_connection()\n query = \"SELECT * FROM result WHERE category = '{0}'\".format(category)\n for row in cursor.execute(query):\n names.append(row)\n except Exception:\n state = State.error\n print('Error with getting category file names')\n finally:\n close_db_connection()\n return state.value, names\n\n\ndef get_file_names():\n state = State.ok\n names = list()\n try:\n cursor = get_db_connection()\n query = 'SELECT * FROM result'\n for row in cursor.execute(query):\n names.append(row)\n except Exception:\n state = State.error\n print('Error with getting category file names')\n finally:\n close_db_connection()\n return state.value, names\n",
"step-2": "<mask token>\n\n\nclass State(Enum):\n ok = True\n error = False\n\n\ndef get_db_connection():\n try:\n global connection\n connection = sqlite3.connect(DB_NAME)\n cursor = connection.cursor()\n except Exception:\n print('Error connection db {0}'.format(DB_NAME))\n connection.close()\n return\n return cursor\n\n\ndef close_db_connection():\n try:\n connection.close()\n except Exception:\n print('Error closing connection')\n\n\ndef create_new_category(category):\n state = State.ok\n try:\n cursor = get_db_connection()\n query = ('CREATE TABLE {0} (word varchar(15) primary key, weight real)'\n .format(category))\n cursor.execute(query)\n except Exception:\n state = State.error\n print('Error with creating new category')\n finally:\n close_db_connection()\n return state\n\n\ndef get_category_data(category):\n state = State.ok\n data = list()\n try:\n cursor = get_db_connection()\n query = 'SELECT * from {0} ORDER BY weight DESC'.format(category)\n for row in cursor.execute(query):\n data.append(row)\n except Exception:\n state = State.error\n print('Error with getting data from {0} category'.format(category))\n finally:\n close_db_connection()\n return state.value, data\n\n\n<mask token>\n\n\ndef get_file_names_in_category(category):\n state = State.ok\n names = list()\n try:\n cursor = get_db_connection()\n query = \"SELECT * FROM result WHERE category = '{0}'\".format(category)\n for row in cursor.execute(query):\n names.append(row)\n except Exception:\n state = State.error\n print('Error with getting category file names')\n finally:\n close_db_connection()\n return state.value, names\n\n\ndef get_file_names():\n state = State.ok\n names = list()\n try:\n cursor = get_db_connection()\n query = 'SELECT * FROM result'\n for row in cursor.execute(query):\n names.append(row)\n except Exception:\n state = State.error\n print('Error with getting category file names')\n finally:\n close_db_connection()\n return state.value, names\n",
"step-3": "<mask token>\nDB_NAME = 'categories.db'\n\n\nclass State(Enum):\n ok = True\n error = False\n\n\ndef get_db_connection():\n try:\n global connection\n connection = sqlite3.connect(DB_NAME)\n cursor = connection.cursor()\n except Exception:\n print('Error connection db {0}'.format(DB_NAME))\n connection.close()\n return\n return cursor\n\n\ndef close_db_connection():\n try:\n connection.close()\n except Exception:\n print('Error closing connection')\n\n\ndef create_new_category(category):\n state = State.ok\n try:\n cursor = get_db_connection()\n query = ('CREATE TABLE {0} (word varchar(15) primary key, weight real)'\n .format(category))\n cursor.execute(query)\n except Exception:\n state = State.error\n print('Error with creating new category')\n finally:\n close_db_connection()\n return state\n\n\ndef get_category_data(category):\n state = State.ok\n data = list()\n try:\n cursor = get_db_connection()\n query = 'SELECT * from {0} ORDER BY weight DESC'.format(category)\n for row in cursor.execute(query):\n data.append(row)\n except Exception:\n state = State.error\n print('Error with getting data from {0} category'.format(category))\n finally:\n close_db_connection()\n return state.value, data\n\n\ndef set_category_data(category, data):\n state = State.ok\n try:\n cursor = get_db_connection()\n for key, value in data:\n query = (\n 'INSERT OR REPLACE INTO {0} (word, weight) VALUES({1},{2})'\n .format(category, key, value))\n cursor.execute(query)\n connection.commit()\n except Exception:\n state = State.error\n print('Error with setting data to database in {0} category'.format(\n category))\n finally:\n close_db_connection()\n return state.value\n\n\ndef get_file_names_in_category(category):\n state = State.ok\n names = list()\n try:\n cursor = get_db_connection()\n query = \"SELECT * FROM result WHERE category = '{0}'\".format(category)\n for row in cursor.execute(query):\n names.append(row)\n except Exception:\n state = State.error\n print('Error with 
getting category file names')\n finally:\n close_db_connection()\n return state.value, names\n\n\ndef get_file_names():\n state = State.ok\n names = list()\n try:\n cursor = get_db_connection()\n query = 'SELECT * FROM result'\n for row in cursor.execute(query):\n names.append(row)\n except Exception:\n state = State.error\n print('Error with getting category file names')\n finally:\n close_db_connection()\n return state.value, names\n",
"step-4": "<mask token>\nimport sqlite3\nfrom enum import Enum\nDB_NAME = 'categories.db'\n\n\nclass State(Enum):\n ok = True\n error = False\n\n\ndef get_db_connection():\n try:\n global connection\n connection = sqlite3.connect(DB_NAME)\n cursor = connection.cursor()\n except Exception:\n print('Error connection db {0}'.format(DB_NAME))\n connection.close()\n return\n return cursor\n\n\ndef close_db_connection():\n try:\n connection.close()\n except Exception:\n print('Error closing connection')\n\n\ndef create_new_category(category):\n state = State.ok\n try:\n cursor = get_db_connection()\n query = ('CREATE TABLE {0} (word varchar(15) primary key, weight real)'\n .format(category))\n cursor.execute(query)\n except Exception:\n state = State.error\n print('Error with creating new category')\n finally:\n close_db_connection()\n return state\n\n\ndef get_category_data(category):\n state = State.ok\n data = list()\n try:\n cursor = get_db_connection()\n query = 'SELECT * from {0} ORDER BY weight DESC'.format(category)\n for row in cursor.execute(query):\n data.append(row)\n except Exception:\n state = State.error\n print('Error with getting data from {0} category'.format(category))\n finally:\n close_db_connection()\n return state.value, data\n\n\ndef set_category_data(category, data):\n state = State.ok\n try:\n cursor = get_db_connection()\n for key, value in data:\n query = (\n 'INSERT OR REPLACE INTO {0} (word, weight) VALUES({1},{2})'\n .format(category, key, value))\n cursor.execute(query)\n connection.commit()\n except Exception:\n state = State.error\n print('Error with setting data to database in {0} category'.format(\n category))\n finally:\n close_db_connection()\n return state.value\n\n\ndef get_file_names_in_category(category):\n state = State.ok\n names = list()\n try:\n cursor = get_db_connection()\n query = \"SELECT * FROM result WHERE category = '{0}'\".format(category)\n for row in cursor.execute(query):\n names.append(row)\n except Exception:\n 
state = State.error\n print('Error with getting category file names')\n finally:\n close_db_connection()\n return state.value, names\n\n\ndef get_file_names():\n state = State.ok\n names = list()\n try:\n cursor = get_db_connection()\n query = 'SELECT * FROM result'\n for row in cursor.execute(query):\n names.append(row)\n except Exception:\n state = State.error\n print('Error with getting category file names')\n finally:\n close_db_connection()\n return state.value, names\n",
"step-5": "'''\n Module for interaction with database\n'''\n\nimport sqlite3\nfrom enum import Enum\n\nDB_NAME = 'categories.db'\n\n\nclass State(Enum):\n ok = True\n error = False\n\n\ndef get_db_connection():\n try:\n global connection\n connection = sqlite3.connect(DB_NAME)\n cursor = connection.cursor()\n except Exception:\n print(\"Error connection db {0}\".format(DB_NAME))\n connection.close()\n return\n\n return cursor\n\n\ndef close_db_connection():\n try:\n connection.close()\n except Exception:\n print(\"Error closing connection\")\n\n\ndef create_new_category(category):\n state = State.ok\n\n try:\n cursor = get_db_connection()\n query = \"CREATE TABLE {0} (word varchar(15) primary key, weight real)\".format(category)\n cursor.execute(query)\n except Exception:\n state = State.error\n print(\"Error with creating new category\")\n finally:\n close_db_connection()\n\n return state\n\n\ndef get_category_data(category):\n state = State.ok\n data = list()\n\n try:\n cursor = get_db_connection()\n query = \"SELECT * from {0} ORDER BY weight DESC\".format(category)\n for row in cursor.execute(query):\n data.append(row)\n except Exception:\n state = State.error\n print(\"Error with getting data from {0} category\".format(category))\n finally:\n close_db_connection()\n\n return state.value, data\n\n\ndef set_category_data(category, data):\n state = State.ok\n try:\n cursor = get_db_connection()\n for key, value in data:\n query = 'INSERT OR REPLACE INTO {0} (word, weight) VALUES({1},{2})'.format(category, key, value)\n cursor.execute(query)\n\n connection.commit()\n except Exception:\n state = State.error\n print(\"Error with setting data to database in {0} category\".format(category))\n finally:\n close_db_connection()\n\n return state.value\n\ndef get_file_names_in_category(category):\n state = State.ok\n names = list()\n try:\n cursor = get_db_connection()\n query = \"SELECT * FROM result WHERE category = '{0}'\".format(category)\n for row in 
cursor.execute(query):\n names.append(row)\n except Exception:\n state = State.error\n print(\"Error with getting category file names\")\n finally:\n close_db_connection()\n\n return state.value, names\n\ndef get_file_names():\n state = State.ok\n names = list()\n try:\n cursor = get_db_connection()\n query = \"SELECT * FROM result\"\n for row in cursor.execute(query):\n names.append(row)\n except Exception:\n state = State.error\n print(\"Error with getting category file names\")\n finally:\n close_db_connection()\n\n return state.value, names",
"step-ids": [
7,
8,
10,
11,
12
]
}
|
[
7,
8,
10,
11,
12
] |
# -*- coding: utf-8 -*-
"""The app module, containing the app factory function."""
from flask import Flask, render_template
from flask_cors import CORS
from flask_misaka import Misaka
from flask_mailman import Mail
from flask_talisman import Talisman
from werkzeug.middleware.proxy_fix import ProxyFix
from micawber.providers import bootstrap_basic
from whitenoise import WhiteNoise
from pytz import timezone
from urllib.parse import quote_plus
from dribdat import commands, public, admin
from dribdat.assets import assets # noqa: I005
from dribdat.sso import get_auth_blueprint
from dribdat.extensions import (
hashing,
cache,
db,
login_manager,
migrate,
)
from dribdat.settings import ProdConfig # noqa: I005
from dribdat.utils import timesince
from dribdat.onebox import make_oembedplus
def init_app(config_object=ProdConfig):
    """Define an application factory.

    See: http://flask.pocoo.org/docs/patterns/appfactories/

    :param config_object: The configuration object to use.
    :return: the configured Flask application instance.
    """
    app = Flask(__name__)
    app.config.from_object(config_object)

    # Set up cross-site access to the API (all origins, /api/* only)
    if app.config['SERVER_CORS']:
        CORS(app, resources={r"/api/*": {"origins": "*"}})
        app.config['CORS_HEADERS'] = 'Content-Type'

    # Set up using an external proxy/static server
    if app.config['SERVER_PROXY']:
        # Trust one hop of X-Forwarded-For/-Proto/-Host headers.
        app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)
    else:
        # Internally optimize static file hosting
        app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')
        for static in ('css', 'img', 'js', 'public'):
            app.wsgi_app.add_files('dribdat/static/' + static)

    # NOTE(review): extensions are initialized before blueprints are
    # registered — this ordering looks deliberate; keep it.
    register_extensions(app)
    register_blueprints(app)
    register_oauthhandlers(app)
    register_errorhandlers(app)
    register_filters(app)
    register_loggers(app)
    register_shellcontext(app)
    register_commands(app)
    register_caching(app)
    return app
def register_extensions(app):
    """Register Flask extensions.

    Initializes assets, hashing, caching, the database, login handling,
    migrations (wired to the same ``db`` object), then mail and HTTPS
    (Talisman) support on *app*.
    """
    assets.init_app(app)
    hashing.init_app(app)
    cache.init_app(app)
    db.init_app(app)
    login_manager.init_app(app)
    migrate.init_app(app, db)
    init_mailman(app)
    init_talisman(app)
    return None
def init_mailman(app):
    """Initialize mailer support.

    Mail is only configured when ``MAIL_SERVER`` is set; a default
    sender (``MAIL_DEFAULT_SENDER``) is required to actually send.
    """
    if 'MAIL_SERVER' in app.config and app.config['MAIL_SERVER']:
        if not app.config['MAIL_DEFAULT_SENDER']:
            # Fix: Logger.warn() is deprecated — use warning() instead.
            app.logger.warning('MAIL_DEFAULT_SENDER is required to send email')
        else:
            mail = Mail()
            mail.init_app(app)
def init_talisman(app):
    """Initialize Talisman support.

    Applies the configured Content-Security-Policy and relaxed frame
    options whenever ``SERVER_SSL`` is set to a truthy value.
    """
    if not app.config.get('SERVER_SSL'):
        return
    Talisman(
        app,
        content_security_policy=app.config['CSP_DIRECTIVES'],
        frame_options_allow_from='*',
    )
def register_blueprints(app):
    """Register Flask blueprints (public site, projects, auth, API, admin)."""
    blueprints = (
        public.views.blueprint,
        public.project.blueprint,
        public.auth.blueprint,
        public.api.blueprint,
        admin.views.blueprint,
    )
    for blueprint in blueprints:
        app.register_blueprint(blueprint)
    return None
def register_oauthhandlers(app):
    """Set up OAuth handlers based on configuration."""
    oauth_blueprint = get_auth_blueprint(app)
    if oauth_blueprint is None:
        return
    app.register_blueprint(oauth_blueprint, url_prefix="/oauth")
def register_errorhandlers(app):
    """Register a shared template-based handler for common HTTP errors."""

    def render_error(error):
        """Render the template matching the error's status code."""
        # HTTPExceptions carry a `code` attribute; anything else is a 500.
        status = getattr(error, 'code', 500)
        template = '{0}.html'.format(status)
        return render_template(template), status

    for status_code in (401, 404, 500):
        app.errorhandler(status_code)(render_error)
    return None
def register_shellcontext(app):
    """Expose commonly used objects inside the ``flask shell`` session."""

    def shell_context():
        """Build the mapping of names made available in the shell."""
        from dribdat.user.models import User
        context = {'db': db}
        context['User'] = User
        return context

    app.shell_context_processor(shell_context)
def register_commands(app):
    """Register Click commands on the app's CLI group."""
    for command in (commands.lint, commands.clean, commands.urls):
        app.cli.add_command(command)
def register_filters(app):
    """Register filters and helpers used by the Jinja templates.

    NOTE: the *name* of each ``@app.template_filter()``-decorated
    function becomes the filter name available in templates — do not
    rename them.
    """
    # Conversion of Markdown to HTML
    Misaka(app, autolink=True, fenced_code=True,
           strikethrough=True, tables=True)

    # Registration of handlers for micawber
    app.oembed_providers = bootstrap_basic()

    @app.template_filter()
    def onebox(value):
        # Expand recognized URLs into embedded content, bounded in size.
        return make_oembedplus(
            value, app.oembed_providers, maxwidth=600, maxheight=400
        )

    # Timezone helper
    app.tz = timezone(app.config['TIME_ZONE'])

    # Lambda filters for safe image_url's
    # (the second argument is the set of characters quote_plus leaves unescaped)
    app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&=')

    # Custom filters
    @app.template_filter()
    def since_date(value):
        return timesince(value)

    @app.template_filter()
    def until_date(value):
        return timesince(value, default="now!", until=True)

    @app.template_filter()
    def format_date(value, format='%d.%m.%Y'):
        # Render '' rather than raising for a missing date.
        if value is None: return ''
        return value.strftime(format)

    @app.template_filter()
    def format_datetime(value, format='%d.%m.%Y %H:%M'):
        if value is None: return ''
        return value.strftime(format)
def register_loggers(app):
    """Initialize and configure logging.

    Outside of debug mode, attach a stream handler and log at INFO level.
    """
    debug_mode = app.config.get('DEBUG', True)
    if debug_mode:
        return
    import logging
    handler = logging.StreamHandler()
    app.logger.addHandler(handler)
    app.logger.setLevel(logging.INFO)
def register_caching(app):
    """Prevent cached responses in debug."""
    if not app.config.get('DEBUG'):
        return

    @app.after_request
    def after_request(response):
        # Stamp every response with aggressive no-caching headers.
        no_cache_headers = (
            ("Cache-Control", "no-cache, no-store, must-revalidate, public, max-age=0"),
            ("Expires", 0),
            ("Pragma", "no-cache"),
        )
        for header, value in no_cache_headers:
            response.headers[header] = value
        return response
|
normal
|
{
"blob_id": "2257f73a290dfd428a874e963c26e51f1c1f1efa",
"index": 927,
"step-1": "<mask token>\n\n\ndef register_extensions(app):\n \"\"\"Register Flask extensions.\"\"\"\n assets.init_app(app)\n hashing.init_app(app)\n cache.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n migrate.init_app(app, db)\n init_mailman(app)\n init_talisman(app)\n return None\n\n\n<mask token>\n\n\ndef register_blueprints(app):\n \"\"\"Register Flask blueprints.\"\"\"\n app.register_blueprint(public.views.blueprint)\n app.register_blueprint(public.project.blueprint)\n app.register_blueprint(public.auth.blueprint)\n app.register_blueprint(public.api.blueprint)\n app.register_blueprint(admin.views.blueprint)\n return None\n\n\n<mask token>\n\n\ndef register_shellcontext(app):\n \"\"\"Register shell context objects.\"\"\"\n\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n from dribdat.user.models import User\n return {'db': db, 'User': User}\n app.shell_context_processor(shell_context)\n\n\n<mask token>\n\n\ndef register_filters(app):\n \"\"\"Register filters for templates.\"\"\"\n Misaka(app, autolink=True, fenced_code=True, strikethrough=True, tables\n =True)\n app.oembed_providers = bootstrap_basic()\n\n @app.template_filter()\n def onebox(value):\n return make_oembedplus(value, app.oembed_providers, maxwidth=600,\n maxheight=400)\n app.tz = timezone(app.config['TIME_ZONE'])\n app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&='\n )\n\n @app.template_filter()\n def since_date(value):\n return timesince(value)\n\n @app.template_filter()\n def until_date(value):\n return timesince(value, default='now!', until=True)\n\n @app.template_filter()\n def format_date(value, format='%d.%m.%Y'):\n if value is None:\n return ''\n return value.strftime(format)\n\n @app.template_filter()\n def format_datetime(value, format='%d.%m.%Y %H:%M'):\n if value is None:\n return ''\n return value.strftime(format)\n\n\ndef register_loggers(app):\n \"\"\"Initialize and configure logging.\"\"\"\n if 'DEBUG' in app.config and not 
app.config['DEBUG']:\n import logging\n stream_handler = logging.StreamHandler()\n app.logger.addHandler(stream_handler)\n app.logger.setLevel(logging.INFO)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef init_app(config_object=ProdConfig):\n \"\"\"Define an application factory.\n\n See: http://flask.pocoo.org/docs/patterns/appfactories/\n\n :param config_object: The configuration object to use.\n \"\"\"\n app = Flask(__name__)\n app.config.from_object(config_object)\n if app.config['SERVER_CORS']:\n CORS(app, resources={'/api/*': {'origins': '*'}})\n app.config['CORS_HEADERS'] = 'Content-Type'\n if app.config['SERVER_PROXY']:\n app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)\n else:\n app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')\n for static in ('css', 'img', 'js', 'public'):\n app.wsgi_app.add_files('dribdat/static/' + static)\n register_extensions(app)\n register_blueprints(app)\n register_oauthhandlers(app)\n register_errorhandlers(app)\n register_filters(app)\n register_loggers(app)\n register_shellcontext(app)\n register_commands(app)\n register_caching(app)\n return app\n\n\ndef register_extensions(app):\n \"\"\"Register Flask extensions.\"\"\"\n assets.init_app(app)\n hashing.init_app(app)\n cache.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n migrate.init_app(app, db)\n init_mailman(app)\n init_talisman(app)\n return None\n\n\n<mask token>\n\n\ndef register_blueprints(app):\n \"\"\"Register Flask blueprints.\"\"\"\n app.register_blueprint(public.views.blueprint)\n app.register_blueprint(public.project.blueprint)\n app.register_blueprint(public.auth.blueprint)\n app.register_blueprint(public.api.blueprint)\n app.register_blueprint(admin.views.blueprint)\n return None\n\n\ndef register_oauthhandlers(app):\n \"\"\"Set up OAuth handlers based on configuration.\"\"\"\n blueprint = get_auth_blueprint(app)\n if blueprint is not None:\n app.register_blueprint(blueprint, url_prefix='/oauth')\n\n\ndef register_errorhandlers(app):\n \"\"\"Register error handlers.\"\"\"\n\n def render_error(error):\n \"\"\"Render error template.\"\"\"\n error_code = getattr(error, 'code', 500)\n return 
render_template('{0}.html'.format(error_code)), error_code\n for errcode in [401, 404, 500]:\n app.errorhandler(errcode)(render_error)\n return None\n\n\ndef register_shellcontext(app):\n \"\"\"Register shell context objects.\"\"\"\n\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n from dribdat.user.models import User\n return {'db': db, 'User': User}\n app.shell_context_processor(shell_context)\n\n\n<mask token>\n\n\ndef register_filters(app):\n \"\"\"Register filters for templates.\"\"\"\n Misaka(app, autolink=True, fenced_code=True, strikethrough=True, tables\n =True)\n app.oembed_providers = bootstrap_basic()\n\n @app.template_filter()\n def onebox(value):\n return make_oembedplus(value, app.oembed_providers, maxwidth=600,\n maxheight=400)\n app.tz = timezone(app.config['TIME_ZONE'])\n app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&='\n )\n\n @app.template_filter()\n def since_date(value):\n return timesince(value)\n\n @app.template_filter()\n def until_date(value):\n return timesince(value, default='now!', until=True)\n\n @app.template_filter()\n def format_date(value, format='%d.%m.%Y'):\n if value is None:\n return ''\n return value.strftime(format)\n\n @app.template_filter()\n def format_datetime(value, format='%d.%m.%Y %H:%M'):\n if value is None:\n return ''\n return value.strftime(format)\n\n\ndef register_loggers(app):\n \"\"\"Initialize and configure logging.\"\"\"\n if 'DEBUG' in app.config and not app.config['DEBUG']:\n import logging\n stream_handler = logging.StreamHandler()\n app.logger.addHandler(stream_handler)\n app.logger.setLevel(logging.INFO)\n\n\ndef register_caching(app):\n \"\"\"Prevent cached responses in debug.\"\"\"\n if 'DEBUG' in app.config and app.config['DEBUG']:\n\n @app.after_request\n def after_request(response):\n response.headers['Cache-Control'\n ] = 'no-cache, no-store, must-revalidate, public, max-age=0'\n response.headers['Expires'] = 0\n response.headers['Pragma'] = 'no-cache'\n 
return response\n",
"step-3": "<mask token>\n\n\ndef init_app(config_object=ProdConfig):\n \"\"\"Define an application factory.\n\n See: http://flask.pocoo.org/docs/patterns/appfactories/\n\n :param config_object: The configuration object to use.\n \"\"\"\n app = Flask(__name__)\n app.config.from_object(config_object)\n if app.config['SERVER_CORS']:\n CORS(app, resources={'/api/*': {'origins': '*'}})\n app.config['CORS_HEADERS'] = 'Content-Type'\n if app.config['SERVER_PROXY']:\n app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)\n else:\n app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')\n for static in ('css', 'img', 'js', 'public'):\n app.wsgi_app.add_files('dribdat/static/' + static)\n register_extensions(app)\n register_blueprints(app)\n register_oauthhandlers(app)\n register_errorhandlers(app)\n register_filters(app)\n register_loggers(app)\n register_shellcontext(app)\n register_commands(app)\n register_caching(app)\n return app\n\n\ndef register_extensions(app):\n \"\"\"Register Flask extensions.\"\"\"\n assets.init_app(app)\n hashing.init_app(app)\n cache.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n migrate.init_app(app, db)\n init_mailman(app)\n init_talisman(app)\n return None\n\n\n<mask token>\n\n\ndef init_talisman(app):\n \"\"\"Initialize Talisman support.\"\"\"\n if 'SERVER_SSL' in app.config and app.config['SERVER_SSL']:\n Talisman(app, content_security_policy=app.config['CSP_DIRECTIVES'],\n frame_options_allow_from='*')\n\n\ndef register_blueprints(app):\n \"\"\"Register Flask blueprints.\"\"\"\n app.register_blueprint(public.views.blueprint)\n app.register_blueprint(public.project.blueprint)\n app.register_blueprint(public.auth.blueprint)\n app.register_blueprint(public.api.blueprint)\n app.register_blueprint(admin.views.blueprint)\n return None\n\n\ndef register_oauthhandlers(app):\n \"\"\"Set up OAuth handlers based on configuration.\"\"\"\n blueprint = get_auth_blueprint(app)\n if blueprint is not None:\n 
app.register_blueprint(blueprint, url_prefix='/oauth')\n\n\ndef register_errorhandlers(app):\n \"\"\"Register error handlers.\"\"\"\n\n def render_error(error):\n \"\"\"Render error template.\"\"\"\n error_code = getattr(error, 'code', 500)\n return render_template('{0}.html'.format(error_code)), error_code\n for errcode in [401, 404, 500]:\n app.errorhandler(errcode)(render_error)\n return None\n\n\ndef register_shellcontext(app):\n \"\"\"Register shell context objects.\"\"\"\n\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n from dribdat.user.models import User\n return {'db': db, 'User': User}\n app.shell_context_processor(shell_context)\n\n\n<mask token>\n\n\ndef register_filters(app):\n \"\"\"Register filters for templates.\"\"\"\n Misaka(app, autolink=True, fenced_code=True, strikethrough=True, tables\n =True)\n app.oembed_providers = bootstrap_basic()\n\n @app.template_filter()\n def onebox(value):\n return make_oembedplus(value, app.oembed_providers, maxwidth=600,\n maxheight=400)\n app.tz = timezone(app.config['TIME_ZONE'])\n app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&='\n )\n\n @app.template_filter()\n def since_date(value):\n return timesince(value)\n\n @app.template_filter()\n def until_date(value):\n return timesince(value, default='now!', until=True)\n\n @app.template_filter()\n def format_date(value, format='%d.%m.%Y'):\n if value is None:\n return ''\n return value.strftime(format)\n\n @app.template_filter()\n def format_datetime(value, format='%d.%m.%Y %H:%M'):\n if value is None:\n return ''\n return value.strftime(format)\n\n\ndef register_loggers(app):\n \"\"\"Initialize and configure logging.\"\"\"\n if 'DEBUG' in app.config and not app.config['DEBUG']:\n import logging\n stream_handler = logging.StreamHandler()\n app.logger.addHandler(stream_handler)\n app.logger.setLevel(logging.INFO)\n\n\ndef register_caching(app):\n \"\"\"Prevent cached responses in debug.\"\"\"\n if 'DEBUG' in app.config and 
app.config['DEBUG']:\n\n @app.after_request\n def after_request(response):\n response.headers['Cache-Control'\n ] = 'no-cache, no-store, must-revalidate, public, max-age=0'\n response.headers['Expires'] = 0\n response.headers['Pragma'] = 'no-cache'\n return response\n",
"step-4": "<mask token>\n\n\ndef init_app(config_object=ProdConfig):\n \"\"\"Define an application factory.\n\n See: http://flask.pocoo.org/docs/patterns/appfactories/\n\n :param config_object: The configuration object to use.\n \"\"\"\n app = Flask(__name__)\n app.config.from_object(config_object)\n if app.config['SERVER_CORS']:\n CORS(app, resources={'/api/*': {'origins': '*'}})\n app.config['CORS_HEADERS'] = 'Content-Type'\n if app.config['SERVER_PROXY']:\n app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)\n else:\n app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')\n for static in ('css', 'img', 'js', 'public'):\n app.wsgi_app.add_files('dribdat/static/' + static)\n register_extensions(app)\n register_blueprints(app)\n register_oauthhandlers(app)\n register_errorhandlers(app)\n register_filters(app)\n register_loggers(app)\n register_shellcontext(app)\n register_commands(app)\n register_caching(app)\n return app\n\n\ndef register_extensions(app):\n \"\"\"Register Flask extensions.\"\"\"\n assets.init_app(app)\n hashing.init_app(app)\n cache.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n migrate.init_app(app, db)\n init_mailman(app)\n init_talisman(app)\n return None\n\n\ndef init_mailman(app):\n \"\"\"Initialize mailer support.\"\"\"\n if 'MAIL_SERVER' in app.config and app.config['MAIL_SERVER']:\n if not app.config['MAIL_DEFAULT_SENDER']:\n app.logger.warn('MAIL_DEFAULT_SENDER is required to send email')\n else:\n mail = Mail()\n mail.init_app(app)\n\n\ndef init_talisman(app):\n \"\"\"Initialize Talisman support.\"\"\"\n if 'SERVER_SSL' in app.config and app.config['SERVER_SSL']:\n Talisman(app, content_security_policy=app.config['CSP_DIRECTIVES'],\n frame_options_allow_from='*')\n\n\ndef register_blueprints(app):\n \"\"\"Register Flask blueprints.\"\"\"\n app.register_blueprint(public.views.blueprint)\n app.register_blueprint(public.project.blueprint)\n app.register_blueprint(public.auth.blueprint)\n 
app.register_blueprint(public.api.blueprint)\n app.register_blueprint(admin.views.blueprint)\n return None\n\n\ndef register_oauthhandlers(app):\n \"\"\"Set up OAuth handlers based on configuration.\"\"\"\n blueprint = get_auth_blueprint(app)\n if blueprint is not None:\n app.register_blueprint(blueprint, url_prefix='/oauth')\n\n\ndef register_errorhandlers(app):\n \"\"\"Register error handlers.\"\"\"\n\n def render_error(error):\n \"\"\"Render error template.\"\"\"\n error_code = getattr(error, 'code', 500)\n return render_template('{0}.html'.format(error_code)), error_code\n for errcode in [401, 404, 500]:\n app.errorhandler(errcode)(render_error)\n return None\n\n\ndef register_shellcontext(app):\n \"\"\"Register shell context objects.\"\"\"\n\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n from dribdat.user.models import User\n return {'db': db, 'User': User}\n app.shell_context_processor(shell_context)\n\n\ndef register_commands(app):\n \"\"\"Register Click commands.\"\"\"\n app.cli.add_command(commands.lint)\n app.cli.add_command(commands.clean)\n app.cli.add_command(commands.urls)\n\n\ndef register_filters(app):\n \"\"\"Register filters for templates.\"\"\"\n Misaka(app, autolink=True, fenced_code=True, strikethrough=True, tables\n =True)\n app.oembed_providers = bootstrap_basic()\n\n @app.template_filter()\n def onebox(value):\n return make_oembedplus(value, app.oembed_providers, maxwidth=600,\n maxheight=400)\n app.tz = timezone(app.config['TIME_ZONE'])\n app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&='\n )\n\n @app.template_filter()\n def since_date(value):\n return timesince(value)\n\n @app.template_filter()\n def until_date(value):\n return timesince(value, default='now!', until=True)\n\n @app.template_filter()\n def format_date(value, format='%d.%m.%Y'):\n if value is None:\n return ''\n return value.strftime(format)\n\n @app.template_filter()\n def format_datetime(value, format='%d.%m.%Y %H:%M'):\n if value is 
None:\n return ''\n return value.strftime(format)\n\n\ndef register_loggers(app):\n \"\"\"Initialize and configure logging.\"\"\"\n if 'DEBUG' in app.config and not app.config['DEBUG']:\n import logging\n stream_handler = logging.StreamHandler()\n app.logger.addHandler(stream_handler)\n app.logger.setLevel(logging.INFO)\n\n\ndef register_caching(app):\n \"\"\"Prevent cached responses in debug.\"\"\"\n if 'DEBUG' in app.config and app.config['DEBUG']:\n\n @app.after_request\n def after_request(response):\n response.headers['Cache-Control'\n ] = 'no-cache, no-store, must-revalidate, public, max-age=0'\n response.headers['Expires'] = 0\n response.headers['Pragma'] = 'no-cache'\n return response\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"The app module, containing the app factory function.\"\"\"\n\nfrom flask import Flask, render_template\nfrom flask_cors import CORS\nfrom flask_misaka import Misaka\nfrom flask_mailman import Mail\nfrom flask_talisman import Talisman\nfrom werkzeug.middleware.proxy_fix import ProxyFix\nfrom micawber.providers import bootstrap_basic\nfrom whitenoise import WhiteNoise\nfrom pytz import timezone\nfrom urllib.parse import quote_plus\nfrom dribdat import commands, public, admin\nfrom dribdat.assets import assets # noqa: I005\nfrom dribdat.sso import get_auth_blueprint\nfrom dribdat.extensions import (\n hashing,\n cache,\n db,\n login_manager,\n migrate,\n)\nfrom dribdat.settings import ProdConfig # noqa: I005\nfrom dribdat.utils import timesince\nfrom dribdat.onebox import make_oembedplus\n\n\ndef init_app(config_object=ProdConfig):\n \"\"\"Define an application factory.\n\n See: http://flask.pocoo.org/docs/patterns/appfactories/\n\n :param config_object: The configuration object to use.\n \"\"\"\n app = Flask(__name__)\n app.config.from_object(config_object)\n\n # Set up cross-site access to the API\n if app.config['SERVER_CORS']:\n CORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}})\n app.config['CORS_HEADERS'] = 'Content-Type'\n\n # Set up using an external proxy/static server\n if app.config['SERVER_PROXY']:\n app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)\n else:\n # Internally optimize static file hosting\n app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')\n for static in ('css', 'img', 'js', 'public'):\n app.wsgi_app.add_files('dribdat/static/' + static)\n\n register_extensions(app)\n register_blueprints(app)\n register_oauthhandlers(app)\n register_errorhandlers(app)\n register_filters(app)\n register_loggers(app)\n register_shellcontext(app)\n register_commands(app)\n register_caching(app)\n return app\n\n\ndef register_extensions(app):\n \"\"\"Register Flask extensions.\"\"\"\n 
assets.init_app(app)\n hashing.init_app(app)\n cache.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n migrate.init_app(app, db)\n init_mailman(app)\n init_talisman(app)\n return None\n\n\ndef init_mailman(app):\n \"\"\"Initialize mailer support.\"\"\"\n if 'MAIL_SERVER' in app.config and app.config['MAIL_SERVER']:\n if not app.config['MAIL_DEFAULT_SENDER']:\n app.logger.warn('MAIL_DEFAULT_SENDER is required to send email')\n else:\n mail = Mail()\n mail.init_app(app)\n\n\ndef init_talisman(app):\n \"\"\"Initialize Talisman support.\"\"\"\n if 'SERVER_SSL' in app.config and app.config['SERVER_SSL']:\n Talisman(app,\n content_security_policy=app.config['CSP_DIRECTIVES'],\n frame_options_allow_from='*')\n\n\ndef register_blueprints(app):\n \"\"\"Register Flask blueprints.\"\"\"\n app.register_blueprint(public.views.blueprint)\n app.register_blueprint(public.project.blueprint)\n app.register_blueprint(public.auth.blueprint)\n app.register_blueprint(public.api.blueprint)\n app.register_blueprint(admin.views.blueprint)\n return None\n\n\ndef register_oauthhandlers(app):\n \"\"\"Set up OAuth handlers based on configuration.\"\"\"\n blueprint = get_auth_blueprint(app)\n if blueprint is not None:\n app.register_blueprint(blueprint, url_prefix=\"/oauth\")\n\n\ndef register_errorhandlers(app):\n \"\"\"Register error handlers.\"\"\"\n def render_error(error):\n \"\"\"Render error template.\"\"\"\n # If a HTTPException, pull the `code` attribute; default to 500\n error_code = getattr(error, 'code', 500)\n return render_template('{0}.html'.format(error_code)), error_code\n for errcode in [401, 404, 500]:\n app.errorhandler(errcode)(render_error)\n return None\n\n\ndef register_shellcontext(app):\n \"\"\"Register shell context objects.\"\"\"\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n from dribdat.user.models import User\n return {\n 'db': db,\n 'User': User}\n\n app.shell_context_processor(shell_context)\n\n\ndef register_commands(app):\n 
\"\"\"Register Click commands.\"\"\"\n app.cli.add_command(commands.lint)\n app.cli.add_command(commands.clean)\n app.cli.add_command(commands.urls)\n\n\ndef register_filters(app):\n \"\"\"Register filters for templates.\"\"\"\n #\n # Conversion of Markdown to HTML\n Misaka(app, autolink=True, fenced_code=True,\n strikethrough=True, tables=True)\n\n # Registration of handlers for micawber\n app.oembed_providers = bootstrap_basic()\n\n @app.template_filter()\n def onebox(value):\n return make_oembedplus(\n value, app.oembed_providers, maxwidth=600, maxheight=400\n )\n\n # Timezone helper\n app.tz = timezone(app.config['TIME_ZONE'])\n\n # Lambda filters for safe image_url's\n app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&=')\n\n # Custom filters\n @app.template_filter()\n def since_date(value):\n return timesince(value)\n\n @app.template_filter()\n def until_date(value):\n return timesince(value, default=\"now!\", until=True)\n\n @app.template_filter()\n def format_date(value, format='%d.%m.%Y'):\n if value is None: return ''\n return value.strftime(format)\n\n @app.template_filter()\n def format_datetime(value, format='%d.%m.%Y %H:%M'):\n if value is None: return ''\n return value.strftime(format)\n\n\ndef register_loggers(app):\n \"\"\"Initialize and configure logging.\"\"\"\n if 'DEBUG' in app.config and not app.config['DEBUG']:\n import logging\n stream_handler = logging.StreamHandler()\n app.logger.addHandler(stream_handler)\n app.logger.setLevel(logging.INFO)\n\n\ndef register_caching(app):\n \"\"\"Prevent cached responses in debug.\"\"\"\n if 'DEBUG' in app.config and app.config['DEBUG']:\n @app.after_request\n def after_request(response):\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate, public, max-age=0\"\n response.headers[\"Expires\"] = 0\n response.headers[\"Pragma\"] = \"no-cache\"\n return response\n",
"step-ids": [
5,
9,
10,
12,
14
]
}
|
[
5,
9,
10,
12,
14
] |
from eval_lib.classification_results import analyze_one_classification_result
from eval_lib.classification_results import ClassificationBatches
from eval_lib.cloud_client import CompetitionDatastoreClient
from eval_lib.cloud_client import CompetitionStorageClient
from eval_lib.dataset_helper import DatasetMetadata
from eval_lib.dataset_helper import download_dataset
from eval_lib.dataset_helper import enforce_epsilon_and_compute_hash
from eval_lib.image_batches import AversarialBatches
from eval_lib.image_batches import DatasetBatches
from eval_lib.submissions import CompetitionSubmissions
from eval_lib.work_data import AttackWorkPieces
from eval_lib.work_data import DefenseWorkPieces
|
normal
|
{
"blob_id": "64935ae910d5f330722b637dcc5794e7e07ab52d",
"index": 8375,
"step-1": "<mask token>\n",
"step-2": "from eval_lib.classification_results import analyze_one_classification_result\nfrom eval_lib.classification_results import ClassificationBatches\nfrom eval_lib.cloud_client import CompetitionDatastoreClient\nfrom eval_lib.cloud_client import CompetitionStorageClient\nfrom eval_lib.dataset_helper import DatasetMetadata\nfrom eval_lib.dataset_helper import download_dataset\nfrom eval_lib.dataset_helper import enforce_epsilon_and_compute_hash\nfrom eval_lib.image_batches import AversarialBatches\nfrom eval_lib.image_batches import DatasetBatches\nfrom eval_lib.submissions import CompetitionSubmissions\nfrom eval_lib.work_data import AttackWorkPieces\nfrom eval_lib.work_data import DefenseWorkPieces\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
import random
import sys
def sequential_search(my_list, search_elt):
found = False
start_time = time.time()
for elt in my_list:
if search_elt == elt:
found = True
break
return (time.time() - start_time), found
def ordered_sequential_search(my_list, search_elt):
found = False
start_time = time.time()
for elt in my_list:
if search_elt == elt:
found = True
break
elif search_elt > elt:
break
return (time.time() - start_time), found
def binary_search_iterative(my_list, search_elt):
first = 0
last = len(my_list) - 1
found = False
start_time = time.time()
while first <= last and not found:
midpoint = (first + last) // 2
if my_list[midpoint] == search_elt:
found = True
elif search_elt < my_list[midpoint]:
last = midpoint - 1
else:
first = midpoint + 1
return (time.time() - start_time), found
def binary_search_rec(a_list, item):
if len(a_list) == 0:
return False
else:
midpoint = len(a_list) // 2
if a_list[midpoint] == item:
return True
elif item < a_list[midpoint]:
return binary_search_rec(a_list[:midpoint], item)
else:
return binary_search_rec(a_list[midpoint + 1:], item)
def binary_search_recursive(my_list, search_elt, start_time = time.time):
start_time = time.time()
return (time.time() - start_time), binary_search_rec(my_list, search_elt)
def generate_random_nb_my_list(nb, amount_my_list, maxNumber = sys.maxint):
return [
[random.randint(0, maxNumber) for _ in range (nb)]
for _ in range (amount_my_list)
]
def functionTimerAggregator(timeAggregator, fn, amt_of_nb, rnd_list):
(fn_name, fn_function, fn_list_indx) = fn
(timing, _) = fn_function(rnd_list[fn_list_indx], -1)
if amt_of_nb not in timeAggregator:
timeAggregator[amt_of_nb] = {}
if fn_name not in timeAggregator[amt_of_nb]:
timeAggregator[amt_of_nb][fn_name] = 0
timeAggregator[amt_of_nb][fn_name] += timing
def printTimerAggregator(timeAggregator, list_size):
for amount_of_number, fn_type in timeAggregator.iteritems():
print('For %s size of list:' % amount_of_number)
for fn_name, consumedTime in fn_type.iteritems():
print('\t%s took %10.7f seconds to run, on average'
% (fn_name, consumedTime / list_size))
if __name__ == '__main__':
timeAggregator = {}
amount_of_numbers = [500, 1000, 10000]
function_list = [
('Sequential Search', sequential_search, 0),
('Ordered Sequential Search', ordered_sequential_search, 1),
('Binary Search Iterative', binary_search_iterative, 1),
('Binary Search Recursive', binary_search_recursive, 1),
]
list_size = 100
for amount_of_number in amount_of_numbers:
my_randoms = generate_random_nb_my_list(amount_of_number, list_size)
for unsorted_list in my_randoms:
sorted_list = unsorted_list[:]
sorted_list.sort()
for fn in function_list:
functionTimerAggregator(
timeAggregator, fn, amount_of_number,
(unsorted_list, sorted_list))
printTimerAggregator(timeAggregator, list_size)
|
normal
|
{
"blob_id": "f3a34d1c37165490c77ccd21f428718c8c90f866",
"index": 4057,
"step-1": "<mask token>\n\n\ndef sequential_search(my_list, search_elt):\n found = False\n start_time = time.time()\n for elt in my_list:\n if search_elt == elt:\n found = True\n break\n return time.time() - start_time, found\n\n\ndef ordered_sequential_search(my_list, search_elt):\n found = False\n start_time = time.time()\n for elt in my_list:\n if search_elt == elt:\n found = True\n break\n elif search_elt > elt:\n break\n return time.time() - start_time, found\n\n\n<mask token>\n\n\ndef binary_search_rec(a_list, item):\n if len(a_list) == 0:\n return False\n else:\n midpoint = len(a_list) // 2\n if a_list[midpoint] == item:\n return True\n elif item < a_list[midpoint]:\n return binary_search_rec(a_list[:midpoint], item)\n else:\n return binary_search_rec(a_list[midpoint + 1:], item)\n\n\ndef binary_search_recursive(my_list, search_elt, start_time=time.time):\n start_time = time.time()\n return time.time() - start_time, binary_search_rec(my_list, search_elt)\n\n\n<mask token>\n\n\ndef functionTimerAggregator(timeAggregator, fn, amt_of_nb, rnd_list):\n fn_name, fn_function, fn_list_indx = fn\n timing, _ = fn_function(rnd_list[fn_list_indx], -1)\n if amt_of_nb not in timeAggregator:\n timeAggregator[amt_of_nb] = {}\n if fn_name not in timeAggregator[amt_of_nb]:\n timeAggregator[amt_of_nb][fn_name] = 0\n timeAggregator[amt_of_nb][fn_name] += timing\n\n\ndef printTimerAggregator(timeAggregator, list_size):\n for amount_of_number, fn_type in timeAggregator.iteritems():\n print('For %s size of list:' % amount_of_number)\n for fn_name, consumedTime in fn_type.iteritems():\n print('\\t%s took %10.7f seconds to run, on average' % (fn_name,\n consumedTime / list_size))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef sequential_search(my_list, search_elt):\n found = False\n start_time = time.time()\n for elt in my_list:\n if search_elt == elt:\n found = True\n break\n return time.time() - start_time, found\n\n\ndef ordered_sequential_search(my_list, search_elt):\n found = False\n start_time = time.time()\n for elt in my_list:\n if search_elt == elt:\n found = True\n break\n elif search_elt > elt:\n break\n return time.time() - start_time, found\n\n\ndef binary_search_iterative(my_list, search_elt):\n first = 0\n last = len(my_list) - 1\n found = False\n start_time = time.time()\n while first <= last and not found:\n midpoint = (first + last) // 2\n if my_list[midpoint] == search_elt:\n found = True\n elif search_elt < my_list[midpoint]:\n last = midpoint - 1\n else:\n first = midpoint + 1\n return time.time() - start_time, found\n\n\ndef binary_search_rec(a_list, item):\n if len(a_list) == 0:\n return False\n else:\n midpoint = len(a_list) // 2\n if a_list[midpoint] == item:\n return True\n elif item < a_list[midpoint]:\n return binary_search_rec(a_list[:midpoint], item)\n else:\n return binary_search_rec(a_list[midpoint + 1:], item)\n\n\ndef binary_search_recursive(my_list, search_elt, start_time=time.time):\n start_time = time.time()\n return time.time() - start_time, binary_search_rec(my_list, search_elt)\n\n\n<mask token>\n\n\ndef functionTimerAggregator(timeAggregator, fn, amt_of_nb, rnd_list):\n fn_name, fn_function, fn_list_indx = fn\n timing, _ = fn_function(rnd_list[fn_list_indx], -1)\n if amt_of_nb not in timeAggregator:\n timeAggregator[amt_of_nb] = {}\n if fn_name not in timeAggregator[amt_of_nb]:\n timeAggregator[amt_of_nb][fn_name] = 0\n timeAggregator[amt_of_nb][fn_name] += timing\n\n\ndef printTimerAggregator(timeAggregator, list_size):\n for amount_of_number, fn_type in timeAggregator.iteritems():\n print('For %s size of list:' % amount_of_number)\n for fn_name, consumedTime in fn_type.iteritems():\n print('\\t%s took %10.7f 
seconds to run, on average' % (fn_name,\n consumedTime / list_size))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef sequential_search(my_list, search_elt):\n found = False\n start_time = time.time()\n for elt in my_list:\n if search_elt == elt:\n found = True\n break\n return time.time() - start_time, found\n\n\ndef ordered_sequential_search(my_list, search_elt):\n found = False\n start_time = time.time()\n for elt in my_list:\n if search_elt == elt:\n found = True\n break\n elif search_elt > elt:\n break\n return time.time() - start_time, found\n\n\ndef binary_search_iterative(my_list, search_elt):\n first = 0\n last = len(my_list) - 1\n found = False\n start_time = time.time()\n while first <= last and not found:\n midpoint = (first + last) // 2\n if my_list[midpoint] == search_elt:\n found = True\n elif search_elt < my_list[midpoint]:\n last = midpoint - 1\n else:\n first = midpoint + 1\n return time.time() - start_time, found\n\n\ndef binary_search_rec(a_list, item):\n if len(a_list) == 0:\n return False\n else:\n midpoint = len(a_list) // 2\n if a_list[midpoint] == item:\n return True\n elif item < a_list[midpoint]:\n return binary_search_rec(a_list[:midpoint], item)\n else:\n return binary_search_rec(a_list[midpoint + 1:], item)\n\n\ndef binary_search_recursive(my_list, search_elt, start_time=time.time):\n start_time = time.time()\n return time.time() - start_time, binary_search_rec(my_list, search_elt)\n\n\ndef generate_random_nb_my_list(nb, amount_my_list, maxNumber=sys.maxint):\n return [[random.randint(0, maxNumber) for _ in range(nb)] for _ in\n range(amount_my_list)]\n\n\ndef functionTimerAggregator(timeAggregator, fn, amt_of_nb, rnd_list):\n fn_name, fn_function, fn_list_indx = fn\n timing, _ = fn_function(rnd_list[fn_list_indx], -1)\n if amt_of_nb not in timeAggregator:\n timeAggregator[amt_of_nb] = {}\n if fn_name not in timeAggregator[amt_of_nb]:\n timeAggregator[amt_of_nb][fn_name] = 0\n timeAggregator[amt_of_nb][fn_name] += timing\n\n\ndef printTimerAggregator(timeAggregator, list_size):\n for amount_of_number, fn_type in 
timeAggregator.iteritems():\n print('For %s size of list:' % amount_of_number)\n for fn_name, consumedTime in fn_type.iteritems():\n print('\\t%s took %10.7f seconds to run, on average' % (fn_name,\n consumedTime / list_size))\n\n\nif __name__ == '__main__':\n timeAggregator = {}\n amount_of_numbers = [500, 1000, 10000]\n function_list = [('Sequential Search', sequential_search, 0), (\n 'Ordered Sequential Search', ordered_sequential_search, 1), (\n 'Binary Search Iterative', binary_search_iterative, 1), (\n 'Binary Search Recursive', binary_search_recursive, 1)]\n list_size = 100\n for amount_of_number in amount_of_numbers:\n my_randoms = generate_random_nb_my_list(amount_of_number, list_size)\n for unsorted_list in my_randoms:\n sorted_list = unsorted_list[:]\n sorted_list.sort()\n for fn in function_list:\n functionTimerAggregator(timeAggregator, fn,\n amount_of_number, (unsorted_list, sorted_list))\n printTimerAggregator(timeAggregator, list_size)\n",
"step-4": "import time\nimport random\nimport sys\n\n\ndef sequential_search(my_list, search_elt):\n found = False\n start_time = time.time()\n for elt in my_list:\n if search_elt == elt:\n found = True\n break\n return time.time() - start_time, found\n\n\ndef ordered_sequential_search(my_list, search_elt):\n found = False\n start_time = time.time()\n for elt in my_list:\n if search_elt == elt:\n found = True\n break\n elif search_elt > elt:\n break\n return time.time() - start_time, found\n\n\ndef binary_search_iterative(my_list, search_elt):\n first = 0\n last = len(my_list) - 1\n found = False\n start_time = time.time()\n while first <= last and not found:\n midpoint = (first + last) // 2\n if my_list[midpoint] == search_elt:\n found = True\n elif search_elt < my_list[midpoint]:\n last = midpoint - 1\n else:\n first = midpoint + 1\n return time.time() - start_time, found\n\n\ndef binary_search_rec(a_list, item):\n if len(a_list) == 0:\n return False\n else:\n midpoint = len(a_list) // 2\n if a_list[midpoint] == item:\n return True\n elif item < a_list[midpoint]:\n return binary_search_rec(a_list[:midpoint], item)\n else:\n return binary_search_rec(a_list[midpoint + 1:], item)\n\n\ndef binary_search_recursive(my_list, search_elt, start_time=time.time):\n start_time = time.time()\n return time.time() - start_time, binary_search_rec(my_list, search_elt)\n\n\ndef generate_random_nb_my_list(nb, amount_my_list, maxNumber=sys.maxint):\n return [[random.randint(0, maxNumber) for _ in range(nb)] for _ in\n range(amount_my_list)]\n\n\ndef functionTimerAggregator(timeAggregator, fn, amt_of_nb, rnd_list):\n fn_name, fn_function, fn_list_indx = fn\n timing, _ = fn_function(rnd_list[fn_list_indx], -1)\n if amt_of_nb not in timeAggregator:\n timeAggregator[amt_of_nb] = {}\n if fn_name not in timeAggregator[amt_of_nb]:\n timeAggregator[amt_of_nb][fn_name] = 0\n timeAggregator[amt_of_nb][fn_name] += timing\n\n\ndef printTimerAggregator(timeAggregator, list_size):\n for 
amount_of_number, fn_type in timeAggregator.iteritems():\n print('For %s size of list:' % amount_of_number)\n for fn_name, consumedTime in fn_type.iteritems():\n print('\\t%s took %10.7f seconds to run, on average' % (fn_name,\n consumedTime / list_size))\n\n\nif __name__ == '__main__':\n timeAggregator = {}\n amount_of_numbers = [500, 1000, 10000]\n function_list = [('Sequential Search', sequential_search, 0), (\n 'Ordered Sequential Search', ordered_sequential_search, 1), (\n 'Binary Search Iterative', binary_search_iterative, 1), (\n 'Binary Search Recursive', binary_search_recursive, 1)]\n list_size = 100\n for amount_of_number in amount_of_numbers:\n my_randoms = generate_random_nb_my_list(amount_of_number, list_size)\n for unsorted_list in my_randoms:\n sorted_list = unsorted_list[:]\n sorted_list.sort()\n for fn in function_list:\n functionTimerAggregator(timeAggregator, fn,\n amount_of_number, (unsorted_list, sorted_list))\n printTimerAggregator(timeAggregator, list_size)\n",
"step-5": "#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\nimport time\r\nimport random\r\nimport sys\r\n\r\ndef sequential_search(my_list, search_elt):\r\n\tfound = False\r\n\tstart_time = time.time()\r\n\tfor elt in my_list:\r\n\t\tif search_elt == elt:\r\n\t\t\tfound = True\r\n\t\t\tbreak\r\n\treturn (time.time() - start_time), found\r\n\r\ndef ordered_sequential_search(my_list, search_elt):\r\n\tfound = False\r\n\tstart_time = time.time()\r\n\tfor elt in my_list:\r\n\t\tif search_elt == elt:\r\n\t\t\tfound = True\r\n\t\t\tbreak\r\n\t\telif search_elt > elt:\r\n\t\t\tbreak\r\n\treturn (time.time() - start_time), found\r\n\r\ndef binary_search_iterative(my_list, search_elt):\r\n\tfirst = 0\r\n\tlast = len(my_list) - 1\r\n\tfound = False\r\n\r\n\tstart_time = time.time()\r\n\twhile first <= last and not found:\r\n\t\tmidpoint = (first + last) // 2\r\n\t\tif my_list[midpoint] == search_elt:\r\n\t\t\tfound = True\r\n\t\telif search_elt < my_list[midpoint]:\r\n\t\t\tlast = midpoint - 1\r\n\t\telse:\r\n\t\t\tfirst = midpoint + 1\r\n\r\n\treturn (time.time() - start_time), found\r\n\r\ndef binary_search_rec(a_list, item):\r\n\tif len(a_list) == 0:\r\n\t\treturn False\r\n\telse:\r\n\t\tmidpoint = len(a_list) // 2\r\n\t\tif a_list[midpoint] == item:\r\n\t\t\treturn True\r\n\t\telif item < a_list[midpoint]:\r\n\t\t\treturn binary_search_rec(a_list[:midpoint], item)\r\n\t\telse:\r\n\t\t\treturn binary_search_rec(a_list[midpoint + 1:], item)\r\n\r\ndef binary_search_recursive(my_list, search_elt, start_time = time.time):\r\n\tstart_time = time.time()\r\n\treturn (time.time() - start_time), binary_search_rec(my_list, search_elt)\r\n\r\ndef generate_random_nb_my_list(nb, amount_my_list, maxNumber = sys.maxint):\r\n\treturn [\r\n\t\t[random.randint(0, maxNumber) for _ in range (nb)]\r\n\t\t\tfor _ in range (amount_my_list)\r\n\t]\r\n\r\ndef functionTimerAggregator(timeAggregator, fn, amt_of_nb, rnd_list):\r\n\t(fn_name, fn_function, fn_list_indx) = fn\r\n\t(timing, _) = 
fn_function(rnd_list[fn_list_indx], -1)\r\n\r\n\tif amt_of_nb not in timeAggregator:\r\n\t\ttimeAggregator[amt_of_nb] = {}\r\n\tif fn_name not in timeAggregator[amt_of_nb]:\r\n\t\ttimeAggregator[amt_of_nb][fn_name] = 0\t\r\n\ttimeAggregator[amt_of_nb][fn_name] += timing\r\n\r\ndef printTimerAggregator(timeAggregator, list_size):\r\n\tfor amount_of_number, fn_type in timeAggregator.iteritems():\r\n\t\tprint('For %s size of list:' % amount_of_number)\r\n\t\tfor fn_name, consumedTime in fn_type.iteritems():\r\n\t\t\tprint('\\t%s took %10.7f seconds to run, on average'\r\n\t\t\t\t% (fn_name, consumedTime / list_size))\r\n\r\nif __name__ == '__main__':\r\n\ttimeAggregator = {}\r\n\tamount_of_numbers = [500, 1000, 10000]\r\n\tfunction_list = [\r\n\t\t('Sequential Search', sequential_search, 0),\r\n\t\t('Ordered Sequential Search', ordered_sequential_search, 1),\r\n\t\t('Binary Search Iterative', binary_search_iterative, 1),\r\n\t\t('Binary Search Recursive', binary_search_recursive, 1),\r\n\t]\r\n\tlist_size = 100\r\n\r\n\tfor amount_of_number in amount_of_numbers:\r\n\t\tmy_randoms = generate_random_nb_my_list(amount_of_number, list_size)\r\n\t\tfor unsorted_list in my_randoms:\r\n\t\t\tsorted_list = unsorted_list[:]\r\n\t\t\tsorted_list.sort()\r\n\r\n\t\t\tfor fn in function_list:\r\n\t\t\t\tfunctionTimerAggregator(\r\n\t\t\t\t\ttimeAggregator, fn, amount_of_number,\r\n\t\t\t\t\t(unsorted_list, sorted_list))\r\n\r\n\tprintTimerAggregator(timeAggregator, list_size)\r\n\t",
"step-ids": [
6,
7,
9,
10,
11
]
}
|
[
6,
7,
9,
10,
11
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.