code (string, 13–1.2M chars) | order_type (string, 1 distinct value) | original_example (dict) | step_ids (list, 1–5 items)
---|---|---|---|
from package.pack import *
add(2, 2)
sub(2, 3)
|
normal
|
{
"blob_id": "9583a97ae4b1fbf5ecdf33d848b13bf0b28d2eb4",
"index": 2452,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadd(2, 2)\nsub(2, 3)\n",
"step-3": "from package.pack import *\nadd(2, 2)\nsub(2, 3)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
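The first record only exercises two helpers from package.pack, which is not included in the dataset. A minimal sketch of what such a module could contain so the snippet runs; the two arithmetic helpers are assumptions, not part of the record:

# package/pack.py -- hypothetical module, sketched so the snippet above runs
def add(a, b):
    """Return the sum of a and b."""
    return a + b

def sub(a, b):
    """Return a minus b."""
    return a - b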
"""
Given a sentence as `txt`, return `True` if any two adjacent words have this
property: One word ends with a vowel, while the word immediately after begins
with a vowel (a e i o u).
### Examples
vowel_links("a very large appliance") ➞ True
vowel_links("go to edabit") ➞ True
vowel_links("an open fire") ➞ False
vowel_links("a sudden applause") ➞ False
### Notes
You can expect sentences in only lowercase, with no punctuation.
"""
def vowel_links(txt):
    import re
    lst = txt.split(' ')
    for i in range(len(lst) - 1):
        if re.search("[aeiou]", lst[i][-1]) and re.search("[aeiou]", lst[i + 1][0]):
            return True
    return False
|
normal
|
{
"blob_id": "eefd94e7c04896cd6265bbacd624bf7e670be445",
"index": 4347,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef vowel_links(txt):\n import re\n lst = txt.split(' ')\n for i in range(len(lst) - 1):\n if re.search('[aeiou]', lst[i][-1]) and re.search('[aeiou]', lst[i +\n 1][0]):\n return True\n return False\n",
"step-3": "\"\"\"\r\n\n\nGiven a sentence as `txt`, return `True` if any two adjacent words have this\nproperty: One word ends with a vowel, while the word immediately after begins\nwith a vowel (a e i o u).\n\n### Examples\n\n vowel_links(\"a very large appliance\") ➞ True\n \n vowel_links(\"go to edabit\") ➞ True\n \n vowel_links(\"an open fire\") ➞ False\n \n vowel_links(\"a sudden applause\") ➞ False\n\n### Notes\n\nYou can expect sentences in only lowercase, with no punctuation.\n\n\"\"\"\r\n\ndef vowel_links(txt):\n import re\n lst=txt.split(' ')\n for i in range(len(lst)-1):\n if re.search(\"[aeiou]\",lst[i][-1])and re.search(\"[aeiou]\",lst[i+1][0]):\n return True\n return False\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
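Since the pattern is a single character class, the re module is more machinery than this check needs. An equivalent sketch using plain membership tests, with the four documented examples as assertions:

def vowel_links(txt):
    words = txt.split()
    vowels = set('aeiou')
    # True when some word ends with a vowel and the next one begins with one.
    return any(a[-1] in vowels and b[0] in vowels
               for a, b in zip(words, words[1:]))

assert vowel_links("a very large appliance") is True
assert vowel_links("go to edabit") is True
assert vowel_links("an open fire") is False
assert vowel_links("a sudden applause") is False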
# from __future__ import annotations
from typing import List,Union,Tuple,Dict,Set
import sys
input = sys.stdin.readline
# from collections import defaultdict,deque
# from itertools import permutations,combinations
# from bisect import bisect_left,bisect_right
import heapq
# sys.setrecursionlimit(10**5)
# class UnionFind():
# def __init__(self, N:int):
# self.par = [-1]*N
# self.size = [1]*N
# def root(self, x:int):
# if self.par[x] == -1: return x
# self.par[x] = self.root(self.par[x])
# self.size[x] = 1
# return self.par[x]
# def unite(self, x:int, y:int):
# rx,ry = self.root(x), self.root(y)
# if rx == ry: return False
# if self.size[rx] >= self.size[ry]:
# self.par[ry] = rx
# self.size[rx] += self.size[ry]
# else:
# self.par[rx] = ry
# self.size[ry] += self.size[rx]
# return True
# #! Kruskal's algorithm
# def main():
# N,M,K = map(int, input().split())
# edges = []
# for _ in range(M):
# a,b,c = map(int, input().split())
# a -= 1
# b -= 1
# edges.append((c,a,b))
# edges.sort()
# unionfind = UnionFind(N)
# ans = 0
# used = []
# for c,a,b in edges:
# if unionfind.unite(a,b):
# ans += c
# used.append(c)
# for i in range(1,K):
# ans -= used[-i]
# print(ans)
#! Prim's algorithm
def main():
N,M,K = map(int, input().split())
G = [[] for _ in range(N)]
for _ in range(M):
a,b,c = map(int, input().split())
a -= 1
b -= 1
G[a].append((c,b))
G[b].append((c,a))
ans = 0
used = []
marked = [False]*N
    Q = []  #* (toll fee, city)
heapq.heappush(Q,(0,0))
while len(Q)>0:
fee,curr = heapq.heappop(Q)
if marked[curr]: continue
marked[curr] = True
ans += fee
used.append(fee)
for nxt_fee,nxt in G[curr]:
if marked[nxt]: continue
heapq.heappush(Q, (nxt_fee,nxt))
used.sort()
for i in range(1,K):
ans -= used[-i]
print(ans)
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "13b2e05f12c6d0cd91e89f01e7eef610b1e99856",
"index": 9158,
"step-1": "<mask token>\n\n\ndef main():\n N, M, K = map(int, input().split())\n G = [[] for _ in range(N)]\n for _ in range(M):\n a, b, c = map(int, input().split())\n a -= 1\n b -= 1\n G[a].append((c, b))\n G[b].append((c, a))\n ans = 0\n used = []\n marked = [False] * N\n Q = []\n heapq.heappush(Q, (0, 0))\n while len(Q) > 0:\n fee, curr = heapq.heappop(Q)\n if marked[curr]:\n continue\n marked[curr] = True\n ans += fee\n used.append(fee)\n for nxt_fee, nxt in G[curr]:\n if marked[nxt]:\n continue\n heapq.heappush(Q, (nxt_fee, nxt))\n used.sort()\n for i in range(1, K):\n ans -= used[-i]\n print(ans)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n N, M, K = map(int, input().split())\n G = [[] for _ in range(N)]\n for _ in range(M):\n a, b, c = map(int, input().split())\n a -= 1\n b -= 1\n G[a].append((c, b))\n G[b].append((c, a))\n ans = 0\n used = []\n marked = [False] * N\n Q = []\n heapq.heappush(Q, (0, 0))\n while len(Q) > 0:\n fee, curr = heapq.heappop(Q)\n if marked[curr]:\n continue\n marked[curr] = True\n ans += fee\n used.append(fee)\n for nxt_fee, nxt in G[curr]:\n if marked[nxt]:\n continue\n heapq.heappush(Q, (nxt_fee, nxt))\n used.sort()\n for i in range(1, K):\n ans -= used[-i]\n print(ans)\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\ninput = sys.stdin.readline\n<mask token>\n\n\ndef main():\n N, M, K = map(int, input().split())\n G = [[] for _ in range(N)]\n for _ in range(M):\n a, b, c = map(int, input().split())\n a -= 1\n b -= 1\n G[a].append((c, b))\n G[b].append((c, a))\n ans = 0\n used = []\n marked = [False] * N\n Q = []\n heapq.heappush(Q, (0, 0))\n while len(Q) > 0:\n fee, curr = heapq.heappop(Q)\n if marked[curr]:\n continue\n marked[curr] = True\n ans += fee\n used.append(fee)\n for nxt_fee, nxt in G[curr]:\n if marked[nxt]:\n continue\n heapq.heappush(Q, (nxt_fee, nxt))\n used.sort()\n for i in range(1, K):\n ans -= used[-i]\n print(ans)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from typing import List, Union, Tuple, Dict, Set\nimport sys\ninput = sys.stdin.readline\nimport heapq\n\n\ndef main():\n N, M, K = map(int, input().split())\n G = [[] for _ in range(N)]\n for _ in range(M):\n a, b, c = map(int, input().split())\n a -= 1\n b -= 1\n G[a].append((c, b))\n G[b].append((c, a))\n ans = 0\n used = []\n marked = [False] * N\n Q = []\n heapq.heappush(Q, (0, 0))\n while len(Q) > 0:\n fee, curr = heapq.heappop(Q)\n if marked[curr]:\n continue\n marked[curr] = True\n ans += fee\n used.append(fee)\n for nxt_fee, nxt in G[curr]:\n if marked[nxt]:\n continue\n heapq.heappush(Q, (nxt_fee, nxt))\n used.sort()\n for i in range(1, K):\n ans -= used[-i]\n print(ans)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "# from __future__ import annotations\nfrom typing import List,Union,Tuple,Dict,Set\nimport sys\ninput = sys.stdin.readline\n# from collections import defaultdict,deque\n# from itertools import permutations,combinations\n# from bisect import bisect_left,bisect_right\nimport heapq\n# sys.setrecursionlimit(10**5)\n\n# class UnionFind():\n\n# def __init__(self, N:int):\n# self.par = [-1]*N\n# self.size = [1]*N\n\n# def root(self, x:int):\n# if self.par[x] == -1: return x\n# self.par[x] = self.root(self.par[x])\n# self.size[x] = 1\n# return self.par[x]\n\n# def unite(self, x:int, y:int):\n# rx,ry = self.root(x), self.root(y)\n# if rx == ry: return False\n# if self.size[rx] >= self.size[ry]:\n# self.par[ry] = rx\n# self.size[rx] += self.size[ry]\n# else:\n# self.par[rx] = ry\n# self.size[ry] += self.size[rx]\n# return True\n\n# #! クラスカル法\n# def main():\n# N,M,K = map(int, input().split())\n# edges = []\n# for _ in range(M):\n# a,b,c = map(int, input().split())\n# a -= 1\n# b -= 1\n# edges.append((c,a,b))\n# edges.sort()\n# unionfind = UnionFind(N)\n# ans = 0\n# used = []\n# for c,a,b in edges:\n# if unionfind.unite(a,b):\n# ans += c\n# used.append(c)\n\n# for i in range(1,K):\n# ans -= used[-i]\n\n# print(ans)\n\n#! プリム法\ndef main():\n N,M,K = map(int, input().split())\n G = [[] for _ in range(N)]\n for _ in range(M):\n a,b,c = map(int, input().split())\n a -= 1\n b -= 1\n G[a].append((c,b))\n G[b].append((c,a))\n\n ans = 0\n used = []\n\n marked = [False]*N\n Q = [] #* (通行料金,都市)\n heapq.heappush(Q,(0,0))\n while len(Q)>0:\n fee,curr = heapq.heappop(Q)\n if marked[curr]: continue\n marked[curr] = True\n ans += fee\n used.append(fee)\n for nxt_fee,nxt in G[curr]:\n if marked[nxt]: continue\n heapq.heappush(Q, (nxt_fee,nxt))\n\n used.sort()\n for i in range(1,K):\n ans -= used[-i]\n\n print(ans)\n\nif __name__ == '__main__':\n main()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
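The active main() above builds a minimum spanning tree with Prim's algorithm and then waives the K-1 most expensive chosen edges (a "K free tickets" variant; the commented-out block solves the same task via Kruskal's algorithm). A self-contained sketch of the same idea with an in-memory graph instead of stdin; the triangle test graph is made up:

import heapq

def min_cost_with_free_edges(N, edges, K):
    # Build an undirected adjacency list; edges are (a, b, cost) triples.
    G = [[] for _ in range(N)]
    for a, b, c in edges:
        G[a].append((c, b))
        G[b].append((c, a))
    used = []                 # fees of the edges taken into the tree
    marked = [False] * N
    Q = [(0, 0)]              # (fee, city), starting from city 0
    while Q:
        fee, curr = heapq.heappop(Q)
        if marked[curr]:
            continue
        marked[curr] = True
        used.append(fee)
        for nxt_fee, nxt in G[curr]:
            if not marked[nxt]:
                heapq.heappush(Q, (nxt_fee, nxt))
    used.sort()
    # The K-1 most expensive tree edges become free.
    return sum(used[:-(K - 1)]) if K > 1 else sum(used)

# Triangle graph: the MST takes the cost-1 and cost-2 edges; with K = 2
# the cost-2 edge is free, leaving a total of 1.
assert min_cost_with_free_edges(3, [(0, 1, 1), (1, 2, 2), (0, 2, 3)], 2) == 1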
# Python program to count number of digits in a number.
# print len(str(input('Enter No.: ')))
num = input("Enter no.: ")
i = 1
while num / 10:
num = num / 10
i += 1
if num < 10:
break
print i
|
normal
|
{
"blob_id": "37748e3dd17f2bdf05bb28b4dfded12de97e37e4",
"index": 9619,
"step-1": "# Python program to count number of digits in a number.\n\n# print len(str(input('Enter No.: ')))\n\nnum = input(\"Enter no.: \")\n\ni = 1\nwhile num / 10:\n num = num / 10\n i += 1\n if num < 10:\n break\nprint i\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
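This record is Python 2: input() evaluates to an int, / is integer division, and print is a statement. A straight Python 3 port of the digit-count loop, as a sketch; the early break is dropped because the while condition already stops once num falls below 10:

num = int(input("Enter no.: "))   # Python 3: input() returns a str
i = 1
while num // 10:                  # floor division replaces Python 2's /
    num //= 10
    i += 1
print(i)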
"""Seed file to make sample data for pets db."""
from models import db, User, Feedback
from app import app
# Create all tables
db.drop_all()
db.create_all()
# If table isn't empty, empty it
User.query.delete()
Feedback.query.delete()
# Add users and posts
john = User(username="John",password="123",email="24",first_name="12a",last_name="123")
# Add new objects to session, so they'll persist
db.session.add(john)
#have to add users first to not violate foreign key constraints
db.session.commit()
feed = Feedback(title="test",content="alsdkjf",username="John")
db.session.add(feed)
# Commit--otherwise, this never gets saved!
db.session.commit()
|
normal
|
{
"blob_id": "d520f9d681125937fbd9dff316bdc5f922f25ff3",
"index": 8050,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndb.drop_all()\ndb.create_all()\nUser.query.delete()\nFeedback.query.delete()\n<mask token>\ndb.session.add(john)\ndb.session.commit()\n<mask token>\ndb.session.add(feed)\ndb.session.commit()\n",
"step-3": "<mask token>\ndb.drop_all()\ndb.create_all()\nUser.query.delete()\nFeedback.query.delete()\njohn = User(username='John', password='123', email='24', first_name='12a',\n last_name='123')\ndb.session.add(john)\ndb.session.commit()\nfeed = Feedback(title='test', content='alsdkjf', username='John')\ndb.session.add(feed)\ndb.session.commit()\n",
"step-4": "<mask token>\nfrom models import db, User, Feedback\nfrom app import app\ndb.drop_all()\ndb.create_all()\nUser.query.delete()\nFeedback.query.delete()\njohn = User(username='John', password='123', email='24', first_name='12a',\n last_name='123')\ndb.session.add(john)\ndb.session.commit()\nfeed = Feedback(title='test', content='alsdkjf', username='John')\ndb.session.add(feed)\ndb.session.commit()\n",
"step-5": "\"\"\"Seed file to make sample data for pets db.\"\"\"\n\nfrom models import db, User, Feedback\nfrom app import app\n\n# Create all tables\ndb.drop_all()\ndb.create_all()\n\n# If table isn't empty, empty it\nUser.query.delete()\nFeedback.query.delete()\n\n\n# Add users and posts\njohn = User(username=\"John\",password=\"123\",email=\"24\",first_name=\"12a\",last_name=\"123\")\n\n# Add new objects to session, so they'll persist\ndb.session.add(john)\n\n\n#have to add users first to not violate foreign key constraints\ndb.session.commit()\n\nfeed = Feedback(title=\"test\",content=\"alsdkjf\",username=\"John\")\n\ndb.session.add(feed)\n\n\n# Commit--otherwise, this never gets saved!\ndb.session.commit()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
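The seed script imports db, User, and Feedback from a models module that is not part of the record. A plausible Flask-SQLAlchemy sketch of that module, with column names inferred from the constructor calls above and everything else assumed:

from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()

class User(db.Model):
    # Hypothetical columns, inferred from User(username=..., password=...,
    # email=..., first_name=..., last_name=...) in the seed script.
    username = db.Column(db.String(20), primary_key=True)
    password = db.Column(db.Text, nullable=False)
    email = db.Column(db.String(50), nullable=False)
    first_name = db.Column(db.String(30), nullable=False)
    last_name = db.Column(db.String(30), nullable=False)

class Feedback(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(100), nullable=False)
    content = db.Column(db.Text, nullable=False)
    username = db.Column(db.String(20), db.ForeignKey('user.username'))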
import pytest
from components import models
pytestmark = pytest.mark.django_db
def test_app_models():
assert models.ComponentsApp.allowed_subpage_models() == [
models.ComponentsApp,
models.BannerComponent,
]
def test_app_required_translatable_fields():
assert models.ComponentsApp.get_required_translatable_fields() == []
@pytest.mark.django_db
def test_set_slug(en_locale):
instance = models.ComponentsApp.objects.create(
title_en_gb='the app',
depth=2,
path='/thing',
)
assert instance.slug == models.ComponentsApp.slug_identity
|
normal
|
{
"blob_id": "b1622aa65422fcb69a16ad48a26fd9ed05b10382",
"index": 8882,
"step-1": "<mask token>\n\n\ndef test_app_models():\n assert models.ComponentsApp.allowed_subpage_models() == [models.\n ComponentsApp, models.BannerComponent]\n\n\n<mask token>\n\n\[email protected]_db\ndef test_set_slug(en_locale):\n instance = models.ComponentsApp.objects.create(title_en_gb='the app',\n depth=2, path='/thing')\n assert instance.slug == models.ComponentsApp.slug_identity\n",
"step-2": "<mask token>\n\n\ndef test_app_models():\n assert models.ComponentsApp.allowed_subpage_models() == [models.\n ComponentsApp, models.BannerComponent]\n\n\ndef test_app_required_translatable_fields():\n assert models.ComponentsApp.get_required_translatable_fields() == []\n\n\[email protected]_db\ndef test_set_slug(en_locale):\n instance = models.ComponentsApp.objects.create(title_en_gb='the app',\n depth=2, path='/thing')\n assert instance.slug == models.ComponentsApp.slug_identity\n",
"step-3": "<mask token>\npytestmark = pytest.mark.django_db\n\n\ndef test_app_models():\n assert models.ComponentsApp.allowed_subpage_models() == [models.\n ComponentsApp, models.BannerComponent]\n\n\ndef test_app_required_translatable_fields():\n assert models.ComponentsApp.get_required_translatable_fields() == []\n\n\[email protected]_db\ndef test_set_slug(en_locale):\n instance = models.ComponentsApp.objects.create(title_en_gb='the app',\n depth=2, path='/thing')\n assert instance.slug == models.ComponentsApp.slug_identity\n",
"step-4": "import pytest\nfrom components import models\npytestmark = pytest.mark.django_db\n\n\ndef test_app_models():\n assert models.ComponentsApp.allowed_subpage_models() == [models.\n ComponentsApp, models.BannerComponent]\n\n\ndef test_app_required_translatable_fields():\n assert models.ComponentsApp.get_required_translatable_fields() == []\n\n\[email protected]_db\ndef test_set_slug(en_locale):\n instance = models.ComponentsApp.objects.create(title_en_gb='the app',\n depth=2, path='/thing')\n assert instance.slug == models.ComponentsApp.slug_identity\n",
"step-5": "import pytest\n\nfrom components import models\n\npytestmark = pytest.mark.django_db\n\n\ndef test_app_models():\n assert models.ComponentsApp.allowed_subpage_models() == [\n models.ComponentsApp,\n models.BannerComponent,\n ]\n\n\ndef test_app_required_translatable_fields():\n assert models.ComponentsApp.get_required_translatable_fields() == []\n\n\[email protected]_db\ndef test_set_slug(en_locale):\n instance = models.ComponentsApp.objects.create(\n title_en_gb='the app',\n depth=2,\n path='/thing',\n )\n\n assert instance.slug == models.ComponentsApp.slug_identity\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
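test_set_slug requests an en_locale fixture defined elsewhere, presumably in a conftest.py. A guess at what it might look like, assuming a Wagtail 3+ project with pytest-django; the Locale model and language code are assumptions, not shown in the record:

import pytest

@pytest.fixture
def en_locale(db):
    # Hypothetical: create the locale that the title_en_gb field implies.
    from wagtail.models import Locale
    return Locale.objects.get_or_create(language_code='en-gb')[0]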
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Author: xurongzhong#126.com (tech-support QQ group: 6089740)
# CreateDate: 2018-3-27
# pillow_rotate.py
import glob
import os
from PIL import Image
def rotate(files, dst, value=90):
for file_ in files:
img = Image.open(file_)
img = img.rotate(value)
name = "{}{}{}".format(dst, os.sep, os.path.basename(file_))
img.save(name)
src = r'/home/andrew/code/tmp_photos'
dst = r'/home/andrew/code/tmp_photos2'
common = glob.glob('{}{}*.*'.format(src, os.sep))
rotate(common, dst)
|
normal
|
{
"blob_id": "cd104eec21be8a59e8fb3bd8ab061dd357fc126a",
"index": 667,
"step-1": "<mask token>\n\n\ndef rotate(files, dst, value=90):\n for file_ in files:\n img = Image.open(file_)\n img = img.rotate(value)\n name = '{}{}{}'.format(dst, os.sep, os.path.basename(file_))\n img.save(name)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef rotate(files, dst, value=90):\n for file_ in files:\n img = Image.open(file_)\n img = img.rotate(value)\n name = '{}{}{}'.format(dst, os.sep, os.path.basename(file_))\n img.save(name)\n\n\n<mask token>\nrotate(common, dst)\n",
"step-3": "<mask token>\n\n\ndef rotate(files, dst, value=90):\n for file_ in files:\n img = Image.open(file_)\n img = img.rotate(value)\n name = '{}{}{}'.format(dst, os.sep, os.path.basename(file_))\n img.save(name)\n\n\nsrc = '/home/andrew/code/tmp_photos'\ndst = '/home/andrew/code/tmp_photos2'\ncommon = glob.glob('{}{}*.*'.format(src, os.sep))\nrotate(common, dst)\n",
"step-4": "import glob\nimport os\nfrom PIL import Image\n\n\ndef rotate(files, dst, value=90):\n for file_ in files:\n img = Image.open(file_)\n img = img.rotate(value)\n name = '{}{}{}'.format(dst, os.sep, os.path.basename(file_))\n img.save(name)\n\n\nsrc = '/home/andrew/code/tmp_photos'\ndst = '/home/andrew/code/tmp_photos2'\ncommon = glob.glob('{}{}*.*'.format(src, os.sep))\nrotate(common, dst)\n",
"step-5": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# Author: xurongzhong#126.com 技术支持qq群:6089740\n# CreateDate: 2018-3-27\n# pillow_rotate.py\nimport glob\nimport os \nfrom PIL import Image\n\ndef rotate(files, dst, value=90):\n for file_ in files:\n img = Image.open(file_)\n img = img.rotate(value)\n name = \"{}{}{}\".format(dst, os.sep, os.path.basename(file_))\n img.save(name)\n\nsrc = r'/home/andrew/code/tmp_photos'\ndst = r'/home/andrew/code/tmp_photos2'\n\ncommon = glob.glob('{}{}*.*'.format(src, os.sep)) \nrotate(common, dst)\n\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
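One caveat with this record: Image.rotate() keeps the original canvas size, so a 90-degree rotation of a non-square photo gets cropped. A variant of the same loop that passes expand=True so the canvas grows to fit; rotate_expand is an illustrative name, not from the record:

import os
from PIL import Image

def rotate_expand(files, dst, value=90):
    for file_ in files:
        img = Image.open(file_)
        # expand=True grows the canvas so rotated corners are not cropped
        img = img.rotate(value, expand=True)
        img.save(os.path.join(dst, os.path.basename(file_)))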
import openpyxl as opx
import pyperclip
from openpyxl import Workbook
from openpyxl.styles import PatternFill
wb = Workbook(write_only=True)
ws = wb.create_sheet()
def parseSeq(lines,seqName):
'''splits each column'''
data = []
for line in lines: data.append(line.split(' '))
'''removes any spaces'''
for i in range(len(data)):
for j in range(data[i].count('')): data[i].remove('')
'''deletes the numbers at beginning of column'''
for i in range(len(data)): del data[i][0]
'''creates a list of lists from dna sequence'''
seqRows = []
for i in range(len(data)):
seqRow = []
seqRow.append(seqName)
for j in range(len(data[i])):
for k in range(len(data[i][j])):
seqRow.append(data[i][j][k])
seqRows.append(seqRow)
return seqRows
seqs = int(input('How many DNA sequences do you want to compare? '))
saveFile = input('What do you want to name the spreadsheet? ')
'''masterList contains each sequence, and each sequence is
broken into rows'''
masterList = []
'''reads files so they can be parsed'''
for i in range(seqs):
print('What is the name of DNA sequence',i+1,end='? ')
name = input('')
file = open(name+'.txt')
info = file.readlines()
masterList.append(parseSeq(info,name))
file.close()
'''sequence that contains the most rows is used for following loop'''
elems = []
for i in range(len(masterList)): elems.append(len(masterList[i]))
bigElem = elems.index(max(elems))
'''adds dna sequence to excel spreadsheet, 60 columns, x rows'''
for row in range(len(masterList[bigElem])):
for seq in range(len(masterList)):
try:
ws.append(masterList[seq][row])
except IndexError:
ws.append([])
ws.append([])
wb.save(saveFile+'.xlsx')
'''color match'''
match = input('Do you want to color match your sequence (y/n)? ')
if match == 'y':
wb = opx.load_workbook(saveFile+'.xlsx')
sheet = wb['Sheet']
ws = wb.active
red = 'FFFF0000'
green = '0000FF00'
blue = 'FF0000FF'
greenFill = PatternFill(start_color=green,
end_color=green,
fill_type='solid')
redFill = PatternFill(start_color=red,
end_color=red,
fill_type='solid')
blueFill = PatternFill(start_color=blue,
end_color=blue,
fill_type='solid')
ws['BK1'] = 'Matched'
ws['BK1'].fill = greenFill
ws['BK2'] = 'Unmatched'
ws['BK2'].fill = blueFill
lastRow = sheet.max_row + 1
end = int(lastRow / (seqs+1))
for section in range(end):
startSec = (seqs+1)*section + 1
endSec = (seqs+1)*section + (seqs+1)
for col in range(2,62):
bp = []
for row in range(startSec,endSec):
cell = sheet.cell(row=row,column=col).value
bp.append(cell)
if bp.count(bp[0]) == seqs:
for row in range(startSec,endSec):
sheet.cell(row=row,column=col).fill = greenFill
else:
for row in range(startSec,endSec):
sheet.cell(row=row,column=col).fill = blueFill
wb.save(saveFile+'.xlsx')
|
normal
|
{
"blob_id": "19e387cb731dad21e5ee50b0a9812df984c13f3b",
"index": 7890,
"step-1": "<mask token>\n\n\ndef parseSeq(lines, seqName):\n \"\"\"splits each column\"\"\"\n data = []\n for line in lines:\n data.append(line.split(' '))\n \"\"\"removes any spaces\"\"\"\n for i in range(len(data)):\n for j in range(data[i].count('')):\n data[i].remove('')\n \"\"\"deletes the numbers at beginning of column\"\"\"\n for i in range(len(data)):\n del data[i][0]\n \"\"\"creates a list of lists from dna sequence\"\"\"\n seqRows = []\n for i in range(len(data)):\n seqRow = []\n seqRow.append(seqName)\n for j in range(len(data[i])):\n for k in range(len(data[i][j])):\n seqRow.append(data[i][j][k])\n seqRows.append(seqRow)\n return seqRows\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef parseSeq(lines, seqName):\n \"\"\"splits each column\"\"\"\n data = []\n for line in lines:\n data.append(line.split(' '))\n \"\"\"removes any spaces\"\"\"\n for i in range(len(data)):\n for j in range(data[i].count('')):\n data[i].remove('')\n \"\"\"deletes the numbers at beginning of column\"\"\"\n for i in range(len(data)):\n del data[i][0]\n \"\"\"creates a list of lists from dna sequence\"\"\"\n seqRows = []\n for i in range(len(data)):\n seqRow = []\n seqRow.append(seqName)\n for j in range(len(data[i])):\n for k in range(len(data[i][j])):\n seqRow.append(data[i][j][k])\n seqRows.append(seqRow)\n return seqRows\n\n\n<mask token>\nfor i in range(seqs):\n print('What is the name of DNA sequence', i + 1, end='? ')\n name = input('')\n file = open(name + '.txt')\n info = file.readlines()\n masterList.append(parseSeq(info, name))\n file.close()\n<mask token>\nfor i in range(len(masterList)):\n elems.append(len(masterList[i]))\n<mask token>\nfor row in range(len(masterList[bigElem])):\n for seq in range(len(masterList)):\n try:\n ws.append(masterList[seq][row])\n except IndexError:\n ws.append([])\n ws.append([])\nwb.save(saveFile + '.xlsx')\n<mask token>\nif match == 'y':\n wb = opx.load_workbook(saveFile + '.xlsx')\n sheet = wb['Sheet']\n ws = wb.active\n red = 'FFFF0000'\n green = '0000FF00'\n blue = 'FF0000FF'\n greenFill = PatternFill(start_color=green, end_color=green, fill_type=\n 'solid')\n redFill = PatternFill(start_color=red, end_color=red, fill_type='solid')\n blueFill = PatternFill(start_color=blue, end_color=blue, fill_type='solid')\n ws['BK1'] = 'Matched'\n ws['BK1'].fill = greenFill\n ws['BK2'] = 'Unmatched'\n ws['BK2'].fill = blueFill\n lastRow = sheet.max_row + 1\n end = int(lastRow / (seqs + 1))\n for section in range(end):\n startSec = (seqs + 1) * section + 1\n endSec = (seqs + 1) * section + (seqs + 1)\n for col in range(2, 62):\n bp = []\n for row in range(startSec, endSec):\n cell = sheet.cell(row=row, column=col).value\n bp.append(cell)\n if bp.count(bp[0]) == seqs:\n for row in range(startSec, endSec):\n sheet.cell(row=row, column=col).fill = greenFill\n else:\n for row in range(startSec, endSec):\n sheet.cell(row=row, column=col).fill = blueFill\n wb.save(saveFile + '.xlsx')\n",
"step-3": "<mask token>\nwb = Workbook(write_only=True)\nws = wb.create_sheet()\n\n\ndef parseSeq(lines, seqName):\n \"\"\"splits each column\"\"\"\n data = []\n for line in lines:\n data.append(line.split(' '))\n \"\"\"removes any spaces\"\"\"\n for i in range(len(data)):\n for j in range(data[i].count('')):\n data[i].remove('')\n \"\"\"deletes the numbers at beginning of column\"\"\"\n for i in range(len(data)):\n del data[i][0]\n \"\"\"creates a list of lists from dna sequence\"\"\"\n seqRows = []\n for i in range(len(data)):\n seqRow = []\n seqRow.append(seqName)\n for j in range(len(data[i])):\n for k in range(len(data[i][j])):\n seqRow.append(data[i][j][k])\n seqRows.append(seqRow)\n return seqRows\n\n\nseqs = int(input('How many DNA sequences do you want to compare? '))\nsaveFile = input('What do you want to name the spreadsheet? ')\n<mask token>\nmasterList = []\n<mask token>\nfor i in range(seqs):\n print('What is the name of DNA sequence', i + 1, end='? ')\n name = input('')\n file = open(name + '.txt')\n info = file.readlines()\n masterList.append(parseSeq(info, name))\n file.close()\n<mask token>\nelems = []\nfor i in range(len(masterList)):\n elems.append(len(masterList[i]))\nbigElem = elems.index(max(elems))\n<mask token>\nfor row in range(len(masterList[bigElem])):\n for seq in range(len(masterList)):\n try:\n ws.append(masterList[seq][row])\n except IndexError:\n ws.append([])\n ws.append([])\nwb.save(saveFile + '.xlsx')\n<mask token>\nmatch = input('Do you want to color match your sequence (y/n)? ')\nif match == 'y':\n wb = opx.load_workbook(saveFile + '.xlsx')\n sheet = wb['Sheet']\n ws = wb.active\n red = 'FFFF0000'\n green = '0000FF00'\n blue = 'FF0000FF'\n greenFill = PatternFill(start_color=green, end_color=green, fill_type=\n 'solid')\n redFill = PatternFill(start_color=red, end_color=red, fill_type='solid')\n blueFill = PatternFill(start_color=blue, end_color=blue, fill_type='solid')\n ws['BK1'] = 'Matched'\n ws['BK1'].fill = greenFill\n ws['BK2'] = 'Unmatched'\n ws['BK2'].fill = blueFill\n lastRow = sheet.max_row + 1\n end = int(lastRow / (seqs + 1))\n for section in range(end):\n startSec = (seqs + 1) * section + 1\n endSec = (seqs + 1) * section + (seqs + 1)\n for col in range(2, 62):\n bp = []\n for row in range(startSec, endSec):\n cell = sheet.cell(row=row, column=col).value\n bp.append(cell)\n if bp.count(bp[0]) == seqs:\n for row in range(startSec, endSec):\n sheet.cell(row=row, column=col).fill = greenFill\n else:\n for row in range(startSec, endSec):\n sheet.cell(row=row, column=col).fill = blueFill\n wb.save(saveFile + '.xlsx')\n",
"step-4": "import openpyxl as opx\nimport pyperclip\nfrom openpyxl import Workbook\nfrom openpyxl.styles import PatternFill\nwb = Workbook(write_only=True)\nws = wb.create_sheet()\n\n\ndef parseSeq(lines, seqName):\n \"\"\"splits each column\"\"\"\n data = []\n for line in lines:\n data.append(line.split(' '))\n \"\"\"removes any spaces\"\"\"\n for i in range(len(data)):\n for j in range(data[i].count('')):\n data[i].remove('')\n \"\"\"deletes the numbers at beginning of column\"\"\"\n for i in range(len(data)):\n del data[i][0]\n \"\"\"creates a list of lists from dna sequence\"\"\"\n seqRows = []\n for i in range(len(data)):\n seqRow = []\n seqRow.append(seqName)\n for j in range(len(data[i])):\n for k in range(len(data[i][j])):\n seqRow.append(data[i][j][k])\n seqRows.append(seqRow)\n return seqRows\n\n\nseqs = int(input('How many DNA sequences do you want to compare? '))\nsaveFile = input('What do you want to name the spreadsheet? ')\n<mask token>\nmasterList = []\n<mask token>\nfor i in range(seqs):\n print('What is the name of DNA sequence', i + 1, end='? ')\n name = input('')\n file = open(name + '.txt')\n info = file.readlines()\n masterList.append(parseSeq(info, name))\n file.close()\n<mask token>\nelems = []\nfor i in range(len(masterList)):\n elems.append(len(masterList[i]))\nbigElem = elems.index(max(elems))\n<mask token>\nfor row in range(len(masterList[bigElem])):\n for seq in range(len(masterList)):\n try:\n ws.append(masterList[seq][row])\n except IndexError:\n ws.append([])\n ws.append([])\nwb.save(saveFile + '.xlsx')\n<mask token>\nmatch = input('Do you want to color match your sequence (y/n)? ')\nif match == 'y':\n wb = opx.load_workbook(saveFile + '.xlsx')\n sheet = wb['Sheet']\n ws = wb.active\n red = 'FFFF0000'\n green = '0000FF00'\n blue = 'FF0000FF'\n greenFill = PatternFill(start_color=green, end_color=green, fill_type=\n 'solid')\n redFill = PatternFill(start_color=red, end_color=red, fill_type='solid')\n blueFill = PatternFill(start_color=blue, end_color=blue, fill_type='solid')\n ws['BK1'] = 'Matched'\n ws['BK1'].fill = greenFill\n ws['BK2'] = 'Unmatched'\n ws['BK2'].fill = blueFill\n lastRow = sheet.max_row + 1\n end = int(lastRow / (seqs + 1))\n for section in range(end):\n startSec = (seqs + 1) * section + 1\n endSec = (seqs + 1) * section + (seqs + 1)\n for col in range(2, 62):\n bp = []\n for row in range(startSec, endSec):\n cell = sheet.cell(row=row, column=col).value\n bp.append(cell)\n if bp.count(bp[0]) == seqs:\n for row in range(startSec, endSec):\n sheet.cell(row=row, column=col).fill = greenFill\n else:\n for row in range(startSec, endSec):\n sheet.cell(row=row, column=col).fill = blueFill\n wb.save(saveFile + '.xlsx')\n",
"step-5": "import openpyxl as opx\r\nimport pyperclip\r\nfrom openpyxl import Workbook\r\nfrom openpyxl.styles import PatternFill\r\nwb = Workbook(write_only=True)\r\nws = wb.create_sheet()\r\n\r\n\r\ndef parseSeq(lines,seqName):\r\n \r\n '''splits each column'''\r\n data = []\r\n for line in lines: data.append(line.split(' '))\r\n '''removes any spaces'''\r\n for i in range(len(data)):\r\n for j in range(data[i].count('')): data[i].remove('')\r\n '''deletes the numbers at beginning of column'''\r\n for i in range(len(data)): del data[i][0]\r\n '''creates a list of lists from dna sequence'''\r\n seqRows = []\r\n for i in range(len(data)):\r\n seqRow = []\r\n seqRow.append(seqName)\r\n for j in range(len(data[i])):\r\n for k in range(len(data[i][j])):\r\n seqRow.append(data[i][j][k])\r\n seqRows.append(seqRow) \r\n return seqRows\r\n\r\nseqs = int(input('How many DNA sequences do you want to compare? '))\r\nsaveFile = input('What do you want to name the spreadsheet? ')\r\n\r\n'''masterList contains each sequence, and each sequence is\r\n broken into rows'''\r\nmasterList = []\r\n'''reads files so they can be parsed'''\r\nfor i in range(seqs):\r\n print('What is the name of DNA sequence',i+1,end='? ')\r\n name = input('')\r\n file = open(name+'.txt')\r\n info = file.readlines()\r\n masterList.append(parseSeq(info,name))\r\n file.close()\r\n\r\n'''sequence that contains the most rows is used for following loop'''\r\nelems = []\r\nfor i in range(len(masterList)): elems.append(len(masterList[i]))\r\nbigElem = elems.index(max(elems))\r\n \r\n'''adds dna sequence to excel spreadsheet, 60 columns, x rows'''\r\nfor row in range(len(masterList[bigElem])):\r\n for seq in range(len(masterList)):\r\n try:\r\n ws.append(masterList[seq][row])\r\n except IndexError:\r\n ws.append([])\r\n ws.append([])\r\n \r\nwb.save(saveFile+'.xlsx')\r\n\r\n'''color match'''\r\nmatch = input('Do you want to color match your sequence (y/n)? ')\r\nif match == 'y':\r\n wb = opx.load_workbook(saveFile+'.xlsx')\r\n sheet = wb['Sheet']\r\n ws = wb.active\r\n\r\n\r\n red = 'FFFF0000'\r\n green = '0000FF00'\r\n blue = 'FF0000FF'\r\n\r\n greenFill = PatternFill(start_color=green,\r\n end_color=green,\r\n fill_type='solid')\r\n redFill = PatternFill(start_color=red,\r\n end_color=red,\r\n fill_type='solid')\r\n blueFill = PatternFill(start_color=blue,\r\n end_color=blue,\r\n fill_type='solid')\r\n\r\n\r\n ws['BK1'] = 'Matched'\r\n ws['BK1'].fill = greenFill\r\n ws['BK2'] = 'Unmatched'\r\n ws['BK2'].fill = blueFill\r\n\r\n lastRow = sheet.max_row + 1\r\n end = int(lastRow / (seqs+1))\r\n\r\n for section in range(end):\r\n startSec = (seqs+1)*section + 1\r\n endSec = (seqs+1)*section + (seqs+1)\r\n for col in range(2,62):\r\n bp = []\r\n for row in range(startSec,endSec):\r\n cell = sheet.cell(row=row,column=col).value\r\n bp.append(cell)\r\n if bp.count(bp[0]) == seqs:\r\n for row in range(startSec,endSec):\r\n sheet.cell(row=row,column=col).fill = greenFill\r\n else:\r\n for row in range(startSec,endSec):\r\n sheet.cell(row=row,column=col).fill = blueFill\r\n wb.save(saveFile+'.xlsx')\r\n\r\n\r\n\r\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
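The script saves the write-only workbook and reopens it with load_workbook for coloring, presumably because write-only worksheets are append-only and cells cannot be revisited. When the style is known at write time, openpyxl's documented WriteOnlyCell allows styling in the single pass; a minimal sketch:

from openpyxl import Workbook
from openpyxl.cell import WriteOnlyCell
from openpyxl.styles import PatternFill

wb = Workbook(write_only=True)
ws = wb.create_sheet()
greenFill = PatternFill(start_color='0000FF00', end_color='0000FF00', fill_type='solid')

cell = WriteOnlyCell(ws, value='A')
cell.fill = greenFill          # style must be attached before the row is appended
ws.append([cell, 'T', 'G'])    # a styled cell mixed with plain values
wb.save('styled.xlsx')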
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
import re
from blessings import Terminal
from validate_email import validate_email
import requests
import sys
_site_ = sys.argv[1]
_saida_ = sys.argv[2]
t = Terminal()
r = requests.get(_site_, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'})
conteudo = r.content
_file_ = open(_saida_, "w")
_filtro_ = re.findall(r'[\w\.-]+@[\w\.-]+', conteudo)
for line in _filtro_:
if validate_email(line, verify=True)==False:
print line, t.red("INVALID!")
else:
print line, t.green("OK!")
_file_.write(line)
_file_.write("\n")
|
normal
|
{
"blob_id": "b52269237d66ea50c453395b9536f25f1310bf2e",
"index": 287,
"step-1": "#!/usr/bin/env python2.7\n# -*- coding: utf-8 -*-\nimport re\nfrom blessings import Terminal\nfrom validate_email import validate_email\nimport requests\nimport sys\n_site_ = sys.argv[1]\n_saida_ = sys.argv[2]\n_file_ = open(_saida_, \"w\")\nt = Terminal()\nr = requests.get(_site_, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'})\nconteudo = r.content\n_file_ = open(_saida_, \"w\")\n_filtro_ = re.findall(r'[\\w\\.-]+@[\\w\\.-]+', conteudo)\nfor line in _filtro_:\n if validate_email(line, verify=True)==False:\n print line, t.red(\"INVALID!\")\n else:\n print line, t.green(\"OK!\")\n _file_.write(line)\n _file_.write(\"\\n\")\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
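Like the digit counter earlier, this record is Python 2 (print statements, implicit bytes/str mixing). A Python 3 sketch of the same scrape-and-validate loop, assuming the same third-party libraries; note it decodes the response with r.text before running the str regex:

#!/usr/bin/env python3
import re
import sys

import requests
from blessings import Terminal
from validate_email import validate_email

site, out_path = sys.argv[1], sys.argv[2]
t = Terminal()
r = requests.get(site, headers={'User-Agent': 'Mozilla/5.0'})
emails = re.findall(r'[\w.-]+@[\w.-]+', r.text)   # r.text is already a decoded str
with open(out_path, 'w') as out:
    for line in emails:
        if not validate_email(line, verify=True):
            print(line, t.red('INVALID!'))
        else:
            print(line, t.green('OK!'))
            out.write(line + '\n')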
# wilfred.py
# Authors
# Stuart C. Larsen (SCL)
# Daryl W. Bennet (DWB)
# Set up three main modules (command, control, reconnaissance),
# and then enter main event loop.
#
# Command:
# Gather mission priorities and objectives, such as turn left, turn right
# goto GPS 45, 65, land, take off.
#
# Control:
# Fly the craft to complete the command objective.
#
# Reconnaissance:
# Gather information about wilfred's current position.
#
# Main Event Loop:
# Check command listing for new updates, check reconnaissance for current
# position, and then control the craft to the correct zone. Main loop will
# be a very fast feedback loop.
import command
import driver
from debug import *
def mainLoop():
wilfredCommunication = command.Command()
wilfredCommunication.waitForClient()
wilfredCommand = command.Command()
while True:
if not wilfredCommunication.checkConnection():
wilfredCommunication.waitForClient()
commands = wilfredCommunication.getCommand()
for commandData in commands.split('\n'):
cmd = commandData.split(' ')[0].strip()
if cmd == "": continue
args = [arg.strip() for arg in commandData.split(' ')[1:]]
# setMotorSpeed (0-3) (0-100)
if cmd == "setMotorSpeed":
motorNum = int(args[0])
motorSpeed = int(args[1])
wilfredCommand.setMotorSpeed(motorNum, motorSpeed)
elif cmd == "playMeow":
goodMessage("wilfred: playing meow from file: ", args[0])
wilfredCommand.playMeow(args[0])
elif cmd == "getAccel":
goodMessage("wilfred: returning acceleration...")
wilfredCommunication.sendMessage("(0, 0, 0)")
else:
errorMessage("wilfred: command not recognized: ", cmd, ": ", args)
if __name__ == "__main__":
mainLoop()
|
normal
|
{
"blob_id": "a77fb90cdc6e7f9b70f9feeefc2b7f8e93a2d8c5",
"index": 9875,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef mainLoop():\n wilfredCommunication = command.Command()\n wilfredCommunication.waitForClient()\n wilfredCommand = command.Command()\n while True:\n if not wilfredCommunication.checkConnection():\n wilfredCommunication.waitForClient()\n commands = wilfredCommunication.getCommand()\n for commandData in commands.split('\\n'):\n cmd = commandData.split(' ')[0].strip()\n if cmd == '':\n continue\n args = [arg.strip() for arg in commandData.split(' ')[1:]]\n if cmd == 'setMotorSpeed':\n motorNum = int(args[0])\n motorSpeed = int(args[1])\n wilfredCommand.setMotorSpeed(motorNum, motorSpeed)\n elif cmd == 'playMeow':\n goodMessage('wilfred: playing meow from file: ', args[0])\n wilfredCommand.playMeow(args[0])\n elif cmd == 'getAccel':\n goodMessage('wilfred: returning acceleration...')\n wilfredCommunication.sendMessage('(0, 0, 0)')\n else:\n errorMessage('wilfred: command not recognized: ', cmd, ': ',\n args)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef mainLoop():\n wilfredCommunication = command.Command()\n wilfredCommunication.waitForClient()\n wilfredCommand = command.Command()\n while True:\n if not wilfredCommunication.checkConnection():\n wilfredCommunication.waitForClient()\n commands = wilfredCommunication.getCommand()\n for commandData in commands.split('\\n'):\n cmd = commandData.split(' ')[0].strip()\n if cmd == '':\n continue\n args = [arg.strip() for arg in commandData.split(' ')[1:]]\n if cmd == 'setMotorSpeed':\n motorNum = int(args[0])\n motorSpeed = int(args[1])\n wilfredCommand.setMotorSpeed(motorNum, motorSpeed)\n elif cmd == 'playMeow':\n goodMessage('wilfred: playing meow from file: ', args[0])\n wilfredCommand.playMeow(args[0])\n elif cmd == 'getAccel':\n goodMessage('wilfred: returning acceleration...')\n wilfredCommunication.sendMessage('(0, 0, 0)')\n else:\n errorMessage('wilfred: command not recognized: ', cmd, ': ',\n args)\n\n\nif __name__ == '__main__':\n mainLoop()\n",
"step-4": "import command\nimport driver\nfrom debug import *\n\n\ndef mainLoop():\n wilfredCommunication = command.Command()\n wilfredCommunication.waitForClient()\n wilfredCommand = command.Command()\n while True:\n if not wilfredCommunication.checkConnection():\n wilfredCommunication.waitForClient()\n commands = wilfredCommunication.getCommand()\n for commandData in commands.split('\\n'):\n cmd = commandData.split(' ')[0].strip()\n if cmd == '':\n continue\n args = [arg.strip() for arg in commandData.split(' ')[1:]]\n if cmd == 'setMotorSpeed':\n motorNum = int(args[0])\n motorSpeed = int(args[1])\n wilfredCommand.setMotorSpeed(motorNum, motorSpeed)\n elif cmd == 'playMeow':\n goodMessage('wilfred: playing meow from file: ', args[0])\n wilfredCommand.playMeow(args[0])\n elif cmd == 'getAccel':\n goodMessage('wilfred: returning acceleration...')\n wilfredCommunication.sendMessage('(0, 0, 0)')\n else:\n errorMessage('wilfred: command not recognized: ', cmd, ': ',\n args)\n\n\nif __name__ == '__main__':\n mainLoop()\n",
"step-5": "# wilfred.py\n# Authors\n# Stuart C. Larsen (SCL)\n# Daryl W. Bennet (DWB)\n\n# Set up three main modules (command, control, reconnaissance),\n# and then enter main event loop.\n#\n# Command:\n# Gather mission priorities and objectives, such as turn left, turn right\n# goto GPS 45, 65, land, take off.\n#\n# Control:\n# Fly the craft to complete the command objective.\n#\n# Reconnaissance:\n# Gather information about wilfreds current position.\n#\n# Main Event Loop:\n# Check command listing for new updates, check reconnaisannce for current\n# posistion, and then control the craft to the correct zone. Main loop will\n# be a very fast feedback loop.\n\nimport command\nimport driver\nfrom debug import *\n\ndef mainLoop():\n wilfredCommunication = command.Command()\n wilfredCommunication.waitForClient()\n\n wilfredCommand = command.Command()\n\n while True:\n if not wilfredCommunication.checkConnection():\n wilfredCommunication.waitForClient()\n commands = wilfredCommunication.getCommand()\n \n\n for commandData in commands.split('\\n'):\n cmd = commandData.split(' ')[0].strip()\n if cmd == \"\": continue\n args = [arg.strip() for arg in commandData.split(' ')[1:]]\n \n \n # setMotorSpeed (0-3) (0-100)\n if cmd == \"setMotorSpeed\":\n motorNum = int(args[0])\n motorSpeed = int(args[1])\n wilfredCommand.setMotorSpeed(motorNum, motorSpeed)\n elif cmd == \"playMeow\":\n goodMessage(\"wilfred: playing meow from file: \", args[0])\n wilfredCommand.playMeow(args[0])\n elif cmd == \"getAccel\":\n goodMessage(\"wilfred: returning acceleration...\")\n wilfredCommunication.sendMessage(\"(0, 0, 0)\")\n else:\n errorMessage(\"wilfred: command not recognized: \", cmd, \": \", args)\n \n\nif __name__ == \"__main__\":\n mainLoop()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
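The loop parses newline-separated text commands of the form "<cmd> <arg> ...". A hypothetical test client for that protocol, assuming command.Command accepts a plain TCP connection; the host and port are made up, since the record never shows them:

import socket

HOST, PORT = 'localhost', 9000        # assumed; not shown in the record
with socket.create_connection((HOST, PORT)) as sock:
    sock.sendall(b'setMotorSpeed 0 50\n'
                 b'playMeow meow1.wav\n'
                 b'getAccel\n')
    print(sock.recv(1024).decode())   # expect "(0, 0, 0)" back from getAccel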
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'VideoAd.compress'
db.add_column(u'main_videoad', 'compress',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'VideoAd.compress'
db.delete_column(u'main_videoad', 'compress')
models = {
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'main.days': {
'Meta': {'object_name': 'Days'},
'date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_ad': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['main.ImageAd']", 'null': 'True', 'blank': 'True'}),
'show_text': ('django.db.models.fields.BooleanField', [], {}),
'show_video': ('django.db.models.fields.BooleanField', [], {}),
'start_time': ('django.db.models.fields.TimeField', [], {'default': 'datetime.time(8, 0)'}),
'stop_time': ('django.db.models.fields.TimeField', [], {'default': 'datetime.time(22, 0)'}),
'terminal': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Terminal']"}),
'text_ad': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['main.TextAd']", 'null': 'True', 'blank': 'True'}),
'text_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'video_ad': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['main.VideoAd']", 'null': 'True', 'blank': 'True'}),
'video_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'main.imagead': {
'Meta': {'object_name': 'ImageAd'},
'datelist': ('main.fields.DateArrayField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('filebrowser.fields.FileBrowseField', [], {'max_length': '255'}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Partner']"}),
'prolongation': ('django.db.models.fields.TimeField', [], {}),
'terminals': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['main.Terminal']", 'symmetrical': 'False'})
},
u'main.immediatelyad': {
'Meta': {'object_name': 'ImmediatelyAd'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
'day': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'immediatelies'", 'to': u"orm['main.Days']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'time': ('django.db.models.fields.TimeField', [], {})
},
u'main.oscommandlog': {
'Meta': {'object_name': 'OsCommandLog'},
'command': ('django.db.models.fields.TextField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'errors': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ouput': ('django.db.models.fields.TextField', [], {}),
'return_code': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'main.partner': {
'Meta': {'object_name': 'Partner'},
'account_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'bank': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'bik': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'director': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inn': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'kpp': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'ks': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'legal_address': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'ogrn': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'partner_type': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'passport': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'phones': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'short_name': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'})
},
u'main.terminal': {
'Meta': {'object_name': 'Terminal'},
'config': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
},
u'main.textad': {
'Meta': {'object_name': 'TextAd'},
'datelist': ('main.fields.DateArrayField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Partner']"}),
'terminals': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['main.Terminal']", 'symmetrical': 'False'}),
'text': ('django.db.models.fields.TextField', [], {})
},
u'main.videoad': {
'Meta': {'object_name': 'VideoAd'},
'compress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'datelist': ('main.fields.DateArrayField', [], {}),
'file_video': ('filebrowser.fields.FileBrowseField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['main.Partner']"}),
'prolongation': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),
'terminals': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['main.Terminal']", 'symmetrical': 'False'})
}
}
complete_apps = ['main']
|
normal
|
{
"blob_id": "b4bcf9903f4a34c8b256c65cada29e952a436f74",
"index": 2215,
"step-1": "<mask token>\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n db.add_column(u'main_videoad', 'compress', self.gf(\n 'django.db.models.fields.BooleanField')(default=False),\n keep_default=False)\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n db.add_column(u'main_videoad', 'compress', self.gf(\n 'django.db.models.fields.BooleanField')(default=False),\n keep_default=False)\n\n def backwards(self, orm):\n db.delete_column(u'main_videoad', 'compress')\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n db.add_column(u'main_videoad', 'compress', self.gf(\n 'django.db.models.fields.BooleanField')(default=False),\n keep_default=False)\n\n def backwards(self, orm):\n db.delete_column(u'main_videoad', 'compress')\n models = {u'contenttypes.contenttype': {'Meta': {'ordering':\n \"('name',)\", 'unique_together': \"(('app_label', 'model'),)\",\n 'object_name': 'ContentType', 'db_table': \"'django_content_type'\"},\n 'app_label': ('django.db.models.fields.CharField', [], {\n 'max_length': '100'}), u'id': ('django.db.models.fields.AutoField',\n [], {'primary_key': 'True'}), 'model': (\n 'django.db.models.fields.CharField', [], {'max_length': '100'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length':\n '100'})}, u'main.days': {'Meta': {'object_name': 'Days'}, 'date': (\n 'django.db.models.fields.DateField', [], {}), u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'image_ad': ('django.db.models.fields.related.ManyToManyField', [],\n {'symmetrical': 'False', 'to': u\"orm['main.ImageAd']\", 'null':\n 'True', 'blank': 'True'}), 'show_text': (\n 'django.db.models.fields.BooleanField', [], {}), 'show_video': (\n 'django.db.models.fields.BooleanField', [], {}), 'start_time': (\n 'django.db.models.fields.TimeField', [], {'default':\n 'datetime.time(8, 0)'}), 'stop_time': (\n 'django.db.models.fields.TimeField', [], {'default':\n 'datetime.time(22, 0)'}), 'terminal': (\n 'django.db.models.fields.related.ForeignKey', [], {'to':\n u\"orm['main.Terminal']\"}), 'text_ad': (\n 'django.db.models.fields.related.ManyToManyField', [], {\n 'symmetrical': 'False', 'to': u\"orm['main.TextAd']\", 'null': 'True',\n 'blank': 'True'}), 'text_count': (\n 'django.db.models.fields.PositiveIntegerField', [], {'default': '0'\n }), 'video_ad': ('django.db.models.fields.related.ManyToManyField',\n [], {'symmetrical': 'False', 'to': u\"orm['main.VideoAd']\", 'null':\n 'True', 'blank': 'True'}), 'video_count': (\n 'django.db.models.fields.PositiveIntegerField', [], {'default': '0'\n })}, u'main.imagead': {'Meta': {'object_name': 'ImageAd'},\n 'datelist': ('main.fields.DateArrayField', [], {}), u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'image': ('filebrowser.fields.FileBrowseField', [], {'max_length':\n '255'}), 'partner': ('django.db.models.fields.related.ForeignKey',\n [], {'to': u\"orm['main.Partner']\"}), 'prolongation': (\n 'django.db.models.fields.TimeField', [], {}), 'terminals': (\n 'django.db.models.fields.related.ManyToManyField', [], {'to':\n u\"orm['main.Terminal']\", 'symmetrical': 'False'})},\n u'main.immediatelyad': {'Meta': {'object_name': 'ImmediatelyAd'},\n 'content_type': ('django.db.models.fields.related.ForeignKey', [],\n {'to': u\"orm['contenttypes.ContentType']\"}), 'day': (\n 'django.db.models.fields.related.ForeignKey', [], {'related_name':\n \"u'immediatelies'\", 'to': u\"orm['main.Days']\"}), u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {\n }), 'time': ('django.db.models.fields.TimeField', [], {})},\n u'main.oscommandlog': {'Meta': {'object_name': 'OsCommandLog'},\n 'command': ('django.db.models.fields.TextField', [], {}),\n 'datetime': ('django.db.models.fields.DateTimeField', [], {\n 'auto_now': 'True', 'blank': 'True'}), 'errors': (\n 'django.db.models.fields.TextField', [], {}), u'id': (\n 'django.db.models.fields.AutoField', 
[], {'primary_key': 'True'}),\n 'ouput': ('django.db.models.fields.TextField', [], {}),\n 'return_code': ('django.db.models.fields.CharField', [], {\n 'max_length': '255'})}, u'main.partner': {'Meta': {'object_name':\n 'Partner'}, 'account_number': ('django.db.models.fields.CharField',\n [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'bank':\n ('django.db.models.fields.CharField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), 'bik': (\n 'django.db.models.fields.CharField', [], {'max_length': '100',\n 'null': 'True', 'blank': 'True'}), 'director': (\n 'django.db.models.fields.CharField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), 'full_name': (\n 'django.db.models.fields.CharField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'inn': ('django.db.models.fields.CharField', [], {'max_length':\n '50', 'null': 'True', 'blank': 'True'}), 'kpp': (\n 'django.db.models.fields.CharField', [], {'max_length': '50',\n 'null': 'True', 'blank': 'True'}), 'ks': (\n 'django.db.models.fields.CharField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), 'legal_address': (\n 'django.db.models.fields.CharField', [], {'max_length': '400',\n 'null': 'True', 'blank': 'True'}), 'name': (\n 'django.db.models.fields.CharField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), 'ogrn': (\n 'django.db.models.fields.CharField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), 'partner_type': (\n 'django.db.models.fields.PositiveIntegerField', [], {'default': '0'\n }), 'passport': ('django.db.models.fields.TextField', [], {'null':\n 'True', 'blank': 'True'}), 'phones': (\n 'django.db.models.fields.CharField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), 'short_name': (\n 'django.db.models.fields.CharField', [], {'max_length': '500',\n 'null': 'True', 'blank': 'True'})}, u'main.terminal': {'Meta': {\n 'object_name': 'Terminal'}, 'config': (\n 'django.db.models.fields.TextField', [], {'null': 'True', 'blank':\n 'True'}), u'id': ('django.db.models.fields.AutoField', [], {\n 'primary_key': 'True'}), 'text': (\n 'django.db.models.fields.TextField', [], {})}, u'main.textad': {\n 'Meta': {'object_name': 'TextAd'}, 'datelist': (\n 'main.fields.DateArrayField', [], {}), u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'partner': ('django.db.models.fields.related.ForeignKey', [], {'to':\n u\"orm['main.Partner']\"}), 'terminals': (\n 'django.db.models.fields.related.ManyToManyField', [], {'to':\n u\"orm['main.Terminal']\", 'symmetrical': 'False'}), 'text': (\n 'django.db.models.fields.TextField', [], {})}, u'main.videoad': {\n 'Meta': {'object_name': 'VideoAd'}, 'compress': (\n 'django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'datelist': ('main.fields.DateArrayField', [], {}), 'file_video': (\n 'filebrowser.fields.FileBrowseField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'partner': ('django.db.models.fields.related.ForeignKey', [], {'to':\n u\"orm['main.Partner']\"}), 'prolongation': (\n 'django.db.models.fields.TimeField', [], {'null': 'True', 'blank':\n 'True'}), 'terminals': (\n 'django.db.models.fields.related.ManyToManyField', [], {'to':\n u\"orm['main.Terminal']\", 'symmetrical': 'False'})}}\n complete_apps = ['main']\n",
"step-4": "from south.utils import datetime_utils as datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n db.add_column(u'main_videoad', 'compress', self.gf(\n 'django.db.models.fields.BooleanField')(default=False),\n keep_default=False)\n\n def backwards(self, orm):\n db.delete_column(u'main_videoad', 'compress')\n models = {u'contenttypes.contenttype': {'Meta': {'ordering':\n \"('name',)\", 'unique_together': \"(('app_label', 'model'),)\",\n 'object_name': 'ContentType', 'db_table': \"'django_content_type'\"},\n 'app_label': ('django.db.models.fields.CharField', [], {\n 'max_length': '100'}), u'id': ('django.db.models.fields.AutoField',\n [], {'primary_key': 'True'}), 'model': (\n 'django.db.models.fields.CharField', [], {'max_length': '100'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length':\n '100'})}, u'main.days': {'Meta': {'object_name': 'Days'}, 'date': (\n 'django.db.models.fields.DateField', [], {}), u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'image_ad': ('django.db.models.fields.related.ManyToManyField', [],\n {'symmetrical': 'False', 'to': u\"orm['main.ImageAd']\", 'null':\n 'True', 'blank': 'True'}), 'show_text': (\n 'django.db.models.fields.BooleanField', [], {}), 'show_video': (\n 'django.db.models.fields.BooleanField', [], {}), 'start_time': (\n 'django.db.models.fields.TimeField', [], {'default':\n 'datetime.time(8, 0)'}), 'stop_time': (\n 'django.db.models.fields.TimeField', [], {'default':\n 'datetime.time(22, 0)'}), 'terminal': (\n 'django.db.models.fields.related.ForeignKey', [], {'to':\n u\"orm['main.Terminal']\"}), 'text_ad': (\n 'django.db.models.fields.related.ManyToManyField', [], {\n 'symmetrical': 'False', 'to': u\"orm['main.TextAd']\", 'null': 'True',\n 'blank': 'True'}), 'text_count': (\n 'django.db.models.fields.PositiveIntegerField', [], {'default': '0'\n }), 'video_ad': ('django.db.models.fields.related.ManyToManyField',\n [], {'symmetrical': 'False', 'to': u\"orm['main.VideoAd']\", 'null':\n 'True', 'blank': 'True'}), 'video_count': (\n 'django.db.models.fields.PositiveIntegerField', [], {'default': '0'\n })}, u'main.imagead': {'Meta': {'object_name': 'ImageAd'},\n 'datelist': ('main.fields.DateArrayField', [], {}), u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'image': ('filebrowser.fields.FileBrowseField', [], {'max_length':\n '255'}), 'partner': ('django.db.models.fields.related.ForeignKey',\n [], {'to': u\"orm['main.Partner']\"}), 'prolongation': (\n 'django.db.models.fields.TimeField', [], {}), 'terminals': (\n 'django.db.models.fields.related.ManyToManyField', [], {'to':\n u\"orm['main.Terminal']\", 'symmetrical': 'False'})},\n u'main.immediatelyad': {'Meta': {'object_name': 'ImmediatelyAd'},\n 'content_type': ('django.db.models.fields.related.ForeignKey', [],\n {'to': u\"orm['contenttypes.ContentType']\"}), 'day': (\n 'django.db.models.fields.related.ForeignKey', [], {'related_name':\n \"u'immediatelies'\", 'to': u\"orm['main.Days']\"}), u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {\n }), 'time': ('django.db.models.fields.TimeField', [], {})},\n u'main.oscommandlog': {'Meta': {'object_name': 'OsCommandLog'},\n 'command': ('django.db.models.fields.TextField', [], {}),\n 'datetime': ('django.db.models.fields.DateTimeField', [], {\n 'auto_now': 
'True', 'blank': 'True'}), 'errors': (\n 'django.db.models.fields.TextField', [], {}), u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'ouput': ('django.db.models.fields.TextField', [], {}),\n 'return_code': ('django.db.models.fields.CharField', [], {\n 'max_length': '255'})}, u'main.partner': {'Meta': {'object_name':\n 'Partner'}, 'account_number': ('django.db.models.fields.CharField',\n [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'bank':\n ('django.db.models.fields.CharField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), 'bik': (\n 'django.db.models.fields.CharField', [], {'max_length': '100',\n 'null': 'True', 'blank': 'True'}), 'director': (\n 'django.db.models.fields.CharField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), 'full_name': (\n 'django.db.models.fields.CharField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'inn': ('django.db.models.fields.CharField', [], {'max_length':\n '50', 'null': 'True', 'blank': 'True'}), 'kpp': (\n 'django.db.models.fields.CharField', [], {'max_length': '50',\n 'null': 'True', 'blank': 'True'}), 'ks': (\n 'django.db.models.fields.CharField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), 'legal_address': (\n 'django.db.models.fields.CharField', [], {'max_length': '400',\n 'null': 'True', 'blank': 'True'}), 'name': (\n 'django.db.models.fields.CharField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), 'ogrn': (\n 'django.db.models.fields.CharField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), 'partner_type': (\n 'django.db.models.fields.PositiveIntegerField', [], {'default': '0'\n }), 'passport': ('django.db.models.fields.TextField', [], {'null':\n 'True', 'blank': 'True'}), 'phones': (\n 'django.db.models.fields.CharField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), 'short_name': (\n 'django.db.models.fields.CharField', [], {'max_length': '500',\n 'null': 'True', 'blank': 'True'})}, u'main.terminal': {'Meta': {\n 'object_name': 'Terminal'}, 'config': (\n 'django.db.models.fields.TextField', [], {'null': 'True', 'blank':\n 'True'}), u'id': ('django.db.models.fields.AutoField', [], {\n 'primary_key': 'True'}), 'text': (\n 'django.db.models.fields.TextField', [], {})}, u'main.textad': {\n 'Meta': {'object_name': 'TextAd'}, 'datelist': (\n 'main.fields.DateArrayField', [], {}), u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'partner': ('django.db.models.fields.related.ForeignKey', [], {'to':\n u\"orm['main.Partner']\"}), 'terminals': (\n 'django.db.models.fields.related.ManyToManyField', [], {'to':\n u\"orm['main.Terminal']\", 'symmetrical': 'False'}), 'text': (\n 'django.db.models.fields.TextField', [], {})}, u'main.videoad': {\n 'Meta': {'object_name': 'VideoAd'}, 'compress': (\n 'django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'datelist': ('main.fields.DateArrayField', [], {}), 'file_video': (\n 'filebrowser.fields.FileBrowseField', [], {'max_length': '255',\n 'null': 'True', 'blank': 'True'}), u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'partner': ('django.db.models.fields.related.ForeignKey', [], {'to':\n u\"orm['main.Partner']\"}), 'prolongation': (\n 'django.db.models.fields.TimeField', [], {'null': 'True', 'blank':\n 'True'}), 'terminals': (\n 'django.db.models.fields.related.ManyToManyField', [], {'to':\n 
u\"orm['main.Terminal']\", 'symmetrical': 'False'})}}\n complete_apps = ['main']\n",
"step-5": "# -*- coding: utf-8 -*-\nfrom south.utils import datetime_utils as datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n # Adding field 'VideoAd.compress'\n db.add_column(u'main_videoad', 'compress',\n self.gf('django.db.models.fields.BooleanField')(default=False),\n keep_default=False)\n\n\n def backwards(self, orm):\n # Deleting field 'VideoAd.compress'\n db.delete_column(u'main_videoad', 'compress')\n\n\n models = {\n u'contenttypes.contenttype': {\n 'Meta': {'ordering': \"('name',)\", 'unique_together': \"(('app_label', 'model'),)\", 'object_name': 'ContentType', 'db_table': \"'django_content_type'\"},\n 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})\n },\n u'main.days': {\n 'Meta': {'object_name': 'Days'},\n 'date': ('django.db.models.fields.DateField', [], {}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'image_ad': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u\"orm['main.ImageAd']\", 'null': 'True', 'blank': 'True'}),\n 'show_text': ('django.db.models.fields.BooleanField', [], {}),\n 'show_video': ('django.db.models.fields.BooleanField', [], {}),\n 'start_time': ('django.db.models.fields.TimeField', [], {'default': 'datetime.time(8, 0)'}),\n 'stop_time': ('django.db.models.fields.TimeField', [], {'default': 'datetime.time(22, 0)'}),\n 'terminal': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['main.Terminal']\"}),\n 'text_ad': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u\"orm['main.TextAd']\", 'null': 'True', 'blank': 'True'}),\n 'text_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),\n 'video_ad': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u\"orm['main.VideoAd']\", 'null': 'True', 'blank': 'True'}),\n 'video_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})\n },\n u'main.imagead': {\n 'Meta': {'object_name': 'ImageAd'},\n 'datelist': ('main.fields.DateArrayField', [], {}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'image': ('filebrowser.fields.FileBrowseField', [], {'max_length': '255'}),\n 'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['main.Partner']\"}),\n 'prolongation': ('django.db.models.fields.TimeField', [], {}),\n 'terminals': ('django.db.models.fields.related.ManyToManyField', [], {'to': u\"orm['main.Terminal']\", 'symmetrical': 'False'})\n },\n u'main.immediatelyad': {\n 'Meta': {'object_name': 'ImmediatelyAd'},\n 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['contenttypes.ContentType']\"}),\n 'day': ('django.db.models.fields.related.ForeignKey', [], {'related_name': \"u'immediatelies'\", 'to': u\"orm['main.Days']\"}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),\n 'time': ('django.db.models.fields.TimeField', [], {})\n },\n u'main.oscommandlog': {\n 'Meta': {'object_name': 'OsCommandLog'},\n 'command': ('django.db.models.fields.TextField', [], 
{}),\n 'datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),\n 'errors': ('django.db.models.fields.TextField', [], {}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'ouput': ('django.db.models.fields.TextField', [], {}),\n 'return_code': ('django.db.models.fields.CharField', [], {'max_length': '255'})\n },\n u'main.partner': {\n 'Meta': {'object_name': 'Partner'},\n 'account_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),\n 'bank': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),\n 'bik': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),\n 'director': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),\n 'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'inn': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),\n 'kpp': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),\n 'ks': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),\n 'legal_address': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),\n 'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),\n 'ogrn': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),\n 'partner_type': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),\n 'passport': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),\n 'phones': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),\n 'short_name': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'})\n },\n u'main.terminal': {\n 'Meta': {'object_name': 'Terminal'},\n 'config': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'text': ('django.db.models.fields.TextField', [], {})\n },\n u'main.textad': {\n 'Meta': {'object_name': 'TextAd'},\n 'datelist': ('main.fields.DateArrayField', [], {}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['main.Partner']\"}),\n 'terminals': ('django.db.models.fields.related.ManyToManyField', [], {'to': u\"orm['main.Terminal']\", 'symmetrical': 'False'}),\n 'text': ('django.db.models.fields.TextField', [], {})\n },\n u'main.videoad': {\n 'Meta': {'object_name': 'VideoAd'},\n 'compress': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),\n 'datelist': ('main.fields.DateArrayField', [], {}),\n 'file_video': ('filebrowser.fields.FileBrowseField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['main.Partner']\"}),\n 'prolongation': ('django.db.models.fields.TimeField', [], {'null': 'True', 'blank': 'True'}),\n 'terminals': 
('django.db.models.fields.related.ManyToManyField', [], {'to': u\"orm['main.Terminal']\", 'symmetrical': 'False'})\n }\n }\n\n complete_apps = ['main']",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author: Swking
@File : ZDT.py
@Date : 2018/12/28
@Desc : ZDT multi-objective benchmark suite (ZDT1-ZDT4 and ZDT6 test problems)
"""
import numpy as np
class ZDT1:
def __init__(self):
self.dimension = 30
self.objFuncNum = 2
self.isMin = True
self.min = np.zeros(self.dimension)
self.max = np.zeros(self.dimension) + 1
self.span = (self.min, self.max)
def Func(self, X):
Y = np.zeros(2)
Y[0] = X[0]
		g = 1 + (9 / (self.dimension - 1)) * np.sum(X[1:])  # g sums over x_2..x_n
Y[1] = g * (1 - ((Y[0] / g)**0.5))
return Y
class ZDT2:
def __init__(self):
self.dimension = 30
self.objFuncNum = 2
self.isMin = True
self.min = np.zeros(self.dimension)
self.max = np.zeros(self.dimension) + 1
self.span = (self.min, self.max)
def Func(self, X):
Y = np.zeros(2)
Y[0] = X[0]
		g = 1 + (9 / (self.dimension - 1)) * np.sum(X[1:])  # g sums over x_2..x_n
Y[1] = g * (1 - (Y[0] / g) ** 2)
return Y
class ZDT3:
def __init__(self):
self.dimension = 10
self.objFuncNum = 2
self.isMin = True
self.min = np.zeros(self.dimension)
self.max = np.zeros(self.dimension) + 1
self.span = (self.min, self.max)
def Func(self, X):
Y = np.zeros(2)
Y[0] = X[0]
		g = 1 + (9 / (self.dimension - 1)) * np.sum(X[1:])  # g sums over x_2..x_n
Y[1] = g * (1 - (np.sqrt(Y[0] / g)) - (Y[0] / g) * np.sin(10 * np.pi * Y[0]))
return Y
class ZDT4:
def __init__(self):
self.dimension = 10
self.objFuncNum = 2
self.isMin = True
self.min = np.zeros(self.dimension) - 5
self.min[0] = 0
self.max = np.zeros(self.dimension) + 5
self.max[0] = 1
self.span = (self.min, self.max)
def Func(self, X):
Y = np.zeros(2)
Y[0] = X[0]
		g = 1 + 10 * (self.dimension - 1) + np.sum(np.power(X[1:], 2) - 10 * np.cos(4 * np.pi * X[1:]))  # sum over x_2..x_n
Y[1] = g * (1 - (np.sqrt(Y[0] / g)))
return Y
class ZDT6:
def __init__(self):
self.dimension = 10
self.objFuncNum = 2
self.isMin = True
self.min = np.zeros(self.dimension)
self.max = np.zeros(self.dimension) + 1
self.span = (self.min, self.max)
def Func(self, X):
Y = np.zeros(2)
Y[0] = 1 - np.exp(-4 * X[0]) * (np.sin(6 * np.pi * X[0]) ** 6)
		g = 1 + 9 * (np.sum(X[1:]) / (self.dimension - 1)) ** 0.25  # sum over x_2..x_n
Y[1] = g * (1 - (Y[0] / g) ** 2)
return Y
if __name__ == '__main__':
zdt = ZDT1()
print(zdt.Func(np.ones(zdt.dimension)))
|
normal
|
{
"blob_id": "8ca16947054b681a5f43d8b8029191d031d3a218",
"index": 8352,
"step-1": "<mask token>\n\n\nclass ZDT2:\n\n def __init__(self):\n self.dimension = 30\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])\n Y[1] = g * (1 - (Y[0] / g) ** 2)\n return Y\n\n\nclass ZDT3:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])\n Y[1] = g * (1 - np.sqrt(Y[0] / g) - Y[0] / g * np.sin(10 * np.pi *\n Y[0]))\n return Y\n\n\nclass ZDT4:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension) - 5\n self.min[0] = 0\n self.max = np.zeros(self.dimension) + 5\n self.max[0] = 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 10 * (self.dimension - 1) + np.sum(np.power(X[1:-1], 2) - \n 10 * np.cos(4 * np.pi * X[1:-1]))\n Y[1] = g * (1 - np.sqrt(Y[0] / g))\n return Y\n\n\nclass ZDT6:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = 1 - np.exp(-4 * X[0]) * np.sin(6 * np.pi * X[0]) ** 6\n g = 1 + 9 * np.sum(X[1:-1] / (self.dimension - 1)) ** 0.25\n Y[1] = g * (1 - (Y[0] / g) ** 2)\n return Y\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ZDT1:\n <mask token>\n <mask token>\n\n\nclass ZDT2:\n\n def __init__(self):\n self.dimension = 30\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])\n Y[1] = g * (1 - (Y[0] / g) ** 2)\n return Y\n\n\nclass ZDT3:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])\n Y[1] = g * (1 - np.sqrt(Y[0] / g) - Y[0] / g * np.sin(10 * np.pi *\n Y[0]))\n return Y\n\n\nclass ZDT4:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension) - 5\n self.min[0] = 0\n self.max = np.zeros(self.dimension) + 5\n self.max[0] = 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 10 * (self.dimension - 1) + np.sum(np.power(X[1:-1], 2) - \n 10 * np.cos(4 * np.pi * X[1:-1]))\n Y[1] = g * (1 - np.sqrt(Y[0] / g))\n return Y\n\n\nclass ZDT6:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = 1 - np.exp(-4 * X[0]) * np.sin(6 * np.pi * X[0]) ** 6\n g = 1 + 9 * np.sum(X[1:-1] / (self.dimension - 1)) ** 0.25\n Y[1] = g * (1 - (Y[0] / g) ** 2)\n return Y\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ZDT1:\n\n def __init__(self):\n self.dimension = 30\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])\n Y[1] = g * (1 - (Y[0] / g) ** 0.5)\n return Y\n\n\nclass ZDT2:\n\n def __init__(self):\n self.dimension = 30\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])\n Y[1] = g * (1 - (Y[0] / g) ** 2)\n return Y\n\n\nclass ZDT3:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])\n Y[1] = g * (1 - np.sqrt(Y[0] / g) - Y[0] / g * np.sin(10 * np.pi *\n Y[0]))\n return Y\n\n\nclass ZDT4:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension) - 5\n self.min[0] = 0\n self.max = np.zeros(self.dimension) + 5\n self.max[0] = 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 10 * (self.dimension - 1) + np.sum(np.power(X[1:-1], 2) - \n 10 * np.cos(4 * np.pi * X[1:-1]))\n Y[1] = g * (1 - np.sqrt(Y[0] / g))\n return Y\n\n\nclass ZDT6:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = 1 - np.exp(-4 * X[0]) * np.sin(6 * np.pi * X[0]) ** 6\n g = 1 + 9 * np.sum(X[1:-1] / (self.dimension - 1)) ** 0.25\n Y[1] = g * (1 - (Y[0] / g) ** 2)\n return Y\n\n\nif __name__ == '__main__':\n zdt = ZDT1()\n print(zdt.Func(np.ones(zdt.dimension)))\n",
"step-4": "<mask token>\nimport numpy as np\n\n\nclass ZDT1:\n\n def __init__(self):\n self.dimension = 30\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])\n Y[1] = g * (1 - (Y[0] / g) ** 0.5)\n return Y\n\n\nclass ZDT2:\n\n def __init__(self):\n self.dimension = 30\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])\n Y[1] = g * (1 - (Y[0] / g) ** 2)\n return Y\n\n\nclass ZDT3:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 9 / (self.dimension - 1) * np.sum(X[1:-1])\n Y[1] = g * (1 - np.sqrt(Y[0] / g) - Y[0] / g * np.sin(10 * np.pi *\n Y[0]))\n return Y\n\n\nclass ZDT4:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension) - 5\n self.min[0] = 0\n self.max = np.zeros(self.dimension) + 5\n self.max[0] = 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = X[0]\n g = 1 + 10 * (self.dimension - 1) + np.sum(np.power(X[1:-1], 2) - \n 10 * np.cos(4 * np.pi * X[1:-1]))\n Y[1] = g * (1 - np.sqrt(Y[0] / g))\n return Y\n\n\nclass ZDT6:\n\n def __init__(self):\n self.dimension = 10\n self.objFuncNum = 2\n self.isMin = True\n self.min = np.zeros(self.dimension)\n self.max = np.zeros(self.dimension) + 1\n self.span = self.min, self.max\n\n def Func(self, X):\n Y = np.zeros(2)\n Y[0] = 1 - np.exp(-4 * X[0]) * np.sin(6 * np.pi * X[0]) ** 6\n g = 1 + 9 * np.sum(X[1:-1] / (self.dimension - 1)) ** 0.25\n Y[1] = g * (1 - (Y[0] / g) ** 2)\n return Y\n\n\nif __name__ == '__main__':\n zdt = ZDT1()\n print(zdt.Func(np.ones(zdt.dimension)))\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n@Author: Swking\n@File : ZDT.py\n@Date : 2018/12/28\n@Desc : \n\"\"\"\nimport numpy as np\nclass ZDT1:\n\tdef __init__(self):\n\t\tself.dimension = 30\n\t\tself.objFuncNum = 2\n\t\tself.isMin = True\n\t\tself.min = np.zeros(self.dimension)\n\t\tself.max = np.zeros(self.dimension) + 1\n\t\tself.span = (self.min, self.max)\n\n\tdef Func(self, X):\n\t\tY = np.zeros(2)\n\t\tY[0] = X[0]\n\t\tg = 1 + (9 / (self.dimension - 1)) * np.sum(X[1:-1])\n\t\tY[1] = g * (1 - ((Y[0] / g)**0.5))\n\t\treturn Y\n\nclass ZDT2:\n\tdef __init__(self):\n\t\tself.dimension = 30\n\t\tself.objFuncNum = 2\n\t\tself.isMin = True\n\t\tself.min = np.zeros(self.dimension)\n\t\tself.max = np.zeros(self.dimension) + 1\n\t\tself.span = (self.min, self.max)\n\n\tdef Func(self, X):\n\t\tY = np.zeros(2)\n\t\tY[0] = X[0]\n\t\tg = 1 + (9 / (self.dimension - 1)) * np.sum(X[1:-1])\n\t\tY[1] = g * (1 - (Y[0] / g) ** 2)\n\t\treturn Y\n\n\nclass ZDT3:\n\tdef __init__(self):\n\t\tself.dimension = 10\n\t\tself.objFuncNum = 2\n\t\tself.isMin = True\n\t\tself.min = np.zeros(self.dimension)\n\t\tself.max = np.zeros(self.dimension) + 1\n\t\tself.span = (self.min, self.max)\n\n\tdef Func(self, X):\n\t\tY = np.zeros(2)\n\t\tY[0] = X[0]\n\t\tg = 1 + (9 / (self.dimension - 1)) * np.sum(X[1:-1])\n\t\tY[1] = g * (1 - (np.sqrt(Y[0] / g)) - (Y[0] / g) * np.sin(10 * np.pi * Y[0]))\n\t\treturn Y\n\n\nclass ZDT4:\n\tdef __init__(self):\n\t\tself.dimension = 10\n\t\tself.objFuncNum = 2\n\t\tself.isMin = True\n\t\tself.min = np.zeros(self.dimension) - 5\n\t\tself.min[0] = 0\n\t\tself.max = np.zeros(self.dimension) + 5\n\t\tself.max[0] = 1\n\t\tself.span = (self.min, self.max)\n\n\tdef Func(self, X):\n\t\tY = np.zeros(2)\n\t\tY[0] = X[0]\n\t\tg = 1 + 10 * (self.dimension - 1) + np.sum(np.power(X[1:-1], 2) - 10 * np.cos(4 * np.pi * X[1:-1]))\n\t\tY[1] = g * (1 - (np.sqrt(Y[0] / g)))\n\t\treturn Y\n\n\nclass ZDT6:\n\tdef __init__(self):\n\t\tself.dimension = 10\n\t\tself.objFuncNum = 2\n\t\tself.isMin = True\n\t\tself.min = np.zeros(self.dimension)\n\t\tself.max = np.zeros(self.dimension) + 1\n\t\tself.span = (self.min, self.max)\n\n\tdef Func(self, X):\n\t\tY = np.zeros(2)\n\t\tY[0] = 1 - np.exp(-4 * X[0]) * (np.sin(6 * np.pi * X[0]) ** 6)\n\t\tg = 1 + 9 * (np.sum(X[1:-1] / (self.dimension - 1)) ** 0.25)\n\t\tY[1] = g * (1 - (Y[0] / g) ** 2)\n\t\treturn Y\n\nif __name__ == '__main__':\n\tzdt = ZDT1()\n\tprint(zdt.Func(np.ones(zdt.dimension)))",
"step-ids": [
12,
13,
16,
17,
18
]
}
|
[
12,
13,
16,
17,
18
] |
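A quick sanity check for a ZDT implementation like the one above is to evaluate points on the analytically known Pareto-optimal set; for ZDT1 that set is x_2 = ... = x_n = 0, where g = 1 and the second objective reduces to 1 - sqrt(f1). The sketch below restates ZDT1 in one standalone function purely for that check (the name zdt1 is illustrative, not part of the module above):

import numpy as np

def zdt1(X):
    # f1 = x1; g = 1 + 9/(n-1) * sum(x_2..x_n); f2 = g * (1 - sqrt(f1/g))
    n = len(X)
    f1 = X[0]
    g = 1.0 + 9.0 / (n - 1) * np.sum(X[1:])
    return f1, g * (1.0 - np.sqrt(f1 / g))

# On the Pareto set (every gene after the first set to zero) g == 1,
# so f2 must equal 1 - sqrt(f1) exactly.
for x1 in (0.0, 0.25, 1.0):
    X = np.zeros(30)
    X[0] = x1
    f1, f2 = zdt1(X)
    assert np.isclose(f2, 1.0 - np.sqrt(x1))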
from .chair_model import run_chair_simulation, init_omega_t, \
JumpingModel, H_to_L
from .utils import load_hcp_peaks, Condition, average_peak_counts
|
normal
|
{
"blob_id": "9087a7bf42070fdb8639c616fdf7f09ad3903656",
"index": 6755,
"step-1": "<mask token>\n",
"step-2": "from .chair_model import run_chair_simulation, init_omega_t, JumpingModel, H_to_L\nfrom .utils import load_hcp_peaks, Condition, average_peak_counts\n",
"step-3": "from .chair_model import run_chair_simulation, init_omega_t, \\\n JumpingModel, H_to_L\nfrom .utils import load_hcp_peaks, Condition, average_peak_counts\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from appJar import gui
app = gui("Calculator", "560x240")
### FUNCTIONS ###
n1, n2 = 0.0, 0.0
result = 0.0
isFirst = True
calc = ""
def doMath(btn):
global result, n1, n2, isFirst, calc
inputNumber()
if(btn == "Add"): calc = "a"
if(btn == "Substract"): calc = "s"
if(btn == "Multiply"): calc = "m"
if(btn == "Divide"): calc = "d"
app.clearEntry("Number")
def calculate(btn):
global result, n1, n2, isFirst, calc
inputNumber()
if(calc == 'a'): result = n1 + n2
if(calc == 's'): result = n1 - n2
if(calc == 'm'): result = n1 * n2
    if(calc == 'd'):
        try:
            result = n1 / n2
        except ZeroDivisionError:
            clearOut(btn)
            app.errorBox("DivisionByZero", "You can't divide by Zero.")
            app.clearEntry("Number")
            return
app.clearEntry("Number")
app.setLabel("Result", result)
def clearOut(btn):
global result, n1, n2, isFirst, calc
n1, n2 = 0.0, 0.0
result = 0.0
isFirst = True
calc = ""
def inputNumber():
global n1, n2, isFirst
if(isFirst):
n1 = app.getEntry("Number")
isFirst = False
else:
n2 = app.getEntry("Number")
isFirst = True
### END FUNCTIONS ###
app.setStretch("column")
app.setSticky("")
app.setResizable(True)
app.addNumericEntry("Number")
app.setEntryDefault("Number", "Enter Number")
app.addButtons(["Add", "Substract", "Multiply", "Divide"], doMath)
app.addButtons(["Calculate!", "clearOut"], [calculate, clearOut])
app.setButton("clearOut", "C")
app.addEmptyLabel("Result")
app.go()
|
normal
|
{
"blob_id": "084299da1c2f41de96e60d37088466c7b61de38e",
"index": 9750,
"step-1": "<mask token>\n\n\ndef doMath(btn):\n global result, n1, n2, isFirst, calc\n inputNumber()\n if btn == 'Add':\n calc = 'a'\n if btn == 'Substract':\n calc = 's'\n if btn == 'Multiply':\n calc = 'm'\n if btn == 'Divide':\n calc = 'd'\n app.clearEntry('Number')\n\n\ndef calculate(btn):\n global result, n1, n2, isFirst, calc\n inputNumber()\n if calc == 'a':\n result = n1 + n2\n if calc == 's':\n result = n1 - n2\n if calc == 'm':\n result = n1 * n2\n if calc == 'd':\n try:\n result = n1 / n2\n except ZeroDivisionError:\n clearOut(btn)\n app.errorBox('DivisionByZero', \"You can't divide by Zero.\")\n app.clearEntry('Number')\n app.setLabel('Result', result)\n\n\ndef clearOut(btn):\n global result, n1, n2, isFirst, calc\n n1, n2 = 0.0, 0.0\n result = 0.0\n isFirst = True\n calc = ''\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef doMath(btn):\n global result, n1, n2, isFirst, calc\n inputNumber()\n if btn == 'Add':\n calc = 'a'\n if btn == 'Substract':\n calc = 's'\n if btn == 'Multiply':\n calc = 'm'\n if btn == 'Divide':\n calc = 'd'\n app.clearEntry('Number')\n\n\ndef calculate(btn):\n global result, n1, n2, isFirst, calc\n inputNumber()\n if calc == 'a':\n result = n1 + n2\n if calc == 's':\n result = n1 - n2\n if calc == 'm':\n result = n1 * n2\n if calc == 'd':\n try:\n result = n1 / n2\n except ZeroDivisionError:\n clearOut(btn)\n app.errorBox('DivisionByZero', \"You can't divide by Zero.\")\n app.clearEntry('Number')\n app.setLabel('Result', result)\n\n\ndef clearOut(btn):\n global result, n1, n2, isFirst, calc\n n1, n2 = 0.0, 0.0\n result = 0.0\n isFirst = True\n calc = ''\n\n\ndef inputNumber():\n global n1, n2, isFirst\n if isFirst:\n n1 = app.getEntry('Number')\n isFirst = False\n else:\n n2 = app.getEntry('Number')\n isFirst = True\n\n\napp.setStretch('column')\napp.setSticky('')\napp.setResizable(True)\napp.addNumericEntry('Number')\napp.setEntryDefault('Number', 'Enter Number')\napp.addButtons(['Add', 'Substract', 'Multiply', 'Divide'], doMath)\napp.addButtons(['Calculate!', 'clearOut'], [calculate, clearOut])\napp.setButton('clearOut', 'C')\napp.addEmptyLabel('Result')\napp.go()\n",
"step-3": "<mask token>\napp = gui('Calculator', '560x240')\nn1, n2 = 0.0, 0.0\nresult = 0.0\nisFirst = True\ncalc = ''\n\n\ndef doMath(btn):\n global result, n1, n2, isFirst, calc\n inputNumber()\n if btn == 'Add':\n calc = 'a'\n if btn == 'Substract':\n calc = 's'\n if btn == 'Multiply':\n calc = 'm'\n if btn == 'Divide':\n calc = 'd'\n app.clearEntry('Number')\n\n\ndef calculate(btn):\n global result, n1, n2, isFirst, calc\n inputNumber()\n if calc == 'a':\n result = n1 + n2\n if calc == 's':\n result = n1 - n2\n if calc == 'm':\n result = n1 * n2\n if calc == 'd':\n try:\n result = n1 / n2\n except ZeroDivisionError:\n clearOut(btn)\n app.errorBox('DivisionByZero', \"You can't divide by Zero.\")\n app.clearEntry('Number')\n app.setLabel('Result', result)\n\n\ndef clearOut(btn):\n global result, n1, n2, isFirst, calc\n n1, n2 = 0.0, 0.0\n result = 0.0\n isFirst = True\n calc = ''\n\n\ndef inputNumber():\n global n1, n2, isFirst\n if isFirst:\n n1 = app.getEntry('Number')\n isFirst = False\n else:\n n2 = app.getEntry('Number')\n isFirst = True\n\n\napp.setStretch('column')\napp.setSticky('')\napp.setResizable(True)\napp.addNumericEntry('Number')\napp.setEntryDefault('Number', 'Enter Number')\napp.addButtons(['Add', 'Substract', 'Multiply', 'Divide'], doMath)\napp.addButtons(['Calculate!', 'clearOut'], [calculate, clearOut])\napp.setButton('clearOut', 'C')\napp.addEmptyLabel('Result')\napp.go()\n",
"step-4": "from appJar import gui\napp = gui('Calculator', '560x240')\nn1, n2 = 0.0, 0.0\nresult = 0.0\nisFirst = True\ncalc = ''\n\n\ndef doMath(btn):\n global result, n1, n2, isFirst, calc\n inputNumber()\n if btn == 'Add':\n calc = 'a'\n if btn == 'Substract':\n calc = 's'\n if btn == 'Multiply':\n calc = 'm'\n if btn == 'Divide':\n calc = 'd'\n app.clearEntry('Number')\n\n\ndef calculate(btn):\n global result, n1, n2, isFirst, calc\n inputNumber()\n if calc == 'a':\n result = n1 + n2\n if calc == 's':\n result = n1 - n2\n if calc == 'm':\n result = n1 * n2\n if calc == 'd':\n try:\n result = n1 / n2\n except ZeroDivisionError:\n clearOut(btn)\n app.errorBox('DivisionByZero', \"You can't divide by Zero.\")\n app.clearEntry('Number')\n app.setLabel('Result', result)\n\n\ndef clearOut(btn):\n global result, n1, n2, isFirst, calc\n n1, n2 = 0.0, 0.0\n result = 0.0\n isFirst = True\n calc = ''\n\n\ndef inputNumber():\n global n1, n2, isFirst\n if isFirst:\n n1 = app.getEntry('Number')\n isFirst = False\n else:\n n2 = app.getEntry('Number')\n isFirst = True\n\n\napp.setStretch('column')\napp.setSticky('')\napp.setResizable(True)\napp.addNumericEntry('Number')\napp.setEntryDefault('Number', 'Enter Number')\napp.addButtons(['Add', 'Substract', 'Multiply', 'Divide'], doMath)\napp.addButtons(['Calculate!', 'clearOut'], [calculate, clearOut])\napp.setButton('clearOut', 'C')\napp.addEmptyLabel('Result')\napp.go()\n",
"step-5": "from appJar import gui\n\napp = gui(\"Calculator\", \"560x240\")\n\n### FUNCTIONS ###\n\nn1, n2 = 0.0, 0.0\nresult = 0.0\nisFirst = True\ncalc = \"\"\n\ndef doMath(btn):\n global result, n1, n2, isFirst, calc\n\n inputNumber()\n\n if(btn == \"Add\"): calc = \"a\"\n if(btn == \"Substract\"): calc = \"s\"\n if(btn == \"Multiply\"): calc = \"m\"\n if(btn == \"Divide\"): calc = \"d\"\n\n app.clearEntry(\"Number\")\n\ndef calculate(btn):\n global result, n1, n2, isFirst, calc\n\n inputNumber()\n\n if(calc == 'a'): result = n1 + n2\n if(calc == 's'): result = n1 - n2\n if(calc == 'm'): result = n1 * n2\n if(calc == 'd'):\n try:\n result = n1 / n2\n except ZeroDivisionError:\n clearOut(btn)\n app.errorBox(\"DivisionByZero\", \"You can't divide by Zero.\")\n\n app.clearEntry(\"Number\")\n app.setLabel(\"Result\", result)\n\ndef clearOut(btn):\n global result, n1, n2, isFirst, calc\n n1, n2 = 0.0, 0.0\n result = 0.0\n isFirst = True\n calc = \"\"\n\ndef inputNumber():\n global n1, n2, isFirst\n\n if(isFirst):\n n1 = app.getEntry(\"Number\")\n isFirst = False\n else:\n n2 = app.getEntry(\"Number\")\n isFirst = True\n\n\n### FUNCTIONS ###\n\napp.setStretch(\"column\")\napp.setSticky(\"\")\napp.setResizable(True)\napp.addNumericEntry(\"Number\")\napp.setEntryDefault(\"Number\", \"Enter Number\")\n\napp.addButtons([\"Add\", \"Substract\", \"Multiply\", \"Divide\"], doMath)\napp.addButtons([\"Calculate!\", \"clearOut\"], [calculate, clearOut])\napp.setButton(\"clearOut\", \"C\")\n\napp.addEmptyLabel(\"Result\")\n\napp.go()\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
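The operator selection in doMath and calculate above is a chain of string comparisons; the same dispatch is often written as a lookup table, so that adding an operator becomes a one-line change. A minimal appJar-free sketch of that pattern (apply_op and OPS are illustrative names, not part of the program above):

import operator

OPS = {
    "Add": operator.add,
    "Subtract": operator.sub,
    "Multiply": operator.mul,
    "Divide": operator.truediv,
}

def apply_op(name, a, b):
    # Return None on division by zero so the GUI layer decides how to report it.
    try:
        return OPS[name](a, b)
    except ZeroDivisionError:
        return None

assert apply_op("Multiply", 6.0, 7.0) == 42.0
assert apply_op("Divide", 1.0, 0.0) is None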
from marshmallow import fields, post_load
from rebase.common.schema import RebaseSchema, SecureNestedField
from rebase.views.bid_limit import BidLimitSchema
class TicketSetSchema(RebaseSchema):
id = fields.Integer()
bid_limits = SecureNestedField(BidLimitSchema, exclude=('ticket_set',),
only=('id', 'price', 'ticket_snapshot'), many=True)
auction = SecureNestedField('AuctionSchema', only=('id',))
nominations = SecureNestedField('NominationSchema', only=('contractor',
'ticket_set', 'job_fit', 'auction', 'hide'), many=True)
@post_load
def make_ticket_set(self, data):
from rebase.models import TicketSet
return self._get_or_make_object(TicketSet, data)
serializer = TicketSetSchema()
deserializer = TicketSetSchema(strict=True)
update_deserializer = TicketSetSchema(context={'raw': True})
|
normal
|
{
"blob_id": "5ebc4f61810f007fd345b52531f7f4318820b9c8",
"index": 6333,
"step-1": "<mask token>\n\n\nclass TicketSetSchema(RebaseSchema):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TicketSetSchema(RebaseSchema):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @post_load\n def make_ticket_set(self, data):\n from rebase.models import TicketSet\n return self._get_or_make_object(TicketSet, data)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TicketSetSchema(RebaseSchema):\n id = fields.Integer()\n bid_limits = SecureNestedField(BidLimitSchema, exclude=('ticket_set',),\n only=('id', 'price', 'ticket_snapshot'), many=True)\n auction = SecureNestedField('AuctionSchema', only=('id',))\n nominations = SecureNestedField('NominationSchema', only=('contractor',\n 'ticket_set', 'job_fit', 'auction', 'hide'), many=True)\n\n @post_load\n def make_ticket_set(self, data):\n from rebase.models import TicketSet\n return self._get_or_make_object(TicketSet, data)\n\n\nserializer = TicketSetSchema()\ndeserializer = TicketSetSchema(strict=True)\nupdate_deserializer = TicketSetSchema(context={'raw': True})\n",
"step-4": "from marshmallow import fields, post_load\nfrom rebase.common.schema import RebaseSchema, SecureNestedField\nfrom rebase.views.bid_limit import BidLimitSchema\n\n\nclass TicketSetSchema(RebaseSchema):\n id = fields.Integer()\n bid_limits = SecureNestedField(BidLimitSchema, exclude=('ticket_set',),\n only=('id', 'price', 'ticket_snapshot'), many=True)\n auction = SecureNestedField('AuctionSchema', only=('id',))\n nominations = SecureNestedField('NominationSchema', only=('contractor',\n 'ticket_set', 'job_fit', 'auction', 'hide'), many=True)\n\n @post_load\n def make_ticket_set(self, data):\n from rebase.models import TicketSet\n return self._get_or_make_object(TicketSet, data)\n\n\nserializer = TicketSetSchema()\ndeserializer = TicketSetSchema(strict=True)\nupdate_deserializer = TicketSetSchema(context={'raw': True})\n",
"step-5": null,
"step-ids": [
1,
2,
4,
5
]
}
|
[
1,
2,
4,
5
] |
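The @post_load hook above is marshmallow's standard way to turn a validated dict back into a model instance; _get_or_make_object is rebase-specific, but the underlying pattern looks like this self-contained sketch (marshmallow 2.x style, matching the strict=True usage above; Point and PointSchema are illustrative):

from marshmallow import Schema, fields, post_load

class Point(object):
    def __init__(self, x, y):
        self.x, self.y = x, y

class PointSchema(Schema):
    x = fields.Integer()
    y = fields.Integer()

    @post_load
    def make_point(self, data):
        return Point(**data)

# In marshmallow 2.x, load() returns an UnmarshalResult namedtuple of (data, errors).
point = PointSchema(strict=True).load({'x': 1, 'y': 2}).data
assert (point.x, point.y) == (1, 2)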
rf = open('A-large.in', 'r')
wf = open('A-large.out', 'w')
cases = int(rf.readline())
for case in range(1, cases + 1):
digits = [False] * 10
n = int(rf.readline())
if n == 0:
wf.write('Case #%s: INSOMNIA\n' % case)
continue
for i in range(1, 999999):
cur = n * i
for c in str(cur):
digits[int(c)] = True
if all(digits):
wf.write('Case #%s: %s\n' % (case, cur))
break
|
normal
|
{
"blob_id": "0074b0cd1e4317e36ef4a41f8179464c2ec6c197",
"index": 8250,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor case in range(1, cases + 1):\n digits = [False] * 10\n n = int(rf.readline())\n if n == 0:\n wf.write('Case #%s: INSOMNIA\\n' % case)\n continue\n for i in range(1, 999999):\n cur = n * i\n for c in str(cur):\n digits[int(c)] = True\n if all(digits):\n wf.write('Case #%s: %s\\n' % (case, cur))\n break\n",
"step-3": "rf = open('A-large.in', 'r')\nwf = open('A-large.out', 'w')\ncases = int(rf.readline())\nfor case in range(1, cases + 1):\n digits = [False] * 10\n n = int(rf.readline())\n if n == 0:\n wf.write('Case #%s: INSOMNIA\\n' % case)\n continue\n for i in range(1, 999999):\n cur = n * i\n for c in str(cur):\n digits[int(c)] = True\n if all(digits):\n wf.write('Case #%s: %s\\n' % (case, cur))\n break\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
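This is the digit-accumulation idea behind Google Code Jam 2016's "Counting Sheep" problem: for any N > 0, every digit 0-9 eventually appears somewhere in the sequence N, 2N, 3N, ..., so the loop always terminates; only N = 0 repeats forever ("INSOMNIA"). A function-level sketch of the same loop, checked against the published sample answers:

def last_number(n):
    # Smallest multiple of n by which the digits seen so far cover 0-9; None means INSOMNIA.
    if n == 0:
        return None
    seen = set()
    multiple = 0
    while len(seen) < 10:
        multiple += n
        seen.update(str(multiple))
    return multiple

assert last_number(0) is None
assert last_number(1) == 10
assert last_number(2) == 90
assert last_number(11) == 110
assert last_number(1692) == 5076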
from tkinter import *
import re
class Molecule:
def __init__(self, nom, poids, adn):
self.nom = nom
self.poids = poids
self.adn = adn
def __repr__(self):
return "{} : {} g".format(self.nom, self.poids)
class Menu:
def __init__(self):
self.data = dict()
self.main = Tk()
self.main.title("Molécules")
self.main.config(bg="black")
self.main.minsize(210, 220)
        self.mean = 0.0
Button(self.main, width=14, bg="black", fg="white", text='Ajouter molécule', command=self.add_molecule).grid(
pady=10)
Button(self.main, width=14, bg="black", fg="white", text='Poids maximum', command=self.get_max).grid()
Button(self.main, width=14, bg="black", fg="white", text='Poids moyen', command=self.get_mean).grid(pady=10)
Button(self.main, bg="black", fg="white", text='Molécules au poids supérieur\nà la moyenne',
command=self.greater_than_mean).grid(padx=10)
self.io = Frame(self.main, bg="black")
Button(self.io, bg="black", fg="white", text='Importer', command=self.import_data).grid(row=1, column=1, padx=5)
Button(self.io, bg="black", fg="white", text='Exporter', command=self.export_data).grid(row=1, column=2, padx=5)
self.io.grid(pady=10)
self.dessein = Canvas(self.main, width=500, height=500)
self.y = 45
self.y2 = 50
self.left = self.dessein.create_oval(275, self.y, 200, self.y + 155, fill="deeppink2", outline="")
self.right = self.dessein.create_oval(225, self.y, 300, self.y + 155, fill="deeppink2", outline="")
self.corps = self.dessein.create_polygon(200, self.y2, 300, self.y2, 300, 400, 200, 400, fill="salmon1")
self.shadow1 = self.dessein.create_polygon(275, self.y2, 300, self.y2, 300, 400, 275, 400, fill="salmon2")
self.shadow2 = self.dessein.create_polygon(290, self.y2, 300, self.y2, 300, 400, 290, 400, fill="salmon3")
self.giggle = True
self.ball_left = self.dessein.create_oval(275, 345, 100, 445, fill="salmon1", outline="")
self.ball_right = self.dessein.create_oval(225, 345, 400, 445, fill="salmon1", outline="")
self.main.bind("<Down>", self.grow_penis)
self.dessein.grid(pady=10)
Button(self.main, width=14, bg="black", fg="white", text='Enlarge your penis !!!',
command=self.grow_penis).grid()
self.main.mainloop()
def grow_penis(self, event=None):
if self.y >= 0:
self.y -= 2
if self.y2 <= 75:
self.y2 += 1
self.dessein.coords(self.left, 275, self.y, 200, self.y + 155)
self.dessein.coords(self.right, 225, self.y, 300, self.y + 155)
self.dessein.coords(self.corps, 200, self.y2, 300, self.y2, 300, 400, 200, 400)
self.dessein.coords(self.shadow1, 275, self.y2, 300, self.y2, 300, 400, 275, 400)
self.dessein.coords(self.shadow2, 290, self.y2, 300, self.y2, 300, 400, 290, 400)
if self.giggle:
self.giggle = False
self.dessein.coords(self.ball_left, 275, 350, 100, 450)
self.dessein.coords(self.ball_right, 225, 350, 400, 450)
else:
self.giggle = True
self.dessein.coords(self.ball_left, 275, 345, 100, 445)
self.dessein.coords(self.ball_right, 225, 345, 400, 445)
def add_molecule(self):
GUIAdd(self)
def get_max(self):
GUIMax(self)
def get_mean(self):
GUIMean(self)
def greater_than_mean(self):
GUIGtm(self)
    def calc_mean(self):
        # Average weight; guard against an empty data set to avoid ZeroDivisionError.
        poids = [x['poids'] for x in self.data.values()]
        self.mean = sum(poids) / len(poids) if poids else 0.0
def import_data(self):
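        # Expected mols.txt layout: line 1 = names, line 2 = weights,
        # line 3 = DNA strings (three space-separated, parallel lists).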
with open('mols.txt', 'r') as input_file:
input_txt = input_file.readlines()
liste_name = input_txt[0].split()
liste_weight = [float(x) for x in input_txt[1].split()]
liste_adn = input_txt[2].split()
for i in range(len(liste_name)):
self.data[liste_name[i]] = {'poids': liste_weight[i], 'ADN': liste_adn[i]}
def export_data(self):
if len(self.data) > 0:
with open('mols.txt', 'w') as output:
valeurs = self.data.values()
liste_weight = [x['poids'] for x in valeurs]
liste_adn = [x['ADN'] for x in valeurs]
output.write(' '.join(self.data.keys()) + '\n')
output.write(' '.join([str(x) for x in liste_weight]) + '\n')
output.write(' '.join(liste_adn))
class GUIAdd:
def __init__(self, menu: Menu):
self.root = menu
self.gui = Toplevel(menu.main)
self.gui.title('Ajout de molécule')
self.gui.minsize(210, 100)
Label(self.gui, text='Nom de la molécule').pack()
self.mole_nom = Entry(self.gui)
self.mole_nom.pack()
Label(self.gui, text='Poids de la molécule').pack()
self.mole_poids = Entry(self.gui)
self.mole_poids.pack()
Label(self.gui, text='ADN de la molécule').pack()
self.mole_adn = Entry(self.gui)
self.mole_adn.pack()
Button(self.gui, text='Ajouter', command=self.close_gui).pack()
self.error = Label(self.gui, text="")
self.error.pack()
self.gui.mainloop()
def close_gui(self):
try:
if len(self.mole_nom.get()) > 0 and len(self.mole_poids.get()) > 0 and len(self.mole_adn.get()) > 0:
if self.mole_nom.get() not in self.root.data.keys():
if not re.search(r'[^ACGT]', self.mole_adn.get()):
self.root.data[self.mole_nom.get()] = {'poids': float(self.mole_poids.get()),
'ADN': self.mole_adn.get()}
else:
self.error['text'] = "Séquence d'ADN non réglementaire"
return
else:
self.error['text'] = "Molecule déjà existante dans les données"
return
else:
self.error['text'] = "Tous les champs ne sont pas remplis"
return
except ValueError:
self.error['text'] = "Poids doit être un float ou un int"
return
self.gui.destroy()
class GUIMax:
def __init__(self, menu: Menu):
maxi = 0
max_list = []
self.gui = Toplevel(menu.main)
self.gui.title('Molécule au poids maximal')
self.gui.minsize(210, 100)
for mol in menu.data:
if menu.data[mol]['poids'] > maxi:
maxi = menu.data[mol]['poids']
max_list = [mol]
elif menu.data[mol]['poids'] == maxi:
max_list.append(mol)
for mol in max_list:
Label(self.gui, text="{} : {} g".format(mol, menu.data[mol]["poids"])).pack()
self.gui.mainloop()
class GUIMean:
def __init__(self, menu: Menu):
self.gui = Toplevel(menu.main)
self.gui.title('Poids moyen')
self.gui.minsize(210, 100)
menu.calc_mean()
Label(self.gui, text="Poids moyen des molécules").pack()
Label(self.gui, text=menu.mean).pack()
self.gui.mainloop()
class GUIGtm:
def __init__(self, menu: Menu):
menu.calc_mean()
self.gui = Toplevel(menu.main)
self.gui.title('Molécule au poids supérieur à la moyenne')
self.gui.minsize(210, 100)
for mol in menu.data.keys():
if menu.data[mol]['poids'] >= menu.mean:
Label(self.gui, text="{} : {} g".format(mol, menu.data[mol]["poids"])).pack()
self.gui.mainloop()
def pascal(n: int):
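    # Prints rows 0 through n of Pascal's triangle, deriving each row from the previous one.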
prec = [1]
for i in range(1, n + 2):
print(' '.join([str(x) for x in prec]))
new = []
for j in range(i + 1):
if j == 0 or j == i:
new.append(1)
else:
new.append(prec[j] + prec[j - 1])
prec = new
Menu()
# pascal(50)
|
normal
|
{
"blob_id": "4d05e65dce9f689ae533a57466bc75fa24db7b4d",
"index": 4558,
"step-1": "<mask token>\n\n\nclass Menu:\n\n def __init__(self):\n self.data = dict()\n self.main = Tk()\n self.main.title('Molécules')\n self.main.config(bg='black')\n self.main.minsize(210, 220)\n self.mean = float\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Ajouter molécule', command=self.add_molecule).grid(pady=10)\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Poids maximum', command=self.get_max).grid()\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Poids moyen', command=self.get_mean).grid(pady=10)\n Button(self.main, bg='black', fg='white', text=\n 'Molécules au poids supérieur\\nà la moyenne', command=self.\n greater_than_mean).grid(padx=10)\n self.io = Frame(self.main, bg='black')\n Button(self.io, bg='black', fg='white', text='Importer', command=\n self.import_data).grid(row=1, column=1, padx=5)\n Button(self.io, bg='black', fg='white', text='Exporter', command=\n self.export_data).grid(row=1, column=2, padx=5)\n self.io.grid(pady=10)\n self.dessein = Canvas(self.main, width=500, height=500)\n self.y = 45\n self.y2 = 50\n self.left = self.dessein.create_oval(275, self.y, 200, self.y + 155,\n fill='deeppink2', outline='')\n self.right = self.dessein.create_oval(225, self.y, 300, self.y + \n 155, fill='deeppink2', outline='')\n self.corps = self.dessein.create_polygon(200, self.y2, 300, self.y2,\n 300, 400, 200, 400, fill='salmon1')\n self.shadow1 = self.dessein.create_polygon(275, self.y2, 300, self.\n y2, 300, 400, 275, 400, fill='salmon2')\n self.shadow2 = self.dessein.create_polygon(290, self.y2, 300, self.\n y2, 300, 400, 290, 400, fill='salmon3')\n self.giggle = True\n self.ball_left = self.dessein.create_oval(275, 345, 100, 445, fill=\n 'salmon1', outline='')\n self.ball_right = self.dessein.create_oval(225, 345, 400, 445, fill\n ='salmon1', outline='')\n self.main.bind('<Down>', self.grow_penis)\n self.dessein.grid(pady=10)\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Enlarge your penis !!!', command=self.grow_penis).grid()\n self.main.mainloop()\n\n def grow_penis(self, event=None):\n if self.y >= 0:\n self.y -= 2\n if self.y2 <= 75:\n self.y2 += 1\n self.dessein.coords(self.left, 275, self.y, 200, self.y + 155)\n self.dessein.coords(self.right, 225, self.y, 300, self.y + 155)\n self.dessein.coords(self.corps, 200, self.y2, 300, self.y2, 300, \n 400, 200, 400)\n self.dessein.coords(self.shadow1, 275, self.y2, 300, self.y2, 300, \n 400, 275, 400)\n self.dessein.coords(self.shadow2, 290, self.y2, 300, self.y2, 300, \n 400, 290, 400)\n if self.giggle:\n self.giggle = False\n self.dessein.coords(self.ball_left, 275, 350, 100, 450)\n self.dessein.coords(self.ball_right, 225, 350, 400, 450)\n else:\n self.giggle = True\n self.dessein.coords(self.ball_left, 275, 345, 100, 445)\n self.dessein.coords(self.ball_right, 225, 345, 400, 445)\n\n def add_molecule(self):\n GUIAdd(self)\n\n def get_max(self):\n GUIMax(self)\n\n def get_mean(self):\n GUIMean(self)\n <mask token>\n\n def calc_mean(self):\n self.mean = sum([x['poids'] for x in self.data.values()]) / len(self\n .data.values())\n <mask token>\n\n def export_data(self):\n if len(self.data) > 0:\n with open('mols.txt', 'w') as output:\n valeurs = self.data.values()\n liste_weight = [x['poids'] for x in valeurs]\n liste_adn = [x['ADN'] for x in valeurs]\n output.write(' '.join(self.data.keys()) + '\\n')\n output.write(' '.join([str(x) for x in liste_weight]) + '\\n')\n output.write(' '.join(liste_adn))\n\n\nclass GUIAdd:\n\n def __init__(self, menu: Menu):\n 
self.root = menu\n self.gui = Toplevel(menu.main)\n self.gui.title('Ajout de molécule')\n self.gui.minsize(210, 100)\n Label(self.gui, text='Nom de la molécule').pack()\n self.mole_nom = Entry(self.gui)\n self.mole_nom.pack()\n Label(self.gui, text='Poids de la molécule').pack()\n self.mole_poids = Entry(self.gui)\n self.mole_poids.pack()\n Label(self.gui, text='ADN de la molécule').pack()\n self.mole_adn = Entry(self.gui)\n self.mole_adn.pack()\n Button(self.gui, text='Ajouter', command=self.close_gui).pack()\n self.error = Label(self.gui, text='')\n self.error.pack()\n self.gui.mainloop()\n\n def close_gui(self):\n try:\n if len(self.mole_nom.get()) > 0 and len(self.mole_poids.get()\n ) > 0 and len(self.mole_adn.get()) > 0:\n if self.mole_nom.get() not in self.root.data.keys():\n if not re.search('[^ACGT]', self.mole_adn.get()):\n self.root.data[self.mole_nom.get()] = {'poids':\n float(self.mole_poids.get()), 'ADN': self.\n mole_adn.get()}\n else:\n self.error['text'] = \"Séquence d'ADN non réglementaire\"\n return\n else:\n self.error['text'\n ] = 'Molecule déjà existante dans les données'\n return\n else:\n self.error['text'] = 'Tous les champs ne sont pas remplis'\n return\n except ValueError:\n self.error['text'] = 'Poids doit être un float ou un int'\n return\n self.gui.destroy()\n\n\nclass GUIMax:\n\n def __init__(self, menu: Menu):\n maxi = 0\n max_list = []\n self.gui = Toplevel(menu.main)\n self.gui.title('Molécule au poids maximal')\n self.gui.minsize(210, 100)\n for mol in menu.data:\n if menu.data[mol]['poids'] > maxi:\n maxi = menu.data[mol]['poids']\n max_list = [mol]\n elif menu.data[mol]['poids'] == maxi:\n max_list.append(mol)\n for mol in max_list:\n Label(self.gui, text='{} : {} g'.format(mol, menu.data[mol][\n 'poids'])).pack()\n self.gui.mainloop()\n\n\nclass GUIMean:\n\n def __init__(self, menu: Menu):\n self.gui = Toplevel(menu.main)\n self.gui.title('Poids moyen')\n self.gui.minsize(210, 100)\n menu.calc_mean()\n Label(self.gui, text='Poids moyen des molécules').pack()\n Label(self.gui, text=menu.mean).pack()\n self.gui.mainloop()\n\n\nclass GUIGtm:\n\n def __init__(self, menu: Menu):\n menu.calc_mean()\n self.gui = Toplevel(menu.main)\n self.gui.title('Molécule au poids supérieur à la moyenne')\n self.gui.minsize(210, 100)\n for mol in menu.data.keys():\n if menu.data[mol]['poids'] >= menu.mean:\n Label(self.gui, text='{} : {} g'.format(mol, menu.data[mol]\n ['poids'])).pack()\n self.gui.mainloop()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Menu:\n\n def __init__(self):\n self.data = dict()\n self.main = Tk()\n self.main.title('Molécules')\n self.main.config(bg='black')\n self.main.minsize(210, 220)\n self.mean = float\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Ajouter molécule', command=self.add_molecule).grid(pady=10)\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Poids maximum', command=self.get_max).grid()\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Poids moyen', command=self.get_mean).grid(pady=10)\n Button(self.main, bg='black', fg='white', text=\n 'Molécules au poids supérieur\\nà la moyenne', command=self.\n greater_than_mean).grid(padx=10)\n self.io = Frame(self.main, bg='black')\n Button(self.io, bg='black', fg='white', text='Importer', command=\n self.import_data).grid(row=1, column=1, padx=5)\n Button(self.io, bg='black', fg='white', text='Exporter', command=\n self.export_data).grid(row=1, column=2, padx=5)\n self.io.grid(pady=10)\n self.dessein = Canvas(self.main, width=500, height=500)\n self.y = 45\n self.y2 = 50\n self.left = self.dessein.create_oval(275, self.y, 200, self.y + 155,\n fill='deeppink2', outline='')\n self.right = self.dessein.create_oval(225, self.y, 300, self.y + \n 155, fill='deeppink2', outline='')\n self.corps = self.dessein.create_polygon(200, self.y2, 300, self.y2,\n 300, 400, 200, 400, fill='salmon1')\n self.shadow1 = self.dessein.create_polygon(275, self.y2, 300, self.\n y2, 300, 400, 275, 400, fill='salmon2')\n self.shadow2 = self.dessein.create_polygon(290, self.y2, 300, self.\n y2, 300, 400, 290, 400, fill='salmon3')\n self.giggle = True\n self.ball_left = self.dessein.create_oval(275, 345, 100, 445, fill=\n 'salmon1', outline='')\n self.ball_right = self.dessein.create_oval(225, 345, 400, 445, fill\n ='salmon1', outline='')\n self.main.bind('<Down>', self.grow_penis)\n self.dessein.grid(pady=10)\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Enlarge your penis !!!', command=self.grow_penis).grid()\n self.main.mainloop()\n\n def grow_penis(self, event=None):\n if self.y >= 0:\n self.y -= 2\n if self.y2 <= 75:\n self.y2 += 1\n self.dessein.coords(self.left, 275, self.y, 200, self.y + 155)\n self.dessein.coords(self.right, 225, self.y, 300, self.y + 155)\n self.dessein.coords(self.corps, 200, self.y2, 300, self.y2, 300, \n 400, 200, 400)\n self.dessein.coords(self.shadow1, 275, self.y2, 300, self.y2, 300, \n 400, 275, 400)\n self.dessein.coords(self.shadow2, 290, self.y2, 300, self.y2, 300, \n 400, 290, 400)\n if self.giggle:\n self.giggle = False\n self.dessein.coords(self.ball_left, 275, 350, 100, 450)\n self.dessein.coords(self.ball_right, 225, 350, 400, 450)\n else:\n self.giggle = True\n self.dessein.coords(self.ball_left, 275, 345, 100, 445)\n self.dessein.coords(self.ball_right, 225, 345, 400, 445)\n\n def add_molecule(self):\n GUIAdd(self)\n\n def get_max(self):\n GUIMax(self)\n\n def get_mean(self):\n GUIMean(self)\n\n def greater_than_mean(self):\n GUIGtm(self)\n\n def calc_mean(self):\n self.mean = sum([x['poids'] for x in self.data.values()]) / len(self\n .data.values())\n <mask token>\n\n def export_data(self):\n if len(self.data) > 0:\n with open('mols.txt', 'w') as output:\n valeurs = self.data.values()\n liste_weight = [x['poids'] for x in valeurs]\n liste_adn = [x['ADN'] for x in valeurs]\n output.write(' '.join(self.data.keys()) + '\\n')\n output.write(' '.join([str(x) for x in liste_weight]) + '\\n')\n output.write(' '.join(liste_adn))\n\n\nclass GUIAdd:\n\n def 
__init__(self, menu: Menu):\n self.root = menu\n self.gui = Toplevel(menu.main)\n self.gui.title('Ajout de molécule')\n self.gui.minsize(210, 100)\n Label(self.gui, text='Nom de la molécule').pack()\n self.mole_nom = Entry(self.gui)\n self.mole_nom.pack()\n Label(self.gui, text='Poids de la molécule').pack()\n self.mole_poids = Entry(self.gui)\n self.mole_poids.pack()\n Label(self.gui, text='ADN de la molécule').pack()\n self.mole_adn = Entry(self.gui)\n self.mole_adn.pack()\n Button(self.gui, text='Ajouter', command=self.close_gui).pack()\n self.error = Label(self.gui, text='')\n self.error.pack()\n self.gui.mainloop()\n\n def close_gui(self):\n try:\n if len(self.mole_nom.get()) > 0 and len(self.mole_poids.get()\n ) > 0 and len(self.mole_adn.get()) > 0:\n if self.mole_nom.get() not in self.root.data.keys():\n if not re.search('[^ACGT]', self.mole_adn.get()):\n self.root.data[self.mole_nom.get()] = {'poids':\n float(self.mole_poids.get()), 'ADN': self.\n mole_adn.get()}\n else:\n self.error['text'] = \"Séquence d'ADN non réglementaire\"\n return\n else:\n self.error['text'\n ] = 'Molecule déjà existante dans les données'\n return\n else:\n self.error['text'] = 'Tous les champs ne sont pas remplis'\n return\n except ValueError:\n self.error['text'] = 'Poids doit être un float ou un int'\n return\n self.gui.destroy()\n\n\nclass GUIMax:\n\n def __init__(self, menu: Menu):\n maxi = 0\n max_list = []\n self.gui = Toplevel(menu.main)\n self.gui.title('Molécule au poids maximal')\n self.gui.minsize(210, 100)\n for mol in menu.data:\n if menu.data[mol]['poids'] > maxi:\n maxi = menu.data[mol]['poids']\n max_list = [mol]\n elif menu.data[mol]['poids'] == maxi:\n max_list.append(mol)\n for mol in max_list:\n Label(self.gui, text='{} : {} g'.format(mol, menu.data[mol][\n 'poids'])).pack()\n self.gui.mainloop()\n\n\nclass GUIMean:\n\n def __init__(self, menu: Menu):\n self.gui = Toplevel(menu.main)\n self.gui.title('Poids moyen')\n self.gui.minsize(210, 100)\n menu.calc_mean()\n Label(self.gui, text='Poids moyen des molécules').pack()\n Label(self.gui, text=menu.mean).pack()\n self.gui.mainloop()\n\n\nclass GUIGtm:\n\n def __init__(self, menu: Menu):\n menu.calc_mean()\n self.gui = Toplevel(menu.main)\n self.gui.title('Molécule au poids supérieur à la moyenne')\n self.gui.minsize(210, 100)\n for mol in menu.data.keys():\n if menu.data[mol]['poids'] >= menu.mean:\n Label(self.gui, text='{} : {} g'.format(mol, menu.data[mol]\n ['poids'])).pack()\n self.gui.mainloop()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Molecule:\n\n def __init__(self, nom, poids, adn):\n self.nom = nom\n self.poids = poids\n self.adn = adn\n\n def __repr__(self):\n return '{} : {} g'.format(self.nom, self.poids)\n\n\nclass Menu:\n\n def __init__(self):\n self.data = dict()\n self.main = Tk()\n self.main.title('Molécules')\n self.main.config(bg='black')\n self.main.minsize(210, 220)\n self.mean = float\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Ajouter molécule', command=self.add_molecule).grid(pady=10)\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Poids maximum', command=self.get_max).grid()\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Poids moyen', command=self.get_mean).grid(pady=10)\n Button(self.main, bg='black', fg='white', text=\n 'Molécules au poids supérieur\\nà la moyenne', command=self.\n greater_than_mean).grid(padx=10)\n self.io = Frame(self.main, bg='black')\n Button(self.io, bg='black', fg='white', text='Importer', command=\n self.import_data).grid(row=1, column=1, padx=5)\n Button(self.io, bg='black', fg='white', text='Exporter', command=\n self.export_data).grid(row=1, column=2, padx=5)\n self.io.grid(pady=10)\n self.dessein = Canvas(self.main, width=500, height=500)\n self.y = 45\n self.y2 = 50\n self.left = self.dessein.create_oval(275, self.y, 200, self.y + 155,\n fill='deeppink2', outline='')\n self.right = self.dessein.create_oval(225, self.y, 300, self.y + \n 155, fill='deeppink2', outline='')\n self.corps = self.dessein.create_polygon(200, self.y2, 300, self.y2,\n 300, 400, 200, 400, fill='salmon1')\n self.shadow1 = self.dessein.create_polygon(275, self.y2, 300, self.\n y2, 300, 400, 275, 400, fill='salmon2')\n self.shadow2 = self.dessein.create_polygon(290, self.y2, 300, self.\n y2, 300, 400, 290, 400, fill='salmon3')\n self.giggle = True\n self.ball_left = self.dessein.create_oval(275, 345, 100, 445, fill=\n 'salmon1', outline='')\n self.ball_right = self.dessein.create_oval(225, 345, 400, 445, fill\n ='salmon1', outline='')\n self.main.bind('<Down>', self.grow_penis)\n self.dessein.grid(pady=10)\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Enlarge your penis !!!', command=self.grow_penis).grid()\n self.main.mainloop()\n\n def grow_penis(self, event=None):\n if self.y >= 0:\n self.y -= 2\n if self.y2 <= 75:\n self.y2 += 1\n self.dessein.coords(self.left, 275, self.y, 200, self.y + 155)\n self.dessein.coords(self.right, 225, self.y, 300, self.y + 155)\n self.dessein.coords(self.corps, 200, self.y2, 300, self.y2, 300, \n 400, 200, 400)\n self.dessein.coords(self.shadow1, 275, self.y2, 300, self.y2, 300, \n 400, 275, 400)\n self.dessein.coords(self.shadow2, 290, self.y2, 300, self.y2, 300, \n 400, 290, 400)\n if self.giggle:\n self.giggle = False\n self.dessein.coords(self.ball_left, 275, 350, 100, 450)\n self.dessein.coords(self.ball_right, 225, 350, 400, 450)\n else:\n self.giggle = True\n self.dessein.coords(self.ball_left, 275, 345, 100, 445)\n self.dessein.coords(self.ball_right, 225, 345, 400, 445)\n\n def add_molecule(self):\n GUIAdd(self)\n\n def get_max(self):\n GUIMax(self)\n\n def get_mean(self):\n GUIMean(self)\n\n def greater_than_mean(self):\n GUIGtm(self)\n\n def calc_mean(self):\n self.mean = sum([x['poids'] for x in self.data.values()]) / len(self\n .data.values())\n\n def import_data(self):\n with open('mols.txt', 'r') as input_file:\n input_txt = input_file.readlines()\n liste_name = input_txt[0].split()\n liste_weight = [float(x) for x in input_txt[1].split()]\n liste_adn = 
input_txt[2].split()\n for i in range(len(liste_name)):\n self.data[liste_name[i]] = {'poids': liste_weight[i], 'ADN':\n liste_adn[i]}\n\n def export_data(self):\n if len(self.data) > 0:\n with open('mols.txt', 'w') as output:\n valeurs = self.data.values()\n liste_weight = [x['poids'] for x in valeurs]\n liste_adn = [x['ADN'] for x in valeurs]\n output.write(' '.join(self.data.keys()) + '\\n')\n output.write(' '.join([str(x) for x in liste_weight]) + '\\n')\n output.write(' '.join(liste_adn))\n\n\nclass GUIAdd:\n\n def __init__(self, menu: Menu):\n self.root = menu\n self.gui = Toplevel(menu.main)\n self.gui.title('Ajout de molécule')\n self.gui.minsize(210, 100)\n Label(self.gui, text='Nom de la molécule').pack()\n self.mole_nom = Entry(self.gui)\n self.mole_nom.pack()\n Label(self.gui, text='Poids de la molécule').pack()\n self.mole_poids = Entry(self.gui)\n self.mole_poids.pack()\n Label(self.gui, text='ADN de la molécule').pack()\n self.mole_adn = Entry(self.gui)\n self.mole_adn.pack()\n Button(self.gui, text='Ajouter', command=self.close_gui).pack()\n self.error = Label(self.gui, text='')\n self.error.pack()\n self.gui.mainloop()\n\n def close_gui(self):\n try:\n if len(self.mole_nom.get()) > 0 and len(self.mole_poids.get()\n ) > 0 and len(self.mole_adn.get()) > 0:\n if self.mole_nom.get() not in self.root.data.keys():\n if not re.search('[^ACGT]', self.mole_adn.get()):\n self.root.data[self.mole_nom.get()] = {'poids':\n float(self.mole_poids.get()), 'ADN': self.\n mole_adn.get()}\n else:\n self.error['text'] = \"Séquence d'ADN non réglementaire\"\n return\n else:\n self.error['text'\n ] = 'Molecule déjà existante dans les données'\n return\n else:\n self.error['text'] = 'Tous les champs ne sont pas remplis'\n return\n except ValueError:\n self.error['text'] = 'Poids doit être un float ou un int'\n return\n self.gui.destroy()\n\n\nclass GUIMax:\n\n def __init__(self, menu: Menu):\n maxi = 0\n max_list = []\n self.gui = Toplevel(menu.main)\n self.gui.title('Molécule au poids maximal')\n self.gui.minsize(210, 100)\n for mol in menu.data:\n if menu.data[mol]['poids'] > maxi:\n maxi = menu.data[mol]['poids']\n max_list = [mol]\n elif menu.data[mol]['poids'] == maxi:\n max_list.append(mol)\n for mol in max_list:\n Label(self.gui, text='{} : {} g'.format(mol, menu.data[mol][\n 'poids'])).pack()\n self.gui.mainloop()\n\n\nclass GUIMean:\n\n def __init__(self, menu: Menu):\n self.gui = Toplevel(menu.main)\n self.gui.title('Poids moyen')\n self.gui.minsize(210, 100)\n menu.calc_mean()\n Label(self.gui, text='Poids moyen des molécules').pack()\n Label(self.gui, text=menu.mean).pack()\n self.gui.mainloop()\n\n\nclass GUIGtm:\n\n def __init__(self, menu: Menu):\n menu.calc_mean()\n self.gui = Toplevel(menu.main)\n self.gui.title('Molécule au poids supérieur à la moyenne')\n self.gui.minsize(210, 100)\n for mol in menu.data.keys():\n if menu.data[mol]['poids'] >= menu.mean:\n Label(self.gui, text='{} : {} g'.format(mol, menu.data[mol]\n ['poids'])).pack()\n self.gui.mainloop()\n\n\ndef pascal(n: int):\n prec = [1]\n for i in range(1, n + 2):\n print(' '.join([str(x) for x in prec]))\n new = []\n for j in range(i + 1):\n if j == 0 or j == i:\n new.append(1)\n else:\n new.append(prec[j] + prec[j - 1])\n prec = new\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Molecule:\n\n def __init__(self, nom, poids, adn):\n self.nom = nom\n self.poids = poids\n self.adn = adn\n\n def __repr__(self):\n return '{} : {} g'.format(self.nom, self.poids)\n\n\nclass Menu:\n\n def __init__(self):\n self.data = dict()\n self.main = Tk()\n self.main.title('Molécules')\n self.main.config(bg='black')\n self.main.minsize(210, 220)\n self.mean = float\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Ajouter molécule', command=self.add_molecule).grid(pady=10)\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Poids maximum', command=self.get_max).grid()\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Poids moyen', command=self.get_mean).grid(pady=10)\n Button(self.main, bg='black', fg='white', text=\n 'Molécules au poids supérieur\\nà la moyenne', command=self.\n greater_than_mean).grid(padx=10)\n self.io = Frame(self.main, bg='black')\n Button(self.io, bg='black', fg='white', text='Importer', command=\n self.import_data).grid(row=1, column=1, padx=5)\n Button(self.io, bg='black', fg='white', text='Exporter', command=\n self.export_data).grid(row=1, column=2, padx=5)\n self.io.grid(pady=10)\n self.dessein = Canvas(self.main, width=500, height=500)\n self.y = 45\n self.y2 = 50\n self.left = self.dessein.create_oval(275, self.y, 200, self.y + 155,\n fill='deeppink2', outline='')\n self.right = self.dessein.create_oval(225, self.y, 300, self.y + \n 155, fill='deeppink2', outline='')\n self.corps = self.dessein.create_polygon(200, self.y2, 300, self.y2,\n 300, 400, 200, 400, fill='salmon1')\n self.shadow1 = self.dessein.create_polygon(275, self.y2, 300, self.\n y2, 300, 400, 275, 400, fill='salmon2')\n self.shadow2 = self.dessein.create_polygon(290, self.y2, 300, self.\n y2, 300, 400, 290, 400, fill='salmon3')\n self.giggle = True\n self.ball_left = self.dessein.create_oval(275, 345, 100, 445, fill=\n 'salmon1', outline='')\n self.ball_right = self.dessein.create_oval(225, 345, 400, 445, fill\n ='salmon1', outline='')\n self.main.bind('<Down>', self.grow_penis)\n self.dessein.grid(pady=10)\n Button(self.main, width=14, bg='black', fg='white', text=\n 'Enlarge your penis !!!', command=self.grow_penis).grid()\n self.main.mainloop()\n\n def grow_penis(self, event=None):\n if self.y >= 0:\n self.y -= 2\n if self.y2 <= 75:\n self.y2 += 1\n self.dessein.coords(self.left, 275, self.y, 200, self.y + 155)\n self.dessein.coords(self.right, 225, self.y, 300, self.y + 155)\n self.dessein.coords(self.corps, 200, self.y2, 300, self.y2, 300, \n 400, 200, 400)\n self.dessein.coords(self.shadow1, 275, self.y2, 300, self.y2, 300, \n 400, 275, 400)\n self.dessein.coords(self.shadow2, 290, self.y2, 300, self.y2, 300, \n 400, 290, 400)\n if self.giggle:\n self.giggle = False\n self.dessein.coords(self.ball_left, 275, 350, 100, 450)\n self.dessein.coords(self.ball_right, 225, 350, 400, 450)\n else:\n self.giggle = True\n self.dessein.coords(self.ball_left, 275, 345, 100, 445)\n self.dessein.coords(self.ball_right, 225, 345, 400, 445)\n\n def add_molecule(self):\n GUIAdd(self)\n\n def get_max(self):\n GUIMax(self)\n\n def get_mean(self):\n GUIMean(self)\n\n def greater_than_mean(self):\n GUIGtm(self)\n\n def calc_mean(self):\n self.mean = sum([x['poids'] for x in self.data.values()]) / len(self\n .data.values())\n\n def import_data(self):\n with open('mols.txt', 'r') as input_file:\n input_txt = input_file.readlines()\n liste_name = input_txt[0].split()\n liste_weight = [float(x) for x in input_txt[1].split()]\n liste_adn = 
input_txt[2].split()\n for i in range(len(liste_name)):\n self.data[liste_name[i]] = {'poids': liste_weight[i], 'ADN':\n liste_adn[i]}\n\n def export_data(self):\n if len(self.data) > 0:\n with open('mols.txt', 'w') as output:\n valeurs = self.data.values()\n liste_weight = [x['poids'] for x in valeurs]\n liste_adn = [x['ADN'] for x in valeurs]\n output.write(' '.join(self.data.keys()) + '\\n')\n output.write(' '.join([str(x) for x in liste_weight]) + '\\n')\n output.write(' '.join(liste_adn))\n\n\nclass GUIAdd:\n\n def __init__(self, menu: Menu):\n self.root = menu\n self.gui = Toplevel(menu.main)\n self.gui.title('Ajout de molécule')\n self.gui.minsize(210, 100)\n Label(self.gui, text='Nom de la molécule').pack()\n self.mole_nom = Entry(self.gui)\n self.mole_nom.pack()\n Label(self.gui, text='Poids de la molécule').pack()\n self.mole_poids = Entry(self.gui)\n self.mole_poids.pack()\n Label(self.gui, text='ADN de la molécule').pack()\n self.mole_adn = Entry(self.gui)\n self.mole_adn.pack()\n Button(self.gui, text='Ajouter', command=self.close_gui).pack()\n self.error = Label(self.gui, text='')\n self.error.pack()\n self.gui.mainloop()\n\n def close_gui(self):\n try:\n if len(self.mole_nom.get()) > 0 and len(self.mole_poids.get()\n ) > 0 and len(self.mole_adn.get()) > 0:\n if self.mole_nom.get() not in self.root.data.keys():\n if not re.search('[^ACGT]', self.mole_adn.get()):\n self.root.data[self.mole_nom.get()] = {'poids':\n float(self.mole_poids.get()), 'ADN': self.\n mole_adn.get()}\n else:\n self.error['text'] = \"Séquence d'ADN non réglementaire\"\n return\n else:\n self.error['text'\n ] = 'Molecule déjà existante dans les données'\n return\n else:\n self.error['text'] = 'Tous les champs ne sont pas remplis'\n return\n except ValueError:\n self.error['text'] = 'Poids doit être un float ou un int'\n return\n self.gui.destroy()\n\n\nclass GUIMax:\n\n def __init__(self, menu: Menu):\n maxi = 0\n max_list = []\n self.gui = Toplevel(menu.main)\n self.gui.title('Molécule au poids maximal')\n self.gui.minsize(210, 100)\n for mol in menu.data:\n if menu.data[mol]['poids'] > maxi:\n maxi = menu.data[mol]['poids']\n max_list = [mol]\n elif menu.data[mol]['poids'] == maxi:\n max_list.append(mol)\n for mol in max_list:\n Label(self.gui, text='{} : {} g'.format(mol, menu.data[mol][\n 'poids'])).pack()\n self.gui.mainloop()\n\n\nclass GUIMean:\n\n def __init__(self, menu: Menu):\n self.gui = Toplevel(menu.main)\n self.gui.title('Poids moyen')\n self.gui.minsize(210, 100)\n menu.calc_mean()\n Label(self.gui, text='Poids moyen des molécules').pack()\n Label(self.gui, text=menu.mean).pack()\n self.gui.mainloop()\n\n\nclass GUIGtm:\n\n def __init__(self, menu: Menu):\n menu.calc_mean()\n self.gui = Toplevel(menu.main)\n self.gui.title('Molécule au poids supérieur à la moyenne')\n self.gui.minsize(210, 100)\n for mol in menu.data.keys():\n if menu.data[mol]['poids'] >= menu.mean:\n Label(self.gui, text='{} : {} g'.format(mol, menu.data[mol]\n ['poids'])).pack()\n self.gui.mainloop()\n\n\ndef pascal(n: int):\n prec = [1]\n for i in range(1, n + 2):\n print(' '.join([str(x) for x in prec]))\n new = []\n for j in range(i + 1):\n if j == 0 or j == i:\n new.append(1)\n else:\n new.append(prec[j] + prec[j - 1])\n prec = new\n\n\nMenu()\n",
"step-5": "from tkinter import *\nimport re\n\n\nclass Molecule:\n def __init__(self, nom, poids, adn):\n self.nom = nom\n self.poids = poids\n self.adn = adn\n\n def __repr__(self):\n return \"{} : {} g\".format(self.nom, self.poids)\n\n\nclass Menu:\n def __init__(self):\n self.data = dict()\n self.main = Tk()\n self.main.title(\"Molécules\")\n self.main.config(bg=\"black\")\n self.main.minsize(210, 220)\n self.mean = float\n Button(self.main, width=14, bg=\"black\", fg=\"white\", text='Ajouter molécule', command=self.add_molecule).grid(\n pady=10)\n Button(self.main, width=14, bg=\"black\", fg=\"white\", text='Poids maximum', command=self.get_max).grid()\n Button(self.main, width=14, bg=\"black\", fg=\"white\", text='Poids moyen', command=self.get_mean).grid(pady=10)\n Button(self.main, bg=\"black\", fg=\"white\", text='Molécules au poids supérieur\\nà la moyenne',\n command=self.greater_than_mean).grid(padx=10)\n\n self.io = Frame(self.main, bg=\"black\")\n Button(self.io, bg=\"black\", fg=\"white\", text='Importer', command=self.import_data).grid(row=1, column=1, padx=5)\n Button(self.io, bg=\"black\", fg=\"white\", text='Exporter', command=self.export_data).grid(row=1, column=2, padx=5)\n self.io.grid(pady=10)\n self.dessein = Canvas(self.main, width=500, height=500)\n self.y = 45\n self.y2 = 50\n\n self.left = self.dessein.create_oval(275, self.y, 200, self.y + 155, fill=\"deeppink2\", outline=\"\")\n self.right = self.dessein.create_oval(225, self.y, 300, self.y + 155, fill=\"deeppink2\", outline=\"\")\n self.corps = self.dessein.create_polygon(200, self.y2, 300, self.y2, 300, 400, 200, 400, fill=\"salmon1\")\n self.shadow1 = self.dessein.create_polygon(275, self.y2, 300, self.y2, 300, 400, 275, 400, fill=\"salmon2\")\n self.shadow2 = self.dessein.create_polygon(290, self.y2, 300, self.y2, 300, 400, 290, 400, fill=\"salmon3\")\n self.giggle = True\n self.ball_left = self.dessein.create_oval(275, 345, 100, 445, fill=\"salmon1\", outline=\"\")\n self.ball_right = self.dessein.create_oval(225, 345, 400, 445, fill=\"salmon1\", outline=\"\")\n self.main.bind(\"<Down>\", self.grow_penis)\n self.dessein.grid(pady=10)\n Button(self.main, width=14, bg=\"black\", fg=\"white\", text='Enlarge your penis !!!',\n command=self.grow_penis).grid()\n self.main.mainloop()\n\n def grow_penis(self, event=None):\n if self.y >= 0:\n self.y -= 2\n if self.y2 <= 75:\n self.y2 += 1\n self.dessein.coords(self.left, 275, self.y, 200, self.y + 155)\n self.dessein.coords(self.right, 225, self.y, 300, self.y + 155)\n self.dessein.coords(self.corps, 200, self.y2, 300, self.y2, 300, 400, 200, 400)\n self.dessein.coords(self.shadow1, 275, self.y2, 300, self.y2, 300, 400, 275, 400)\n self.dessein.coords(self.shadow2, 290, self.y2, 300, self.y2, 300, 400, 290, 400)\n if self.giggle:\n self.giggle = False\n self.dessein.coords(self.ball_left, 275, 350, 100, 450)\n self.dessein.coords(self.ball_right, 225, 350, 400, 450)\n else:\n self.giggle = True\n self.dessein.coords(self.ball_left, 275, 345, 100, 445)\n self.dessein.coords(self.ball_right, 225, 345, 400, 445)\n\n def add_molecule(self):\n GUIAdd(self)\n\n def get_max(self):\n GUIMax(self)\n\n def get_mean(self):\n GUIMean(self)\n\n def greater_than_mean(self):\n GUIGtm(self)\n\n def calc_mean(self):\n self.mean = sum([x['poids'] for x in self.data.values()]) / len(self.data.values())\n\n def import_data(self):\n with open('mols.txt', 'r') as input_file:\n input_txt = input_file.readlines()\n liste_name = input_txt[0].split()\n liste_weight = [float(x) for x in 
input_txt[1].split()]\n liste_adn = input_txt[2].split()\n for i in range(len(liste_name)):\n self.data[liste_name[i]] = {'poids': liste_weight[i], 'ADN': liste_adn[i]}\n\n def export_data(self):\n if len(self.data) > 0:\n with open('mols.txt', 'w') as output:\n valeurs = self.data.values()\n liste_weight = [x['poids'] for x in valeurs]\n liste_adn = [x['ADN'] for x in valeurs]\n\n output.write(' '.join(self.data.keys()) + '\\n')\n output.write(' '.join([str(x) for x in liste_weight]) + '\\n')\n output.write(' '.join(liste_adn))\n\n\nclass GUIAdd:\n def __init__(self, menu: Menu):\n self.root = menu\n self.gui = Toplevel(menu.main)\n self.gui.title('Ajout de molécule')\n self.gui.minsize(210, 100)\n\n Label(self.gui, text='Nom de la molécule').pack()\n self.mole_nom = Entry(self.gui)\n self.mole_nom.pack()\n\n Label(self.gui, text='Poids de la molécule').pack()\n self.mole_poids = Entry(self.gui)\n self.mole_poids.pack()\n\n Label(self.gui, text='ADN de la molécule').pack()\n self.mole_adn = Entry(self.gui)\n self.mole_adn.pack()\n\n Button(self.gui, text='Ajouter', command=self.close_gui).pack()\n self.error = Label(self.gui, text=\"\")\n self.error.pack()\n\n self.gui.mainloop()\n\n def close_gui(self):\n try:\n if len(self.mole_nom.get()) > 0 and len(self.mole_poids.get()) > 0 and len(self.mole_adn.get()) > 0:\n if self.mole_nom.get() not in self.root.data.keys():\n if not re.search(r'[^ACGT]', self.mole_adn.get()):\n self.root.data[self.mole_nom.get()] = {'poids': float(self.mole_poids.get()),\n 'ADN': self.mole_adn.get()}\n else:\n self.error['text'] = \"Séquence d'ADN non réglementaire\"\n return\n else:\n self.error['text'] = \"Molecule déjà existante dans les données\"\n return\n else:\n self.error['text'] = \"Tous les champs ne sont pas remplis\"\n return\n except ValueError:\n self.error['text'] = \"Poids doit être un float ou un int\"\n return\n self.gui.destroy()\n\n\nclass GUIMax:\n def __init__(self, menu: Menu):\n maxi = 0\n max_list = []\n self.gui = Toplevel(menu.main)\n self.gui.title('Molécule au poids maximal')\n self.gui.minsize(210, 100)\n for mol in menu.data:\n if menu.data[mol]['poids'] > maxi:\n maxi = menu.data[mol]['poids']\n max_list = [mol]\n elif menu.data[mol]['poids'] == maxi:\n max_list.append(mol)\n for mol in max_list:\n Label(self.gui, text=\"{} : {} g\".format(mol, menu.data[mol][\"poids\"])).pack()\n self.gui.mainloop()\n\n\nclass GUIMean:\n def __init__(self, menu: Menu):\n self.gui = Toplevel(menu.main)\n self.gui.title('Poids moyen')\n self.gui.minsize(210, 100)\n menu.calc_mean()\n Label(self.gui, text=\"Poids moyen des molécules\").pack()\n Label(self.gui, text=menu.mean).pack()\n\n self.gui.mainloop()\n\n\nclass GUIGtm:\n def __init__(self, menu: Menu):\n menu.calc_mean()\n self.gui = Toplevel(menu.main)\n self.gui.title('Molécule au poids supérieur à la moyenne')\n self.gui.minsize(210, 100)\n\n for mol in menu.data.keys():\n if menu.data[mol]['poids'] >= menu.mean:\n Label(self.gui, text=\"{} : {} g\".format(mol, menu.data[mol][\"poids\"])).pack()\n\n self.gui.mainloop()\n\n\ndef pascal(n: int):\n prec = [1]\n for i in range(1, n + 2):\n print(' '.join([str(x) for x in prec]))\n new = []\n for j in range(i + 1):\n if j == 0 or j == i:\n new.append(1)\n else:\n new.append(prec[j] + prec[j - 1])\n prec = new\n\n\nMenu()\n# pascal(50)\n",
"step-ids": [
17,
18,
23,
24,
26
]
}
|
[
17,
18,
23,
24,
26
] |
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 19 13:42:09 2019
@author: Administrator
"""
from config.path_config import *
import GV
def ReadTxtName(rootdir):
    #read each line of the file and return the lines as a list
lines = []
with open(rootdir, 'r') as file_to_read:
while True:
line = file_to_read.readline()
if not line:
break
line = line.strip('\n')
lines.append(line)
return lines
def project_query_lz_main(question):
    #check whether the question mentions any known project name
txt_line = ReadTxtName(PROJECT_NAMES)
for project_name in txt_line:
if project_name in question:
#print('我们觉得您是想查' + project_name + '项目的信息')
GV.SHOW = True
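            # returns, in Chinese: "We think you want to look up the
            # {project_name} project, but its details are not on record yet"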
return ('我们觉得您是想查' + project_name +
'项目的信息,但是我们还没有记录项目详细信息')
GV.FLAG = 3
GV.SHOW = False
#state = False
    #print('与项目无关,此处跳出,接其他模块')  # i.e. "not project-related; hand off to the other modules"
return question
#project_query_lz_main('工银天梭项目进度怎么样了')   # "How is the 工银天梭 project going?"
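# PROJECT_NAMES (imported from config.path_config) is assumed to point at a
# plain-text file listing one project name per line, which is how ReadTxtName
# consumes it above.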
|
normal
|
{
"blob_id": "92bbccfbfebf905965c9cb0f1a85ffaa7d0cf6b5",
"index": 3796,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef project_query_lz_main(question):\n txt_line = ReadTxtName(PROJECT_NAMES)\n for project_name in txt_line:\n if project_name in question:\n GV.SHOW = True\n return '我们觉得您是想查' + project_name + '项目的信息,但是我们还没有记录项目详细信息'\n GV.FLAG = 3\n GV.SHOW = False\n return question\n",
"step-3": "<mask token>\n\n\ndef ReadTxtName(rootdir):\n lines = []\n with open(rootdir, 'r') as file_to_read:\n while True:\n line = file_to_read.readline()\n if not line:\n break\n line = line.strip('\\n')\n lines.append(line)\n return lines\n\n\ndef project_query_lz_main(question):\n txt_line = ReadTxtName(PROJECT_NAMES)\n for project_name in txt_line:\n if project_name in question:\n GV.SHOW = True\n return '我们觉得您是想查' + project_name + '项目的信息,但是我们还没有记录项目详细信息'\n GV.FLAG = 3\n GV.SHOW = False\n return question\n",
"step-4": "<mask token>\nfrom config.path_config import *\nimport GV\n\n\ndef ReadTxtName(rootdir):\n lines = []\n with open(rootdir, 'r') as file_to_read:\n while True:\n line = file_to_read.readline()\n if not line:\n break\n line = line.strip('\\n')\n lines.append(line)\n return lines\n\n\ndef project_query_lz_main(question):\n txt_line = ReadTxtName(PROJECT_NAMES)\n for project_name in txt_line:\n if project_name in question:\n GV.SHOW = True\n return '我们觉得您是想查' + project_name + '项目的信息,但是我们还没有记录项目详细信息'\n GV.FLAG = 3\n GV.SHOW = False\n return question\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jul 19 13:42:09 2019\r\n\r\n@author: Administrator\r\n\"\"\"\r\n\r\nfrom config.path_config import *\r\nimport GV\r\n \r\ndef ReadTxtName(rootdir):\r\n #读取文件中的每一行,转为list\r\n lines = []\r\n with open(rootdir, 'r') as file_to_read:\r\n while True:\r\n line = file_to_read.readline()\r\n if not line:\r\n break\r\n line = line.strip('\\n')\r\n lines.append(line)\r\n return lines\r\n\r\ndef project_query_lz_main(question):\r\n #找语句中是否匹配到了项目名称\r\n txt_line = ReadTxtName(PROJECT_NAMES) \r\n for project_name in txt_line:\r\n if project_name in question:\r\n #print('我们觉得您是想查' + project_name + '项目的信息')\r\n GV.SHOW = True\r\n return ('我们觉得您是想查' + project_name + \r\n '项目的信息,但是我们还没有记录项目详细信息')\r\n GV.FLAG = 3\r\n GV.SHOW = False\r\n #state = False\r\n #print('与项目无关,此处跳出,接其他模块')\r\n return question\r\n\r\n#project_query_lz_main('工银天梭项目进度怎么样了',2)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
'''
Created on Mar 27, 2019
@author: Iulia
'''
from Graph import Graph
from Controller import *
from Iterators.Vertices import *
from File import File
from Iterators.EdgesIterator import EdgesIterator
def test():
tup = File.readInput("file.txt")
graph = tup[0]
edgeData = tup[1]
ctrl = Controller(graph, edgeData)
vertices = ctrl.nrVertices()
itv = verticesIterator(vertices)
assert(itv.valid())
cont = 0
while (itv.valid()):
cont += 1
e = itv.getCurrent()
itv.next()
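    # NOTE: getCurrent() is called once more after the loop; this assumes the
    # iterator still exposes its last element after valid() turns False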
e = itv.getCurrent()
assert(ctrl.existsVertex(e))
assert(cont == ctrl.nrVertices())
itv.first()
assert(itv.valid())
    # check the edge iterator over the inbound edges of vertex 3
ite = EdgesIterator(graph.getInbound(3))
assert(ite.valid())
assert(ite.getCurrent() == 1)
ite.next()
assert(ite.valid())
assert(ite.getCurrent() == 2)
test()
|
normal
|
{
"blob_id": "b01ff71792895bb8839e09ae8c4a449405349990",
"index": 7066,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test():\n tup = File.readInput('file.txt')\n graph = tup[0]\n edgeData = tup[1]\n ctrl = Controller(graph, edgeData)\n vertices = ctrl.nrVertices()\n itv = verticesIterator(vertices)\n assert itv.valid()\n cont = 0\n while itv.valid():\n cont += 1\n e = itv.getCurrent()\n itv.next()\n e = itv.getCurrent()\n assert ctrl.existsVertex(e)\n assert cont == ctrl.nrVertices()\n itv.first()\n assert itv.valid()\n ite = EdgesIterator(graph.getInbound(3))\n assert ite.valid()\n assert ite.getCurrent() == 1\n ite.next()\n assert ite.valid()\n assert ite.getCurrent() == 2\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef test():\n tup = File.readInput('file.txt')\n graph = tup[0]\n edgeData = tup[1]\n ctrl = Controller(graph, edgeData)\n vertices = ctrl.nrVertices()\n itv = verticesIterator(vertices)\n assert itv.valid()\n cont = 0\n while itv.valid():\n cont += 1\n e = itv.getCurrent()\n itv.next()\n e = itv.getCurrent()\n assert ctrl.existsVertex(e)\n assert cont == ctrl.nrVertices()\n itv.first()\n assert itv.valid()\n ite = EdgesIterator(graph.getInbound(3))\n assert ite.valid()\n assert ite.getCurrent() == 1\n ite.next()\n assert ite.valid()\n assert ite.getCurrent() == 2\n\n\ntest()\n",
"step-4": "<mask token>\nfrom Graph import Graph\nfrom Controller import *\nfrom Iterators.Vertices import *\nfrom File import File\nfrom Iterators.EdgesIterator import EdgesIterator\n\n\ndef test():\n tup = File.readInput('file.txt')\n graph = tup[0]\n edgeData = tup[1]\n ctrl = Controller(graph, edgeData)\n vertices = ctrl.nrVertices()\n itv = verticesIterator(vertices)\n assert itv.valid()\n cont = 0\n while itv.valid():\n cont += 1\n e = itv.getCurrent()\n itv.next()\n e = itv.getCurrent()\n assert ctrl.existsVertex(e)\n assert cont == ctrl.nrVertices()\n itv.first()\n assert itv.valid()\n ite = EdgesIterator(graph.getInbound(3))\n assert ite.valid()\n assert ite.getCurrent() == 1\n ite.next()\n assert ite.valid()\n assert ite.getCurrent() == 2\n\n\ntest()\n",
"step-5": "'''\nCreated on Mar 27, 2019\n\n@author: Iulia\n'''\n\nfrom Graph import Graph\nfrom Controller import *\nfrom Iterators.Vertices import *\nfrom File import File\nfrom Iterators.EdgesIterator import EdgesIterator\n\ndef test():\n tup = File.readInput(\"file.txt\")\n graph = tup[0]\n edgeData = tup[1]\n ctrl = Controller(graph, edgeData)\n \n vertices = ctrl.nrVertices()\n \n itv = verticesIterator(vertices)\n assert(itv.valid())\n \n cont = 0\n while (itv.valid()):\n cont += 1\n e = itv.getCurrent()\n itv.next() \n e = itv.getCurrent()\n assert(ctrl.existsVertex(e))\n assert(cont == ctrl.nrVertices())\n itv.first()\n assert(itv.valid())\n \n ################\n ite = EdgesIterator(graph.getInbound(3))\n assert(ite.valid())\n assert(ite.getCurrent() == 1)\n ite.next()\n assert(ite.valid())\n assert(ite.getCurrent() == 2)\n \ntest()\n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#class header
class _WATERWAYS():
def __init__(self,):
self.name = "WATERWAYS"
self.definitions = waterway
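		# NOTE: 'waterway' is not defined in this file; it is assumed to be
		# supplied by the surrounding module (e.g. an imported definitions object)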
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['waterway']
|
normal
|
{
"blob_id": "33daf5753b27f6b4bcb7c98e28cf2168e7f0b403",
"index": 9541,
"step-1": "<mask token>\n",
"step-2": "class _WATERWAYS:\n <mask token>\n",
"step-3": "class _WATERWAYS:\n\n def __init__(self):\n self.name = 'WATERWAYS'\n self.definitions = waterway\n self.parents = []\n self.childen = []\n self.properties = []\n self.jsondata = {}\n self.basic = ['waterway']\n",
"step-4": "\n\n#calss header\nclass _WATERWAYS():\n\tdef __init__(self,): \n\t\tself.name = \"WATERWAYS\"\n\t\tself.definitions = waterway\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.basic = ['waterway']\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
indelCost = 1
swapCost = 13
subCost = 12
noOp = 0
def alignStrings(x, y):
    nx = len(x)
    ny = len(y)
    # cost table: S[i][j] = minimum cost of editing x[:i] into y[:j]
    S = [[0] * (ny + 1) for _ in range(nx + 1)]
    for i in range(nx + 1):
        for j in range(ny + 1):
            if i == 0:    #first string is empty: insert all j letters of y
                S[i][j] = j * indelCost
            elif j == 0:    #second string is empty: delete all i letters of x
                S[i][j] = i * indelCost
            elif x[i-1] == y[j-1]:    #letters match: no operation needed
                S[i][j] = S[i-1][j-1] + noOp
            else:    #cheapest of substitution, deletion, insertion
                S[i][j] = min(S[i-1][j-1] + subCost,
                              S[i-1][j] + indelCost,
                              S[i][j-1] + indelCost)
            #adjacent transposition, assumed Damerau-style (this is what swapCost is for)
            if i > 1 and j > 1 and x[i-1] == y[j-2] and x[i-2] == y[j-1]:
                S[i][j] = min(S[i][j], S[i-2][j-2] + swapCost)
    return S[nx][ny]
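# A quick sanity check (hypothetical input, not part of the original exercise):
# because subCost (12) exceeds a delete+insert pair (2*indelCost = 2), every
# mismatch resolves as delete+insert, so "kitten" -> "sitting" costs
# 2*2 + 1 = 5 here rather than the textbook 2*subCost + indelCost.
#print(alignStrings("kitten", "sitting"))   # -> 5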
|
normal
|
{
"blob_id": "65aa85675393efa1a0d8e5bab4b1dbf388018c58",
"index": 261,
"step-1": "\nindelCost = 1\nswapCost = 13\nsubCost = 12\nnoOp = 0\n\t\ndef alignStrings(x,y):\n\t\n\tnx = len(x)\n\tny = len(y)\n\tS = matrix(nx+1, ny+1) #?? \n\t\n\tfor i in range (nx+1)\n\t\tfor j in range (ny+1)\n\t\t\tif i == 0:\t#if the string is empty\n\t\t\t\tS[i][j] = j\t\t\t#this will put all the letters from j in i\n\t\t\telif j == 0:\t\t#if the second string is empy\n\t\t\t\tS[i][j] = i\t\t#this will putj all the letter from i in j\n\t\t\telif \n\t\t\t\t\n\t\t\t\n\t\n\t\n\t\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 17:34:32 2019
@author: fanlizhou
Analyze codon usage of sequences from 'SP_gene_seq.txt' and 'LP_gene_seq.txt'
Plot heatmaps of amino acid usage and codon usage
Plot codon usage in each gene for each amino acid. Genes are arranged so that
the gene expression of SP decreases from 0 to 50 (x-axis) and the gene expression
of LP increases from 51 to 100 (x-axis)
Usage: codon_usage.py [-h] [--label LABEL] sp_file lp_file
Options:
--label Define the label of output files. Default="top"
sp_file Path to the SP data file
lp_file Path to the LP data file
"""
import io, os, argparse, collections
from scipy import stats
import matplotlib.pyplot as plt
import numpy as np
def parse_args():
parser = argparse.ArgumentParser(description=
'Analyze codon usage of SP and LP\n')
parser.add_argument('sp_file', help = 'one input SP data file\n')
parser.add_argument('lp_file', help = 'one input LP data file\n')
parser.add_argument('--label', '-l',
type = str, required = False, default = 'top',
                        help = 'Define the label of output files. Default="top"\n')
args = parser.parse_args()
for path in [args.sp_file, args.lp_file]:
if not os.path.isfile(path):
parser.error('File "%s" cannot be found.' % (path))
return args
# a Codon_Usage class to store codon usage information for each genotype
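# (input files are expected to be FASTA-like: '>'-prefixed header lines
# followed by sequence lines; only in-frame sequences, length % 3 == 0,
# are kept -- see get_seq below)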
class Codon_Usage:
def __init__(self, filename):
self.seq, self.gene_num = self.get_seq(filename)
def get_seq(self, filename):
file = io.open(filename)
        # list of selected gene sequences, excluding genes that are non-triple
all_seq = []
gene_seq = ''
count_all = 0
count_non_triple = 0
for line in file:
# read a gene information line
if line[0]=='>':
count_all += 1
# if a gene has been read, then append it to all_seq if the
# sequence is triple
if gene_seq!='':
if len(gene_seq)%3:
count_non_triple += 1
else:
all_seq.append(gene_seq)
gene_seq = ''
# read a gene sequence line
else:
gene_seq += line.strip()
        # the last gene in the file is not followed by another '>' line,
        # so flush it here with the same triple-length check
        if gene_seq != '':
            if len(gene_seq) % 3:
                count_non_triple += 1
            else:
                all_seq.append(gene_seq)
        file.close()
print('%s:\n%d genes added\n%d are non-triple\n'%
(filename[:2],count_all, count_non_triple))
return (all_seq, count_all - count_non_triple)
def get_AA(self, codon):
# dict key: codon -> AA
codon_map = {
'TTT':'Phe', 'TTC':'Phe', 'TTA':'Leu', 'TTG':'Leu',
'TCT':'Ser', 'TCC':'Ser', 'TCA':'Ser', 'TCG':'Ser',
'TAT':'Tyr', 'TAC':'Tyr', 'TAA':'STOP', 'TAG':'STOP',
'TGT':'Cys', 'TGC':'Cys', 'TGA':'STOP', 'TGG':'Trp',
'CTT':'Leu', 'CTC':'Leu', 'CTA':'Leu', 'CTG':'Leu',
'CCT':'Pro', 'CCC':'Pro', 'CCA':'Pro', 'CCG':'Pro',
'CAT':'His', 'CAC':'His', 'CAA':'Gln', 'CAG':'Gln',
'CGT':'Arg', 'CGC':'Arg', 'CGA':'Arg', 'CGG':'Arg',
'ATT':'Ile', 'ATC':'Ile', 'ATA':'Ile', 'ATG':'Met',
'ACT':'Thr', 'ACC':'Thr', 'ACA':'Thr', 'ACG':'Thr',
'AAT':'Asn', 'AAC':'Asn', 'AAA':'Lys', 'AAG':'Lys',
'AGT':'Ser', 'AGC':'Ser', 'AGA':'Arg', 'AGG':'Arg',
'GTT':'Val', 'GTC':'Val', 'GTA':'Val', 'GTG':'Val',
'GCT':'Ala', 'GCC':'Ala', 'GCA':'Ala', 'GCG':'Ala',
'GAT':'Asp', 'GAC':'Asp', 'GAA':'Glu', 'GAG':'Glu',
'GGT':'Gly', 'GGC':'Gly', 'GGA':'Gly', 'GGG':'Gly'}
if codon in codon_map:
return codon_map[codon]
else:
return ''
def get_usage_dict(self, seq):
# usage_dict structure:
# dict key: AA -> [
# dict key: codon ->
# [codon_count,
# codon_count/AA_count]
# AA_count
# ]
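        # e.g., with hypothetical counts, usage_dict['Phe'] could end up as
        # [{'TTT': [3, 0.6], 'TTC': [2, 0.4]}, 5]: Phe occurred 5 times,
        # 3 of them encoded by TTT and 2 by TTC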
usage_dict = \
collections.defaultdict(lambda:
[
collections.defaultdict(
lambda: [0, 0]),
0
])
# save AAs usage information
for index in range(0, len(seq), 3):
codon = seq[index:index+3]
AA = self.get_AA(codon)
if AA:
# count how many times the AA appears
usage_dict[AA][1] += 1
# count how many times the codon is used
usage_dict[AA][0][codon][0] += 1
# calculate the codon usage percentage for an AA
for AA in usage_dict:
for codon in usage_dict[AA][0]:
usage_dict[AA][0][codon][1] = \
usage_dict[AA][0][codon][0]/usage_dict[AA][1]
return usage_dict
def get_AA_dict(self):
# AA_dict structure:
# 1st dict key: AA -> 2nd dict key: codon -> a list of codon usage
# percentage of each gene
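        # e.g., with hypothetical values, AA_dict['Phe']['TTT'] could be
        # [0.6, 0.5, ...]: one usage fraction per gene, ordered as in self.seq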
AA_dict = \
collections.defaultdict(
lambda:collections.defaultdict(list))
# dict key: AA -> codon list
AA_map = {
'Phe':['TTT', 'TTC'],
'Leu':['TTA', 'TTG', 'CTT', 'CTC', 'CTA', 'CTG'],
'Ser':['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'],
'Tyr':['TAT', 'TAC'],
'STOP':['TAA', 'TAG', 'TGA'],
'Cys':['TGT', 'TGC'],
'Trp':['TGG'],
'Pro':['CCT', 'CCC', 'CCA', 'CCG'],
'His':['CAT', 'CAC'],
'Gln':['CAA', 'CAG'],
'Arg':['CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'],
'Ile':['ATT', 'ATC', 'ATA'],
'Met':['ATG'],
'Thr':['ACT', 'ACC', 'ACA', 'ACG'],
'Asn':['AAT', 'AAC'],
'Lys':['AAA', 'AAG'],
'Val':['GTT', 'GTC', 'GTA', 'GTG'],
'Ala':['GCT', 'GCC', 'GCA', 'GCG'],
'Asp':['GAT', 'GAC'],
'Glu':['GAA', 'GAG'],
'Gly':['GGT', 'GGC', 'GGA', 'GGG']
}
# list of codon usage for each gene
usage_dict_list = []
# get codon usage information for each gene
for seq in self.seq:
usage_dict_list.append(self.get_usage_dict(seq))
# get the list of codon usage percentage from each gene
for AA in list(AA_map.keys()):
for codon in AA_map[AA]:
# get codon usage information from each gene
for usage_dict in usage_dict_list:
# append codon usage percentage in the gene
AA_dict[AA][codon].append(
usage_dict[AA][0][codon][1])
return AA_dict
def heatmap_SP_LP(sp_AA_dict, lp_AA_dict, label):
# list of Chi-Square test results
AA_chisquare = []
# AA plotting annotation information
AA_text = []
# list of student's t-test results
codon_ttest = []
# codon plotting annotaion information
codon_text = []
i = 0
j = 0
# number of genes analyzed
count_all = 0
# number of genes that show significant results
count_sig = 0
for AA in list(sp_AA_dict.keys()):
# mean values of codon usage for each AA
sp_codon_mean = []
lp_codon_mean = []
for codon in sp_AA_dict[AA]:
# calculate ttest results
p_val = stats.ttest_ind(sp_AA_dict[AA][codon],
lp_AA_dict[AA][codon],
equal_var = False)[1]
# display eight codons in a row
if not i % 8:
codon_ttest.append([])
codon_text.append([])
i += 1
# handle NULL values
if np.isnan(p_val):
codon_ttest[-1].append(0)
codon_text[-1].append(codon + '\n NA')
# save ttest p-values and annotation information
else:
codon_ttest[-1].append(p_val)
codon_text[-1].append(codon + '\n' + str(round(p_val, 2)))
count_all += 1
if p_val < 0.5:
count_sig += 1
sp_codon_mean.append(np.mean(sp_AA_dict[AA][codon]))
lp_codon_mean.append(np.mean(lp_AA_dict[AA][codon]))
# get Chi-Square test results of each AA
p_val = stats.chisquare(np.array([sp_codon_mean, lp_codon_mean]),
axis = None)[1]
# display six AA in a row
if not j % 6:
AA_chisquare.append([])
AA_text.append([])
j += 1
# handle Null values
if np.isnan(p_val):
AA_chisquare[-1].append(0)
AA_text[-1].append(AA + '\n NA')
# save Chi-Square test p-values and annotation information
else:
AA_chisquare[-1].append(p_val)
AA_text[-1].append(AA + '\n' + str(round(p_val, 2)))
    # pad the last row with empty cells, but only if it is partially filled
    if j % 6:
        for n in range(j % 6, 6):
            AA_chisquare[-1].append(0)
            AA_text[-1].append('')
# get list of AAs that show significant difference between SP and LP groups
AAs = choose_codons(codon_ttest, codon_text)
AA_chisquare = np.array(AA_chisquare)
codon_ttest = np.array(codon_ttest)
AA_text = np.array(AA_text)
codon_text = np.array(codon_text)
    print('%d out of %d codons show significant usage difference '
          'between SP and LP genes (p_value < 0.5)\n' %
          (count_sig, count_all))
plot_heatmap(AA_chisquare, AA_text, 'AAs_ChiSquare', label)
plot_heatmap(codon_ttest, codon_text, 'Codons_ttest', label)
return AAs
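# Statistical design note: per codon, a Welch two-sample t-test (equal_var=False)
# compares per-gene usage fractions between SP and LP; per amino acid, a
# chi-square test compares the flattened SP vs LP vectors of mean codon usage
# (axis=None, i.e. against a uniform expected frequency).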
def plot_heatmap(data, text, cbarlabel, label):
fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (10, 5))
im, cbar = heatmap(data, ax, 'YlGn', cbarlabel)
annotate_heatmap(im, text)
fig.tight_layout()
    plt.savefig(f'../results/{cbarlabel}_{label}.png')
    plt.show()
def heatmap(data, ax, cmap, cbarlabel):
if not ax:
ax = plt.gca()
im = ax.imshow(data, cmap)
cbar = ax.figure.colorbar(im, ax=ax)
ax.set_xticks(np.arange(data.shape[1]))
ax.set_yticks(np.arange(data.shape[0]))
ax.set_xticklabels(range(data.shape[1]))
ax.set_yticklabels(range(data.shape[0]))
ax.tick_params(top=False, bottom=True,
labeltop=False, labelbottom=True)
# draw white space between squares
for edge, spine in ax.spines.items():
spine.set_visible(False)
ax.set_xticks(np.arange(data.shape[1] + 1) - 0.5, minor = True)
ax.set_yticks(np.arange(data.shape[0] + 1) - 0.5, minor = True)
ax.grid(which = 'minor', color = 'w', linestyle = '-', linewidth = 3)
ax.tick_params(which = 'minor', bottom = False, left = False)
cbar.ax.set_ylabel(cbarlabel, va = 'top')
return im, cbar
def annotate_heatmap(im, text_label):
textcolors = ['black','white']
data = im.get_array()
# set threshold to decide color
threshold = im.norm(data.max()) / 2
kw = dict(horizontalalignment = 'center',
verticalalignment = 'center')
for i in range(data.shape[0]):
for j in range(data.shape[1]):
kw.update(color = textcolors[im.norm(data[i,j]) > threshold])
im.axes.text(j, i, text_label[i,j], **kw)
def choose_codons(ttest, text):
# dict key: AA -> codon
# only contains AAs with only two codon choices
codon_map = {
'TTT':'Phe', 'TTC':'Phe', 'TAT':'Tyr', 'TAC':'Tyr',
'TGT':'Cys', 'TGC':'Cys', 'CAT':'His', 'CAC':'His',
'CAA':'Gln', 'CAG':'Gln', 'AAT':'Asn', 'AAC':'Asn',
'AAA':'Lys', 'AAG':'Lys', 'GAT':'Asp', 'GAC':'Asp',
'GAA':'Glu', 'GAG':'Glu'}
codon_dict = collections.defaultdict(list)
for i in range(len(ttest)):
for j in range(len(ttest[i])):
if ttest[i][j] < 0.01:
codon = text[i][j][:3]
if codon in codon_map:
codon_dict[codon_map[codon]].append(codon)
file = io.open('AAs_to_compare.txt', 'w')
file.write('Compare following AAs\n')
# AAs that have only two codon choices and show significant
# codon usage difference between SP and LP groups
AAs = []
for AA in codon_dict.keys():
AAs.append(AA)
if len(codon_dict[AA]) == 2:
file.write('%s: %s, %s\n' %
(AA, codon_dict[AA][0], codon_dict[AA][1]))
else:
file.write('%s: %s\n' % (AA, codon_dict[AA][0]))
file.close()
return AAs
def plot_SP_LP(sp_AA_dict, lp_AA_dict):
# plot each AA
for AA in list(sp_AA_dict.keys()):
# list of codon usage information
codon_data = []
# List of codon names
codons = []
for codon in sp_AA_dict[AA]:
# LP group data is displayed from lowest expressed genes
# to highest expressed genes
lp_AA_dict[AA][codon].reverse()
codons.append(codon)
codon_data.append([])
# display SP group data first and then LP group data
codon_data[-1].append(sp_AA_dict[AA][codon])
codon_data[-1].append(lp_AA_dict[AA][codon])
# plot usage curves
codon_usage_plot(codon_data, AA, codons)
def codon_usage_plot(data, AA, codons):
fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (15,5))
for i in range(len(data)):
# 0-50 shows SP group data
x_sp = np.linspace(0, 50, len(data[i][0]))
# 50-100 shows LP group data
x_lp = np.linspace(50, 100, len(data[i][1]))
ax.plot(x_sp, data[i][0], label = 'sp_' + codons[i])
ax.plot(x_lp, data[i][1], label = 'lp_' + codons[i])
ax.legend(loc = 1)
ax.set_title(AA)
def plot_distribution(sp_dict, lp_dict, AA):
fig, axes = plt.subplots(nrows = 2, ncols =1, figsize = (40, 20))
for codon in sp_dict[AA]:
x = np.arange(len(sp_dict[AA][codon]))
sp_y = np.array(sp_dict[AA][codon])
lp_y = np.array(lp_dict[AA][codon])
axes[0].plot(x, sp_y)
axes[1].plot(x, lp_y)
    plt.show()
def get_skellam_distribution(sp_dict, lp_dict, AA):
sp_mu = {}
lp_mu = {}
codons = []
# get mean values
for codon in sp_dict[AA]:
codons.append(codon)
sp_mu[codon] = np.mean(sp_dict[AA][codon])
lp_mu[codon] = np.mean(lp_dict[AA][codon])
skellam_plot(sp_mu[codons[0]], sp_mu[codons[1]], 'SP-' + AA)
skellam_plot(lp_mu[codons[0]], lp_mu[codons[1]], 'LP-' + AA)
def skellam_plot(mu1, mu2, name):
    print(mu1, mu2, mu1 - mu2, name)
fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (5, 5))
x = np.arange(stats.skellam.ppf(0.01, mu1, mu2),
stats.skellam.ppf(0.99, mu1, mu2))
ax.plot(x, stats.skellam.pmf(x, mu1, mu2), marker = 'o', label = name)
ax.legend(loc = 1)
    plt.show()
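# Background note (not part of the original script): the Skellam distribution
# is the law of the difference K = N1 - N2 of two independent Poisson counts
# with means mu1 and mu2; its pmf,
#   P(K = k) = exp(-(mu1 + mu2)) * (mu1/mu2)**(k/2) * I_|k|(2*sqrt(mu1*mu2)),
# with I_|k| the modified Bessel function of the first kind, is what
# stats.skellam.pmf evaluates for the two codon-count means above.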
# main flow
args = parse_args()
sp_codon_usage = Codon_Usage(args.sp_file)
lp_codon_usage = Codon_Usage(args.lp_file)
sp_AA_dict = sp_codon_usage.get_AA_dict()
lp_AA_dict = lp_codon_usage.get_AA_dict()
print("Analyzing SP and LP %s group data\n" % (args.label))
AAs = heatmap_SP_LP(sp_AA_dict, lp_AA_dict, args.label)
plot_SP_LP(sp_AA_dict, lp_AA_dict)
# optional
# get Skellam distributions of AAs that have only two codon choices
# and show distinctive usage between SP and LP
'''
sp_all_codon_usage = Codon_Usage('SP_all_gene_seq.txt')
lp_all_codon_usage = Codon_Usage('LP_all_gene_seq.txt')
sp_all_AA_dict = sp_all_codon_usage.get_AA_dict()
lp_all_AA_dict = lp_all_codon_usage.get_AA_dict()
for AA in AAs:
plot_distribution(sp_all_AA_dict, lp_all_AA_dict, AA)
get_skellam_distribution(sp_all_AA_dict, lp_all_AA_dict, AA)
'''
|
normal
|
{
"blob_id": "ae7a2de8742e353818d4f5a28feb9bce04d787bb",
"index": 8382,
"step-1": "<mask token>\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\n 'Analyze codon usage of SP and LP\\n')\n parser.add_argument('sp_file', help='one input SP data file\\n')\n parser.add_argument('lp_file', help='one input LP data file\\n')\n parser.add_argument('--label', '-l', type=str, required=False, default=\n 'top', help=\"\"\"Define the label of out-put files. Default=\"top\\\"\n\"\"\")\n args = parser.parse_args()\n for path in [args.sp_file, args.lp_file]:\n if not os.path.isfile(path):\n parser.error('File \"%s\" cannot be found.' % path)\n return args\n\n\nclass Codon_Usage:\n\n def __init__(self, filename):\n self.seq, self.gene_num = self.get_seq(filename)\n\n def get_seq(self, filename):\n file = io.open(filename)\n all_seq = []\n gene_seq = ''\n count_all = 0\n count_non_triple = 0\n for line in file:\n if line[0] == '>':\n count_all += 1\n if gene_seq != '':\n if len(gene_seq) % 3:\n count_non_triple += 1\n else:\n all_seq.append(gene_seq)\n gene_seq = ''\n else:\n gene_seq += line.strip()\n file.close()\n print('%s:\\n%d genes added\\n%d are non-triple\\n' % (filename[:2],\n count_all, count_non_triple))\n return all_seq, count_all - count_non_triple\n\n def get_AA(self, codon):\n codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TTA': 'Leu', 'TTG': 'Leu',\n 'TCT': 'Ser', 'TCC': 'Ser', 'TCA': 'Ser', 'TCG': 'Ser', 'TAT':\n 'Tyr', 'TAC': 'Tyr', 'TAA': 'STOP', 'TAG': 'STOP', 'TGT': 'Cys',\n 'TGC': 'Cys', 'TGA': 'STOP', 'TGG': 'Trp', 'CTT': 'Leu', 'CTC':\n 'Leu', 'CTA': 'Leu', 'CTG': 'Leu', 'CCT': 'Pro', 'CCC': 'Pro',\n 'CCA': 'Pro', 'CCG': 'Pro', 'CAT': 'His', 'CAC': 'His', 'CAA':\n 'Gln', 'CAG': 'Gln', 'CGT': 'Arg', 'CGC': 'Arg', 'CGA': 'Arg',\n 'CGG': 'Arg', 'ATT': 'Ile', 'ATC': 'Ile', 'ATA': 'Ile', 'ATG':\n 'Met', 'ACT': 'Thr', 'ACC': 'Thr', 'ACA': 'Thr', 'ACG': 'Thr',\n 'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys', 'AAG': 'Lys', 'AGT':\n 'Ser', 'AGC': 'Ser', 'AGA': 'Arg', 'AGG': 'Arg', 'GTT': 'Val',\n 'GTC': 'Val', 'GTA': 'Val', 'GTG': 'Val', 'GCT': 'Ala', 'GCC':\n 'Ala', 'GCA': 'Ala', 'GCG': 'Ala', 'GAT': 'Asp', 'GAC': 'Asp',\n 'GAA': 'Glu', 'GAG': 'Glu', 'GGT': 'Gly', 'GGC': 'Gly', 'GGA':\n 'Gly', 'GGG': 'Gly'}\n if codon in codon_map:\n return codon_map[codon]\n else:\n return ''\n\n def get_usage_dict(self, seq):\n usage_dict = collections.defaultdict(lambda : [collections.\n defaultdict(lambda : [0, 0]), 0])\n for index in range(0, len(seq), 3):\n codon = seq[index:index + 3]\n AA = self.get_AA(codon)\n if AA:\n usage_dict[AA][1] += 1\n usage_dict[AA][0][codon][0] += 1\n for AA in usage_dict:\n for codon in usage_dict[AA][0]:\n usage_dict[AA][0][codon][1] = usage_dict[AA][0][codon][0\n ] / usage_dict[AA][1]\n return usage_dict\n\n def get_AA_dict(self):\n AA_dict = collections.defaultdict(lambda : collections.defaultdict(\n list))\n AA_map = {'Phe': ['TTT', 'TTC'], 'Leu': ['TTA', 'TTG', 'CTT', 'CTC',\n 'CTA', 'CTG'], 'Ser': ['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'\n ], 'Tyr': ['TAT', 'TAC'], 'STOP': ['TAA', 'TAG', 'TGA'], 'Cys':\n ['TGT', 'TGC'], 'Trp': ['TGG'], 'Pro': ['CCT', 'CCC', 'CCA',\n 'CCG'], 'His': ['CAT', 'CAC'], 'Gln': ['CAA', 'CAG'], 'Arg': [\n 'CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'], 'Ile': ['ATT', 'ATC',\n 'ATA'], 'Met': ['ATG'], 'Thr': ['ACT', 'ACC', 'ACA', 'ACG'],\n 'Asn': ['AAT', 'AAC'], 'Lys': ['AAA', 'AAG'], 'Val': ['GTT',\n 'GTC', 'GTA', 'GTG'], 'Ala': ['GCT', 'GCC', 'GCA', 'GCG'],\n 'Asp': ['GAT', 'GAC'], 'Glu': ['GAA', 'GAG'], 'Gly': ['GGT',\n 'GGC', 'GGA', 'GGG']}\n usage_dict_list = []\n for seq in self.seq:\n 
usage_dict_list.append(self.get_usage_dict(seq))\n for AA in list(AA_map.keys()):\n for codon in AA_map[AA]:\n for usage_dict in usage_dict_list:\n AA_dict[AA][codon].append(usage_dict[AA][0][codon][1])\n return AA_dict\n\n\n<mask token>\n\n\ndef annotate_heatmap(im, text_label):\n textcolors = ['black', 'white']\n data = im.get_array()\n threshold = im.norm(data.max()) / 2\n kw = dict(horizontalalignment='center', verticalalignment='center')\n for i in range(data.shape[0]):\n for j in range(data.shape[1]):\n kw.update(color=textcolors[im.norm(data[i, j]) > threshold])\n im.axes.text(j, i, text_label[i, j], **kw)\n\n\ndef choose_codons(ttest, text):\n codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TAT': 'Tyr', 'TAC': 'Tyr',\n 'TGT': 'Cys', 'TGC': 'Cys', 'CAT': 'His', 'CAC': 'His', 'CAA':\n 'Gln', 'CAG': 'Gln', 'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys',\n 'AAG': 'Lys', 'GAT': 'Asp', 'GAC': 'Asp', 'GAA': 'Glu', 'GAG': 'Glu'}\n codon_dict = collections.defaultdict(list)\n for i in range(len(ttest)):\n for j in range(len(ttest[i])):\n if ttest[i][j] < 0.01:\n codon = text[i][j][:3]\n if codon in codon_map:\n codon_dict[codon_map[codon]].append(codon)\n file = io.open('AAs_to_compare.txt', 'w')\n file.write('Compare following AAs\\n')\n AAs = []\n for AA in codon_dict.keys():\n AAs.append(AA)\n if len(codon_dict[AA]) == 2:\n file.write('%s: %s, %s\\n' % (AA, codon_dict[AA][0], codon_dict[\n AA][1]))\n else:\n file.write('%s: %s\\n' % (AA, codon_dict[AA][0]))\n file.close()\n return AAs\n\n\n<mask token>\n\n\ndef plot_distribution(sp_dict, lp_dict, AA):\n fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(40, 20))\n for codon in sp_dict[AA]:\n x = np.arange(len(sp_dict[AA][codon]))\n sp_y = np.array(sp_dict[AA][codon])\n lp_y = np.array(lp_dict[AA][codon])\n axes[0].plot(x, sp_y)\n axes[1].plot(x, lp_y)\n plt.show\n\n\ndef get_skellam_distribution(sp_dict, lp_dict, AA):\n sp_mu = {}\n lp_mu = {}\n codons = []\n for codon in sp_dict[AA]:\n codons.append(codon)\n sp_mu[codon] = np.mean(sp_dict[AA][codon])\n lp_mu[codon] = np.mean(lp_dict[AA][codon])\n skellam_plot(sp_mu[codons[0]], sp_mu[codons[1]], 'SP-' + AA)\n skellam_plot(lp_mu[codons[0]], lp_mu[codons[1]], 'LP-' + AA)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\n 'Analyze codon usage of SP and LP\\n')\n parser.add_argument('sp_file', help='one input SP data file\\n')\n parser.add_argument('lp_file', help='one input LP data file\\n')\n parser.add_argument('--label', '-l', type=str, required=False, default=\n 'top', help=\"\"\"Define the label of out-put files. Default=\"top\\\"\n\"\"\")\n args = parser.parse_args()\n for path in [args.sp_file, args.lp_file]:\n if not os.path.isfile(path):\n parser.error('File \"%s\" cannot be found.' % path)\n return args\n\n\nclass Codon_Usage:\n\n def __init__(self, filename):\n self.seq, self.gene_num = self.get_seq(filename)\n\n def get_seq(self, filename):\n file = io.open(filename)\n all_seq = []\n gene_seq = ''\n count_all = 0\n count_non_triple = 0\n for line in file:\n if line[0] == '>':\n count_all += 1\n if gene_seq != '':\n if len(gene_seq) % 3:\n count_non_triple += 1\n else:\n all_seq.append(gene_seq)\n gene_seq = ''\n else:\n gene_seq += line.strip()\n file.close()\n print('%s:\\n%d genes added\\n%d are non-triple\\n' % (filename[:2],\n count_all, count_non_triple))\n return all_seq, count_all - count_non_triple\n\n def get_AA(self, codon):\n codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TTA': 'Leu', 'TTG': 'Leu',\n 'TCT': 'Ser', 'TCC': 'Ser', 'TCA': 'Ser', 'TCG': 'Ser', 'TAT':\n 'Tyr', 'TAC': 'Tyr', 'TAA': 'STOP', 'TAG': 'STOP', 'TGT': 'Cys',\n 'TGC': 'Cys', 'TGA': 'STOP', 'TGG': 'Trp', 'CTT': 'Leu', 'CTC':\n 'Leu', 'CTA': 'Leu', 'CTG': 'Leu', 'CCT': 'Pro', 'CCC': 'Pro',\n 'CCA': 'Pro', 'CCG': 'Pro', 'CAT': 'His', 'CAC': 'His', 'CAA':\n 'Gln', 'CAG': 'Gln', 'CGT': 'Arg', 'CGC': 'Arg', 'CGA': 'Arg',\n 'CGG': 'Arg', 'ATT': 'Ile', 'ATC': 'Ile', 'ATA': 'Ile', 'ATG':\n 'Met', 'ACT': 'Thr', 'ACC': 'Thr', 'ACA': 'Thr', 'ACG': 'Thr',\n 'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys', 'AAG': 'Lys', 'AGT':\n 'Ser', 'AGC': 'Ser', 'AGA': 'Arg', 'AGG': 'Arg', 'GTT': 'Val',\n 'GTC': 'Val', 'GTA': 'Val', 'GTG': 'Val', 'GCT': 'Ala', 'GCC':\n 'Ala', 'GCA': 'Ala', 'GCG': 'Ala', 'GAT': 'Asp', 'GAC': 'Asp',\n 'GAA': 'Glu', 'GAG': 'Glu', 'GGT': 'Gly', 'GGC': 'Gly', 'GGA':\n 'Gly', 'GGG': 'Gly'}\n if codon in codon_map:\n return codon_map[codon]\n else:\n return ''\n\n def get_usage_dict(self, seq):\n usage_dict = collections.defaultdict(lambda : [collections.\n defaultdict(lambda : [0, 0]), 0])\n for index in range(0, len(seq), 3):\n codon = seq[index:index + 3]\n AA = self.get_AA(codon)\n if AA:\n usage_dict[AA][1] += 1\n usage_dict[AA][0][codon][0] += 1\n for AA in usage_dict:\n for codon in usage_dict[AA][0]:\n usage_dict[AA][0][codon][1] = usage_dict[AA][0][codon][0\n ] / usage_dict[AA][1]\n return usage_dict\n\n def get_AA_dict(self):\n AA_dict = collections.defaultdict(lambda : collections.defaultdict(\n list))\n AA_map = {'Phe': ['TTT', 'TTC'], 'Leu': ['TTA', 'TTG', 'CTT', 'CTC',\n 'CTA', 'CTG'], 'Ser': ['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'\n ], 'Tyr': ['TAT', 'TAC'], 'STOP': ['TAA', 'TAG', 'TGA'], 'Cys':\n ['TGT', 'TGC'], 'Trp': ['TGG'], 'Pro': ['CCT', 'CCC', 'CCA',\n 'CCG'], 'His': ['CAT', 'CAC'], 'Gln': ['CAA', 'CAG'], 'Arg': [\n 'CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'], 'Ile': ['ATT', 'ATC',\n 'ATA'], 'Met': ['ATG'], 'Thr': ['ACT', 'ACC', 'ACA', 'ACG'],\n 'Asn': ['AAT', 'AAC'], 'Lys': ['AAA', 'AAG'], 'Val': ['GTT',\n 'GTC', 'GTA', 'GTG'], 'Ala': ['GCT', 'GCC', 'GCA', 'GCG'],\n 'Asp': ['GAT', 'GAC'], 'Glu': ['GAA', 'GAG'], 'Gly': ['GGT',\n 'GGC', 'GGA', 'GGG']}\n usage_dict_list = []\n for seq in self.seq:\n 
usage_dict_list.append(self.get_usage_dict(seq))\n for AA in list(AA_map.keys()):\n for codon in AA_map[AA]:\n for usage_dict in usage_dict_list:\n AA_dict[AA][codon].append(usage_dict[AA][0][codon][1])\n return AA_dict\n\n\n<mask token>\n\n\ndef plot_heatmap(data, text, cbarlabel, label):\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 5))\n im, cbar = heatmap(data, ax, 'YlGn', cbarlabel)\n annotate_heatmap(im, text)\n fig.tight_layout()\n plt.show\n plt.savefig(f'../results/{cbarlabel}_{label}.png')\n\n\n<mask token>\n\n\ndef annotate_heatmap(im, text_label):\n textcolors = ['black', 'white']\n data = im.get_array()\n threshold = im.norm(data.max()) / 2\n kw = dict(horizontalalignment='center', verticalalignment='center')\n for i in range(data.shape[0]):\n for j in range(data.shape[1]):\n kw.update(color=textcolors[im.norm(data[i, j]) > threshold])\n im.axes.text(j, i, text_label[i, j], **kw)\n\n\ndef choose_codons(ttest, text):\n codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TAT': 'Tyr', 'TAC': 'Tyr',\n 'TGT': 'Cys', 'TGC': 'Cys', 'CAT': 'His', 'CAC': 'His', 'CAA':\n 'Gln', 'CAG': 'Gln', 'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys',\n 'AAG': 'Lys', 'GAT': 'Asp', 'GAC': 'Asp', 'GAA': 'Glu', 'GAG': 'Glu'}\n codon_dict = collections.defaultdict(list)\n for i in range(len(ttest)):\n for j in range(len(ttest[i])):\n if ttest[i][j] < 0.01:\n codon = text[i][j][:3]\n if codon in codon_map:\n codon_dict[codon_map[codon]].append(codon)\n file = io.open('AAs_to_compare.txt', 'w')\n file.write('Compare following AAs\\n')\n AAs = []\n for AA in codon_dict.keys():\n AAs.append(AA)\n if len(codon_dict[AA]) == 2:\n file.write('%s: %s, %s\\n' % (AA, codon_dict[AA][0], codon_dict[\n AA][1]))\n else:\n file.write('%s: %s\\n' % (AA, codon_dict[AA][0]))\n file.close()\n return AAs\n\n\ndef plot_SP_LP(sp_AA_dict, lp_AA_dict):\n for AA in list(sp_AA_dict.keys()):\n codon_data = []\n codons = []\n for codon in sp_AA_dict[AA]:\n lp_AA_dict[AA][codon].reverse()\n codons.append(codon)\n codon_data.append([])\n codon_data[-1].append(sp_AA_dict[AA][codon])\n codon_data[-1].append(lp_AA_dict[AA][codon])\n codon_usage_plot(codon_data, AA, codons)\n\n\n<mask token>\n\n\ndef plot_distribution(sp_dict, lp_dict, AA):\n fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(40, 20))\n for codon in sp_dict[AA]:\n x = np.arange(len(sp_dict[AA][codon]))\n sp_y = np.array(sp_dict[AA][codon])\n lp_y = np.array(lp_dict[AA][codon])\n axes[0].plot(x, sp_y)\n axes[1].plot(x, lp_y)\n plt.show\n\n\ndef get_skellam_distribution(sp_dict, lp_dict, AA):\n sp_mu = {}\n lp_mu = {}\n codons = []\n for codon in sp_dict[AA]:\n codons.append(codon)\n sp_mu[codon] = np.mean(sp_dict[AA][codon])\n lp_mu[codon] = np.mean(lp_dict[AA][codon])\n skellam_plot(sp_mu[codons[0]], sp_mu[codons[1]], 'SP-' + AA)\n skellam_plot(lp_mu[codons[0]], lp_mu[codons[1]], 'LP-' + AA)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\n 'Analyze codon usage of SP and LP\\n')\n parser.add_argument('sp_file', help='one input SP data file\\n')\n parser.add_argument('lp_file', help='one input LP data file\\n')\n parser.add_argument('--label', '-l', type=str, required=False, default=\n 'top', help=\"\"\"Define the label of out-put files. Default=\"top\\\"\n\"\"\")\n args = parser.parse_args()\n for path in [args.sp_file, args.lp_file]:\n if not os.path.isfile(path):\n parser.error('File \"%s\" cannot be found.' % path)\n return args\n\n\nclass Codon_Usage:\n\n def __init__(self, filename):\n self.seq, self.gene_num = self.get_seq(filename)\n\n def get_seq(self, filename):\n file = io.open(filename)\n all_seq = []\n gene_seq = ''\n count_all = 0\n count_non_triple = 0\n for line in file:\n if line[0] == '>':\n count_all += 1\n if gene_seq != '':\n if len(gene_seq) % 3:\n count_non_triple += 1\n else:\n all_seq.append(gene_seq)\n gene_seq = ''\n else:\n gene_seq += line.strip()\n file.close()\n print('%s:\\n%d genes added\\n%d are non-triple\\n' % (filename[:2],\n count_all, count_non_triple))\n return all_seq, count_all - count_non_triple\n\n def get_AA(self, codon):\n codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TTA': 'Leu', 'TTG': 'Leu',\n 'TCT': 'Ser', 'TCC': 'Ser', 'TCA': 'Ser', 'TCG': 'Ser', 'TAT':\n 'Tyr', 'TAC': 'Tyr', 'TAA': 'STOP', 'TAG': 'STOP', 'TGT': 'Cys',\n 'TGC': 'Cys', 'TGA': 'STOP', 'TGG': 'Trp', 'CTT': 'Leu', 'CTC':\n 'Leu', 'CTA': 'Leu', 'CTG': 'Leu', 'CCT': 'Pro', 'CCC': 'Pro',\n 'CCA': 'Pro', 'CCG': 'Pro', 'CAT': 'His', 'CAC': 'His', 'CAA':\n 'Gln', 'CAG': 'Gln', 'CGT': 'Arg', 'CGC': 'Arg', 'CGA': 'Arg',\n 'CGG': 'Arg', 'ATT': 'Ile', 'ATC': 'Ile', 'ATA': 'Ile', 'ATG':\n 'Met', 'ACT': 'Thr', 'ACC': 'Thr', 'ACA': 'Thr', 'ACG': 'Thr',\n 'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys', 'AAG': 'Lys', 'AGT':\n 'Ser', 'AGC': 'Ser', 'AGA': 'Arg', 'AGG': 'Arg', 'GTT': 'Val',\n 'GTC': 'Val', 'GTA': 'Val', 'GTG': 'Val', 'GCT': 'Ala', 'GCC':\n 'Ala', 'GCA': 'Ala', 'GCG': 'Ala', 'GAT': 'Asp', 'GAC': 'Asp',\n 'GAA': 'Glu', 'GAG': 'Glu', 'GGT': 'Gly', 'GGC': 'Gly', 'GGA':\n 'Gly', 'GGG': 'Gly'}\n if codon in codon_map:\n return codon_map[codon]\n else:\n return ''\n\n def get_usage_dict(self, seq):\n usage_dict = collections.defaultdict(lambda : [collections.\n defaultdict(lambda : [0, 0]), 0])\n for index in range(0, len(seq), 3):\n codon = seq[index:index + 3]\n AA = self.get_AA(codon)\n if AA:\n usage_dict[AA][1] += 1\n usage_dict[AA][0][codon][0] += 1\n for AA in usage_dict:\n for codon in usage_dict[AA][0]:\n usage_dict[AA][0][codon][1] = usage_dict[AA][0][codon][0\n ] / usage_dict[AA][1]\n return usage_dict\n\n def get_AA_dict(self):\n AA_dict = collections.defaultdict(lambda : collections.defaultdict(\n list))\n AA_map = {'Phe': ['TTT', 'TTC'], 'Leu': ['TTA', 'TTG', 'CTT', 'CTC',\n 'CTA', 'CTG'], 'Ser': ['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'\n ], 'Tyr': ['TAT', 'TAC'], 'STOP': ['TAA', 'TAG', 'TGA'], 'Cys':\n ['TGT', 'TGC'], 'Trp': ['TGG'], 'Pro': ['CCT', 'CCC', 'CCA',\n 'CCG'], 'His': ['CAT', 'CAC'], 'Gln': ['CAA', 'CAG'], 'Arg': [\n 'CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'], 'Ile': ['ATT', 'ATC',\n 'ATA'], 'Met': ['ATG'], 'Thr': ['ACT', 'ACC', 'ACA', 'ACG'],\n 'Asn': ['AAT', 'AAC'], 'Lys': ['AAA', 'AAG'], 'Val': ['GTT',\n 'GTC', 'GTA', 'GTG'], 'Ala': ['GCT', 'GCC', 'GCA', 'GCG'],\n 'Asp': ['GAT', 'GAC'], 'Glu': ['GAA', 'GAG'], 'Gly': ['GGT',\n 'GGC', 'GGA', 'GGG']}\n usage_dict_list = []\n for seq in self.seq:\n 
usage_dict_list.append(self.get_usage_dict(seq))\n for AA in list(AA_map.keys()):\n for codon in AA_map[AA]:\n for usage_dict in usage_dict_list:\n AA_dict[AA][codon].append(usage_dict[AA][0][codon][1])\n return AA_dict\n\n\n<mask token>\n\n\ndef plot_heatmap(data, text, cbarlabel, label):\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 5))\n im, cbar = heatmap(data, ax, 'YlGn', cbarlabel)\n annotate_heatmap(im, text)\n fig.tight_layout()\n plt.show\n plt.savefig(f'../results/{cbarlabel}_{label}.png')\n\n\ndef heatmap(data, ax, cmap, cbarlabel):\n if not ax:\n ax = plt.gca()\n im = ax.imshow(data, cmap)\n cbar = ax.figure.colorbar(im, ax=ax)\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n ax.set_xticklabels(range(data.shape[1]))\n ax.set_yticklabels(range(data.shape[0]))\n ax.tick_params(top=False, bottom=True, labeltop=False, labelbottom=True)\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n ax.set_xticks(np.arange(data.shape[1] + 1) - 0.5, minor=True)\n ax.set_yticks(np.arange(data.shape[0] + 1) - 0.5, minor=True)\n ax.grid(which='minor', color='w', linestyle='-', linewidth=3)\n ax.tick_params(which='minor', bottom=False, left=False)\n cbar.ax.set_ylabel(cbarlabel, va='top')\n return im, cbar\n\n\ndef annotate_heatmap(im, text_label):\n textcolors = ['black', 'white']\n data = im.get_array()\n threshold = im.norm(data.max()) / 2\n kw = dict(horizontalalignment='center', verticalalignment='center')\n for i in range(data.shape[0]):\n for j in range(data.shape[1]):\n kw.update(color=textcolors[im.norm(data[i, j]) > threshold])\n im.axes.text(j, i, text_label[i, j], **kw)\n\n\ndef choose_codons(ttest, text):\n codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TAT': 'Tyr', 'TAC': 'Tyr',\n 'TGT': 'Cys', 'TGC': 'Cys', 'CAT': 'His', 'CAC': 'His', 'CAA':\n 'Gln', 'CAG': 'Gln', 'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys',\n 'AAG': 'Lys', 'GAT': 'Asp', 'GAC': 'Asp', 'GAA': 'Glu', 'GAG': 'Glu'}\n codon_dict = collections.defaultdict(list)\n for i in range(len(ttest)):\n for j in range(len(ttest[i])):\n if ttest[i][j] < 0.01:\n codon = text[i][j][:3]\n if codon in codon_map:\n codon_dict[codon_map[codon]].append(codon)\n file = io.open('AAs_to_compare.txt', 'w')\n file.write('Compare following AAs\\n')\n AAs = []\n for AA in codon_dict.keys():\n AAs.append(AA)\n if len(codon_dict[AA]) == 2:\n file.write('%s: %s, %s\\n' % (AA, codon_dict[AA][0], codon_dict[\n AA][1]))\n else:\n file.write('%s: %s\\n' % (AA, codon_dict[AA][0]))\n file.close()\n return AAs\n\n\ndef plot_SP_LP(sp_AA_dict, lp_AA_dict):\n for AA in list(sp_AA_dict.keys()):\n codon_data = []\n codons = []\n for codon in sp_AA_dict[AA]:\n lp_AA_dict[AA][codon].reverse()\n codons.append(codon)\n codon_data.append([])\n codon_data[-1].append(sp_AA_dict[AA][codon])\n codon_data[-1].append(lp_AA_dict[AA][codon])\n codon_usage_plot(codon_data, AA, codons)\n\n\ndef codon_usage_plot(data, AA, codons):\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(15, 5))\n for i in range(len(data)):\n x_sp = np.linspace(0, 50, len(data[i][0]))\n x_lp = np.linspace(50, 100, len(data[i][1]))\n ax.plot(x_sp, data[i][0], label='sp_' + codons[i])\n ax.plot(x_lp, data[i][1], label='lp_' + codons[i])\n ax.legend(loc=1)\n ax.set_title(AA)\n\n\ndef plot_distribution(sp_dict, lp_dict, AA):\n fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(40, 20))\n for codon in sp_dict[AA]:\n x = np.arange(len(sp_dict[AA][codon]))\n sp_y = np.array(sp_dict[AA][codon])\n lp_y = np.array(lp_dict[AA][codon])\n axes[0].plot(x, 
sp_y)\n axes[1].plot(x, lp_y)\n plt.show\n\n\ndef get_skellam_distribution(sp_dict, lp_dict, AA):\n sp_mu = {}\n lp_mu = {}\n codons = []\n for codon in sp_dict[AA]:\n codons.append(codon)\n sp_mu[codon] = np.mean(sp_dict[AA][codon])\n lp_mu[codon] = np.mean(lp_dict[AA][codon])\n skellam_plot(sp_mu[codons[0]], sp_mu[codons[1]], 'SP-' + AA)\n skellam_plot(lp_mu[codons[0]], lp_mu[codons[1]], 'LP-' + AA)\n\n\ndef skellam_plot(mu1, mu2, name):\n print(mu1, ' ', mu2, ' ', mu1 - mu2, ' ', name)\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(5, 5))\n x = np.arange(stats.skellam.ppf(0.01, mu1, mu2), stats.skellam.ppf(0.99,\n mu1, mu2))\n ax.plot(x, stats.skellam.pmf(x, mu1, mu2), marker='o', label=name)\n ax.legend(loc=1)\n plt.show\n\n\n<mask token>\n",
"step-4": "<mask token>\nimport io, os, argparse, collections\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\n 'Analyze codon usage of SP and LP\\n')\n parser.add_argument('sp_file', help='one input SP data file\\n')\n parser.add_argument('lp_file', help='one input LP data file\\n')\n parser.add_argument('--label', '-l', type=str, required=False, default=\n 'top', help=\"\"\"Define the label of out-put files. Default=\"top\\\"\n\"\"\")\n args = parser.parse_args()\n for path in [args.sp_file, args.lp_file]:\n if not os.path.isfile(path):\n parser.error('File \"%s\" cannot be found.' % path)\n return args\n\n\nclass Codon_Usage:\n\n def __init__(self, filename):\n self.seq, self.gene_num = self.get_seq(filename)\n\n def get_seq(self, filename):\n file = io.open(filename)\n all_seq = []\n gene_seq = ''\n count_all = 0\n count_non_triple = 0\n for line in file:\n if line[0] == '>':\n count_all += 1\n if gene_seq != '':\n if len(gene_seq) % 3:\n count_non_triple += 1\n else:\n all_seq.append(gene_seq)\n gene_seq = ''\n else:\n gene_seq += line.strip()\n file.close()\n print('%s:\\n%d genes added\\n%d are non-triple\\n' % (filename[:2],\n count_all, count_non_triple))\n return all_seq, count_all - count_non_triple\n\n def get_AA(self, codon):\n codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TTA': 'Leu', 'TTG': 'Leu',\n 'TCT': 'Ser', 'TCC': 'Ser', 'TCA': 'Ser', 'TCG': 'Ser', 'TAT':\n 'Tyr', 'TAC': 'Tyr', 'TAA': 'STOP', 'TAG': 'STOP', 'TGT': 'Cys',\n 'TGC': 'Cys', 'TGA': 'STOP', 'TGG': 'Trp', 'CTT': 'Leu', 'CTC':\n 'Leu', 'CTA': 'Leu', 'CTG': 'Leu', 'CCT': 'Pro', 'CCC': 'Pro',\n 'CCA': 'Pro', 'CCG': 'Pro', 'CAT': 'His', 'CAC': 'His', 'CAA':\n 'Gln', 'CAG': 'Gln', 'CGT': 'Arg', 'CGC': 'Arg', 'CGA': 'Arg',\n 'CGG': 'Arg', 'ATT': 'Ile', 'ATC': 'Ile', 'ATA': 'Ile', 'ATG':\n 'Met', 'ACT': 'Thr', 'ACC': 'Thr', 'ACA': 'Thr', 'ACG': 'Thr',\n 'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys', 'AAG': 'Lys', 'AGT':\n 'Ser', 'AGC': 'Ser', 'AGA': 'Arg', 'AGG': 'Arg', 'GTT': 'Val',\n 'GTC': 'Val', 'GTA': 'Val', 'GTG': 'Val', 'GCT': 'Ala', 'GCC':\n 'Ala', 'GCA': 'Ala', 'GCG': 'Ala', 'GAT': 'Asp', 'GAC': 'Asp',\n 'GAA': 'Glu', 'GAG': 'Glu', 'GGT': 'Gly', 'GGC': 'Gly', 'GGA':\n 'Gly', 'GGG': 'Gly'}\n if codon in codon_map:\n return codon_map[codon]\n else:\n return ''\n\n def get_usage_dict(self, seq):\n usage_dict = collections.defaultdict(lambda : [collections.\n defaultdict(lambda : [0, 0]), 0])\n for index in range(0, len(seq), 3):\n codon = seq[index:index + 3]\n AA = self.get_AA(codon)\n if AA:\n usage_dict[AA][1] += 1\n usage_dict[AA][0][codon][0] += 1\n for AA in usage_dict:\n for codon in usage_dict[AA][0]:\n usage_dict[AA][0][codon][1] = usage_dict[AA][0][codon][0\n ] / usage_dict[AA][1]\n return usage_dict\n\n def get_AA_dict(self):\n AA_dict = collections.defaultdict(lambda : collections.defaultdict(\n list))\n AA_map = {'Phe': ['TTT', 'TTC'], 'Leu': ['TTA', 'TTG', 'CTT', 'CTC',\n 'CTA', 'CTG'], 'Ser': ['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'\n ], 'Tyr': ['TAT', 'TAC'], 'STOP': ['TAA', 'TAG', 'TGA'], 'Cys':\n ['TGT', 'TGC'], 'Trp': ['TGG'], 'Pro': ['CCT', 'CCC', 'CCA',\n 'CCG'], 'His': ['CAT', 'CAC'], 'Gln': ['CAA', 'CAG'], 'Arg': [\n 'CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'], 'Ile': ['ATT', 'ATC',\n 'ATA'], 'Met': ['ATG'], 'Thr': ['ACT', 'ACC', 'ACA', 'ACG'],\n 'Asn': ['AAT', 'AAC'], 'Lys': ['AAA', 'AAG'], 'Val': ['GTT',\n 'GTC', 'GTA', 'GTG'], 'Ala': ['GCT', 'GCC', 'GCA', 'GCG'],\n 'Asp': ['GAT', 'GAC'], 'Glu': 
['GAA', 'GAG'], 'Gly': ['GGT',\n 'GGC', 'GGA', 'GGG']}\n usage_dict_list = []\n for seq in self.seq:\n usage_dict_list.append(self.get_usage_dict(seq))\n for AA in list(AA_map.keys()):\n for codon in AA_map[AA]:\n for usage_dict in usage_dict_list:\n AA_dict[AA][codon].append(usage_dict[AA][0][codon][1])\n return AA_dict\n\n\ndef heatmap_SP_LP(sp_AA_dict, lp_AA_dict, label):\n AA_chisquare = []\n AA_text = []\n codon_ttest = []\n codon_text = []\n i = 0\n j = 0\n count_all = 0\n count_sig = 0\n for AA in list(sp_AA_dict.keys()):\n sp_codon_mean = []\n lp_codon_mean = []\n for codon in sp_AA_dict[AA]:\n p_val = stats.ttest_ind(sp_AA_dict[AA][codon], lp_AA_dict[AA][\n codon], equal_var=False)[1]\n if not i % 8:\n codon_ttest.append([])\n codon_text.append([])\n i += 1\n if np.isnan(p_val):\n codon_ttest[-1].append(0)\n codon_text[-1].append(codon + '\\n NA')\n else:\n codon_ttest[-1].append(p_val)\n codon_text[-1].append(codon + '\\n' + str(round(p_val, 2)))\n count_all += 1\n if p_val < 0.5:\n count_sig += 1\n sp_codon_mean.append(np.mean(sp_AA_dict[AA][codon]))\n lp_codon_mean.append(np.mean(lp_AA_dict[AA][codon]))\n p_val = stats.chisquare(np.array([sp_codon_mean, lp_codon_mean]),\n axis=None)[1]\n if not j % 6:\n AA_chisquare.append([])\n AA_text.append([])\n j += 1\n if np.isnan(p_val):\n AA_chisquare[-1].append(0)\n AA_text[-1].append(AA + '\\n NA')\n else:\n AA_chisquare[-1].append(p_val)\n AA_text[-1].append(AA + '\\n' + str(round(p_val, 2)))\n for n in range(j % 6, 6):\n AA_chisquare[-1].append(0)\n AA_text[-1].append('')\n AAs = choose_codons(codon_ttest, codon_text)\n AA_chisquare = np.array(AA_chisquare)\n codon_ttest = np.array(codon_ttest)\n AA_text = np.array(AA_text)\n codon_text = np.array(codon_text)\n print(\n \"\"\"%d out of %d codon show significant usage difference between SP and LP genes (p_value < 0.5)\n\"\"\"\n % (count_sig, count_all))\n plot_heatmap(AA_chisquare, AA_text, 'AAs_ChiSquare', label)\n plot_heatmap(codon_ttest, codon_text, 'Codons_ttest', label)\n return AAs\n\n\ndef plot_heatmap(data, text, cbarlabel, label):\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 5))\n im, cbar = heatmap(data, ax, 'YlGn', cbarlabel)\n annotate_heatmap(im, text)\n fig.tight_layout()\n plt.show\n plt.savefig(f'../results/{cbarlabel}_{label}.png')\n\n\ndef heatmap(data, ax, cmap, cbarlabel):\n if not ax:\n ax = plt.gca()\n im = ax.imshow(data, cmap)\n cbar = ax.figure.colorbar(im, ax=ax)\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n ax.set_xticklabels(range(data.shape[1]))\n ax.set_yticklabels(range(data.shape[0]))\n ax.tick_params(top=False, bottom=True, labeltop=False, labelbottom=True)\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n ax.set_xticks(np.arange(data.shape[1] + 1) - 0.5, minor=True)\n ax.set_yticks(np.arange(data.shape[0] + 1) - 0.5, minor=True)\n ax.grid(which='minor', color='w', linestyle='-', linewidth=3)\n ax.tick_params(which='minor', bottom=False, left=False)\n cbar.ax.set_ylabel(cbarlabel, va='top')\n return im, cbar\n\n\ndef annotate_heatmap(im, text_label):\n textcolors = ['black', 'white']\n data = im.get_array()\n threshold = im.norm(data.max()) / 2\n kw = dict(horizontalalignment='center', verticalalignment='center')\n for i in range(data.shape[0]):\n for j in range(data.shape[1]):\n kw.update(color=textcolors[im.norm(data[i, j]) > threshold])\n im.axes.text(j, i, text_label[i, j], **kw)\n\n\ndef choose_codons(ttest, text):\n codon_map = {'TTT': 'Phe', 'TTC': 'Phe', 'TAT': 'Tyr', 
'TAC': 'Tyr',\n 'TGT': 'Cys', 'TGC': 'Cys', 'CAT': 'His', 'CAC': 'His', 'CAA':\n 'Gln', 'CAG': 'Gln', 'AAT': 'Asn', 'AAC': 'Asn', 'AAA': 'Lys',\n 'AAG': 'Lys', 'GAT': 'Asp', 'GAC': 'Asp', 'GAA': 'Glu', 'GAG': 'Glu'}\n codon_dict = collections.defaultdict(list)\n for i in range(len(ttest)):\n for j in range(len(ttest[i])):\n if ttest[i][j] < 0.01:\n codon = text[i][j][:3]\n if codon in codon_map:\n codon_dict[codon_map[codon]].append(codon)\n file = io.open('AAs_to_compare.txt', 'w')\n file.write('Compare following AAs\\n')\n AAs = []\n for AA in codon_dict.keys():\n AAs.append(AA)\n if len(codon_dict[AA]) == 2:\n file.write('%s: %s, %s\\n' % (AA, codon_dict[AA][0], codon_dict[\n AA][1]))\n else:\n file.write('%s: %s\\n' % (AA, codon_dict[AA][0]))\n file.close()\n return AAs\n\n\ndef plot_SP_LP(sp_AA_dict, lp_AA_dict):\n for AA in list(sp_AA_dict.keys()):\n codon_data = []\n codons = []\n for codon in sp_AA_dict[AA]:\n lp_AA_dict[AA][codon].reverse()\n codons.append(codon)\n codon_data.append([])\n codon_data[-1].append(sp_AA_dict[AA][codon])\n codon_data[-1].append(lp_AA_dict[AA][codon])\n codon_usage_plot(codon_data, AA, codons)\n\n\ndef codon_usage_plot(data, AA, codons):\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(15, 5))\n for i in range(len(data)):\n x_sp = np.linspace(0, 50, len(data[i][0]))\n x_lp = np.linspace(50, 100, len(data[i][1]))\n ax.plot(x_sp, data[i][0], label='sp_' + codons[i])\n ax.plot(x_lp, data[i][1], label='lp_' + codons[i])\n ax.legend(loc=1)\n ax.set_title(AA)\n\n\ndef plot_distribution(sp_dict, lp_dict, AA):\n fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(40, 20))\n for codon in sp_dict[AA]:\n x = np.arange(len(sp_dict[AA][codon]))\n sp_y = np.array(sp_dict[AA][codon])\n lp_y = np.array(lp_dict[AA][codon])\n axes[0].plot(x, sp_y)\n axes[1].plot(x, lp_y)\n plt.show\n\n\ndef get_skellam_distribution(sp_dict, lp_dict, AA):\n sp_mu = {}\n lp_mu = {}\n codons = []\n for codon in sp_dict[AA]:\n codons.append(codon)\n sp_mu[codon] = np.mean(sp_dict[AA][codon])\n lp_mu[codon] = np.mean(lp_dict[AA][codon])\n skellam_plot(sp_mu[codons[0]], sp_mu[codons[1]], 'SP-' + AA)\n skellam_plot(lp_mu[codons[0]], lp_mu[codons[1]], 'LP-' + AA)\n\n\ndef skellam_plot(mu1, mu2, name):\n print(mu1, ' ', mu2, ' ', mu1 - mu2, ' ', name)\n fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(5, 5))\n x = np.arange(stats.skellam.ppf(0.01, mu1, mu2), stats.skellam.ppf(0.99,\n mu1, mu2))\n ax.plot(x, stats.skellam.pmf(x, mu1, mu2), marker='o', label=name)\n ax.legend(loc=1)\n plt.show\n\n\nargs = parse_args()\nsp_codon_usage = Codon_Usage(args.sp_file)\nlp_codon_usage = Codon_Usage(args.lp_file)\nsp_AA_dict = sp_codon_usage.get_AA_dict()\nlp_AA_dict = lp_codon_usage.get_AA_dict()\nprint('Analyzing SP and LP %s group data\\n' % args.label)\nAAs = heatmap_SP_LP(sp_AA_dict, lp_AA_dict, args.label)\nplot_SP_LP(sp_AA_dict, lp_AA_dict)\n<mask token>\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nCreated on Wed Mar 13 17:34:32 2019\n\n@author: fanlizhou\n\nAnalyze codon usage of sequence from 'SP_gene_seq.txt' and 'LP_gene_seq.txt'\nPlot heatmap of amino acid usage and codon usage\nPlot codon usage in each gene for each amino acid. Genes were arranged so that\nthe gene expression of SP decrease from 0 to 50 (x-axis) and the gene expression\nof LP increase from 51 to 100 (x-axis)\n\nUsage: codon_usage.py [-h] [--label LABEL] sp_file lp_file \n\nOptions:\n--label Define the label of out-put files. Default=\"top\"\nsp_file Path to the SP data files\nlp_file Path to the LP data files\n\n\"\"\"\n\nimport io, os, argparse, collections\nfrom scipy import stats\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\n 'Analyze codon usage of SP and LP\\n')\n parser.add_argument('sp_file', help = 'one input SP data file\\n')\n parser.add_argument('lp_file', help = 'one input LP data file\\n')\n parser.add_argument('--label', '-l', \n type = str, required = False, default = 'top', \n help = 'Define the label of out-put files. Default=\"top\"\\n')\n \n args = parser.parse_args()\n \n for path in [args.sp_file, args.lp_file]:\n if not os.path.isfile(path):\n parser.error('File \"%s\" cannot be found.' % (path))\n \n return args\n\n\n# a Codon_Usage class to store codon usage information for each genotype\nclass Codon_Usage:\n \n def __init__(self, filename): \n self.seq, self.gene_num = self.get_seq(filename)\n \n \n def get_seq(self, filename): \n file = io.open(filename)\n # list of selected gene sequences, excluded genes that are non-triple\n all_seq = []\n gene_seq = ''\n count_all = 0\n count_non_triple = 0\n \n for line in file:\n # read a gene information line\n if line[0]=='>':\n count_all += 1\n \n # if a gene has been read, then append it to all_seq if the\n # sequence is triple\n if gene_seq!='': \n if len(gene_seq)%3:\n count_non_triple += 1\n else:\n all_seq.append(gene_seq)\n \n gene_seq = ''\n \n # read a gene sequence line \n else:\n gene_seq += line.strip()\n \n \n file.close() \n print('%s:\\n%d genes added\\n%d are non-triple\\n'%\n (filename[:2],count_all, count_non_triple))\n \n return (all_seq, count_all - count_non_triple)\n \n\n def get_AA(self, codon):\n # dict key: codon -> AA\n codon_map = {\n 'TTT':'Phe', 'TTC':'Phe', 'TTA':'Leu', 'TTG':'Leu',\n 'TCT':'Ser', 'TCC':'Ser', 'TCA':'Ser', 'TCG':'Ser',\n 'TAT':'Tyr', 'TAC':'Tyr', 'TAA':'STOP', 'TAG':'STOP',\n 'TGT':'Cys', 'TGC':'Cys', 'TGA':'STOP', 'TGG':'Trp',\n 'CTT':'Leu', 'CTC':'Leu', 'CTA':'Leu', 'CTG':'Leu',\n 'CCT':'Pro', 'CCC':'Pro', 'CCA':'Pro', 'CCG':'Pro',\n 'CAT':'His', 'CAC':'His', 'CAA':'Gln', 'CAG':'Gln',\n 'CGT':'Arg', 'CGC':'Arg', 'CGA':'Arg', 'CGG':'Arg',\n 'ATT':'Ile', 'ATC':'Ile', 'ATA':'Ile', 'ATG':'Met',\n 'ACT':'Thr', 'ACC':'Thr', 'ACA':'Thr', 'ACG':'Thr',\n 'AAT':'Asn', 'AAC':'Asn', 'AAA':'Lys', 'AAG':'Lys',\n 'AGT':'Ser', 'AGC':'Ser', 'AGA':'Arg', 'AGG':'Arg',\n 'GTT':'Val', 'GTC':'Val', 'GTA':'Val', 'GTG':'Val',\n 'GCT':'Ala', 'GCC':'Ala', 'GCA':'Ala', 'GCG':'Ala',\n 'GAT':'Asp', 'GAC':'Asp', 'GAA':'Glu', 'GAG':'Glu',\n 'GGT':'Gly', 'GGC':'Gly', 'GGA':'Gly', 'GGG':'Gly'}\n\n if codon in codon_map:\n return codon_map[codon] \n else:\n return ''\n \n \n def get_usage_dict(self, seq):\n # usage_dict structure:\n # dict key: AA -> [\n # dict key: codon -> \n # [codon_count,\n # codon_count/AA_count]\n # AA_count\n # ] \n usage_dict = \\\n 
collections.defaultdict(lambda: \n [\n collections.defaultdict(\n lambda: [0, 0]), \n 0\n ])\n # save AAs usage information\n for index in range(0, len(seq), 3):\n codon = seq[index:index+3]\n AA = self.get_AA(codon)\n if AA:\n # count how many times the AA appears\n usage_dict[AA][1] += 1\n # count how many times the codon is used\n usage_dict[AA][0][codon][0] += 1\n \n # calculate the codon usage percentage for an AA\n for AA in usage_dict:\n for codon in usage_dict[AA][0]:\n usage_dict[AA][0][codon][1] = \\\n usage_dict[AA][0][codon][0]/usage_dict[AA][1]\n\n return usage_dict\n\n\n def get_AA_dict(self): \n # AA_dict structure:\n # 1st dict key: AA -> 2nd dict key: codon -> a list of codon usage \n # percentage of each gene \n AA_dict = \\\n collections.defaultdict(\n lambda:collections.defaultdict(list))\n \n # dict key: AA -> codon list\n AA_map = {\n 'Phe':['TTT', 'TTC'],\n 'Leu':['TTA', 'TTG', 'CTT', 'CTC', 'CTA', 'CTG'],\n 'Ser':['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'], \n 'Tyr':['TAT', 'TAC'], \n 'STOP':['TAA', 'TAG', 'TGA'],\n 'Cys':['TGT', 'TGC'], \n 'Trp':['TGG'],\n 'Pro':['CCT', 'CCC', 'CCA', 'CCG'],\n 'His':['CAT', 'CAC'], \n 'Gln':['CAA', 'CAG'],\n 'Arg':['CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'],\n 'Ile':['ATT', 'ATC', 'ATA'], \n 'Met':['ATG'],\n 'Thr':['ACT', 'ACC', 'ACA', 'ACG'],\n 'Asn':['AAT', 'AAC'], \n 'Lys':['AAA', 'AAG'],\n 'Val':['GTT', 'GTC', 'GTA', 'GTG'],\n 'Ala':['GCT', 'GCC', 'GCA', 'GCG'],\n 'Asp':['GAT', 'GAC'], \n 'Glu':['GAA', 'GAG'],\n 'Gly':['GGT', 'GGC', 'GGA', 'GGG']\n }\n \n # list of codon usage for each gene\n usage_dict_list = []\n \n # get codon usage information for each gene\n for seq in self.seq:\n usage_dict_list.append(self.get_usage_dict(seq))\n \n # get the list of codon usage percentage from each gene \n for AA in list(AA_map.keys()):\n for codon in AA_map[AA]:\n # get codon usage information from each gene\n for usage_dict in usage_dict_list:\n # append codon usage percentage in the gene\n AA_dict[AA][codon].append(\n usage_dict[AA][0][codon][1])\n \n return AA_dict \n \n\ndef heatmap_SP_LP(sp_AA_dict, lp_AA_dict, label): \n # list of Chi-Square test results\n AA_chisquare = []\n # AA plotting annotation information\n AA_text = []\n \n # list of student's t-test results\n codon_ttest = []\n # codon plotting annotaion information\n codon_text = []\n \n i = 0\n j = 0\n # number of genes analyzed\n count_all = 0\n # number of genes that show significant results\n count_sig = 0\n \n for AA in list(sp_AA_dict.keys()): \n # mean values of codon usage for each AA\n sp_codon_mean = []\n lp_codon_mean = [] \n \n for codon in sp_AA_dict[AA]:\n # calculate ttest results \n p_val = stats.ttest_ind(sp_AA_dict[AA][codon],\n lp_AA_dict[AA][codon],\n equal_var = False)[1]\n \n # display eight codons in a row\n if not i % 8:\n codon_ttest.append([])\n codon_text.append([])\n i += 1\n \n # handle NULL values\n if np.isnan(p_val):\n codon_ttest[-1].append(0)\n codon_text[-1].append(codon + '\\n NA')\n # save ttest p-values and annotation information \n else: \n codon_ttest[-1].append(p_val)\n codon_text[-1].append(codon + '\\n' + str(round(p_val, 2)))\n count_all += 1\n if p_val < 0.5:\n count_sig += 1\n \n sp_codon_mean.append(np.mean(sp_AA_dict[AA][codon]))\n lp_codon_mean.append(np.mean(lp_AA_dict[AA][codon])) \n \n # get Chi-Square test results of each AA\n p_val = stats.chisquare(np.array([sp_codon_mean, lp_codon_mean]), \n axis = None)[1]\n \n # display six AA in a row\n if not j % 6:\n AA_chisquare.append([])\n AA_text.append([])\n j += 1\n \n # 
handle Null values\n if np.isnan(p_val): \n AA_chisquare[-1].append(0)\n AA_text[-1].append(AA + '\\n NA')\n # save Chi-Square test p-values and annotation information\n else: \n AA_chisquare[-1].append(p_val)\n AA_text[-1].append(AA + '\\n' + str(round(p_val, 2)))\n \n # handle empty cells\n for n in range(j % 6, 6):\n AA_chisquare[-1].append(0)\n AA_text[-1].append('')\n \n # get list of AAs that show significant difference between SP and LP groups\n AAs = choose_codons(codon_ttest, codon_text) \n\n AA_chisquare = np.array(AA_chisquare)\n codon_ttest = np.array(codon_ttest)\n \n AA_text = np.array(AA_text)\n codon_text = np.array(codon_text)\n\n print('%d out of %d codon show significant usage difference \\\n between SP and LP genes (p_value < 0.5)\\n' % \n (count_sig, count_all))\n plot_heatmap(AA_chisquare, AA_text, 'AAs_ChiSquare', label)\n plot_heatmap(codon_ttest, codon_text, 'Codons_ttest', label)\n \n return AAs\n\n\ndef plot_heatmap(data, text, cbarlabel, label):\n \n fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (10, 5))\n\n im, cbar = heatmap(data, ax, 'YlGn', cbarlabel)\n \n annotate_heatmap(im, text)\n\n fig.tight_layout()\n plt.show\n plt.savefig(f'../results/{cbarlabel}_{label}.png') \n \ndef heatmap(data, ax, cmap, cbarlabel):\n \n if not ax:\n ax = plt.gca()\n \n im = ax.imshow(data, cmap)\n \n cbar = ax.figure.colorbar(im, ax=ax)\n\n ax.set_xticks(np.arange(data.shape[1]))\n ax.set_yticks(np.arange(data.shape[0]))\n ax.set_xticklabels(range(data.shape[1]))\n ax.set_yticklabels(range(data.shape[0]))\n\n ax.tick_params(top=False, bottom=True,\n labeltop=False, labelbottom=True)\n\n # draw white space between squares\n for edge, spine in ax.spines.items():\n spine.set_visible(False)\n \n ax.set_xticks(np.arange(data.shape[1] + 1) - 0.5, minor = True)\n ax.set_yticks(np.arange(data.shape[0] + 1) - 0.5, minor = True)\n ax.grid(which = 'minor', color = 'w', linestyle = '-', linewidth = 3)\n ax.tick_params(which = 'minor', bottom = False, left = False) \n cbar.ax.set_ylabel(cbarlabel, va = 'top')\n\n return im, cbar\n\n\ndef annotate_heatmap(im, text_label):\n textcolors = ['black','white']\n\n data = im.get_array()\n # set threshold to decide color\n threshold = im.norm(data.max()) / 2\n \n kw = dict(horizontalalignment = 'center',\n verticalalignment = 'center')\n \n for i in range(data.shape[0]):\n for j in range(data.shape[1]):\n kw.update(color = textcolors[im.norm(data[i,j]) > threshold])\n im.axes.text(j, i, text_label[i,j], **kw)\n\n\ndef choose_codons(ttest, text): \n # dict key: AA -> codon\n # only contains AAs with only two codon choices \n codon_map = {\n 'TTT':'Phe', 'TTC':'Phe', 'TAT':'Tyr', 'TAC':'Tyr',\n 'TGT':'Cys', 'TGC':'Cys', 'CAT':'His', 'CAC':'His', \n 'CAA':'Gln', 'CAG':'Gln', 'AAT':'Asn', 'AAC':'Asn', \n 'AAA':'Lys', 'AAG':'Lys', 'GAT':'Asp', 'GAC':'Asp', \n 'GAA':'Glu', 'GAG':'Glu'} \n \n codon_dict = collections.defaultdict(list)\n for i in range(len(ttest)):\n for j in range(len(ttest[i])):\n if ttest[i][j] < 0.01:\n codon = text[i][j][:3]\n if codon in codon_map:\n codon_dict[codon_map[codon]].append(codon)\n \n file = io.open('AAs_to_compare.txt', 'w') \n file.write('Compare following AAs\\n')\n # AAs that have only two codon choices and show significant \n # codon usage difference between SP and LP groups\n AAs = []\n \n for AA in codon_dict.keys():\n AAs.append(AA) \n if len(codon_dict[AA]) == 2:\n file.write('%s: %s, %s\\n' % \n (AA, codon_dict[AA][0], codon_dict[AA][1]))\n else:\n file.write('%s: %s\\n' % (AA, codon_dict[AA][0]))\n \n 
file.close()\n \n return AAs\n \n\ndef plot_SP_LP(sp_AA_dict, lp_AA_dict):\n # plot each AA\n for AA in list(sp_AA_dict.keys()): \n # list of codon usage information\n codon_data = []\n # List of codon names\n codons = []\n \n for codon in sp_AA_dict[AA]: \n # LP group data is displayed from lowest expressed genes \n # to highest expressed genes\n lp_AA_dict[AA][codon].reverse()\n \n codons.append(codon) \n codon_data.append([])\n # display SP group data first and then LP group data\n codon_data[-1].append(sp_AA_dict[AA][codon]) \n codon_data[-1].append(lp_AA_dict[AA][codon])\n \n # plot usage curves \n codon_usage_plot(codon_data, AA, codons)\n\n \ndef codon_usage_plot(data, AA, codons):\n fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (15,5))\n \n for i in range(len(data)):\n # 0-50 shows SP group data\n x_sp = np.linspace(0, 50, len(data[i][0]))\n # 50-100 shows LP group data\n x_lp = np.linspace(50, 100, len(data[i][1]))\n \n ax.plot(x_sp, data[i][0], label = 'sp_' + codons[i])\n ax.plot(x_lp, data[i][1], label = 'lp_' + codons[i])\n ax.legend(loc = 1)\n ax.set_title(AA)\n\n \ndef plot_distribution(sp_dict, lp_dict, AA):\n fig, axes = plt.subplots(nrows = 2, ncols =1, figsize = (40, 20))\n\n for codon in sp_dict[AA]:\n x = np.arange(len(sp_dict[AA][codon]))\n sp_y = np.array(sp_dict[AA][codon])\n lp_y = np.array(lp_dict[AA][codon])\n \n axes[0].plot(x, sp_y)\n axes[1].plot(x, lp_y)\n \n plt.show\n\n\ndef get_skellam_distribution(sp_dict, lp_dict, AA): \n sp_mu = {}\n lp_mu = {}\n codons = []\n \n # get mean values\n for codon in sp_dict[AA]:\n codons.append(codon)\n sp_mu[codon] = np.mean(sp_dict[AA][codon])\n lp_mu[codon] = np.mean(lp_dict[AA][codon])\n \n skellam_plot(sp_mu[codons[0]], sp_mu[codons[1]], 'SP-' + AA)\n skellam_plot(lp_mu[codons[0]], lp_mu[codons[1]], 'LP-' + AA)\n\n\ndef skellam_plot(mu1, mu2, name): \n print(mu1,' ', mu2, ' ', mu1-mu2, ' ', name)\n\n fig, ax = plt.subplots(nrows = 1, ncols = 1, figsize = (5, 5)) \n x = np.arange(stats.skellam.ppf(0.01, mu1, mu2), \n stats.skellam.ppf(0.99, mu1, mu2))\n ax.plot(x, stats.skellam.pmf(x, mu1, mu2), marker = 'o', label = name)\n ax.legend(loc = 1)\n \n plt.show\n \n \n# main flow\nargs = parse_args()\nsp_codon_usage = Codon_Usage(args.sp_file)\nlp_codon_usage = Codon_Usage(args.lp_file)\n\nsp_AA_dict = sp_codon_usage.get_AA_dict() \nlp_AA_dict = lp_codon_usage.get_AA_dict()\n\nprint(\"Analyzing SP and LP %s group data\\n\" % (args.label))\n \nAAs = heatmap_SP_LP(sp_AA_dict, lp_AA_dict, args.label)\nplot_SP_LP(sp_AA_dict, lp_AA_dict)\n\n# optional\n# get Skellam distributions of AAs that have only two codon choices \n# and show distictive usage between SP and LP\n'''\nsp_all_codon_usage = Codon_Usage('SP_all_gene_seq.txt')\nlp_all_codon_usage = Codon_Usage('LP_all_gene_seq.txt')\n\nsp_all_AA_dict = sp_all_codon_usage.get_AA_dict() \nlp_all_AA_dict = lp_all_codon_usage.get_AA_dict()\n\nfor AA in AAs:\n plot_distribution(sp_all_AA_dict, lp_all_AA_dict, AA)\n get_skellam_distribution(sp_all_AA_dict, lp_all_AA_dict, AA)\n'''",
"step-ids": [
11,
13,
16,
20,
21
]
}
|
[
11,
13,
16,
20,
21
] |
from flask import Blueprint
class NestableBlueprint(Blueprint):
def register_blueprint(self, blueprint, **options):
def deferred(state):
            # state.url_prefix => the parent's url prefix + blueprint.url_prefix => /v3/api/cmdb/
url_prefix = (state.url_prefix or u"") + (options.get('url_prefix', blueprint.url_prefix) or u"")
if 'url_prefix' in options:
del options['url_prefix']
# app.register_blueprint(blueprint, '/v3/api/cmdb/')
state.app.register_blueprint(blueprint, url_prefix=url_prefix, **options)
self.record(deferred)
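# --- Illustrative usage (not part of the original snippet) ---
# A minimal sketch of how the nested registration composes URL prefixes; the
# blueprint names and the '/v3', '/api', '/cmdb/' prefixes are assumptions.
if __name__ == '__main__':
    from flask import Flask
    app = Flask(__name__)
    v3 = NestableBlueprint('v3', __name__, url_prefix='/v3')
    api = Blueprint('api', __name__, url_prefix='/api')
    @api.route('/cmdb/')
    def cmdb():
        return 'ok'
    v3.register_blueprint(api)  # recorded; runs when v3 itself is registered
    app.register_blueprint(v3)  # the view is served at /v3/api/cmdb/
    print(app.url_map)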
|
normal
|
{
"blob_id": "2c505f3f1dfdefae8edbea0916873229bcda901f",
"index": 764,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass NestableBlueprint(Blueprint):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass NestableBlueprint(Blueprint):\n\n def register_blueprint(self, blueprint, **options):\n\n def deferred(state):\n url_prefix = (state.url_prefix or u'') + (options.get(\n 'url_prefix', blueprint.url_prefix) or u'')\n if 'url_prefix' in options:\n del options['url_prefix']\n state.app.register_blueprint(blueprint, url_prefix=url_prefix,\n **options)\n self.record(deferred)\n",
"step-4": "from flask import Blueprint\n\n\nclass NestableBlueprint(Blueprint):\n\n def register_blueprint(self, blueprint, **options):\n\n def deferred(state):\n url_prefix = (state.url_prefix or u'') + (options.get(\n 'url_prefix', blueprint.url_prefix) or u'')\n if 'url_prefix' in options:\n del options['url_prefix']\n state.app.register_blueprint(blueprint, url_prefix=url_prefix,\n **options)\n self.record(deferred)\n",
"step-5": "from flask import Blueprint\n\n\nclass NestableBlueprint(Blueprint):\n def register_blueprint(self, blueprint, **options):\n def deferred(state):\n # state.url_prefix => 自己url前缀 + blueprint.url_prefix => /v3/api/cmdb/\n url_prefix = (state.url_prefix or u\"\") + (options.get('url_prefix', blueprint.url_prefix) or u\"\")\n if 'url_prefix' in options:\n del options['url_prefix']\n # app.register_blueprint(blueprint, '/v3/api/cmdb/')\n state.app.register_blueprint(blueprint, url_prefix=url_prefix, **options)\n self.record(deferred)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# coding: utf-8
import sys
sysread = sys.stdin.readline
read = sys.stdin.read
from collections import defaultdict
sys.setrecursionlimit(10**7)
import math
def run():
mod = 1000000007
N, *AB = map(int, read().split())
A_B = []
INF = float('inf')
zerozero = 0
for i in range(N):
a = AB[2*i]
b = AB[2*i+1]
        if a == 0 and b == 0:
zerozero += 1
elif b == 0:
A_B.append((INF, 0))
elif a == 0:
A_B.append((0, INF))
else:
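            # normalize (a, b): divide out the gcd and fix the sign so that
            # parallel (or anti-parallel) vectors map to the same key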
tmp = math.gcd(a,b)
            v = 1 if a / b > 0 else -1
A_B.append((abs(a//tmp), v * abs(b//tmp)))
comb_dict = defaultdict(lambda:[0,0])
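    # key (p, q) collects two mutually-exclusive camps: slot 0 counts vectors
    # parallel to (p, q), slot 1 counts vectors perpendicular to it; key 0
    # pairs the b == 0 vectors against the a == 0 vectors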
for ai, bi in A_B:
if ai == INF:
comb_dict[0][1] += 1
elif bi == INF:
comb_dict[0][0] += 1
elif bi < 0:
comb_dict[(ai,bi)][0] += 1
else:
comb_dict[(bi, -ai)][1] += 1
ret = 1
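    # within each camp any subset is allowed, but the two camps exclude each
    # other: 2^a + 2^b - 1 choices (the -1 stops double-counting "take none")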
for _, val_list in comb_dict.items():
a,b = val_list
if a == 0 or b == 0:
ret *= pow(2, max(a,b), mod)
else:
ret *= pow(2, a, mod) + pow(2, b, mod) - 1
ret %= mod
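    # a (0, 0) sardine conflicts with every other one, so it can only be the
    # sole pick; finally subtract 1 to drop the completely empty selection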
ret += zerozero-1
print(ret%mod)
if __name__ == "__main__":
run()
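# Hand-checked sample (this matches AtCoder ABC168 E, the "sardines" problem):
# for input "3 / 1 2 / -1 1 / 2 -1", only sardines 1 and 3 are on bad terms
# (1*2 + 2*(-1) = 0), so the valid non-empty picks are {1}, {2}, {3},
# {1,2}, {2,3} and the program prints 5.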
|
normal
|
{
"blob_id": "f73a3bd7665ac9cc90085fcac2530c93bef69d3d",
"index": 6705,
"step-1": "<mask token>\n\n\ndef run():\n mod = 1000000007\n N, *AB = map(int, read().split())\n A_B = []\n INF = float('inf')\n zerozero = 0\n for i in range(N):\n a = AB[2 * i]\n b = AB[2 * i + 1]\n if a == 0 and b == 0:\n zerozero += 1\n elif b == 0:\n A_B.append((INF, 0))\n elif a == 0:\n A_B.append((0, INF))\n else:\n tmp = math.gcd(a, b)\n if a / b > 0:\n v = 1\n else:\n v = -1\n A_B.append((abs(a // tmp), v * abs(b // tmp)))\n comb_dict = defaultdict(lambda : [0, 0])\n for ai, bi in A_B:\n if ai == INF:\n comb_dict[0][1] += 1\n elif bi == INF:\n comb_dict[0][0] += 1\n elif bi < 0:\n comb_dict[ai, bi][0] += 1\n else:\n comb_dict[bi, -ai][1] += 1\n ret = 1\n for _, val_list in comb_dict.items():\n a, b = val_list\n if a == 0 or b == 0:\n ret *= pow(2, max(a, b), mod)\n else:\n ret *= pow(2, a, mod) + pow(2, b, mod) - 1\n ret %= mod\n ret += zerozero - 1\n print(ret % mod)\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.setrecursionlimit(10 ** 7)\n<mask token>\n\n\ndef run():\n mod = 1000000007\n N, *AB = map(int, read().split())\n A_B = []\n INF = float('inf')\n zerozero = 0\n for i in range(N):\n a = AB[2 * i]\n b = AB[2 * i + 1]\n if a == 0 and b == 0:\n zerozero += 1\n elif b == 0:\n A_B.append((INF, 0))\n elif a == 0:\n A_B.append((0, INF))\n else:\n tmp = math.gcd(a, b)\n if a / b > 0:\n v = 1\n else:\n v = -1\n A_B.append((abs(a // tmp), v * abs(b // tmp)))\n comb_dict = defaultdict(lambda : [0, 0])\n for ai, bi in A_B:\n if ai == INF:\n comb_dict[0][1] += 1\n elif bi == INF:\n comb_dict[0][0] += 1\n elif bi < 0:\n comb_dict[ai, bi][0] += 1\n else:\n comb_dict[bi, -ai][1] += 1\n ret = 1\n for _, val_list in comb_dict.items():\n a, b = val_list\n if a == 0 or b == 0:\n ret *= pow(2, max(a, b), mod)\n else:\n ret *= pow(2, a, mod) + pow(2, b, mod) - 1\n ret %= mod\n ret += zerozero - 1\n print(ret % mod)\n\n\nif __name__ == '__main__':\n run()\n",
"step-3": "<mask token>\nsysread = sys.stdin.readline\nread = sys.stdin.read\n<mask token>\nsys.setrecursionlimit(10 ** 7)\n<mask token>\n\n\ndef run():\n mod = 1000000007\n N, *AB = map(int, read().split())\n A_B = []\n INF = float('inf')\n zerozero = 0\n for i in range(N):\n a = AB[2 * i]\n b = AB[2 * i + 1]\n if a == 0 and b == 0:\n zerozero += 1\n elif b == 0:\n A_B.append((INF, 0))\n elif a == 0:\n A_B.append((0, INF))\n else:\n tmp = math.gcd(a, b)\n if a / b > 0:\n v = 1\n else:\n v = -1\n A_B.append((abs(a // tmp), v * abs(b // tmp)))\n comb_dict = defaultdict(lambda : [0, 0])\n for ai, bi in A_B:\n if ai == INF:\n comb_dict[0][1] += 1\n elif bi == INF:\n comb_dict[0][0] += 1\n elif bi < 0:\n comb_dict[ai, bi][0] += 1\n else:\n comb_dict[bi, -ai][1] += 1\n ret = 1\n for _, val_list in comb_dict.items():\n a, b = val_list\n if a == 0 or b == 0:\n ret *= pow(2, max(a, b), mod)\n else:\n ret *= pow(2, a, mod) + pow(2, b, mod) - 1\n ret %= mod\n ret += zerozero - 1\n print(ret % mod)\n\n\nif __name__ == '__main__':\n run()\n",
"step-4": "import sys\nsysread = sys.stdin.readline\nread = sys.stdin.read\nfrom heapq import heappop, heappush\nfrom collections import defaultdict\nsys.setrecursionlimit(10 ** 7)\nimport math\n\n\ndef run():\n mod = 1000000007\n N, *AB = map(int, read().split())\n A_B = []\n INF = float('inf')\n zerozero = 0\n for i in range(N):\n a = AB[2 * i]\n b = AB[2 * i + 1]\n if a == 0 and b == 0:\n zerozero += 1\n elif b == 0:\n A_B.append((INF, 0))\n elif a == 0:\n A_B.append((0, INF))\n else:\n tmp = math.gcd(a, b)\n if a / b > 0:\n v = 1\n else:\n v = -1\n A_B.append((abs(a // tmp), v * abs(b // tmp)))\n comb_dict = defaultdict(lambda : [0, 0])\n for ai, bi in A_B:\n if ai == INF:\n comb_dict[0][1] += 1\n elif bi == INF:\n comb_dict[0][0] += 1\n elif bi < 0:\n comb_dict[ai, bi][0] += 1\n else:\n comb_dict[bi, -ai][1] += 1\n ret = 1\n for _, val_list in comb_dict.items():\n a, b = val_list\n if a == 0 or b == 0:\n ret *= pow(2, max(a, b), mod)\n else:\n ret *= pow(2, a, mod) + pow(2, b, mod) - 1\n ret %= mod\n ret += zerozero - 1\n print(ret % mod)\n\n\nif __name__ == '__main__':\n run()\n",
"step-5": "# coding: utf-8\nimport sys\n#from operator import itemgetter\nsysread = sys.stdin.readline\nread = sys.stdin.read\nfrom heapq import heappop, heappush\nfrom collections import defaultdict\nsys.setrecursionlimit(10**7)\nimport math\n#from itertools import product#accumulate, combinations, product\n#import bisect# lower_bound etc\n#import numpy as np\n#from copy import deepcopy\ndef run():\n mod = 1000000007\n N, *AB = map(int, read().split())\n A_B = []\n INF = float('inf')\n zerozero = 0\n for i in range(N):\n a = AB[2*i]\n b = AB[2*i+1]\n if a== 0 and b == 0:\n zerozero += 1\n elif b == 0:\n A_B.append((INF, 0))\n elif a == 0:\n A_B.append((0, INF))\n else:\n tmp = math.gcd(a,b)\n if a / b > 0 :v = 1\n else: v = -1\n A_B.append((abs(a//tmp), v * abs(b//tmp)))\n\n comb_dict = defaultdict(lambda:[0,0])\n\n for ai, bi in A_B:\n if ai == INF:\n comb_dict[0][1] += 1\n elif bi == INF:\n comb_dict[0][0] += 1\n elif bi < 0:\n comb_dict[(ai,bi)][0] += 1\n else:\n comb_dict[(bi, -ai)][1] += 1\n\n ret = 1\n for _, val_list in comb_dict.items():\n a,b = val_list\n if a == 0 or b == 0:\n ret *= pow(2, max(a,b), mod)\n else:\n ret *= pow(2, a, mod) + pow(2, b, mod) - 1\n ret %= mod\n ret += zerozero-1\n print(ret%mod)\n\n\n\nif __name__ == \"__main__\":\n run()",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
"""
Given a list of partitioned and sentiment-analyzed tweets, run several trials
to guess who won the election
"""
import json
import sys
import pprint
import feature_vector
def positive_volume(f):
return f['relative_volume'] * f['positive_percent']
def inv_negative_volume(f):
return 1.0 - f['relative_volume'] * f['negative_percent']
def normalized_sentiment(f):
return (f['average_sentiment'] + 1) / 2
def normalized_square_sentiment(f):
return (f['avg_square_sentiment'] + 1) / 2
def weighted_sentiment(f):
return (f['relative_volume'] * f['average_sentiment'] + 1) / 2
# We want a function that's close to 1 unless the relative tweet volume is low
def quadratic_diff_penalty(f, scale):
val = f['relative_volume']
return 1 - scale * (1 - val) ** 2
# Experiment using x ** 3 as the penalty function
def cubic_diff_penalty(f, scale):
val = f['relative_volume']
return 1 - scale * (1 - val) ** 3
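# Worked numbers for the penalties above (hypothetical volumes): with
# scale = 0.5, relative_volume 1.0 keeps the full factor 1.0, volume 0.5
# gives 1 - 0.5 * 0.5 ** 2 = 0.875 quadratically, and the cubic version is
# gentler at 1 - 0.5 * 0.5 ** 3 = 0.9375; volume 0.0 bottoms out at 0.5.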
def linear_combination(f, a1, a2, a3, a4 = 0, a5 = 0):
return (a1 * positive_volume(f)
+ a2 * inv_negative_volume(f)
+ a3 * normalized_sentiment(f)
+ a4 * normalized_square_sentiment(f)
+ a5 * weighted_sentiment(f))
def run_trial(function, feature_map):
candidate_scores = {}
total_score = 0
for candidate, features in feature_map.items():
score = function(features)
candidate_scores[candidate] = score
total_score += score
for candidate, score in candidate_scores.items():
candidate_scores[candidate] = score / total_score
return candidate_scores
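# Toy illustration of the normalization above (made-up scores): if a trial
# function returns 0.6 for candidate A and 0.2 for candidate B, run_trial
# yields {'A': 0.75, 'B': 0.25}, i.e. each candidate's share of the total.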
def predict(tweet_dictionary, print_all):
features = feature_vector.gen_feature_vector(tweet_dictionary)
trial_list = [
#1
lambda f: linear_combination(f, 1, 0, 0),
lambda f: linear_combination(f, 0.5, 0, 0.5),
lambda f: linear_combination(f, 0.33, 0.33, 0.33),
lambda f: linear_combination(f, 0.25, 0.25, 0.5),
lambda f: linear_combination(f, 0.5, 0.25, 0.25),
lambda f: linear_combination(f, 0.2, 0.1, 0.0, 0.7),
lambda f: linear_combination(f, 0.0, 0.0, 0.0, 1.0),
lambda f: linear_combination(f, 0.5, 0.0, 0.0, 0.5),
lambda f: linear_combination(f, 0.3, 0.15, 0.15, 0.3),
lambda f: linear_combination(f, 0.5, 0.1, 0.1, 0.3),
#11
lambda f: linear_combination(f, 0.6, 0.0, 0.0, 0.4),
lambda f: linear_combination(f, 0.55, 0.0, 0.2, 0.25),
lambda f: linear_combination(f, 0.5, 0.1, 0.15, 0.25),
lambda f: linear_combination(f, 0.5, 0.05, 0.1, 0.35),
lambda f: linear_combination(f, 0.4, 0.05, 0.1, 0.35, 0.1),
lambda f: linear_combination(f, 0.45, 0.05, 0.05, 0.35, 0.1),
lambda f: linear_combination(f, 0.35, 0.0, 0.1, 0.35, 0.2),
lambda f: linear_combination(f, 0.35, 0.0, 0.1, 0.25, 0.3),
lambda f: linear_combination(f, 0.35, 0.0, 0.1, 0.25, 0.3) * quadratic_diff_penalty(f, 1),
lambda f: linear_combination(f, 0.35, 0.0, 0.1, 0.25, 0.3) * quadratic_diff_penalty(f, 0.25),
# 21
lambda f: linear_combination(f, 0.25, 0.0, 0.15, 0.4, 0.2) * quadratic_diff_penalty(f, 0.25),
lambda f: linear_combination(f, 0.25, 0.0, 0.2, 0.45, 0.1) * quadratic_diff_penalty(f, 0.3),
lambda f: linear_combination(f, 0.25, 0.0, 0.2, 0.45, 0.1) * quadratic_diff_penalty(f, 0.4),
lambda f: linear_combination(f, 0.2, 0.0, 0.2, 0.5, 0.1) * quadratic_diff_penalty(f, 0.4),
lambda f: linear_combination(f, 0.2, 0.0, 0.2, 0.5, 0.1) * quadratic_diff_penalty(f, 0.45),
lambda f: linear_combination(f, 0.15, 0.0, 0.25, 0.55, 0.05) * quadratic_diff_penalty(f, 0.45),
lambda f: linear_combination(f, 0.15, 0.0, 0.25, 0.55, 0.05) * quadratic_diff_penalty(f, 0.5),
lambda f: linear_combination(f, 0.15, 0.0, 0.25, 0.55, 0.05) * cubic_diff_penalty(f, 0.5),
lambda f: linear_combination(f, 0.15, 0.0, 0.25, 0.55, 0.05) * cubic_diff_penalty(f, 0.6),
lambda f: linear_combination(f, 0.15, 0.0, 0.25, 0.55, 0.05) * cubic_diff_penalty(f, 0.7),
# 31
lambda f: linear_combination(f, 0.1, 0.0, 0.25, 0.65, 0) * cubic_diff_penalty(f, 0.7),
lambda f: linear_combination(f, 0.1, 0.0, 0.25, 0.65, 0) * cubic_diff_penalty(f, 0.75),
lambda f: linear_combination(f, 0.05, 0.0, 0.25, 0.7, 0) * cubic_diff_penalty(f, 0.75),
]
if print_all:
print('Feature vector:')
pprint.pprint(features)
print('\nTrial Results:')
for index, function in enumerate(trial_list):
print('trial %d:' % (index + 1))
print(run_trial(function, features))
print()
print()
final_trial_result = run_trial(trial_list[-1], features)
print('Predicted Outcome:')
max_percent = 0
winning_candidate = ''
for candidate, percent in final_trial_result.items():
        print(candidate + ': ', int(percent * 100000) / 1000)  # percentage with 3 decimal places
        if percent > max_percent:
max_percent = percent
winning_candidate = candidate
print('\nProjected Winner:')
print(winning_candidate)
if __name__ == '__main__':
if len(sys.argv) != 2 and len(sys.argv) != 3:
print('Usage: python predict.py filename [print_all = True]')
exit()
with open(sys.argv[1], 'r') as tweet_file:
print_all = True if len(sys.argv) == 2 else (sys.argv[2].lower() == 'true')
predict(json.loads(tweet_file.read()), print_all)
|
normal
|
{
"blob_id": "d508cb0a8d4291f1c8e76d9d720be352c05ef146",
"index": 8651,
"step-1": "<mask token>\n\n\ndef positive_volume(f):\n return f['relative_volume'] * f['positive_percent']\n\n\n<mask token>\n\n\ndef normalized_sentiment(f):\n return (f['average_sentiment'] + 1) / 2\n\n\ndef normalized_square_sentiment(f):\n return (f['avg_square_sentiment'] + 1) / 2\n\n\ndef weighted_sentiment(f):\n return (f['relative_volume'] * f['average_sentiment'] + 1) / 2\n\n\ndef quadratic_diff_penalty(f, scale):\n val = f['relative_volume']\n return 1 - scale * (1 - val) ** 2\n\n\n<mask token>\n\n\ndef linear_combination(f, a1, a2, a3, a4=0, a5=0):\n return a1 * positive_volume(f) + a2 * inv_negative_volume(f\n ) + a3 * normalized_sentiment(f) + a4 * normalized_square_sentiment(f\n ) + a5 * weighted_sentiment(f)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef positive_volume(f):\n return f['relative_volume'] * f['positive_percent']\n\n\n<mask token>\n\n\ndef normalized_sentiment(f):\n return (f['average_sentiment'] + 1) / 2\n\n\ndef normalized_square_sentiment(f):\n return (f['avg_square_sentiment'] + 1) / 2\n\n\ndef weighted_sentiment(f):\n return (f['relative_volume'] * f['average_sentiment'] + 1) / 2\n\n\ndef quadratic_diff_penalty(f, scale):\n val = f['relative_volume']\n return 1 - scale * (1 - val) ** 2\n\n\ndef cubic_diff_penalty(f, scale):\n val = f['relative_volume']\n return 1 - scale * (1 - val) ** 3\n\n\ndef linear_combination(f, a1, a2, a3, a4=0, a5=0):\n return a1 * positive_volume(f) + a2 * inv_negative_volume(f\n ) + a3 * normalized_sentiment(f) + a4 * normalized_square_sentiment(f\n ) + a5 * weighted_sentiment(f)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef positive_volume(f):\n return f['relative_volume'] * f['positive_percent']\n\n\ndef inv_negative_volume(f):\n return 1.0 - f['relative_volume'] * f['negative_percent']\n\n\ndef normalized_sentiment(f):\n return (f['average_sentiment'] + 1) / 2\n\n\ndef normalized_square_sentiment(f):\n return (f['avg_square_sentiment'] + 1) / 2\n\n\ndef weighted_sentiment(f):\n return (f['relative_volume'] * f['average_sentiment'] + 1) / 2\n\n\ndef quadratic_diff_penalty(f, scale):\n val = f['relative_volume']\n return 1 - scale * (1 - val) ** 2\n\n\ndef cubic_diff_penalty(f, scale):\n val = f['relative_volume']\n return 1 - scale * (1 - val) ** 3\n\n\ndef linear_combination(f, a1, a2, a3, a4=0, a5=0):\n return a1 * positive_volume(f) + a2 * inv_negative_volume(f\n ) + a3 * normalized_sentiment(f) + a4 * normalized_square_sentiment(f\n ) + a5 * weighted_sentiment(f)\n\n\ndef run_trial(function, feature_map):\n candidate_scores = {}\n total_score = 0\n for candidate, features in feature_map.items():\n score = function(features)\n candidate_scores[candidate] = score\n total_score += score\n for candidate, score in candidate_scores.items():\n candidate_scores[candidate] = score / total_score\n return candidate_scores\n\n\ndef predict(tweet_dictionary, print_all):\n features = feature_vector.gen_feature_vector(tweet_dictionary)\n trial_list = [lambda f: linear_combination(f, 1, 0, 0), lambda f:\n linear_combination(f, 0.5, 0, 0.5), lambda f: linear_combination(f,\n 0.33, 0.33, 0.33), lambda f: linear_combination(f, 0.25, 0.25, 0.5),\n lambda f: linear_combination(f, 0.5, 0.25, 0.25), lambda f:\n linear_combination(f, 0.2, 0.1, 0.0, 0.7), lambda f:\n linear_combination(f, 0.0, 0.0, 0.0, 1.0), lambda f:\n linear_combination(f, 0.5, 0.0, 0.0, 0.5), lambda f:\n linear_combination(f, 0.3, 0.15, 0.15, 0.3), lambda f:\n linear_combination(f, 0.5, 0.1, 0.1, 0.3), lambda f:\n linear_combination(f, 0.6, 0.0, 0.0, 0.4), lambda f:\n linear_combination(f, 0.55, 0.0, 0.2, 0.25), lambda f:\n linear_combination(f, 0.5, 0.1, 0.15, 0.25), lambda f:\n linear_combination(f, 0.5, 0.05, 0.1, 0.35), lambda f:\n linear_combination(f, 0.4, 0.05, 0.1, 0.35, 0.1), lambda f:\n linear_combination(f, 0.45, 0.05, 0.05, 0.35, 0.1), lambda f:\n linear_combination(f, 0.35, 0.0, 0.1, 0.35, 0.2), lambda f:\n linear_combination(f, 0.35, 0.0, 0.1, 0.25, 0.3), lambda f: \n linear_combination(f, 0.35, 0.0, 0.1, 0.25, 0.3) *\n quadratic_diff_penalty(f, 1), lambda f: linear_combination(f, 0.35,\n 0.0, 0.1, 0.25, 0.3) * quadratic_diff_penalty(f, 0.25), lambda f: \n linear_combination(f, 0.25, 0.0, 0.15, 0.4, 0.2) *\n quadratic_diff_penalty(f, 0.25), lambda f: linear_combination(f, \n 0.25, 0.0, 0.2, 0.45, 0.1) * quadratic_diff_penalty(f, 0.3), lambda\n f: linear_combination(f, 0.25, 0.0, 0.2, 0.45, 0.1) *\n quadratic_diff_penalty(f, 0.4), lambda f: linear_combination(f, 0.2,\n 0.0, 0.2, 0.5, 0.1) * quadratic_diff_penalty(f, 0.4), lambda f: \n linear_combination(f, 0.2, 0.0, 0.2, 0.5, 0.1) *\n quadratic_diff_penalty(f, 0.45), lambda f: linear_combination(f, \n 0.15, 0.0, 0.25, 0.55, 0.05) * quadratic_diff_penalty(f, 0.45), lambda\n f: linear_combination(f, 0.15, 0.0, 0.25, 0.55, 0.05) *\n quadratic_diff_penalty(f, 0.5), lambda f: linear_combination(f, \n 0.15, 0.0, 0.25, 0.55, 0.05) * cubic_diff_penalty(f, 0.5), lambda f:\n linear_combination(f, 0.15, 0.0, 0.25, 0.55, 0.05) *\n cubic_diff_penalty(f, 0.6), lambda f: linear_combination(f, 0.15, \n 0.0, 0.25, 0.55, 0.05) * cubic_diff_penalty(f, 0.7), lambda f: \n 
linear_combination(f, 0.1, 0.0, 0.25, 0.65, 0) * cubic_diff_penalty\n (f, 0.7), lambda f: linear_combination(f, 0.1, 0.0, 0.25, 0.65, 0) *\n cubic_diff_penalty(f, 0.75), lambda f: linear_combination(f, 0.05, \n 0.0, 0.25, 0.7, 0) * cubic_diff_penalty(f, 0.75)]\n if print_all:\n print('Feature vector:')\n pprint.pprint(features)\n print('\\nTrial Results:')\n for index, function in enumerate(trial_list):\n print('trial %d:' % (index + 1))\n print(run_trial(function, features))\n print()\n print()\n final_trial_result = run_trial(trial_list[-1], features)\n print('Predicted Outcome:')\n max_percent = 0\n winning_candidate = ''\n for candidate, percent in final_trial_result.items():\n print(candidate + ': ', int(percent * 100008) / 1000)\n if percent > max_percent:\n max_percent = percent\n winning_candidate = candidate\n print('\\nProjected Winner:')\n print(winning_candidate)\n\n\n<mask token>\n",
"step-4": "<mask token>\nimport json\nimport math\nimport sys\nimport pprint\nimport feature_vector\n\n\ndef positive_volume(f):\n return f['relative_volume'] * f['positive_percent']\n\n\ndef inv_negative_volume(f):\n return 1.0 - f['relative_volume'] * f['negative_percent']\n\n\ndef normalized_sentiment(f):\n return (f['average_sentiment'] + 1) / 2\n\n\ndef normalized_square_sentiment(f):\n return (f['avg_square_sentiment'] + 1) / 2\n\n\ndef weighted_sentiment(f):\n return (f['relative_volume'] * f['average_sentiment'] + 1) / 2\n\n\ndef quadratic_diff_penalty(f, scale):\n val = f['relative_volume']\n return 1 - scale * (1 - val) ** 2\n\n\ndef cubic_diff_penalty(f, scale):\n val = f['relative_volume']\n return 1 - scale * (1 - val) ** 3\n\n\ndef linear_combination(f, a1, a2, a3, a4=0, a5=0):\n return a1 * positive_volume(f) + a2 * inv_negative_volume(f\n ) + a3 * normalized_sentiment(f) + a4 * normalized_square_sentiment(f\n ) + a5 * weighted_sentiment(f)\n\n\ndef run_trial(function, feature_map):\n candidate_scores = {}\n total_score = 0\n for candidate, features in feature_map.items():\n score = function(features)\n candidate_scores[candidate] = score\n total_score += score\n for candidate, score in candidate_scores.items():\n candidate_scores[candidate] = score / total_score\n return candidate_scores\n\n\ndef predict(tweet_dictionary, print_all):\n features = feature_vector.gen_feature_vector(tweet_dictionary)\n trial_list = [lambda f: linear_combination(f, 1, 0, 0), lambda f:\n linear_combination(f, 0.5, 0, 0.5), lambda f: linear_combination(f,\n 0.33, 0.33, 0.33), lambda f: linear_combination(f, 0.25, 0.25, 0.5),\n lambda f: linear_combination(f, 0.5, 0.25, 0.25), lambda f:\n linear_combination(f, 0.2, 0.1, 0.0, 0.7), lambda f:\n linear_combination(f, 0.0, 0.0, 0.0, 1.0), lambda f:\n linear_combination(f, 0.5, 0.0, 0.0, 0.5), lambda f:\n linear_combination(f, 0.3, 0.15, 0.15, 0.3), lambda f:\n linear_combination(f, 0.5, 0.1, 0.1, 0.3), lambda f:\n linear_combination(f, 0.6, 0.0, 0.0, 0.4), lambda f:\n linear_combination(f, 0.55, 0.0, 0.2, 0.25), lambda f:\n linear_combination(f, 0.5, 0.1, 0.15, 0.25), lambda f:\n linear_combination(f, 0.5, 0.05, 0.1, 0.35), lambda f:\n linear_combination(f, 0.4, 0.05, 0.1, 0.35, 0.1), lambda f:\n linear_combination(f, 0.45, 0.05, 0.05, 0.35, 0.1), lambda f:\n linear_combination(f, 0.35, 0.0, 0.1, 0.35, 0.2), lambda f:\n linear_combination(f, 0.35, 0.0, 0.1, 0.25, 0.3), lambda f: \n linear_combination(f, 0.35, 0.0, 0.1, 0.25, 0.3) *\n quadratic_diff_penalty(f, 1), lambda f: linear_combination(f, 0.35,\n 0.0, 0.1, 0.25, 0.3) * quadratic_diff_penalty(f, 0.25), lambda f: \n linear_combination(f, 0.25, 0.0, 0.15, 0.4, 0.2) *\n quadratic_diff_penalty(f, 0.25), lambda f: linear_combination(f, \n 0.25, 0.0, 0.2, 0.45, 0.1) * quadratic_diff_penalty(f, 0.3), lambda\n f: linear_combination(f, 0.25, 0.0, 0.2, 0.45, 0.1) *\n quadratic_diff_penalty(f, 0.4), lambda f: linear_combination(f, 0.2,\n 0.0, 0.2, 0.5, 0.1) * quadratic_diff_penalty(f, 0.4), lambda f: \n linear_combination(f, 0.2, 0.0, 0.2, 0.5, 0.1) *\n quadratic_diff_penalty(f, 0.45), lambda f: linear_combination(f, \n 0.15, 0.0, 0.25, 0.55, 0.05) * quadratic_diff_penalty(f, 0.45), lambda\n f: linear_combination(f, 0.15, 0.0, 0.25, 0.55, 0.05) *\n quadratic_diff_penalty(f, 0.5), lambda f: linear_combination(f, \n 0.15, 0.0, 0.25, 0.55, 0.05) * cubic_diff_penalty(f, 0.5), lambda f:\n linear_combination(f, 0.15, 0.0, 0.25, 0.55, 0.05) *\n cubic_diff_penalty(f, 0.6), lambda f: linear_combination(f, 0.15, \n 
0.0, 0.25, 0.55, 0.05) * cubic_diff_penalty(f, 0.7), lambda f: \n linear_combination(f, 0.1, 0.0, 0.25, 0.65, 0) * cubic_diff_penalty\n (f, 0.7), lambda f: linear_combination(f, 0.1, 0.0, 0.25, 0.65, 0) *\n cubic_diff_penalty(f, 0.75), lambda f: linear_combination(f, 0.05, \n 0.0, 0.25, 0.7, 0) * cubic_diff_penalty(f, 0.75)]\n if print_all:\n print('Feature vector:')\n pprint.pprint(features)\n print('\\nTrial Results:')\n for index, function in enumerate(trial_list):\n print('trial %d:' % (index + 1))\n print(run_trial(function, features))\n print()\n print()\n final_trial_result = run_trial(trial_list[-1], features)\n print('Predicted Outcome:')\n max_percent = 0\n winning_candidate = ''\n for candidate, percent in final_trial_result.items():\n print(candidate + ': ', int(percent * 100008) / 1000)\n if percent > max_percent:\n max_percent = percent\n winning_candidate = candidate\n print('\\nProjected Winner:')\n print(winning_candidate)\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 2 and len(sys.argv) != 3:\n print('Usage: python predict.py filename [print_all = True]')\n exit()\n with open(sys.argv[1], 'r') as tweet_file:\n print_all = True if len(sys.argv) == 2 else sys.argv[2].lower(\n ) == 'true'\n predict(json.loads(tweet_file.read()), print_all)\n",
"step-5": "\"\"\"\nGiven a list of partitioned and sentiment-analyzed tweets, run several trials\nto guess who won the election\n\"\"\"\n\nimport json\nimport math\nimport sys\nimport pprint\n\nimport feature_vector\n\ndef positive_volume(f):\n return f['relative_volume'] * f['positive_percent']\n\ndef inv_negative_volume(f):\n return 1.0 - f['relative_volume'] * f['negative_percent']\n\ndef normalized_sentiment(f):\n return (f['average_sentiment'] + 1) / 2\n\ndef normalized_square_sentiment(f):\n return (f['avg_square_sentiment'] + 1) / 2\n\ndef weighted_sentiment(f):\n return (f['relative_volume'] * f['average_sentiment'] + 1) / 2\n\n# We want a function that's close to 1 unless the relative tweet volume is low\ndef quadratic_diff_penalty(f, scale):\n val = f['relative_volume']\n return 1 - scale * (1 - val) ** 2\n\n# Experiment using x ** 3 as the penalty function\ndef cubic_diff_penalty(f, scale):\n val = f['relative_volume']\n return 1 - scale * (1 - val) ** 3\n\n\ndef linear_combination(f, a1, a2, a3, a4 = 0, a5 = 0):\n return (a1 * positive_volume(f)\n + a2 * inv_negative_volume(f)\n + a3 * normalized_sentiment(f)\n + a4 * normalized_square_sentiment(f)\n + a5 * weighted_sentiment(f))\n\ndef run_trial(function, feature_map):\n candidate_scores = {}\n total_score = 0\n for candidate, features in feature_map.items():\n score = function(features)\n candidate_scores[candidate] = score\n total_score += score\n for candidate, score in candidate_scores.items():\n candidate_scores[candidate] = score / total_score\n return candidate_scores\n\ndef predict(tweet_dictionary, print_all):\n features = feature_vector.gen_feature_vector(tweet_dictionary)\n trial_list = [\n #1\n lambda f: linear_combination(f, 1, 0, 0),\n lambda f: linear_combination(f, 0.5, 0, 0.5),\n lambda f: linear_combination(f, 0.33, 0.33, 0.33),\n lambda f: linear_combination(f, 0.25, 0.25, 0.5),\n lambda f: linear_combination(f, 0.5, 0.25, 0.25),\n lambda f: linear_combination(f, 0.2, 0.1, 0.0, 0.7),\n lambda f: linear_combination(f, 0.0, 0.0, 0.0, 1.0),\n lambda f: linear_combination(f, 0.5, 0.0, 0.0, 0.5),\n lambda f: linear_combination(f, 0.3, 0.15, 0.15, 0.3),\n lambda f: linear_combination(f, 0.5, 0.1, 0.1, 0.3),\n #11\n lambda f: linear_combination(f, 0.6, 0.0, 0.0, 0.4),\n lambda f: linear_combination(f, 0.55, 0.0, 0.2, 0.25),\n lambda f: linear_combination(f, 0.5, 0.1, 0.15, 0.25),\n lambda f: linear_combination(f, 0.5, 0.05, 0.1, 0.35),\n lambda f: linear_combination(f, 0.4, 0.05, 0.1, 0.35, 0.1),\n lambda f: linear_combination(f, 0.45, 0.05, 0.05, 0.35, 0.1),\n lambda f: linear_combination(f, 0.35, 0.0, 0.1, 0.35, 0.2),\n lambda f: linear_combination(f, 0.35, 0.0, 0.1, 0.25, 0.3),\n lambda f: linear_combination(f, 0.35, 0.0, 0.1, 0.25, 0.3) * quadratic_diff_penalty(f, 1),\n lambda f: linear_combination(f, 0.35, 0.0, 0.1, 0.25, 0.3) * quadratic_diff_penalty(f, 0.25),\n # 21\n lambda f: linear_combination(f, 0.25, 0.0, 0.15, 0.4, 0.2) * quadratic_diff_penalty(f, 0.25),\n lambda f: linear_combination(f, 0.25, 0.0, 0.2, 0.45, 0.1) * quadratic_diff_penalty(f, 0.3),\n lambda f: linear_combination(f, 0.25, 0.0, 0.2, 0.45, 0.1) * quadratic_diff_penalty(f, 0.4),\n lambda f: linear_combination(f, 0.2, 0.0, 0.2, 0.5, 0.1) * quadratic_diff_penalty(f, 0.4),\n lambda f: linear_combination(f, 0.2, 0.0, 0.2, 0.5, 0.1) * quadratic_diff_penalty(f, 0.45),\n lambda f: linear_combination(f, 0.15, 0.0, 0.25, 0.55, 0.05) * quadratic_diff_penalty(f, 0.45),\n lambda f: linear_combination(f, 0.15, 0.0, 0.25, 0.55, 0.05) * 
quadratic_diff_penalty(f, 0.5),\n lambda f: linear_combination(f, 0.15, 0.0, 0.25, 0.55, 0.05) * cubic_diff_penalty(f, 0.5),\n lambda f: linear_combination(f, 0.15, 0.0, 0.25, 0.55, 0.05) * cubic_diff_penalty(f, 0.6),\n lambda f: linear_combination(f, 0.15, 0.0, 0.25, 0.55, 0.05) * cubic_diff_penalty(f, 0.7),\n # 31\n lambda f: linear_combination(f, 0.1, 0.0, 0.25, 0.65, 0) * cubic_diff_penalty(f, 0.7),\n lambda f: linear_combination(f, 0.1, 0.0, 0.25, 0.65, 0) * cubic_diff_penalty(f, 0.75),\n lambda f: linear_combination(f, 0.05, 0.0, 0.25, 0.7, 0) * cubic_diff_penalty(f, 0.75),\n ]\n\n if print_all:\n print('Feature vector:')\n pprint.pprint(features)\n print('\\nTrial Results:')\n for index, function in enumerate(trial_list):\n print('trial %d:' % (index + 1))\n print(run_trial(function, features))\n print()\n print()\n final_trial_result = run_trial(trial_list[-1], features)\n print('Predicted Outcome:')\n max_percent = 0\n winning_candidate = ''\n for candidate, percent in final_trial_result.items():\n print(candidate + ': ', int(percent * 100008) / 1000)\n if (percent > max_percent):\n max_percent = percent\n winning_candidate = candidate\n print('\\nProjected Winner:')\n print(winning_candidate)\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 2 and len(sys.argv) != 3:\n print('Usage: python predict.py filename [print_all = True]')\n exit()\n with open(sys.argv[1], 'r') as tweet_file:\n print_all = True if len(sys.argv) == 2 else (sys.argv[2].lower() == 'true')\n predict(json.loads(tweet_file.read()), print_all)\n",
"step-ids": [
6,
7,
10,
12,
13
]
}
|
[
6,
7,
10,
12,
13
] |
from odoo import models, fields, api
from datetime import datetime, timedelta
from odoo import exceptions
import logging
import math
_logger = logging.getLogger(__name__)
class BillOfLading(models.Model):
_name = 'freight.bol'
_description = 'Bill Of Lading'
_order = 'date_of_issue desc, write_date desc'
_rec_name = 'bol_no'
_inherit = ['mail.thread', 'mail.activity.mixin']
# Header
bol_status = fields.Selection([('01', 'Draft'), ('02', 'Original'), ('03', 'Surrender'), ('04', 'Telex Release')],
string="B/L Status", default="01", copy=False, track_visibility='onchange', store=True)
service_type = fields.Selection([('ocean', 'Ocean'), ('air', 'Air'), ('land', 'Land')], string="Shipment Mode",
default="ocean", track_visibility='onchange')
direction = fields.Selection([('import', 'Import'), ('export', 'Export')], string="Direction", default="export",
track_visibility='onchange')
cargo_type = fields.Selection([('fcl', 'FCL'), ('lcl', 'LCL')], string='Cargo Type', default="fcl",
track_visibility='onchange')
type_of_movement = fields.Selection([('cy-cy', 'CY/CY'), ('cy-cfs', 'CY/CFS'), ('cfs-cfs', 'CFS/CFS'), ('cfs-cy', 'CFS/CY')],
string='Type Of Movement', track_visibility='onchange')
booking_ref = fields.Many2one('freight.booking', string='Booking Job Ref', track_visibility='onchange',
copy=False, index=True)
no_of_original_bl = fields.Selection([('0', '0'), ('1', '1'), ('3', '3')], string="No Of original B/L",
default="0", track_visibility='onchange')
doc_form_no = fields.Char(string='Doc. Form No.', track_visibility='onchange')
service_contract_no = fields.Char(string='Service Contract No', track_visibility='onchange')
bol_no = fields.Char(string='HBL No', copy=False, readonly=True, index=True)
carrier_booking_no = fields.Char(string='Carrier Booking No', copy=False, readonly=True)
date_of_issue = fields.Date(string='Shipment Date', copy=False, default=datetime.now().date(),
track_visibility='onchange', index=True)
date_laden_on_board = fields.Date(string='Shipped on Board Date')
place_of_issue = fields.Char(string='Place of Issue', track_visibility='onchange')
export_reference = fields.Char(string='Export Reference', track_visibility='onchange')
fa_reference = fields.Char(string='Forwarding Agent and References', track_visibility='onchange')
point_country_origin = fields.Text(string='Point and Country of Origin', track_visibility='onchange')
term = fields.Char(string='Term', track_visibility='onchange', help='eg, CY-CY')
commodity = fields.Many2one('product.product', string='Commodity', track_visibility='onchange')
commodity1 = fields.Many2one('freight.commodity1', string='Commodity', track_visibility='onchange')
shipper_load = fields.Boolean('Shipper Load, Seal and Count')
analytic_account_id = fields.Many2one('account.analytic.account', string="Analytic Account",
track_visibility='always', copy=False)
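    # Default the commodity product category by looking up freight.product.category
    # rows typed 'commodity'; the first match's product_category is used.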
@api.multi
def _get_default_commodity_category(self):
commodity_lines = self.env['freight.product.category'].search([('type', '=ilike', 'commodity')])
for commodity_line in commodity_lines:
_logger.warning('_get_default_commodity_category=' + str(commodity_line.product_category))
return commodity_line.product_category
commodity_category_id = fields.Many2one('product.category', string="Commodity Product Id",
default=_get_default_commodity_category)
# Invoice Status
invoice_status = fields.Selection([('01', 'New'),
('02', 'Partially Invoiced'),
('03', 'Fully Invoiced')],
string="Invoice Status", default="01", copy=False,
track_visibility='onchange')
invoice_paid_status = fields.Selection([('01', 'New'),
('02', 'Partially Paid'),
('03', 'Fully Paid')],
string="Invoice Paid Status", default="01", copy=False,
track_visibility='onchange')
# Party Info
customer_name = fields.Many2one('res.partner', string='Customer Name', track_visibility='onchange')
contact_name = fields.Many2one('res.partner', string='Contact Name', track_visibility='onchange')
shipper = fields.Text(string='Shipper', track_visibility='onchange',
help="The Party who shipped the freight, eg Exporter")
notify_party = fields.Text(string='Notify Party',
help="The Party who will be notified by Liner when the freight arrived",
track_visibility='onchange')
carrier_c = fields.Many2one('res.partner', string="Carrier")
consignee = fields.Text(string='Consignee', help="The Party who received the freight", track_visibility='onchange')
routing_instruction = fields.Text(string='For Delivery Of Goods Please Apply To', track_visibility='onchange')
delivery_contact = fields.Text(string='Contact for Delivery', help="Contact information for delivery of goods",
track_visibility='onchange')
unstuff_at = fields.Char(string='Unstuff At', track_visibility='onchange')
# Shipment Info
voyage_no = fields.Char(string='Voyage No', track_visibility='onchange')
vessel = fields.Char(string='Vessel Name', track_visibility='onchange')
manifest_no = fields.Char(string='Manifest No', track_visibility='onchange')
port_of_loading_input = fields.Char(string='Port of Loading', track_visibility='onchange')
port_of_discharge_input = fields.Char(string='Port of Discharge', track_visibility='onchange')
port_of_discharge_eta = fields.Date(string='Loading ETA', track_visibility='onchange')
place_of_delivery = fields.Char(string='Final Destination', track_visibility='onchange')
place_of_receipt = fields.Char(string='Place of Receipt', track_visibility='onchange')
pre_carriage_by = fields.Char(string='Pre-Carriage By', track_visibility='onchange')
# Remark
note = fields.Text(string='Remarks', track_visibility='onchange')
# System Info
sales_person = fields.Many2one('res.users', string="Salesperson", track_visibility='onchange')
company_id = fields.Many2one('res.company', 'Company', required=True, index=True, readonly=1,
default=lambda self: self.env.user.company_id.id)
# analytic_account_id = fields.Many2one('account.analytic.account', string="Analytic Account",
# track_visibility='always', copy=False)
# Line Item
cargo_line_ids = fields.One2many('freight.bol.cargo', 'cargo_line', string="Cargo Line",
copy=True, auto_join=True, track_visibility='always')
charge_line_ids = fields.One2many('freight.bol.charge', 'charge_line', string="Charge Line",
copy=True, auto_join=True, track_visibility='always')
cost_profit_ids = fields.One2many('freight.bol.cost.profit', 'bol_id', string="Cost & Profit",
copy=True, auto_join=True, track_visibility='always')
# Not Used
invoice_count = fields.Integer(string='Invoice Count', compute='_get_invoiced_count', copy=False)
vendor_bill_count = fields.Integer(string='Vendor Bill Count', compute='_get_bill_count', copy=False)
si_count = fields.Integer(string='SI Count', compute='_get_si_count', copy=False)
shipper_c = fields.Many2one('res.partner', string='Shipper')
consignee_c = fields.Many2one('res.partner', string='Consignee Name')
notify_party_c = fields.Many2one('res.partner', string='Notify Party')
total_no_of_packages_words = fields.Char(string='Total Packages', track_visibility='onchange',
help='Total no of packages or container in Words')
lines_description = fields.Integer()
line_description1 = fields.Text()
line_description2 = fields.Text()
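    # Assign the HBL number from the 'hbl' ir.sequence when a B/L is created.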
@api.model
def create(self, vals):
vals['bol_no'] = self.env['ir.sequence'].next_by_code('hbl')
res = super(BillOfLading, self).create(vals)
return res
@api.multi
def name_get(self):
result = []
for bol in self:
name = str(bol.bol_no)
result.append((bol.id, name))
return result
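    # Open the mail composer preloaded with the B/L email template; the try/except
    # blocks fall back gracefully if the template or composer form is missing.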
@api.multi
def action_send_bl(self):
self.ensure_one()
ir_model_data = self.env['ir.model.data']
try:
template_id = \
ir_model_data.get_object_reference('sci_goexcel_freight', 'email_template_bol')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference('mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = {
'default_model': 'freight.bol',
'default_res_id': self.ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
'mark_so_as_sent': True,
'custom_layout': "mail.mail_notification_light",
# 'proforma': self.env.context.get('proforma', False),
'force_email': True
}
# base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
# ctx['action_url'] = "{}/web?db={}".format(base_url, self.env.cr.dbname)
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
@api.multi
def action_invoice(self):
self.ensure_one()
view = self.env.ref('sci_goexcel_freight.invoice_view_form')
return {
'name': 'Create Invoice',
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'invoice.wizard',
'views': [(view.id, 'form')],
'view_id': view.id,
'target': 'new',
'context': dict(bl_id=self.id),
}
@api.multi
def action_create_vendor_bill(self):
# only lines with vendor
vendor_po = self.cost_profit_ids.filtered(lambda c: c.vendor_id)
po_lines = vendor_po.sorted(key=lambda p: p.vendor_id.id)
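        # po_lines is sorted by vendor so that consecutive lines sharing a vendor
        # are grouped into a single draft vendor bill per vendor below.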
vendor_count = False
vendor_id = False
if not self.analytic_account_id:
values = {
'name': '%s' % self.booking_ref.booking_no,
'partner_id': self.booking_ref.customer_name.id,
'code': self.bol_no,
'company_id': self.booking_ref.company_id.id,
}
analytic_account = self.env['account.analytic.account'].sudo().create(values)
self.booking_ref.write({'analytic_account_id': analytic_account.id})
self.write({'analytic_account_id': analytic_account.id})
for line in po_lines:
if line.vendor_id != vendor_id:
vb = self.env['account.invoice']
vendor_count = True
vendor_id = line.vendor_id
value = []
vendor_bill_created = []
filtered_vb_lines = po_lines.filtered(lambda r: r.vendor_id == vendor_id)
for vb_line in filtered_vb_lines:
if not vb_line.billed:
account_id = False
price_after_converted = vb_line.cost_price * vb_line.cost_currency_rate
if vb_line.product_id.property_account_expense_id:
account_id = vb_line.product_id.property_account_expense_id
elif vb_line.product_id.categ_id.property_account_expense_categ_id:
account_id = vb_line.product_id.categ_id.property_account_expense_categ_id
value.append([0, 0, {
# 'invoice_id': vendor_bill.id or False,
'account_id': account_id.id or False,
'name': vb_line.product_id.name or '',
'product_id': vb_line.product_id.id or False,
'quantity': vb_line.cost_qty or 0.0,
'uom_id': vb_line.uom_id.id or False,
'price_unit': price_after_converted or 0.0,
'account_analytic_id': self.analytic_account_id.id,
'bl_line_id': vb_line.id,
}])
vendor_bill_created.append(vb_line)
vb_line.billed = True
# print('vendor_id=' + vendor_id.name)
if value:
vendor_bill_id = vb.create({
'type': 'in_invoice',
'invoice_line_ids': value,
'default_currency_id': self.env.user.company_id.currency_id.id,
'company_id': self.company_id.id,
'date_invoice': fields.Date.context_today(self),
'origin': self.bol_no,
'partner_id': vendor_id.id,
                    'account_id': vendor_id.property_account_payable_id.id or False,
'freight_booking': self.booking_ref.id,
})
for new_vendor_bill in vendor_bill_created:
new_vendor_bill.vendor_bill_id = vendor_bill_id.id
if vendor_count is False:
raise exceptions.ValidationError('No Vendor in Cost & Profit!!!')
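    # Sync this B/L back onto its booking job: header fields are overwritten and
    # the booking's operation lines are rebuilt from the B/L cargo lines.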
def action_copy_to_booking(self):
booking = self.env['freight.booking'].search([('id', '=', self.booking_ref.id),])
booking_val = {
'cargo_type': self.cargo_type or False,
'shipper_address_input': self.shipper,
'consignee_address_input': self.consignee,
'notify_party_address_input': self.notify_party,
'carrier_booking_no' : self.carrier_booking_no or False,
'voyage_no': self.voyage_no,
'port_of_loading_input': self.port_of_loading_input,
'port_of_discharge_input': self.port_of_discharge_input,
'place_of_delivery': self.place_of_delivery,
'note': self.note,
'bol_status': self.bol_status,
'no_of_original_bl': self.no_of_original_bl,
'carrier': self.carrier_c.id,
}
booking.sudo().write(booking_val)
for booking_line in booking.operation_line_ids:
booking_line.sudo().unlink()
for booking_line in booking.operation_line_ids2:
booking_line.sudo().unlink()
for line in self.cargo_line_ids:
if self.cargo_type == 'fcl':
if line.container_product_name:
operation_line_obj = self.env['freight.operations.line']
op_line = operation_line_obj.create({
'operation_id': booking.id,
'container_no': line.container_no or '',
'container_product_id': line.container_product_id.id or False,
'seal_no': line.seal_no or '',
'container_product_name': line.container_product_name or '',
'packages_no': line.packages_no_value or '',
'packages_no_uom': line.packages_no_uom.id,
'exp_net_weight': line.exp_net_weight or '',
'exp_gross_weight': line.exp_gross_weight or '',
'dim_length': line.dim_length or '',
'dim_width': line.dim_width or '',
'dim_height': line.dim_height or '',
'exp_vol': line.exp_vol or '',
'remark': line.marks or '',
})
booking.operation_line_ids = op_line
else:
if line.container_product_name:
operation_line_obj = self.env['freight.operations.line2']
op_line = operation_line_obj.create({
'operation_id2': booking.id,
'container_no': line.container_no or '',
'container_product_id': line.container_product_id.id or False,
'seal_no': line.seal_no or '',
'container_product_name': line.container_product_name or '',
'packages_no': line.packages_no_value or '',
'packages_no_uom': line.packages_no_uom.id,
'exp_net_weight': line.exp_net_weight or '',
'exp_gross_weight': line.exp_gross_weight or '',
'dim_length': line.dim_length or '',
'dim_width': line.dim_width or '',
'dim_height': line.dim_height or '',
'exp_vol': line.exp_vol or '',
'shipping_mark': line.marks or '',
})
booking.operation_line_ids2 = op_line
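    # Copy the cost & profit lines of the linked booking job onto this B/L.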
def action_copy_from_booking(self):
booking = self.env['freight.booking'].search([('id', '=', self.booking_ref.id)])
for line in booking.cost_profit_ids:
operation_line_obj = self.env['freight.bol.cost.profit']
op_line = operation_line_obj.create({
'bol_id': self.id,
'product_id': line.product_id.id or False,
'product_name': line.product_name or '',
'profit_qty': line.profit_qty or 0,
'list_price': line.list_price or 0,
'profit_amount': line.profit_amount or 0,
'profit_currency': line.profit_currency.id or False,
'profit_currency_rate': line.profit_currency_rate or 0,
'cost_qty': line.cost_qty or 0,
'cost_price': line.cost_price or 0,
'cost_amount': line.cost_amount or 0,
'vendor_id': line.vendor_id.id or False,
'cost_currency': line.cost_currency.id or False,
'cost_currency_rate': line.cost_currency_rate or 0,
})
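    # Build a Shipping Instruction from this B/L, adding FCL or LCL line items
    # according to the cargo type.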
def action_create_si(self):
si_obj = self.env['freight.website.si']
si_val = {
'si_status': '01',
'carrier': self.carrier_c.id or False,
'direction': self.direction or False,
'cargo_type': self.cargo_type or False,
'service_type': self.service_type or False,
'customer_name': self.customer_name.id or False,
'shipper': self.shipper,
'consignee': self.consignee,
'notify_party': self.notify_party,
'carrier_booking_ref': self.carrier_booking_no,
'voyage_no': self.voyage_no,
'port_of_loading_input': self.port_of_loading_input,
'port_of_discharge_input': self.port_of_discharge_input,
'place_of_delivery': self.place_of_delivery,
'bl_ref': self.id,
}
si = si_obj.create(si_val)
if self.cargo_type == 'fcl':
container_line = self.cargo_line_ids
si_line_obj = self.env['freight.website.si.fcl']
for line in container_line:
if line.container_product_id or line.container_no:
si_line = si_line_obj.create({
'container_product_id': line.container_product_id.id or False,
'container_product_name': line.container_product_name or False,
'fcl_line': si.id or '',
'container_no': line.container_no or '',
'packages_no': line.packages_no_value or 0.0,
'packages_no_uom': line.packages_no_uom.id,
'exp_gross_weight': line.exp_gross_weight or 0.0,
'exp_vol': line.exp_vol or 0.0,
})
si.write({'fcl_line_ids': si_line or False})
else:
container_line = self.cargo_line_ids
si_line_obj = self.env['freight.website.si.lcl']
for line in container_line:
if line.container_product_id or line.container_no:
si_line = si_line_obj.create({
'container_product_name': line.container_product_name or False,
#'container_product_id': line.container_commodity_id.id or False,
'lcl_line': si.id or '',
'container_no': line.container_no or '',
'packages_no': line.packages_no_value or 0.0,
'packages_no_uom': line.packages_no_uom.id,
'exp_gross_weight': line.exp_gross_weight or 0.0,
'exp_net_weight': line.exp_net_weight or 0.0,
'exp_vol': line.exp_vol or 0.0,
# 'remark_line': line.remark or '',
})
si.write({'lcl_line_ids': si_line or False})
@api.multi
def operation_invoices(self):
"""Show Invoice for specific Freight Operation smart Button."""
for operation in self:
invoices = self.env['account.invoice'].search([
('freight_hbl', '=', operation.id),
('type', 'in', ['out_invoice', 'out_refund']),
('state', '!=', 'cancel'),
])
action = self.env.ref('account.action_invoice_tree1').read()[0]
if len(invoices) > 1:
action['domain'] = [('id', 'in', invoices.ids)]
elif len(invoices) == 1:
action['views'] = [(self.env.ref('account.invoice_form').id, 'form')]
action['res_id'] = invoices.ids[0]
else:
action = {'type': 'ir.actions.act_window_close'}
return action
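    # Smart-button action: collect vendor bills referenced on the cost/profit
    # lines plus bills created directly against this HBL, de-duplicate, and open them.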
@api.multi
def operation_bill(self):
for operation in self:
# Get from the vendor bill list
vendor_bill_list = []
for cost_profit_line in operation.cost_profit_ids:
for vendor_bill_line in cost_profit_line.vendor_bill_ids:
if vendor_bill_line.type in ['in_invoice', 'in_refund']:
vendor_bill_list.append(vendor_bill_line.id)
invoices = self.env['account.invoice'].search([
('freight_hbl', '=', operation.id),
('type', 'in', ['in_invoice', 'in_refund']),
('state', '!=', 'cancel'),
])
invoice_name_list = []
for x in invoices:
invoice_name_list.append(x.id)
unique_list = []
for y in vendor_bill_list:
if invoice_name_list and len(invoice_name_list) > 0:
if y not in invoice_name_list:
unique_list.append(y)
else:
unique_list.append(y)
for z in invoice_name_list:
# if z not in vendor_bill_list:
unique_list.append(z)
if len(unique_list) > 1:
views = [(self.env.ref('account.invoice_supplier_tree').id, 'tree'),
(self.env.ref('account.invoice_supplier_form').id, 'form')]
return {
'name': 'Vendor bills',
'view_type': 'form',
'view_mode': 'tree,form',
# 'view_id': self.env.ref('account.invoice_supplier_tree').id,
'view_id': False,
'res_model': 'account.invoice',
'views': views,
# 'context': "{'type':'in_invoice'}",
'domain': [('id', 'in', unique_list)],
'type': 'ir.actions.act_window',
# 'target': 'new',
}
elif len(unique_list) == 1:
# print('in vendor bill length =1')
return {
# 'name': self.booking_no,
'view_type': 'form',
'view_mode': 'form',
'res_model': 'account.invoice',
'res_id': unique_list[0] or False, # readonly mode
# 'domain': [('id', 'in', purchase_order.ids)],
'type': 'ir.actions.act_window',
'target': 'popup', # readonly mode
}
@api.multi
def operation_si(self):
for operation in self:
si = self.env['freight.website.si'].search([('bl_ref', '=', operation.id), ])
if len(si) > 1:
views = [(self.env.ref('sci_goexcel_freight.view_tree_si').id, 'tree'),
(self.env.ref('sci_goexcel_freight.view_form_si').id, 'form')]
return {
'name': 'Shipping Instruction',
'view_type': 'form',
'view_mode': 'tree,form',
'view_id': False,
'res_model': 'freight.website.si',
'views': views,
'domain': [('id', 'in', si.ids)],
'type': 'ir.actions.act_window',
}
elif len(si) == 1:
return {
'view_type': 'form',
'view_mode': 'form',
'res_model': 'freight.website.si',
'res_id': si.id or False,
'type': 'ir.actions.act_window',
'target': 'popup', # readonly mode
}
else:
action = {'type': 'ir.actions.act_window_close'}
return action
def _get_invoiced_count(self):
for operation in self:
invoices = self.env['account.invoice'].search([
('freight_hbl', '=', operation.id),
('type', 'in', ['out_invoice','out_refund']),
('state', '!=', 'cancel'),
])
            operation.update({
'invoice_count': len(invoices),
#'invoice_ids': invoices,
})
def _get_bill_count(self):
# vendor bill is created from booking job, vendor bill header will have the booking job id
for operation in self:
# Get from the vendor bill list
vendor_bill_list = []
# vendor_bill_list_temp = []
for cost_profit_line in operation.cost_profit_ids:
for vendor_bill_line in cost_profit_line.vendor_bill_ids:
if vendor_bill_line.type in ['in_invoice', 'in_refund']:
vendor_bill_list.append(vendor_bill_line.id)
# vendor_bill_list_temp.append(vendor_bill_line.id)
# print('vendor_bill_list: ', len(vendor_bill_list))
# remove the duplicates in the vendor bill list
unique_vendor_bill_list = []
for i in vendor_bill_list:
if i not in unique_vendor_bill_list:
unique_vendor_bill_list.append(i)
# print('unique_vendor_bill_list: ', len(unique_vendor_bill_list))
# Get the vendor list (Create the vendor from the job)
invoices = self.env['account.invoice'].search([
('freight_hbl', '=', operation.id),
('type', 'in', ['in_invoice', 'in_refund']),
('state', '!=', 'cancel'),
])
# print('vendor bills:', len(invoices))
invoice_name_list = []
for x in invoices:
invoice_name_list.append(x.id)
unique_list = []
# for x in invoices:
# invoice_name_list.append(x.vendor_bill_id.id)
# unique_list = []
for y in unique_vendor_bill_list:
if invoice_name_list and len(invoice_name_list) > 0:
if y not in invoice_name_list:
unique_list.append(y)
else:
unique_list.append(y)
for z in invoice_name_list:
# if z not in vendor_bill_list:
unique_list.append(z)
if len(unique_list) > 0:
                operation.update({
'vendor_bill_count': len(unique_list),
})
def _get_si_count(self):
for operation in self:
si = self.env['freight.website.si'].search([
('bl_ref', '=', operation.id),
])
            operation.update({
'si_count': len(si),
})
# TS - add for Purchase Receipt
purchase_receipt_count = fields.Integer(string='Purchase Receipt Count', compute='_get_pr_count', copy=False)
def _get_pr_count(self):
# get purchase receipt (Account Voucher) on the lines
for operation in self:
# Get PR list
pr_lines = self.env['account.voucher.line'].search([
('freight_hbl', '=', operation.id),
])
pr_list = []
for pr_line in pr_lines:
if pr_line.voucher_id.state != 'cancel' and pr_line.voucher_id.voucher_type == 'purchase':
pr_list.append(pr_line.voucher_id.id)
# pr_name_list = []
# for x in pr_list:
# pr_name_list.append(x.id)
unique_list = []
for i in pr_list:
if i not in unique_list:
unique_list.append(i)
if len(unique_list) > 0:
                operation.update({
'purchase_receipt_count': len(unique_list),
})
@api.multi
def operation_pr(self):
        for operation in self:
# Get PR list
pr_lines = self.env['account.voucher.line'].search([
('freight_hbl', '=', operation.id),
])
pr_list = []
for pr_line in pr_lines:
if pr_line.voucher_id.state != 'cancel' and pr_line.voucher_id.voucher_type == 'purchase':
pr_list.append(pr_line.voucher_id.id)
# pr_name_list = []
# for x in pr_list:
# pr_name_list.append(x.id)
unique_list = []
for i in pr_list:
if i not in unique_list:
unique_list.append(i)
if len(unique_list) > 1:
views = [(self.env.ref('account_voucher.view_voucher_tree').id, 'tree'),
(self.env.ref('account_voucher.view_purchase_receipt_form').id, 'form')]
return {
'name': 'Purchase Receipt',
'view_type': 'form',
'view_mode': 'tree,form',
# 'view_id': self.env.ref('account.invoice_supplier_tree').id,
'view_id': False,
'res_model': 'account.voucher',
'views': views,
# 'context': "{'type':'in_invoice'}",
'domain': [('id', 'in', unique_list)],
'type': 'ir.actions.act_window',
# 'target': 'new',
}
elif len(unique_list) == 1:
# print('in vendor bill length =1')
return {
# 'name': self.booking_no,
'view_type': 'form',
'view_mode': 'form',
'res_model': 'account.voucher',
'res_id': unique_list[0] or False, # readonly mode
# 'domain': [('id', 'in', purchase_order.ids)],
'type': 'ir.actions.act_window',
'target': 'popup', # readonly mode
}
class CargoLine(models.Model):
_name = 'freight.bol.cargo'
_description = 'Cargo Line'
cargo_line = fields.Many2one('freight.bol', string='Cargo Line', required=True, ondelete='cascade',
index=True, copy=False)
sequence = fields.Integer(string="sequence")
marks = fields.Text(string='Marks and Numbers')
container_no = fields.Char(string="Container No.")
container_product_id = fields.Many2one('product.product', string='Container', track_visibility='onchange')
seal_no = fields.Char(string="Seal No.")
container_product_name = fields.Text(string='Description of Goods')
packages_no_value = fields.Integer(string="No. of Packages")
packages_no_uom = fields.Many2one('uom.uom', string="UoM")
exp_net_weight = fields.Float(string="Net Weight(KG)", help="Expected Weight in kg.", track_visibility='onchange')
exp_gross_weight = fields.Float(string="Gross Weight(KG)", digits=(12, 4), help="Expected Weight in kg.")
dim_length = fields.Float(string='Length', help="Length in cm", default="0.00", track_visibility='onchange')
dim_width = fields.Float(string='Width', default="0.00", help="Width in cm", track_visibility='onchange')
dim_height = fields.Float(string='Height', default="0.00", help="Height in cm", track_visibility='onchange')
exp_vol = fields.Float(string="Measurement (M3)", digits=(12, 4),
help="Expected Volume in m3 Measure")
packages_no = fields.Char(string="No. of Packages")
@api.multi
def _get_default_container_category(self):
container_lines = self.env['freight.product.category'].search([('type', '=ilike', 'container')])
for container_line in container_lines:
# _logger.warning('_get_default_container_category=' + str(container_line.product_category))
return container_line.product_category
container_category_id = fields.Many2one('product.category', string="Container Product Id",
default=_get_default_container_category)
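    # Split the goods description into blocks of up to 20 lines, stored on the
    # parent B/L as line_description1/line_description2, presumably so the
    # printed B/L report can paginate long descriptions.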
@api.onchange('container_product_name')
def _onchange_description(self):
bl = self.env['freight.bol'].search([('bol_no', '=', self.cargo_line.bol_no)])
if self.container_product_name:
lines_description = self.container_product_name.count('\n')/20
lines_description = math.ceil(lines_description)
x = self.container_product_name.split('\n')
count = 0
line_description1 = ''
line_description2 = ''
for line in x:
if count < 20:
line_description1 = line_description1 + line + '\n'
count = count + 1
else:
line_description2 = line_description2 + line + '\n'
count = count + 1
bl.write({'lines_description': lines_description,
'line_description1': line_description1,
'line_description2': line_description2,
})
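    # create()/write() below mirror the submitted values into the parent B/L
    # chatter as a bulleted message.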
@api.model
def create(self, vals):
# _logger.warning("in create")
res = super(CargoLine, self).create(vals)
content = ""
if vals.get("marks"):
content = content + " \u2022 Marks and Numbers: " + str(vals.get("marks")) + "<br/>"
if vals.get("container_product_name"):
content = content + " \u2022 Description of Goods: " + str(vals.get("container_product_name")) + "<br/>"
if vals.get("packages_no"):
content = content + " \u2022 No. of Packages: " + str(vals.get("packages_no")) + "<br/>"
if vals.get("seal_no"):
content = content + " \u2022 Seal no: " + str(vals.get("seal_no")) + "<br/>"
if vals.get("container_no"):
content = content + " \u2022 Container No.: " + str(vals.get("container_no")) + "<br/>"
if vals.get("exp_gross_weight"):
content = content + " \u2022 Gross Weight(KG): " + str(vals.get("exp_gross_weight")) + "<br/>"
if vals.get("exp_vol"):
content = content + " \u2022 Measurement (M3): " + str(vals.get("exp_vol")) + "<br/>"
res.cargo_line.message_post(body=content)
return res
@api.multi
def write(self, vals):
# _logger.warning("in write")
res = super(CargoLine, self).write(vals)
# _logger.warning("after super write")
content = ""
if vals.get("marks"):
content = content + " \u2022 Marks and Numbers: " + str(vals.get("marks")) + "<br/>"
if vals.get("container_product_name"):
content = content + " \u2022 Description of Goods: " + str(vals.get("container_product_name")) + "<br/>"
if vals.get("packages_no"):
content = content + " \u2022 No. of Packages: " + str(vals.get("packages_no")) + "<br/>"
if vals.get("seal_no"):
content = content + " \u2022 Seal no: " + str(vals.get("seal_no")) + "<br/>"
if vals.get("container_no"):
content = content + " \u2022 Container No.: " + str(vals.get("container_no")) + "<br/>"
if vals.get("exp_gross_weight"):
content = content + " \u2022 Gross Weight(KG): " + str(vals.get("exp_gross_weight")) + "<br/>"
if vals.get("exp_vol"):
content = content + " \u2022 Measurement (M3): " + str(vals.get("exp_vol")) + "<br/>"
self.cargo_line.message_post(body=content)
return res
class ChargeLine(models.Model):
_name = 'freight.bol.charge'
_description = 'Charge Line'
charge_line = fields.Many2one('freight.bol', string='Charge Line', required=True, ondelete='cascade',
index=True, copy=False)
sequence = fields.Integer(string="sequence")
freight_charges = fields.Text(string='Freight & Charges')
rate = fields.Char(string='Rate')
per = fields.Char(string="Per")
amount = fields.Char(string="Amount")
prepaid = fields.Char(string="Prepaid")
collect = fields.Char(string="Collect")
payable_at_by = fields.Char(string="Payable at/by")
# fcl_container_qty = fields.Float(string="Qty", digits=(8, 0), track_visibility='onchange')
revenue_tons = fields.Char(string='Revenue Tons')
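    # As with cargo lines, create()/write() log the charge values to the
    # parent B/L chatter.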
@api.model
def create(self, vals):
# _logger.warning("in create")
res = super(ChargeLine, self).create(vals)
content = ""
if vals.get("freight_charges"):
content = content + " \u2022 Freight & Charges: " + str(vals.get("freight_charges")) + "<br/>"
if vals.get("revenue_tons"):
content = content + " \u2022 Revenue Tons: " + str(vals.get("revenue_tons")) + "<br/>"
if vals.get("rate"):
content = content + " \u2022 Rate: " + str(vals.get("rate")) + "<br/>"
if vals.get("per"):
content = content + " \u2022 Per: " + str(vals.get("per")) + "<br/>"
if vals.get("amount"):
content = content + " \u2022 Amount: " + str(vals.get("amount")) + "<br/>"
if vals.get("prepaid"):
content = content + " \u2022 Prepaid: " + str(vals.get("prepaid")) + "<br/>"
if vals.get("collect"):
content = content + " \u2022 Collect: " + str(vals.get("collect")) + "<br/>"
if vals.get("payable_at_by"):
content = content + " \u2022 Payable at/by: " + str(vals.get("payable_at_by")) + "<br/>"
res.charge_line.message_post(body=content)
return res
@api.multi
def write(self, vals):
# _logger.warning("in write")
res = super(ChargeLine, self).write(vals)
# _logger.warning("after super write")
content = ""
if vals.get("freight_charges"):
content = content + " \u2022 Freight & Charges: " + str(vals.get("freight_charges")) + "<br/>"
if vals.get("revenue_tons"):
content = content + " \u2022 Revenue Tons: " + str(vals.get("revenue_tons")) + "<br/>"
if vals.get("rate"):
content = content + " \u2022 Rate: " + str(vals.get("rate")) + "<br/>"
if vals.get("per"):
content = content + " \u2022 Per: " + str(vals.get("per")) + "<br/>"
if vals.get("amount"):
content = content + " \u2022 Amount: " + str(vals.get("amount")) + "<br/>"
if vals.get("prepaid"):
content = content + " \u2022 Prepaid: " + str(vals.get("prepaid")) + "<br/>"
if vals.get("collect"):
content = content + " \u2022 Collect: " + str(vals.get("collect")) + "<br/>"
if vals.get("payable_at_by"):
content = content + " \u2022 Payable at/by: " + str(vals.get("payable_at_by")) + "<br/>"
self.charge_line.message_post(body=content)
return res
class CostProfit(models.Model):
_name = 'freight.bol.cost.profit'
_description = "BOL Cost & Profit"
sequence = fields.Integer(string="sequence")
bol_id = fields.Many2one('freight.bol', string='BOL ID', required=True, ondelete='cascade',
index=True, copy=False)
product_id = fields.Many2one('product.product', string="Product")
product_name = fields.Text(string="Description")
#Profit
#profit_qty = fields.Integer(string='Qty', default="1")
#profit_qty = fields.Float(string='Qty', default="1", digits=(12, 2))
list_price = fields.Float(string="Unit Price")
uom_id = fields.Many2one('uom.uom', string="UoM")
profit_gst = fields.Selection([('zer', 'ZER')], string="GST", default="zer", track_visibility='onchange')
tax_id = fields.Many2many('account.tax', string='Taxes', domain=['|', ('active', '=', False), ('active', '=', True)])
profit_currency = fields.Many2one('res.currency', 'Currency',
default=lambda self: self.env.user.company_id.currency_id.id, track_visibility='onchange')
profit_currency_rate = fields.Float(string='Rate', default="1.00", track_visibility='onchange')
profit_amount = fields.Float(string="Amt", compute="_compute_profit_amount", store=True, track_visibility='onchange')
sale_total = fields.Float(string="Total Sales", compute="_compute_sale_total", store=True, track_visibility='onchange')
#Cost
#cost_qty = fields.Integer(string='Qty', default="1", track_visibility='onchange')
profit_qty = fields.Float(string='Qty', default="1.000", digit=(12, 3))
cost_qty = fields.Float(string='Qty', default="1.000", digit=(12, 3))
cost_price = fields.Float(string="Unit Price", track_visibility='onchange')
cost_gst = fields.Selection([('zer', 'ZER')], string="Tax", default="zer", track_visibility='onchange')
vendor_id = fields.Many2one('res.partner', string="Vendor", track_visibility='onchange')
vendor_bill_id = fields.Many2one('account.invoice', string="Vendor Bill")
cost_currency = fields.Many2one('res.currency', string="Curr", required=True,
default=lambda self: self.env.user.company_id.currency_id.id, track_visibility='onchange')
cost_currency_rate = fields.Float(string='Rate', default="1.00", track_visibility='onchange')
cost_amount = fields.Float(string="Amt",
compute="_compute_cost_amount", store=True, track_visibility='onchange')
cost_total = fields.Float(string="Total Cost",
compute="_compute_cost_total", store=True, track_visibility='onchange')
# Invoice & Bill
billed = fields.Boolean(string='Billed', copy=False)
    is_billed = fields.Char('Is Billed?', compute='_compute_is_billed', store=True)
added_to_invoice = fields.Boolean(string='Invoiced', copy=False)
invoice_paid = fields.Boolean(string='Invoice Paid', copy=False)
paid = fields.Boolean(string='Paid', copy=False)
is_paid = fields.Char('Is Paid?', compute='_compute_is_paid', store=True)
invoice_id = fields.Many2one('account.invoice', string="Invoice")
inv_line_id = fields.Many2one('account.invoice.line', string="Invoice Line")
bill_id = fields.Many2one('account.invoice', string="Bill")
bill_line_id = fields.Many2one('account.invoice.line', string="Bill Line")
route_service = fields.Boolean(string='Is Route Service', default=False)
profit_total = fields.Float(string="Total Profit", compute="_compute_profit_total", store=True)
margin_total = fields.Float(string="Margin %", compute="_compute_margin_total", digits=(8,2), store=True, group_operator="avg")
vendor_id_ids = fields.Many2many('res.partner', string="Vendor List", copy=False)
vendor_bill_ids = fields.Many2many('account.invoice', string="Vendor Bill List", copy=False)
@api.one
def _set_access_for_invoiced(self):
if self.env['res.users'].has_group('account.group_account_manager'):
self.invoiced_readonly = False
else:
self.invoiced_readonly = True
invoiced_readonly = fields.Boolean(compute="_set_access_for_invoiced",
string='Is user able to modify invoiced?')
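    # Derived amounts: amount = qty * unit price; totals apply the currency rate;
    # profit = total sales - total cost.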
@api.depends('profit_qty', 'list_price')
def _compute_profit_amount(self):
for service in self:
if service.product_id:
service.profit_amount = service.profit_qty * service.list_price or 0.0
@api.depends('cost_qty', 'cost_price')
def _compute_cost_amount(self):
for service in self:
if service.product_id:
service.cost_amount = service.cost_qty * service.cost_price or 0.0
@api.depends('profit_amount', 'profit_currency_rate')
def _compute_sale_total(self):
for service in self:
if service.product_id:
service.sale_total = service.profit_amount * service.profit_currency_rate or 0.0
@api.onchange('profit_currency_rate')
def _onchange_profit_currency_rate(self):
for service in self:
if service.product_id:
service.sale_total = service.profit_amount * service.profit_currency_rate or 0.0
@api.onchange('profit_amount')
def _onchange_profit_amount(self):
for service in self:
if service.product_id:
service.sale_total = service.profit_amount * service.profit_currency_rate or 0.0
service.profit_total = service.sale_total - service.cost_total or 0.0
@api.depends('cost_amount', 'cost_currency_rate')
def _compute_cost_total(self):
for service in self:
if service.product_id:
service.cost_total = service.cost_amount * service.cost_currency_rate or 0.0
service.profit_total = service.sale_total - service.cost_total or 0.0
@api.depends('cost_total', 'sale_total')
def _compute_profit_total(self):
for service in self:
if service.product_id:
service.profit_total = service.sale_total - service.cost_total or 0.0
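    # Margin is profit over sales expressed as a percentage, guarded against
    # division by zero when there are no sales.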
@api.depends('profit_total', 'sale_total')
def _compute_margin_total(self):
for service in self:
if service.product_id:
if service.sale_total > 0:
service.margin_total = service.profit_total / service.sale_total * 100
@api.onchange('cost_amount')
def _onchange_cost_amount(self):
for service in self:
if service.product_id:
service.cost_total = service.cost_amount * service.cost_currency_rate or 0.0
service.profit_total = service.sale_total - service.cost_total or 0.0
@api.onchange('cost_currency_rate')
def _onchange_cost_currency_rate(self):
for service in self:
if service.product_id:
service.cost_total = service.cost_amount * service.cost_currency_rate or 0.0
service.profit_total = service.sale_total - service.cost_total or 0.0
@api.onchange('product_id')
def _onchange_product_id(self):
if not self.product_id:
return {'domain': {'uom_id': []}}
vals = {}
domain = {'uom_id': [('category_id', '=', self.product_id.uom_id.category_id.id)]}
if not self.uom_id or (self.product_id.uom_id.id != self.uom_id.id):
vals['uom_id'] = self.product_id.uom_id
vals['product_name'] = self.product_id.name
self.update(vals)
if self.product_id:
self.update({
'list_price': self.product_id.list_price or 0.0,
'cost_price': self.product_id.standard_price or 0.0
            })
        return {'domain': domain}
@api.onchange('vendor_id')
def _onchange_vendor_id(self):
print('OnChange Vendor_ID')
if self.vendor_id:
if not self.billed:
self.billed = False
print('Invoiced False')
@api.multi
@api.depends('billed')
def _compute_is_billed(self):
for cost_profit_line in self:
if cost_profit_line.vendor_id:
if cost_profit_line.billed:
cost_profit_line.is_billed = 'Y'
elif not cost_profit_line.billed:
cost_profit_line.is_billed = 'N'
@api.multi
@api.depends('paid')
def _compute_is_paid(self):
for cost_profit_line in self:
if cost_profit_line.vendor_id:
if cost_profit_line.paid:
cost_profit_line.is_paid = 'Y'
elif not cost_profit_line.paid:
cost_profit_line.is_paid = 'N'
|
normal
|
{
"blob_id": "f8e287abc7e1a2af005aa93c25d95ce770e29bf9",
"index": 7378,
"step-1": "<mask token>\n\n\nclass BillOfLading(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @api.multi\n def _get_default_commodity_category(self):\n commodity_lines = self.env['freight.product.category'].search([(\n 'type', '=ilike', 'commodity')])\n for commodity_line in commodity_lines:\n _logger.warning('_get_default_commodity_category=' + str(\n commodity_line.product_category))\n return commodity_line.product_category\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def action_copy_from_booking(self):\n booking = self.env['freight.booking'].search([('id', '=', self.\n booking_ref.id)])\n for line in booking.cost_profit_ids:\n operation_line_obj = self.env['freight.bol.cost.profit']\n op_line = operation_line_obj.create({'bol_id': self.id,\n 'product_id': line.product_id.id or False, 'product_name': \n line.product_name or '', 'profit_qty': line.profit_qty or 0,\n 'list_price': line.list_price or 0, 'profit_amount': line.\n profit_amount or 0, 'profit_currency': line.profit_currency\n .id or False, 'profit_currency_rate': line.\n profit_currency_rate or 0, 'cost_qty': line.cost_qty or 0,\n 'cost_price': line.cost_price or 0, 'cost_amount': line.\n cost_amount or 0, 'vendor_id': line.vendor_id.id or False,\n 'cost_currency': line.cost_currency.id or False,\n 'cost_currency_rate': line.cost_currency_rate or 0})\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass CargoLine(models.Model):\n _name = 'freight.bol.cargo'\n _description = 'Cargo Line'\n cargo_line = fields.Many2one('freight.bol', string='Cargo Line',\n required=True, ondelete='cascade', index=True, copy=False)\n sequence = fields.Integer(string='sequence')\n marks = fields.Text(string='Marks and Numbers')\n container_no = fields.Char(string='Container No.')\n container_product_id = fields.Many2one('product.product', string=\n 'Container', track_visibility='onchange')\n seal_no = fields.Char(string='Seal No.')\n container_product_name = fields.Text(string='Description of Goods')\n packages_no_value = fields.Integer(string='No. 
of Packages')\n packages_no_uom = fields.Many2one('uom.uom', string='UoM')\n exp_net_weight = fields.Float(string='Net Weight(KG)', help=\n 'Expected Weight in kg.', track_visibility='onchange')\n exp_gross_weight = fields.Float(string='Gross Weight(KG)', digits=(12, \n 4), help='Expected Weight in kg.')\n dim_length = fields.Float(string='Length', help='Length in cm', default\n ='0.00', track_visibility='onchange')\n dim_width = fields.Float(string='Width', default='0.00', help=\n 'Width in cm', track_visibility='onchange')\n dim_height = fields.Float(string='Height', default='0.00', help=\n 'Height in cm', track_visibility='onchange')\n exp_vol = fields.Float(string='Measurement (M3)', digits=(12, 4), help=\n 'Expected Volume in m3 Measure')\n packages_no = fields.Char(string='No. of Packages')\n\n @api.multi\n def _get_default_container_category(self):\n container_lines = self.env['freight.product.category'].search([(\n 'type', '=ilike', 'container')])\n for container_line in container_lines:\n return container_line.product_category\n container_category_id = fields.Many2one('product.category', string=\n 'Container Product Id', default=_get_default_container_category)\n\n @api.onchange('container_product_name')\n def _onchange_description(self):\n bl = self.env['freight.bol'].search([('bol_no', '=', self.\n cargo_line.bol_no)])\n if self.container_product_name:\n lines_description = self.container_product_name.count('\\n') / 20\n lines_description = math.ceil(lines_description)\n x = self.container_product_name.split('\\n')\n count = 0\n line_description1 = ''\n line_description2 = ''\n for line in x:\n if count < 20:\n line_description1 = line_description1 + line + '\\n'\n count = count + 1\n else:\n line_description2 = line_description2 + line + '\\n'\n count = count + 1\n bl.write({'lines_description': lines_description,\n 'line_description1': line_description1, 'line_description2':\n line_description2})\n\n @api.model\n def create(self, vals):\n res = super(CargoLine, self).create(vals)\n content = ''\n if vals.get('marks'):\n content = content + ' • Marks and Numbers: ' + str(vals.get(\n 'marks')) + '<br/>'\n if vals.get('container_product_name'):\n content = content + ' • Description of Goods: ' + str(vals.get\n ('container_product_name')) + '<br/>'\n if vals.get('packages_no'):\n content = content + ' • No. of Packages: ' + str(vals.get(\n 'packages_no')) + '<br/>'\n if vals.get('seal_no'):\n content = content + ' • Seal no: ' + str(vals.get('seal_no')\n ) + '<br/>'\n if vals.get('container_no'):\n content = content + ' • Container No.: ' + str(vals.get(\n 'container_no')) + '<br/>'\n if vals.get('exp_gross_weight'):\n content = content + ' • Gross Weight(KG): ' + str(vals.get(\n 'exp_gross_weight')) + '<br/>'\n if vals.get('exp_vol'):\n content = content + ' • Measurement (M3): ' + str(vals.get(\n 'exp_vol')) + '<br/>'\n res.cargo_line.message_post(body=content)\n return res\n\n @api.multi\n def write(self, vals):\n res = super(CargoLine, self).write(vals)\n content = ''\n if vals.get('marks'):\n content = content + ' • Marks and Numbers: ' + str(vals.get(\n 'marks')) + '<br/>'\n if vals.get('container_product_name'):\n content = content + ' • Description of Goods: ' + str(vals.get\n ('container_product_name')) + '<br/>'\n if vals.get('packages_no'):\n content = content + ' • No. 
of Packages: ' + str(vals.get(\n 'packages_no')) + '<br/>'\n if vals.get('seal_no'):\n content = content + ' • Seal no: ' + str(vals.get('seal_no')\n ) + '<br/>'\n if vals.get('container_no'):\n content = content + ' • Container No.: ' + str(vals.get(\n 'container_no')) + '<br/>'\n if vals.get('exp_gross_weight'):\n content = content + ' • Gross Weight(KG): ' + str(vals.get(\n 'exp_gross_weight')) + '<br/>'\n if vals.get('exp_vol'):\n content = content + ' • Measurement (M3): ' + str(vals.get(\n 'exp_vol')) + '<br/>'\n self.cargo_line.message_post(body=content)\n return res\n\n\nclass ChargeLine(models.Model):\n _name = 'freight.bol.charge'\n _description = 'Charge Line'\n charge_line = fields.Many2one('freight.bol', string='Charge Line',\n required=True, ondelete='cascade', index=True, copy=False)\n sequence = fields.Integer(string='sequence')\n freight_charges = fields.Text(string='Freight & Charges')\n rate = fields.Char(string='Rate')\n per = fields.Char(string='Per')\n amount = fields.Char(string='Amount')\n prepaid = fields.Char(string='Prepaid')\n collect = fields.Char(string='Collect')\n payable_at_by = fields.Char(string='Payable at/by')\n revenue_tons = fields.Char(string='Revenue Tons')\n\n @api.model\n def create(self, vals):\n res = super(ChargeLine, self).create(vals)\n content = ''\n if vals.get('freight_charges'):\n content = content + ' • Freight & Charges: ' + str(vals.get(\n 'freight_charges')) + '<br/>'\n if vals.get('revenue_tons'):\n content = content + ' • Revenue Tons: ' + str(vals.get(\n 'revenue_tons')) + '<br/>'\n if vals.get('rate'):\n content = content + ' • Rate: ' + str(vals.get('rate')) + '<br/>'\n if vals.get('per'):\n content = content + ' • Per: ' + str(vals.get('per')) + '<br/>'\n if vals.get('amount'):\n content = content + ' • Amount: ' + str(vals.get('amount')\n ) + '<br/>'\n if vals.get('prepaid'):\n content = content + ' • Prepaid: ' + str(vals.get('prepaid')\n ) + '<br/>'\n if vals.get('collect'):\n content = content + ' • Collect: ' + str(vals.get('collect')\n ) + '<br/>'\n if vals.get('payable_at_by'):\n content = content + ' • Payable at/by: ' + str(vals.get(\n 'payable_at_by')) + '<br/>'\n res.charge_line.message_post(body=content)\n return res\n\n @api.multi\n def write(self, vals):\n res = super(ChargeLine, self).write(vals)\n content = ''\n if vals.get('freight_charges'):\n content = content + ' • Freight & Charges: ' + str(vals.get(\n 'freight_charges')) + '<br/>'\n if vals.get('revenue_tons'):\n content = content + ' • Revenue Tons: ' + str(vals.get(\n 'revenue_tons')) + '<br/>'\n if vals.get('rate'):\n content = content + ' • Rate: ' + str(vals.get('rate')) + '<br/>'\n if vals.get('per'):\n content = content + ' • Per: ' + str(vals.get('per')) + '<br/>'\n if vals.get('amount'):\n content = content + ' • Amount: ' + str(vals.get('amount')\n ) + '<br/>'\n if vals.get('prepaid'):\n content = content + ' • Prepaid: ' + str(vals.get('prepaid')\n ) + '<br/>'\n if vals.get('collect'):\n content = content + ' • Collect: ' + str(vals.get('collect')\n ) + '<br/>'\n if vals.get('payable_at_by'):\n content = content + ' • Payable at/by: ' + str(vals.get(\n 'payable_at_by')) + '<br/>'\n self.charge_line.message_post(body=content)\n return res\n\n\nclass CostProfit(models.Model):\n _name = 'freight.bol.cost.profit'\n _description = 'BOL Cost & Profit'\n sequence = fields.Integer(string='sequence')\n bol_id = fields.Many2one('freight.bol', string='BOL ID', required=True,\n ondelete='cascade', index=True, copy=False)\n product_id = 
fields.Many2one('product.product', string='Product')\n product_name = fields.Text(string='Description')\n list_price = fields.Float(string='Unit Price')\n uom_id = fields.Many2one('uom.uom', string='UoM')\n profit_gst = fields.Selection([('zer', 'ZER')], string='GST', default=\n 'zer', track_visibility='onchange')\n tax_id = fields.Many2many('account.tax', string='Taxes', domain=['|', (\n 'active', '=', False), ('active', '=', True)])\n profit_currency = fields.Many2one('res.currency', 'Currency', default=\n lambda self: self.env.user.company_id.currency_id.id,\n track_visibility='onchange')\n profit_currency_rate = fields.Float(string='Rate', default='1.00',\n track_visibility='onchange')\n profit_amount = fields.Float(string='Amt', compute=\n '_compute_profit_amount', store=True, track_visibility='onchange')\n sale_total = fields.Float(string='Total Sales', compute=\n '_compute_sale_total', store=True, track_visibility='onchange')\n profit_qty = fields.Float(string='Qty', default='1.000', digit=(12, 3))\n cost_qty = fields.Float(string='Qty', default='1.000', digit=(12, 3))\n cost_price = fields.Float(string='Unit Price', track_visibility='onchange')\n cost_gst = fields.Selection([('zer', 'ZER')], string='Tax', default=\n 'zer', track_visibility='onchange')\n vendor_id = fields.Many2one('res.partner', string='Vendor',\n track_visibility='onchange')\n vendor_bill_id = fields.Many2one('account.invoice', string='Vendor Bill')\n cost_currency = fields.Many2one('res.currency', string='Curr', required\n =True, default=lambda self: self.env.user.company_id.currency_id.id,\n track_visibility='onchange')\n cost_currency_rate = fields.Float(string='Rate', default='1.00',\n track_visibility='onchange')\n cost_amount = fields.Float(string='Amt', compute='_compute_cost_amount',\n store=True, track_visibility='onchange')\n cost_total = fields.Float(string='Total Cost', compute=\n '_compute_cost_total', store=True, track_visibility='onchange')\n billed = fields.Boolean(string='Billed', copy=False)\n is_billed = fields.Char('Is Biiled?', compute='_compute_is_billed',\n store=True)\n added_to_invoice = fields.Boolean(string='Invoiced', copy=False)\n invoice_paid = fields.Boolean(string='Invoice Paid', copy=False)\n paid = fields.Boolean(string='Paid', copy=False)\n is_paid = fields.Char('Is Paid?', compute='_compute_is_paid', store=True)\n invoice_id = fields.Many2one('account.invoice', string='Invoice')\n inv_line_id = fields.Many2one('account.invoice.line', string='Invoice Line'\n )\n bill_id = fields.Many2one('account.invoice', string='Bill')\n bill_line_id = fields.Many2one('account.invoice.line', string='Bill Line')\n route_service = fields.Boolean(string='Is Route Service', default=False)\n profit_total = fields.Float(string='Total Profit', compute=\n '_compute_profit_total', store=True)\n margin_total = fields.Float(string='Margin %', compute=\n '_compute_margin_total', digits=(8, 2), store=True, group_operator=\n 'avg')\n vendor_id_ids = fields.Many2many('res.partner', string='Vendor List',\n copy=False)\n vendor_bill_ids = fields.Many2many('account.invoice', string=\n 'Vendor Bill List', copy=False)\n\n @api.one\n def _set_access_for_invoiced(self):\n if self.env['res.users'].has_group('account.group_account_manager'):\n self.invoiced_readonly = False\n else:\n self.invoiced_readonly = True\n invoiced_readonly = fields.Boolean(compute='_set_access_for_invoiced',\n string='Is user able to modify invoiced?')\n\n @api.depends('profit_qty', 'list_price')\n def _compute_profit_amount(self):\n for 
service in self:\n if service.product_id:\n service.profit_amount = (service.profit_qty * service.\n list_price or 0.0)\n\n @api.depends('cost_qty', 'cost_price')\n def _compute_cost_amount(self):\n for service in self:\n if service.product_id:\n service.cost_amount = (service.cost_qty * service.\n cost_price or 0.0)\n\n @api.depends('profit_amount', 'profit_currency_rate')\n def _compute_sale_total(self):\n for service in self:\n if service.product_id:\n service.sale_total = (service.profit_amount * service.\n profit_currency_rate or 0.0)\n\n @api.onchange('profit_currency_rate')\n def _onchange_profit_currency_rate(self):\n for service in self:\n if service.product_id:\n service.sale_total = (service.profit_amount * service.\n profit_currency_rate or 0.0)\n\n @api.onchange('profit_amount')\n def _onchange_profit_amount(self):\n for service in self:\n if service.product_id:\n service.sale_total = (service.profit_amount * service.\n profit_currency_rate or 0.0)\n service.profit_total = (service.sale_total - service.\n cost_total or 0.0)\n\n @api.depends('cost_amount', 'cost_currency_rate')\n def _compute_cost_total(self):\n for service in self:\n if service.product_id:\n service.cost_total = (service.cost_amount * service.\n cost_currency_rate or 0.0)\n service.profit_total = (service.sale_total - service.\n cost_total or 0.0)\n\n @api.depends('cost_total', 'sale_total')\n def _compute_profit_total(self):\n for service in self:\n if service.product_id:\n service.profit_total = (service.sale_total - service.\n cost_total or 0.0)\n\n @api.depends('profit_total', 'sale_total')\n def _compute_margin_total(self):\n for service in self:\n if service.product_id:\n if service.sale_total > 0:\n service.margin_total = (service.profit_total / service.\n sale_total * 100)\n\n @api.onchange('cost_amount')\n def _onchange_cost_amount(self):\n for service in self:\n if service.product_id:\n service.cost_total = (service.cost_amount * service.\n cost_currency_rate or 0.0)\n service.profit_total = (service.sale_total - service.\n cost_total or 0.0)\n\n @api.onchange('cost_currency_rate')\n def _onchange_cost_currency_rate(self):\n for service in self:\n if service.product_id:\n service.cost_total = (service.cost_amount * service.\n cost_currency_rate or 0.0)\n service.profit_total = (service.sale_total - service.\n cost_total or 0.0)\n\n @api.onchange('product_id')\n def _onchange_product_id(self):\n if not self.product_id:\n return {'domain': {'uom_id': []}}\n vals = {}\n domain = {'uom_id': [('category_id', '=', self.product_id.uom_id.\n category_id.id)]}\n if not self.uom_id or self.product_id.uom_id.id != self.uom_id.id:\n vals['uom_id'] = self.product_id.uom_id\n vals['product_name'] = self.product_id.name\n self.update(vals)\n if self.product_id:\n self.update({'list_price': self.product_id.list_price or 0.0,\n 'cost_price': self.product_id.standard_price or 0.0})\n\n @api.onchange('vendor_id')\n def _onchange_vendor_id(self):\n print('OnChange Vendor_ID')\n if self.vendor_id:\n if not self.billed:\n self.billed = False\n print('Invoiced False')\n\n @api.multi\n @api.depends('billed')\n def _compute_is_billed(self):\n for cost_profit_line in self:\n if cost_profit_line.vendor_id:\n if cost_profit_line.billed:\n cost_profit_line.is_billed = 'Y'\n elif not cost_profit_line.billed:\n cost_profit_line.is_billed = 'N'\n\n @api.multi\n @api.depends('paid')\n def _compute_is_paid(self):\n for cost_profit_line in self:\n if cost_profit_line.vendor_id:\n if cost_profit_line.paid:\n cost_profit_line.is_paid = 
'Y'\n elif not cost_profit_line.paid:\n cost_profit_line.is_paid = 'N'\n",
"step-2": "<mask token>\n\n\nclass BillOfLading(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @api.multi\n def _get_default_commodity_category(self):\n commodity_lines = self.env['freight.product.category'].search([(\n 'type', '=ilike', 'commodity')])\n for commodity_line in commodity_lines:\n _logger.warning('_get_default_commodity_category=' + str(\n commodity_line.product_category))\n return commodity_line.product_category\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @api.model\n def create(self, vals):\n vals['bol_no'] = self.env['ir.sequence'].next_by_code('hbl')\n res = super(BillOfLading, self).create(vals)\n return res\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def action_copy_from_booking(self):\n booking = self.env['freight.booking'].search([('id', '=', self.\n booking_ref.id)])\n for line in booking.cost_profit_ids:\n operation_line_obj = self.env['freight.bol.cost.profit']\n op_line = operation_line_obj.create({'bol_id': self.id,\n 'product_id': line.product_id.id or False, 'product_name': \n line.product_name or '', 'profit_qty': line.profit_qty or 0,\n 'list_price': line.list_price or 0, 'profit_amount': line.\n profit_amount or 0, 'profit_currency': line.profit_currency\n .id or False, 'profit_currency_rate': line.\n profit_currency_rate or 0, 'cost_qty': line.cost_qty or 0,\n 'cost_price': line.cost_price or 0, 'cost_amount': line.\n cost_amount or 0, 'vendor_id': line.vendor_id.id or False,\n 'cost_currency': line.cost_currency.id or False,\n 'cost_currency_rate': line.cost_currency_rate or 0})\n <mask token>\n <mask token>\n\n @api.multi\n def operation_bill(self):\n for operation in self:\n vendor_bill_list = []\n for cost_profit_line in operation.cost_profit_ids:\n for vendor_bill_line in cost_profit_line.vendor_bill_ids:\n if vendor_bill_line.type in ['in_invoice', 'in_refund']:\n vendor_bill_list.append(vendor_bill_line.id)\n invoices = self.env['account.invoice'].search([('freight_hbl',\n '=', operation.id), ('type', 'in', ['in_invoice',\n 'in_refund']), ('state', '!=', 'cancel')])\n invoice_name_list = []\n for x in invoices:\n invoice_name_list.append(x.id)\n unique_list = []\n for y in vendor_bill_list:\n if invoice_name_list and len(invoice_name_list) > 0:\n if y not in invoice_name_list:\n unique_list.append(y)\n else:\n unique_list.append(y)\n for z in invoice_name_list:\n unique_list.append(z)\n if len(unique_list) > 1:\n views = [(self.env.ref('account.invoice_supplier_tree').id,\n 'tree'), (self.env.ref('account.invoice_supplier_form').id,\n 'form')]\n return {'name': 'Vendor bills', 'view_type': 'form',\n 'view_mode': 'tree,form', 'view_id': 
False, 'res_model':\n 'account.invoice', 'views': views, 'domain': [('id', 'in',\n unique_list)], 'type': 'ir.actions.act_window'}\n elif len(unique_list) == 1:\n return {'view_type': 'form', 'view_mode': 'form', 'res_model':\n 'account.invoice', 'res_id': unique_list[0] or False,\n 'type': 'ir.actions.act_window', 'target': 'popup'}\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass CargoLine(models.Model):\n _name = 'freight.bol.cargo'\n _description = 'Cargo Line'\n cargo_line = fields.Many2one('freight.bol', string='Cargo Line',\n required=True, ondelete='cascade', index=True, copy=False)\n sequence = fields.Integer(string='sequence')\n marks = fields.Text(string='Marks and Numbers')\n container_no = fields.Char(string='Container No.')\n container_product_id = fields.Many2one('product.product', string=\n 'Container', track_visibility='onchange')\n seal_no = fields.Char(string='Seal No.')\n container_product_name = fields.Text(string='Description of Goods')\n packages_no_value = fields.Integer(string='No. of Packages')\n packages_no_uom = fields.Many2one('uom.uom', string='UoM')\n exp_net_weight = fields.Float(string='Net Weight(KG)', help=\n 'Expected Weight in kg.', track_visibility='onchange')\n exp_gross_weight = fields.Float(string='Gross Weight(KG)', digits=(12, \n 4), help='Expected Weight in kg.')\n dim_length = fields.Float(string='Length', help='Length in cm', default\n ='0.00', track_visibility='onchange')\n dim_width = fields.Float(string='Width', default='0.00', help=\n 'Width in cm', track_visibility='onchange')\n dim_height = fields.Float(string='Height', default='0.00', help=\n 'Height in cm', track_visibility='onchange')\n exp_vol = fields.Float(string='Measurement (M3)', digits=(12, 4), help=\n 'Expected Volume in m3 Measure')\n packages_no = fields.Char(string='No. of Packages')\n\n @api.multi\n def _get_default_container_category(self):\n container_lines = self.env['freight.product.category'].search([(\n 'type', '=ilike', 'container')])\n for container_line in container_lines:\n return container_line.product_category\n container_category_id = fields.Many2one('product.category', string=\n 'Container Product Id', default=_get_default_container_category)\n\n @api.onchange('container_product_name')\n def _onchange_description(self):\n bl = self.env['freight.bol'].search([('bol_no', '=', self.\n cargo_line.bol_no)])\n if self.container_product_name:\n lines_description = self.container_product_name.count('\\n') / 20\n lines_description = math.ceil(lines_description)\n x = self.container_product_name.split('\\n')\n count = 0\n line_description1 = ''\n line_description2 = ''\n for line in x:\n if count < 20:\n line_description1 = line_description1 + line + '\\n'\n count = count + 1\n else:\n line_description2 = line_description2 + line + '\\n'\n count = count + 1\n bl.write({'lines_description': lines_description,\n 'line_description1': line_description1, 'line_description2':\n line_description2})\n\n @api.model\n def create(self, vals):\n res = super(CargoLine, self).create(vals)\n content = ''\n if vals.get('marks'):\n content = content + ' • Marks and Numbers: ' + str(vals.get(\n 'marks')) + '<br/>'\n if vals.get('container_product_name'):\n content = content + ' • Description of Goods: ' + str(vals.get\n ('container_product_name')) + '<br/>'\n if vals.get('packages_no'):\n content = content + ' • No. 
of Packages: ' + str(vals.get(\n 'packages_no')) + '<br/>'\n if vals.get('seal_no'):\n content = content + ' • Seal no: ' + str(vals.get('seal_no')\n ) + '<br/>'\n if vals.get('container_no'):\n content = content + ' • Container No.: ' + str(vals.get(\n 'container_no')) + '<br/>'\n if vals.get('exp_gross_weight'):\n content = content + ' • Gross Weight(KG): ' + str(vals.get(\n 'exp_gross_weight')) + '<br/>'\n if vals.get('exp_vol'):\n content = content + ' • Measurement (M3): ' + str(vals.get(\n 'exp_vol')) + '<br/>'\n res.cargo_line.message_post(body=content)\n return res\n\n @api.multi\n def write(self, vals):\n res = super(CargoLine, self).write(vals)\n content = ''\n if vals.get('marks'):\n content = content + ' • Marks and Numbers: ' + str(vals.get(\n 'marks')) + '<br/>'\n if vals.get('container_product_name'):\n content = content + ' • Description of Goods: ' + str(vals.get\n ('container_product_name')) + '<br/>'\n if vals.get('packages_no'):\n content = content + ' • No. of Packages: ' + str(vals.get(\n 'packages_no')) + '<br/>'\n if vals.get('seal_no'):\n content = content + ' • Seal no: ' + str(vals.get('seal_no')\n ) + '<br/>'\n if vals.get('container_no'):\n content = content + ' • Container No.: ' + str(vals.get(\n 'container_no')) + '<br/>'\n if vals.get('exp_gross_weight'):\n content = content + ' • Gross Weight(KG): ' + str(vals.get(\n 'exp_gross_weight')) + '<br/>'\n if vals.get('exp_vol'):\n content = content + ' • Measurement (M3): ' + str(vals.get(\n 'exp_vol')) + '<br/>'\n self.cargo_line.message_post(body=content)\n return res\n\n\nclass ChargeLine(models.Model):\n _name = 'freight.bol.charge'\n _description = 'Charge Line'\n charge_line = fields.Many2one('freight.bol', string='Charge Line',\n required=True, ondelete='cascade', index=True, copy=False)\n sequence = fields.Integer(string='sequence')\n freight_charges = fields.Text(string='Freight & Charges')\n rate = fields.Char(string='Rate')\n per = fields.Char(string='Per')\n amount = fields.Char(string='Amount')\n prepaid = fields.Char(string='Prepaid')\n collect = fields.Char(string='Collect')\n payable_at_by = fields.Char(string='Payable at/by')\n revenue_tons = fields.Char(string='Revenue Tons')\n\n @api.model\n def create(self, vals):\n res = super(ChargeLine, self).create(vals)\n content = ''\n if vals.get('freight_charges'):\n content = content + ' • Freight & Charges: ' + str(vals.get(\n 'freight_charges')) + '<br/>'\n if vals.get('revenue_tons'):\n content = content + ' • Revenue Tons: ' + str(vals.get(\n 'revenue_tons')) + '<br/>'\n if vals.get('rate'):\n content = content + ' • Rate: ' + str(vals.get('rate')) + '<br/>'\n if vals.get('per'):\n content = content + ' • Per: ' + str(vals.get('per')) + '<br/>'\n if vals.get('amount'):\n content = content + ' • Amount: ' + str(vals.get('amount')\n ) + '<br/>'\n if vals.get('prepaid'):\n content = content + ' • Prepaid: ' + str(vals.get('prepaid')\n ) + '<br/>'\n if vals.get('collect'):\n content = content + ' • Collect: ' + str(vals.get('collect')\n ) + '<br/>'\n if vals.get('payable_at_by'):\n content = content + ' • Payable at/by: ' + str(vals.get(\n 'payable_at_by')) + '<br/>'\n res.charge_line.message_post(body=content)\n return res\n\n @api.multi\n def write(self, vals):\n res = super(ChargeLine, self).write(vals)\n content = ''\n if vals.get('freight_charges'):\n content = content + ' • Freight & Charges: ' + str(vals.get(\n 'freight_charges')) + '<br/>'\n if vals.get('revenue_tons'):\n content = content + ' • Revenue Tons: ' + str(vals.get(\n 
'revenue_tons')) + '<br/>'\n        if vals.get('rate'):\n            content = content + ' • Rate: ' + str(vals.get('rate')) + '<br/>'\n        if vals.get('per'):\n            content = content + ' • Per: ' + str(vals.get('per')) + '<br/>'\n        if vals.get('amount'):\n            content = content + ' • Amount: ' + str(vals.get('amount')\n                ) + '<br/>'\n        if vals.get('prepaid'):\n            content = content + ' • Prepaid: ' + str(vals.get('prepaid')\n                ) + '<br/>'\n        if vals.get('collect'):\n            content = content + ' • Collect: ' + str(vals.get('collect')\n                ) + '<br/>'\n        if vals.get('payable_at_by'):\n            content = content + ' • Payable at/by: ' + str(vals.get(\n                'payable_at_by')) + '<br/>'\n        self.charge_line.message_post(body=content)\n        return res\n\n\nclass CostProfit(models.Model):\n    _name = 'freight.bol.cost.profit'\n    _description = 'BOL Cost & Profit'\n    sequence = fields.Integer(string='sequence')\n    bol_id = fields.Many2one('freight.bol', string='BOL ID', required=True,\n        ondelete='cascade', index=True, copy=False)\n    product_id = fields.Many2one('product.product', string='Product')\n    product_name = fields.Text(string='Description')\n    list_price = fields.Float(string='Unit Price')\n    uom_id = fields.Many2one('uom.uom', string='UoM')\n    profit_gst = fields.Selection([('zer', 'ZER')], string='GST', default=\n        'zer', track_visibility='onchange')\n    tax_id = fields.Many2many('account.tax', string='Taxes', domain=['|', (\n        'active', '=', False), ('active', '=', True)])\n    profit_currency = fields.Many2one('res.currency', 'Currency', default=\n        lambda self: self.env.user.company_id.currency_id.id,\n        track_visibility='onchange')\n    profit_currency_rate = fields.Float(string='Rate', default='1.00',\n        track_visibility='onchange')\n    profit_amount = fields.Float(string='Amt', compute=\n        '_compute_profit_amount', store=True, track_visibility='onchange')\n    sale_total = fields.Float(string='Total Sales', compute=\n        '_compute_sale_total', store=True, track_visibility='onchange')\n    profit_qty = fields.Float(string='Qty', default='1.000', digits=(12, 3))\n    cost_qty = fields.Float(string='Qty', default='1.000', digits=(12, 3))\n    cost_price = fields.Float(string='Unit Price', track_visibility='onchange')\n    cost_gst = fields.Selection([('zer', 'ZER')], string='Tax', default=\n        'zer', track_visibility='onchange')\n    vendor_id = fields.Many2one('res.partner', string='Vendor',\n        track_visibility='onchange')\n    vendor_bill_id = fields.Many2one('account.invoice', string='Vendor Bill')\n    cost_currency = fields.Many2one('res.currency', string='Curr', required\n        =True, default=lambda self: self.env.user.company_id.currency_id.id,\n        track_visibility='onchange')\n    cost_currency_rate = fields.Float(string='Rate', default='1.00',\n        track_visibility='onchange')\n    cost_amount = fields.Float(string='Amt', compute='_compute_cost_amount',\n        store=True, track_visibility='onchange')\n    cost_total = fields.Float(string='Total Cost', compute=\n        '_compute_cost_total', store=True, track_visibility='onchange')\n    billed = fields.Boolean(string='Billed', copy=False)\n    is_billed = fields.Char('Is Billed?', compute='_compute_is_billed',\n        store=True)\n    added_to_invoice = fields.Boolean(string='Invoiced', copy=False)\n    invoice_paid = fields.Boolean(string='Invoice Paid', copy=False)\n    paid = fields.Boolean(string='Paid', copy=False)\n    is_paid = fields.Char('Is Paid?', compute='_compute_is_paid', store=True)\n    invoice_id = fields.Many2one('account.invoice', string='Invoice')\n    inv_line_id = fields.Many2one('account.invoice.line', string='Invoice Line'\n        )\n    bill_id = fields.Many2one('account.invoice', 
string='Bill')\n bill_line_id = fields.Many2one('account.invoice.line', string='Bill Line')\n route_service = fields.Boolean(string='Is Route Service', default=False)\n profit_total = fields.Float(string='Total Profit', compute=\n '_compute_profit_total', store=True)\n margin_total = fields.Float(string='Margin %', compute=\n '_compute_margin_total', digits=(8, 2), store=True, group_operator=\n 'avg')\n vendor_id_ids = fields.Many2many('res.partner', string='Vendor List',\n copy=False)\n vendor_bill_ids = fields.Many2many('account.invoice', string=\n 'Vendor Bill List', copy=False)\n\n @api.one\n def _set_access_for_invoiced(self):\n if self.env['res.users'].has_group('account.group_account_manager'):\n self.invoiced_readonly = False\n else:\n self.invoiced_readonly = True\n invoiced_readonly = fields.Boolean(compute='_set_access_for_invoiced',\n string='Is user able to modify invoiced?')\n\n @api.depends('profit_qty', 'list_price')\n def _compute_profit_amount(self):\n for service in self:\n if service.product_id:\n service.profit_amount = (service.profit_qty * service.\n list_price or 0.0)\n\n @api.depends('cost_qty', 'cost_price')\n def _compute_cost_amount(self):\n for service in self:\n if service.product_id:\n service.cost_amount = (service.cost_qty * service.\n cost_price or 0.0)\n\n @api.depends('profit_amount', 'profit_currency_rate')\n def _compute_sale_total(self):\n for service in self:\n if service.product_id:\n service.sale_total = (service.profit_amount * service.\n profit_currency_rate or 0.0)\n\n @api.onchange('profit_currency_rate')\n def _onchange_profit_currency_rate(self):\n for service in self:\n if service.product_id:\n service.sale_total = (service.profit_amount * service.\n profit_currency_rate or 0.0)\n\n @api.onchange('profit_amount')\n def _onchange_profit_amount(self):\n for service in self:\n if service.product_id:\n service.sale_total = (service.profit_amount * service.\n profit_currency_rate or 0.0)\n service.profit_total = (service.sale_total - service.\n cost_total or 0.0)\n\n @api.depends('cost_amount', 'cost_currency_rate')\n def _compute_cost_total(self):\n for service in self:\n if service.product_id:\n service.cost_total = (service.cost_amount * service.\n cost_currency_rate or 0.0)\n service.profit_total = (service.sale_total - service.\n cost_total or 0.0)\n\n @api.depends('cost_total', 'sale_total')\n def _compute_profit_total(self):\n for service in self:\n if service.product_id:\n service.profit_total = (service.sale_total - service.\n cost_total or 0.0)\n\n @api.depends('profit_total', 'sale_total')\n def _compute_margin_total(self):\n for service in self:\n if service.product_id:\n if service.sale_total > 0:\n service.margin_total = (service.profit_total / service.\n sale_total * 100)\n\n @api.onchange('cost_amount')\n def _onchange_cost_amount(self):\n for service in self:\n if service.product_id:\n service.cost_total = (service.cost_amount * service.\n cost_currency_rate or 0.0)\n service.profit_total = (service.sale_total - service.\n cost_total or 0.0)\n\n @api.onchange('cost_currency_rate')\n def _onchange_cost_currency_rate(self):\n for service in self:\n if service.product_id:\n service.cost_total = (service.cost_amount * service.\n cost_currency_rate or 0.0)\n service.profit_total = (service.sale_total - service.\n cost_total or 0.0)\n\n @api.onchange('product_id')\n def _onchange_product_id(self):\n if not self.product_id:\n return {'domain': {'uom_id': []}}\n vals = {}\n domain = {'uom_id': [('category_id', '=', 
self.product_id.uom_id.\n category_id.id)]}\n if not self.uom_id or self.product_id.uom_id.id != self.uom_id.id:\n vals['uom_id'] = self.product_id.uom_id\n vals['product_name'] = self.product_id.name\n self.update(vals)\n if self.product_id:\n self.update({'list_price': self.product_id.list_price or 0.0,\n 'cost_price': self.product_id.standard_price or 0.0})\n\n @api.onchange('vendor_id')\n def _onchange_vendor_id(self):\n print('OnChange Vendor_ID')\n if self.vendor_id:\n if not self.billed:\n self.billed = False\n print('Invoiced False')\n\n @api.multi\n @api.depends('billed')\n def _compute_is_billed(self):\n for cost_profit_line in self:\n if cost_profit_line.vendor_id:\n if cost_profit_line.billed:\n cost_profit_line.is_billed = 'Y'\n elif not cost_profit_line.billed:\n cost_profit_line.is_billed = 'N'\n\n @api.multi\n @api.depends('paid')\n def _compute_is_paid(self):\n for cost_profit_line in self:\n if cost_profit_line.vendor_id:\n if cost_profit_line.paid:\n cost_profit_line.is_paid = 'Y'\n elif not cost_profit_line.paid:\n cost_profit_line.is_paid = 'N'\n",
"step-3": "<mask token>\n\n\nclass BillOfLading(models.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @api.multi\n def _get_default_commodity_category(self):\n commodity_lines = self.env['freight.product.category'].search([(\n 'type', '=ilike', 'commodity')])\n for commodity_line in commodity_lines:\n _logger.warning('_get_default_commodity_category=' + str(\n commodity_line.product_category))\n return commodity_line.product_category\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @api.model\n def create(self, vals):\n vals['bol_no'] = self.env['ir.sequence'].next_by_code('hbl')\n res = super(BillOfLading, self).create(vals)\n return res\n <mask token>\n <mask token>\n\n @api.multi\n def action_invoice(self):\n self.ensure_one()\n view = self.env.ref('sci_goexcel_freight.invoice_view_form')\n return {'name': 'Create Invoice', 'type': 'ir.actions.act_window',\n 'view_type': 'form', 'view_mode': 'form', 'res_model':\n 'invoice.wizard', 'views': [(view.id, 'form')], 'view_id': view\n .id, 'target': 'new', 'context': dict(bl_id=self.id)}\n\n @api.multi\n def action_create_vendor_bill(self):\n vendor_po = self.cost_profit_ids.filtered(lambda c: c.vendor_id)\n po_lines = vendor_po.sorted(key=lambda p: p.vendor_id.id)\n vendor_count = False\n vendor_id = False\n if not self.analytic_account_id:\n values = {'name': '%s' % self.booking_ref.booking_no,\n 'partner_id': self.booking_ref.customer_name.id, 'code':\n self.bol_no, 'company_id': self.booking_ref.company_id.id}\n analytic_account = self.env['account.analytic.account'].sudo(\n ).create(values)\n self.booking_ref.write({'analytic_account_id': analytic_account.id}\n )\n self.write({'analytic_account_id': analytic_account.id})\n for line in po_lines:\n if line.vendor_id != vendor_id:\n vb = self.env['account.invoice']\n vendor_count = True\n vendor_id = line.vendor_id\n value = []\n vendor_bill_created = []\n filtered_vb_lines = po_lines.filtered(lambda r: r.vendor_id ==\n vendor_id)\n for vb_line in filtered_vb_lines:\n if not vb_line.billed:\n account_id = False\n price_after_converted = (vb_line.cost_price *\n vb_line.cost_currency_rate)\n if vb_line.product_id.property_account_expense_id:\n account_id = (vb_line.product_id.\n property_account_expense_id)\n elif vb_line.product_id.categ_id.property_account_expense_categ_id:\n account_id = (vb_line.product_id.categ_id.\n property_account_expense_categ_id)\n value.append([0, 0, {'account_id': account_id.id or\n False, 'name': vb_line.product_id.name or '',\n 'product_id': vb_line.product_id.id or False,\n 'quantity': vb_line.cost_qty or 0.0, 'uom_id': \n vb_line.uom_id.id or False, 'price_unit': \n price_after_converted or 0.0,\n 
'account_analytic_id': self.analytic_account_id\n .id, 'bl_line_id': vb_line.id}])\n vendor_bill_created.append(vb_line)\n vb_line.billed = True\n if value:\n vendor_bill_id = vb.create({'type': 'in_invoice',\n 'invoice_line_ids': value, 'default_currency_id':\n self.env.user.company_id.currency_id.id,\n 'company_id': self.company_id.id, 'date_invoice':\n fields.Date.context_today(self), 'origin': self.\n bol_no, 'partner_id': vendor_id.id, 'account_id': \n vb_line.vendor_id.property_account_payable_id.id or\n False, 'freight_booking': self.booking_ref.id})\n for new_vendor_bill in vendor_bill_created:\n new_vendor_bill.vendor_bill_id = vendor_bill_id.id\n if vendor_count is False:\n raise exceptions.ValidationError('No Vendor in Cost & Profit!!!')\n <mask token>\n\n def action_copy_from_booking(self):\n booking = self.env['freight.booking'].search([('id', '=', self.\n booking_ref.id)])\n for line in booking.cost_profit_ids:\n operation_line_obj = self.env['freight.bol.cost.profit']\n op_line = operation_line_obj.create({'bol_id': self.id,\n 'product_id': line.product_id.id or False, 'product_name': \n line.product_name or '', 'profit_qty': line.profit_qty or 0,\n 'list_price': line.list_price or 0, 'profit_amount': line.\n profit_amount or 0, 'profit_currency': line.profit_currency\n .id or False, 'profit_currency_rate': line.\n profit_currency_rate or 0, 'cost_qty': line.cost_qty or 0,\n 'cost_price': line.cost_price or 0, 'cost_amount': line.\n cost_amount or 0, 'vendor_id': line.vendor_id.id or False,\n 'cost_currency': line.cost_currency.id or False,\n 'cost_currency_rate': line.cost_currency_rate or 0})\n <mask token>\n\n @api.multi\n def operation_invoices(self):\n \"\"\"Show Invoice for specific Freight Operation smart Button.\"\"\"\n for operation in self:\n invoices = self.env['account.invoice'].search([('freight_hbl',\n '=', operation.id), ('type', 'in', ['out_invoice',\n 'out_refund']), ('state', '!=', 'cancel')])\n action = self.env.ref('account.action_invoice_tree1').read()[0]\n if len(invoices) > 1:\n action['domain'] = [('id', 'in', invoices.ids)]\n elif len(invoices) == 1:\n action['views'] = [(self.env.ref('account.invoice_form').id,\n 'form')]\n action['res_id'] = invoices.ids[0]\n else:\n action = {'type': 'ir.actions.act_window_close'}\n return action\n\n @api.multi\n def operation_bill(self):\n for operation in self:\n vendor_bill_list = []\n for cost_profit_line in operation.cost_profit_ids:\n for vendor_bill_line in cost_profit_line.vendor_bill_ids:\n if vendor_bill_line.type in ['in_invoice', 'in_refund']:\n vendor_bill_list.append(vendor_bill_line.id)\n invoices = self.env['account.invoice'].search([('freight_hbl',\n '=', operation.id), ('type', 'in', ['in_invoice',\n 'in_refund']), ('state', '!=', 'cancel')])\n invoice_name_list = []\n for x in invoices:\n invoice_name_list.append(x.id)\n unique_list = []\n for y in vendor_bill_list:\n if invoice_name_list and len(invoice_name_list) > 0:\n if y not in invoice_name_list:\n unique_list.append(y)\n else:\n unique_list.append(y)\n for z in invoice_name_list:\n unique_list.append(z)\n if len(unique_list) > 1:\n views = [(self.env.ref('account.invoice_supplier_tree').id,\n 'tree'), (self.env.ref('account.invoice_supplier_form').id,\n 'form')]\n return {'name': 'Vendor bills', 'view_type': 'form',\n 'view_mode': 'tree,form', 'view_id': False, 'res_model':\n 'account.invoice', 'views': views, 'domain': [('id', 'in',\n unique_list)], 'type': 'ir.actions.act_window'}\n elif len(unique_list) == 1:\n return {'view_type': 
'form', 'view_mode': 'form', 'res_model':\n 'account.invoice', 'res_id': unique_list[0] or False,\n 'type': 'ir.actions.act_window', 'target': 'popup'}\n\n @api.multi\n def operation_si(self):\n for operation in self:\n si = self.env['freight.website.si'].search([('bl_ref', '=',\n operation.id)])\n if len(si) > 1:\n views = [(self.env.ref('sci_goexcel_freight.view_tree_si').id,\n 'tree'), (self.env.ref('sci_goexcel_freight.view_form_si').\n id, 'form')]\n return {'name': 'Shipping Instruction', 'view_type': 'form',\n 'view_mode': 'tree,form', 'view_id': False, 'res_model':\n 'freight.website.si', 'views': views, 'domain': [('id',\n 'in', si.ids)], 'type': 'ir.actions.act_window'}\n elif len(si) == 1:\n return {'view_type': 'form', 'view_mode': 'form', 'res_model':\n 'freight.website.si', 'res_id': si.id or False, 'type':\n 'ir.actions.act_window', 'target': 'popup'}\n else:\n action = {'type': 'ir.actions.act_window_close'}\n return action\n\n def _get_invoiced_count(self):\n for operation in self:\n invoices = self.env['account.invoice'].search([('freight_hbl',\n '=', operation.id), ('type', 'in', ['out_invoice',\n 'out_refund']), ('state', '!=', 'cancel')])\n self.update({'invoice_count': len(invoices)})\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @api.multi\n def operation_pr(self):\n for operation in self:\n for operation in self:\n pr_lines = self.env['account.voucher.line'].search([(\n 'freight_hbl', '=', operation.id)])\n pr_list = []\n for pr_line in pr_lines:\n if (pr_line.voucher_id.state != 'cancel' and pr_line.\n voucher_id.voucher_type == 'purchase'):\n pr_list.append(pr_line.voucher_id.id)\n unique_list = []\n for i in pr_list:\n if i not in unique_list:\n unique_list.append(i)\n if len(unique_list) > 1:\n views = [(self.env.ref('account_voucher.view_voucher_tree').id,\n 'tree'), (self.env.ref(\n 'account_voucher.view_purchase_receipt_form').id, 'form')]\n return {'name': 'Purchase Receipt', 'view_type': 'form',\n 'view_mode': 'tree,form', 'view_id': False, 'res_model':\n 'account.voucher', 'views': views, 'domain': [('id', 'in',\n unique_list)], 'type': 'ir.actions.act_window'}\n elif len(unique_list) == 1:\n return {'view_type': 'form', 'view_mode': 'form', 'res_model':\n 'account.voucher', 'res_id': unique_list[0] or False,\n 'type': 'ir.actions.act_window', 'target': 'popup'}\n\n\nclass CargoLine(models.Model):\n _name = 'freight.bol.cargo'\n _description = 'Cargo Line'\n cargo_line = fields.Many2one('freight.bol', string='Cargo Line',\n required=True, ondelete='cascade', index=True, copy=False)\n sequence = fields.Integer(string='sequence')\n marks = fields.Text(string='Marks and Numbers')\n container_no = fields.Char(string='Container No.')\n container_product_id = fields.Many2one('product.product', string=\n 'Container', track_visibility='onchange')\n seal_no = fields.Char(string='Seal No.')\n container_product_name = fields.Text(string='Description of Goods')\n packages_no_value = fields.Integer(string='No. 
of Packages')\n packages_no_uom = fields.Many2one('uom.uom', string='UoM')\n exp_net_weight = fields.Float(string='Net Weight(KG)', help=\n 'Expected Weight in kg.', track_visibility='onchange')\n exp_gross_weight = fields.Float(string='Gross Weight(KG)', digits=(12, \n 4), help='Expected Weight in kg.')\n dim_length = fields.Float(string='Length', help='Length in cm', default\n ='0.00', track_visibility='onchange')\n dim_width = fields.Float(string='Width', default='0.00', help=\n 'Width in cm', track_visibility='onchange')\n dim_height = fields.Float(string='Height', default='0.00', help=\n 'Height in cm', track_visibility='onchange')\n exp_vol = fields.Float(string='Measurement (M3)', digits=(12, 4), help=\n 'Expected Volume in m3 Measure')\n packages_no = fields.Char(string='No. of Packages')\n\n @api.multi\n def _get_default_container_category(self):\n container_lines = self.env['freight.product.category'].search([(\n 'type', '=ilike', 'container')])\n for container_line in container_lines:\n return container_line.product_category\n container_category_id = fields.Many2one('product.category', string=\n 'Container Product Id', default=_get_default_container_category)\n\n @api.onchange('container_product_name')\n def _onchange_description(self):\n bl = self.env['freight.bol'].search([('bol_no', '=', self.\n cargo_line.bol_no)])\n if self.container_product_name:\n lines_description = self.container_product_name.count('\\n') / 20\n lines_description = math.ceil(lines_description)\n x = self.container_product_name.split('\\n')\n count = 0\n line_description1 = ''\n line_description2 = ''\n for line in x:\n if count < 20:\n line_description1 = line_description1 + line + '\\n'\n count = count + 1\n else:\n line_description2 = line_description2 + line + '\\n'\n count = count + 1\n bl.write({'lines_description': lines_description,\n 'line_description1': line_description1, 'line_description2':\n line_description2})\n\n @api.model\n def create(self, vals):\n res = super(CargoLine, self).create(vals)\n content = ''\n if vals.get('marks'):\n content = content + ' • Marks and Numbers: ' + str(vals.get(\n 'marks')) + '<br/>'\n if vals.get('container_product_name'):\n content = content + ' • Description of Goods: ' + str(vals.get\n ('container_product_name')) + '<br/>'\n if vals.get('packages_no'):\n content = content + ' • No. of Packages: ' + str(vals.get(\n 'packages_no')) + '<br/>'\n if vals.get('seal_no'):\n content = content + ' • Seal no: ' + str(vals.get('seal_no')\n ) + '<br/>'\n if vals.get('container_no'):\n content = content + ' • Container No.: ' + str(vals.get(\n 'container_no')) + '<br/>'\n if vals.get('exp_gross_weight'):\n content = content + ' • Gross Weight(KG): ' + str(vals.get(\n 'exp_gross_weight')) + '<br/>'\n if vals.get('exp_vol'):\n content = content + ' • Measurement (M3): ' + str(vals.get(\n 'exp_vol')) + '<br/>'\n res.cargo_line.message_post(body=content)\n return res\n\n @api.multi\n def write(self, vals):\n res = super(CargoLine, self).write(vals)\n content = ''\n if vals.get('marks'):\n content = content + ' • Marks and Numbers: ' + str(vals.get(\n 'marks')) + '<br/>'\n if vals.get('container_product_name'):\n content = content + ' • Description of Goods: ' + str(vals.get\n ('container_product_name')) + '<br/>'\n if vals.get('packages_no'):\n content = content + ' • No. 
of Packages: ' + str(vals.get(\n 'packages_no')) + '<br/>'\n if vals.get('seal_no'):\n content = content + ' • Seal no: ' + str(vals.get('seal_no')\n ) + '<br/>'\n if vals.get('container_no'):\n content = content + ' • Container No.: ' + str(vals.get(\n 'container_no')) + '<br/>'\n if vals.get('exp_gross_weight'):\n content = content + ' • Gross Weight(KG): ' + str(vals.get(\n 'exp_gross_weight')) + '<br/>'\n if vals.get('exp_vol'):\n content = content + ' • Measurement (M3): ' + str(vals.get(\n 'exp_vol')) + '<br/>'\n self.cargo_line.message_post(body=content)\n return res\n\n\nclass ChargeLine(models.Model):\n _name = 'freight.bol.charge'\n _description = 'Charge Line'\n charge_line = fields.Many2one('freight.bol', string='Charge Line',\n required=True, ondelete='cascade', index=True, copy=False)\n sequence = fields.Integer(string='sequence')\n freight_charges = fields.Text(string='Freight & Charges')\n rate = fields.Char(string='Rate')\n per = fields.Char(string='Per')\n amount = fields.Char(string='Amount')\n prepaid = fields.Char(string='Prepaid')\n collect = fields.Char(string='Collect')\n payable_at_by = fields.Char(string='Payable at/by')\n revenue_tons = fields.Char(string='Revenue Tons')\n\n @api.model\n def create(self, vals):\n res = super(ChargeLine, self).create(vals)\n content = ''\n if vals.get('freight_charges'):\n content = content + ' • Freight & Charges: ' + str(vals.get(\n 'freight_charges')) + '<br/>'\n if vals.get('revenue_tons'):\n content = content + ' • Revenue Tons: ' + str(vals.get(\n 'revenue_tons')) + '<br/>'\n if vals.get('rate'):\n content = content + ' • Rate: ' + str(vals.get('rate')) + '<br/>'\n if vals.get('per'):\n content = content + ' • Per: ' + str(vals.get('per')) + '<br/>'\n if vals.get('amount'):\n content = content + ' • Amount: ' + str(vals.get('amount')\n ) + '<br/>'\n if vals.get('prepaid'):\n content = content + ' • Prepaid: ' + str(vals.get('prepaid')\n ) + '<br/>'\n if vals.get('collect'):\n content = content + ' • Collect: ' + str(vals.get('collect')\n ) + '<br/>'\n if vals.get('payable_at_by'):\n content = content + ' • Payable at/by: ' + str(vals.get(\n 'payable_at_by')) + '<br/>'\n res.charge_line.message_post(body=content)\n return res\n\n @api.multi\n def write(self, vals):\n res = super(ChargeLine, self).write(vals)\n content = ''\n if vals.get('freight_charges'):\n content = content + ' • Freight & Charges: ' + str(vals.get(\n 'freight_charges')) + '<br/>'\n if vals.get('revenue_tons'):\n content = content + ' • Revenue Tons: ' + str(vals.get(\n 'revenue_tons')) + '<br/>'\n if vals.get('rate'):\n content = content + ' • Rate: ' + str(vals.get('rate')) + '<br/>'\n if vals.get('per'):\n content = content + ' • Per: ' + str(vals.get('per')) + '<br/>'\n if vals.get('amount'):\n content = content + ' • Amount: ' + str(vals.get('amount')\n ) + '<br/>'\n if vals.get('prepaid'):\n content = content + ' • Prepaid: ' + str(vals.get('prepaid')\n ) + '<br/>'\n if vals.get('collect'):\n content = content + ' • Collect: ' + str(vals.get('collect')\n ) + '<br/>'\n if vals.get('payable_at_by'):\n content = content + ' • Payable at/by: ' + str(vals.get(\n 'payable_at_by')) + '<br/>'\n self.charge_line.message_post(body=content)\n return res\n\n\nclass CostProfit(models.Model):\n _name = 'freight.bol.cost.profit'\n _description = 'BOL Cost & Profit'\n sequence = fields.Integer(string='sequence')\n bol_id = fields.Many2one('freight.bol', string='BOL ID', required=True,\n ondelete='cascade', index=True, copy=False)\n product_id = 
fields.Many2one('product.product', string='Product')\n    product_name = fields.Text(string='Description')\n    list_price = fields.Float(string='Unit Price')\n    uom_id = fields.Many2one('uom.uom', string='UoM')\n    profit_gst = fields.Selection([('zer', 'ZER')], string='GST', default=\n        'zer', track_visibility='onchange')\n    tax_id = fields.Many2many('account.tax', string='Taxes', domain=['|', (\n        'active', '=', False), ('active', '=', True)])\n    profit_currency = fields.Many2one('res.currency', 'Currency', default=\n        lambda self: self.env.user.company_id.currency_id.id,\n        track_visibility='onchange')\n    profit_currency_rate = fields.Float(string='Rate', default='1.00',\n        track_visibility='onchange')\n    profit_amount = fields.Float(string='Amt', compute=\n        '_compute_profit_amount', store=True, track_visibility='onchange')\n    sale_total = fields.Float(string='Total Sales', compute=\n        '_compute_sale_total', store=True, track_visibility='onchange')\n    profit_qty = fields.Float(string='Qty', default='1.000', digits=(12, 3))\n    cost_qty = fields.Float(string='Qty', default='1.000', digits=(12, 3))\n    cost_price = fields.Float(string='Unit Price', track_visibility='onchange')\n    cost_gst = fields.Selection([('zer', 'ZER')], string='Tax', default=\n        'zer', track_visibility='onchange')\n    vendor_id = fields.Many2one('res.partner', string='Vendor',\n        track_visibility='onchange')\n    vendor_bill_id = fields.Many2one('account.invoice', string='Vendor Bill')\n    cost_currency = fields.Many2one('res.currency', string='Curr', required\n        =True, default=lambda self: self.env.user.company_id.currency_id.id,\n        track_visibility='onchange')\n    cost_currency_rate = fields.Float(string='Rate', default='1.00',\n        track_visibility='onchange')\n    cost_amount = fields.Float(string='Amt', compute='_compute_cost_amount',\n        store=True, track_visibility='onchange')\n    cost_total = fields.Float(string='Total Cost', compute=\n        '_compute_cost_total', store=True, track_visibility='onchange')\n    billed = fields.Boolean(string='Billed', copy=False)\n    is_billed = fields.Char('Is Billed?', compute='_compute_is_billed',\n        store=True)\n    added_to_invoice = fields.Boolean(string='Invoiced', copy=False)\n    invoice_paid = fields.Boolean(string='Invoice Paid', copy=False)\n    paid = fields.Boolean(string='Paid', copy=False)\n    is_paid = fields.Char('Is Paid?', compute='_compute_is_paid', store=True)\n    invoice_id = fields.Many2one('account.invoice', string='Invoice')\n    inv_line_id = fields.Many2one('account.invoice.line', string='Invoice Line'\n        )\n    bill_id = fields.Many2one('account.invoice', string='Bill')\n    bill_line_id = fields.Many2one('account.invoice.line', string='Bill Line')\n    route_service = fields.Boolean(string='Is Route Service', default=False)\n    profit_total = fields.Float(string='Total Profit', compute=\n        '_compute_profit_total', store=True)\n    margin_total = fields.Float(string='Margin %', compute=\n        '_compute_margin_total', digits=(8, 2), store=True, group_operator=\n        'avg')\n    vendor_id_ids = fields.Many2many('res.partner', string='Vendor List',\n        copy=False)\n    vendor_bill_ids = fields.Many2many('account.invoice', string=\n        'Vendor Bill List', copy=False)\n\n    @api.one\n    def _set_access_for_invoiced(self):\n        if self.env['res.users'].has_group('account.group_account_manager'):\n            self.invoiced_readonly = False\n        else:\n            self.invoiced_readonly = True\n    invoiced_readonly = fields.Boolean(compute='_set_access_for_invoiced',\n        string='Is user able to modify invoiced?')\n\n    @api.depends('profit_qty', 'list_price')\n    def _compute_profit_amount(self):\n        for 
service in self:\n if service.product_id:\n service.profit_amount = (service.profit_qty * service.\n list_price or 0.0)\n\n @api.depends('cost_qty', 'cost_price')\n def _compute_cost_amount(self):\n for service in self:\n if service.product_id:\n service.cost_amount = (service.cost_qty * service.\n cost_price or 0.0)\n\n @api.depends('profit_amount', 'profit_currency_rate')\n def _compute_sale_total(self):\n for service in self:\n if service.product_id:\n service.sale_total = (service.profit_amount * service.\n profit_currency_rate or 0.0)\n\n @api.onchange('profit_currency_rate')\n def _onchange_profit_currency_rate(self):\n for service in self:\n if service.product_id:\n service.sale_total = (service.profit_amount * service.\n profit_currency_rate or 0.0)\n\n @api.onchange('profit_amount')\n def _onchange_profit_amount(self):\n for service in self:\n if service.product_id:\n service.sale_total = (service.profit_amount * service.\n profit_currency_rate or 0.0)\n service.profit_total = (service.sale_total - service.\n cost_total or 0.0)\n\n @api.depends('cost_amount', 'cost_currency_rate')\n def _compute_cost_total(self):\n for service in self:\n if service.product_id:\n service.cost_total = (service.cost_amount * service.\n cost_currency_rate or 0.0)\n service.profit_total = (service.sale_total - service.\n cost_total or 0.0)\n\n @api.depends('cost_total', 'sale_total')\n def _compute_profit_total(self):\n for service in self:\n if service.product_id:\n service.profit_total = (service.sale_total - service.\n cost_total or 0.0)\n\n @api.depends('profit_total', 'sale_total')\n def _compute_margin_total(self):\n for service in self:\n if service.product_id:\n if service.sale_total > 0:\n service.margin_total = (service.profit_total / service.\n sale_total * 100)\n\n @api.onchange('cost_amount')\n def _onchange_cost_amount(self):\n for service in self:\n if service.product_id:\n service.cost_total = (service.cost_amount * service.\n cost_currency_rate or 0.0)\n service.profit_total = (service.sale_total - service.\n cost_total or 0.0)\n\n @api.onchange('cost_currency_rate')\n def _onchange_cost_currency_rate(self):\n for service in self:\n if service.product_id:\n service.cost_total = (service.cost_amount * service.\n cost_currency_rate or 0.0)\n service.profit_total = (service.sale_total - service.\n cost_total or 0.0)\n\n @api.onchange('product_id')\n def _onchange_product_id(self):\n if not self.product_id:\n return {'domain': {'uom_id': []}}\n vals = {}\n domain = {'uom_id': [('category_id', '=', self.product_id.uom_id.\n category_id.id)]}\n if not self.uom_id or self.product_id.uom_id.id != self.uom_id.id:\n vals['uom_id'] = self.product_id.uom_id\n vals['product_name'] = self.product_id.name\n self.update(vals)\n if self.product_id:\n self.update({'list_price': self.product_id.list_price or 0.0,\n 'cost_price': self.product_id.standard_price or 0.0})\n\n @api.onchange('vendor_id')\n def _onchange_vendor_id(self):\n print('OnChange Vendor_ID')\n if self.vendor_id:\n if not self.billed:\n self.billed = False\n print('Invoiced False')\n\n @api.multi\n @api.depends('billed')\n def _compute_is_billed(self):\n for cost_profit_line in self:\n if cost_profit_line.vendor_id:\n if cost_profit_line.billed:\n cost_profit_line.is_billed = 'Y'\n elif not cost_profit_line.billed:\n cost_profit_line.is_billed = 'N'\n\n @api.multi\n @api.depends('paid')\n def _compute_is_paid(self):\n for cost_profit_line in self:\n if cost_profit_line.vendor_id:\n if cost_profit_line.paid:\n cost_profit_line.is_paid = 
'Y'\n elif not cost_profit_line.paid:\n cost_profit_line.is_paid = 'N'\n",
"step-4": "<mask token>\n_logger = logging.getLogger(__name__)\n\n\nclass BillOfLading(models.Model):\n _name = 'freight.bol'\n _description = 'Bill Of Lading'\n _order = 'date_of_issue desc, write_date desc'\n _rec_name = 'bol_no'\n _inherit = ['mail.thread', 'mail.activity.mixin']\n bol_status = fields.Selection([('01', 'Draft'), ('02', 'Original'), (\n '03', 'Surrender'), ('04', 'Telex Release')], string='B/L Status',\n default='01', copy=False, track_visibility='onchange', store=True)\n service_type = fields.Selection([('ocean', 'Ocean'), ('air', 'Air'), (\n 'land', 'Land')], string='Shipment Mode', default='ocean',\n track_visibility='onchange')\n direction = fields.Selection([('import', 'Import'), ('export', 'Export'\n )], string='Direction', default='export', track_visibility='onchange')\n cargo_type = fields.Selection([('fcl', 'FCL'), ('lcl', 'LCL')], string=\n 'Cargo Type', default='fcl', track_visibility='onchange')\n type_of_movement = fields.Selection([('cy-cy', 'CY/CY'), ('cy-cfs',\n 'CY/CFS'), ('cfs-cfs', 'CFS/CFS'), ('cfs-cy', 'CFS/CY')], string=\n 'Type Of Movement', track_visibility='onchange')\n booking_ref = fields.Many2one('freight.booking', string=\n 'Booking Job Ref', track_visibility='onchange', copy=False, index=True)\n no_of_original_bl = fields.Selection([('0', '0'), ('1', '1'), ('3', '3'\n )], string='No Of original B/L', default='0', track_visibility=\n 'onchange')\n doc_form_no = fields.Char(string='Doc. Form No.', track_visibility=\n 'onchange')\n service_contract_no = fields.Char(string='Service Contract No',\n track_visibility='onchange')\n bol_no = fields.Char(string='HBL No', copy=False, readonly=True, index=True\n )\n carrier_booking_no = fields.Char(string='Carrier Booking No', copy=\n False, readonly=True)\n date_of_issue = fields.Date(string='Shipment Date', copy=False, default\n =datetime.now().date(), track_visibility='onchange', index=True)\n date_laden_on_board = fields.Date(string='Shipped on Board Date')\n place_of_issue = fields.Char(string='Place of Issue', track_visibility=\n 'onchange')\n export_reference = fields.Char(string='Export Reference',\n track_visibility='onchange')\n fa_reference = fields.Char(string='Forwarding Agent and References',\n track_visibility='onchange')\n point_country_origin = fields.Text(string='Point and Country of Origin',\n track_visibility='onchange')\n term = fields.Char(string='Term', track_visibility='onchange', help=\n 'eg, CY-CY')\n commodity = fields.Many2one('product.product', string='Commodity',\n track_visibility='onchange')\n commodity1 = fields.Many2one('freight.commodity1', string='Commodity',\n track_visibility='onchange')\n shipper_load = fields.Boolean('Shipper Load, Seal and Count')\n analytic_account_id = fields.Many2one('account.analytic.account',\n string='Analytic Account', track_visibility='always', copy=False)\n\n @api.multi\n def _get_default_commodity_category(self):\n commodity_lines = self.env['freight.product.category'].search([(\n 'type', '=ilike', 'commodity')])\n for commodity_line in commodity_lines:\n _logger.warning('_get_default_commodity_category=' + str(\n commodity_line.product_category))\n return commodity_line.product_category\n commodity_category_id = fields.Many2one('product.category', string=\n 'Commodity Product Id', default=_get_default_commodity_category)\n invoice_status = fields.Selection([('01', 'New'), ('02',\n 'Partially Invoiced'), ('03', 'Fully Invoiced')], string=\n 'Invoice Status', default='01', copy=False, track_visibility='onchange'\n )\n invoice_paid_status 
= fields.Selection([('01', 'New'), ('02',\n 'Partially Paid'), ('03', 'Fully Paid')], string=\n 'Invoice Paid Status', default='01', copy=False, track_visibility=\n 'onchange')\n customer_name = fields.Many2one('res.partner', string='Customer Name',\n track_visibility='onchange')\n contact_name = fields.Many2one('res.partner', string='Contact Name',\n track_visibility='onchange')\n shipper = fields.Text(string='Shipper', track_visibility='onchange',\n help='The Party who shipped the freight, eg Exporter')\n notify_party = fields.Text(string='Notify Party', help=\n 'The Party who will be notified by Liner when the freight arrived',\n track_visibility='onchange')\n carrier_c = fields.Many2one('res.partner', string='Carrier')\n consignee = fields.Text(string='Consignee', help=\n 'The Party who received the freight', track_visibility='onchange')\n routing_instruction = fields.Text(string=\n 'For Delivery Of Goods Please Apply To', track_visibility='onchange')\n delivery_contact = fields.Text(string='Contact for Delivery', help=\n 'Contact information for delivery of goods', track_visibility=\n 'onchange')\n unstuff_at = fields.Char(string='Unstuff At', track_visibility='onchange')\n voyage_no = fields.Char(string='Voyage No', track_visibility='onchange')\n vessel = fields.Char(string='Vessel Name', track_visibility='onchange')\n manifest_no = fields.Char(string='Manifest No', track_visibility='onchange'\n )\n port_of_loading_input = fields.Char(string='Port of Loading',\n track_visibility='onchange')\n port_of_discharge_input = fields.Char(string='Port of Discharge',\n track_visibility='onchange')\n port_of_discharge_eta = fields.Date(string='Loading ETA',\n track_visibility='onchange')\n place_of_delivery = fields.Char(string='Final Destination',\n track_visibility='onchange')\n place_of_receipt = fields.Char(string='Place of Receipt',\n track_visibility='onchange')\n pre_carriage_by = fields.Char(string='Pre-Carriage By',\n track_visibility='onchange')\n note = fields.Text(string='Remarks', track_visibility='onchange')\n sales_person = fields.Many2one('res.users', string='Salesperson',\n track_visibility='onchange')\n company_id = fields.Many2one('res.company', 'Company', required=True,\n index=True, readonly=1, default=lambda self: self.env.user.\n company_id.id)\n cargo_line_ids = fields.One2many('freight.bol.cargo', 'cargo_line',\n string='Cargo Line', copy=True, auto_join=True, track_visibility=\n 'always')\n charge_line_ids = fields.One2many('freight.bol.charge', 'charge_line',\n string='Charge Line', copy=True, auto_join=True, track_visibility=\n 'always')\n cost_profit_ids = fields.One2many('freight.bol.cost.profit', 'bol_id',\n string='Cost & Profit', copy=True, auto_join=True, track_visibility\n ='always')\n invoice_count = fields.Integer(string='Invoice Count', compute=\n '_get_invoiced_count', copy=False)\n vendor_bill_count = fields.Integer(string='Vendor Bill Count', compute=\n '_get_bill_count', copy=False)\n si_count = fields.Integer(string='SI Count', compute='_get_si_count',\n copy=False)\n shipper_c = fields.Many2one('res.partner', string='Shipper')\n consignee_c = fields.Many2one('res.partner', string='Consignee Name')\n notify_party_c = fields.Many2one('res.partner', string='Notify Party')\n total_no_of_packages_words = fields.Char(string='Total Packages',\n track_visibility='onchange', help=\n 'Total no of packages or container in Words')\n lines_description = fields.Integer()\n line_description1 = fields.Text()\n line_description2 = fields.Text()\n\n @api.model\n def 
create(self, vals):\n vals['bol_no'] = self.env['ir.sequence'].next_by_code('hbl')\n res = super(BillOfLading, self).create(vals)\n return res\n\n @api.multi\n def name_get(self):\n result = []\n for bol in self:\n name = str(bol.bol_no)\n result.append((bol.id, name))\n return result\n\n @api.multi\n def action_send_bl(self):\n self.ensure_one()\n ir_model_data = self.env['ir.model.data']\n try:\n template_id = ir_model_data.get_object_reference(\n 'sci_goexcel_freight', 'email_template_bol')[1]\n except ValueError:\n template_id = False\n try:\n compose_form_id = ir_model_data.get_object_reference('mail',\n 'email_compose_message_wizard_form')[1]\n except ValueError:\n compose_form_id = False\n ctx = {'default_model': 'freight.bol', 'default_res_id': self.ids[0\n ], 'default_use_template': bool(template_id),\n 'default_template_id': template_id, 'default_composition_mode':\n 'comment', 'mark_so_as_sent': True, 'custom_layout':\n 'mail.mail_notification_light', 'force_email': True}\n return {'type': 'ir.actions.act_window', 'view_type': 'form',\n 'view_mode': 'form', 'res_model': 'mail.compose.message',\n 'views': [(compose_form_id, 'form')], 'view_id':\n compose_form_id, 'target': 'new', 'context': ctx}\n\n @api.multi\n def action_invoice(self):\n self.ensure_one()\n view = self.env.ref('sci_goexcel_freight.invoice_view_form')\n return {'name': 'Create Invoice', 'type': 'ir.actions.act_window',\n 'view_type': 'form', 'view_mode': 'form', 'res_model':\n 'invoice.wizard', 'views': [(view.id, 'form')], 'view_id': view\n .id, 'target': 'new', 'context': dict(bl_id=self.id)}\n\n @api.multi\n def action_create_vendor_bill(self):\n vendor_po = self.cost_profit_ids.filtered(lambda c: c.vendor_id)\n po_lines = vendor_po.sorted(key=lambda p: p.vendor_id.id)\n vendor_count = False\n vendor_id = False\n if not self.analytic_account_id:\n values = {'name': '%s' % self.booking_ref.booking_no,\n 'partner_id': self.booking_ref.customer_name.id, 'code':\n self.bol_no, 'company_id': self.booking_ref.company_id.id}\n analytic_account = self.env['account.analytic.account'].sudo(\n ).create(values)\n self.booking_ref.write({'analytic_account_id': analytic_account.id}\n )\n self.write({'analytic_account_id': analytic_account.id})\n for line in po_lines:\n if line.vendor_id != vendor_id:\n vb = self.env['account.invoice']\n vendor_count = True\n vendor_id = line.vendor_id\n value = []\n vendor_bill_created = []\n filtered_vb_lines = po_lines.filtered(lambda r: r.vendor_id ==\n vendor_id)\n for vb_line in filtered_vb_lines:\n if not vb_line.billed:\n account_id = False\n price_after_converted = (vb_line.cost_price *\n vb_line.cost_currency_rate)\n if vb_line.product_id.property_account_expense_id:\n account_id = (vb_line.product_id.\n property_account_expense_id)\n elif vb_line.product_id.categ_id.property_account_expense_categ_id:\n account_id = (vb_line.product_id.categ_id.\n property_account_expense_categ_id)\n value.append([0, 0, {'account_id': account_id.id or\n False, 'name': vb_line.product_id.name or '',\n 'product_id': vb_line.product_id.id or False,\n 'quantity': vb_line.cost_qty or 0.0, 'uom_id': \n vb_line.uom_id.id or False, 'price_unit': \n price_after_converted or 0.0,\n 'account_analytic_id': self.analytic_account_id\n .id, 'bl_line_id': vb_line.id}])\n vendor_bill_created.append(vb_line)\n vb_line.billed = True\n if value:\n vendor_bill_id = vb.create({'type': 'in_invoice',\n 'invoice_line_ids': value, 'default_currency_id':\n self.env.user.company_id.currency_id.id,\n 'company_id': 
self.company_id.id, 'date_invoice':\n fields.Date.context_today(self), 'origin': self.\n bol_no, 'partner_id': vendor_id.id, 'account_id': \n vb_line.vendor_id.property_account_payable_id.id or\n False, 'freight_booking': self.booking_ref.id})\n for new_vendor_bill in vendor_bill_created:\n new_vendor_bill.vendor_bill_id = vendor_bill_id.id\n if vendor_count is False:\n raise exceptions.ValidationError('No Vendor in Cost & Profit!!!')\n\n def action_copy_to_booking(self):\n booking = self.env['freight.booking'].search([('id', '=', self.\n booking_ref.id)])\n booking_val = {'cargo_type': self.cargo_type or False,\n 'shipper_address_input': self.shipper,\n 'consignee_address_input': self.consignee,\n 'notify_party_address_input': self.notify_party,\n 'carrier_booking_no': self.carrier_booking_no or False,\n 'voyage_no': self.voyage_no, 'port_of_loading_input': self.\n port_of_loading_input, 'port_of_discharge_input': self.\n port_of_discharge_input, 'place_of_delivery': self.\n place_of_delivery, 'note': self.note, 'bol_status': self.\n bol_status, 'no_of_original_bl': self.no_of_original_bl,\n 'carrier': self.carrier_c.id}\n booking.sudo().write(booking_val)\n for booking_line in booking.operation_line_ids:\n booking_line.sudo().unlink()\n for booking_line in booking.operation_line_ids2:\n booking_line.sudo().unlink()\n for line in self.cargo_line_ids:\n if self.cargo_type == 'fcl':\n if line.container_product_name:\n operation_line_obj = self.env['freight.operations.line']\n op_line = operation_line_obj.create({'operation_id':\n booking.id, 'container_no': line.container_no or '',\n 'container_product_id': line.container_product_id.\n id or False, 'seal_no': line.seal_no or '',\n 'container_product_name': line.\n container_product_name or '', 'packages_no': line.\n packages_no_value or '', 'packages_no_uom': line.\n packages_no_uom.id, 'exp_net_weight': line.\n exp_net_weight or '', 'exp_gross_weight': line.\n exp_gross_weight or '', 'dim_length': line.\n dim_length or '', 'dim_width': line.dim_width or '',\n 'dim_height': line.dim_height or '', 'exp_vol': \n line.exp_vol or '', 'remark': line.marks or ''})\n booking.operation_line_ids = op_line\n elif line.container_product_name:\n operation_line_obj = self.env['freight.operations.line2']\n op_line = operation_line_obj.create({'operation_id2':\n booking.id, 'container_no': line.container_no or '',\n 'container_product_id': line.container_product_id.id or\n False, 'seal_no': line.seal_no or '',\n 'container_product_name': line.container_product_name or\n '', 'packages_no': line.packages_no_value or '',\n 'packages_no_uom': line.packages_no_uom.id,\n 'exp_net_weight': line.exp_net_weight or '',\n 'exp_gross_weight': line.exp_gross_weight or '',\n 'dim_length': line.dim_length or '', 'dim_width': line.\n dim_width or '', 'dim_height': line.dim_height or '',\n 'exp_vol': line.exp_vol or '', 'shipping_mark': line.\n marks or ''})\n booking.operation_line_ids2 = op_line\n\n def action_copy_from_booking(self):\n booking = self.env['freight.booking'].search([('id', '=', self.\n booking_ref.id)])\n for line in booking.cost_profit_ids:\n operation_line_obj = self.env['freight.bol.cost.profit']\n op_line = operation_line_obj.create({'bol_id': self.id,\n 'product_id': line.product_id.id or False, 'product_name': \n line.product_name or '', 'profit_qty': line.profit_qty or 0,\n 'list_price': line.list_price or 0, 'profit_amount': line.\n profit_amount or 0, 'profit_currency': line.profit_currency\n .id or False, 'profit_currency_rate': line.\n 
profit_currency_rate or 0, 'cost_qty': line.cost_qty or 0,\n 'cost_price': line.cost_price or 0, 'cost_amount': line.\n cost_amount or 0, 'vendor_id': line.vendor_id.id or False,\n 'cost_currency': line.cost_currency.id or False,\n 'cost_currency_rate': line.cost_currency_rate or 0})\n\n def action_create_si(self):\n si_obj = self.env['freight.website.si']\n si_val = {'si_status': '01', 'carrier': self.carrier_c.id or False,\n 'direction': self.direction or False, 'cargo_type': self.\n cargo_type or False, 'service_type': self.service_type or False,\n 'customer_name': self.customer_name.id or False, 'shipper':\n self.shipper, 'consignee': self.consignee, 'notify_party': self\n .notify_party, 'carrier_booking_ref': self.carrier_booking_no,\n 'voyage_no': self.voyage_no, 'port_of_loading_input': self.\n port_of_loading_input, 'port_of_discharge_input': self.\n port_of_discharge_input, 'place_of_delivery': self.\n place_of_delivery, 'bl_ref': self.id}\n si = si_obj.create(si_val)\n if self.cargo_type == 'fcl':\n container_line = self.cargo_line_ids\n si_line_obj = self.env['freight.website.si.fcl']\n for line in container_line:\n if line.container_product_id or line.container_no:\n si_line = si_line_obj.create({'container_product_id': \n line.container_product_id.id or False,\n 'container_product_name': line.\n container_product_name or False, 'fcl_line': si.id or\n '', 'container_no': line.container_no or '',\n 'packages_no': line.packages_no_value or 0.0,\n 'packages_no_uom': line.packages_no_uom.id,\n 'exp_gross_weight': line.exp_gross_weight or 0.0,\n 'exp_vol': line.exp_vol or 0.0})\n si.write({'fcl_line_ids': si_line or False})\n else:\n container_line = self.cargo_line_ids\n si_line_obj = self.env['freight.website.si.lcl']\n for line in container_line:\n if line.container_product_id or line.container_no:\n si_line = si_line_obj.create({'container_product_name':\n line.container_product_name or False, 'lcl_line': \n si.id or '', 'container_no': line.container_no or\n '', 'packages_no': line.packages_no_value or 0.0,\n 'packages_no_uom': line.packages_no_uom.id,\n 'exp_gross_weight': line.exp_gross_weight or 0.0,\n 'exp_net_weight': line.exp_net_weight or 0.0,\n 'exp_vol': line.exp_vol or 0.0})\n si.write({'lcl_line_ids': si_line or False})\n\n @api.multi\n def operation_invoices(self):\n \"\"\"Show Invoice for specific Freight Operation smart Button.\"\"\"\n for operation in self:\n invoices = self.env['account.invoice'].search([('freight_hbl',\n '=', operation.id), ('type', 'in', ['out_invoice',\n 'out_refund']), ('state', '!=', 'cancel')])\n action = self.env.ref('account.action_invoice_tree1').read()[0]\n if len(invoices) > 1:\n action['domain'] = [('id', 'in', invoices.ids)]\n elif len(invoices) == 1:\n action['views'] = [(self.env.ref('account.invoice_form').id,\n 'form')]\n action['res_id'] = invoices.ids[0]\n else:\n action = {'type': 'ir.actions.act_window_close'}\n return action\n\n @api.multi\n def operation_bill(self):\n for operation in self:\n vendor_bill_list = []\n for cost_profit_line in operation.cost_profit_ids:\n for vendor_bill_line in cost_profit_line.vendor_bill_ids:\n if vendor_bill_line.type in ['in_invoice', 'in_refund']:\n vendor_bill_list.append(vendor_bill_line.id)\n invoices = self.env['account.invoice'].search([('freight_hbl',\n '=', operation.id), ('type', 'in', ['in_invoice',\n 'in_refund']), ('state', '!=', 'cancel')])\n invoice_name_list = []\n for x in invoices:\n invoice_name_list.append(x.id)\n unique_list = []\n for y in vendor_bill_list:\n if 
invoice_name_list and len(invoice_name_list) > 0:\n if y not in invoice_name_list:\n unique_list.append(y)\n else:\n unique_list.append(y)\n for z in invoice_name_list:\n unique_list.append(z)\n if len(unique_list) > 1:\n views = [(self.env.ref('account.invoice_supplier_tree').id,\n 'tree'), (self.env.ref('account.invoice_supplier_form').id,\n 'form')]\n return {'name': 'Vendor bills', 'view_type': 'form',\n 'view_mode': 'tree,form', 'view_id': False, 'res_model':\n 'account.invoice', 'views': views, 'domain': [('id', 'in',\n unique_list)], 'type': 'ir.actions.act_window'}\n elif len(unique_list) == 1:\n return {'view_type': 'form', 'view_mode': 'form', 'res_model':\n 'account.invoice', 'res_id': unique_list[0] or False,\n 'type': 'ir.actions.act_window', 'target': 'popup'}\n\n @api.multi\n def operation_si(self):\n for operation in self:\n si = self.env['freight.website.si'].search([('bl_ref', '=',\n operation.id)])\n if len(si) > 1:\n views = [(self.env.ref('sci_goexcel_freight.view_tree_si').id,\n 'tree'), (self.env.ref('sci_goexcel_freight.view_form_si').\n id, 'form')]\n return {'name': 'Shipping Instruction', 'view_type': 'form',\n 'view_mode': 'tree,form', 'view_id': False, 'res_model':\n 'freight.website.si', 'views': views, 'domain': [('id',\n 'in', si.ids)], 'type': 'ir.actions.act_window'}\n elif len(si) == 1:\n return {'view_type': 'form', 'view_mode': 'form', 'res_model':\n 'freight.website.si', 'res_id': si.id or False, 'type':\n 'ir.actions.act_window', 'target': 'popup'}\n else:\n action = {'type': 'ir.actions.act_window_close'}\n return action\n\n def _get_invoiced_count(self):\n for operation in self:\n invoices = self.env['account.invoice'].search([('freight_hbl',\n '=', operation.id), ('type', 'in', ['out_invoice',\n 'out_refund']), ('state', '!=', 'cancel')])\n self.update({'invoice_count': len(invoices)})\n\n def _get_bill_count(self):\n for operation in self:\n vendor_bill_list = []\n for cost_profit_line in operation.cost_profit_ids:\n for vendor_bill_line in cost_profit_line.vendor_bill_ids:\n if vendor_bill_line.type in ['in_invoice', 'in_refund']:\n vendor_bill_list.append(vendor_bill_line.id)\n unique_vendor_bill_list = []\n for i in vendor_bill_list:\n if i not in unique_vendor_bill_list:\n unique_vendor_bill_list.append(i)\n invoices = self.env['account.invoice'].search([('freight_hbl',\n '=', operation.id), ('type', 'in', ['in_invoice',\n 'in_refund']), ('state', '!=', 'cancel')])\n invoice_name_list = []\n for x in invoices:\n invoice_name_list.append(x.id)\n unique_list = []\n for y in unique_vendor_bill_list:\n if invoice_name_list and len(invoice_name_list) > 0:\n if y not in invoice_name_list:\n unique_list.append(y)\n else:\n unique_list.append(y)\n for z in invoice_name_list:\n unique_list.append(z)\n if len(unique_list) > 0:\n self.update({'vendor_bill_count': len(unique_list)})\n\n def _get_si_count(self):\n for operation in self:\n si = self.env['freight.website.si'].search([('bl_ref', '=',\n operation.id)])\n self.update({'si_count': len(si)})\n purchase_receipt_count = fields.Integer(string='Purchase Receipt Count',\n compute='_get_pr_count', copy=False)\n\n def _get_pr_count(self):\n for operation in self:\n pr_lines = self.env['account.voucher.line'].search([(\n 'freight_hbl', '=', operation.id)])\n pr_list = []\n for pr_line in pr_lines:\n if (pr_line.voucher_id.state != 'cancel' and pr_line.\n voucher_id.voucher_type == 'purchase'):\n pr_list.append(pr_line.voucher_id.id)\n unique_list = []\n for i in pr_list:\n if i not in unique_list:\n 
unique_list.append(i)\n if len(unique_list) > 0:\n self.update({'purchase_receipt_count': len(unique_list)})\n\n @api.multi\n def operation_pr(self):\n for operation in self:\n for operation in self:\n pr_lines = self.env['account.voucher.line'].search([(\n 'freight_hbl', '=', operation.id)])\n pr_list = []\n for pr_line in pr_lines:\n if (pr_line.voucher_id.state != 'cancel' and pr_line.\n voucher_id.voucher_type == 'purchase'):\n pr_list.append(pr_line.voucher_id.id)\n unique_list = []\n for i in pr_list:\n if i not in unique_list:\n unique_list.append(i)\n if len(unique_list) > 1:\n views = [(self.env.ref('account_voucher.view_voucher_tree').id,\n 'tree'), (self.env.ref(\n 'account_voucher.view_purchase_receipt_form').id, 'form')]\n return {'name': 'Purchase Receipt', 'view_type': 'form',\n 'view_mode': 'tree,form', 'view_id': False, 'res_model':\n 'account.voucher', 'views': views, 'domain': [('id', 'in',\n unique_list)], 'type': 'ir.actions.act_window'}\n elif len(unique_list) == 1:\n return {'view_type': 'form', 'view_mode': 'form', 'res_model':\n 'account.voucher', 'res_id': unique_list[0] or False,\n 'type': 'ir.actions.act_window', 'target': 'popup'}\n\n\nclass CargoLine(models.Model):\n _name = 'freight.bol.cargo'\n _description = 'Cargo Line'\n cargo_line = fields.Many2one('freight.bol', string='Cargo Line',\n required=True, ondelete='cascade', index=True, copy=False)\n sequence = fields.Integer(string='sequence')\n marks = fields.Text(string='Marks and Numbers')\n container_no = fields.Char(string='Container No.')\n container_product_id = fields.Many2one('product.product', string=\n 'Container', track_visibility='onchange')\n seal_no = fields.Char(string='Seal No.')\n container_product_name = fields.Text(string='Description of Goods')\n packages_no_value = fields.Integer(string='No. of Packages')\n packages_no_uom = fields.Many2one('uom.uom', string='UoM')\n exp_net_weight = fields.Float(string='Net Weight(KG)', help=\n 'Expected Weight in kg.', track_visibility='onchange')\n exp_gross_weight = fields.Float(string='Gross Weight(KG)', digits=(12, \n 4), help='Expected Weight in kg.')\n dim_length = fields.Float(string='Length', help='Length in cm', default\n ='0.00', track_visibility='onchange')\n dim_width = fields.Float(string='Width', default='0.00', help=\n 'Width in cm', track_visibility='onchange')\n dim_height = fields.Float(string='Height', default='0.00', help=\n 'Height in cm', track_visibility='onchange')\n exp_vol = fields.Float(string='Measurement (M3)', digits=(12, 4), help=\n 'Expected Volume in m3 Measure')\n packages_no = fields.Char(string='No. 
of Packages')\n\n    @api.multi\n    def _get_default_container_category(self):\n        container_lines = self.env['freight.product.category'].search([(\n            'type', '=ilike', 'container')])\n        for container_line in container_lines:\n            return container_line.product_category\n    container_category_id = fields.Many2one('product.category', string=\n        'Container Product Id', default=_get_default_container_category)\n\n    @api.onchange('container_product_name')\n    def _onchange_description(self):\n        bl = self.env['freight.bol'].search([('bol_no', '=', self.\n            cargo_line.bol_no)])\n        if self.container_product_name:\n            lines_description = self.container_product_name.count('\\n') / 20\n            lines_description = math.ceil(lines_description)\n            x = self.container_product_name.split('\\n')\n            count = 0\n            line_description1 = ''\n            line_description2 = ''\n            for line in x:\n                if count < 20:\n                    line_description1 = line_description1 + line + '\\n'\n                    count = count + 1\n                else:\n                    line_description2 = line_description2 + line + '\\n'\n                    count = count + 1\n            bl.write({'lines_description': lines_description,\n                'line_description1': line_description1, 'line_description2':\n                line_description2})\n\n    @api.model\n    def create(self, vals):\n        res = super(CargoLine, self).create(vals)\n        content = ''\n        if vals.get('marks'):\n            content = content + ' • Marks and Numbers: ' + str(vals.get(\n                'marks')) + '<br/>'\n        if vals.get('container_product_name'):\n            content = content + ' • Description of Goods: ' + str(vals.get\n                ('container_product_name')) + '<br/>'\n        if vals.get('packages_no'):\n            content = content + ' • No. of Packages: ' + str(vals.get(\n                'packages_no')) + '<br/>'\n        if vals.get('seal_no'):\n            content = content + ' • Seal no: ' + str(vals.get('seal_no')\n                ) + '<br/>'\n        if vals.get('container_no'):\n            content = content + ' • Container No.: ' + str(vals.get(\n                'container_no')) + '<br/>'\n        if vals.get('exp_gross_weight'):\n            content = content + ' • Gross Weight(KG): ' + str(vals.get(\n                'exp_gross_weight')) + '<br/>'\n        if vals.get('exp_vol'):\n            content = content + ' • Measurement (M3): ' + str(vals.get(\n                'exp_vol')) + '<br/>'\n        res.cargo_line.message_post(body=content)\n        return res\n\n    @api.multi\n    def write(self, vals):\n        res = super(CargoLine, self).write(vals)\n        content = ''\n        if vals.get('marks'):\n            content = content + ' • Marks and Numbers: ' + str(vals.get(\n                'marks')) + '<br/>'\n        if vals.get('container_product_name'):\n            content = content + ' • Description of Goods: ' + str(vals.get\n                ('container_product_name')) + '<br/>'\n        if vals.get('packages_no'):\n            content = content + ' • No. 
of Packages: ' + str(vals.get(\n                'packages_no')) + '<br/>'\n        if vals.get('seal_no'):\n            content = content + ' • Seal no: ' + str(vals.get('seal_no')\n                ) + '<br/>'\n        if vals.get('container_no'):\n            content = content + ' • Container No.: ' + str(vals.get(\n                'container_no')) + '<br/>'\n        if vals.get('exp_gross_weight'):\n            content = content + ' • Gross Weight(KG): ' + str(vals.get(\n                'exp_gross_weight')) + '<br/>'\n        if vals.get('exp_vol'):\n            content = content + ' • Measurement (M3): ' + str(vals.get(\n                'exp_vol')) + '<br/>'\n        self.cargo_line.message_post(body=content)\n        return res\n\n\nclass ChargeLine(models.Model):\n    _name = 'freight.bol.charge'\n    _description = 'Charge Line'\n    charge_line = fields.Many2one('freight.bol', string='Charge Line',\n        required=True, ondelete='cascade', index=True, copy=False)\n    sequence = fields.Integer(string='sequence')\n    freight_charges = fields.Text(string='Freight & Charges')\n    rate = fields.Char(string='Rate')\n    per = fields.Char(string='Per')\n    amount = fields.Char(string='Amount')\n    prepaid = fields.Char(string='Prepaid')\n    collect = fields.Char(string='Collect')\n    payable_at_by = fields.Char(string='Payable at/by')\n    revenue_tons = fields.Char(string='Revenue Tons')\n\n    @api.model\n    def create(self, vals):\n        res = super(ChargeLine, self).create(vals)\n        content = ''\n        if vals.get('freight_charges'):\n            content = content + ' • Freight & Charges: ' + str(vals.get(\n                'freight_charges')) + '<br/>'\n        if vals.get('revenue_tons'):\n            content = content + ' • Revenue Tons: ' + str(vals.get(\n                'revenue_tons')) + '<br/>'\n        if vals.get('rate'):\n            content = content + ' • Rate: ' + str(vals.get('rate')) + '<br/>'\n        if vals.get('per'):\n            content = content + ' • Per: ' + str(vals.get('per')) + '<br/>'\n        if vals.get('amount'):\n            content = content + ' • Amount: ' + str(vals.get('amount')\n                ) + '<br/>'\n        if vals.get('prepaid'):\n            content = content + ' • Prepaid: ' + str(vals.get('prepaid')\n                ) + '<br/>'\n        if vals.get('collect'):\n            content = content + ' • Collect: ' + str(vals.get('collect')\n                ) + '<br/>'\n        if vals.get('payable_at_by'):\n            content = content + ' • Payable at/by: ' + str(vals.get(\n                'payable_at_by')) + '<br/>'\n        res.charge_line.message_post(body=content)\n        return res\n\n    @api.multi\n    def write(self, vals):\n        res = super(ChargeLine, self).write(vals)\n        content = ''\n        if vals.get('freight_charges'):\n            content = content + ' • Freight & Charges: ' + str(vals.get(\n                'freight_charges')) + '<br/>'\n        if vals.get('revenue_tons'):\n            content = content + ' • Revenue Tons: ' + str(vals.get(\n                'revenue_tons')) + '<br/>'\n        if vals.get('rate'):\n            content = content + ' • Rate: ' + str(vals.get('rate')) + '<br/>'\n        if vals.get('per'):\n            content = content + ' • Per: ' + str(vals.get('per')) + '<br/>'\n        if vals.get('amount'):\n            content = content + ' • Amount: ' + str(vals.get('amount')\n                ) + '<br/>'\n        if vals.get('prepaid'):\n            content = content + ' • Prepaid: ' + str(vals.get('prepaid')\n                ) + '<br/>'\n        if vals.get('collect'):\n            content = content + ' • Collect: ' + str(vals.get('collect')\n                ) + '<br/>'\n        if vals.get('payable_at_by'):\n            content = content + ' • Payable at/by: ' + str(vals.get(\n                'payable_at_by')) + '<br/>'\n        self.charge_line.message_post(body=content)\n        return res\n\n\nclass CostProfit(models.Model):\n    _name = 'freight.bol.cost.profit'\n    _description = 'BOL Cost & Profit'\n    sequence = fields.Integer(string='sequence')\n    bol_id = fields.Many2one('freight.bol', string='BOL ID', required=True,\n        ondelete='cascade', index=True, copy=False)\n    product_id = 
fields.Many2one('product.product', string='Product')\n product_name = fields.Text(string='Description')\n list_price = fields.Float(string='Unit Price')\n uom_id = fields.Many2one('uom.uom', string='UoM')\n profit_gst = fields.Selection([('zer', 'ZER')], string='GST', default=\n 'zer', track_visibility='onchange')\n tax_id = fields.Many2many('account.tax', string='Taxes', domain=['|', (\n 'active', '=', False), ('active', '=', True)])\n profit_currency = fields.Many2one('res.currency', 'Currency', default=\n lambda self: self.env.user.company_id.currency_id.id,\n track_visibility='onchange')\n profit_currency_rate = fields.Float(string='Rate', default='1.00',\n track_visibility='onchange')\n profit_amount = fields.Float(string='Amt', compute=\n '_compute_profit_amount', store=True, track_visibility='onchange')\n sale_total = fields.Float(string='Total Sales', compute=\n '_compute_sale_total', store=True, track_visibility='onchange')\n profit_qty = fields.Float(string='Qty', default='1.000', digit=(12, 3))\n cost_qty = fields.Float(string='Qty', default='1.000', digit=(12, 3))\n cost_price = fields.Float(string='Unit Price', track_visibility='onchange')\n cost_gst = fields.Selection([('zer', 'ZER')], string='Tax', default=\n 'zer', track_visibility='onchange')\n vendor_id = fields.Many2one('res.partner', string='Vendor',\n track_visibility='onchange')\n vendor_bill_id = fields.Many2one('account.invoice', string='Vendor Bill')\n cost_currency = fields.Many2one('res.currency', string='Curr', required\n =True, default=lambda self: self.env.user.company_id.currency_id.id,\n track_visibility='onchange')\n cost_currency_rate = fields.Float(string='Rate', default='1.00',\n track_visibility='onchange')\n cost_amount = fields.Float(string='Amt', compute='_compute_cost_amount',\n store=True, track_visibility='onchange')\n cost_total = fields.Float(string='Total Cost', compute=\n '_compute_cost_total', store=True, track_visibility='onchange')\n billed = fields.Boolean(string='Billed', copy=False)\n is_billed = fields.Char('Is Biiled?', compute='_compute_is_billed',\n store=True)\n added_to_invoice = fields.Boolean(string='Invoiced', copy=False)\n invoice_paid = fields.Boolean(string='Invoice Paid', copy=False)\n paid = fields.Boolean(string='Paid', copy=False)\n is_paid = fields.Char('Is Paid?', compute='_compute_is_paid', store=True)\n invoice_id = fields.Many2one('account.invoice', string='Invoice')\n inv_line_id = fields.Many2one('account.invoice.line', string='Invoice Line'\n )\n bill_id = fields.Many2one('account.invoice', string='Bill')\n bill_line_id = fields.Many2one('account.invoice.line', string='Bill Line')\n route_service = fields.Boolean(string='Is Route Service', default=False)\n profit_total = fields.Float(string='Total Profit', compute=\n '_compute_profit_total', store=True)\n margin_total = fields.Float(string='Margin %', compute=\n '_compute_margin_total', digits=(8, 2), store=True, group_operator=\n 'avg')\n vendor_id_ids = fields.Many2many('res.partner', string='Vendor List',\n copy=False)\n vendor_bill_ids = fields.Many2many('account.invoice', string=\n 'Vendor Bill List', copy=False)\n\n @api.one\n def _set_access_for_invoiced(self):\n if self.env['res.users'].has_group('account.group_account_manager'):\n self.invoiced_readonly = False\n else:\n self.invoiced_readonly = True\n invoiced_readonly = fields.Boolean(compute='_set_access_for_invoiced',\n string='Is user able to modify invoiced?')\n\n @api.depends('profit_qty', 'list_price')\n def _compute_profit_amount(self):\n for 
service in self:\n if service.product_id:\n service.profit_amount = (service.profit_qty * service.\n list_price or 0.0)\n\n @api.depends('cost_qty', 'cost_price')\n def _compute_cost_amount(self):\n for service in self:\n if service.product_id:\n service.cost_amount = (service.cost_qty * service.\n cost_price or 0.0)\n\n @api.depends('profit_amount', 'profit_currency_rate')\n def _compute_sale_total(self):\n for service in self:\n if service.product_id:\n service.sale_total = (service.profit_amount * service.\n profit_currency_rate or 0.0)\n\n @api.onchange('profit_currency_rate')\n def _onchange_profit_currency_rate(self):\n for service in self:\n if service.product_id:\n service.sale_total = (service.profit_amount * service.\n profit_currency_rate or 0.0)\n\n @api.onchange('profit_amount')\n def _onchange_profit_amount(self):\n for service in self:\n if service.product_id:\n service.sale_total = (service.profit_amount * service.\n profit_currency_rate or 0.0)\n service.profit_total = (service.sale_total - service.\n cost_total or 0.0)\n\n @api.depends('cost_amount', 'cost_currency_rate')\n def _compute_cost_total(self):\n for service in self:\n if service.product_id:\n service.cost_total = (service.cost_amount * service.\n cost_currency_rate or 0.0)\n service.profit_total = (service.sale_total - service.\n cost_total or 0.0)\n\n @api.depends('cost_total', 'sale_total')\n def _compute_profit_total(self):\n for service in self:\n if service.product_id:\n service.profit_total = (service.sale_total - service.\n cost_total or 0.0)\n\n @api.depends('profit_total', 'sale_total')\n def _compute_margin_total(self):\n for service in self:\n if service.product_id:\n if service.sale_total > 0:\n service.margin_total = (service.profit_total / service.\n sale_total * 100)\n\n @api.onchange('cost_amount')\n def _onchange_cost_amount(self):\n for service in self:\n if service.product_id:\n service.cost_total = (service.cost_amount * service.\n cost_currency_rate or 0.0)\n service.profit_total = (service.sale_total - service.\n cost_total or 0.0)\n\n @api.onchange('cost_currency_rate')\n def _onchange_cost_currency_rate(self):\n for service in self:\n if service.product_id:\n service.cost_total = (service.cost_amount * service.\n cost_currency_rate or 0.0)\n service.profit_total = (service.sale_total - service.\n cost_total or 0.0)\n\n @api.onchange('product_id')\n def _onchange_product_id(self):\n if not self.product_id:\n return {'domain': {'uom_id': []}}\n vals = {}\n domain = {'uom_id': [('category_id', '=', self.product_id.uom_id.\n category_id.id)]}\n if not self.uom_id or self.product_id.uom_id.id != self.uom_id.id:\n vals['uom_id'] = self.product_id.uom_id\n vals['product_name'] = self.product_id.name\n self.update(vals)\n if self.product_id:\n self.update({'list_price': self.product_id.list_price or 0.0,\n 'cost_price': self.product_id.standard_price or 0.0})\n\n @api.onchange('vendor_id')\n def _onchange_vendor_id(self):\n print('OnChange Vendor_ID')\n if self.vendor_id:\n if not self.billed:\n self.billed = False\n print('Invoiced False')\n\n @api.multi\n @api.depends('billed')\n def _compute_is_billed(self):\n for cost_profit_line in self:\n if cost_profit_line.vendor_id:\n if cost_profit_line.billed:\n cost_profit_line.is_billed = 'Y'\n elif not cost_profit_line.billed:\n cost_profit_line.is_billed = 'N'\n\n @api.multi\n @api.depends('paid')\n def _compute_is_paid(self):\n for cost_profit_line in self:\n if cost_profit_line.vendor_id:\n if cost_profit_line.paid:\n cost_profit_line.is_paid = 
'Y'\n elif not cost_profit_line.paid:\n cost_profit_line.is_paid = 'N'\n",
"step-5": "from odoo import models, fields, api\r\nfrom datetime import datetime, timedelta\r\nfrom odoo import exceptions\r\nimport logging\r\nimport math\r\n\r\n_logger = logging.getLogger(__name__)\r\n\r\n\r\nclass BillOfLading(models.Model):\r\n _name = 'freight.bol'\r\n _description = 'Bill Of Lading'\r\n _order = 'date_of_issue desc, write_date desc'\r\n _rec_name = 'bol_no'\r\n\r\n _inherit = ['mail.thread', 'mail.activity.mixin']\r\n\r\n # Header\r\n bol_status = fields.Selection([('01', 'Draft'), ('02', 'Original'), ('03', 'Surrender'), ('04', 'Telex Release')],\r\n string=\"B/L Status\", default=\"01\", copy=False, track_visibility='onchange', store=True)\r\n service_type = fields.Selection([('ocean', 'Ocean'), ('air', 'Air'), ('land', 'Land')], string=\"Shipment Mode\",\r\n default=\"ocean\", track_visibility='onchange')\r\n direction = fields.Selection([('import', 'Import'), ('export', 'Export')], string=\"Direction\", default=\"export\",\r\n track_visibility='onchange')\r\n cargo_type = fields.Selection([('fcl', 'FCL'), ('lcl', 'LCL')], string='Cargo Type', default=\"fcl\",\r\n track_visibility='onchange')\r\n type_of_movement = fields.Selection([('cy-cy', 'CY/CY'), ('cy-cfs', 'CY/CFS'), ('cfs-cfs', 'CFS/CFS'), ('cfs-cy', 'CFS/CY')],\r\n string='Type Of Movement', track_visibility='onchange')\r\n booking_ref = fields.Many2one('freight.booking', string='Booking Job Ref', track_visibility='onchange',\r\n copy=False, index=True)\r\n no_of_original_bl = fields.Selection([('0', '0'), ('1', '1'), ('3', '3')], string=\"No Of original B/L\",\r\n default=\"0\", track_visibility='onchange')\r\n doc_form_no = fields.Char(string='Doc. Form No.', track_visibility='onchange')\r\n service_contract_no = fields.Char(string='Service Contract No', track_visibility='onchange')\r\n bol_no = fields.Char(string='HBL No', copy=False, readonly=True, index=True)\r\n carrier_booking_no = fields.Char(string='Carrier Booking No', copy=False, readonly=True)\r\n date_of_issue = fields.Date(string='Shipment Date', copy=False, default=datetime.now().date(),\r\n track_visibility='onchange', index=True)\r\n date_laden_on_board = fields.Date(string='Shipped on Board Date')\r\n place_of_issue = fields.Char(string='Place of Issue', track_visibility='onchange')\r\n export_reference = fields.Char(string='Export Reference', track_visibility='onchange')\r\n fa_reference = fields.Char(string='Forwarding Agent and References', track_visibility='onchange')\r\n point_country_origin = fields.Text(string='Point and Country of Origin', track_visibility='onchange')\r\n term = fields.Char(string='Term', track_visibility='onchange', help='eg, CY-CY')\r\n commodity = fields.Many2one('product.product', string='Commodity', track_visibility='onchange')\r\n commodity1 = fields.Many2one('freight.commodity1', string='Commodity', track_visibility='onchange')\r\n shipper_load = fields.Boolean('Shipper Load, Seal and Count')\r\n analytic_account_id = fields.Many2one('account.analytic.account', string=\"Analytic Account\",\r\n track_visibility='always', copy=False)\r\n\r\n\r\n @api.multi\r\n def _get_default_commodity_category(self):\r\n commodity_lines = self.env['freight.product.category'].search([('type', '=ilike', 'commodity')])\r\n for commodity_line in commodity_lines:\r\n _logger.warning('_get_default_commodity_category=' + str(commodity_line.product_category))\r\n return commodity_line.product_category\r\n\r\n commodity_category_id = fields.Many2one('product.category', string=\"Commodity Product Id\",\r\n 
default=_get_default_commodity_category)\r\n\r\n # Invoice Status\r\n invoice_status = fields.Selection([('01', 'New'),\r\n ('02', 'Partially Invoiced'),\r\n ('03', 'Fully Invoiced')],\r\n string=\"Invoice Status\", default=\"01\", copy=False,\r\n track_visibility='onchange')\r\n invoice_paid_status = fields.Selection([('01', 'New'),\r\n ('02', 'Partially Paid'),\r\n ('03', 'Fully Paid')],\r\n string=\"Invoice Paid Status\", default=\"01\", copy=False,\r\n track_visibility='onchange')\r\n\r\n # Party Info\r\n customer_name = fields.Many2one('res.partner', string='Customer Name', track_visibility='onchange')\r\n contact_name = fields.Many2one('res.partner', string='Contact Name', track_visibility='onchange')\r\n shipper = fields.Text(string='Shipper', track_visibility='onchange',\r\n help=\"The Party who shipped the freight, eg Exporter\")\r\n notify_party = fields.Text(string='Notify Party',\r\n help=\"The Party who will be notified by Liner when the freight arrived\",\r\n track_visibility='onchange')\r\n carrier_c = fields.Many2one('res.partner', string=\"Carrier\")\r\n consignee = fields.Text(string='Consignee', help=\"The Party who received the freight\", track_visibility='onchange')\r\n routing_instruction = fields.Text(string='For Delivery Of Goods Please Apply To', track_visibility='onchange')\r\n delivery_contact = fields.Text(string='Contact for Delivery', help=\"Contact information for delivery of goods\",\r\n track_visibility='onchange')\r\n unstuff_at = fields.Char(string='Unstuff At', track_visibility='onchange')\r\n\r\n # Shipment Info\r\n voyage_no = fields.Char(string='Voyage No', track_visibility='onchange')\r\n vessel = fields.Char(string='Vessel Name', track_visibility='onchange')\r\n manifest_no = fields.Char(string='Manifest No', track_visibility='onchange')\r\n\r\n port_of_loading_input = fields.Char(string='Port of Loading', track_visibility='onchange')\r\n port_of_discharge_input = fields.Char(string='Port of Discharge', track_visibility='onchange')\r\n port_of_discharge_eta = fields.Date(string='Loading ETA', track_visibility='onchange')\r\n place_of_delivery = fields.Char(string='Final Destination', track_visibility='onchange')\r\n place_of_receipt = fields.Char(string='Place of Receipt', track_visibility='onchange')\r\n pre_carriage_by = fields.Char(string='Pre-Carriage By', track_visibility='onchange')\r\n\r\n # Remark\r\n note = fields.Text(string='Remarks', track_visibility='onchange')\r\n\r\n # System Info\r\n sales_person = fields.Many2one('res.users', string=\"Salesperson\", track_visibility='onchange')\r\n company_id = fields.Many2one('res.company', 'Company', required=True, index=True, readonly=1,\r\n default=lambda self: self.env.user.company_id.id)\r\n # analytic_account_id = fields.Many2one('account.analytic.account', string=\"Analytic Account\",\r\n # track_visibility='always', copy=False)\r\n\r\n # Line Item\r\n cargo_line_ids = fields.One2many('freight.bol.cargo', 'cargo_line', string=\"Cargo Line\",\r\n copy=True, auto_join=True, track_visibility='always')\r\n charge_line_ids = fields.One2many('freight.bol.charge', 'charge_line', string=\"Charge Line\",\r\n copy=True, auto_join=True, track_visibility='always')\r\n cost_profit_ids = fields.One2many('freight.bol.cost.profit', 'bol_id', string=\"Cost & Profit\",\r\n copy=True, auto_join=True, track_visibility='always')\r\n\r\n # Not Used\r\n invoice_count = fields.Integer(string='Invoice Count', compute='_get_invoiced_count', copy=False)\r\n vendor_bill_count = fields.Integer(string='Vendor Bill 
Count', compute='_get_bill_count', copy=False)\r\n\r\n si_count = fields.Integer(string='SI Count', compute='_get_si_count', copy=False)\r\n\r\n shipper_c = fields.Many2one('res.partner', string='Shipper')\r\n consignee_c = fields.Many2one('res.partner', string='Consignee Name')\r\n notify_party_c = fields.Many2one('res.partner', string='Notify Party')\r\n\r\n total_no_of_packages_words = fields.Char(string='Total Packages', track_visibility='onchange',\r\n help='Total no of packages or container in Words')\r\n lines_description = fields.Integer()\r\n line_description1 = fields.Text()\r\n line_description2 = fields.Text()\r\n\r\n @api.model\r\n def create(self, vals):\r\n vals['bol_no'] = self.env['ir.sequence'].next_by_code('hbl')\r\n res = super(BillOfLading, self).create(vals)\r\n return res\r\n\r\n @api.multi\r\n def name_get(self):\r\n result = []\r\n for bol in self:\r\n name = str(bol.bol_no)\r\n result.append((bol.id, name))\r\n return result\r\n\r\n @api.multi\r\n def action_send_bl(self):\r\n self.ensure_one()\r\n ir_model_data = self.env['ir.model.data']\r\n try:\r\n template_id = \\\r\n ir_model_data.get_object_reference('sci_goexcel_freight', 'email_template_bol')[1]\r\n except ValueError:\r\n template_id = False\r\n try:\r\n compose_form_id = ir_model_data.get_object_reference('mail', 'email_compose_message_wizard_form')[1]\r\n except ValueError:\r\n compose_form_id = False\r\n\r\n ctx = {\r\n 'default_model': 'freight.bol',\r\n 'default_res_id': self.ids[0],\r\n 'default_use_template': bool(template_id),\r\n 'default_template_id': template_id,\r\n 'default_composition_mode': 'comment',\r\n 'mark_so_as_sent': True,\r\n 'custom_layout': \"mail.mail_notification_light\",\r\n # 'proforma': self.env.context.get('proforma', False),\r\n 'force_email': True\r\n }\r\n # base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')\r\n # ctx['action_url'] = \"{}/web?db={}\".format(base_url, self.env.cr.dbname)\r\n return {\r\n 'type': 'ir.actions.act_window',\r\n 'view_type': 'form',\r\n 'view_mode': 'form',\r\n 'res_model': 'mail.compose.message',\r\n 'views': [(compose_form_id, 'form')],\r\n 'view_id': compose_form_id,\r\n 'target': 'new',\r\n 'context': ctx,\r\n }\r\n\r\n @api.multi\r\n def action_invoice(self):\r\n self.ensure_one()\r\n view = self.env.ref('sci_goexcel_freight.invoice_view_form')\r\n return {\r\n 'name': 'Create Invoice',\r\n 'type': 'ir.actions.act_window',\r\n 'view_type': 'form',\r\n 'view_mode': 'form',\r\n 'res_model': 'invoice.wizard',\r\n 'views': [(view.id, 'form')],\r\n 'view_id': view.id,\r\n 'target': 'new',\r\n\r\n 'context': dict(bl_id=self.id),\r\n }\r\n\r\n @api.multi\r\n def action_create_vendor_bill(self):\r\n # only lines with vendor\r\n vendor_po = self.cost_profit_ids.filtered(lambda c: c.vendor_id)\r\n po_lines = vendor_po.sorted(key=lambda p: p.vendor_id.id)\r\n vendor_count = False\r\n vendor_id = False\r\n if not self.analytic_account_id:\r\n values = {\r\n 'name': '%s' % self.booking_ref.booking_no,\r\n 'partner_id': self.booking_ref.customer_name.id,\r\n 'code': self.bol_no,\r\n 'company_id': self.booking_ref.company_id.id,\r\n }\r\n\r\n analytic_account = self.env['account.analytic.account'].sudo().create(values)\r\n self.booking_ref.write({'analytic_account_id': analytic_account.id})\r\n self.write({'analytic_account_id': analytic_account.id})\r\n for line in po_lines:\r\n if line.vendor_id != vendor_id:\r\n vb = self.env['account.invoice']\r\n vendor_count = True\r\n vendor_id = line.vendor_id\r\n value = []\r\n 
vendor_bill_created = []\r\n filtered_vb_lines = po_lines.filtered(lambda r: r.vendor_id == vendor_id)\r\n for vb_line in filtered_vb_lines:\r\n if not vb_line.billed:\r\n account_id = False\r\n price_after_converted = vb_line.cost_price * vb_line.cost_currency_rate\r\n if vb_line.product_id.property_account_expense_id:\r\n account_id = vb_line.product_id.property_account_expense_id\r\n elif vb_line.product_id.categ_id.property_account_expense_categ_id:\r\n account_id = vb_line.product_id.categ_id.property_account_expense_categ_id\r\n value.append([0, 0, {\r\n # 'invoice_id': vendor_bill.id or False,\r\n 'account_id': account_id.id or False,\r\n 'name': vb_line.product_id.name or '',\r\n 'product_id': vb_line.product_id.id or False,\r\n 'quantity': vb_line.cost_qty or 0.0,\r\n 'uom_id': vb_line.uom_id.id or False,\r\n 'price_unit': price_after_converted or 0.0,\r\n 'account_analytic_id': self.analytic_account_id.id,\r\n 'bl_line_id': vb_line.id,\r\n }])\r\n vendor_bill_created.append(vb_line)\r\n vb_line.billed = True\r\n # print('vendor_id=' + vendor_id.name)\r\n if value:\r\n vendor_bill_id = vb.create({\r\n 'type': 'in_invoice',\r\n 'invoice_line_ids': value,\r\n 'default_currency_id': self.env.user.company_id.currency_id.id,\r\n 'company_id': self.company_id.id,\r\n 'date_invoice': fields.Date.context_today(self),\r\n 'origin': self.bol_no,\r\n 'partner_id': vendor_id.id,\r\n 'account_id': vb_line.vendor_id.property_account_payable_id.id or False,\r\n 'freight_booking': self.booking_ref.id,\r\n })\r\n for new_vendor_bill in vendor_bill_created:\r\n new_vendor_bill.vendor_bill_id = vendor_bill_id.id\r\n if vendor_count is False:\r\n raise exceptions.ValidationError('No Vendor in Cost & Profit!!!')\r\n\r\n def action_copy_to_booking(self):\r\n booking = self.env['freight.booking'].search([('id', '=', self.booking_ref.id),])\r\n booking_val = {\r\n 'cargo_type': self.cargo_type or False,\r\n 'shipper_address_input': self.shipper,\r\n 'consignee_address_input': self.consignee,\r\n 'notify_party_address_input': self.notify_party,\r\n 'carrier_booking_no' : self.carrier_booking_no or False,\r\n 'voyage_no': self.voyage_no,\r\n 'port_of_loading_input': self.port_of_loading_input,\r\n 'port_of_discharge_input': self.port_of_discharge_input,\r\n 'place_of_delivery': self.place_of_delivery,\r\n 'note': self.note,\r\n 'bol_status': self.bol_status,\r\n 'no_of_original_bl': self.no_of_original_bl,\r\n 'carrier': self.carrier_c.id,\r\n }\r\n booking.sudo().write(booking_val)\r\n for booking_line in booking.operation_line_ids:\r\n booking_line.sudo().unlink()\r\n for booking_line in booking.operation_line_ids2:\r\n booking_line.sudo().unlink()\r\n\r\n for line in self.cargo_line_ids:\r\n if self.cargo_type == 'fcl':\r\n if line.container_product_name:\r\n operation_line_obj = self.env['freight.operations.line']\r\n op_line = operation_line_obj.create({\r\n 'operation_id': booking.id,\r\n 'container_no': line.container_no or '',\r\n 'container_product_id': line.container_product_id.id or False,\r\n 'seal_no': line.seal_no or '',\r\n 'container_product_name': line.container_product_name or '',\r\n 'packages_no': line.packages_no_value or '',\r\n 'packages_no_uom': line.packages_no_uom.id,\r\n 'exp_net_weight': line.exp_net_weight or '',\r\n 'exp_gross_weight': line.exp_gross_weight or '',\r\n 'dim_length': line.dim_length or '',\r\n 'dim_width': line.dim_width or '',\r\n 'dim_height': line.dim_height or '',\r\n 'exp_vol': line.exp_vol or '',\r\n 'remark': line.marks or '',\r\n })\r\n 
booking.operation_line_ids = op_line\r\n else:\r\n if line.container_product_name:\r\n operation_line_obj = self.env['freight.operations.line2']\r\n op_line = operation_line_obj.create({\r\n 'operation_id2': booking.id,\r\n 'container_no': line.container_no or '',\r\n 'container_product_id': line.container_product_id.id or False,\r\n 'seal_no': line.seal_no or '',\r\n 'container_product_name': line.container_product_name or '',\r\n 'packages_no': line.packages_no_value or '',\r\n 'packages_no_uom': line.packages_no_uom.id,\r\n 'exp_net_weight': line.exp_net_weight or '',\r\n 'exp_gross_weight': line.exp_gross_weight or '',\r\n 'dim_length': line.dim_length or '',\r\n 'dim_width': line.dim_width or '',\r\n 'dim_height': line.dim_height or '',\r\n 'exp_vol': line.exp_vol or '',\r\n 'shipping_mark': line.marks or '',\r\n })\r\n booking.operation_line_ids2 = op_line\r\n\r\n def action_copy_from_booking(self):\r\n booking = self.env['freight.booking'].search([('id', '=', self.booking_ref.id)])\r\n for line in booking.cost_profit_ids:\r\n operation_line_obj = self.env['freight.bol.cost.profit']\r\n op_line = operation_line_obj.create({\r\n 'bol_id': self.id,\r\n 'product_id': line.product_id.id or False,\r\n 'product_name': line.product_name or '',\r\n 'profit_qty': line.profit_qty or 0,\r\n 'list_price': line.list_price or 0,\r\n 'profit_amount': line.profit_amount or 0,\r\n 'profit_currency': line.profit_currency.id or False,\r\n 'profit_currency_rate': line.profit_currency_rate or 0,\r\n 'cost_qty': line.cost_qty or 0,\r\n 'cost_price': line.cost_price or 0,\r\n 'cost_amount': line.cost_amount or 0,\r\n 'vendor_id': line.vendor_id.id or False,\r\n 'cost_currency': line.cost_currency.id or False,\r\n 'cost_currency_rate': line.cost_currency_rate or 0,\r\n })\r\n\r\n def action_create_si(self):\r\n si_obj = self.env['freight.website.si']\r\n si_val = {\r\n 'si_status': '01',\r\n 'carrier': self.carrier_c.id or False,\r\n 'direction': self.direction or False,\r\n 'cargo_type': self.cargo_type or False,\r\n 'service_type': self.service_type or False,\r\n 'customer_name': self.customer_name.id or False,\r\n 'shipper': self.shipper,\r\n 'consignee': self.consignee,\r\n 'notify_party': self.notify_party,\r\n 'carrier_booking_ref': self.carrier_booking_no,\r\n 'voyage_no': self.voyage_no,\r\n 'port_of_loading_input': self.port_of_loading_input,\r\n 'port_of_discharge_input': self.port_of_discharge_input,\r\n 'place_of_delivery': self.place_of_delivery,\r\n 'bl_ref': self.id,\r\n }\r\n si = si_obj.create(si_val)\r\n if self.cargo_type == 'fcl':\r\n container_line = self.cargo_line_ids\r\n si_line_obj = self.env['freight.website.si.fcl']\r\n for line in container_line:\r\n if line.container_product_id or line.container_no:\r\n si_line = si_line_obj.create({\r\n 'container_product_id': line.container_product_id.id or False,\r\n 'container_product_name': line.container_product_name or False,\r\n 'fcl_line': si.id or '',\r\n 'container_no': line.container_no or '',\r\n 'packages_no': line.packages_no_value or 0.0,\r\n 'packages_no_uom': line.packages_no_uom.id,\r\n 'exp_gross_weight': line.exp_gross_weight or 0.0,\r\n 'exp_vol': line.exp_vol or 0.0,\r\n })\r\n si.write({'fcl_line_ids': si_line or False})\r\n else:\r\n container_line = self.cargo_line_ids\r\n si_line_obj = self.env['freight.website.si.lcl']\r\n for line in container_line:\r\n if line.container_product_id or line.container_no:\r\n si_line = si_line_obj.create({\r\n 'container_product_name': line.container_product_name or False,\r\n 
#'container_product_id': line.container_commodity_id.id or False,\r\n 'lcl_line': si.id or '',\r\n 'container_no': line.container_no or '',\r\n 'packages_no': line.packages_no_value or 0.0,\r\n 'packages_no_uom': line.packages_no_uom.id,\r\n 'exp_gross_weight': line.exp_gross_weight or 0.0,\r\n 'exp_net_weight': line.exp_net_weight or 0.0,\r\n 'exp_vol': line.exp_vol or 0.0,\r\n # 'remark_line': line.remark or '',\r\n })\r\n si.write({'lcl_line_ids': si_line or False})\r\n\r\n @api.multi\r\n def operation_invoices(self):\r\n \"\"\"Show Invoice for specific Freight Operation smart Button.\"\"\"\r\n for operation in self:\r\n invoices = self.env['account.invoice'].search([\r\n ('freight_hbl', '=', operation.id),\r\n ('type', 'in', ['out_invoice', 'out_refund']),\r\n ('state', '!=', 'cancel'),\r\n ])\r\n action = self.env.ref('account.action_invoice_tree1').read()[0]\r\n if len(invoices) > 1:\r\n action['domain'] = [('id', 'in', invoices.ids)]\r\n elif len(invoices) == 1:\r\n action['views'] = [(self.env.ref('account.invoice_form').id, 'form')]\r\n action['res_id'] = invoices.ids[0]\r\n else:\r\n action = {'type': 'ir.actions.act_window_close'}\r\n return action\r\n\r\n @api.multi\r\n def operation_bill(self):\r\n for operation in self:\r\n # Get from the vendor bill list\r\n vendor_bill_list = []\r\n for cost_profit_line in operation.cost_profit_ids:\r\n for vendor_bill_line in cost_profit_line.vendor_bill_ids:\r\n if vendor_bill_line.type in ['in_invoice', 'in_refund']:\r\n vendor_bill_list.append(vendor_bill_line.id)\r\n\r\n invoices = self.env['account.invoice'].search([\r\n ('freight_hbl', '=', operation.id),\r\n ('type', 'in', ['in_invoice', 'in_refund']),\r\n ('state', '!=', 'cancel'),\r\n ])\r\n invoice_name_list = []\r\n for x in invoices:\r\n invoice_name_list.append(x.id)\r\n\r\n unique_list = []\r\n for y in vendor_bill_list:\r\n if invoice_name_list and len(invoice_name_list) > 0:\r\n if y not in invoice_name_list:\r\n unique_list.append(y)\r\n else:\r\n unique_list.append(y)\r\n for z in invoice_name_list:\r\n # if z not in vendor_bill_list:\r\n unique_list.append(z)\r\n\r\n if len(unique_list) > 1:\r\n views = [(self.env.ref('account.invoice_supplier_tree').id, 'tree'),\r\n (self.env.ref('account.invoice_supplier_form').id, 'form')]\r\n return {\r\n 'name': 'Vendor bills',\r\n 'view_type': 'form',\r\n 'view_mode': 'tree,form',\r\n # 'view_id': self.env.ref('account.invoice_supplier_tree').id,\r\n 'view_id': False,\r\n 'res_model': 'account.invoice',\r\n 'views': views,\r\n # 'context': \"{'type':'in_invoice'}\",\r\n 'domain': [('id', 'in', unique_list)],\r\n 'type': 'ir.actions.act_window',\r\n # 'target': 'new',\r\n }\r\n elif len(unique_list) == 1:\r\n # print('in vendor bill length =1')\r\n return {\r\n # 'name': self.booking_no,\r\n 'view_type': 'form',\r\n 'view_mode': 'form',\r\n 'res_model': 'account.invoice',\r\n 'res_id': unique_list[0] or False, # readonly mode\r\n # 'domain': [('id', 'in', purchase_order.ids)],\r\n 'type': 'ir.actions.act_window',\r\n 'target': 'popup', # readonly mode\r\n }\r\n\r\n @api.multi\r\n def operation_si(self):\r\n for operation in self:\r\n si = self.env['freight.website.si'].search([('bl_ref', '=', operation.id), ])\r\n if len(si) > 1:\r\n views = [(self.env.ref('sci_goexcel_freight.view_tree_si').id, 'tree'),\r\n (self.env.ref('sci_goexcel_freight.view_form_si').id, 'form')]\r\n return {\r\n 'name': 'Shipping Instruction',\r\n 'view_type': 'form',\r\n 'view_mode': 'tree,form',\r\n 'view_id': False,\r\n 'res_model': 
'freight.website.si',\r\n 'views': views,\r\n 'domain': [('id', 'in', si.ids)],\r\n 'type': 'ir.actions.act_window',\r\n }\r\n elif len(si) == 1:\r\n return {\r\n 'view_type': 'form',\r\n 'view_mode': 'form',\r\n 'res_model': 'freight.website.si',\r\n 'res_id': si.id or False,\r\n 'type': 'ir.actions.act_window',\r\n 'target': 'popup', # readonly mode\r\n }\r\n else:\r\n action = {'type': 'ir.actions.act_window_close'}\r\n return action\r\n\r\n def _get_invoiced_count(self):\r\n for operation in self:\r\n invoices = self.env['account.invoice'].search([\r\n ('freight_hbl', '=', operation.id),\r\n ('type', 'in', ['out_invoice','out_refund']),\r\n ('state', '!=', 'cancel'),\r\n ])\r\n\r\n self.update({\r\n 'invoice_count': len(invoices),\r\n #'invoice_ids': invoices,\r\n })\r\n\r\n def _get_bill_count(self):\r\n # vendor bill is created from booking job, vendor bill header will have the booking job id\r\n for operation in self:\r\n # Get from the vendor bill list\r\n vendor_bill_list = []\r\n # vendor_bill_list_temp = []\r\n for cost_profit_line in operation.cost_profit_ids:\r\n for vendor_bill_line in cost_profit_line.vendor_bill_ids:\r\n if vendor_bill_line.type in ['in_invoice', 'in_refund']:\r\n vendor_bill_list.append(vendor_bill_line.id)\r\n # vendor_bill_list_temp.append(vendor_bill_line.id)\r\n # print('vendor_bill_list: ', len(vendor_bill_list))\r\n # remove the duplicates in the vendor bill list\r\n unique_vendor_bill_list = []\r\n for i in vendor_bill_list:\r\n if i not in unique_vendor_bill_list:\r\n unique_vendor_bill_list.append(i)\r\n # print('unique_vendor_bill_list: ', len(unique_vendor_bill_list))\r\n # Get the vendor list (Create the vendor from the job)\r\n invoices = self.env['account.invoice'].search([\r\n ('freight_hbl', '=', operation.id),\r\n ('type', 'in', ['in_invoice', 'in_refund']),\r\n ('state', '!=', 'cancel'),\r\n ])\r\n # print('vendor bills:', len(invoices))\r\n invoice_name_list = []\r\n for x in invoices:\r\n invoice_name_list.append(x.id)\r\n unique_list = []\r\n # for x in invoices:\r\n # invoice_name_list.append(x.vendor_bill_id.id)\r\n # unique_list = []\r\n for y in unique_vendor_bill_list:\r\n if invoice_name_list and len(invoice_name_list) > 0:\r\n if y not in invoice_name_list:\r\n unique_list.append(y)\r\n else:\r\n unique_list.append(y)\r\n for z in invoice_name_list:\r\n # if z not in vendor_bill_list:\r\n unique_list.append(z)\r\n if len(unique_list) > 0:\r\n self.update({\r\n 'vendor_bill_count': len(unique_list),\r\n })\r\n\r\n def _get_si_count(self):\r\n for operation in self:\r\n si = self.env['freight.website.si'].search([\r\n ('bl_ref', '=', operation.id),\r\n ])\r\n\r\n self.update({\r\n 'si_count': len(si),\r\n })\r\n\r\n # TS - add for Purchase Receipt\r\n purchase_receipt_count = fields.Integer(string='Purchase Receipt Count', compute='_get_pr_count', copy=False)\r\n\r\n def _get_pr_count(self):\r\n # get purchase receipt (Account Voucher) on the lines\r\n for operation in self:\r\n # Get PR list\r\n pr_lines = self.env['account.voucher.line'].search([\r\n ('freight_hbl', '=', operation.id),\r\n ])\r\n pr_list = []\r\n for pr_line in pr_lines:\r\n if pr_line.voucher_id.state != 'cancel' and pr_line.voucher_id.voucher_type == 'purchase':\r\n pr_list.append(pr_line.voucher_id.id)\r\n # pr_name_list = []\r\n # for x in pr_list:\r\n # pr_name_list.append(x.id)\r\n unique_list = []\r\n for i in pr_list:\r\n if i not in unique_list:\r\n unique_list.append(i)\r\n\r\n if len(unique_list) > 0:\r\n self.update({\r\n 'purchase_receipt_count': 
len(unique_list),\r\n })\r\n\r\n @api.multi\r\n def operation_pr(self):\r\n for operation in self:\r\n for operation in self:\r\n # Get PR list\r\n pr_lines = self.env['account.voucher.line'].search([\r\n ('freight_hbl', '=', operation.id),\r\n ])\r\n pr_list = []\r\n for pr_line in pr_lines:\r\n if pr_line.voucher_id.state != 'cancel' and pr_line.voucher_id.voucher_type == 'purchase':\r\n pr_list.append(pr_line.voucher_id.id)\r\n # pr_name_list = []\r\n # for x in pr_list:\r\n # pr_name_list.append(x.id)\r\n unique_list = []\r\n for i in pr_list:\r\n if i not in unique_list:\r\n unique_list.append(i)\r\n\r\n if len(unique_list) > 1:\r\n views = [(self.env.ref('account_voucher.view_voucher_tree').id, 'tree'),\r\n (self.env.ref('account_voucher.view_purchase_receipt_form').id, 'form')]\r\n return {\r\n 'name': 'Purchase Receipt',\r\n 'view_type': 'form',\r\n 'view_mode': 'tree,form',\r\n # 'view_id': self.env.ref('account.invoice_supplier_tree').id,\r\n 'view_id': False,\r\n 'res_model': 'account.voucher',\r\n 'views': views,\r\n # 'context': \"{'type':'in_invoice'}\",\r\n 'domain': [('id', 'in', unique_list)],\r\n 'type': 'ir.actions.act_window',\r\n # 'target': 'new',\r\n }\r\n elif len(unique_list) == 1:\r\n # print('in vendor bill length =1')\r\n return {\r\n # 'name': self.booking_no,\r\n 'view_type': 'form',\r\n 'view_mode': 'form',\r\n 'res_model': 'account.voucher',\r\n 'res_id': unique_list[0] or False, # readonly mode\r\n # 'domain': [('id', 'in', purchase_order.ids)],\r\n 'type': 'ir.actions.act_window',\r\n 'target': 'popup', # readonly mode\r\n }\r\n\r\n\r\nclass CargoLine(models.Model):\r\n _name = 'freight.bol.cargo'\r\n _description = 'Cargo Line'\r\n\r\n cargo_line = fields.Many2one('freight.bol', string='Cargo Line', required=True, ondelete='cascade',\r\n index=True, copy=False)\r\n sequence = fields.Integer(string=\"sequence\")\r\n marks = fields.Text(string='Marks and Numbers')\r\n container_no = fields.Char(string=\"Container No.\")\r\n container_product_id = fields.Many2one('product.product', string='Container', track_visibility='onchange')\r\n seal_no = fields.Char(string=\"Seal No.\")\r\n container_product_name = fields.Text(string='Description of Goods')\r\n packages_no_value = fields.Integer(string=\"No. of Packages\")\r\n packages_no_uom = fields.Many2one('uom.uom', string=\"UoM\")\r\n exp_net_weight = fields.Float(string=\"Net Weight(KG)\", help=\"Expected Weight in kg.\", track_visibility='onchange')\r\n exp_gross_weight = fields.Float(string=\"Gross Weight(KG)\", digits=(12, 4), help=\"Expected Weight in kg.\")\r\n\r\n dim_length = fields.Float(string='Length', help=\"Length in cm\", default=\"0.00\", track_visibility='onchange')\r\n dim_width = fields.Float(string='Width', default=\"0.00\", help=\"Width in cm\", track_visibility='onchange')\r\n dim_height = fields.Float(string='Height', default=\"0.00\", help=\"Height in cm\", track_visibility='onchange')\r\n exp_vol = fields.Float(string=\"Measurement (M3)\", digits=(12, 4),\r\n help=\"Expected Volume in m3 Measure\")\r\n packages_no = fields.Char(string=\"No. 
of Packages\")\r\n\r\n @api.multi\r\n def _get_default_container_category(self):\r\n container_lines = self.env['freight.product.category'].search([('type', '=ilike', 'container')])\r\n for container_line in container_lines:\r\n # _logger.warning('_get_default_container_category=' + str(container_line.product_category))\r\n return container_line.product_category\r\n\r\n container_category_id = fields.Many2one('product.category', string=\"Container Product Id\",\r\n default=_get_default_container_category)\r\n\r\n\r\n @api.onchange('container_product_name')\r\n def _onchange_description(self):\r\n bl = self.env['freight.bol'].search([('bol_no', '=', self.cargo_line.bol_no)])\r\n if self.container_product_name:\r\n lines_description = self.container_product_name.count('\\n')/20\r\n lines_description = math.ceil(lines_description)\r\n x = self.container_product_name.split('\\n')\r\n count = 0\r\n line_description1 = ''\r\n line_description2 = ''\r\n for line in x:\r\n if count < 20:\r\n line_description1 = line_description1 + line + '\\n'\r\n count = count + 1\r\n else:\r\n line_description2 = line_description2 + line + '\\n'\r\n count = count + 1\r\n bl.write({'lines_description': lines_description,\r\n 'line_description1': line_description1,\r\n 'line_description2': line_description2,\r\n })\r\n\r\n @api.model\r\n def create(self, vals):\r\n # _logger.warning(\"in create\")\r\n res = super(CargoLine, self).create(vals)\r\n content = \"\"\r\n if vals.get(\"marks\"):\r\n content = content + \" \\u2022 Marks and Numbers: \" + str(vals.get(\"marks\")) + \"<br/>\"\r\n if vals.get(\"container_product_name\"):\r\n content = content + \" \\u2022 Description of Goods: \" + str(vals.get(\"container_product_name\")) + \"<br/>\"\r\n if vals.get(\"packages_no\"):\r\n content = content + \" \\u2022 No. of Packages: \" + str(vals.get(\"packages_no\")) + \"<br/>\"\r\n if vals.get(\"seal_no\"):\r\n content = content + \" \\u2022 Seal no: \" + str(vals.get(\"seal_no\")) + \"<br/>\"\r\n if vals.get(\"container_no\"):\r\n content = content + \" \\u2022 Container No.: \" + str(vals.get(\"container_no\")) + \"<br/>\"\r\n if vals.get(\"exp_gross_weight\"):\r\n content = content + \" \\u2022 Gross Weight(KG): \" + str(vals.get(\"exp_gross_weight\")) + \"<br/>\"\r\n if vals.get(\"exp_vol\"):\r\n content = content + \" \\u2022 Measurement (M3): \" + str(vals.get(\"exp_vol\")) + \"<br/>\"\r\n res.cargo_line.message_post(body=content)\r\n\r\n return res\r\n\r\n @api.multi\r\n def write(self, vals):\r\n # _logger.warning(\"in write\")\r\n res = super(CargoLine, self).write(vals)\r\n # _logger.warning(\"after super write\")\r\n content = \"\"\r\n if vals.get(\"marks\"):\r\n content = content + \" \\u2022 Marks and Numbers: \" + str(vals.get(\"marks\")) + \"<br/>\"\r\n if vals.get(\"container_product_name\"):\r\n content = content + \" \\u2022 Description of Goods: \" + str(vals.get(\"container_product_name\")) + \"<br/>\"\r\n if vals.get(\"packages_no\"):\r\n content = content + \" \\u2022 No. 
of Packages: \" + str(vals.get(\"packages_no\")) + \"<br/>\"\r\n if vals.get(\"seal_no\"):\r\n content = content + \" \\u2022 Seal no: \" + str(vals.get(\"seal_no\")) + \"<br/>\"\r\n if vals.get(\"container_no\"):\r\n content = content + \" \\u2022 Container No.: \" + str(vals.get(\"container_no\")) + \"<br/>\"\r\n if vals.get(\"exp_gross_weight\"):\r\n content = content + \" \\u2022 Gross Weight(KG): \" + str(vals.get(\"exp_gross_weight\")) + \"<br/>\"\r\n if vals.get(\"exp_vol\"):\r\n content = content + \" \\u2022 Measurement (M3): \" + str(vals.get(\"exp_vol\")) + \"<br/>\"\r\n self.cargo_line.message_post(body=content)\r\n\r\n return res\r\n\r\n\r\nclass ChargeLine(models.Model):\r\n _name = 'freight.bol.charge'\r\n _description = 'Charge Line'\r\n\r\n charge_line = fields.Many2one('freight.bol', string='Charge Line', required=True, ondelete='cascade',\r\n index=True, copy=False)\r\n sequence = fields.Integer(string=\"sequence\")\r\n freight_charges = fields.Text(string='Freight & Charges')\r\n rate = fields.Char(string='Rate')\r\n per = fields.Char(string=\"Per\")\r\n amount = fields.Char(string=\"Amount\")\r\n prepaid = fields.Char(string=\"Prepaid\")\r\n collect = fields.Char(string=\"Collect\")\r\n payable_at_by = fields.Char(string=\"Payable at/by\")\r\n # fcl_container_qty = fields.Float(string=\"Qty\", digits=(8, 0), track_visibility='onchange')\r\n\r\n revenue_tons = fields.Char(string='Revenue Tons')\r\n\r\n @api.model\r\n def create(self, vals):\r\n # _logger.warning(\"in create\")\r\n res = super(ChargeLine, self).create(vals)\r\n content = \"\"\r\n if vals.get(\"freight_charges\"):\r\n content = content + \" \\u2022 Freight & Charges: \" + str(vals.get(\"freight_charges\")) + \"<br/>\"\r\n if vals.get(\"revenue_tons\"):\r\n content = content + \" \\u2022 Revenue Tons: \" + str(vals.get(\"revenue_tons\")) + \"<br/>\"\r\n if vals.get(\"rate\"):\r\n content = content + \" \\u2022 Rate: \" + str(vals.get(\"rate\")) + \"<br/>\"\r\n if vals.get(\"per\"):\r\n content = content + \" \\u2022 Per: \" + str(vals.get(\"per\")) + \"<br/>\"\r\n if vals.get(\"amount\"):\r\n content = content + \" \\u2022 Amount: \" + str(vals.get(\"amount\")) + \"<br/>\"\r\n if vals.get(\"prepaid\"):\r\n content = content + \" \\u2022 Prepaid: \" + str(vals.get(\"prepaid\")) + \"<br/>\"\r\n if vals.get(\"collect\"):\r\n content = content + \" \\u2022 Collect: \" + str(vals.get(\"collect\")) + \"<br/>\"\r\n if vals.get(\"payable_at_by\"):\r\n content = content + \" \\u2022 Payable at/by: \" + str(vals.get(\"payable_at_by\")) + \"<br/>\"\r\n res.charge_line.message_post(body=content)\r\n\r\n return res\r\n\r\n @api.multi\r\n def write(self, vals):\r\n # _logger.warning(\"in write\")\r\n res = super(ChargeLine, self).write(vals)\r\n # _logger.warning(\"after super write\")\r\n content = \"\"\r\n if vals.get(\"freight_charges\"):\r\n content = content + \" \\u2022 Freight & Charges: \" + str(vals.get(\"freight_charges\")) + \"<br/>\"\r\n if vals.get(\"revenue_tons\"):\r\n content = content + \" \\u2022 Revenue Tons: \" + str(vals.get(\"revenue_tons\")) + \"<br/>\"\r\n if vals.get(\"rate\"):\r\n content = content + \" \\u2022 Rate: \" + str(vals.get(\"rate\")) + \"<br/>\"\r\n if vals.get(\"per\"):\r\n content = content + \" \\u2022 Per: \" + str(vals.get(\"per\")) + \"<br/>\"\r\n if vals.get(\"amount\"):\r\n content = content + \" \\u2022 Amount: \" + str(vals.get(\"amount\")) + \"<br/>\"\r\n if vals.get(\"prepaid\"):\r\n content = content + \" \\u2022 Prepaid: \" + str(vals.get(\"prepaid\")) + \"<br/>\"\r\n 
if vals.get(\"collect\"):\r\n content = content + \" \\u2022 Collect: \" + str(vals.get(\"collect\")) + \"<br/>\"\r\n if vals.get(\"payable_at_by\"):\r\n content = content + \" \\u2022 Payable at/by: \" + str(vals.get(\"payable_at_by\")) + \"<br/>\"\r\n self.charge_line.message_post(body=content)\r\n\r\n return res\r\n\r\n\r\nclass CostProfit(models.Model):\r\n _name = 'freight.bol.cost.profit'\r\n _description = \"BOL Cost & Profit\"\r\n\r\n sequence = fields.Integer(string=\"sequence\")\r\n bol_id = fields.Many2one('freight.bol', string='BOL ID', required=True, ondelete='cascade',\r\n index=True, copy=False)\r\n product_id = fields.Many2one('product.product', string=\"Product\")\r\n product_name = fields.Text(string=\"Description\")\r\n\r\n #Profit\r\n #profit_qty = fields.Integer(string='Qty', default=\"1\")\r\n #profit_qty = fields.Float(string='Qty', default=\"1\", digits=(12, 2))\r\n list_price = fields.Float(string=\"Unit Price\")\r\n uom_id = fields.Many2one('uom.uom', string=\"UoM\")\r\n profit_gst = fields.Selection([('zer', 'ZER')], string=\"GST\", default=\"zer\", track_visibility='onchange')\r\n tax_id = fields.Many2many('account.tax', string='Taxes', domain=['|', ('active', '=', False), ('active', '=', True)])\r\n profit_currency = fields.Many2one('res.currency', 'Currency',\r\n default=lambda self: self.env.user.company_id.currency_id.id, track_visibility='onchange')\r\n profit_currency_rate = fields.Float(string='Rate', default=\"1.00\", track_visibility='onchange')\r\n profit_amount = fields.Float(string=\"Amt\", compute=\"_compute_profit_amount\", store=True, track_visibility='onchange')\r\n sale_total = fields.Float(string=\"Total Sales\", compute=\"_compute_sale_total\", store=True, track_visibility='onchange')\r\n\r\n #Cost\r\n #cost_qty = fields.Integer(string='Qty', default=\"1\", track_visibility='onchange')\r\n profit_qty = fields.Float(string='Qty', default=\"1.000\", digit=(12, 3))\r\n cost_qty = fields.Float(string='Qty', default=\"1.000\", digit=(12, 3))\r\n cost_price = fields.Float(string=\"Unit Price\", track_visibility='onchange')\r\n cost_gst = fields.Selection([('zer', 'ZER')], string=\"Tax\", default=\"zer\", track_visibility='onchange')\r\n vendor_id = fields.Many2one('res.partner', string=\"Vendor\", track_visibility='onchange')\r\n vendor_bill_id = fields.Many2one('account.invoice', string=\"Vendor Bill\")\r\n cost_currency = fields.Many2one('res.currency', string=\"Curr\", required=True,\r\n default=lambda self: self.env.user.company_id.currency_id.id, track_visibility='onchange')\r\n cost_currency_rate = fields.Float(string='Rate', default=\"1.00\", track_visibility='onchange')\r\n cost_amount = fields.Float(string=\"Amt\",\r\n compute=\"_compute_cost_amount\", store=True, track_visibility='onchange')\r\n cost_total = fields.Float(string=\"Total Cost\",\r\n compute=\"_compute_cost_total\", store=True, track_visibility='onchange')\r\n\r\n # Invoice & Bill\r\n billed = fields.Boolean(string='Billed', copy=False)\r\n is_billed = fields.Char('Is Biiled?', compute='_compute_is_billed', store=True)\r\n\r\n added_to_invoice = fields.Boolean(string='Invoiced', copy=False)\r\n invoice_paid = fields.Boolean(string='Invoice Paid', copy=False)\r\n\r\n paid = fields.Boolean(string='Paid', copy=False)\r\n is_paid = fields.Char('Is Paid?', compute='_compute_is_paid', store=True)\r\n\r\n invoice_id = fields.Many2one('account.invoice', string=\"Invoice\")\r\n inv_line_id = fields.Many2one('account.invoice.line', string=\"Invoice Line\")\r\n\r\n bill_id = 
fields.Many2one('account.invoice', string=\"Bill\")\r\n bill_line_id = fields.Many2one('account.invoice.line', string=\"Bill Line\")\r\n\r\n route_service = fields.Boolean(string='Is Route Service', default=False)\r\n\r\n profit_total = fields.Float(string=\"Total Profit\", compute=\"_compute_profit_total\", store=True)\r\n margin_total = fields.Float(string=\"Margin %\", compute=\"_compute_margin_total\", digits=(8,2), store=True, group_operator=\"avg\")\r\n vendor_id_ids = fields.Many2many('res.partner', string=\"Vendor List\", copy=False)\r\n vendor_bill_ids = fields.Many2many('account.invoice', string=\"Vendor Bill List\", copy=False)\r\n\r\n\r\n @api.one\r\n def _set_access_for_invoiced(self):\r\n if self.env['res.users'].has_group('account.group_account_manager'):\r\n self.invoiced_readonly = False\r\n else:\r\n self.invoiced_readonly = True\r\n\r\n invoiced_readonly = fields.Boolean(compute=\"_set_access_for_invoiced\",\r\n string='Is user able to modify invoiced?')\r\n\r\n\r\n @api.depends('profit_qty', 'list_price')\r\n def _compute_profit_amount(self):\r\n for service in self:\r\n if service.product_id:\r\n service.profit_amount = service.profit_qty * service.list_price or 0.0\r\n\r\n @api.depends('cost_qty', 'cost_price')\r\n def _compute_cost_amount(self):\r\n for service in self:\r\n if service.product_id:\r\n service.cost_amount = service.cost_qty * service.cost_price or 0.0\r\n\r\n @api.depends('profit_amount', 'profit_currency_rate')\r\n def _compute_sale_total(self):\r\n for service in self:\r\n if service.product_id:\r\n service.sale_total = service.profit_amount * service.profit_currency_rate or 0.0\r\n\r\n @api.onchange('profit_currency_rate')\r\n def _onchange_profit_currency_rate(self):\r\n for service in self:\r\n if service.product_id:\r\n service.sale_total = service.profit_amount * service.profit_currency_rate or 0.0\r\n\r\n @api.onchange('profit_amount')\r\n def _onchange_profit_amount(self):\r\n for service in self:\r\n if service.product_id:\r\n service.sale_total = service.profit_amount * service.profit_currency_rate or 0.0\r\n service.profit_total = service.sale_total - service.cost_total or 0.0\r\n\r\n @api.depends('cost_amount', 'cost_currency_rate')\r\n def _compute_cost_total(self):\r\n for service in self:\r\n if service.product_id:\r\n service.cost_total = service.cost_amount * service.cost_currency_rate or 0.0\r\n service.profit_total = service.sale_total - service.cost_total or 0.0\r\n\r\n @api.depends('cost_total', 'sale_total')\r\n def _compute_profit_total(self):\r\n for service in self:\r\n if service.product_id:\r\n service.profit_total = service.sale_total - service.cost_total or 0.0\r\n\r\n @api.depends('profit_total', 'sale_total')\r\n def _compute_margin_total(self):\r\n for service in self:\r\n if service.product_id:\r\n if service.sale_total > 0:\r\n service.margin_total = service.profit_total / service.sale_total * 100\r\n\r\n @api.onchange('cost_amount')\r\n def _onchange_cost_amount(self):\r\n for service in self:\r\n if service.product_id:\r\n service.cost_total = service.cost_amount * service.cost_currency_rate or 0.0\r\n service.profit_total = service.sale_total - service.cost_total or 0.0\r\n\r\n @api.onchange('cost_currency_rate')\r\n def _onchange_cost_currency_rate(self):\r\n for service in self:\r\n if service.product_id:\r\n service.cost_total = service.cost_amount * service.cost_currency_rate or 0.0\r\n service.profit_total = service.sale_total - service.cost_total or 0.0\r\n\r\n @api.onchange('product_id')\r\n def 
_onchange_product_id(self):\r\n if not self.product_id:\r\n return {'domain': {'uom_id': []}}\r\n\r\n vals = {}\r\n domain = {'uom_id': [('category_id', '=', self.product_id.uom_id.category_id.id)]}\r\n if not self.uom_id or (self.product_id.uom_id.id != self.uom_id.id):\r\n vals['uom_id'] = self.product_id.uom_id\r\n vals['product_name'] = self.product_id.name\r\n\r\n self.update(vals)\r\n\r\n if self.product_id:\r\n self.update({\r\n 'list_price': self.product_id.list_price or 0.0,\r\n 'cost_price': self.product_id.standard_price or 0.0\r\n })\r\n\r\n @api.onchange('vendor_id')\r\n def _onchange_vendor_id(self):\r\n print('OnChange Vendor_ID')\r\n if self.vendor_id:\r\n if not self.billed:\r\n self.billed = False\r\n print('Invoiced False')\r\n\r\n\r\n @api.multi\r\n @api.depends('billed')\r\n def _compute_is_billed(self):\r\n for cost_profit_line in self:\r\n if cost_profit_line.vendor_id:\r\n if cost_profit_line.billed:\r\n cost_profit_line.is_billed = 'Y'\r\n elif not cost_profit_line.billed:\r\n cost_profit_line.is_billed = 'N'\r\n\r\n @api.multi\r\n @api.depends('paid')\r\n def _compute_is_paid(self):\r\n for cost_profit_line in self:\r\n if cost_profit_line.vendor_id:\r\n if cost_profit_line.paid:\r\n cost_profit_line.is_paid = 'Y'\r\n elif not cost_profit_line.paid:\r\n cost_profit_line.is_paid = 'N'\r\n",
"step-ids": [
30,
32,
38,
47,
49
]
}
|
[
30,
32,
38,
47,
49
] |
import pandas as pd
import numpy as np
import geopandas as gp
from sys import argv
import os
import subprocess
n, e, s, w = map(int, argv[1:5])
output_dir = argv[5]
print(f'{(n, e, s, w)=}')
for lat in range(s, n + 1):
for lon in range(w, e + 1):
latdir = 'n' if lat >= 0 else 's'
londir = 'e' if lon >= 0 else 'w'
fname = f'{latdir}{abs(lat):02d}{londir}{abs(lon):03d}'
print(fname)
url = f'https://prd-tnm.s3.amazonaws.com/StagedProducts/Elevation/13/TIFF/{fname}/USGS_13_{fname}.tif'
print(url)
outf = os.path.join(output_dir, f'{fname}.tif')
subprocess.run(['curl', '--output', outf, url])
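
# Example of the tile name / URL this loop builds (editor's addition, illustrative):
# lat=38, lon=-106 -> fname 'n38w106' ->
# https://prd-tnm.s3.amazonaws.com/StagedProducts/Elevation/13/TIFF/n38w106/USGS_13_n38w106.tif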
|
normal
|
{
"blob_id": "9f36b846619ca242426041f577ab7d9e4dad6a43",
"index": 3797,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(f'(n, e, s, w)={n, e, s, w!r}')\nfor lat in range(s, n + 1):\n for lon in range(w, e + 1):\n latdir = 'n' if lat >= 0 else 's'\n londir = 'e' if lon >= 0 else 'w'\n fname = f'{latdir}{abs(lat):02d}{londir}{abs(lon):03d}'\n print(fname)\n url = (\n f'https://prd-tnm.s3.amazonaws.com/StagedProducts/Elevation/13/TIFF/{fname}/USGS_13_{fname}.tif'\n )\n print(url)\n outf = os.path.join(output_dir, f'{fname}.tif')\n subprocess.run(['curl', '--output', outf, url])\n",
"step-3": "<mask token>\nn, e, s, w = map(int, argv[1:5])\noutput_dir = argv[5]\nprint(f'(n, e, s, w)={n, e, s, w!r}')\nfor lat in range(s, n + 1):\n for lon in range(w, e + 1):\n latdir = 'n' if lat >= 0 else 's'\n londir = 'e' if lon >= 0 else 'w'\n fname = f'{latdir}{abs(lat):02d}{londir}{abs(lon):03d}'\n print(fname)\n url = (\n f'https://prd-tnm.s3.amazonaws.com/StagedProducts/Elevation/13/TIFF/{fname}/USGS_13_{fname}.tif'\n )\n print(url)\n outf = os.path.join(output_dir, f'{fname}.tif')\n subprocess.run(['curl', '--output', outf, url])\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport geopandas as gp\nfrom sys import argv\nimport os\nimport subprocess\nn, e, s, w = map(int, argv[1:5])\noutput_dir = argv[5]\nprint(f'(n, e, s, w)={n, e, s, w!r}')\nfor lat in range(s, n + 1):\n for lon in range(w, e + 1):\n latdir = 'n' if lat >= 0 else 's'\n londir = 'e' if lon >= 0 else 'w'\n fname = f'{latdir}{abs(lat):02d}{londir}{abs(lon):03d}'\n print(fname)\n url = (\n f'https://prd-tnm.s3.amazonaws.com/StagedProducts/Elevation/13/TIFF/{fname}/USGS_13_{fname}.tif'\n )\n print(url)\n outf = os.path.join(output_dir, f'{fname}.tif')\n subprocess.run(['curl', '--output', outf, url])\n",
"step-5": "import pandas as pd\nimport numpy as np\nimport geopandas as gp\nfrom sys import argv\nimport os\nimport subprocess\n\nn, e, s, w = map(int, argv[1:5])\noutput_dir = argv[5]\n\nprint(f'{(n, e, s, w)=}')\n\nfor lat in range(s, n + 1):\n for lon in range(w, e + 1):\n latdir = 'n' if lat >= 0 else 's'\n londir = 'e' if lon >= 0 else 'w'\n fname = f'{latdir}{abs(lat):02d}{londir}{abs(lon):03d}'\n print(fname)\n\n url = f'https://prd-tnm.s3.amazonaws.com/StagedProducts/Elevation/13/TIFF/{fname}/USGS_13_{fname}.tif'\n\n print(url)\n\n outf = os.path.join(output_dir, f'{fname}.tif')\n\n subprocess.run(['curl', '--output', outf, url])\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
s=int(input())
print(s+2-(s%2))
|
normal
|
{
"blob_id": "0412369f89842e2f55aa115e63f46a1b71a0f322",
"index": 2685,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(s + 2 - s % 2)\n",
"step-3": "s = int(input())\nprint(s + 2 - s % 2)\n",
"step-4": "s=int(input())\nprint(s+2-(s%2))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Generated by Django 3.1.1 on 2020-10-07 04:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('articals', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='artical',
name='thumb',
field=models.ImageField(blank=True, default='default.png', upload_to='media/'),
),
]
|
normal
|
{
"blob_id": "d69bffb85d81ab3969bfe7dfe2759fa809890208",
"index": 503,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('articals', '0001_initial')]\n operations = [migrations.AddField(model_name='artical', name='thumb',\n field=models.ImageField(blank=True, default='default.png',\n upload_to='media/'))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('articals', '0001_initial')]\n operations = [migrations.AddField(model_name='artical', name='thumb',\n field=models.ImageField(blank=True, default='default.png',\n upload_to='media/'))]\n",
"step-5": "# Generated by Django 3.1.1 on 2020-10-07 04:04\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('articals', '0001_initial'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='artical',\n name='thumb',\n field=models.ImageField(blank=True, default='default.png', upload_to='media/'),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# straight in poker: checks whether the cards form a consecutive run (0 = joker/wildcard)
def IsContinuous(numbers):
if not numbers or len(numbers) < 1 :
return False
numbers.sort()
number_of_zero = 0
number_of_gap = 0
for i in range(len(numbers)):
if numbers[i] == 0:
number_of_zero += 1
small = number_of_zero
big = small + 1
while(big < len(numbers)):
if numbers[small] == numbers[big]:
return False
number_of_gap += (numbers[big] - numbers[small] - 1)
small = big
big += 1
if number_of_gap <= number_of_zero:
return True
else:
return False
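
# A quick illustration (editor's addition; hypothetical hands, not from the source):
# [0, 1, 3, 4, 5] has one joker (0) and one gap (1 -> 3), so the joker fills the
# gap and the hand counts as a straight; a pair always fails.
if __name__ == '__main__':
    assert IsContinuous([0, 1, 3, 4, 5]) is True
    assert IsContinuous([1, 2, 2, 3, 4]) is False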
|
normal
|
{
"blob_id": "68a776d7fccc8d8496a944baff51d2a862fc7d31",
"index": 1259,
"step-1": "<mask token>\n",
"step-2": "def IsContinuous(numbers):\n if not numbers or len(numbers) < 1:\n return False\n numbers.sort()\n number_of_zero = 0\n number_of_gap = 0\n for i in range(len(numbers)):\n if numbers[i] == 0:\n number_of_zero += 1\n small = number_of_zero\n big = small + 1\n while big < len(numbers):\n if numbers[small] == numbers[big]:\n return False\n number_of_gap += numbers[big] - numbers[small] - 1\n small = big\n big += 1\n if number_of_gap <= number_of_zero:\n return True\n else:\n return False\n",
"step-3": "# flush in poker\ndef IsContinuous(numbers):\n if not numbers or len(numbers) < 1 :\n return False\n\n numbers.sort()\n number_of_zero = 0\n number_of_gap = 0\n for i in range(len(numbers)):\n if numbers[i] == 0:\n number_of_zero += 1\n\n small = number_of_zero\n big = small + 1\n while(big < len(numbers)):\n if numbers[small] == numbers[big]:\n return False\n\n number_of_gap += (numbers[big] - numbers[small] - 1)\n small = big\n big += 1\n\n if number_of_gap <= number_of_zero:\n return True\n else:\n return False\n\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import typ
@typ.typ(items=[int])
def gnome_sort(items):
"""
>>> gnome_sort([])
[]
>>> gnome_sort([1])
[1]
>>> gnome_sort([2,1])
[1, 2]
>>> gnome_sort([1,2])
[1, 2]
>>> gnome_sort([1,2,2])
[1, 2, 2]
"""
i = 0
n = len(items)
while i < n:
if i and items[i] < items[i - 1]:
items[i], items[i - 1] = items[i - 1], items[i]
i -= 1
else:
i += 1
return items
|
normal
|
{
"blob_id": "70aba6c94b7050113adf7ae48bd4e13aa9a34587",
"index": 1023,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected](items=[int])\ndef gnome_sort(items):\n \"\"\"\n >>> gnome_sort([])\n []\n >>> gnome_sort([1])\n [1]\n >>> gnome_sort([2,1])\n [1, 2]\n >>> gnome_sort([1,2])\n [1, 2]\n >>> gnome_sort([1,2,2])\n [1, 2, 2]\n \"\"\"\n i = 0\n n = len(items)\n while i < n:\n if i and items[i] < items[i - 1]:\n items[i], items[i - 1] = items[i - 1], items[i]\n i -= 1\n else:\n i += 1\n return items\n",
"step-3": "import typ\n\n\[email protected](items=[int])\ndef gnome_sort(items):\n \"\"\"\n >>> gnome_sort([])\n []\n >>> gnome_sort([1])\n [1]\n >>> gnome_sort([2,1])\n [1, 2]\n >>> gnome_sort([1,2])\n [1, 2]\n >>> gnome_sort([1,2,2])\n [1, 2, 2]\n \"\"\"\n i = 0\n n = len(items)\n while i < n:\n if i and items[i] < items[i - 1]:\n items[i], items[i - 1] = items[i - 1], items[i]\n i -= 1\n else:\n i += 1\n return items\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import json
from constants import *
from coattention_layer import *
from prepare_generator import *
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import LearningRateScheduler, ModelCheckpoint, EarlyStopping
def coattention(num_embeddings):
image_input = Input(shape=(196, 512))
question_input = Input(shape=(SEQ_LENGTH,))
output = CoattentionModel(num_embeddings)(question_input, image_input)
model = Model(inputs=[question_input, image_input], outputs=output)
return model
def scheduler(epoch):
if epoch < 10:
return 0.0001
else:
return 0.0001 * tf.math.exp(0.1 * (10 - epoch))
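
# Sketch of the resulting schedule (editor's addition; note Train() below builds
# scheduler_lr but does not pass it to fit(), so this is illustrative only):
#   epochs 0-9: constant 1e-4; epoch 20: 1e-4 * exp(-1) ~ 3.7e-5, decaying
#   by a factor exp(-0.1) per epoch after epoch 10.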
def Train(dataset=True):  # dataset: 0 = English, 1 = Google, 2 = Targoman (see save_config)
train_generator, val_generator, val_question_ids, VOCAB_SIZE = get_generator(
dataset)
save_config(dataset)
checkpoint = ModelCheckpoint(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt',
save_weights_only=True,
verbose=1)
    scheduler_lr = LearningRateScheduler(scheduler, verbose=0)  # note: built but not passed to fit() below
earlystop_callback = EarlyStopping(monitor='val_loss', patience=3)
model = coattention(VOCAB_SIZE)
model.compile(optimizer=Adam(learning_rate=LR),
loss='categorical_crossentropy',
metrics=['accuracy'])
model.summary()
# Save the weights using the `checkpoint_path` format
model.save_weights(CHECKPOINT_PATH +
'/cp-{epoch: 04d}.ckpt'.format(epoch=0))
history = model.fit(x=train_generator,
epochs=EPOCHS,
validation_data=val_generator,
callbacks=[checkpoint, earlystop_callback],
workers=6,
use_multiprocessing=True)
# save history
with open(HISTORY_PATH, 'w') as file:
json.dump(history.history, file)
# prediction
predictions = model.predict(val_generator,
workers=6,
use_multiprocessing=True,
verbose=1)
ans_vocab = load_ans_vocab()
result = []
for q in range(len(val_question_ids)):
ans = ans_vocab[str(predictions[q].argmax(axis=-1))]
q_id = int(val_question_ids[q])
result.append({u'answer': ans, u'question_id': q_id})
with open(PRED_PATH, 'w') as file:
json.dump(list(result), file)
return
def save_config(dataset):
if dataset == 0:
DATASET = 'English'
if dataset == 1:
DATASET = 'Google'
if dataset == 2:
DATASET = 'Targoman'
config = {'NAME': 'coattention',
'EMBEDDING': 'keras',
"DATASET": DATASET,
"OPTIMIZER": 'Adam',
"EARLY STOPPING": 'val_loss',
"LOSS": 'categorical_crossentropy',
'DROPOUT_RATE': DROPOUT_RATE,
"EMBEDDING_DIM": EMBEDDING_DIM,
"EPOCHS": EPOCHS,
"BATCH_SIZE": BATCH_SIZE,
"SEQ_LENGTH": SEQ_LENGTH,
"NUM_CLASSES": NUM_CLASSES}
print("save config in" + str(CONFIG_PATH))
with open(CONFIG_PATH, 'w') as file:
json.dump(config, file)
return
Train(dataset=2)
|
normal
|
{
"blob_id": "a8d52d81ef6538e9cb8a0a9cab7cd0a778454c8e",
"index": 6424,
"step-1": "<mask token>\n\n\ndef coattention(num_embeddings):\n image_input = Input(shape=(196, 512))\n question_input = Input(shape=(SEQ_LENGTH,))\n output = CoattentionModel(num_embeddings)(question_input, image_input)\n model = Model(inputs=[question_input, image_input], outputs=output)\n return model\n\n\ndef scheduler(epoch):\n if epoch < 10:\n return 0.0001\n else:\n return 0.0001 * tf.math.exp(0.1 * (10 - epoch))\n\n\ndef Train(dataset=True):\n train_generator, val_generator, val_question_ids, VOCAB_SIZE = (\n get_generator(dataset))\n save_config(dataset)\n checkpoint = ModelCheckpoint(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt',\n save_weights_only=True, verbose=1)\n scheduler_lr = LearningRateScheduler(scheduler, verbose=0)\n earlystop_callback = EarlyStopping(monitor='val_loss', patience=3)\n model = coattention(VOCAB_SIZE)\n model.compile(optimizer=Adam(learning_rate=LR), loss=\n 'categorical_crossentropy', metrics=['accuracy'])\n model.summary()\n model.save_weights(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt'.format(\n epoch=0))\n history = model.fit(x=train_generator, epochs=EPOCHS, validation_data=\n val_generator, callbacks=[checkpoint, earlystop_callback], workers=\n 6, use_multiprocessing=True)\n with open(HISTORY_PATH, 'w') as file:\n json.dump(history.history, file)\n predictions = model.predict(val_generator, workers=6,\n use_multiprocessing=True, verbose=1)\n ans_vocab = load_ans_vocab()\n result = []\n for q in range(len(val_question_ids)):\n ans = ans_vocab[str(predictions[q].argmax(axis=-1))]\n q_id = int(val_question_ids[q])\n result.append({u'answer': ans, u'question_id': q_id})\n with open(PRED_PATH, 'w') as file:\n json.dump(list(result), file)\n return\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef coattention(num_embeddings):\n image_input = Input(shape=(196, 512))\n question_input = Input(shape=(SEQ_LENGTH,))\n output = CoattentionModel(num_embeddings)(question_input, image_input)\n model = Model(inputs=[question_input, image_input], outputs=output)\n return model\n\n\ndef scheduler(epoch):\n if epoch < 10:\n return 0.0001\n else:\n return 0.0001 * tf.math.exp(0.1 * (10 - epoch))\n\n\ndef Train(dataset=True):\n train_generator, val_generator, val_question_ids, VOCAB_SIZE = (\n get_generator(dataset))\n save_config(dataset)\n checkpoint = ModelCheckpoint(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt',\n save_weights_only=True, verbose=1)\n scheduler_lr = LearningRateScheduler(scheduler, verbose=0)\n earlystop_callback = EarlyStopping(monitor='val_loss', patience=3)\n model = coattention(VOCAB_SIZE)\n model.compile(optimizer=Adam(learning_rate=LR), loss=\n 'categorical_crossentropy', metrics=['accuracy'])\n model.summary()\n model.save_weights(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt'.format(\n epoch=0))\n history = model.fit(x=train_generator, epochs=EPOCHS, validation_data=\n val_generator, callbacks=[checkpoint, earlystop_callback], workers=\n 6, use_multiprocessing=True)\n with open(HISTORY_PATH, 'w') as file:\n json.dump(history.history, file)\n predictions = model.predict(val_generator, workers=6,\n use_multiprocessing=True, verbose=1)\n ans_vocab = load_ans_vocab()\n result = []\n for q in range(len(val_question_ids)):\n ans = ans_vocab[str(predictions[q].argmax(axis=-1))]\n q_id = int(val_question_ids[q])\n result.append({u'answer': ans, u'question_id': q_id})\n with open(PRED_PATH, 'w') as file:\n json.dump(list(result), file)\n return\n\n\ndef save_config(dataset):\n if dataset == 0:\n DATASET = 'English'\n if dataset == 1:\n DATASET = 'Google'\n if dataset == 2:\n DATASET = 'Targoman'\n config = {'NAME': 'coattention', 'EMBEDDING': 'keras', 'DATASET':\n DATASET, 'OPTIMIZER': 'Adam', 'EARLY STOPPING': 'val_loss', 'LOSS':\n 'categorical_crossentropy', 'DROPOUT_RATE': DROPOUT_RATE,\n 'EMBEDDING_DIM': EMBEDDING_DIM, 'EPOCHS': EPOCHS, 'BATCH_SIZE':\n BATCH_SIZE, 'SEQ_LENGTH': SEQ_LENGTH, 'NUM_CLASSES': NUM_CLASSES}\n print('save config in' + str(CONFIG_PATH))\n with open(CONFIG_PATH, 'w') as file:\n json.dump(config, file)\n return\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef coattention(num_embeddings):\n image_input = Input(shape=(196, 512))\n question_input = Input(shape=(SEQ_LENGTH,))\n output = CoattentionModel(num_embeddings)(question_input, image_input)\n model = Model(inputs=[question_input, image_input], outputs=output)\n return model\n\n\ndef scheduler(epoch):\n if epoch < 10:\n return 0.0001\n else:\n return 0.0001 * tf.math.exp(0.1 * (10 - epoch))\n\n\ndef Train(dataset=True):\n train_generator, val_generator, val_question_ids, VOCAB_SIZE = (\n get_generator(dataset))\n save_config(dataset)\n checkpoint = ModelCheckpoint(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt',\n save_weights_only=True, verbose=1)\n scheduler_lr = LearningRateScheduler(scheduler, verbose=0)\n earlystop_callback = EarlyStopping(monitor='val_loss', patience=3)\n model = coattention(VOCAB_SIZE)\n model.compile(optimizer=Adam(learning_rate=LR), loss=\n 'categorical_crossentropy', metrics=['accuracy'])\n model.summary()\n model.save_weights(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt'.format(\n epoch=0))\n history = model.fit(x=train_generator, epochs=EPOCHS, validation_data=\n val_generator, callbacks=[checkpoint, earlystop_callback], workers=\n 6, use_multiprocessing=True)\n with open(HISTORY_PATH, 'w') as file:\n json.dump(history.history, file)\n predictions = model.predict(val_generator, workers=6,\n use_multiprocessing=True, verbose=1)\n ans_vocab = load_ans_vocab()\n result = []\n for q in range(len(val_question_ids)):\n ans = ans_vocab[str(predictions[q].argmax(axis=-1))]\n q_id = int(val_question_ids[q])\n result.append({u'answer': ans, u'question_id': q_id})\n with open(PRED_PATH, 'w') as file:\n json.dump(list(result), file)\n return\n\n\ndef save_config(dataset):\n if dataset == 0:\n DATASET = 'English'\n if dataset == 1:\n DATASET = 'Google'\n if dataset == 2:\n DATASET = 'Targoman'\n config = {'NAME': 'coattention', 'EMBEDDING': 'keras', 'DATASET':\n DATASET, 'OPTIMIZER': 'Adam', 'EARLY STOPPING': 'val_loss', 'LOSS':\n 'categorical_crossentropy', 'DROPOUT_RATE': DROPOUT_RATE,\n 'EMBEDDING_DIM': EMBEDDING_DIM, 'EPOCHS': EPOCHS, 'BATCH_SIZE':\n BATCH_SIZE, 'SEQ_LENGTH': SEQ_LENGTH, 'NUM_CLASSES': NUM_CLASSES}\n print('save config in' + str(CONFIG_PATH))\n with open(CONFIG_PATH, 'w') as file:\n json.dump(config, file)\n return\n\n\nTrain(dataset=2)\n",
"step-4": "import json\nfrom constants import *\nfrom coattention_layer import *\nfrom prepare_generator import *\nfrom tensorflow.keras.layers import Input\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.callbacks import LearningRateScheduler, ModelCheckpoint, EarlyStopping\n\n\ndef coattention(num_embeddings):\n image_input = Input(shape=(196, 512))\n question_input = Input(shape=(SEQ_LENGTH,))\n output = CoattentionModel(num_embeddings)(question_input, image_input)\n model = Model(inputs=[question_input, image_input], outputs=output)\n return model\n\n\ndef scheduler(epoch):\n if epoch < 10:\n return 0.0001\n else:\n return 0.0001 * tf.math.exp(0.1 * (10 - epoch))\n\n\ndef Train(dataset=True):\n train_generator, val_generator, val_question_ids, VOCAB_SIZE = (\n get_generator(dataset))\n save_config(dataset)\n checkpoint = ModelCheckpoint(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt',\n save_weights_only=True, verbose=1)\n scheduler_lr = LearningRateScheduler(scheduler, verbose=0)\n earlystop_callback = EarlyStopping(monitor='val_loss', patience=3)\n model = coattention(VOCAB_SIZE)\n model.compile(optimizer=Adam(learning_rate=LR), loss=\n 'categorical_crossentropy', metrics=['accuracy'])\n model.summary()\n model.save_weights(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt'.format(\n epoch=0))\n history = model.fit(x=train_generator, epochs=EPOCHS, validation_data=\n val_generator, callbacks=[checkpoint, earlystop_callback], workers=\n 6, use_multiprocessing=True)\n with open(HISTORY_PATH, 'w') as file:\n json.dump(history.history, file)\n predictions = model.predict(val_generator, workers=6,\n use_multiprocessing=True, verbose=1)\n ans_vocab = load_ans_vocab()\n result = []\n for q in range(len(val_question_ids)):\n ans = ans_vocab[str(predictions[q].argmax(axis=-1))]\n q_id = int(val_question_ids[q])\n result.append({u'answer': ans, u'question_id': q_id})\n with open(PRED_PATH, 'w') as file:\n json.dump(list(result), file)\n return\n\n\ndef save_config(dataset):\n if dataset == 0:\n DATASET = 'English'\n if dataset == 1:\n DATASET = 'Google'\n if dataset == 2:\n DATASET = 'Targoman'\n config = {'NAME': 'coattention', 'EMBEDDING': 'keras', 'DATASET':\n DATASET, 'OPTIMIZER': 'Adam', 'EARLY STOPPING': 'val_loss', 'LOSS':\n 'categorical_crossentropy', 'DROPOUT_RATE': DROPOUT_RATE,\n 'EMBEDDING_DIM': EMBEDDING_DIM, 'EPOCHS': EPOCHS, 'BATCH_SIZE':\n BATCH_SIZE, 'SEQ_LENGTH': SEQ_LENGTH, 'NUM_CLASSES': NUM_CLASSES}\n print('save config in' + str(CONFIG_PATH))\n with open(CONFIG_PATH, 'w') as file:\n json.dump(config, file)\n return\n\n\nTrain(dataset=2)\n",
"step-5": "import json\r\nfrom constants import *\r\nfrom coattention_layer import *\r\nfrom prepare_generator import *\r\nfrom tensorflow.keras.layers import Input\r\nfrom tensorflow.keras.models import Model\r\nfrom tensorflow.keras.optimizers import Adam\r\nfrom tensorflow.keras.callbacks import LearningRateScheduler, ModelCheckpoint, EarlyStopping\r\n\r\n\r\ndef coattention(num_embeddings):\r\n image_input = Input(shape=(196, 512))\r\n question_input = Input(shape=(SEQ_LENGTH,))\r\n\r\n output = CoattentionModel(num_embeddings)(question_input, image_input)\r\n\r\n model = Model(inputs=[question_input, image_input], outputs=output)\r\n\r\n return model\r\n\r\n\r\ndef scheduler(epoch):\r\n if epoch < 10:\r\n return 0.0001\r\n else:\r\n return 0.0001 * tf.math.exp(0.1 * (10 - epoch))\r\n\r\n\r\ndef Train(dataset=True):\r\n\r\n train_generator, val_generator, val_question_ids, VOCAB_SIZE = get_generator(\r\n dataset)\r\n\r\n save_config(dataset)\r\n\r\n checkpoint = ModelCheckpoint(CHECKPOINT_PATH + '/cp-{epoch: 04d}.ckpt',\r\n save_weights_only=True,\r\n verbose=1)\r\n\r\n scheduler_lr = LearningRateScheduler(scheduler, verbose=0)\r\n earlystop_callback = EarlyStopping(monitor='val_loss', patience=3)\r\n\r\n model = coattention(VOCAB_SIZE)\r\n\r\n model.compile(optimizer=Adam(learning_rate=LR),\r\n loss='categorical_crossentropy',\r\n metrics=['accuracy'])\r\n\r\n model.summary()\r\n\r\n # Save the weights using the `checkpoint_path` format\r\n model.save_weights(CHECKPOINT_PATH +\r\n '/cp-{epoch: 04d}.ckpt'.format(epoch=0))\r\n\r\n history = model.fit(x=train_generator,\r\n epochs=EPOCHS,\r\n validation_data=val_generator,\r\n callbacks=[checkpoint, earlystop_callback],\r\n workers=6,\r\n use_multiprocessing=True)\r\n\r\n # save history\r\n with open(HISTORY_PATH, 'w') as file:\r\n json.dump(history.history, file)\r\n\r\n # prediction\r\n predictions = model.predict(val_generator,\r\n workers=6,\r\n use_multiprocessing=True,\r\n verbose=1)\r\n\r\n ans_vocab = load_ans_vocab()\r\n\r\n result = []\r\n for q in range(len(val_question_ids)):\r\n ans = ans_vocab[str(predictions[q].argmax(axis=-1))]\r\n q_id = int(val_question_ids[q])\r\n result.append({u'answer': ans, u'question_id': q_id})\r\n\r\n with open(PRED_PATH, 'w') as file:\r\n json.dump(list(result), file)\r\n\r\n return\r\n\r\n\r\ndef save_config(dataset):\r\n if dataset == 0:\r\n DATASET = 'English'\r\n if dataset == 1:\r\n DATASET = 'Google'\r\n if dataset == 2:\r\n DATASET = 'Targoman'\r\n\r\n config = {'NAME': 'coattention',\r\n 'EMBEDDING': 'keras',\r\n \"DATASET\": DATASET,\r\n \"OPTIMIZER\": 'Adam',\r\n \"EARLY STOPPING\": 'val_loss',\r\n \"LOSS\": 'categorical_crossentropy',\r\n 'DROPOUT_RATE': DROPOUT_RATE,\r\n \"EMBEDDING_DIM\": EMBEDDING_DIM,\r\n \"EPOCHS\": EPOCHS,\r\n \"BATCH_SIZE\": BATCH_SIZE,\r\n \"SEQ_LENGTH\": SEQ_LENGTH,\r\n \"NUM_CLASSES\": NUM_CLASSES}\r\n\r\n print(\"save config in\" + str(CONFIG_PATH))\r\n with open(CONFIG_PATH, 'w') as file:\r\n json.dump(config, file)\r\n\r\n return\r\n\r\n\r\nTrain(dataset=2)\r\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
##
# hunt_and_kill.py
# 05 Oct 2021
# Generates a maze using the hunt and kill algorithm
# S
from sys import argv
from enum import Enum
import random
# Cardinal directions, can be OR'd and AND'd
DIRS = {
'N': 1 << 0,
'E': 1 << 1,
'S': 1 << 2,
'W': 1 << 3
}
O_DIRS = {
'N': 'S',
'E': 'W',
'S': 'N',
'W': 'E'
}
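
# Example of the bitmask encoding (editor's addition): a cell open to the north
# and east stores DIRS['N'] | DIRS['E'] == 0b0011 == 3, and
# bool(cell & DIRS['N']) tests whether the north passage is open.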
def init_maze(width: int, height: int) -> list[int]:
"""
Set up a 2D list with 0 as starting value. Basically an empty maze
"""
return [0] * width * height
def walk_maze(maze: list[int], width: int, height: int, start: tuple[int, int]) -> None:
    """
    Does a random walk, setting the cells as it goes, until it can't find a
    path.
    """
# Shortcut for accessing maze
maze_idx = lambda p: p[1] * width + p[0]
# Shortcut funcs for surrounding points
north = lambda p: (p[0] , p[1] -1)
east = lambda p: (p[0] +1, p[1] )
south = lambda p: (p[0] , p[1] +1)
west = lambda p: (p[0] -1, p[1] )
    def check_neighbours(pt, visited=False) -> dict[tuple[int, int], str]:
        """
        Returns a dict mapping candidate neighbour points to their direction letter.
        Pass visited=True to collect only already-visited neighbours.
        """
        # Points will be added to this dict if they haven't been traversed yet
possible_points = dict()
# -- NORTH
p_pt = north(pt)
        # true when the neighbour's visited state matches the `visited` flag:
        # unvisited cells hold 0, so bool(cell) == visited (note `False or visited` is just `visited`)
if pt[1] > 0 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):
possible_points[p_pt] = "N"
# -- EAST
p_pt = east(pt)
if pt[0] < width - 1 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):
possible_points[p_pt] = "E"
# -- SOUTH
p_pt = south(pt)
if pt[1] < height - 1 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):
possible_points[p_pt] = "S"
# -- WEST
p_pt = west(pt)
if pt[0] > 0 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):
possible_points[p_pt] = "W"
return possible_points
# First, connect to a random neighbour that has been visited.
starting_n = check_neighbours(start, True)
if starting_n:
neigh, dire = random.choice(tuple(starting_n.items()))
maze[maze_idx(neigh)] |= DIRS[O_DIRS[dire]]
maze[maze_idx(start)] |= DIRS[dire]
step = start
# Walk randomly until out of options
while possible_n := check_neighbours(step):
next_step, direction = random.choice(tuple(possible_n.items()))
# Connect the two cells
maze[maze_idx(step)] |= DIRS[direction]
maze[maze_idx(next_step)] |= DIRS[O_DIRS[direction]]
# Go to next
step = next_step
def gen_maze(width: int, height: int) -> list[int]:
maze = init_maze(width, height)
maze_idx = lambda p: p[1] * width + p[0]
for y in range(height):
for x in range(width):
if not maze[maze_idx((x, y))]:
walk_maze(maze, width, height, (x, y))
return maze
def print_maze(maze: list[int], width: int, height: int) -> None:
"""
    Print an ASCII rendering of the maze.
"""
maze_idx = lambda p: p[1] * width + p[0]
# top row
print(' ' + '_' * (2 * width - 1))
for y in range(height):
for x in range(width):
# left wall
if maze[maze_idx((x, y))] & DIRS["W"]:
# leave wall open if you can also go down
if maze[maze_idx((x, y))] & DIRS["S"]:
print(' ', end='')
else:
print('_', end='')
else:
print('|', end='')
if maze[maze_idx((x, y))] & DIRS["S"]:
print(' ', end='')
else:
print('_', end='')
# right wall
print('|')
def main():
width = height = 10
if len(argv) > 2:
width = int(argv[1])
height = int(argv[2])
print(f"Generating maze size {width}x{height}")
maze = gen_maze(width, height)
print_maze(maze, width, height)
return maze
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "54002bc7e2a1991d2405acbe1d399e8803ac5582",
"index": 7210,
"step-1": "<mask token>\n\n\ndef walk_maze(maze: list[int], width: int, height: int, start: tuple[int, int]\n ) ->None:\n \"\"\"\n Does a random walk, setting the cells as it goes, until it cant find a\n path.\n \"\"\"\n maze_idx = lambda p: p[1] * width + p[0]\n north = lambda p: (p[0], p[1] - 1)\n east = lambda p: (p[0] + 1, p[1])\n south = lambda p: (p[0], p[1] + 1)\n west = lambda p: (p[0] - 1, p[1])\n\n def check_neighbours(pt, visited=False) ->list[tuple[int, int]]:\n \"\"\"\n Returns a list of possible neighbours.\n Can pass arg to only count visited neighbours\n \"\"\"\n possible_points = dict()\n p_pt = north(pt)\n if pt[1] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):\n possible_points[p_pt] = 'N'\n p_pt = east(pt)\n if pt[0] < width - 1 and bool(maze[maze_idx(p_pt)]) == (False or\n visited):\n possible_points[p_pt] = 'E'\n p_pt = south(pt)\n if pt[1] < height - 1 and bool(maze[maze_idx(p_pt)]) == (False or\n visited):\n possible_points[p_pt] = 'S'\n p_pt = west(pt)\n if pt[0] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):\n possible_points[p_pt] = 'W'\n return possible_points\n starting_n = check_neighbours(start, True)\n if starting_n:\n neigh, dire = random.choice(tuple(starting_n.items()))\n maze[maze_idx(neigh)] |= DIRS[O_DIRS[dire]]\n maze[maze_idx(start)] |= DIRS[dire]\n step = start\n while (possible_n := check_neighbours(step)):\n next_step, direction = random.choice(tuple(possible_n.items()))\n maze[maze_idx(step)] |= DIRS[direction]\n maze[maze_idx(next_step)] |= DIRS[O_DIRS[direction]]\n step = next_step\n\n\ndef gen_maze(width: int, height: int) ->list[int]:\n maze = init_maze(width, height)\n maze_idx = lambda p: p[1] * width + p[0]\n for y in range(height):\n for x in range(width):\n if not maze[maze_idx((x, y))]:\n walk_maze(maze, width, height, (x, y))\n return maze\n\n\ndef print_maze(maze: list[int], width: int, height: int) ->None:\n \"\"\"\n Print an ASCII maze!!!! Maybe works??\n \"\"\"\n maze_idx = lambda p: p[1] * width + p[0]\n print(' ' + '_' * (2 * width - 1))\n for y in range(height):\n for x in range(width):\n if maze[maze_idx((x, y))] & DIRS['W']:\n if maze[maze_idx((x, y))] & DIRS['S']:\n print(' ', end='')\n else:\n print('_', end='')\n else:\n print('|', end='')\n if maze[maze_idx((x, y))] & DIRS['S']:\n print(' ', end='')\n else:\n print('_', end='')\n print('|')\n\n\ndef main():\n width = height = 10\n if len(argv) > 2:\n width = int(argv[1])\n height = int(argv[2])\n print(f'Generating maze size {width}x{height}')\n maze = gen_maze(width, height)\n print_maze(maze, width, height)\n return maze\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef init_maze(width: int, height: int) ->list[int]:\n \"\"\"\n Set up a 2D list with 0 as starting value. Basically an empty maze\n \"\"\"\n return [0] * width * height\n\n\ndef walk_maze(maze: list[int], width: int, height: int, start: tuple[int, int]\n ) ->None:\n \"\"\"\n Does a random walk, setting the cells as it goes, until it cant find a\n path.\n \"\"\"\n maze_idx = lambda p: p[1] * width + p[0]\n north = lambda p: (p[0], p[1] - 1)\n east = lambda p: (p[0] + 1, p[1])\n south = lambda p: (p[0], p[1] + 1)\n west = lambda p: (p[0] - 1, p[1])\n\n def check_neighbours(pt, visited=False) ->list[tuple[int, int]]:\n \"\"\"\n Returns a list of possible neighbours.\n Can pass arg to only count visited neighbours\n \"\"\"\n possible_points = dict()\n p_pt = north(pt)\n if pt[1] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):\n possible_points[p_pt] = 'N'\n p_pt = east(pt)\n if pt[0] < width - 1 and bool(maze[maze_idx(p_pt)]) == (False or\n visited):\n possible_points[p_pt] = 'E'\n p_pt = south(pt)\n if pt[1] < height - 1 and bool(maze[maze_idx(p_pt)]) == (False or\n visited):\n possible_points[p_pt] = 'S'\n p_pt = west(pt)\n if pt[0] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):\n possible_points[p_pt] = 'W'\n return possible_points\n starting_n = check_neighbours(start, True)\n if starting_n:\n neigh, dire = random.choice(tuple(starting_n.items()))\n maze[maze_idx(neigh)] |= DIRS[O_DIRS[dire]]\n maze[maze_idx(start)] |= DIRS[dire]\n step = start\n while (possible_n := check_neighbours(step)):\n next_step, direction = random.choice(tuple(possible_n.items()))\n maze[maze_idx(step)] |= DIRS[direction]\n maze[maze_idx(next_step)] |= DIRS[O_DIRS[direction]]\n step = next_step\n\n\ndef gen_maze(width: int, height: int) ->list[int]:\n maze = init_maze(width, height)\n maze_idx = lambda p: p[1] * width + p[0]\n for y in range(height):\n for x in range(width):\n if not maze[maze_idx((x, y))]:\n walk_maze(maze, width, height, (x, y))\n return maze\n\n\ndef print_maze(maze: list[int], width: int, height: int) ->None:\n \"\"\"\n Print an ASCII maze!!!! Maybe works??\n \"\"\"\n maze_idx = lambda p: p[1] * width + p[0]\n print(' ' + '_' * (2 * width - 1))\n for y in range(height):\n for x in range(width):\n if maze[maze_idx((x, y))] & DIRS['W']:\n if maze[maze_idx((x, y))] & DIRS['S']:\n print(' ', end='')\n else:\n print('_', end='')\n else:\n print('|', end='')\n if maze[maze_idx((x, y))] & DIRS['S']:\n print(' ', end='')\n else:\n print('_', end='')\n print('|')\n\n\ndef main():\n width = height = 10\n if len(argv) > 2:\n width = int(argv[1])\n height = int(argv[2])\n print(f'Generating maze size {width}x{height}')\n maze = gen_maze(width, height)\n print_maze(maze, width, height)\n return maze\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nDIRS = {'N': 1 << 0, 'E': 1 << 1, 'S': 1 << 2, 'W': 1 << 3}\nO_DIRS = {'N': 'S', 'E': 'W', 'S': 'N', 'W': 'E'}\n\n\ndef init_maze(width: int, height: int) ->list[int]:\n \"\"\"\n Set up a 2D list with 0 as starting value. Basically an empty maze\n \"\"\"\n return [0] * width * height\n\n\ndef walk_maze(maze: list[int], width: int, height: int, start: tuple[int, int]\n ) ->None:\n \"\"\"\n Does a random walk, setting the cells as it goes, until it cant find a\n path.\n \"\"\"\n maze_idx = lambda p: p[1] * width + p[0]\n north = lambda p: (p[0], p[1] - 1)\n east = lambda p: (p[0] + 1, p[1])\n south = lambda p: (p[0], p[1] + 1)\n west = lambda p: (p[0] - 1, p[1])\n\n def check_neighbours(pt, visited=False) ->list[tuple[int, int]]:\n \"\"\"\n Returns a list of possible neighbours.\n Can pass arg to only count visited neighbours\n \"\"\"\n possible_points = dict()\n p_pt = north(pt)\n if pt[1] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):\n possible_points[p_pt] = 'N'\n p_pt = east(pt)\n if pt[0] < width - 1 and bool(maze[maze_idx(p_pt)]) == (False or\n visited):\n possible_points[p_pt] = 'E'\n p_pt = south(pt)\n if pt[1] < height - 1 and bool(maze[maze_idx(p_pt)]) == (False or\n visited):\n possible_points[p_pt] = 'S'\n p_pt = west(pt)\n if pt[0] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):\n possible_points[p_pt] = 'W'\n return possible_points\n starting_n = check_neighbours(start, True)\n if starting_n:\n neigh, dire = random.choice(tuple(starting_n.items()))\n maze[maze_idx(neigh)] |= DIRS[O_DIRS[dire]]\n maze[maze_idx(start)] |= DIRS[dire]\n step = start\n while (possible_n := check_neighbours(step)):\n next_step, direction = random.choice(tuple(possible_n.items()))\n maze[maze_idx(step)] |= DIRS[direction]\n maze[maze_idx(next_step)] |= DIRS[O_DIRS[direction]]\n step = next_step\n\n\ndef gen_maze(width: int, height: int) ->list[int]:\n maze = init_maze(width, height)\n maze_idx = lambda p: p[1] * width + p[0]\n for y in range(height):\n for x in range(width):\n if not maze[maze_idx((x, y))]:\n walk_maze(maze, width, height, (x, y))\n return maze\n\n\ndef print_maze(maze: list[int], width: int, height: int) ->None:\n \"\"\"\n Print an ASCII maze!!!! Maybe works??\n \"\"\"\n maze_idx = lambda p: p[1] * width + p[0]\n print(' ' + '_' * (2 * width - 1))\n for y in range(height):\n for x in range(width):\n if maze[maze_idx((x, y))] & DIRS['W']:\n if maze[maze_idx((x, y))] & DIRS['S']:\n print(' ', end='')\n else:\n print('_', end='')\n else:\n print('|', end='')\n if maze[maze_idx((x, y))] & DIRS['S']:\n print(' ', end='')\n else:\n print('_', end='')\n print('|')\n\n\ndef main():\n width = height = 10\n if len(argv) > 2:\n width = int(argv[1])\n height = int(argv[2])\n print(f'Generating maze size {width}x{height}')\n maze = gen_maze(width, height)\n print_maze(maze, width, height)\n return maze\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from sys import argv\nfrom enum import Enum\nimport random\nDIRS = {'N': 1 << 0, 'E': 1 << 1, 'S': 1 << 2, 'W': 1 << 3}\nO_DIRS = {'N': 'S', 'E': 'W', 'S': 'N', 'W': 'E'}\n\n\ndef init_maze(width: int, height: int) ->list[int]:\n \"\"\"\n Set up a 2D list with 0 as starting value. Basically an empty maze\n \"\"\"\n return [0] * width * height\n\n\ndef walk_maze(maze: list[int], width: int, height: int, start: tuple[int, int]\n ) ->None:\n \"\"\"\n Does a random walk, setting the cells as it goes, until it cant find a\n path.\n \"\"\"\n maze_idx = lambda p: p[1] * width + p[0]\n north = lambda p: (p[0], p[1] - 1)\n east = lambda p: (p[0] + 1, p[1])\n south = lambda p: (p[0], p[1] + 1)\n west = lambda p: (p[0] - 1, p[1])\n\n def check_neighbours(pt, visited=False) ->list[tuple[int, int]]:\n \"\"\"\n Returns a list of possible neighbours.\n Can pass arg to only count visited neighbours\n \"\"\"\n possible_points = dict()\n p_pt = north(pt)\n if pt[1] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):\n possible_points[p_pt] = 'N'\n p_pt = east(pt)\n if pt[0] < width - 1 and bool(maze[maze_idx(p_pt)]) == (False or\n visited):\n possible_points[p_pt] = 'E'\n p_pt = south(pt)\n if pt[1] < height - 1 and bool(maze[maze_idx(p_pt)]) == (False or\n visited):\n possible_points[p_pt] = 'S'\n p_pt = west(pt)\n if pt[0] > 0 and bool(maze[maze_idx(p_pt)]) == (False or visited):\n possible_points[p_pt] = 'W'\n return possible_points\n starting_n = check_neighbours(start, True)\n if starting_n:\n neigh, dire = random.choice(tuple(starting_n.items()))\n maze[maze_idx(neigh)] |= DIRS[O_DIRS[dire]]\n maze[maze_idx(start)] |= DIRS[dire]\n step = start\n while (possible_n := check_neighbours(step)):\n next_step, direction = random.choice(tuple(possible_n.items()))\n maze[maze_idx(step)] |= DIRS[direction]\n maze[maze_idx(next_step)] |= DIRS[O_DIRS[direction]]\n step = next_step\n\n\ndef gen_maze(width: int, height: int) ->list[int]:\n maze = init_maze(width, height)\n maze_idx = lambda p: p[1] * width + p[0]\n for y in range(height):\n for x in range(width):\n if not maze[maze_idx((x, y))]:\n walk_maze(maze, width, height, (x, y))\n return maze\n\n\ndef print_maze(maze: list[int], width: int, height: int) ->None:\n \"\"\"\n Print an ASCII maze!!!! Maybe works??\n \"\"\"\n maze_idx = lambda p: p[1] * width + p[0]\n print(' ' + '_' * (2 * width - 1))\n for y in range(height):\n for x in range(width):\n if maze[maze_idx((x, y))] & DIRS['W']:\n if maze[maze_idx((x, y))] & DIRS['S']:\n print(' ', end='')\n else:\n print('_', end='')\n else:\n print('|', end='')\n if maze[maze_idx((x, y))] & DIRS['S']:\n print(' ', end='')\n else:\n print('_', end='')\n print('|')\n\n\ndef main():\n width = height = 10\n if len(argv) > 2:\n width = int(argv[1])\n height = int(argv[2])\n print(f'Generating maze size {width}x{height}')\n maze = gen_maze(width, height)\n print_maze(maze, width, height)\n return maze\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "##\n# hunt_and_kill.py\n# 05 Oct 2021\n# Generates a maze using the hunt and kill algorithm\n# S\nfrom sys import argv\nfrom enum import Enum\nimport random\n\n# Cardinal directions, can be OR'd and AND'd\nDIRS = {\n 'N': 1 << 0,\n 'E': 1 << 1,\n 'S': 1 << 2,\n 'W': 1 << 3\n}\n\nO_DIRS = {\n 'N': 'S',\n 'E': 'W',\n 'S': 'N',\n 'W': 'E'\n}\n\ndef init_maze(width: int, height: int) -> list[int]:\n \"\"\"\n Set up a 2D list with 0 as starting value. Basically an empty maze\n \"\"\"\n return [0] * width * height\n\n\ndef walk_maze(maze: list[int], width: int, height: int, start: tuple[int, int]) -> None:\n \"\"\"\n Does a random walk, setting the cells as it goes, until it cant find a\n path.\n \"\"\"\n # Shortcut for accessing maze\n maze_idx = lambda p: p[1] * width + p[0]\n\n # Shortcut funcs for surrounding points\n north = lambda p: (p[0] , p[1] -1)\n east = lambda p: (p[0] +1, p[1] )\n south = lambda p: (p[0] , p[1] +1)\n west = lambda p: (p[0] -1, p[1] )\n\n def check_neighbours(pt, visited=False) -> list[tuple[int, int]]:\n \"\"\"\n Returns a list of possible neighbours.\n Can pass arg to only count visited neighbours\n \"\"\"\n # Points will be added to this list if they havent been traversed yet\n possible_points = dict()\n\n # -- NORTH\n p_pt = north(pt)\n # This mess of a condition will evaluate to true if the cell is visited and the user is asking for a visited cell. Viceversa.\n if pt[1] > 0 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):\n possible_points[p_pt] = \"N\"\n\n # -- EAST\n p_pt = east(pt)\n if pt[0] < width - 1 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):\n possible_points[p_pt] = \"E\"\n\n # -- SOUTH\n p_pt = south(pt)\n if pt[1] < height - 1 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):\n possible_points[p_pt] = \"S\"\n\n # -- WEST\n p_pt = west(pt)\n if pt[0] > 0 and (bool(maze[maze_idx(p_pt)]) == (False or visited)):\n possible_points[p_pt] = \"W\"\n\n return possible_points\n\n # First, connect to a random neighbour that has been visited.\n starting_n = check_neighbours(start, True)\n if starting_n:\n neigh, dire = random.choice(tuple(starting_n.items()))\n\n maze[maze_idx(neigh)] |= DIRS[O_DIRS[dire]]\n maze[maze_idx(start)] |= DIRS[dire]\n\n step = start\n\n # Walk randomly until out of options\n while possible_n := check_neighbours(step):\n next_step, direction = random.choice(tuple(possible_n.items()))\n\n # Connect the two cells\n maze[maze_idx(step)] |= DIRS[direction]\n maze[maze_idx(next_step)] |= DIRS[O_DIRS[direction]]\n\n # Go to next\n step = next_step\n\n\n\ndef gen_maze(width: int, height: int) -> list[int]:\n maze = init_maze(width, height)\n\n maze_idx = lambda p: p[1] * width + p[0]\n for y in range(height):\n for x in range(width):\n if not maze[maze_idx((x, y))]:\n walk_maze(maze, width, height, (x, y))\n\n return maze\n\ndef print_maze(maze: list[int], width: int, height: int) -> None:\n \"\"\"\n Print an ASCII maze!!!! 
Maybe works??\n \"\"\"\n maze_idx = lambda p: p[1] * width + p[0]\n\n # top row\n print(' ' + '_' * (2 * width - 1))\n\n for y in range(height):\n for x in range(width):\n # left wall\n if maze[maze_idx((x, y))] & DIRS[\"W\"]:\n # leave wall open if you can also go down\n if maze[maze_idx((x, y))] & DIRS[\"S\"]:\n print(' ', end='')\n else:\n print('_', end='')\n\n else:\n print('|', end='')\n\n if maze[maze_idx((x, y))] & DIRS[\"S\"]:\n print(' ', end='')\n else:\n print('_', end='')\n # right wall\n print('|')\n\ndef main():\n width = height = 10\n if len(argv) > 2:\n width = int(argv[1])\n height = int(argv[2])\n\n print(f\"Generating maze size {width}x{height}\")\n maze = gen_maze(width, height)\n print_maze(maze, width, height)\n return maze\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
import numpy
import numpy.fft
import numpy.linalg
import copy
import math  # numpy.math (previously used below for factorials) was removed in NumPy >= 1.25
from astropy.io import fits
from scipy.interpolate import RectBivariateSpline
from scipy.signal import convolve
import offset_index
# some basic definitions
psSize = 9 # psSize x psSize postage stamps of stars
# zero-padded RectBivariateSpline wrapper; the early return below bypasses the
# padding, so as written this is a plain RectBivariateSpline (padding disabled)
def RectBivariateSplineZero(y1,x1,map1,kx=1,ky=1):
    return RectBivariateSpline(y1, x1, map1, kx=kx, ky=ky)
    # unreachable while the early return above is active: pad the grid by one
    # sample on each side so the interpolant falls to zero outside the supplied map
y2 = numpy.zeros(numpy.size(y1)+2)
y2[1:-1] = y1
y2[0] = 2*y2[1]-y2[2]
y2[-1] = 2*y2[-2]-y2[-3]
x2 = numpy.zeros(numpy.size(x1)+2)
x2[1:-1] = x1
x2[0] = 2*x2[1]-x2[2]
x2[-1] = 2*x2[-2]-x2[-3]
map2 = numpy.zeros((numpy.size(y1)+2, numpy.size(x1)+2))
map2[1:-1,1:-1] = map1
return RectBivariateSpline(y2, x2, map2, kx=kx, ky=ky)
class EmptyClass():
pass
# spectral energy distribution class
class SpectralEnergyDistribution():
# make an SED -- several options for type
def __init__(self, type, info):
self.type = type
self.info = copy.deepcopy(info)
# get Nlambda (photons/m^2/s/um) at lambda_ (um)
def Nlambda(self, lambda_):
# blackbody, info = [T (K), solidangle]
if self.type=='BB':
T = self.info[0]
x = 14387.769/lambda_/T # hc/(kTlambda)
return(2/lambda_**4*2.99792458e14*1e12*numpy.exp(-x)/(1.-numpy.exp(-x))*self.info[1])
# the 1e12 is the conversion from um^2 -> m^2
else:
print('ERROR: Invalid SED type')
exit()
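
# Editor's note on the 'BB' expression above: the Planck photon radiance is
# B_lambda / (hc/lambda) = (2c/lambda^4) / (exp(hc/(k T lambda)) - 1)
# photons / um^2 / s / um / sr. With c = 2.99792458e14 um/s, the factor 1e12
# converts um^2 -> m^2 and info[1] supplies the solid angle in sr, giving
# photons / m^2 / s / um as claimed.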
# filter class
class Filter():
# make a filter -- several options for type
def __init__(self, type, info):
self.type = type
self.info = copy.deepcopy(info)
# get transmission
def Tlambda(self, lambda_):
# smoothed tophat
if self.type=='STH':
lmin = self.info[0]; dlmin = lmin*.02
lmax = self.info[1]; dlmax = lmax*.02
return((numpy.tanh((lambda_-lmin)/dlmin)-numpy.tanh((lambda_-lmax)/dlmax))/2.)
# interpolated file
# info shape (N,2) -- info[:,0] = wavelength, info[:,1] = throughput
elif self.type=='interp':
return(numpy.interp(lambda_, self.info[:,0], self.info[:,1]))
else:
print('ERROR: Invalid filter type')
exit()
# load mask files
maskfiles = EmptyClass()
maskfiles.D = 2292981.05344 # um
maskfiles.rim = []
maskfiles.full = []
maskfiles.i_rim = []
maskfiles.i_full = []
maskfiles.nSCA = 18
for k in range(18):
inFile = fits.open('pupils/SCA{:d}_rim_mask.fits'.format(k+1))
maskfiles.rim += [numpy.copy(inFile[0].data[::-1,:])]
inFile.close()
inFile = fits.open('pupils/SCA{:d}_full_mask.fits'.format(k+1))
maskfiles.full += [numpy.copy(inFile[0].data[::-1,:])]
inFile.close()
# normalize
maskfiles.rim[k] /= numpy.amax(maskfiles.rim[k])
maskfiles.full[k] /= numpy.amax(maskfiles.full[k])
N_in = maskfiles.N_in = 2048
x_in = numpy.linspace(-1+1/N_in,1-1/N_in,N_in)
y_in = numpy.copy(x_in)
interp_spline = RectBivariateSplineZero(y_in, x_in, maskfiles.rim[k], kx=1, ky=1)
maskfiles.i_rim += [interp_spline]
interp_spline = RectBivariateSplineZero(y_in, x_in, maskfiles.full[k], kx=1, ky=1)
maskfiles.i_full += [interp_spline]
# lower resolution masks
maskfiles.n_lores = 7
for ku in range(1,maskfiles.n_lores):
N2 = N_in//2**ku
x_in = numpy.linspace(-1+1/N2,1-1/N2,N2)
y_in = numpy.copy(x_in)
interp_spline = RectBivariateSplineZero(y_in, x_in, numpy.mean(maskfiles.rim[k].reshape(N2,2**ku,N2,2**ku), axis=(1,3)), kx=1, ky=1)
maskfiles.i_rim += [interp_spline]
interp_spline = RectBivariateSplineZero(y_in, x_in, numpy.mean(maskfiles.full[k].reshape(N2,2**ku,N2,2**ku), axis=(1,3)), kx=1, ky=1)
maskfiles.i_full += [interp_spline]
# SCA locations
sca = EmptyClass()
sca.size = 40.88 # mm
sca.x = numpy.asarray([-22.14, -22.29, -22.44, -66.42, -66.92, -67.42, -110.70, -111.48, -112.64,
22.14, 22.29, 22.44, 66.42, 66.92, 67.42, 110.70, 111.48, 112.64])
sca.y = numpy.asarray([12.15, -37.03, -82.06, 20.90, -28.28, -73.06, 42.20, 13.46, -51.06,
12.15, -37.03, -82.06, 20.90, -28.28, -73.06, 42.20, 13.46, -51.06])
sca.scale = 133.08
# reference Zernikes
ZernRef = EmptyClass()
ZernRef.data = numpy.loadtxt('pupils/zernike_ref.txt')[:,-22:] * 1.38
# filter data: tabulated effective areas are divided by the full aperture area
# (pi/4 D^2, D in m) to give a dimensionless throughput
FilterData = numpy.loadtxt('pupils/filter.dat')
FilterData[:,1:] /= numpy.pi/4.*(maskfiles.D/1e6)**2
# makes map of Zernikes of a given amplitude
# amp[0:Namp] = Z1 ... ZNamp
# on a spacing Ngrid (x, y = -(1-1/Ngrid) .. +(1-1/Ngrid) multiplied by scale)
#
def zernike_map_noll(amp, Ngrid, scale):
xx = numpy.tile(numpy.linspace(-1+1/Ngrid,1-1/Ngrid,Ngrid), (Ngrid,1))
yy = numpy.copy(xx.T)
rho = numpy.sqrt(xx**2+yy**2)*scale
phi = numpy.arctan2(yy,xx)
output = numpy.zeros((Ngrid,Ngrid))
nmax = 0
namp = numpy.size(amp)
while namp>(nmax+1)*(nmax+2)//2: nmax+=1
rpows = numpy.ones((nmax+1,Ngrid,Ngrid))
trigphi = numpy.ones((2*nmax+1,Ngrid,Ngrid))
for i in range(1,nmax+1): rpows[i,:,:] = rho**i
for i in range(0,nmax+1): trigphi[i,:,:] = numpy.cos(i*phi)
for i in range(1,nmax+1): trigphi[-i,:,:] = numpy.sin(i*phi)
# loop over Zernikes
for n in range(nmax+1):
for m in range(-n,n+1,2):
Z = numpy.zeros((Ngrid,Ngrid))
for k in range((n-abs(m))//2+1):
                    coef = (-1)**k * math.factorial(n-k)/math.factorial(k) \
                        /math.factorial((n-m)//2-k)/math.factorial((n+m)//2-k)
Z += coef * rpows[n-2*k,:,:]
#if m>=0:
# Z *= numpy.cos(m*phi)
#else:
# Z *= numpy.sin(-m*phi)
Z *= trigphi[m,:,:]
                j = n*(n+1)//2 + abs(m)
                if (-1)**j*(m+.5)<0 or m==0: j += 1   # Noll's convention for assigning the sign of m
#print(n,m,j)
factor = numpy.sqrt(n+1)
if m!=0: factor *= numpy.sqrt(2)
if j<=namp: output += factor * amp[j-1] * Z
return(output)
# make annular mask of given obstruction (fraction) and scale
def make_mask_annulus(obs, Nstep, scale):
xx = numpy.tile(numpy.linspace(-1+1/Nstep,1-1/Nstep,Nstep), (Nstep,1))
yy = numpy.copy(xx.T)
rho = numpy.sqrt(xx**2+yy**2)*scale
return(numpy.where(numpy.logical_and(rho>=obs,rho<1),numpy.ones((Nstep,Nstep)),numpy.zeros((Nstep,Nstep))))
def test_zernike():
for k in range(36):
psi = numpy.zeros(36)
psi[k] = 1
N=5
M = zernike_map_noll(psi, N, N/(N-1))
print(' *** Zernike {:2d} ***'.format(k+1))
for j in range(N):
out = ''
for i in range(N):
out = out + ' {:10.5f}'.format(M[j,i])
print(out)
print('')
# psi is a vector of Zernikes, in wavelengths
# mask: an object with .N set (and either a prebuilt .spline or an .array to
#       interpolate) selects a pupil-mask file; otherwise a 32%-obscured annulus is used
# scale = sampling (points per lambda/D)
# Nstep = # grid points
# output normalized to sum to 1
def mono_psf(psi, mask, scale, Nstep):
if hasattr(mask, 'N'):
if hasattr(mask, 'spline'):
interp_spline = mask.spline
else:
N_in = 2048
x_in = numpy.linspace(-1+1/N_in,1-1/N_in,N_in)
y_in = numpy.copy(x_in)
interp_spline = RectBivariateSplineZero(y_in, x_in, mask.array, kx=1, ky=1)
x2 = numpy.linspace(-1+1/Nstep,1-1/Nstep,Nstep)*scale
y2 = numpy.copy(x2)
amplitude = interp_spline(y2,x2).astype(numpy.complex128) * make_mask_annulus(0, Nstep, scale)
else:
amplitude = make_mask_annulus(.32, Nstep, scale).astype(numpy.complex128)
amplitude *= numpy.exp(2j * numpy.pi * zernike_map_noll(psi, Nstep, scale))
amplitude = numpy.fft.ifft2(amplitude)
power = numpy.abs(amplitude)**2
# shift to center
newpower = numpy.zeros_like(power)
newpower[Nstep//2:Nstep,Nstep//2:Nstep] = power[0:Nstep//2,0:Nstep//2]
newpower[Nstep//2:Nstep,0:Nstep//2] = power[0:Nstep//2,Nstep//2:Nstep]
newpower[0:Nstep//2,Nstep//2:Nstep] = power[Nstep//2:Nstep,0:Nstep//2]
newpower[0:Nstep//2,0:Nstep//2] = power[Nstep//2:Nstep,Nstep//2:Nstep]
return(newpower/numpy.sum(newpower))
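
# Minimal usage sketch (editor's addition; values are illustrative): a PSF with a
# quarter-wave of defocus (Noll Z4) on the default annular pupil. Any object
# without an 'N' attribute selects the built-in 32%-obscured annulus.
#   psf = mono_psf(numpy.array([0., 0., 0., 0.25]), EmptyClass(), 4.0, 256)
#   assert abs(psf.sum() - 1.0) < 1e-12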
# helper function: trapezoidal-rule weights over n+1 equally spaced samples
def onescut(n):
array = numpy.ones((n+1))
array[0] = array[-1] = .5
return(array/n)
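# e.g. onescut(4) -> [0.125, 0.25, 0.25, 0.25, 0.125]; the weights always sum to 1.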
# Gaussian quadrature weights across a filter
# sed = spectral energy distribution
# filter = filter information (incl. bandpass)
# nOrder = order of polynomial (number of nodes)
# wlrange = [lmin,lmax,npts] in um
#
# returns wavelengths, weights
def gq_weights(sed, filter, nOrder, wlrange):
# unpack info
lmin = wlrange[0]; lmax = wlrange[1]; npts = wlrange[2]
# build integrals I_k = int x^k S(x) F(x) dx
x = numpy.linspace(lmin,lmax,npts)
c = numpy.zeros((npts))
for i in range(npts):
c[i] = sed.Nlambda(x[i]) * filter.Tlambda(x[i])
o = numpy.ones((npts))
I = numpy.zeros((2*nOrder))
lctr = numpy.mean(x)
for k in range(2*nOrder):
I[k] = numpy.sum(o*(x-lctr)**k*c)
# orthogonal polynomial p_n
# require sum_{j=0}^n coef_{n-j} I_{j+k} = 0 or
# sum_{j=0}^{n-1} coef_{n-j} I_{j+k} = -I_{n+k} for k = 0 .. n-1
coef = numpy.zeros((nOrder+1))
coef[0] = 1.
A = numpy.zeros((nOrder,nOrder))
for k in range(nOrder):
for j in range(nOrder):
A[k,j] = I[j+k]
coef[1:] = numpy.linalg.solve(A, -I[nOrder:])[::-1]
p = numpy.poly1d(coef)
xroot = numpy.sort(numpy.real(p.r))
wroot = numpy.zeros_like(xroot)
pprime = numpy.polyder(p)
for i in range(nOrder):
px = numpy.poly1d(numpy.concatenate((xroot[:i], xroot[i+1:])), r=True)
wroot[i] = numpy.sum(px.c[::-1]*I[:nOrder]) / pprime(xroot[i])
xroot = xroot + lctr
return xroot,wroot
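
# Usage sketch (editor's addition; parameters are illustrative): 10-node
# quadrature over a smoothed 1.0-1.4 um tophat for a 5800 K blackbody seen
# through unit solid angle.
#   sed = SpectralEnergyDistribution('BB', [5800., 1.])
#   filt = Filter('STH', [1.0, 1.4])
#   nodes, weights = gq_weights(sed, filt, 10, [0.9, 1.5, 121])
# nodes are wavelengths in um; the weights absorb N_lambda * T_lambda, so a
# band-integrated quantity is approximated by sum(weights * f(nodes)).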
# psi is a vector of Zernikes, in microns
# mask = pupil-mask object, passed through to mono_psf (see above)
# sed = spectral energy distribution
# scale = sampling (points per lambda/D @ 1 um)
# Nstep = # grid points
# filter = filter information (incl. bandpass)
# addInfo = class for general additional information
# output normalized to sum to 1
def poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo):
# integration steps
hard_lmin = 0.4
hard_lmax = 2.5
hard_Nl = 420
ilmin = hard_Nl-1; ilmax = 0
for il in range(1,hard_Nl):
wl = hard_lmin + il/hard_Nl*(hard_lmax-hard_lmin)
if filter.Tlambda(wl)>1e-4:
if il<ilmin:
ilmin=il
wlmin=wl
if il>ilmax:
ilmax=il
wlmax=wl
na = ilmin//6 + 1
nb = (hard_Nl-ilmax)//6 + 1
wl = numpy.concatenate((numpy.linspace(hard_lmin,wlmin,na+1), numpy.linspace(wlmin,wlmax,ilmax-ilmin+1), numpy.linspace(wlmax,hard_lmax,nb+1)))
dwl = numpy.concatenate(((wlmin-hard_lmin)*onescut(na), (wlmax-wlmin)*onescut(ilmax-ilmin), (hard_lmax-wlmax)*onescut(nb)))
#print(wl,dwl,numpy.size(wl),numpy.size(dwl))
# reduced coverage
if hasattr(addInfo,'FastMode'):
if addInfo.FastMode:
wl, dwl = gq_weights(sed, filter, 10, [wlmin,wlmax,ilmax-ilmin+1])
# make output PSF
sumc = 0.
output = numpy.zeros((Nstep,Nstep))
for i in range(numpy.size(wl)):
c = sed.Nlambda(wl[i]) * filter.Tlambda(wl[i]) * dwl[i]
if hasattr(addInfo,'FastMode'):
if addInfo.FastMode: c = dwl[i]
this_psi = numpy.copy(psi)/wl[i] # convert from um -> wavelengths of wavefront
sumc += c
output += c * mono_psf(this_psi, mask, scale_1um*wl[i], Nstep)
#print('{:6.4f} {:11.5E}'.format(wl[i],filter.Tlambda(wl[i])))
output /= sumc
return(output)
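
# Note on FastMode (editor's addition): when addInfo.FastMode is set, the loop
# above replaces the trapezoidal wavelength grid with the 10 Gauss nodes from
# gq_weights, whose weights already include N_lambda * T_lambda -- hence the
# plain c = dwl[i] in that branch.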
# make oversampled PSF at given SCA, position
#
# sed = source SED
# filt = filter (letter: RZYJHFK)
# ovsamp = oversampling factor
# Nstep = number of samples in each axis
# scanum = SCA number (1..18)
# pos = (x,y) position on SCA in mm (0,0)=center
# offsets = adjustment parameters
# .par -> offset parameters
# addInfo = additional information class:
# .ctr -> centroid (dx,dy)
def oversamp_psf(sed, filt, ovsamp, Nstep, scanum, pos, offsets, addInfo):
# get information
parOn = False
if hasattr(offsets, 'par'): parOn = True
# get Zernikes in microns
ZR = ZernRef.data[4*(scanum-1):4*scanum,:]
wt_L = .5 - pos[0]/sca.size
wt_R = .5 + pos[0]/sca.size
wt_B = .5 - pos[1]/sca.size
wt_T = .5 + pos[1]/sca.size
psi = wt_T*wt_L*ZR[0,:] + wt_B*wt_L*ZR[1,:] + wt_B*wt_R*ZR[2,:] + wt_T*wt_R*ZR[3,:]
xf = sca.x[scanum-1] + pos[0]
yf = sca.y[scanum-1] + pos[1]
# Zernike offsets
if parOn:
psi[3] += offsets.par[offset_index.foc ]
psi[4] += offsets.par[offset_index.astig2]
psi[5] += offsets.par[offset_index.astig1]
psi[6] += offsets.par[offset_index.coma2]
psi[7] += offsets.par[offset_index.coma1]
psi[3] += (offsets.par[offset_index.focg1]*xf + offsets.par[offset_index.focg2]*yf)/sca.scale
    scale_1um = ovsamp / (.11*numpy.pi/648000) / maskfiles.D   # samples per (lambda/D) at 1 um, for 0.11"/pix
#print(scale_1um)
# filter curves
if filt=='K':
filter = Filter('STH', [1.95,2.30])
elif filt=='F':
filter = Filter('interp', FilterData[:,(0,7)])
elif filt=='H':
filter = Filter('interp', FilterData[:,(0,6)])
elif filt=='W':
filter = Filter('interp', FilterData[:,(0,5)])
elif filt=='J':
filter = Filter('interp', FilterData[:,(0,4)])
elif filt=='Y':
filter = Filter('interp', FilterData[:,(0,3)])
elif filt=='Z':
filter = Filter('interp', FilterData[:,(0,2)])
elif filt=='R':
filter = Filter('interp', FilterData[:,(0,1)])
else:
print('Error: unknown filter')
exit()
la = numpy.linspace(.4, 2.5, 2101)
fla = numpy.zeros(2101)
for i in range(2101): fla[i] = filter.Tlambda(la[i])
scale = scale_1um*numpy.sum(la*fla)/numpy.sum(fla)
# get the mask
mask = EmptyClass(); mask.N=1
imk = 0
while imk<maskfiles.n_lores-1 and Nstep/scale<maskfiles.N_in/2**(imk+1): imk+=1
#print(' *** ', Nstep, scale, scale/scale_1um, imk)
if filt=='F' or filt=='K':
mask.spline = maskfiles.i_full[scanum-1 + maskfiles.nSCA*imk]
else:
mask.spline = maskfiles.i_rim[scanum-1 + maskfiles.nSCA*imk]
# x & y offsets
if hasattr(addInfo, 'ctr'):
d = .5*(1-1/ovsamp)
psi[1:3] -= (addInfo.ctr+d) * ovsamp / scale_1um / 4.
output = poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo)
    # smooth: Gaussian jitter / charge-diffusion kernel, covariance in native pixels^2
    Cxx = Cyy = .09; Cxy = 0.
if parOn:
Cxx = .09 + offsets.par[offset_index.jxx ]
Cxy = offsets.par[offset_index.jxy ]
Cyy = .09 + offsets.par[offset_index.jyy ]
output_fft = numpy.fft.fft2(output)
kx = numpy.zeros((Nstep,Nstep))
ky = numpy.zeros((Nstep,Nstep))
for i in range(-Nstep//2, Nstep//2):
kx[:,i] = abs(i)
ky[i,:] = abs(i)
kx *= 2.*numpy.pi*ovsamp/Nstep
ky *= 2.*numpy.pi*ovsamp/Nstep
output_fft = output_fft * numpy.exp(-Cxx*kx**2/2. - Cyy*ky**2/2. - Cxy*kx*ky)
output = numpy.real(numpy.fft.ifft2(output_fft))
return(output)
# parameters for next couple of functions
N_STD = 1024 # must be a multiple of 4
OV_STD = 8
# make oversampled PSF at given SCA, position
#
# sed = source SED
# filt = filter (letter: RZYJHFK)
# scanum = SCA number (1..18)
# pos = (x,y) position on SCA in mm (0,0)=center
# offsets = adjustment parameters (placeholder)
# addInfo = additional information class:
# .F -> total counts (in e)
# .ctr -> centroid (dx,dy)
#   .many -> evaluate on a 5x5 grid of whole-sample offsets (returns 25 stamps)
#
# .bfe = add bfe (can include .bfe_a, .bfe_aplus)
#
# .bfe_overwrite => special mode to compute BFE with time dependent PSF
# .stamp_in = input stamp (so compute BFE from stamp_in *acting on* this PSF)
def postage_stamp(sed, filt, scanum, pos, offsets, addInfo):
N = N_STD # must be even
ov = OV_STD
if hasattr(addInfo,'many'):
ov = addInfo.force_ov
if hasattr(addInfo,'FastMode'):
if addInfo.FastMode:
N = N//2
bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfo) * addInfo.F
out = numpy.zeros((psSize, psSize))
for i in range(psSize):
x = N//2+(i-psSize//2)*ov
for j in range(psSize):
y = N//2+(j-psSize//2)*ov
out[j,i] += numpy.sum(bigStamp[y:y+ov,x:x+ov])
if hasattr(addInfo, 'vtpe'):
out[j,i] += addInfo.vtpe * numpy.sum(bigStamp[y+ov:y+2*ov,x:x+ov])
if hasattr(addInfo,'many'):
out = numpy.zeros((25, psSize, psSize))
for i in range(psSize):
x = N//2+(i-psSize//2)*ov
for j in range(psSize):
y = N//2+(j-psSize//2)*ov
for k in range(25):
dy = k%5 - 2; dx = k//5 - 2
out[k,j,i] += numpy.sum(bigStamp[y+dy:y+dy+ov,x+dx:x+dx+ov])
# BFE?
if hasattr(addInfo, 'bfe'):
if hasattr(addInfo,'many'):
print('Error -- cannot do both bfe and many in postage_stamp')
exit()
dout = numpy.zeros_like(out)
# horizontal BFE
ah = 0
if hasattr(addInfo, 'bfe_a'): ah += addInfo.bfe_a
if hasattr(addInfo, 'bfe_aplus'): ah += addInfo.bfe_aplus
for i in range(psSize-1):
x = N//2+(i-psSize//2)*ov
for j in range(psSize):
y = N//2+(j-psSize//2)*ov
shift = ov * ah * (out[j,i+1]-out[j,i]) / 2. # in sub-pixels, average over exposure
if hasattr(addInfo, 'bfe_overwrite'): shift = ov * ah * (addInfo.stamp_in[j,i+1]-addInfo.stamp_in[j,i]) / 2.
mflux = numpy.sum(bigStamp[y:y+ov,x+ov-1:x+ov+1])/2.
dout[j,i] += shift*mflux
dout[j,i+1] -= shift*mflux
# vertical BFE
av = 0
if hasattr(addInfo, 'bfe_a'): av += addInfo.bfe_a
if hasattr(addInfo, 'bfe_aplus'): av -= addInfo.bfe_aplus
for i in range(psSize):
x = N//2+(i-psSize//2)*ov
for j in range(psSize-1):
y = N//2+(j-psSize//2)*ov
shift = ov * av * (out[j+1,i]-out[j,i]) / 2. # in sub-pixels, average over exposure
if hasattr(addInfo, 'bfe_overwrite'): shift = ov * av * (addInfo.stamp_in[j+1,i]-addInfo.stamp_in[j,i]) / 2.
mflux = numpy.sum(bigStamp[y+ov-1:y+ov+1,x:x+ov])/2.
dout[j,i] += shift*mflux
dout[j+1,i] -= shift*mflux
out+=dout
if hasattr(addInfo, 'bfe_overwrite'): out=dout
return(out)
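
# --- Illustrative usage sketch (not part of the original module) ---
# Draws a psSize x psSize postage stamp of a 1e4 e- star, then repeats with a
# toy brighter-fatter coefficient; F, the filter choice, and bfe_a are made-up
# values. FastMode reduces the wavelength sampling to speed the example up.
def _demo_postage_stamp():
    sed = SpectralEnergyDistribution('BB', [5000., 1.])
    offsets = EmptyClass()
    addInfo = EmptyClass()
    addInfo.F = 1.0e4                      # total counts in electrons
    addInfo.ctr = numpy.zeros(2)
    addInfo.FastMode = True
    stamp = postage_stamp(sed, 'H', 1, (0., 0.), offsets, addInfo)
    addInfo.bfe = True                     # switch on the brighter-fatter effect
    addInfo.bfe_a = 2.5e-7                 # toy nearest-neighbor BFE coefficient
    stamp_bfe = postage_stamp(sed, 'H', 1, (0., 0.), offsets, addInfo)
    print(stamp.sum(), numpy.abs(stamp_bfe - stamp).max())
    return stamp, stamp_bfe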
#
# same input format but returns adaptive (Gaussian-weighted) moments of the PSF:
# A, xc, yc, T, e1, e2 = amplitude, centroid (native pixels), size T = Cxx+Cyy
# (native pixels^2), and ellipticity components
def psfmoments(sed, filt, scanum, pos, offsets, addInfo):
N = N_STD # must be even
ov = OV_STD
if hasattr(addInfo,'many'):
ov = addInfo.force_ov
if hasattr(addInfo,'FastMode'):
if addInfo.FastMode:
N = N//2
addInfoX = copy.deepcopy(addInfo); addInfoX.ctr = numpy.zeros((2)); addInfoX.F = 1.
bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfoX)
bigStamp = convolve(bigStamp, numpy.ones((ov,ov)), mode='full', method='direct')/ov**2
Np = N+ov-1
# moment format: A,x,y,Cxx,Cxy,Cyy
mom = numpy.asarray([1,0,0,4*ov**2,0,4*ov**2]).astype(numpy.float64)
newmom = numpy.zeros_like(mom)
con = .5 # convergence factor
xx1 = numpy.tile(numpy.linspace(-(Np-1)/2., (Np-1)/2., Np), (Np,1))
yy1 = numpy.copy(xx1.T)
for iter in range(256):
det = mom[3]*mom[5]-mom[4]**2
xx = xx1-mom[1]
yy = yy1-mom[2]
G = numpy.exp((-mom[5]*xx**2 + 2*mom[4]*xx*yy - mom[3]*yy**2)/2./det) * bigStamp
newmom[0] = numpy.sum(G)
newmom[1] = numpy.sum(G*xx)
newmom[2] = numpy.sum(G*yy)
newmom[3] = numpy.sum(G*xx**2)
newmom[4] = numpy.sum(G*xx*yy)
newmom[5] = numpy.sum(G*yy**2)
mom[0] = 2*newmom[0]
err = newmom[1:]/newmom[0]; err[-3:] -= mom[-3:]/2.
mom[1:] += err*con
return(numpy.array([mom[0], mom[1]/ov, mom[2]/ov, (mom[3]+mom[5])/ov**2, (mom[3]-mom[5])/(mom[3]+mom[5]), 2*mom[4]/(mom[3]+mom[5])]))
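
# --- Illustrative usage sketch (not part of the original module) ---
# Adaptive moments of the same toy star: for a round Gaussian-like PSF,
# sqrt(T/2) is the per-axis sigma in native pixels and |e| the ellipticity
# magnitude. The SED and FastMode flag are assumptions for speed.
def _demo_psfmoments():
    sed = SpectralEnergyDistribution('BB', [5000., 1.])
    addInfo = EmptyClass()
    addInfo.FastMode = True                # coarser wavelength sampling, for speed
    A, xc, yc, T, e1, e2 = psfmoments(sed, 'H', 1, (0., 0.), EmptyClass(), addInfo)
    sigma = numpy.sqrt(T/2.)               # per-axis Gaussian sigma, native pixels
    print('sigma ~ {:.3f} pix, |e| = {:.4f}'.format(sigma, numpy.hypot(e1, e2)))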
# returns chi^2: twice the Poisson deviance between obs and theory, with the
# read-noise variance folded into both as an effective background
# var = read noise variance
def chi2_postage_stamp(obs, theory, var):
obs2 = numpy.maximum(obs+var, 1e-24)
return(numpy.sum(theory+var-obs2-obs2*numpy.log((theory+var)/obs2))*2)
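
# --- Illustrative usage sketch (not part of the original module) ---
# Identical stamps give a deviance of exactly 0; a Poisson realization gives
# of order one unit per pixel. The flat 100 e- stamp and 20 e-^2 read-noise
# variance are assumptions.
def _demo_chi2():
    theory = numpy.full((psSize, psSize), 100.)
    var = 20.
    print(chi2_postage_stamp(theory, theory, var))    # exactly 0
    obs = numpy.random.default_rng(0).poisson(theory).astype(numpy.float64)
    print(chi2_postage_stamp(obs, theory, var))       # of order psSize**2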
|
normal
|
{
"blob_id": "2ab6488276c74da8c3d9097d298fc53d1caf74b1",
"index": 6243,
"step-1": "<mask token>\n\n\ndef RectBivariateSplineZero(y1, x1, map1, kx=1, ky=1):\n return RectBivariateSpline(y1, x1, map1, kx=kx, ky=ky)\n y2 = numpy.zeros(numpy.size(y1) + 2)\n y2[1:-1] = y1\n y2[0] = 2 * y2[1] - y2[2]\n y2[-1] = 2 * y2[-2] - y2[-3]\n x2 = numpy.zeros(numpy.size(x1) + 2)\n x2[1:-1] = x1\n x2[0] = 2 * x2[1] - x2[2]\n x2[-1] = 2 * x2[-2] - x2[-3]\n map2 = numpy.zeros((numpy.size(y1) + 2, numpy.size(x1) + 2))\n map2[1:-1, 1:-1] = map1\n return RectBivariateSpline(y2, x2, map2, kx=kx, ky=ky)\n\n\nclass EmptyClass:\n pass\n\n\nclass SpectralEnergyDistribution:\n\n def __init__(self, type, info):\n self.type = type\n self.info = copy.deepcopy(info)\n\n def Nlambda(self, lambda_):\n if self.type == 'BB':\n T = self.info[0]\n x = 14387.769 / lambda_ / T\n return (2 / lambda_ ** 4 * 299792458000000.0 * 1000000000000.0 *\n numpy.exp(-x) / (1.0 - numpy.exp(-x)) * self.info[1])\n else:\n print('ERROR: Invalid SED type')\n exit()\n\n\nclass Filter:\n\n def __init__(self, type, info):\n self.type = type\n self.info = copy.deepcopy(info)\n\n def Tlambda(self, lambda_):\n if self.type == 'STH':\n lmin = self.info[0]\n dlmin = lmin * 0.02\n lmax = self.info[1]\n dlmax = lmax * 0.02\n return (numpy.tanh((lambda_ - lmin) / dlmin) - numpy.tanh((\n lambda_ - lmax) / dlmax)) / 2.0\n elif self.type == 'interp':\n return numpy.interp(lambda_, self.info[:, 0], self.info[:, 1])\n else:\n print('ERROR: Invalid filter type')\n exit()\n\n\n<mask token>\n\n\ndef onescut(n):\n array = numpy.ones(n + 1)\n array[0] = array[-1] = 0.5\n return array / n\n\n\ndef gq_weights(sed, filter, nOrder, wlrange):\n lmin = wlrange[0]\n lmax = wlrange[1]\n npts = wlrange[2]\n x = numpy.linspace(lmin, lmax, npts)\n c = numpy.zeros(npts)\n for i in range(npts):\n c[i] = sed.Nlambda(x[i]) * filter.Tlambda(x[i])\n o = numpy.ones(npts)\n I = numpy.zeros(2 * nOrder)\n lctr = numpy.mean(x)\n for k in range(2 * nOrder):\n I[k] = numpy.sum(o * (x - lctr) ** k * c)\n coef = numpy.zeros(nOrder + 1)\n coef[0] = 1.0\n A = numpy.zeros((nOrder, nOrder))\n for k in range(nOrder):\n for j in range(nOrder):\n A[k, j] = I[j + k]\n coef[1:] = numpy.linalg.solve(A, -I[nOrder:])[::-1]\n p = numpy.poly1d(coef)\n xroot = numpy.sort(numpy.real(p.r))\n wroot = numpy.zeros_like(xroot)\n pprime = numpy.polyder(p)\n for i in range(nOrder):\n px = numpy.poly1d(numpy.concatenate((xroot[:i], xroot[i + 1:])), r=True\n )\n wroot[i] = numpy.sum(px.c[::-1] * I[:nOrder]) / pprime(xroot[i])\n xroot = xroot + lctr\n return xroot, wroot\n\n\n<mask token>\n\n\ndef postage_stamp(sed, filt, scanum, pos, offsets, addInfo):\n N = N_STD\n ov = OV_STD\n if hasattr(addInfo, 'many'):\n ov = addInfo.force_ov\n if hasattr(addInfo, 'FastMode'):\n if addInfo.FastMode:\n N = N // 2\n bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfo\n ) * addInfo.F\n out = numpy.zeros((psSize, psSize))\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n out[j, i] += numpy.sum(bigStamp[y:y + ov, x:x + ov])\n if hasattr(addInfo, 'vtpe'):\n out[j, i] += addInfo.vtpe * numpy.sum(bigStamp[y + ov:y + 2 *\n ov, x:x + ov])\n if hasattr(addInfo, 'many'):\n out = numpy.zeros((25, psSize, psSize))\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n for k in range(25):\n dy = k % 5 - 2\n dx = k // 5 - 2\n out[k, j, i] += numpy.sum(bigStamp[y + dy:y + dy + ov, \n x + dx:x + dx + ov])\n if hasattr(addInfo, 'bfe'):\n if 
hasattr(addInfo, 'many'):\n print('Error -- cannot do both bfe and many in postage_stamp')\n exit()\n dout = numpy.zeros_like(out)\n ah = 0\n if hasattr(addInfo, 'bfe_a'):\n ah += addInfo.bfe_a\n if hasattr(addInfo, 'bfe_aplus'):\n ah += addInfo.bfe_aplus\n for i in range(psSize - 1):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n shift = ov * ah * (out[j, i + 1] - out[j, i]) / 2.0\n if hasattr(addInfo, 'bfe_overwrite'):\n shift = ov * ah * (addInfo.stamp_in[j, i + 1] - addInfo\n .stamp_in[j, i]) / 2.0\n mflux = numpy.sum(bigStamp[y:y + ov, x + ov - 1:x + ov + 1]\n ) / 2.0\n dout[j, i] += shift * mflux\n dout[j, i + 1] -= shift * mflux\n av = 0\n if hasattr(addInfo, 'bfe_a'):\n av += addInfo.bfe_a\n if hasattr(addInfo, 'bfe_aplus'):\n av -= addInfo.bfe_aplus\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize - 1):\n y = N // 2 + (j - psSize // 2) * ov\n shift = ov * av * (out[j + 1, i] - out[j, i]) / 2.0\n if hasattr(addInfo, 'bfe_overwrite'):\n shift = ov * av * (addInfo.stamp_in[j + 1, i] - addInfo\n .stamp_in[j, i]) / 2.0\n mflux = numpy.sum(bigStamp[y + ov - 1:y + ov + 1, x:x + ov]\n ) / 2.0\n dout[j, i] += shift * mflux\n dout[j + 1, i] -= shift * mflux\n out += dout\n if hasattr(addInfo, 'bfe_overwrite'):\n out = dout\n return out\n\n\ndef psfmoments(sed, filt, scanum, pos, offsets, addInfo):\n N = N_STD\n ov = OV_STD\n if hasattr(addInfo, 'many'):\n ov = addInfo.force_ov\n if hasattr(addInfo, 'FastMode'):\n if addInfo.FastMode:\n N = N // 2\n addInfoX = copy.deepcopy(addInfo)\n addInfoX.ctr = numpy.zeros(2)\n addInfoX.F = 1.0\n bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfoX)\n bigStamp = convolve(bigStamp, numpy.ones((ov, ov)), mode='full', method\n ='direct') / ov ** 2\n Np = N + ov - 1\n mom = numpy.asarray([1, 0, 0, 4 * ov ** 2, 0, 4 * ov ** 2]).astype(numpy\n .float64)\n newmom = numpy.zeros_like(mom)\n con = 0.5\n xx1 = numpy.tile(numpy.linspace(-(Np - 1) / 2.0, (Np - 1) / 2.0, Np), (\n Np, 1))\n yy1 = numpy.copy(xx1.T)\n for iter in range(256):\n det = mom[3] * mom[5] - mom[4] ** 2\n xx = xx1 - mom[1]\n yy = yy1 - mom[2]\n G = numpy.exp((-mom[5] * xx ** 2 + 2 * mom[4] * xx * yy - mom[3] * \n yy ** 2) / 2.0 / det) * bigStamp\n newmom[0] = numpy.sum(G)\n newmom[1] = numpy.sum(G * xx)\n newmom[2] = numpy.sum(G * yy)\n newmom[3] = numpy.sum(G * xx ** 2)\n newmom[4] = numpy.sum(G * xx * yy)\n newmom[5] = numpy.sum(G * yy ** 2)\n mom[0] = 2 * newmom[0]\n err = newmom[1:] / newmom[0]\n err[-3:] -= mom[-3:] / 2.0\n mom[1:] += err * con\n return numpy.array([mom[0], mom[1] / ov, mom[2] / ov, (mom[3] + mom[5]) /\n ov ** 2, (mom[3] - mom[5]) / (mom[3] + mom[5]), 2 * mom[4] / (mom[3\n ] + mom[5])])\n\n\ndef chi2_postage_stamp(obs, theory, var):\n obs2 = numpy.maximum(obs + var, 1e-24)\n return numpy.sum(theory + var - obs2 - obs2 * numpy.log((theory + var) /\n obs2)) * 2\n",
"step-2": "<mask token>\n\n\ndef RectBivariateSplineZero(y1, x1, map1, kx=1, ky=1):\n return RectBivariateSpline(y1, x1, map1, kx=kx, ky=ky)\n y2 = numpy.zeros(numpy.size(y1) + 2)\n y2[1:-1] = y1\n y2[0] = 2 * y2[1] - y2[2]\n y2[-1] = 2 * y2[-2] - y2[-3]\n x2 = numpy.zeros(numpy.size(x1) + 2)\n x2[1:-1] = x1\n x2[0] = 2 * x2[1] - x2[2]\n x2[-1] = 2 * x2[-2] - x2[-3]\n map2 = numpy.zeros((numpy.size(y1) + 2, numpy.size(x1) + 2))\n map2[1:-1, 1:-1] = map1\n return RectBivariateSpline(y2, x2, map2, kx=kx, ky=ky)\n\n\nclass EmptyClass:\n pass\n\n\nclass SpectralEnergyDistribution:\n\n def __init__(self, type, info):\n self.type = type\n self.info = copy.deepcopy(info)\n\n def Nlambda(self, lambda_):\n if self.type == 'BB':\n T = self.info[0]\n x = 14387.769 / lambda_ / T\n return (2 / lambda_ ** 4 * 299792458000000.0 * 1000000000000.0 *\n numpy.exp(-x) / (1.0 - numpy.exp(-x)) * self.info[1])\n else:\n print('ERROR: Invalid SED type')\n exit()\n\n\nclass Filter:\n\n def __init__(self, type, info):\n self.type = type\n self.info = copy.deepcopy(info)\n\n def Tlambda(self, lambda_):\n if self.type == 'STH':\n lmin = self.info[0]\n dlmin = lmin * 0.02\n lmax = self.info[1]\n dlmax = lmax * 0.02\n return (numpy.tanh((lambda_ - lmin) / dlmin) - numpy.tanh((\n lambda_ - lmax) / dlmax)) / 2.0\n elif self.type == 'interp':\n return numpy.interp(lambda_, self.info[:, 0], self.info[:, 1])\n else:\n print('ERROR: Invalid filter type')\n exit()\n\n\n<mask token>\n\n\ndef onescut(n):\n array = numpy.ones(n + 1)\n array[0] = array[-1] = 0.5\n return array / n\n\n\ndef gq_weights(sed, filter, nOrder, wlrange):\n lmin = wlrange[0]\n lmax = wlrange[1]\n npts = wlrange[2]\n x = numpy.linspace(lmin, lmax, npts)\n c = numpy.zeros(npts)\n for i in range(npts):\n c[i] = sed.Nlambda(x[i]) * filter.Tlambda(x[i])\n o = numpy.ones(npts)\n I = numpy.zeros(2 * nOrder)\n lctr = numpy.mean(x)\n for k in range(2 * nOrder):\n I[k] = numpy.sum(o * (x - lctr) ** k * c)\n coef = numpy.zeros(nOrder + 1)\n coef[0] = 1.0\n A = numpy.zeros((nOrder, nOrder))\n for k in range(nOrder):\n for j in range(nOrder):\n A[k, j] = I[j + k]\n coef[1:] = numpy.linalg.solve(A, -I[nOrder:])[::-1]\n p = numpy.poly1d(coef)\n xroot = numpy.sort(numpy.real(p.r))\n wroot = numpy.zeros_like(xroot)\n pprime = numpy.polyder(p)\n for i in range(nOrder):\n px = numpy.poly1d(numpy.concatenate((xroot[:i], xroot[i + 1:])), r=True\n )\n wroot[i] = numpy.sum(px.c[::-1] * I[:nOrder]) / pprime(xroot[i])\n xroot = xroot + lctr\n return xroot, wroot\n\n\n<mask token>\n\n\ndef oversamp_psf(sed, filt, ovsamp, Nstep, scanum, pos, offsets, addInfo):\n parOn = False\n if hasattr(offsets, 'par'):\n parOn = True\n ZR = ZernRef.data[4 * (scanum - 1):4 * scanum, :]\n wt_L = 0.5 - pos[0] / sca.size\n wt_R = 0.5 + pos[0] / sca.size\n wt_B = 0.5 - pos[1] / sca.size\n wt_T = 0.5 + pos[1] / sca.size\n psi = wt_T * wt_L * ZR[0, :] + wt_B * wt_L * ZR[1, :] + wt_B * wt_R * ZR[\n 2, :] + wt_T * wt_R * ZR[3, :]\n xf = sca.x[scanum - 1] + pos[0]\n yf = sca.y[scanum - 1] + pos[1]\n if parOn:\n psi[3] += offsets.par[offset_index.foc]\n psi[4] += offsets.par[offset_index.astig2]\n psi[5] += offsets.par[offset_index.astig1]\n psi[6] += offsets.par[offset_index.coma2]\n psi[7] += offsets.par[offset_index.coma1]\n psi[3] += (offsets.par[offset_index.focg1] * xf + offsets.par[\n offset_index.focg2] * yf) / sca.scale\n scale_1um = ovsamp / (0.11 * numpy.pi / 648000) / maskfiles.D\n if filt == 'K':\n filter = Filter('STH', [1.95, 2.3])\n elif filt == 'F':\n filter = Filter('interp', 
FilterData[:, (0, 7)])\n elif filt == 'H':\n filter = Filter('interp', FilterData[:, (0, 6)])\n elif filt == 'W':\n filter = Filter('interp', FilterData[:, (0, 5)])\n elif filt == 'J':\n filter = Filter('interp', FilterData[:, (0, 4)])\n elif filt == 'Y':\n filter = Filter('interp', FilterData[:, (0, 3)])\n elif filt == 'Z':\n filter = Filter('interp', FilterData[:, (0, 2)])\n elif filt == 'R':\n filter = Filter('interp', FilterData[:, (0, 1)])\n else:\n print('Error: unknown filter')\n exit()\n la = numpy.linspace(0.4, 2.5, 2101)\n fla = numpy.zeros(2101)\n for i in range(2101):\n fla[i] = filter.Tlambda(la[i])\n scale = scale_1um * numpy.sum(la * fla) / numpy.sum(fla)\n mask = EmptyClass()\n mask.N = 1\n imk = 0\n while (imk < maskfiles.n_lores - 1 and Nstep / scale < maskfiles.N_in /\n 2 ** (imk + 1)):\n imk += 1\n if filt == 'F' or filt == 'K':\n mask.spline = maskfiles.i_full[scanum - 1 + maskfiles.nSCA * imk]\n else:\n mask.spline = maskfiles.i_rim[scanum - 1 + maskfiles.nSCA * imk]\n if hasattr(addInfo, 'ctr'):\n d = 0.5 * (1 - 1 / ovsamp)\n psi[1:3] -= (addInfo.ctr + d) * ovsamp / scale_1um / 4.0\n output = poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo)\n Cxx = Cyy = 0.09\n Cxy = 0.0\n if parOn:\n Cxx = 0.09 + offsets.par[offset_index.jxx]\n Cxy = offsets.par[offset_index.jxy]\n Cyy = 0.09 + offsets.par[offset_index.jyy]\n output_fft = numpy.fft.fft2(output)\n kx = numpy.zeros((Nstep, Nstep))\n ky = numpy.zeros((Nstep, Nstep))\n for i in range(-Nstep // 2, Nstep // 2):\n kx[:, i] = abs(i)\n ky[i, :] = abs(i)\n kx *= 2.0 * numpy.pi * ovsamp / Nstep\n ky *= 2.0 * numpy.pi * ovsamp / Nstep\n output_fft = output_fft * numpy.exp(-Cxx * kx ** 2 / 2.0 - Cyy * ky ** \n 2 / 2.0 - Cxy * kx * ky)\n output = numpy.real(numpy.fft.ifft2(output_fft))\n return output\n\n\n<mask token>\n\n\ndef postage_stamp(sed, filt, scanum, pos, offsets, addInfo):\n N = N_STD\n ov = OV_STD\n if hasattr(addInfo, 'many'):\n ov = addInfo.force_ov\n if hasattr(addInfo, 'FastMode'):\n if addInfo.FastMode:\n N = N // 2\n bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfo\n ) * addInfo.F\n out = numpy.zeros((psSize, psSize))\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n out[j, i] += numpy.sum(bigStamp[y:y + ov, x:x + ov])\n if hasattr(addInfo, 'vtpe'):\n out[j, i] += addInfo.vtpe * numpy.sum(bigStamp[y + ov:y + 2 *\n ov, x:x + ov])\n if hasattr(addInfo, 'many'):\n out = numpy.zeros((25, psSize, psSize))\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n for k in range(25):\n dy = k % 5 - 2\n dx = k // 5 - 2\n out[k, j, i] += numpy.sum(bigStamp[y + dy:y + dy + ov, \n x + dx:x + dx + ov])\n if hasattr(addInfo, 'bfe'):\n if hasattr(addInfo, 'many'):\n print('Error -- cannot do both bfe and many in postage_stamp')\n exit()\n dout = numpy.zeros_like(out)\n ah = 0\n if hasattr(addInfo, 'bfe_a'):\n ah += addInfo.bfe_a\n if hasattr(addInfo, 'bfe_aplus'):\n ah += addInfo.bfe_aplus\n for i in range(psSize - 1):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n shift = ov * ah * (out[j, i + 1] - out[j, i]) / 2.0\n if hasattr(addInfo, 'bfe_overwrite'):\n shift = ov * ah * (addInfo.stamp_in[j, i + 1] - addInfo\n .stamp_in[j, i]) / 2.0\n mflux = numpy.sum(bigStamp[y:y + ov, x + ov - 1:x + ov + 1]\n ) / 2.0\n dout[j, i] += shift * mflux\n dout[j, i + 1] -= shift * mflux\n av = 0\n if 
hasattr(addInfo, 'bfe_a'):\n av += addInfo.bfe_a\n if hasattr(addInfo, 'bfe_aplus'):\n av -= addInfo.bfe_aplus\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize - 1):\n y = N // 2 + (j - psSize // 2) * ov\n shift = ov * av * (out[j + 1, i] - out[j, i]) / 2.0\n if hasattr(addInfo, 'bfe_overwrite'):\n shift = ov * av * (addInfo.stamp_in[j + 1, i] - addInfo\n .stamp_in[j, i]) / 2.0\n mflux = numpy.sum(bigStamp[y + ov - 1:y + ov + 1, x:x + ov]\n ) / 2.0\n dout[j, i] += shift * mflux\n dout[j + 1, i] -= shift * mflux\n out += dout\n if hasattr(addInfo, 'bfe_overwrite'):\n out = dout\n return out\n\n\ndef psfmoments(sed, filt, scanum, pos, offsets, addInfo):\n N = N_STD\n ov = OV_STD\n if hasattr(addInfo, 'many'):\n ov = addInfo.force_ov\n if hasattr(addInfo, 'FastMode'):\n if addInfo.FastMode:\n N = N // 2\n addInfoX = copy.deepcopy(addInfo)\n addInfoX.ctr = numpy.zeros(2)\n addInfoX.F = 1.0\n bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfoX)\n bigStamp = convolve(bigStamp, numpy.ones((ov, ov)), mode='full', method\n ='direct') / ov ** 2\n Np = N + ov - 1\n mom = numpy.asarray([1, 0, 0, 4 * ov ** 2, 0, 4 * ov ** 2]).astype(numpy\n .float64)\n newmom = numpy.zeros_like(mom)\n con = 0.5\n xx1 = numpy.tile(numpy.linspace(-(Np - 1) / 2.0, (Np - 1) / 2.0, Np), (\n Np, 1))\n yy1 = numpy.copy(xx1.T)\n for iter in range(256):\n det = mom[3] * mom[5] - mom[4] ** 2\n xx = xx1 - mom[1]\n yy = yy1 - mom[2]\n G = numpy.exp((-mom[5] * xx ** 2 + 2 * mom[4] * xx * yy - mom[3] * \n yy ** 2) / 2.0 / det) * bigStamp\n newmom[0] = numpy.sum(G)\n newmom[1] = numpy.sum(G * xx)\n newmom[2] = numpy.sum(G * yy)\n newmom[3] = numpy.sum(G * xx ** 2)\n newmom[4] = numpy.sum(G * xx * yy)\n newmom[5] = numpy.sum(G * yy ** 2)\n mom[0] = 2 * newmom[0]\n err = newmom[1:] / newmom[0]\n err[-3:] -= mom[-3:] / 2.0\n mom[1:] += err * con\n return numpy.array([mom[0], mom[1] / ov, mom[2] / ov, (mom[3] + mom[5]) /\n ov ** 2, (mom[3] - mom[5]) / (mom[3] + mom[5]), 2 * mom[4] / (mom[3\n ] + mom[5])])\n\n\ndef chi2_postage_stamp(obs, theory, var):\n obs2 = numpy.maximum(obs + var, 1e-24)\n return numpy.sum(theory + var - obs2 - obs2 * numpy.log((theory + var) /\n obs2)) * 2\n",
"step-3": "<mask token>\n\n\ndef RectBivariateSplineZero(y1, x1, map1, kx=1, ky=1):\n return RectBivariateSpline(y1, x1, map1, kx=kx, ky=ky)\n y2 = numpy.zeros(numpy.size(y1) + 2)\n y2[1:-1] = y1\n y2[0] = 2 * y2[1] - y2[2]\n y2[-1] = 2 * y2[-2] - y2[-3]\n x2 = numpy.zeros(numpy.size(x1) + 2)\n x2[1:-1] = x1\n x2[0] = 2 * x2[1] - x2[2]\n x2[-1] = 2 * x2[-2] - x2[-3]\n map2 = numpy.zeros((numpy.size(y1) + 2, numpy.size(x1) + 2))\n map2[1:-1, 1:-1] = map1\n return RectBivariateSpline(y2, x2, map2, kx=kx, ky=ky)\n\n\nclass EmptyClass:\n pass\n\n\nclass SpectralEnergyDistribution:\n\n def __init__(self, type, info):\n self.type = type\n self.info = copy.deepcopy(info)\n\n def Nlambda(self, lambda_):\n if self.type == 'BB':\n T = self.info[0]\n x = 14387.769 / lambda_ / T\n return (2 / lambda_ ** 4 * 299792458000000.0 * 1000000000000.0 *\n numpy.exp(-x) / (1.0 - numpy.exp(-x)) * self.info[1])\n else:\n print('ERROR: Invalid SED type')\n exit()\n\n\nclass Filter:\n\n def __init__(self, type, info):\n self.type = type\n self.info = copy.deepcopy(info)\n\n def Tlambda(self, lambda_):\n if self.type == 'STH':\n lmin = self.info[0]\n dlmin = lmin * 0.02\n lmax = self.info[1]\n dlmax = lmax * 0.02\n return (numpy.tanh((lambda_ - lmin) / dlmin) - numpy.tanh((\n lambda_ - lmax) / dlmax)) / 2.0\n elif self.type == 'interp':\n return numpy.interp(lambda_, self.info[:, 0], self.info[:, 1])\n else:\n print('ERROR: Invalid filter type')\n exit()\n\n\n<mask token>\n\n\ndef test_zernike():\n for k in range(36):\n psi = numpy.zeros(36)\n psi[k] = 1\n N = 5\n M = zernike_map_noll(psi, N, N / (N - 1))\n print(' *** Zernike {:2d} ***'.format(k + 1))\n for j in range(N):\n out = ''\n for i in range(N):\n out = out + ' {:10.5f}'.format(M[j, i])\n print(out)\n print('')\n\n\n<mask token>\n\n\ndef onescut(n):\n array = numpy.ones(n + 1)\n array[0] = array[-1] = 0.5\n return array / n\n\n\ndef gq_weights(sed, filter, nOrder, wlrange):\n lmin = wlrange[0]\n lmax = wlrange[1]\n npts = wlrange[2]\n x = numpy.linspace(lmin, lmax, npts)\n c = numpy.zeros(npts)\n for i in range(npts):\n c[i] = sed.Nlambda(x[i]) * filter.Tlambda(x[i])\n o = numpy.ones(npts)\n I = numpy.zeros(2 * nOrder)\n lctr = numpy.mean(x)\n for k in range(2 * nOrder):\n I[k] = numpy.sum(o * (x - lctr) ** k * c)\n coef = numpy.zeros(nOrder + 1)\n coef[0] = 1.0\n A = numpy.zeros((nOrder, nOrder))\n for k in range(nOrder):\n for j in range(nOrder):\n A[k, j] = I[j + k]\n coef[1:] = numpy.linalg.solve(A, -I[nOrder:])[::-1]\n p = numpy.poly1d(coef)\n xroot = numpy.sort(numpy.real(p.r))\n wroot = numpy.zeros_like(xroot)\n pprime = numpy.polyder(p)\n for i in range(nOrder):\n px = numpy.poly1d(numpy.concatenate((xroot[:i], xroot[i + 1:])), r=True\n )\n wroot[i] = numpy.sum(px.c[::-1] * I[:nOrder]) / pprime(xroot[i])\n xroot = xroot + lctr\n return xroot, wroot\n\n\n<mask token>\n\n\ndef oversamp_psf(sed, filt, ovsamp, Nstep, scanum, pos, offsets, addInfo):\n parOn = False\n if hasattr(offsets, 'par'):\n parOn = True\n ZR = ZernRef.data[4 * (scanum - 1):4 * scanum, :]\n wt_L = 0.5 - pos[0] / sca.size\n wt_R = 0.5 + pos[0] / sca.size\n wt_B = 0.5 - pos[1] / sca.size\n wt_T = 0.5 + pos[1] / sca.size\n psi = wt_T * wt_L * ZR[0, :] + wt_B * wt_L * ZR[1, :] + wt_B * wt_R * ZR[\n 2, :] + wt_T * wt_R * ZR[3, :]\n xf = sca.x[scanum - 1] + pos[0]\n yf = sca.y[scanum - 1] + pos[1]\n if parOn:\n psi[3] += offsets.par[offset_index.foc]\n psi[4] += offsets.par[offset_index.astig2]\n psi[5] += offsets.par[offset_index.astig1]\n psi[6] += offsets.par[offset_index.coma2]\n 
psi[7] += offsets.par[offset_index.coma1]\n psi[3] += (offsets.par[offset_index.focg1] * xf + offsets.par[\n offset_index.focg2] * yf) / sca.scale\n scale_1um = ovsamp / (0.11 * numpy.pi / 648000) / maskfiles.D\n if filt == 'K':\n filter = Filter('STH', [1.95, 2.3])\n elif filt == 'F':\n filter = Filter('interp', FilterData[:, (0, 7)])\n elif filt == 'H':\n filter = Filter('interp', FilterData[:, (0, 6)])\n elif filt == 'W':\n filter = Filter('interp', FilterData[:, (0, 5)])\n elif filt == 'J':\n filter = Filter('interp', FilterData[:, (0, 4)])\n elif filt == 'Y':\n filter = Filter('interp', FilterData[:, (0, 3)])\n elif filt == 'Z':\n filter = Filter('interp', FilterData[:, (0, 2)])\n elif filt == 'R':\n filter = Filter('interp', FilterData[:, (0, 1)])\n else:\n print('Error: unknown filter')\n exit()\n la = numpy.linspace(0.4, 2.5, 2101)\n fla = numpy.zeros(2101)\n for i in range(2101):\n fla[i] = filter.Tlambda(la[i])\n scale = scale_1um * numpy.sum(la * fla) / numpy.sum(fla)\n mask = EmptyClass()\n mask.N = 1\n imk = 0\n while (imk < maskfiles.n_lores - 1 and Nstep / scale < maskfiles.N_in /\n 2 ** (imk + 1)):\n imk += 1\n if filt == 'F' or filt == 'K':\n mask.spline = maskfiles.i_full[scanum - 1 + maskfiles.nSCA * imk]\n else:\n mask.spline = maskfiles.i_rim[scanum - 1 + maskfiles.nSCA * imk]\n if hasattr(addInfo, 'ctr'):\n d = 0.5 * (1 - 1 / ovsamp)\n psi[1:3] -= (addInfo.ctr + d) * ovsamp / scale_1um / 4.0\n output = poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo)\n Cxx = Cyy = 0.09\n Cxy = 0.0\n if parOn:\n Cxx = 0.09 + offsets.par[offset_index.jxx]\n Cxy = offsets.par[offset_index.jxy]\n Cyy = 0.09 + offsets.par[offset_index.jyy]\n output_fft = numpy.fft.fft2(output)\n kx = numpy.zeros((Nstep, Nstep))\n ky = numpy.zeros((Nstep, Nstep))\n for i in range(-Nstep // 2, Nstep // 2):\n kx[:, i] = abs(i)\n ky[i, :] = abs(i)\n kx *= 2.0 * numpy.pi * ovsamp / Nstep\n ky *= 2.0 * numpy.pi * ovsamp / Nstep\n output_fft = output_fft * numpy.exp(-Cxx * kx ** 2 / 2.0 - Cyy * ky ** \n 2 / 2.0 - Cxy * kx * ky)\n output = numpy.real(numpy.fft.ifft2(output_fft))\n return output\n\n\n<mask token>\n\n\ndef postage_stamp(sed, filt, scanum, pos, offsets, addInfo):\n N = N_STD\n ov = OV_STD\n if hasattr(addInfo, 'many'):\n ov = addInfo.force_ov\n if hasattr(addInfo, 'FastMode'):\n if addInfo.FastMode:\n N = N // 2\n bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfo\n ) * addInfo.F\n out = numpy.zeros((psSize, psSize))\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n out[j, i] += numpy.sum(bigStamp[y:y + ov, x:x + ov])\n if hasattr(addInfo, 'vtpe'):\n out[j, i] += addInfo.vtpe * numpy.sum(bigStamp[y + ov:y + 2 *\n ov, x:x + ov])\n if hasattr(addInfo, 'many'):\n out = numpy.zeros((25, psSize, psSize))\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n for k in range(25):\n dy = k % 5 - 2\n dx = k // 5 - 2\n out[k, j, i] += numpy.sum(bigStamp[y + dy:y + dy + ov, \n x + dx:x + dx + ov])\n if hasattr(addInfo, 'bfe'):\n if hasattr(addInfo, 'many'):\n print('Error -- cannot do both bfe and many in postage_stamp')\n exit()\n dout = numpy.zeros_like(out)\n ah = 0\n if hasattr(addInfo, 'bfe_a'):\n ah += addInfo.bfe_a\n if hasattr(addInfo, 'bfe_aplus'):\n ah += addInfo.bfe_aplus\n for i in range(psSize - 1):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n shift = ov * 
ah * (out[j, i + 1] - out[j, i]) / 2.0\n if hasattr(addInfo, 'bfe_overwrite'):\n shift = ov * ah * (addInfo.stamp_in[j, i + 1] - addInfo\n .stamp_in[j, i]) / 2.0\n mflux = numpy.sum(bigStamp[y:y + ov, x + ov - 1:x + ov + 1]\n ) / 2.0\n dout[j, i] += shift * mflux\n dout[j, i + 1] -= shift * mflux\n av = 0\n if hasattr(addInfo, 'bfe_a'):\n av += addInfo.bfe_a\n if hasattr(addInfo, 'bfe_aplus'):\n av -= addInfo.bfe_aplus\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize - 1):\n y = N // 2 + (j - psSize // 2) * ov\n shift = ov * av * (out[j + 1, i] - out[j, i]) / 2.0\n if hasattr(addInfo, 'bfe_overwrite'):\n shift = ov * av * (addInfo.stamp_in[j + 1, i] - addInfo\n .stamp_in[j, i]) / 2.0\n mflux = numpy.sum(bigStamp[y + ov - 1:y + ov + 1, x:x + ov]\n ) / 2.0\n dout[j, i] += shift * mflux\n dout[j + 1, i] -= shift * mflux\n out += dout\n if hasattr(addInfo, 'bfe_overwrite'):\n out = dout\n return out\n\n\ndef psfmoments(sed, filt, scanum, pos, offsets, addInfo):\n N = N_STD\n ov = OV_STD\n if hasattr(addInfo, 'many'):\n ov = addInfo.force_ov\n if hasattr(addInfo, 'FastMode'):\n if addInfo.FastMode:\n N = N // 2\n addInfoX = copy.deepcopy(addInfo)\n addInfoX.ctr = numpy.zeros(2)\n addInfoX.F = 1.0\n bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfoX)\n bigStamp = convolve(bigStamp, numpy.ones((ov, ov)), mode='full', method\n ='direct') / ov ** 2\n Np = N + ov - 1\n mom = numpy.asarray([1, 0, 0, 4 * ov ** 2, 0, 4 * ov ** 2]).astype(numpy\n .float64)\n newmom = numpy.zeros_like(mom)\n con = 0.5\n xx1 = numpy.tile(numpy.linspace(-(Np - 1) / 2.0, (Np - 1) / 2.0, Np), (\n Np, 1))\n yy1 = numpy.copy(xx1.T)\n for iter in range(256):\n det = mom[3] * mom[5] - mom[4] ** 2\n xx = xx1 - mom[1]\n yy = yy1 - mom[2]\n G = numpy.exp((-mom[5] * xx ** 2 + 2 * mom[4] * xx * yy - mom[3] * \n yy ** 2) / 2.0 / det) * bigStamp\n newmom[0] = numpy.sum(G)\n newmom[1] = numpy.sum(G * xx)\n newmom[2] = numpy.sum(G * yy)\n newmom[3] = numpy.sum(G * xx ** 2)\n newmom[4] = numpy.sum(G * xx * yy)\n newmom[5] = numpy.sum(G * yy ** 2)\n mom[0] = 2 * newmom[0]\n err = newmom[1:] / newmom[0]\n err[-3:] -= mom[-3:] / 2.0\n mom[1:] += err * con\n return numpy.array([mom[0], mom[1] / ov, mom[2] / ov, (mom[3] + mom[5]) /\n ov ** 2, (mom[3] - mom[5]) / (mom[3] + mom[5]), 2 * mom[4] / (mom[3\n ] + mom[5])])\n\n\ndef chi2_postage_stamp(obs, theory, var):\n obs2 = numpy.maximum(obs + var, 1e-24)\n return numpy.sum(theory + var - obs2 - obs2 * numpy.log((theory + var) /\n obs2)) * 2\n",
"step-4": "<mask token>\n\n\ndef RectBivariateSplineZero(y1, x1, map1, kx=1, ky=1):\n return RectBivariateSpline(y1, x1, map1, kx=kx, ky=ky)\n y2 = numpy.zeros(numpy.size(y1) + 2)\n y2[1:-1] = y1\n y2[0] = 2 * y2[1] - y2[2]\n y2[-1] = 2 * y2[-2] - y2[-3]\n x2 = numpy.zeros(numpy.size(x1) + 2)\n x2[1:-1] = x1\n x2[0] = 2 * x2[1] - x2[2]\n x2[-1] = 2 * x2[-2] - x2[-3]\n map2 = numpy.zeros((numpy.size(y1) + 2, numpy.size(x1) + 2))\n map2[1:-1, 1:-1] = map1\n return RectBivariateSpline(y2, x2, map2, kx=kx, ky=ky)\n\n\nclass EmptyClass:\n pass\n\n\nclass SpectralEnergyDistribution:\n\n def __init__(self, type, info):\n self.type = type\n self.info = copy.deepcopy(info)\n\n def Nlambda(self, lambda_):\n if self.type == 'BB':\n T = self.info[0]\n x = 14387.769 / lambda_ / T\n return (2 / lambda_ ** 4 * 299792458000000.0 * 1000000000000.0 *\n numpy.exp(-x) / (1.0 - numpy.exp(-x)) * self.info[1])\n else:\n print('ERROR: Invalid SED type')\n exit()\n\n\nclass Filter:\n\n def __init__(self, type, info):\n self.type = type\n self.info = copy.deepcopy(info)\n\n def Tlambda(self, lambda_):\n if self.type == 'STH':\n lmin = self.info[0]\n dlmin = lmin * 0.02\n lmax = self.info[1]\n dlmax = lmax * 0.02\n return (numpy.tanh((lambda_ - lmin) / dlmin) - numpy.tanh((\n lambda_ - lmax) / dlmax)) / 2.0\n elif self.type == 'interp':\n return numpy.interp(lambda_, self.info[:, 0], self.info[:, 1])\n else:\n print('ERROR: Invalid filter type')\n exit()\n\n\n<mask token>\n\n\ndef make_mask_annulus(obs, Nstep, scale):\n xx = numpy.tile(numpy.linspace(-1 + 1 / Nstep, 1 - 1 / Nstep, Nstep), (\n Nstep, 1))\n yy = numpy.copy(xx.T)\n rho = numpy.sqrt(xx ** 2 + yy ** 2) * scale\n return numpy.where(numpy.logical_and(rho >= obs, rho < 1), numpy.ones((\n Nstep, Nstep)), numpy.zeros((Nstep, Nstep)))\n\n\ndef test_zernike():\n for k in range(36):\n psi = numpy.zeros(36)\n psi[k] = 1\n N = 5\n M = zernike_map_noll(psi, N, N / (N - 1))\n print(' *** Zernike {:2d} ***'.format(k + 1))\n for j in range(N):\n out = ''\n for i in range(N):\n out = out + ' {:10.5f}'.format(M[j, i])\n print(out)\n print('')\n\n\ndef mono_psf(psi, mask, scale, Nstep):\n if hasattr(mask, 'N'):\n if hasattr(mask, 'spline'):\n interp_spline = mask.spline\n else:\n N_in = 2048\n x_in = numpy.linspace(-1 + 1 / N_in, 1 - 1 / N_in, N_in)\n y_in = numpy.copy(x_in)\n interp_spline = RectBivariateSplineZero(y_in, x_in, mask.array,\n kx=1, ky=1)\n x2 = numpy.linspace(-1 + 1 / Nstep, 1 - 1 / Nstep, Nstep) * scale\n y2 = numpy.copy(x2)\n amplitude = interp_spline(y2, x2).astype(numpy.complex128\n ) * make_mask_annulus(0, Nstep, scale)\n else:\n amplitude = make_mask_annulus(0.32, Nstep, scale).astype(numpy.\n complex128)\n amplitude *= numpy.exp(2.0j * numpy.pi * zernike_map_noll(psi, Nstep,\n scale))\n amplitude = numpy.fft.ifft2(amplitude)\n power = numpy.abs(amplitude) ** 2\n newpower = numpy.zeros_like(power)\n newpower[Nstep // 2:Nstep, Nstep // 2:Nstep] = power[0:Nstep // 2, 0:\n Nstep // 2]\n newpower[Nstep // 2:Nstep, 0:Nstep // 2] = power[0:Nstep // 2, Nstep //\n 2:Nstep]\n newpower[0:Nstep // 2, Nstep // 2:Nstep] = power[Nstep // 2:Nstep, 0:\n Nstep // 2]\n newpower[0:Nstep // 2, 0:Nstep // 2] = power[Nstep // 2:Nstep, Nstep //\n 2:Nstep]\n return newpower / numpy.sum(newpower)\n\n\ndef onescut(n):\n array = numpy.ones(n + 1)\n array[0] = array[-1] = 0.5\n return array / n\n\n\ndef gq_weights(sed, filter, nOrder, wlrange):\n lmin = wlrange[0]\n lmax = wlrange[1]\n npts = wlrange[2]\n x = numpy.linspace(lmin, lmax, npts)\n c = numpy.zeros(npts)\n for i in 
range(npts):\n c[i] = sed.Nlambda(x[i]) * filter.Tlambda(x[i])\n o = numpy.ones(npts)\n I = numpy.zeros(2 * nOrder)\n lctr = numpy.mean(x)\n for k in range(2 * nOrder):\n I[k] = numpy.sum(o * (x - lctr) ** k * c)\n coef = numpy.zeros(nOrder + 1)\n coef[0] = 1.0\n A = numpy.zeros((nOrder, nOrder))\n for k in range(nOrder):\n for j in range(nOrder):\n A[k, j] = I[j + k]\n coef[1:] = numpy.linalg.solve(A, -I[nOrder:])[::-1]\n p = numpy.poly1d(coef)\n xroot = numpy.sort(numpy.real(p.r))\n wroot = numpy.zeros_like(xroot)\n pprime = numpy.polyder(p)\n for i in range(nOrder):\n px = numpy.poly1d(numpy.concatenate((xroot[:i], xroot[i + 1:])), r=True\n )\n wroot[i] = numpy.sum(px.c[::-1] * I[:nOrder]) / pprime(xroot[i])\n xroot = xroot + lctr\n return xroot, wroot\n\n\ndef poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo):\n hard_lmin = 0.4\n hard_lmax = 2.5\n hard_Nl = 420\n ilmin = hard_Nl - 1\n ilmax = 0\n for il in range(1, hard_Nl):\n wl = hard_lmin + il / hard_Nl * (hard_lmax - hard_lmin)\n if filter.Tlambda(wl) > 0.0001:\n if il < ilmin:\n ilmin = il\n wlmin = wl\n if il > ilmax:\n ilmax = il\n wlmax = wl\n na = ilmin // 6 + 1\n nb = (hard_Nl - ilmax) // 6 + 1\n wl = numpy.concatenate((numpy.linspace(hard_lmin, wlmin, na + 1), numpy\n .linspace(wlmin, wlmax, ilmax - ilmin + 1), numpy.linspace(wlmax,\n hard_lmax, nb + 1)))\n dwl = numpy.concatenate(((wlmin - hard_lmin) * onescut(na), (wlmax -\n wlmin) * onescut(ilmax - ilmin), (hard_lmax - wlmax) * onescut(nb)))\n if hasattr(addInfo, 'FastMode'):\n if addInfo.FastMode:\n wl, dwl = gq_weights(sed, filter, 10, [wlmin, wlmax, ilmax -\n ilmin + 1])\n sumc = 0.0\n output = numpy.zeros((Nstep, Nstep))\n for i in range(numpy.size(wl)):\n c = sed.Nlambda(wl[i]) * filter.Tlambda(wl[i]) * dwl[i]\n if hasattr(addInfo, 'FastMode'):\n if addInfo.FastMode:\n c = dwl[i]\n this_psi = numpy.copy(psi) / wl[i]\n sumc += c\n output += c * mono_psf(this_psi, mask, scale_1um * wl[i], Nstep)\n output /= sumc\n return output\n\n\ndef oversamp_psf(sed, filt, ovsamp, Nstep, scanum, pos, offsets, addInfo):\n parOn = False\n if hasattr(offsets, 'par'):\n parOn = True\n ZR = ZernRef.data[4 * (scanum - 1):4 * scanum, :]\n wt_L = 0.5 - pos[0] / sca.size\n wt_R = 0.5 + pos[0] / sca.size\n wt_B = 0.5 - pos[1] / sca.size\n wt_T = 0.5 + pos[1] / sca.size\n psi = wt_T * wt_L * ZR[0, :] + wt_B * wt_L * ZR[1, :] + wt_B * wt_R * ZR[\n 2, :] + wt_T * wt_R * ZR[3, :]\n xf = sca.x[scanum - 1] + pos[0]\n yf = sca.y[scanum - 1] + pos[1]\n if parOn:\n psi[3] += offsets.par[offset_index.foc]\n psi[4] += offsets.par[offset_index.astig2]\n psi[5] += offsets.par[offset_index.astig1]\n psi[6] += offsets.par[offset_index.coma2]\n psi[7] += offsets.par[offset_index.coma1]\n psi[3] += (offsets.par[offset_index.focg1] * xf + offsets.par[\n offset_index.focg2] * yf) / sca.scale\n scale_1um = ovsamp / (0.11 * numpy.pi / 648000) / maskfiles.D\n if filt == 'K':\n filter = Filter('STH', [1.95, 2.3])\n elif filt == 'F':\n filter = Filter('interp', FilterData[:, (0, 7)])\n elif filt == 'H':\n filter = Filter('interp', FilterData[:, (0, 6)])\n elif filt == 'W':\n filter = Filter('interp', FilterData[:, (0, 5)])\n elif filt == 'J':\n filter = Filter('interp', FilterData[:, (0, 4)])\n elif filt == 'Y':\n filter = Filter('interp', FilterData[:, (0, 3)])\n elif filt == 'Z':\n filter = Filter('interp', FilterData[:, (0, 2)])\n elif filt == 'R':\n filter = Filter('interp', FilterData[:, (0, 1)])\n else:\n print('Error: unknown filter')\n exit()\n la = numpy.linspace(0.4, 2.5, 2101)\n fla = 
numpy.zeros(2101)\n for i in range(2101):\n fla[i] = filter.Tlambda(la[i])\n scale = scale_1um * numpy.sum(la * fla) / numpy.sum(fla)\n mask = EmptyClass()\n mask.N = 1\n imk = 0\n while (imk < maskfiles.n_lores - 1 and Nstep / scale < maskfiles.N_in /\n 2 ** (imk + 1)):\n imk += 1\n if filt == 'F' or filt == 'K':\n mask.spline = maskfiles.i_full[scanum - 1 + maskfiles.nSCA * imk]\n else:\n mask.spline = maskfiles.i_rim[scanum - 1 + maskfiles.nSCA * imk]\n if hasattr(addInfo, 'ctr'):\n d = 0.5 * (1 - 1 / ovsamp)\n psi[1:3] -= (addInfo.ctr + d) * ovsamp / scale_1um / 4.0\n output = poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo)\n Cxx = Cyy = 0.09\n Cxy = 0.0\n if parOn:\n Cxx = 0.09 + offsets.par[offset_index.jxx]\n Cxy = offsets.par[offset_index.jxy]\n Cyy = 0.09 + offsets.par[offset_index.jyy]\n output_fft = numpy.fft.fft2(output)\n kx = numpy.zeros((Nstep, Nstep))\n ky = numpy.zeros((Nstep, Nstep))\n for i in range(-Nstep // 2, Nstep // 2):\n kx[:, i] = abs(i)\n ky[i, :] = abs(i)\n kx *= 2.0 * numpy.pi * ovsamp / Nstep\n ky *= 2.0 * numpy.pi * ovsamp / Nstep\n output_fft = output_fft * numpy.exp(-Cxx * kx ** 2 / 2.0 - Cyy * ky ** \n 2 / 2.0 - Cxy * kx * ky)\n output = numpy.real(numpy.fft.ifft2(output_fft))\n return output\n\n\n<mask token>\n\n\ndef postage_stamp(sed, filt, scanum, pos, offsets, addInfo):\n N = N_STD\n ov = OV_STD\n if hasattr(addInfo, 'many'):\n ov = addInfo.force_ov\n if hasattr(addInfo, 'FastMode'):\n if addInfo.FastMode:\n N = N // 2\n bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfo\n ) * addInfo.F\n out = numpy.zeros((psSize, psSize))\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n out[j, i] += numpy.sum(bigStamp[y:y + ov, x:x + ov])\n if hasattr(addInfo, 'vtpe'):\n out[j, i] += addInfo.vtpe * numpy.sum(bigStamp[y + ov:y + 2 *\n ov, x:x + ov])\n if hasattr(addInfo, 'many'):\n out = numpy.zeros((25, psSize, psSize))\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n for k in range(25):\n dy = k % 5 - 2\n dx = k // 5 - 2\n out[k, j, i] += numpy.sum(bigStamp[y + dy:y + dy + ov, \n x + dx:x + dx + ov])\n if hasattr(addInfo, 'bfe'):\n if hasattr(addInfo, 'many'):\n print('Error -- cannot do both bfe and many in postage_stamp')\n exit()\n dout = numpy.zeros_like(out)\n ah = 0\n if hasattr(addInfo, 'bfe_a'):\n ah += addInfo.bfe_a\n if hasattr(addInfo, 'bfe_aplus'):\n ah += addInfo.bfe_aplus\n for i in range(psSize - 1):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize):\n y = N // 2 + (j - psSize // 2) * ov\n shift = ov * ah * (out[j, i + 1] - out[j, i]) / 2.0\n if hasattr(addInfo, 'bfe_overwrite'):\n shift = ov * ah * (addInfo.stamp_in[j, i + 1] - addInfo\n .stamp_in[j, i]) / 2.0\n mflux = numpy.sum(bigStamp[y:y + ov, x + ov - 1:x + ov + 1]\n ) / 2.0\n dout[j, i] += shift * mflux\n dout[j, i + 1] -= shift * mflux\n av = 0\n if hasattr(addInfo, 'bfe_a'):\n av += addInfo.bfe_a\n if hasattr(addInfo, 'bfe_aplus'):\n av -= addInfo.bfe_aplus\n for i in range(psSize):\n x = N // 2 + (i - psSize // 2) * ov\n for j in range(psSize - 1):\n y = N // 2 + (j - psSize // 2) * ov\n shift = ov * av * (out[j + 1, i] - out[j, i]) / 2.0\n if hasattr(addInfo, 'bfe_overwrite'):\n shift = ov * av * (addInfo.stamp_in[j + 1, i] - addInfo\n .stamp_in[j, i]) / 2.0\n mflux = numpy.sum(bigStamp[y + ov - 1:y + ov + 1, x:x + ov]\n ) / 2.0\n dout[j, i] += shift * mflux\n dout[j + 1, i] -= shift 
* mflux\n out += dout\n if hasattr(addInfo, 'bfe_overwrite'):\n out = dout\n return out\n\n\ndef psfmoments(sed, filt, scanum, pos, offsets, addInfo):\n N = N_STD\n ov = OV_STD\n if hasattr(addInfo, 'many'):\n ov = addInfo.force_ov\n if hasattr(addInfo, 'FastMode'):\n if addInfo.FastMode:\n N = N // 2\n addInfoX = copy.deepcopy(addInfo)\n addInfoX.ctr = numpy.zeros(2)\n addInfoX.F = 1.0\n bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfoX)\n bigStamp = convolve(bigStamp, numpy.ones((ov, ov)), mode='full', method\n ='direct') / ov ** 2\n Np = N + ov - 1\n mom = numpy.asarray([1, 0, 0, 4 * ov ** 2, 0, 4 * ov ** 2]).astype(numpy\n .float64)\n newmom = numpy.zeros_like(mom)\n con = 0.5\n xx1 = numpy.tile(numpy.linspace(-(Np - 1) / 2.0, (Np - 1) / 2.0, Np), (\n Np, 1))\n yy1 = numpy.copy(xx1.T)\n for iter in range(256):\n det = mom[3] * mom[5] - mom[4] ** 2\n xx = xx1 - mom[1]\n yy = yy1 - mom[2]\n G = numpy.exp((-mom[5] * xx ** 2 + 2 * mom[4] * xx * yy - mom[3] * \n yy ** 2) / 2.0 / det) * bigStamp\n newmom[0] = numpy.sum(G)\n newmom[1] = numpy.sum(G * xx)\n newmom[2] = numpy.sum(G * yy)\n newmom[3] = numpy.sum(G * xx ** 2)\n newmom[4] = numpy.sum(G * xx * yy)\n newmom[5] = numpy.sum(G * yy ** 2)\n mom[0] = 2 * newmom[0]\n err = newmom[1:] / newmom[0]\n err[-3:] -= mom[-3:] / 2.0\n mom[1:] += err * con\n return numpy.array([mom[0], mom[1] / ov, mom[2] / ov, (mom[3] + mom[5]) /\n ov ** 2, (mom[3] - mom[5]) / (mom[3] + mom[5]), 2 * mom[4] / (mom[3\n ] + mom[5])])\n\n\ndef chi2_postage_stamp(obs, theory, var):\n obs2 = numpy.maximum(obs + var, 1e-24)\n return numpy.sum(theory + var - obs2 - obs2 * numpy.log((theory + var) /\n obs2)) * 2\n",
"step-5": "import numpy\nimport numpy.fft\nimport numpy.linalg\nimport copy\nfrom astropy.io import fits\nfrom scipy.interpolate import RectBivariateSpline\nfrom scipy.signal import convolve\nimport offset_index\n\n# some basic definitions\npsSize = 9 # psSize x psSize postage stamps of stars\n\n# zero padded RectBivariateSpline, if on\ndef RectBivariateSplineZero(y1,x1,map1,kx=1,ky=1):\n return RectBivariateSpline(y1, x1, map1, kx=kx, ky=ky)\n y2 = numpy.zeros(numpy.size(y1)+2)\n y2[1:-1] = y1\n y2[0] = 2*y2[1]-y2[2]\n y2[-1] = 2*y2[-2]-y2[-3]\n x2 = numpy.zeros(numpy.size(x1)+2)\n x2[1:-1] = x1\n x2[0] = 2*x2[1]-x2[2]\n x2[-1] = 2*x2[-2]-x2[-3]\n map2 = numpy.zeros((numpy.size(y1)+2, numpy.size(x1)+2))\n map2[1:-1,1:-1] = map1\n return RectBivariateSpline(y2, x2, map2, kx=kx, ky=ky)\n\nclass EmptyClass():\n pass\n\n# spectral energy distribution class\nclass SpectralEnergyDistribution():\n\n # make an SED -- several options for type\n def __init__(self, type, info):\n self.type = type\n self.info = copy.deepcopy(info)\n\n # get Nlambda (photons/m^2/s/um) at lambda_ (um)\n def Nlambda(self, lambda_):\n\n # blackbody, info = [T (K), solidangle]\n if self.type=='BB':\n T = self.info[0]\n x = 14387.769/lambda_/T # hc/(kTlambda)\n return(2/lambda_**4*2.99792458e14*1e12*numpy.exp(-x)/(1.-numpy.exp(-x))*self.info[1])\n # the 1e12 is the conversion from um^2 -> m^2\n else:\n print('ERROR: Invalid SED type')\n exit()\n\n# filter class\nclass Filter():\n\n # make a filter -- several options for type\n def __init__(self, type, info):\n self.type = type\n self.info = copy.deepcopy(info)\n\n # get transmission\n def Tlambda(self, lambda_):\n\n # smoothed tophat\n if self.type=='STH':\n lmin = self.info[0]; dlmin = lmin*.02\n lmax = self.info[1]; dlmax = lmax*.02\n return((numpy.tanh((lambda_-lmin)/dlmin)-numpy.tanh((lambda_-lmax)/dlmax))/2.)\n # interpolated file\n # info shape (N,2) -- info[:,0] = wavelength, info[:,1] = throughput\n elif self.type=='interp':\n return(numpy.interp(lambda_, self.info[:,0], self.info[:,1]))\n else:\n print('ERROR: Invalid filter type')\n exit()\n\n# load mask files\nmaskfiles = EmptyClass()\nmaskfiles.D = 2292981.05344 # um\nmaskfiles.rim = []\nmaskfiles.full = []\nmaskfiles.i_rim = []\nmaskfiles.i_full = []\nmaskfiles.nSCA = 18\nfor k in range(18):\n inFile = fits.open('pupils/SCA{:d}_rim_mask.fits'.format(k+1))\n maskfiles.rim += [numpy.copy(inFile[0].data[::-1,:])]\n inFile.close()\n inFile = fits.open('pupils/SCA{:d}_full_mask.fits'.format(k+1))\n maskfiles.full += [numpy.copy(inFile[0].data[::-1,:])]\n inFile.close()\n\n # normalize\n maskfiles.rim[k] /= numpy.amax(maskfiles.rim[k])\n maskfiles.full[k] /= numpy.amax(maskfiles.full[k])\n\n N_in = maskfiles.N_in = 2048\n x_in = numpy.linspace(-1+1/N_in,1-1/N_in,N_in)\n y_in = numpy.copy(x_in)\n interp_spline = RectBivariateSplineZero(y_in, x_in, maskfiles.rim[k], kx=1, ky=1)\n maskfiles.i_rim += [interp_spline]\n interp_spline = RectBivariateSplineZero(y_in, x_in, maskfiles.full[k], kx=1, ky=1)\n maskfiles.i_full += [interp_spline]\n\n # lower resolution masks\n maskfiles.n_lores = 7\n for ku in range(1,maskfiles.n_lores):\n N2 = N_in//2**ku\n x_in = numpy.linspace(-1+1/N2,1-1/N2,N2)\n y_in = numpy.copy(x_in)\n interp_spline = RectBivariateSplineZero(y_in, x_in, numpy.mean(maskfiles.rim[k].reshape(N2,2**ku,N2,2**ku), axis=(1,3)), kx=1, ky=1)\n maskfiles.i_rim += [interp_spline]\n interp_spline = RectBivariateSplineZero(y_in, x_in, numpy.mean(maskfiles.full[k].reshape(N2,2**ku,N2,2**ku), axis=(1,3)), kx=1, ky=1)\n 
maskfiles.i_full += [interp_spline]\n\n# SCA locations\nsca = EmptyClass()\nsca.size = 40.88 # mm\nsca.x = numpy.asarray([-22.14, -22.29, -22.44, -66.42, -66.92, -67.42, -110.70, -111.48, -112.64,\n 22.14, 22.29, 22.44, 66.42, 66.92, 67.42, 110.70, 111.48, 112.64])\nsca.y = numpy.asarray([12.15, -37.03, -82.06, 20.90, -28.28, -73.06, 42.20, 13.46, -51.06,\n 12.15, -37.03, -82.06, 20.90, -28.28, -73.06, 42.20, 13.46, -51.06])\nsca.scale = 133.08\n\n# reference Zernikes\nZernRef = EmptyClass()\nZernRef.data = numpy.loadtxt('pupils/zernike_ref.txt')[:,-22:] * 1.38\n\n# filter data\nFilterData = numpy.loadtxt('pupils/filter.dat')\nFilterData[:,1:] /= numpy.pi/4.*(maskfiles.D/1e6)**2\n\n# makes map of Zernikes of a given amplitude\n# amp[0:Namp] = Z1 ... ZNamp\n# on a spacing Ngrid (x, y = -(1-1/Ngrid) .. +(1-1/Ngrid) multiplied by scale)\n#\ndef zernike_map_noll(amp, Ngrid, scale):\n xx = numpy.tile(numpy.linspace(-1+1/Ngrid,1-1/Ngrid,Ngrid), (Ngrid,1))\n yy = numpy.copy(xx.T)\n rho = numpy.sqrt(xx**2+yy**2)*scale\n phi = numpy.arctan2(yy,xx)\n output = numpy.zeros((Ngrid,Ngrid))\n nmax = 0\n namp = numpy.size(amp)\n while namp>(nmax+1)*(nmax+2)//2: nmax+=1\n rpows = numpy.ones((nmax+1,Ngrid,Ngrid))\n trigphi = numpy.ones((2*nmax+1,Ngrid,Ngrid))\n for i in range(1,nmax+1): rpows[i,:,:] = rho**i\n for i in range(0,nmax+1): trigphi[i,:,:] = numpy.cos(i*phi)\n for i in range(1,nmax+1): trigphi[-i,:,:] = numpy.sin(i*phi)\n # loop over Zernikes\n for n in range(nmax+1):\n for m in range(-n,n+1,2):\n Z = numpy.zeros((Ngrid,Ngrid))\n for k in range((n-abs(m))//2+1):\n coef = (-1)**k * numpy.math.factorial(n-k)/numpy.math.factorial(k) \\\n /numpy.math.factorial((n-m)//2-k)/numpy.math.factorial((n+m)//2-k) \n Z += coef * rpows[n-2*k,:,:]\n #if m>=0:\n # Z *= numpy.cos(m*phi)\n #else:\n # Z *= numpy.sin(-m*phi)\n Z *= trigphi[m,:,:]\n j = n*(n+1)//2 + abs(m)\n if (-1)**j*(m+.5)<0 or m==0: j += 1\n #print(n,m,j)\n factor = numpy.sqrt(n+1)\n if m!=0: factor *= numpy.sqrt(2)\n if j<=namp: output += factor * amp[j-1] * Z\n return(output)\n\n# make annular mask of given obstruction (fraction) and scale\ndef make_mask_annulus(obs, Nstep, scale):\n xx = numpy.tile(numpy.linspace(-1+1/Nstep,1-1/Nstep,Nstep), (Nstep,1))\n yy = numpy.copy(xx.T)\n rho = numpy.sqrt(xx**2+yy**2)*scale\n return(numpy.where(numpy.logical_and(rho>=obs,rho<1),numpy.ones((Nstep,Nstep)),numpy.zeros((Nstep,Nstep))))\n\ndef test_zernike():\n for k in range(36):\n psi = numpy.zeros(36)\n psi[k] = 1\n N=5\n M = zernike_map_noll(psi, N, N/(N-1))\n print(' *** Zernike {:2d} ***'.format(k+1))\n for j in range(N):\n out = ''\n for i in range(N):\n out = out + ' {:10.5f}'.format(M[j,i])\n print(out)\n print('')\n\n# psi is a vector of Zernikes, in wavelengths\n# mask information: (currently none)\n# scale = sampling (points per lambda/D)\n# Nstep = # grid points\n# output normalized to sum to 1\ndef mono_psf(psi, mask, scale, Nstep):\n if hasattr(mask, 'N'):\n if hasattr(mask, 'spline'):\n interp_spline = mask.spline\n else:\n N_in = 2048\n x_in = numpy.linspace(-1+1/N_in,1-1/N_in,N_in)\n y_in = numpy.copy(x_in)\n interp_spline = RectBivariateSplineZero(y_in, x_in, mask.array, kx=1, ky=1)\n x2 = numpy.linspace(-1+1/Nstep,1-1/Nstep,Nstep)*scale\n y2 = numpy.copy(x2)\n amplitude = interp_spline(y2,x2).astype(numpy.complex128) * make_mask_annulus(0, Nstep, scale)\n else:\n amplitude = make_mask_annulus(.32, Nstep, scale).astype(numpy.complex128)\n amplitude *= numpy.exp(2j * numpy.pi * zernike_map_noll(psi, Nstep, scale))\n amplitude = 
numpy.fft.ifft2(amplitude)\n power = numpy.abs(amplitude)**2\n # shift to center\n newpower = numpy.zeros_like(power)\n newpower[Nstep//2:Nstep,Nstep//2:Nstep] = power[0:Nstep//2,0:Nstep//2]\n newpower[Nstep//2:Nstep,0:Nstep//2] = power[0:Nstep//2,Nstep//2:Nstep]\n newpower[0:Nstep//2,Nstep//2:Nstep] = power[Nstep//2:Nstep,0:Nstep//2]\n newpower[0:Nstep//2,0:Nstep//2] = power[Nstep//2:Nstep,Nstep//2:Nstep]\n return(newpower/numpy.sum(newpower))\n\n# helper function\ndef onescut(n):\n array = numpy.ones((n+1))\n array[0] = array[-1] = .5\n return(array/n)\n\n# Gaussian quadrature weights across a filter\n# sed = spectral energy distribution\n# filter = filter information (incl. bandpass)\n# nOrder = order of polynomial (number of nodes)\n# wlrange = [lmin,lmax,npts] in um\n#\n# returns wavelengths, weights\ndef gq_weights(sed, filter, nOrder, wlrange):\n # unpack info\n lmin = wlrange[0]; lmax = wlrange[1]; npts = wlrange[2]\n\n # build integrals I_k = int x^k S(x) F(x) dx\n x = numpy.linspace(lmin,lmax,npts)\n c = numpy.zeros((npts))\n for i in range(npts):\n c[i] = sed.Nlambda(x[i]) * filter.Tlambda(x[i])\n o = numpy.ones((npts))\n I = numpy.zeros((2*nOrder))\n lctr = numpy.mean(x)\n for k in range(2*nOrder):\n I[k] = numpy.sum(o*(x-lctr)**k*c)\n # orthogonal polynomial p_n\n # require sum_{j=0}^n coef_{n-j} I_{j+k} = 0 or\n # sum_{j=0}^{n-1} coef_{n-j} I_{j+k} = -I_{n+k} for k = 0 .. n-1\n coef = numpy.zeros((nOrder+1))\n coef[0] = 1.\n A = numpy.zeros((nOrder,nOrder))\n for k in range(nOrder):\n for j in range(nOrder):\n A[k,j] = I[j+k]\n coef[1:] = numpy.linalg.solve(A, -I[nOrder:])[::-1]\n p = numpy.poly1d(coef)\n xroot = numpy.sort(numpy.real(p.r))\n wroot = numpy.zeros_like(xroot)\n pprime = numpy.polyder(p)\n for i in range(nOrder):\n px = numpy.poly1d(numpy.concatenate((xroot[:i], xroot[i+1:])), r=True)\n wroot[i] = numpy.sum(px.c[::-1]*I[:nOrder]) / pprime(xroot[i])\n xroot = xroot + lctr\n return xroot,wroot\n\n# psi is a vector of Zernikes, in microns\n# mask information: (currently none)\n# sed = spectral energy distribution\n# scale = sampling (points per lambda/D @ 1 um)\n# Nstep = # grid points\n# filter = filter information (incl. 
bandpass)\n# addInfo = class for general additional information\n# output normalized to sum to 1\ndef poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo):\n\n # integration steps\n hard_lmin = 0.4\n hard_lmax = 2.5\n hard_Nl = 420\n\n ilmin = hard_Nl-1; ilmax = 0\n for il in range(1,hard_Nl):\n wl = hard_lmin + il/hard_Nl*(hard_lmax-hard_lmin)\n if filter.Tlambda(wl)>1e-4:\n if il<ilmin:\n ilmin=il\n wlmin=wl\n if il>ilmax:\n ilmax=il\n wlmax=wl\n na = ilmin//6 + 1\n nb = (hard_Nl-ilmax)//6 + 1\n wl = numpy.concatenate((numpy.linspace(hard_lmin,wlmin,na+1), numpy.linspace(wlmin,wlmax,ilmax-ilmin+1), numpy.linspace(wlmax,hard_lmax,nb+1)))\n dwl = numpy.concatenate(((wlmin-hard_lmin)*onescut(na), (wlmax-wlmin)*onescut(ilmax-ilmin), (hard_lmax-wlmax)*onescut(nb)))\n #print(wl,dwl,numpy.size(wl),numpy.size(dwl))\n\n # reduced coverage\n if hasattr(addInfo,'FastMode'):\n if addInfo.FastMode:\n wl, dwl = gq_weights(sed, filter, 10, [wlmin,wlmax,ilmax-ilmin+1])\n\n # make output PSF\n sumc = 0.\n output = numpy.zeros((Nstep,Nstep))\n for i in range(numpy.size(wl)):\n c = sed.Nlambda(wl[i]) * filter.Tlambda(wl[i]) * dwl[i]\n if hasattr(addInfo,'FastMode'):\n if addInfo.FastMode: c = dwl[i]\n this_psi = numpy.copy(psi)/wl[i] # convert from um -> wavelengths of wavefront\n sumc += c\n output += c * mono_psf(this_psi, mask, scale_1um*wl[i], Nstep)\n #print('{:6.4f} {:11.5E}'.format(wl[i],filter.Tlambda(wl[i])))\n output /= sumc\n\n return(output)\n\n# make oversampled PSF at given SCA, position\n#\n# sed = source SED\n# filt = filter (letter: RZYJHFK)\n# ovsamp = oversampling factor\n# Nstep = number of samples in each axis\n# scanum = SCA number (1..18)\n# pos = (x,y) position on SCA in mm (0,0)=center\n# offsets = adjustment parameters\n# .par -> offset parameters\n# addInfo = additional information class:\n# .ctr -> centroid (dx,dy) \ndef oversamp_psf(sed, filt, ovsamp, Nstep, scanum, pos, offsets, addInfo):\n\n # get information\n parOn = False\n if hasattr(offsets, 'par'): parOn = True\n\n # get Zernikes in microns\n ZR = ZernRef.data[4*(scanum-1):4*scanum,:]\n wt_L = .5 - pos[0]/sca.size\n wt_R = .5 + pos[0]/sca.size\n wt_B = .5 - pos[1]/sca.size\n wt_T = .5 + pos[1]/sca.size\n psi = wt_T*wt_L*ZR[0,:] + wt_B*wt_L*ZR[1,:] + wt_B*wt_R*ZR[2,:] + wt_T*wt_R*ZR[3,:]\n\n xf = sca.x[scanum-1] + pos[0]\n yf = sca.y[scanum-1] + pos[1]\n\n # Zernike offsets\n if parOn:\n psi[3] += offsets.par[offset_index.foc ]\n psi[4] += offsets.par[offset_index.astig2]\n psi[5] += offsets.par[offset_index.astig1]\n\n psi[6] += offsets.par[offset_index.coma2]\n psi[7] += offsets.par[offset_index.coma1]\n\n psi[3] += (offsets.par[offset_index.focg1]*xf + offsets.par[offset_index.focg2]*yf)/sca.scale\n\n scale_1um = ovsamp / (.11*numpy.pi/648000) / maskfiles.D\n #print(scale_1um)\n\n # filter curves\n if filt=='K':\n filter = Filter('STH', [1.95,2.30])\n elif filt=='F':\n filter = Filter('interp', FilterData[:,(0,7)])\n elif filt=='H':\n filter = Filter('interp', FilterData[:,(0,6)])\n elif filt=='W':\n filter = Filter('interp', FilterData[:,(0,5)])\n elif filt=='J':\n filter = Filter('interp', FilterData[:,(0,4)])\n elif filt=='Y':\n filter = Filter('interp', FilterData[:,(0,3)])\n elif filt=='Z':\n filter = Filter('interp', FilterData[:,(0,2)])\n elif filt=='R':\n filter = Filter('interp', FilterData[:,(0,1)])\n else:\n print('Error: unknown filter')\n exit()\n\n la = numpy.linspace(.4, 2.5, 2101)\n fla = numpy.zeros(2101)\n for i in range(2101): fla[i] = filter.Tlambda(la[i])\n scale = 
scale_1um*numpy.sum(la*fla)/numpy.sum(fla)\n\n # get the mask\n mask = EmptyClass(); mask.N=1\n imk = 0\n while imk<maskfiles.n_lores-1 and Nstep/scale<maskfiles.N_in/2**(imk+1): imk+=1\n #print(' *** ', Nstep, scale, scale/scale_1um, imk)\n if filt=='F' or filt=='K':\n mask.spline = maskfiles.i_full[scanum-1 + maskfiles.nSCA*imk]\n else:\n mask.spline = maskfiles.i_rim[scanum-1 + maskfiles.nSCA*imk]\n\n # x & y offsets\n if hasattr(addInfo, 'ctr'):\n d = .5*(1-1/ovsamp)\n psi[1:3] -= (addInfo.ctr+d) * ovsamp / scale_1um / 4.\n\n output = poly_psf(psi, mask, sed, scale_1um, Nstep, filter, addInfo)\n\n # smooth\n Cxx = Cyy = .09; Cxy = 0.\n if parOn:\n Cxx = .09 + offsets.par[offset_index.jxx ]\n Cxy = offsets.par[offset_index.jxy ]\n Cyy = .09 + offsets.par[offset_index.jyy ]\n\n output_fft = numpy.fft.fft2(output)\n kx = numpy.zeros((Nstep,Nstep))\n ky = numpy.zeros((Nstep,Nstep))\n for i in range(-Nstep//2, Nstep//2):\n kx[:,i] = abs(i)\n ky[i,:] = abs(i)\n kx *= 2.*numpy.pi*ovsamp/Nstep\n ky *= 2.*numpy.pi*ovsamp/Nstep\n output_fft = output_fft * numpy.exp(-Cxx*kx**2/2. - Cyy*ky**2/2. - Cxy*kx*ky)\n output = numpy.real(numpy.fft.ifft2(output_fft))\n\n return(output)\n\n# parameters for next couple of functions\nN_STD = 1024 # must be a multiple of 4\nOV_STD = 8\n\n# make oversampled PSF at given SCA, position\n#\n# sed = source SED\n# filt = filter (letter: RZYJHFK)\n# scanum = SCA number (1..18)\n# pos = (x,y) position on SCA in mm (0,0)=center\n# offsets = adjustment parameters (placeholder)\n# addInfo = additional information class:\n# .F -> total counts (in e)\n# .ctr -> centroid (dx,dy)\n# .many -> @ 5x5 grid of offsets\n#\n# .bfe = add bfe (can include .bfe_a, .bfe_aplus)\n#\n# .bfe_overwrite => special mode to compute BFE with time dependent PSF\n# .stamp_in = input stamp (so compute BFE from stamp_in *acting on* this PSF)\ndef postage_stamp(sed, filt, scanum, pos, offsets, addInfo):\n N = N_STD # must be even\n ov = OV_STD\n if hasattr(addInfo,'many'):\n ov = addInfo.force_ov\n if hasattr(addInfo,'FastMode'):\n if addInfo.FastMode:\n N = N//2\n bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfo) * addInfo.F\n out = numpy.zeros((psSize, psSize))\n for i in range(psSize):\n x = N//2+(i-psSize//2)*ov\n for j in range(psSize):\n y = N//2+(j-psSize//2)*ov\n out[j,i] += numpy.sum(bigStamp[y:y+ov,x:x+ov])\n if hasattr(addInfo, 'vtpe'):\n out[j,i] += addInfo.vtpe * numpy.sum(bigStamp[y+ov:y+2*ov,x:x+ov])\n if hasattr(addInfo,'many'):\n out = numpy.zeros((25, psSize, psSize))\n for i in range(psSize):\n x = N//2+(i-psSize//2)*ov\n for j in range(psSize):\n y = N//2+(j-psSize//2)*ov\n for k in range(25):\n dy = k%5 - 2; dx = k//5 - 2\n out[k,j,i] += numpy.sum(bigStamp[y+dy:y+dy+ov,x+dx:x+dx+ov])\n\n # BFE?\n if hasattr(addInfo, 'bfe'):\n if hasattr(addInfo,'many'):\n print('Error -- cannot do both bfe and many in postage_stamp')\n exit()\n dout = numpy.zeros_like(out)\n # horizontal BFE\n ah = 0\n if hasattr(addInfo, 'bfe_a'): ah += addInfo.bfe_a\n if hasattr(addInfo, 'bfe_aplus'): ah += addInfo.bfe_aplus\n for i in range(psSize-1):\n x = N//2+(i-psSize//2)*ov\n for j in range(psSize):\n y = N//2+(j-psSize//2)*ov\n shift = ov * ah * (out[j,i+1]-out[j,i]) / 2. 
# in sub-pixels, average over exposure\n if hasattr(addInfo, 'bfe_overwrite'): shift = ov * ah * (addInfo.stamp_in[j,i+1]-addInfo.stamp_in[j,i]) / 2.\n mflux = numpy.sum(bigStamp[y:y+ov,x+ov-1:x+ov+1])/2.\n dout[j,i] += shift*mflux\n dout[j,i+1] -= shift*mflux\n # vertical BFE\n av = 0\n if hasattr(addInfo, 'bfe_a'): av += addInfo.bfe_a\n if hasattr(addInfo, 'bfe_aplus'): av -= addInfo.bfe_aplus\n for i in range(psSize):\n x = N//2+(i-psSize//2)*ov\n for j in range(psSize-1):\n y = N//2+(j-psSize//2)*ov\n shift = ov * av * (out[j+1,i]-out[j,i]) / 2. # in sub-pixels, average over exposure\n if hasattr(addInfo, 'bfe_overwrite'): shift = ov * av * (addInfo.stamp_in[j+1,i]-addInfo.stamp_in[j,i]) / 2.\n mflux = numpy.sum(bigStamp[y+ov-1:y+ov+1,x:x+ov])/2.\n dout[j,i] += shift*mflux\n dout[j+1,i] -= shift*mflux\n out+=dout\n\n if hasattr(addInfo, 'bfe_overwrite'): out=dout\n\n return(out)\n\n#\n# same input format but returns moments of the PSF\n# A, xc, yc, T, e1, e2\ndef psfmoments(sed, filt, scanum, pos, offsets, addInfo):\n N = N_STD # must be even\n ov = OV_STD\n if hasattr(addInfo,'many'):\n ov = addInfo.force_ov\n if hasattr(addInfo,'FastMode'):\n if addInfo.FastMode:\n N = N//2\n addInfoX = copy.deepcopy(addInfo); addInfoX.ctr = numpy.zeros((2)); addInfoX.F = 1.\n bigStamp = oversamp_psf(sed, filt, ov, N, scanum, pos, offsets, addInfoX)\n bigStamp = convolve(bigStamp, numpy.ones((ov,ov)), mode='full', method='direct')/ov**2\n Np = N+ov-1\n # moment format: A,x,y,Cxx,Cxy,Cyy\n mom = numpy.asarray([1,0,0,4*ov**2,0,4*ov**2]).astype(numpy.float64)\n newmom = numpy.zeros_like(mom)\n con = .5 # convergence factor\n xx1 = numpy.tile(numpy.linspace(-(Np-1)/2., (Np-1)/2., Np), (Np,1))\n yy1 = numpy.copy(xx1.T)\n for iter in range(256):\n det = mom[3]*mom[5]-mom[4]**2\n xx = xx1-mom[1]\n yy = yy1-mom[2]\n G = numpy.exp((-mom[5]*xx**2 + 2*mom[4]*xx*yy - mom[3]*yy**2)/2./det) * bigStamp\n newmom[0] = numpy.sum(G)\n newmom[1] = numpy.sum(G*xx)\n newmom[2] = numpy.sum(G*yy)\n newmom[3] = numpy.sum(G*xx**2)\n newmom[4] = numpy.sum(G*xx*yy)\n newmom[5] = numpy.sum(G*yy**2)\n mom[0] = 2*newmom[0]\n err = newmom[1:]/newmom[0]; err[-3:] -= mom[-3:]/2.\n mom[1:] += err*con\n return(numpy.array([mom[0], mom[1]/ov, mom[2]/ov, (mom[3]+mom[5])/ov**2, (mom[3]-mom[5])/(mom[3]+mom[5]), 2*mom[4]/(mom[3]+mom[5])]))\n\n# returns chi^2\n# var = read noise variance\ndef chi2_postage_stamp(obs, theory, var):\n obs2 = numpy.maximum(obs+var, 1e-24)\n return(numpy.sum(theory+var-obs2-obs2*numpy.log((theory+var)/obs2))*2)\n",
"step-ids": [
13,
14,
15,
18,
23
]
}
|
[
13,
14,
15,
18,
23
] |
import torch
import torch.nn as nn
import torch.nn.functional as F
class Net(torch.nn.Module):
def __init__(self, layer_sizes=[256, 128, 2], dropout_prob=None, device=None):
super(Net, self).__init__()
self.device = device
if dropout_prob is not None and dropout_prob > 0.5:
print("Are you sure dropout_prob is supposed to be greater than 0.5?")
# Load Roberta
self.roberta = torch.hub.load(
"pytorch/fairseq", "roberta.base", pretrained=True
)
for param in self.roberta.parameters():
param.requires_grad = False
self.roberta.eval()
# Load ResNet
resnet_full = torch.hub.load(
"pytorch/vision:v0.6.0", "resnet18", pretrained=True
)
self.resnet = torch.nn.Sequential(*list(resnet_full.children())[:-1])
# for param in self.resnet.parameters():
# param.requires_grad = False
# self.resnet.eval()
# self.lstm = nn.LSTM(input_size=768, hidden_size=768 * 2)
# self.lstm.eval()
# Layers
self.bns = nn.ModuleList()
self.fcs = nn.ModuleList()
self.drops = None if dropout_prob is None else nn.ModuleList()
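        # input width: 2 ResNet-18 embeddings (512 each) + 2 RoBERTa-base mean
        # vectors (768 each) + 2 category vectors (10 each, inferred from the
        # 2 * 10 term) + 4 scalars (the two lengths and two days-posted values)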
prev_size = 2 * 512 + 2 * 768 + 2 * 10 + 2 * 2
for i, size in enumerate(layer_sizes):
self.bns.append(nn.BatchNorm1d(prev_size))
self.fcs.append(nn.Linear(prev_size, size))
if dropout_prob is not None:
self.drops.append(nn.Dropout(p=dropout_prob))
prev_size = size
def forward(self, inputs):
first_images = inputs["image1"].to(self.device)
first_text = inputs["text1"]
first_length = inputs["length1"].to(self.device)
first_categories = inputs["categories1"].to(self.device)
first_days_posted = inputs["days_posted1"].to(self.device)
second_images = inputs["image2"].to(self.device)
second_text = inputs["text2"]
second_length = inputs["length2"].to(self.device)
second_categories = inputs["categories2"].to(self.device)
second_days_posted = inputs["days_posted2"].to(self.device)
# Resnet
image_tensor_one = self.resnet.forward(first_images)
image_tensor_two = self.resnet.forward(second_images)
# Roberta
text_features1 = torch.Tensor()
text_features2 = torch.Tensor()
text_features1 = text_features1.to(self.device)
text_features2 = text_features2.to(self.device)
for text in first_text:
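            # encode() tokenizes; slicing to 512 respects RoBERTa's context limit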
first_tokens = self.roberta.encode(text)[:512]
features = self.roberta.extract_features(first_tokens)
feature_means = torch.mean(features, dim=1)
# features = torch.reshape(features, (-1, 1,768))
# output, (hn, cn) = self.lstm(features)
# cn = torch.reshape(cn, (1, 768 * 2))
text_features1 = torch.cat([text_features1, feature_means])
for text in second_text:
second_tokens = self.roberta.encode(text)[:512]
features = self.roberta.extract_features(second_tokens)
# print("DIMENSION OF FEATURES ", features.shape)
feature_means = torch.mean(features, dim=1)
# features = torch.reshape(features, (-1, 1,768))
# output, (hn, cn) = self.lstm(features)
# cn = torch.reshape(cn, (1, 768 * 2))
# print("DIMENSION OF FEATURES ", features.shape)
text_features2 = torch.cat([text_features2, feature_means])
# Concatenated tensor
concat_tensor = torch.cat((image_tensor_one, image_tensor_two), 1)
concat_tensor = torch.squeeze(concat_tensor)
concat_tensor = torch.cat((text_features1, text_features2, concat_tensor), 1)
additional_features = torch.cat(
[
torch.reshape(first_length, (-1, 1)),
torch.reshape(second_length, (-1, 1)),
torch.reshape(first_days_posted, (-1, 1)),
torch.reshape(second_days_posted, (-1, 1)),
],
dim=1,
)
concat_tensor = torch.cat(
[
concat_tensor,
additional_features.float(),
first_categories.float(),
second_categories.float(),
],
dim=1,
)
x = concat_tensor
zipped_layers = (
zip(self.bns, self.fcs, [None] * len(self.bns))
if self.drops is None
else zip(self.bns, self.fcs, self.drops)
)
for i, (bn, fc, drop) in enumerate(zipped_layers):
x = bn(x)
if drop is not None:
x = drop(x)
if i == len(self.bns) - 1:
x = fc(x)
else:
x = F.relu(fc(x))
return x
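

# A minimal smoke-test sketch under stated assumptions: batch size 2, 224x224
# RGB images for ResNet, and 10-dim category vectors as implied by the
# `2 * 10` term in the layer sizing. All field names follow forward().
# Note: torch.hub downloads pretrained RoBERTa/ResNet weights on first run.
if __name__ == "__main__":
    net = Net(device="cpu").eval()
    inputs = {
        "image1": torch.randn(2, 3, 224, 224),
        "image2": torch.randn(2, 3, 224, 224),
        "text1": ["a vintage lamp", "wooden desk"],
        "text2": ["office chair", "floor rug"],
        "length1": torch.tensor([3.0, 2.0]),
        "length2": torch.tensor([2.0, 2.0]),
        "categories1": torch.zeros(2, 10),
        "categories2": torch.zeros(2, 10),
        "days_posted1": torch.tensor([4.0, 9.0]),
        "days_posted2": torch.tensor([1.0, 7.0]),
    }
    with torch.no_grad():
        print(net(inputs).shape)  # expected: torch.Size([2, 2])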
|
normal
|
{
"blob_id": "4711adcc7c95993ec13b9d06fa674aa064f79bfd",
"index": 314,
"step-1": "<mask token>\n\n\nclass Net(torch.nn.Module):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Net(torch.nn.Module):\n\n def __init__(self, layer_sizes=[256, 128, 2], dropout_prob=None, device\n =None):\n super(Net, self).__init__()\n self.device = device\n if dropout_prob is not None and dropout_prob > 0.5:\n print(\n 'Are you sure dropout_prob is supposed to be greater than 0.5?'\n )\n self.roberta = torch.hub.load('pytorch/fairseq', 'roberta.base',\n pretrained=True)\n for param in self.roberta.parameters():\n param.requires_grad = False\n self.roberta.eval()\n resnet_full = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18',\n pretrained=True)\n self.resnet = torch.nn.Sequential(*list(resnet_full.children())[:-1])\n self.bns = nn.ModuleList()\n self.fcs = nn.ModuleList()\n self.drops = None if dropout_prob is None else nn.ModuleList()\n prev_size = 2 * 512 + 2 * 768 + 2 * 10 + 2 * 2\n for i, size in enumerate(layer_sizes):\n self.bns.append(nn.BatchNorm1d(prev_size))\n self.fcs.append(nn.Linear(prev_size, size))\n if dropout_prob is not None:\n self.drops.append(nn.Dropout(p=dropout_prob))\n prev_size = size\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Net(torch.nn.Module):\n\n def __init__(self, layer_sizes=[256, 128, 2], dropout_prob=None, device\n =None):\n super(Net, self).__init__()\n self.device = device\n if dropout_prob is not None and dropout_prob > 0.5:\n print(\n 'Are you sure dropout_prob is supposed to be greater than 0.5?'\n )\n self.roberta = torch.hub.load('pytorch/fairseq', 'roberta.base',\n pretrained=True)\n for param in self.roberta.parameters():\n param.requires_grad = False\n self.roberta.eval()\n resnet_full = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18',\n pretrained=True)\n self.resnet = torch.nn.Sequential(*list(resnet_full.children())[:-1])\n self.bns = nn.ModuleList()\n self.fcs = nn.ModuleList()\n self.drops = None if dropout_prob is None else nn.ModuleList()\n prev_size = 2 * 512 + 2 * 768 + 2 * 10 + 2 * 2\n for i, size in enumerate(layer_sizes):\n self.bns.append(nn.BatchNorm1d(prev_size))\n self.fcs.append(nn.Linear(prev_size, size))\n if dropout_prob is not None:\n self.drops.append(nn.Dropout(p=dropout_prob))\n prev_size = size\n\n def forward(self, inputs):\n first_images = inputs['image1'].to(self.device)\n first_text = inputs['text1']\n first_length = inputs['length1'].to(self.device)\n first_categories = inputs['categories1'].to(self.device)\n first_days_posted = inputs['days_posted1'].to(self.device)\n second_images = inputs['image2'].to(self.device)\n second_text = inputs['text2']\n second_length = inputs['length2'].to(self.device)\n second_categories = inputs['categories2'].to(self.device)\n second_days_posted = inputs['days_posted2'].to(self.device)\n image_tensor_one = self.resnet.forward(first_images)\n image_tensor_two = self.resnet.forward(second_images)\n text_features1 = torch.Tensor()\n text_features2 = torch.Tensor()\n text_features1 = text_features1.to(self.device)\n text_features2 = text_features2.to(self.device)\n for text in first_text:\n first_tokens = self.roberta.encode(text)[:512]\n features = self.roberta.extract_features(first_tokens)\n feature_means = torch.mean(features, dim=1)\n text_features1 = torch.cat([text_features1, feature_means])\n for text in second_text:\n second_tokens = self.roberta.encode(text)[:512]\n features = self.roberta.extract_features(second_tokens)\n feature_means = torch.mean(features, dim=1)\n text_features2 = torch.cat([text_features2, feature_means])\n concat_tensor = torch.cat((image_tensor_one, image_tensor_two), 1)\n concat_tensor = torch.squeeze(concat_tensor)\n concat_tensor = torch.cat((text_features1, text_features2,\n concat_tensor), 1)\n additional_features = torch.cat([torch.reshape(first_length, (-1, 1\n )), torch.reshape(second_length, (-1, 1)), torch.reshape(\n first_days_posted, (-1, 1)), torch.reshape(second_days_posted,\n (-1, 1))], dim=1)\n concat_tensor = torch.cat([concat_tensor, additional_features.float\n (), first_categories.float(), second_categories.float()], dim=1)\n x = concat_tensor\n zipped_layers = zip(self.bns, self.fcs, [None] * len(self.bns)\n ) if self.drops is None else zip(self.bns, self.fcs, self.drops)\n for i, (bn, fc, drop) in enumerate(zipped_layers):\n x = bn(x)\n if drop is not None:\n x = drop(x)\n if i == len(self.bns) - 1:\n x = fc(x)\n else:\n x = F.relu(fc(x))\n return x\n",
"step-4": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Net(torch.nn.Module):\n\n def __init__(self, layer_sizes=[256, 128, 2], dropout_prob=None, device\n =None):\n super(Net, self).__init__()\n self.device = device\n if dropout_prob is not None and dropout_prob > 0.5:\n print(\n 'Are you sure dropout_prob is supposed to be greater than 0.5?'\n )\n self.roberta = torch.hub.load('pytorch/fairseq', 'roberta.base',\n pretrained=True)\n for param in self.roberta.parameters():\n param.requires_grad = False\n self.roberta.eval()\n resnet_full = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18',\n pretrained=True)\n self.resnet = torch.nn.Sequential(*list(resnet_full.children())[:-1])\n self.bns = nn.ModuleList()\n self.fcs = nn.ModuleList()\n self.drops = None if dropout_prob is None else nn.ModuleList()\n prev_size = 2 * 512 + 2 * 768 + 2 * 10 + 2 * 2\n for i, size in enumerate(layer_sizes):\n self.bns.append(nn.BatchNorm1d(prev_size))\n self.fcs.append(nn.Linear(prev_size, size))\n if dropout_prob is not None:\n self.drops.append(nn.Dropout(p=dropout_prob))\n prev_size = size\n\n def forward(self, inputs):\n first_images = inputs['image1'].to(self.device)\n first_text = inputs['text1']\n first_length = inputs['length1'].to(self.device)\n first_categories = inputs['categories1'].to(self.device)\n first_days_posted = inputs['days_posted1'].to(self.device)\n second_images = inputs['image2'].to(self.device)\n second_text = inputs['text2']\n second_length = inputs['length2'].to(self.device)\n second_categories = inputs['categories2'].to(self.device)\n second_days_posted = inputs['days_posted2'].to(self.device)\n image_tensor_one = self.resnet.forward(first_images)\n image_tensor_two = self.resnet.forward(second_images)\n text_features1 = torch.Tensor()\n text_features2 = torch.Tensor()\n text_features1 = text_features1.to(self.device)\n text_features2 = text_features2.to(self.device)\n for text in first_text:\n first_tokens = self.roberta.encode(text)[:512]\n features = self.roberta.extract_features(first_tokens)\n feature_means = torch.mean(features, dim=1)\n text_features1 = torch.cat([text_features1, feature_means])\n for text in second_text:\n second_tokens = self.roberta.encode(text)[:512]\n features = self.roberta.extract_features(second_tokens)\n feature_means = torch.mean(features, dim=1)\n text_features2 = torch.cat([text_features2, feature_means])\n concat_tensor = torch.cat((image_tensor_one, image_tensor_two), 1)\n concat_tensor = torch.squeeze(concat_tensor)\n concat_tensor = torch.cat((text_features1, text_features2,\n concat_tensor), 1)\n additional_features = torch.cat([torch.reshape(first_length, (-1, 1\n )), torch.reshape(second_length, (-1, 1)), torch.reshape(\n first_days_posted, (-1, 1)), torch.reshape(second_days_posted,\n (-1, 1))], dim=1)\n concat_tensor = torch.cat([concat_tensor, additional_features.float\n (), first_categories.float(), second_categories.float()], dim=1)\n x = concat_tensor\n zipped_layers = zip(self.bns, self.fcs, [None] * len(self.bns)\n ) if self.drops is None else zip(self.bns, self.fcs, self.drops)\n for i, (bn, fc, drop) in enumerate(zipped_layers):\n x = bn(x)\n if drop is not None:\n x = drop(x)\n if i == len(self.bns) - 1:\n x = fc(x)\n else:\n x = F.relu(fc(x))\n return x\n",
"step-5": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass Net(torch.nn.Module):\n def __init__(self, layer_sizes=[256, 128, 2], dropout_prob=None, device=None):\n super(Net, self).__init__()\n self.device = device\n\n if dropout_prob is not None and dropout_prob > 0.5:\n print(\"Are you sure dropout_prob is supposed to be greater than 0.5?\")\n\n # Load Roberta\n self.roberta = torch.hub.load(\n \"pytorch/fairseq\", \"roberta.base\", pretrained=True\n )\n for param in self.roberta.parameters():\n param.requires_grad = False\n self.roberta.eval()\n\n # Load ResNet\n resnet_full = torch.hub.load(\n \"pytorch/vision:v0.6.0\", \"resnet18\", pretrained=True\n )\n self.resnet = torch.nn.Sequential(*list(resnet_full.children())[:-1])\n # for param in self.resnet.parameters():\n # param.requires_grad = False\n # self.resnet.eval()\n\n # self.lstm = nn.LSTM(input_size=768, hidden_size=768 * 2)\n # self.lstm.eval()\n\n # Layers\n self.bns = nn.ModuleList()\n self.fcs = nn.ModuleList()\n self.drops = None if dropout_prob is None else nn.ModuleList()\n prev_size = 2 * 512 + 2 * 768 + 2 * 10 + 2 * 2\n for i, size in enumerate(layer_sizes):\n self.bns.append(nn.BatchNorm1d(prev_size))\n self.fcs.append(nn.Linear(prev_size, size))\n if dropout_prob is not None:\n self.drops.append(nn.Dropout(p=dropout_prob))\n prev_size = size\n\n def forward(self, inputs):\n first_images = inputs[\"image1\"].to(self.device)\n first_text = inputs[\"text1\"]\n first_length = inputs[\"length1\"].to(self.device)\n first_categories = inputs[\"categories1\"].to(self.device)\n first_days_posted = inputs[\"days_posted1\"].to(self.device)\n\n second_images = inputs[\"image2\"].to(self.device)\n second_text = inputs[\"text2\"]\n second_length = inputs[\"length2\"].to(self.device)\n second_categories = inputs[\"categories2\"].to(self.device)\n second_days_posted = inputs[\"days_posted2\"].to(self.device)\n\n # Resnet\n image_tensor_one = self.resnet.forward(first_images)\n image_tensor_two = self.resnet.forward(second_images)\n # Roberta\n text_features1 = torch.Tensor()\n text_features2 = torch.Tensor()\n text_features1 = text_features1.to(self.device)\n text_features2 = text_features2.to(self.device)\n for text in first_text:\n first_tokens = self.roberta.encode(text)[:512]\n features = self.roberta.extract_features(first_tokens)\n feature_means = torch.mean(features, dim=1)\n # features = torch.reshape(features, (-1, 1,768))\n # output, (hn, cn) = self.lstm(features)\n # cn = torch.reshape(cn, (1, 768 * 2))\n text_features1 = torch.cat([text_features1, feature_means])\n for text in second_text:\n second_tokens = self.roberta.encode(text)[:512]\n features = self.roberta.extract_features(second_tokens)\n # print(\"DIMENSION OF FEATURES \", features.shape)\n feature_means = torch.mean(features, dim=1)\n # features = torch.reshape(features, (-1, 1,768))\n # output, (hn, cn) = self.lstm(features)\n # cn = torch.reshape(cn, (1, 768 * 2))\n # print(\"DIMENSION OF FEATURES \", features.shape)\n text_features2 = torch.cat([text_features2, feature_means])\n\n # Concatenated tensor\n concat_tensor = torch.cat((image_tensor_one, image_tensor_two), 1)\n concat_tensor = torch.squeeze(concat_tensor)\n concat_tensor = torch.cat((text_features1, text_features2, concat_tensor), 1)\n additional_features = torch.cat(\n [\n torch.reshape(first_length, (-1, 1)),\n torch.reshape(second_length, (-1, 1)),\n torch.reshape(first_days_posted, (-1, 1)),\n torch.reshape(second_days_posted, (-1, 1)),\n ],\n dim=1,\n )\n 
concat_tensor = torch.cat(\n [\n concat_tensor,\n additional_features.float(),\n first_categories.float(),\n second_categories.float(),\n ],\n dim=1,\n )\n\n x = concat_tensor\n zipped_layers = (\n zip(self.bns, self.fcs, [None] * len(self.bns))\n if self.drops is None\n else zip(self.bns, self.fcs, self.drops)\n )\n for i, (bn, fc, drop) in enumerate(zipped_layers):\n x = bn(x)\n if drop is not None:\n x = drop(x)\n if i == len(self.bns) - 1:\n x = fc(x)\n else:\n x = F.relu(fc(x))\n\n return x\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from ShazamAPI import Shazam
import json
import sys
print("oi")
|
normal
|
{
"blob_id": "c248d653556ecdf27e56b57930832eb293dfd579",
"index": 5413,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('oi')\n",
"step-3": "from ShazamAPI import Shazam\nimport json\nimport sys\nprint('oi')\n",
"step-4": "from ShazamAPI import Shazam\nimport json\nimport sys\n\nprint(\"oi\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: UTF-8 -*-
# File name: ukWorkingDays
# Created by JKChang
# 29/07/2020, 11:20
# Tag:
# Description:
from datetime import timedelta, datetime
from workalendar.europe import UnitedKingdom
cal = UnitedKingdom()
print(cal.holidays(2020))
def workingDate(start,end):
cal = UnitedKingdom()
res = []
delta = end - start
    for i in range(delta.days + 1):
        day = start + timedelta(days=i)
        # is_working_day() already returns False for weekends and UK bank
        # holidays, so no separate weekday() check is needed
        if cal.is_working_day(day):
            res.append(day)
return res
start = datetime.today()
end = datetime(2020, 12, 23)
r = workingDate(start,end)
for d in r:
print(d.strftime('%d-%B-%Y'))
print('\n'*3)
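
# workalendar can also count working days directly; a minimal sketch, assuming
# the get_working_days_delta helper available in recent workalendar releases:
print('Working days until deadline:', cal.get_working_days_delta(start, end))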
|
normal
|
{
"blob_id": "feed412278d9e711e49ef209ece0876c1de4a873",
"index": 886,
"step-1": "<mask token>\n\n\ndef workingDate(start, end):\n cal = UnitedKingdom()\n res = []\n delta = end - start\n for i in range(delta.days + 1):\n day = start + timedelta(days=i)\n if cal.is_working_day(day) or day.weekday() < 5:\n res.append(day)\n else:\n pass\n return res\n\n\n<mask token>\n",
"step-2": "<mask token>\nprint(cal.holidays(2020))\n\n\ndef workingDate(start, end):\n cal = UnitedKingdom()\n res = []\n delta = end - start\n for i in range(delta.days + 1):\n day = start + timedelta(days=i)\n if cal.is_working_day(day) or day.weekday() < 5:\n res.append(day)\n else:\n pass\n return res\n\n\n<mask token>\nfor d in r:\n print(d.strftime('%d-%B-%Y'))\n print('\\n' * 3)\n",
"step-3": "<mask token>\ncal = UnitedKingdom()\nprint(cal.holidays(2020))\n\n\ndef workingDate(start, end):\n cal = UnitedKingdom()\n res = []\n delta = end - start\n for i in range(delta.days + 1):\n day = start + timedelta(days=i)\n if cal.is_working_day(day) or day.weekday() < 5:\n res.append(day)\n else:\n pass\n return res\n\n\nstart = datetime.today()\nend = datetime(2020, 12, 23)\nr = workingDate(start, end)\nfor d in r:\n print(d.strftime('%d-%B-%Y'))\n print('\\n' * 3)\n",
"step-4": "from datetime import date, timedelta, datetime\nfrom workalendar.europe import UnitedKingdom\ncal = UnitedKingdom()\nprint(cal.holidays(2020))\n\n\ndef workingDate(start, end):\n cal = UnitedKingdom()\n res = []\n delta = end - start\n for i in range(delta.days + 1):\n day = start + timedelta(days=i)\n if cal.is_working_day(day) or day.weekday() < 5:\n res.append(day)\n else:\n pass\n return res\n\n\nstart = datetime.today()\nend = datetime(2020, 12, 23)\nr = workingDate(start, end)\nfor d in r:\n print(d.strftime('%d-%B-%Y'))\n print('\\n' * 3)\n",
"step-5": "# -*- coding: UTF-8 -*-\n# File name: ukWorkingDays\n# Created by JKChang\n# 29/07/2020, 11:20\n# Tag:\n# Description:\n\nfrom datetime import date,timedelta,datetime\nfrom workalendar.europe import UnitedKingdom\n\ncal = UnitedKingdom()\nprint(cal.holidays(2020))\n\ndef workingDate(start,end):\n cal = UnitedKingdom()\n res = []\n delta = end - start\n for i in range(delta.days +1):\n day = start + timedelta(days=i)\n if cal.is_working_day(day) or day.weekday() < 5:\n res.append(day)\n else:\n pass\n return res\n\nstart = datetime.today()\nend = datetime(2020, 12, 23)\nr = workingDate(start,end)\nfor d in r:\n print(d.strftime('%d-%B-%Y'))\n print('\\n'*3)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
"""Google Scraper
Usage:
web_scraper.py <search> <pages> <processes>
web_scraper.py (-h | --help)
Arguments:
<search> String to be Searched
<pages> Number of pages
<processes> Number of parallel processes
Options:
-h, --help Show this screen.
"""
import re
from functools import partial
from multiprocessing import Pool
from time import time as timer
import requests
from bs4 import BeautifulSoup
from docopt import docopt
def get_urls(search_string, start):
temp = []
url = 'http://www.google.com/search'
payload = {'q': search_string, 'start': start}
my_headers = {'User-agent': 'Mozilla/11.0'}
r = requests.get(url, params=payload, headers=my_headers)
soup = BeautifulSoup(r.text, 'html.parser')
h3tags = soup.find_all('h3', class_='r')
for h3 in h3tags:
try:
            temp.append(re.search(r'url\?q=(.+?)&sa', h3.a['href']).group(1))
        except (AttributeError, TypeError):  # no <a> tag or no URL match
continue
return temp
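
# Google paginates via the `start` query parameter in steps of 10, which is
# why main() builds pagelist as multiples of 10; e.g. the third results page:
#   get_urls('open data portals', '20')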
def main():
start = timer()
result = []
arguments = docopt(__doc__, version='MakMan Google Scrapper & Mass Exploiter')
search = arguments['<search>']
pages = arguments['<pages>']
processes = int(arguments['<processes>'])
####Changes for Multi-Processing####
make_request = partial(get_urls, search)
pagelist = [str(x * 10) for x in range(0, int(pages))]
with Pool(processes) as p:
tmp = p.map(make_request, pagelist)
for x in tmp:
result.extend(x)
####Changes for Multi-Processing####
result = list(set(result))
print(*result, sep='\n')
print('\nTotal URLs Scraped : %s ' % str(len(result)))
print('Script Execution Time : %s ' % (timer() - start,))
if __name__ == '__main__':
main()
# End
|
normal
|
{
"blob_id": "68dcac07bbdb4dde983939be98ece127d963c254",
"index": 3610,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_urls(search_string, start):\n temp = []\n url = 'http://www.google.com/search'\n payload = {'q': search_string, 'start': start}\n my_headers = {'User-agent': 'Mozilla/11.0'}\n r = requests.get(url, params=payload, headers=my_headers)\n soup = BeautifulSoup(r.text, 'html.parser')\n h3tags = soup.find_all('h3', class_='r')\n for h3 in h3tags:\n try:\n temp.append(re.search('url\\\\?q=(.+?)\\\\&sa', h3.a['href']).group(1))\n except:\n continue\n return temp\n\n\ndef main():\n start = timer()\n result = []\n arguments = docopt(__doc__, version=\n 'MakMan Google Scrapper & Mass Exploiter')\n search = arguments['<search>']\n pages = arguments['<pages>']\n processes = int(arguments['<processes>'])\n make_request = partial(get_urls, search)\n pagelist = [str(x * 10) for x in range(0, int(pages))]\n with Pool(processes) as p:\n tmp = p.map(make_request, pagelist)\n for x in tmp:\n result.extend(x)\n result = list(set(result))\n print(*result, sep='\\n')\n print('\\nTotal URLs Scraped : %s ' % str(len(result)))\n print('Script Execution Time : %s ' % (timer() - start,))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_urls(search_string, start):\n temp = []\n url = 'http://www.google.com/search'\n payload = {'q': search_string, 'start': start}\n my_headers = {'User-agent': 'Mozilla/11.0'}\n r = requests.get(url, params=payload, headers=my_headers)\n soup = BeautifulSoup(r.text, 'html.parser')\n h3tags = soup.find_all('h3', class_='r')\n for h3 in h3tags:\n try:\n temp.append(re.search('url\\\\?q=(.+?)\\\\&sa', h3.a['href']).group(1))\n except:\n continue\n return temp\n\n\ndef main():\n start = timer()\n result = []\n arguments = docopt(__doc__, version=\n 'MakMan Google Scrapper & Mass Exploiter')\n search = arguments['<search>']\n pages = arguments['<pages>']\n processes = int(arguments['<processes>'])\n make_request = partial(get_urls, search)\n pagelist = [str(x * 10) for x in range(0, int(pages))]\n with Pool(processes) as p:\n tmp = p.map(make_request, pagelist)\n for x in tmp:\n result.extend(x)\n result = list(set(result))\n print(*result, sep='\\n')\n print('\\nTotal URLs Scraped : %s ' % str(len(result)))\n print('Script Execution Time : %s ' % (timer() - start,))\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport re\nfrom functools import partial\nfrom multiprocessing import Pool\nfrom time import time as timer\nimport requests\nfrom bs4 import BeautifulSoup\nfrom docopt import docopt\n\n\ndef get_urls(search_string, start):\n temp = []\n url = 'http://www.google.com/search'\n payload = {'q': search_string, 'start': start}\n my_headers = {'User-agent': 'Mozilla/11.0'}\n r = requests.get(url, params=payload, headers=my_headers)\n soup = BeautifulSoup(r.text, 'html.parser')\n h3tags = soup.find_all('h3', class_='r')\n for h3 in h3tags:\n try:\n temp.append(re.search('url\\\\?q=(.+?)\\\\&sa', h3.a['href']).group(1))\n except:\n continue\n return temp\n\n\ndef main():\n start = timer()\n result = []\n arguments = docopt(__doc__, version=\n 'MakMan Google Scrapper & Mass Exploiter')\n search = arguments['<search>']\n pages = arguments['<pages>']\n processes = int(arguments['<processes>'])\n make_request = partial(get_urls, search)\n pagelist = [str(x * 10) for x in range(0, int(pages))]\n with Pool(processes) as p:\n tmp = p.map(make_request, pagelist)\n for x in tmp:\n result.extend(x)\n result = list(set(result))\n print(*result, sep='\\n')\n print('\\nTotal URLs Scraped : %s ' % str(len(result)))\n print('Script Execution Time : %s ' % (timer() - start,))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\"Google Scraper\n \nUsage:\n web_scraper.py <search> <pages> <processes>\n web_scraper.py (-h | --help)\n \nArguments:\n <search> String to be Searched\n <pages> Number of pages\n <processes> Number of parallel processes\n \nOptions:\n -h, --help Show this screen.\n \n\"\"\"\n\nimport re\nfrom functools import partial\nfrom multiprocessing import Pool\nfrom time import time as timer\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom docopt import docopt\n\n\ndef get_urls(search_string, start):\n temp = []\n url = 'http://www.google.com/search'\n payload = {'q': search_string, 'start': start}\n my_headers = {'User-agent': 'Mozilla/11.0'}\n r = requests.get(url, params=payload, headers=my_headers)\n soup = BeautifulSoup(r.text, 'html.parser')\n h3tags = soup.find_all('h3', class_='r')\n for h3 in h3tags:\n try:\n temp.append(re.search('url\\?q=(.+?)\\&sa', h3.a['href']).group(1))\n except:\n continue\n return temp\n\n\ndef main():\n start = timer()\n result = []\n arguments = docopt(__doc__, version='MakMan Google Scrapper & Mass Exploiter')\n search = arguments['<search>']\n pages = arguments['<pages>']\n processes = int(arguments['<processes>'])\n ####Changes for Multi-Processing####\n make_request = partial(get_urls, search)\n pagelist = [str(x * 10) for x in range(0, int(pages))]\n with Pool(processes) as p:\n tmp = p.map(make_request, pagelist)\n for x in tmp:\n result.extend(x)\n ####Changes for Multi-Processing####\n result = list(set(result))\n print(*result, sep='\\n')\n print('\\nTotal URLs Scraped : %s ' % str(len(result)))\n print('Script Execution Time : %s ' % (timer() - start,))\n\n\nif __name__ == '__main__':\n main()\n\n # End\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
import time
import argparse
import utils
from data_loader import DataLoader
from generate_model_predictions import sacrebleu_metric, compute_bleu
import tensorflow as tf
import json
from transformer import create_masks
# Since the target sequences are padded, it is important
# to apply a padding mask when calculating the loss.
def loss_function(real, pred, loss_object, pad_token_id):
"""Calculates total loss containing cross entropy with padding ignored.
Args:
real: Tensor of size [batch_size, length_logits, vocab_size]
pred: Tensor of size [batch_size, length_labels]
loss_object: Cross entropy loss
pad_token_id: Pad token id to ignore
Returns:
A scalar float tensor for loss.
"""
mask = tf.math.logical_not(tf.math.equal(real, pad_token_id))
loss_ = loss_object(real, pred)
mask = tf.cast(mask, dtype=loss_.dtype)
loss_ *= mask
return tf.reduce_sum(loss_) / tf.reduce_sum(mask)
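

# Minimal sketch (shapes assumed) of the masking above: with pad_token_id=0,
# only the two non-pad positions of `real` contribute to the averaged loss.
def _loss_masking_demo():
    loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
        from_logits=True, reduction='none')
    real = tf.constant([[5, 3, 0, 0]])   # last two tokens are padding
    pred = tf.random.uniform((1, 4, 8))  # [batch, length, vocab]
    return loss_function(real, pred, loss_object, pad_token_id=0)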
def train_step(model, loss_object, optimizer, inp, tar,
train_loss, train_accuracy, pad_token_id):
tar_inp = tar[:, :-1]
tar_real = tar[:, 1:]
enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, tar_inp)
with tf.GradientTape() as tape:
# training=True is only needed if there are layers with different
# behavior during training versus inference (e.g. Dropout).
predictions, _ = model(inp, tar_inp,
True,
enc_padding_mask,
combined_mask,
dec_padding_mask)
loss = loss_function(tar_real, predictions, loss_object, pad_token_id)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
train_loss(loss)
train_accuracy(tar_real, predictions)
def val_step(model, loss_object, inp, tar,
val_loss, val_accuracy, pad_token_id):
tar_inp = tar[:, :-1]
tar_real = tar[:, 1:]
enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, tar_inp)
predictions, _ = model(inp, tar_inp,
False,
enc_padding_mask,
combined_mask,
dec_padding_mask)
loss = loss_function(tar_real, predictions, loss_object, pad_token_id)
val_loss(loss)
val_accuracy(tar_real, predictions)
def compute_bleu_score(transformer_model, dataset, user_config, tokenizer_tar, epoch):
inp_language = user_config["inp_language"]
target_language = user_config["target_language"]
checkpoint_path = user_config["transformer_checkpoint_path"]
val_aligned_path_tar = user_config["val_data_path_{}".format(target_language)]
pred_file_path = "../log/log_{}_{}/".format(inp_language, target_language) + checkpoint_path.split('/')[
-1] + "_epoch-" + str(epoch) + "_prediction_{}.txt".format(target_language)
sacrebleu_metric(transformer_model, pred_file_path, None,
tokenizer_tar, dataset,
tokenizer_tar.MAX_LENGTH)
print("-----------------------------")
compute_bleu(pred_file_path, val_aligned_path_tar, print_all_scores=False)
print("-----------------------------")
    # pred_file_path already embeds the checkpoint name, epoch and target
    # language, so the former os.rename here was a no-op (identical paths)
    print("Saved translated prediction at {}".format(pred_file_path))
def do_training(user_config):
inp_language = user_config["inp_language"]
target_language = user_config["target_language"]
print("\n****Training model from {} to {}****\n".format(inp_language, target_language))
print("****Loading tokenizers****")
# load pre-trained tokenizer
tokenizer_inp, tokenizer_tar = utils.load_tokenizers(inp_language, target_language, user_config)
print("****Loading train dataset****")
# train data loader
train_aligned_path_inp = user_config["train_data_path_{}".format(inp_language)]
train_aligned_path_tar = user_config["train_data_path_{}".format(target_language)]
train_dataloader = DataLoader(user_config["transformer_batch_size"],
train_aligned_path_inp,
train_aligned_path_tar,
tokenizer_inp,
tokenizer_tar,
inp_language,
target_language,
True)
train_dataset = train_dataloader.get_data_loader()
print("****Loading val dataset****")
# val data loader
val_aligned_path_inp = user_config["val_data_path_{}".format(inp_language)]
val_aligned_path_tar = user_config["val_data_path_{}".format(target_language)]
val_dataloader = DataLoader(user_config["transformer_batch_size"] * 2, # for fast validation increase batch size
val_aligned_path_inp,
val_aligned_path_tar,
tokenizer_inp,
tokenizer_tar,
inp_language,
target_language,
False)
val_dataset = val_dataloader.get_data_loader()
# define loss and accuracy metrics
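    # from_logits=True: the transformer returns raw logits; reduction='none'
    # keeps per-token losses so loss_function can apply the padding mask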
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
val_loss = tf.keras.metrics.Mean(name='val_loss')
val_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='val_accuracy')
print("****Loading transformer model****")
# load model and optimizer
transformer_model, optimizer, ckpt_manager = \
utils.load_transformer_model(user_config, tokenizer_inp, tokenizer_tar)
epochs = user_config["transformer_epochs"]
print("\nTraining model now...")
for epoch in range(epochs):
print()
start = time.time()
train_loss.reset_states()
train_accuracy.reset_states()
val_loss.reset_states()
val_accuracy.reset_states()
        # inp -> source language, tar -> target language (both configurable)
for (batch, (inp, tar, _)) in enumerate(train_dataset):
train_step(transformer_model, loss_object, optimizer, inp, tar,
train_loss, train_accuracy, pad_token_id=tokenizer_tar.pad_token_id)
if batch % 50 == 0:
print('Train: Epoch {} Batch {} Loss {:.4f} Accuracy {:.4f}'.format(
epoch + 1, batch, train_loss.result(), train_accuracy.result()))
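            # run a full validation pass periodically within long epochs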
if (batch + 1) % 2200 == 0:
                # inp -> source language, tar -> target language (both configurable)
for (_, (inp, tar, _)) in enumerate(val_dataset):
val_step(transformer_model, loss_object, inp, tar,
val_loss, val_accuracy, pad_token_id=tokenizer_tar.pad_token_id)
print('Batch {}: Val Loss: {:.4f}, Val Accuracy: {:.4f}\n'.format(batch, val_loss.result(),
val_accuracy.result()))
if user_config["compute_bleu"]:
print("\nComputing BLEU at batch {}: ".format(batch))
compute_bleu_score(transformer_model, val_dataset, user_config, tokenizer_tar, batch * epoch + 1)
print("After {} epochs".format(epoch + 1))
print('Train Loss: {:.4f}, Train Accuracy: {:.4f}'.format(train_loss.result(), train_accuracy.result()))
        # inp -> source language, tar -> target language (both configurable)
for (batch, (inp, tar, _)) in enumerate(val_dataset):
val_step(transformer_model, loss_object, inp, tar,
val_loss, val_accuracy, pad_token_id=tokenizer_tar.pad_token_id)
print('Val Loss: {:.4f}, Val Accuracy: {:.4f}'.format(val_loss.result(), val_accuracy.result()))
print('Time taken for training epoch {}: {} secs'.format(epoch + 1, time.time() - start))
        # save a checkpoint after every training epoch
ckpt_save_path = ckpt_manager.save()
print('Saving checkpoint after epoch {} at {}'.format(epoch + 1, ckpt_save_path))
if user_config["compute_bleu"]:
print("\nComputing BLEU at epoch {}: ".format(epoch + 1))
compute_bleu_score(transformer_model, val_dataset, user_config, tokenizer_tar, epoch + 1)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--config", help="Configuration file containing training parameters", type=str)
args = parser.parse_args()
user_config = utils.load_file(args.config)
seed = user_config["random_seed"]
utils.set_seed(seed)
print(json.dumps(user_config, indent=2))
do_training(user_config)
if __name__ == "__main__":
main()
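
# Example invocation (script and config file names are illustrative):
#   python train_transformer.py --config ../config/user_config.json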
|
normal
|
{
"blob_id": "7613dde4f49044fbca13acad2dd75587ef68f477",
"index": 2903,
"step-1": "<mask token>\n\n\ndef loss_function(real, pred, loss_object, pad_token_id):\n \"\"\"Calculates total loss containing cross entropy with padding ignored.\n Args:\n real: Tensor of size [batch_size, length_logits, vocab_size]\n pred: Tensor of size [batch_size, length_labels]\n loss_object: Cross entropy loss\n pad_token_id: Pad token id to ignore\n Returns:\n A scalar float tensor for loss.\n \"\"\"\n mask = tf.math.logical_not(tf.math.equal(real, pad_token_id))\n loss_ = loss_object(real, pred)\n mask = tf.cast(mask, dtype=loss_.dtype)\n loss_ *= mask\n return tf.reduce_sum(loss_) / tf.reduce_sum(mask)\n\n\ndef train_step(model, loss_object, optimizer, inp, tar, train_loss,\n train_accuracy, pad_token_id):\n tar_inp = tar[:, :-1]\n tar_real = tar[:, 1:]\n enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp,\n tar_inp)\n with tf.GradientTape() as tape:\n predictions, _ = model(inp, tar_inp, True, enc_padding_mask,\n combined_mask, dec_padding_mask)\n loss = loss_function(tar_real, predictions, loss_object, pad_token_id)\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n train_loss(loss)\n train_accuracy(tar_real, predictions)\n\n\ndef val_step(model, loss_object, inp, tar, val_loss, val_accuracy, pad_token_id\n ):\n tar_inp = tar[:, :-1]\n tar_real = tar[:, 1:]\n enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp,\n tar_inp)\n predictions, _ = model(inp, tar_inp, False, enc_padding_mask,\n combined_mask, dec_padding_mask)\n loss = loss_function(tar_real, predictions, loss_object, pad_token_id)\n val_loss(loss)\n val_accuracy(tar_real, predictions)\n\n\n<mask token>\n\n\ndef do_training(user_config):\n inp_language = user_config['inp_language']\n target_language = user_config['target_language']\n print('\\n****Training model from {} to {}****\\n'.format(inp_language,\n target_language))\n print('****Loading tokenizers****')\n tokenizer_inp, tokenizer_tar = utils.load_tokenizers(inp_language,\n target_language, user_config)\n print('****Loading train dataset****')\n train_aligned_path_inp = user_config['train_data_path_{}'.format(\n inp_language)]\n train_aligned_path_tar = user_config['train_data_path_{}'.format(\n target_language)]\n train_dataloader = DataLoader(user_config['transformer_batch_size'],\n train_aligned_path_inp, train_aligned_path_tar, tokenizer_inp,\n tokenizer_tar, inp_language, target_language, True)\n train_dataset = train_dataloader.get_data_loader()\n print('****Loading val dataset****')\n val_aligned_path_inp = user_config['val_data_path_{}'.format(inp_language)]\n val_aligned_path_tar = user_config['val_data_path_{}'.format(\n target_language)]\n val_dataloader = DataLoader(user_config['transformer_batch_size'] * 2,\n val_aligned_path_inp, val_aligned_path_tar, tokenizer_inp,\n tokenizer_tar, inp_language, target_language, False)\n val_dataset = val_dataloader.get_data_loader()\n loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits\n =True, reduction='none')\n train_loss = tf.keras.metrics.Mean(name='train_loss')\n train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name=\n 'train_accuracy')\n val_loss = tf.keras.metrics.Mean(name='val_loss')\n val_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name=\n 'val_accuracy')\n print('****Loading transformer model****')\n transformer_model, optimizer, ckpt_manager = utils.load_transformer_model(\n user_config, tokenizer_inp, tokenizer_tar)\n epochs = 
user_config['transformer_epochs']\n print('\\nTraining model now...')\n for epoch in range(epochs):\n print()\n start = time.time()\n train_loss.reset_states()\n train_accuracy.reset_states()\n val_loss.reset_states()\n val_accuracy.reset_states()\n for batch, (inp, tar, _) in enumerate(train_dataset):\n train_step(transformer_model, loss_object, optimizer, inp, tar,\n train_loss, train_accuracy, pad_token_id=tokenizer_tar.\n pad_token_id)\n if batch % 50 == 0:\n print('Train: Epoch {} Batch {} Loss {:.4f} Accuracy {:.4f}'\n .format(epoch + 1, batch, train_loss.result(),\n train_accuracy.result()))\n if (batch + 1) % 2200 == 0:\n for _, (inp, tar, _) in enumerate(val_dataset):\n val_step(transformer_model, loss_object, inp, tar,\n val_loss, val_accuracy, pad_token_id=tokenizer_tar.\n pad_token_id)\n print('Batch {}: Val Loss: {:.4f}, Val Accuracy: {:.4f}\\n'.\n format(batch, val_loss.result(), val_accuracy.result()))\n if user_config['compute_bleu']:\n print('\\nComputing BLEU at batch {}: '.format(batch))\n compute_bleu_score(transformer_model, val_dataset,\n user_config, tokenizer_tar, batch * epoch + 1)\n print('After {} epochs'.format(epoch + 1))\n print('Train Loss: {:.4f}, Train Accuracy: {:.4f}'.format(\n train_loss.result(), train_accuracy.result()))\n for batch, (inp, tar, _) in enumerate(val_dataset):\n val_step(transformer_model, loss_object, inp, tar, val_loss,\n val_accuracy, pad_token_id=tokenizer_tar.pad_token_id)\n print('Val Loss: {:.4f}, Val Accuracy: {:.4f}'.format(val_loss.\n result(), val_accuracy.result()))\n print('Time taken for training epoch {}: {} secs'.format(epoch + 1,\n time.time() - start))\n ckpt_save_path = ckpt_manager.save()\n print('Saving checkpoint after epoch {} at {}'.format(epoch + 1,\n ckpt_save_path))\n if user_config['compute_bleu']:\n print('\\nComputing BLEU at epoch {}: '.format(epoch + 1))\n compute_bleu_score(transformer_model, val_dataset, user_config,\n tokenizer_tar, epoch + 1)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--config', help=\n 'Configuration file containing training parameters', type=str)\n args = parser.parse_args()\n user_config = utils.load_file(args.config)\n seed = user_config['random_seed']\n utils.set_seed(seed)\n print(json.dumps(user_config, indent=2))\n do_training(user_config)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef loss_function(real, pred, loss_object, pad_token_id):\n \"\"\"Calculates total loss containing cross entropy with padding ignored.\n Args:\n real: Tensor of size [batch_size, length_logits, vocab_size]\n pred: Tensor of size [batch_size, length_labels]\n loss_object: Cross entropy loss\n pad_token_id: Pad token id to ignore\n Returns:\n A scalar float tensor for loss.\n \"\"\"\n mask = tf.math.logical_not(tf.math.equal(real, pad_token_id))\n loss_ = loss_object(real, pred)\n mask = tf.cast(mask, dtype=loss_.dtype)\n loss_ *= mask\n return tf.reduce_sum(loss_) / tf.reduce_sum(mask)\n\n\ndef train_step(model, loss_object, optimizer, inp, tar, train_loss,\n train_accuracy, pad_token_id):\n tar_inp = tar[:, :-1]\n tar_real = tar[:, 1:]\n enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp,\n tar_inp)\n with tf.GradientTape() as tape:\n predictions, _ = model(inp, tar_inp, True, enc_padding_mask,\n combined_mask, dec_padding_mask)\n loss = loss_function(tar_real, predictions, loss_object, pad_token_id)\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n train_loss(loss)\n train_accuracy(tar_real, predictions)\n\n\ndef val_step(model, loss_object, inp, tar, val_loss, val_accuracy, pad_token_id\n ):\n tar_inp = tar[:, :-1]\n tar_real = tar[:, 1:]\n enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp,\n tar_inp)\n predictions, _ = model(inp, tar_inp, False, enc_padding_mask,\n combined_mask, dec_padding_mask)\n loss = loss_function(tar_real, predictions, loss_object, pad_token_id)\n val_loss(loss)\n val_accuracy(tar_real, predictions)\n\n\ndef compute_bleu_score(transformer_model, dataset, user_config,\n tokenizer_tar, epoch):\n inp_language = user_config['inp_language']\n target_language = user_config['target_language']\n checkpoint_path = user_config['transformer_checkpoint_path']\n val_aligned_path_tar = user_config['val_data_path_{}'.format(\n target_language)]\n pred_file_path = '../log/log_{}_{}/'.format(inp_language, target_language\n ) + checkpoint_path.split('/')[-1] + '_epoch-' + str(epoch\n ) + '_prediction_{}.txt'.format(target_language)\n sacrebleu_metric(transformer_model, pred_file_path, None, tokenizer_tar,\n dataset, tokenizer_tar.MAX_LENGTH)\n print('-----------------------------')\n compute_bleu(pred_file_path, val_aligned_path_tar, print_all_scores=False)\n print('-----------------------------')\n new_path = '../log/log_{}_{}/'.format(inp_language, target_language\n ) + checkpoint_path.split('/')[-1] + '_epoch-' + str(epoch\n ) + '_prediction_{}'.format(target_language) + '.txt'\n os.rename(pred_file_path, new_path)\n print('Saved translated prediction at {}'.format(new_path))\n\n\ndef do_training(user_config):\n inp_language = user_config['inp_language']\n target_language = user_config['target_language']\n print('\\n****Training model from {} to {}****\\n'.format(inp_language,\n target_language))\n print('****Loading tokenizers****')\n tokenizer_inp, tokenizer_tar = utils.load_tokenizers(inp_language,\n target_language, user_config)\n print('****Loading train dataset****')\n train_aligned_path_inp = user_config['train_data_path_{}'.format(\n inp_language)]\n train_aligned_path_tar = user_config['train_data_path_{}'.format(\n target_language)]\n train_dataloader = DataLoader(user_config['transformer_batch_size'],\n train_aligned_path_inp, train_aligned_path_tar, tokenizer_inp,\n tokenizer_tar, inp_language, target_language, True)\n 
train_dataset = train_dataloader.get_data_loader()\n print('****Loading val dataset****')\n val_aligned_path_inp = user_config['val_data_path_{}'.format(inp_language)]\n val_aligned_path_tar = user_config['val_data_path_{}'.format(\n target_language)]\n val_dataloader = DataLoader(user_config['transformer_batch_size'] * 2,\n val_aligned_path_inp, val_aligned_path_tar, tokenizer_inp,\n tokenizer_tar, inp_language, target_language, False)\n val_dataset = val_dataloader.get_data_loader()\n loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits\n =True, reduction='none')\n train_loss = tf.keras.metrics.Mean(name='train_loss')\n train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name=\n 'train_accuracy')\n val_loss = tf.keras.metrics.Mean(name='val_loss')\n val_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name=\n 'val_accuracy')\n print('****Loading transformer model****')\n transformer_model, optimizer, ckpt_manager = utils.load_transformer_model(\n user_config, tokenizer_inp, tokenizer_tar)\n epochs = user_config['transformer_epochs']\n print('\\nTraining model now...')\n for epoch in range(epochs):\n print()\n start = time.time()\n train_loss.reset_states()\n train_accuracy.reset_states()\n val_loss.reset_states()\n val_accuracy.reset_states()\n for batch, (inp, tar, _) in enumerate(train_dataset):\n train_step(transformer_model, loss_object, optimizer, inp, tar,\n train_loss, train_accuracy, pad_token_id=tokenizer_tar.\n pad_token_id)\n if batch % 50 == 0:\n print('Train: Epoch {} Batch {} Loss {:.4f} Accuracy {:.4f}'\n .format(epoch + 1, batch, train_loss.result(),\n train_accuracy.result()))\n if (batch + 1) % 2200 == 0:\n for _, (inp, tar, _) in enumerate(val_dataset):\n val_step(transformer_model, loss_object, inp, tar,\n val_loss, val_accuracy, pad_token_id=tokenizer_tar.\n pad_token_id)\n print('Batch {}: Val Loss: {:.4f}, Val Accuracy: {:.4f}\\n'.\n format(batch, val_loss.result(), val_accuracy.result()))\n if user_config['compute_bleu']:\n print('\\nComputing BLEU at batch {}: '.format(batch))\n compute_bleu_score(transformer_model, val_dataset,\n user_config, tokenizer_tar, batch * epoch + 1)\n print('After {} epochs'.format(epoch + 1))\n print('Train Loss: {:.4f}, Train Accuracy: {:.4f}'.format(\n train_loss.result(), train_accuracy.result()))\n for batch, (inp, tar, _) in enumerate(val_dataset):\n val_step(transformer_model, loss_object, inp, tar, val_loss,\n val_accuracy, pad_token_id=tokenizer_tar.pad_token_id)\n print('Val Loss: {:.4f}, Val Accuracy: {:.4f}'.format(val_loss.\n result(), val_accuracy.result()))\n print('Time taken for training epoch {}: {} secs'.format(epoch + 1,\n time.time() - start))\n ckpt_save_path = ckpt_manager.save()\n print('Saving checkpoint after epoch {} at {}'.format(epoch + 1,\n ckpt_save_path))\n if user_config['compute_bleu']:\n print('\\nComputing BLEU at epoch {}: '.format(epoch + 1))\n compute_bleu_score(transformer_model, val_dataset, user_config,\n tokenizer_tar, epoch + 1)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--config', help=\n 'Configuration file containing training parameters', type=str)\n args = parser.parse_args()\n user_config = utils.load_file(args.config)\n seed = user_config['random_seed']\n utils.set_seed(seed)\n print(json.dumps(user_config, indent=2))\n do_training(user_config)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef loss_function(real, pred, loss_object, pad_token_id):\n \"\"\"Calculates total loss containing cross entropy with padding ignored.\n Args:\n real: Tensor of size [batch_size, length_logits, vocab_size]\n pred: Tensor of size [batch_size, length_labels]\n loss_object: Cross entropy loss\n pad_token_id: Pad token id to ignore\n Returns:\n A scalar float tensor for loss.\n \"\"\"\n mask = tf.math.logical_not(tf.math.equal(real, pad_token_id))\n loss_ = loss_object(real, pred)\n mask = tf.cast(mask, dtype=loss_.dtype)\n loss_ *= mask\n return tf.reduce_sum(loss_) / tf.reduce_sum(mask)\n\n\ndef train_step(model, loss_object, optimizer, inp, tar, train_loss,\n train_accuracy, pad_token_id):\n tar_inp = tar[:, :-1]\n tar_real = tar[:, 1:]\n enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp,\n tar_inp)\n with tf.GradientTape() as tape:\n predictions, _ = model(inp, tar_inp, True, enc_padding_mask,\n combined_mask, dec_padding_mask)\n loss = loss_function(tar_real, predictions, loss_object, pad_token_id)\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n train_loss(loss)\n train_accuracy(tar_real, predictions)\n\n\ndef val_step(model, loss_object, inp, tar, val_loss, val_accuracy, pad_token_id\n ):\n tar_inp = tar[:, :-1]\n tar_real = tar[:, 1:]\n enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp,\n tar_inp)\n predictions, _ = model(inp, tar_inp, False, enc_padding_mask,\n combined_mask, dec_padding_mask)\n loss = loss_function(tar_real, predictions, loss_object, pad_token_id)\n val_loss(loss)\n val_accuracy(tar_real, predictions)\n\n\ndef compute_bleu_score(transformer_model, dataset, user_config,\n tokenizer_tar, epoch):\n inp_language = user_config['inp_language']\n target_language = user_config['target_language']\n checkpoint_path = user_config['transformer_checkpoint_path']\n val_aligned_path_tar = user_config['val_data_path_{}'.format(\n target_language)]\n pred_file_path = '../log/log_{}_{}/'.format(inp_language, target_language\n ) + checkpoint_path.split('/')[-1] + '_epoch-' + str(epoch\n ) + '_prediction_{}.txt'.format(target_language)\n sacrebleu_metric(transformer_model, pred_file_path, None, tokenizer_tar,\n dataset, tokenizer_tar.MAX_LENGTH)\n print('-----------------------------')\n compute_bleu(pred_file_path, val_aligned_path_tar, print_all_scores=False)\n print('-----------------------------')\n new_path = '../log/log_{}_{}/'.format(inp_language, target_language\n ) + checkpoint_path.split('/')[-1] + '_epoch-' + str(epoch\n ) + '_prediction_{}'.format(target_language) + '.txt'\n os.rename(pred_file_path, new_path)\n print('Saved translated prediction at {}'.format(new_path))\n\n\ndef do_training(user_config):\n inp_language = user_config['inp_language']\n target_language = user_config['target_language']\n print('\\n****Training model from {} to {}****\\n'.format(inp_language,\n target_language))\n print('****Loading tokenizers****')\n tokenizer_inp, tokenizer_tar = utils.load_tokenizers(inp_language,\n target_language, user_config)\n print('****Loading train dataset****')\n train_aligned_path_inp = user_config['train_data_path_{}'.format(\n inp_language)]\n train_aligned_path_tar = user_config['train_data_path_{}'.format(\n target_language)]\n train_dataloader = DataLoader(user_config['transformer_batch_size'],\n train_aligned_path_inp, train_aligned_path_tar, tokenizer_inp,\n tokenizer_tar, inp_language, target_language, True)\n 
train_dataset = train_dataloader.get_data_loader()\n print('****Loading val dataset****')\n val_aligned_path_inp = user_config['val_data_path_{}'.format(inp_language)]\n val_aligned_path_tar = user_config['val_data_path_{}'.format(\n target_language)]\n val_dataloader = DataLoader(user_config['transformer_batch_size'] * 2,\n val_aligned_path_inp, val_aligned_path_tar, tokenizer_inp,\n tokenizer_tar, inp_language, target_language, False)\n val_dataset = val_dataloader.get_data_loader()\n loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits\n =True, reduction='none')\n train_loss = tf.keras.metrics.Mean(name='train_loss')\n train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name=\n 'train_accuracy')\n val_loss = tf.keras.metrics.Mean(name='val_loss')\n val_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name=\n 'val_accuracy')\n print('****Loading transformer model****')\n transformer_model, optimizer, ckpt_manager = utils.load_transformer_model(\n user_config, tokenizer_inp, tokenizer_tar)\n epochs = user_config['transformer_epochs']\n print('\\nTraining model now...')\n for epoch in range(epochs):\n print()\n start = time.time()\n train_loss.reset_states()\n train_accuracy.reset_states()\n val_loss.reset_states()\n val_accuracy.reset_states()\n for batch, (inp, tar, _) in enumerate(train_dataset):\n train_step(transformer_model, loss_object, optimizer, inp, tar,\n train_loss, train_accuracy, pad_token_id=tokenizer_tar.\n pad_token_id)\n if batch % 50 == 0:\n print('Train: Epoch {} Batch {} Loss {:.4f} Accuracy {:.4f}'\n .format(epoch + 1, batch, train_loss.result(),\n train_accuracy.result()))\n if (batch + 1) % 2200 == 0:\n for _, (inp, tar, _) in enumerate(val_dataset):\n val_step(transformer_model, loss_object, inp, tar,\n val_loss, val_accuracy, pad_token_id=tokenizer_tar.\n pad_token_id)\n print('Batch {}: Val Loss: {:.4f}, Val Accuracy: {:.4f}\\n'.\n format(batch, val_loss.result(), val_accuracy.result()))\n if user_config['compute_bleu']:\n print('\\nComputing BLEU at batch {}: '.format(batch))\n compute_bleu_score(transformer_model, val_dataset,\n user_config, tokenizer_tar, batch * epoch + 1)\n print('After {} epochs'.format(epoch + 1))\n print('Train Loss: {:.4f}, Train Accuracy: {:.4f}'.format(\n train_loss.result(), train_accuracy.result()))\n for batch, (inp, tar, _) in enumerate(val_dataset):\n val_step(transformer_model, loss_object, inp, tar, val_loss,\n val_accuracy, pad_token_id=tokenizer_tar.pad_token_id)\n print('Val Loss: {:.4f}, Val Accuracy: {:.4f}'.format(val_loss.\n result(), val_accuracy.result()))\n print('Time taken for training epoch {}: {} secs'.format(epoch + 1,\n time.time() - start))\n ckpt_save_path = ckpt_manager.save()\n print('Saving checkpoint after epoch {} at {}'.format(epoch + 1,\n ckpt_save_path))\n if user_config['compute_bleu']:\n print('\\nComputing BLEU at epoch {}: '.format(epoch + 1))\n compute_bleu_score(transformer_model, val_dataset, user_config,\n tokenizer_tar, epoch + 1)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--config', help=\n 'Configuration file containing training parameters', type=str)\n args = parser.parse_args()\n user_config = utils.load_file(args.config)\n seed = user_config['random_seed']\n utils.set_seed(seed)\n print(json.dumps(user_config, indent=2))\n do_training(user_config)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import time\nimport argparse\nimport utils\nfrom data_loader import DataLoader\nfrom generate_model_predictions import sacrebleu_metric, compute_bleu\nimport tensorflow as tf\nimport os\nimport json\nfrom transformer import create_masks\n\n\ndef loss_function(real, pred, loss_object, pad_token_id):\n \"\"\"Calculates total loss containing cross entropy with padding ignored.\n Args:\n real: Tensor of size [batch_size, length_logits, vocab_size]\n pred: Tensor of size [batch_size, length_labels]\n loss_object: Cross entropy loss\n pad_token_id: Pad token id to ignore\n Returns:\n A scalar float tensor for loss.\n \"\"\"\n mask = tf.math.logical_not(tf.math.equal(real, pad_token_id))\n loss_ = loss_object(real, pred)\n mask = tf.cast(mask, dtype=loss_.dtype)\n loss_ *= mask\n return tf.reduce_sum(loss_) / tf.reduce_sum(mask)\n\n\ndef train_step(model, loss_object, optimizer, inp, tar, train_loss,\n train_accuracy, pad_token_id):\n tar_inp = tar[:, :-1]\n tar_real = tar[:, 1:]\n enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp,\n tar_inp)\n with tf.GradientTape() as tape:\n predictions, _ = model(inp, tar_inp, True, enc_padding_mask,\n combined_mask, dec_padding_mask)\n loss = loss_function(tar_real, predictions, loss_object, pad_token_id)\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n train_loss(loss)\n train_accuracy(tar_real, predictions)\n\n\ndef val_step(model, loss_object, inp, tar, val_loss, val_accuracy, pad_token_id\n ):\n tar_inp = tar[:, :-1]\n tar_real = tar[:, 1:]\n enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp,\n tar_inp)\n predictions, _ = model(inp, tar_inp, False, enc_padding_mask,\n combined_mask, dec_padding_mask)\n loss = loss_function(tar_real, predictions, loss_object, pad_token_id)\n val_loss(loss)\n val_accuracy(tar_real, predictions)\n\n\ndef compute_bleu_score(transformer_model, dataset, user_config,\n tokenizer_tar, epoch):\n inp_language = user_config['inp_language']\n target_language = user_config['target_language']\n checkpoint_path = user_config['transformer_checkpoint_path']\n val_aligned_path_tar = user_config['val_data_path_{}'.format(\n target_language)]\n pred_file_path = '../log/log_{}_{}/'.format(inp_language, target_language\n ) + checkpoint_path.split('/')[-1] + '_epoch-' + str(epoch\n ) + '_prediction_{}.txt'.format(target_language)\n sacrebleu_metric(transformer_model, pred_file_path, None, tokenizer_tar,\n dataset, tokenizer_tar.MAX_LENGTH)\n print('-----------------------------')\n compute_bleu(pred_file_path, val_aligned_path_tar, print_all_scores=False)\n print('-----------------------------')\n new_path = '../log/log_{}_{}/'.format(inp_language, target_language\n ) + checkpoint_path.split('/')[-1] + '_epoch-' + str(epoch\n ) + '_prediction_{}'.format(target_language) + '.txt'\n os.rename(pred_file_path, new_path)\n print('Saved translated prediction at {}'.format(new_path))\n\n\ndef do_training(user_config):\n inp_language = user_config['inp_language']\n target_language = user_config['target_language']\n print('\\n****Training model from {} to {}****\\n'.format(inp_language,\n target_language))\n print('****Loading tokenizers****')\n tokenizer_inp, tokenizer_tar = utils.load_tokenizers(inp_language,\n target_language, user_config)\n print('****Loading train dataset****')\n train_aligned_path_inp = user_config['train_data_path_{}'.format(\n inp_language)]\n train_aligned_path_tar = 
user_config['train_data_path_{}'.format(\n target_language)]\n train_dataloader = DataLoader(user_config['transformer_batch_size'],\n train_aligned_path_inp, train_aligned_path_tar, tokenizer_inp,\n tokenizer_tar, inp_language, target_language, True)\n train_dataset = train_dataloader.get_data_loader()\n print('****Loading val dataset****')\n val_aligned_path_inp = user_config['val_data_path_{}'.format(inp_language)]\n val_aligned_path_tar = user_config['val_data_path_{}'.format(\n target_language)]\n val_dataloader = DataLoader(user_config['transformer_batch_size'] * 2,\n val_aligned_path_inp, val_aligned_path_tar, tokenizer_inp,\n tokenizer_tar, inp_language, target_language, False)\n val_dataset = val_dataloader.get_data_loader()\n loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits\n =True, reduction='none')\n train_loss = tf.keras.metrics.Mean(name='train_loss')\n train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name=\n 'train_accuracy')\n val_loss = tf.keras.metrics.Mean(name='val_loss')\n val_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name=\n 'val_accuracy')\n print('****Loading transformer model****')\n transformer_model, optimizer, ckpt_manager = utils.load_transformer_model(\n user_config, tokenizer_inp, tokenizer_tar)\n epochs = user_config['transformer_epochs']\n print('\\nTraining model now...')\n for epoch in range(epochs):\n print()\n start = time.time()\n train_loss.reset_states()\n train_accuracy.reset_states()\n val_loss.reset_states()\n val_accuracy.reset_states()\n for batch, (inp, tar, _) in enumerate(train_dataset):\n train_step(transformer_model, loss_object, optimizer, inp, tar,\n train_loss, train_accuracy, pad_token_id=tokenizer_tar.\n pad_token_id)\n if batch % 50 == 0:\n print('Train: Epoch {} Batch {} Loss {:.4f} Accuracy {:.4f}'\n .format(epoch + 1, batch, train_loss.result(),\n train_accuracy.result()))\n if (batch + 1) % 2200 == 0:\n for _, (inp, tar, _) in enumerate(val_dataset):\n val_step(transformer_model, loss_object, inp, tar,\n val_loss, val_accuracy, pad_token_id=tokenizer_tar.\n pad_token_id)\n print('Batch {}: Val Loss: {:.4f}, Val Accuracy: {:.4f}\\n'.\n format(batch, val_loss.result(), val_accuracy.result()))\n if user_config['compute_bleu']:\n print('\\nComputing BLEU at batch {}: '.format(batch))\n compute_bleu_score(transformer_model, val_dataset,\n user_config, tokenizer_tar, batch * epoch + 1)\n print('After {} epochs'.format(epoch + 1))\n print('Train Loss: {:.4f}, Train Accuracy: {:.4f}'.format(\n train_loss.result(), train_accuracy.result()))\n for batch, (inp, tar, _) in enumerate(val_dataset):\n val_step(transformer_model, loss_object, inp, tar, val_loss,\n val_accuracy, pad_token_id=tokenizer_tar.pad_token_id)\n print('Val Loss: {:.4f}, Val Accuracy: {:.4f}'.format(val_loss.\n result(), val_accuracy.result()))\n print('Time taken for training epoch {}: {} secs'.format(epoch + 1,\n time.time() - start))\n ckpt_save_path = ckpt_manager.save()\n print('Saving checkpoint after epoch {} at {}'.format(epoch + 1,\n ckpt_save_path))\n if user_config['compute_bleu']:\n print('\\nComputing BLEU at epoch {}: '.format(epoch + 1))\n compute_bleu_score(transformer_model, val_dataset, user_config,\n tokenizer_tar, epoch + 1)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--config', help=\n 'Configuration file containing training parameters', type=str)\n args = parser.parse_args()\n user_config = utils.load_file(args.config)\n seed = user_config['random_seed']\n 
utils.set_seed(seed)\n print(json.dumps(user_config, indent=2))\n do_training(user_config)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import time\nimport argparse\nimport utils\nfrom data_loader import DataLoader\nfrom generate_model_predictions import sacrebleu_metric, compute_bleu\nimport tensorflow as tf\nimport os\nimport json\nfrom transformer import create_masks\n\n\n# Since the target sequences are padded, it is important\n# to apply a padding mask when calculating the loss.\ndef loss_function(real, pred, loss_object, pad_token_id):\n \"\"\"Calculates total loss containing cross entropy with padding ignored.\n Args:\n real: Tensor of size [batch_size, length_logits, vocab_size]\n pred: Tensor of size [batch_size, length_labels]\n loss_object: Cross entropy loss\n pad_token_id: Pad token id to ignore\n Returns:\n A scalar float tensor for loss.\n \"\"\"\n mask = tf.math.logical_not(tf.math.equal(real, pad_token_id))\n loss_ = loss_object(real, pred)\n mask = tf.cast(mask, dtype=loss_.dtype)\n loss_ *= mask\n return tf.reduce_sum(loss_) / tf.reduce_sum(mask)\n\n\ndef train_step(model, loss_object, optimizer, inp, tar,\n train_loss, train_accuracy, pad_token_id):\n tar_inp = tar[:, :-1]\n tar_real = tar[:, 1:]\n\n enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, tar_inp)\n\n with tf.GradientTape() as tape:\n # training=True is only needed if there are layers with different\n # behavior during training versus inference (e.g. Dropout).\n predictions, _ = model(inp, tar_inp,\n True,\n enc_padding_mask,\n combined_mask,\n dec_padding_mask)\n loss = loss_function(tar_real, predictions, loss_object, pad_token_id)\n\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n\n train_loss(loss)\n train_accuracy(tar_real, predictions)\n\n\ndef val_step(model, loss_object, inp, tar,\n val_loss, val_accuracy, pad_token_id):\n tar_inp = tar[:, :-1]\n tar_real = tar[:, 1:]\n\n enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, tar_inp)\n\n predictions, _ = model(inp, tar_inp,\n False,\n enc_padding_mask,\n combined_mask,\n dec_padding_mask)\n loss = loss_function(tar_real, predictions, loss_object, pad_token_id)\n\n val_loss(loss)\n val_accuracy(tar_real, predictions)\n\n\ndef compute_bleu_score(transformer_model, dataset, user_config, tokenizer_tar, epoch):\n inp_language = user_config[\"inp_language\"]\n target_language = user_config[\"target_language\"]\n checkpoint_path = user_config[\"transformer_checkpoint_path\"]\n val_aligned_path_tar = user_config[\"val_data_path_{}\".format(target_language)]\n pred_file_path = \"../log/log_{}_{}/\".format(inp_language, target_language) + checkpoint_path.split('/')[\n -1] + \"_epoch-\" + str(epoch) + \"_prediction_{}.txt\".format(target_language)\n\n sacrebleu_metric(transformer_model, pred_file_path, None,\n tokenizer_tar, dataset,\n tokenizer_tar.MAX_LENGTH)\n print(\"-----------------------------\")\n compute_bleu(pred_file_path, val_aligned_path_tar, print_all_scores=False)\n print(\"-----------------------------\")\n\n # append checkpoint and score to file name for easy reference\n new_path = \"../log/log_{}_{}/\".format(inp_language, target_language) + checkpoint_path.split('/')[\n -1] + \"_epoch-\" + str(epoch) + \"_prediction_{}\".format(target_language) + \".txt\"\n # append score and checkpoint name to file_name\n os.rename(pred_file_path, new_path)\n print(\"Saved translated prediction at {}\".format(new_path))\n\n\ndef do_training(user_config):\n inp_language = user_config[\"inp_language\"]\n target_language = user_config[\"target_language\"]\n\n 
print(\"\\n****Training model from {} to {}****\\n\".format(inp_language, target_language))\n\n print(\"****Loading tokenizers****\")\n # load pre-trained tokenizer\n tokenizer_inp, tokenizer_tar = utils.load_tokenizers(inp_language, target_language, user_config)\n\n print(\"****Loading train dataset****\")\n # train data loader\n train_aligned_path_inp = user_config[\"train_data_path_{}\".format(inp_language)]\n train_aligned_path_tar = user_config[\"train_data_path_{}\".format(target_language)]\n train_dataloader = DataLoader(user_config[\"transformer_batch_size\"],\n train_aligned_path_inp,\n train_aligned_path_tar,\n tokenizer_inp,\n tokenizer_tar,\n inp_language,\n target_language,\n True)\n train_dataset = train_dataloader.get_data_loader()\n\n print(\"****Loading val dataset****\")\n # val data loader\n val_aligned_path_inp = user_config[\"val_data_path_{}\".format(inp_language)]\n val_aligned_path_tar = user_config[\"val_data_path_{}\".format(target_language)]\n val_dataloader = DataLoader(user_config[\"transformer_batch_size\"] * 2, # for fast validation increase batch size\n val_aligned_path_inp,\n val_aligned_path_tar,\n tokenizer_inp,\n tokenizer_tar,\n inp_language,\n target_language,\n False)\n val_dataset = val_dataloader.get_data_loader()\n\n # define loss and accuracy metrics\n loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')\n train_loss = tf.keras.metrics.Mean(name='train_loss')\n train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')\n val_loss = tf.keras.metrics.Mean(name='val_loss')\n val_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='val_accuracy')\n\n print(\"****Loading transformer model****\")\n # load model and optimizer\n transformer_model, optimizer, ckpt_manager = \\\n utils.load_transformer_model(user_config, tokenizer_inp, tokenizer_tar)\n\n epochs = user_config[\"transformer_epochs\"]\n print(\"\\nTraining model now...\")\n for epoch in range(epochs):\n print()\n start = time.time()\n train_loss.reset_states()\n train_accuracy.reset_states()\n val_loss.reset_states()\n val_accuracy.reset_states()\n\n # inp -> english, tar -> french\n for (batch, (inp, tar, _)) in enumerate(train_dataset):\n train_step(transformer_model, loss_object, optimizer, inp, tar,\n train_loss, train_accuracy, pad_token_id=tokenizer_tar.pad_token_id)\n\n if batch % 50 == 0:\n print('Train: Epoch {} Batch {} Loss {:.4f} Accuracy {:.4f}'.format(\n epoch + 1, batch, train_loss.result(), train_accuracy.result()))\n\n if (batch + 1) % 2200 == 0:\n # inp -> english, tar -> french\n for (_, (inp, tar, _)) in enumerate(val_dataset):\n val_step(transformer_model, loss_object, inp, tar,\n val_loss, val_accuracy, pad_token_id=tokenizer_tar.pad_token_id)\n print('Batch {}: Val Loss: {:.4f}, Val Accuracy: {:.4f}\\n'.format(batch, val_loss.result(),\n val_accuracy.result()))\n if user_config[\"compute_bleu\"]:\n print(\"\\nComputing BLEU at batch {}: \".format(batch))\n compute_bleu_score(transformer_model, val_dataset, user_config, tokenizer_tar, batch * epoch + 1)\n\n print(\"After {} epochs\".format(epoch + 1))\n print('Train Loss: {:.4f}, Train Accuracy: {:.4f}'.format(train_loss.result(), train_accuracy.result()))\n\n # inp -> english, tar -> french\n for (batch, (inp, tar, _)) in enumerate(val_dataset):\n val_step(transformer_model, loss_object, inp, tar,\n val_loss, val_accuracy, pad_token_id=tokenizer_tar.pad_token_id)\n print('Val Loss: {:.4f}, Val Accuracy: {:.4f}'.format(val_loss.result(), 
val_accuracy.result()))\n\n print('Time taken for training epoch {}: {} secs'.format(epoch + 1, time.time() - start))\n\n # evaluate and save model every x-epochs\n ckpt_save_path = ckpt_manager.save()\n print('Saving checkpoint after epoch {} at {}'.format(epoch + 1, ckpt_save_path))\n if user_config[\"compute_bleu\"]:\n print(\"\\nComputing BLEU at epoch {}: \".format(epoch + 1))\n compute_bleu_score(transformer_model, val_dataset, user_config, tokenizer_tar, epoch + 1)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--config\", help=\"Configuration file containing training parameters\", type=str)\n args = parser.parse_args()\n user_config = utils.load_file(args.config)\n seed = user_config[\"random_seed\"]\n utils.set_seed(seed)\n print(json.dumps(user_config, indent=2))\n do_training(user_config)\n\n\nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
def check(root, a, b):
    # Returns False if a and b are siblings (children of the same parent);
    # siblings sit on the same level but, by definition, are not cousins.
    # Note: a and b are node *values*, so compare against the children's .data.
    if root:
        if root.left and root.right:
            if (root.left.data == a and root.right.data == b) or \
               (root.left.data == b and root.right.data == a):
                return False
        return check(root.left, a, b) and check(root.right, a, b)
    return True

def isCousin(root, a, b):
    # Cousins must appear on the same level without sharing a parent.
    if not check(root, a, b):
        return False
    q = []
    q.insert(0, root)
    tmp = set()
    while q:
        l = len(q)
        # Breadth-first: drain one level, collecting the values seen on it.
        for i in range(l):
            n = q.pop()
            tmp.add(n.data)
            if n.left:
                q.insert(0, n.left)
            if n.right:
                q.insert(0, n.right)
        if a in tmp and b in tmp:
            return True
        tmp.clear()
    return False
|
normal
|
{
"blob_id": "96cfee85194c9c30b3d74bbddc2a31b6933eb032",
"index": 2226,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef isCousin(root, a, b):\n if check(root, a, b) == False:\n return False\n q = []\n q.insert(0, root)\n tmp = set()\n while len(q):\n l = len(q)\n for i in range(l):\n n = q.pop()\n tmp.add(n.data)\n if n.left:\n q.insert(0, n.left)\n if n.right:\n q.insert(0, n.right)\n if a in tmp and b in tmp:\n return True\n tmp.clear()\n return False\n",
"step-3": "def check(root, a, b):\n if root:\n if (root.left == a and root.right == b or root.left == b and root.\n right == a):\n return False\n return check(root.left, a, b) and check(root.right, a, b)\n return True\n\n\ndef isCousin(root, a, b):\n if check(root, a, b) == False:\n return False\n q = []\n q.insert(0, root)\n tmp = set()\n while len(q):\n l = len(q)\n for i in range(l):\n n = q.pop()\n tmp.add(n.data)\n if n.left:\n q.insert(0, n.left)\n if n.right:\n q.insert(0, n.right)\n if a in tmp and b in tmp:\n return True\n tmp.clear()\n return False\n",
"step-4": "def check(root, a, b):\n if root:\n if (root.left == a and root.right == b) or (root.left ==b and root.right==a):\n return False\n return check(root.left, a, b) and check(root.right, a, b)\n return True\ndef isCousin(root, a, b):\n # Your code here\n if check(root, a, b)==False:\n return False\n q=[]\n q.insert(0, root)\n tmp=set()\n while(len(q)):\n l = len(q)\n for i in range(l):\n n = q.pop()\n tmp.add(n.data)\n if n.left:\n q.insert(0, n.left)\n if n.right:\n q.insert(0, n.right)\n if a in tmp and b in tmp:\n return True\n tmp.clear()\n return False",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import MySQLdb
import settings
import redis
import socket
import fcntl
import struct
import datetime
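
# Module-level connection and cursor shared by the helpers below.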
db = MySQLdb.connect(settings.host, settings.user, settings.pwd, settings.db)
cursor = db.cursor()
def connect_mysql():
    # Re-establish the connection if the server has dropped it; without the
    # global declaration the reconnect would only rebind a local name.
    global db
    try:
        db.ping()
    except:
        db = MySQLdb.connect(settings.host, settings.user, settings.pwd, settings.db)

def init_database(table, sql):
    # Drop and recreate the table so each run starts from a clean schema.
    cursor.execute("DROP TABLE IF EXISTS %s" % table)
    cursor.execute(sql)
    print "init %s successful" % table

def insert_data(sql):
    connect_mysql()
    try:
        cursor = db.cursor()
        cursor.execute(sql)
        db.commit()
    except:
        print "execute %s error" % sql
        db.rollback()

def set_tags_from_result():
    sql = "select WIDTH,DROP_RATE,MEMORY,CPU,SERVICE,THREAD_NUM,FRECURENT,R100 from result"
    # Per-column normalisation factors applied before writing into `tags`.
    divide = [125.0, 1, 100.0, 100.0, 1000.0, 20.0, 1, 1]
    result = [1, 2, 3, 4, 5, 6, 7, 8]
    try:
        cursor.execute(sql)
        results = cursor.fetchall()
        for element in results:
            for i in range(len(element)):
                result[i] = element[i] / divide[i]
            sql = "insert into tags (WIDTH,DROP_RATE,MEMORY,CPU,SERVICE,THREAD_NUM,FRECURENT,R100) values('%f', '%f', '%f', '%f', '%f', '%f', '%f', '%f')" % (result[0], result[1], result[2], result[3], result[4], result[5], result[6], result[7])
            insert_data(sql)
    except Exception as msg:
        print "select from result error"
        print str(msg)
    db.close()
if __name__ == '__main__':
    table = 'tags'
    sql = """CREATE TABLE %s (
             WIDTH FLOAT(3,2),
             DROP_RATE FLOAT,
             MEMORY FLOAT(3,2),
             CPU FLOAT(3,2),
             SERVICE FLOAT(3,2),
             THREAD_NUM FLOAT,
             FRECURENT FLOAT,
             R100 FLOAT(2, 1))""" % table
    init_database(table, sql)
    set_tags_from_result()
|
normal
|
{
"blob_id": "b46b9b086fc089e24cb39a0c2c4ac252591b2190",
"index": 1540,
"step-1": "import MySQLdb\nimport settings\nimport redis\nimport socket\nimport fcntl\nimport struct\nimport datetime\n\n\ndb = MySQLdb.connect(settings.host, settings.user, settings.pwd, settings.db)\ncursor = db.cursor()\ndef connect_mysql():\n\ttry: \n db.ping()\n except: \n db = MySQLdb.connect(settings.host, settings.user, settings.pwd, settings.db)\n\ndef init_database(table, sql):\n\tcursor.execute(\"DROP TABLE IF EXISTS %s\" % table)\n cursor.execute(sql)\n\tprint \"init %s successful\" % table\n\ndef insert_data(sql):\n connect_mysql()\n\ttry:\n cursor = db.cursor()\n cursor.execute(sql)\n db.commit()\n except:\n print \"execute %s error\" % sql\n db.rollback()\n\n\ndef set_tags_from_result():\n\tsql = \"select WIDTH,DROP_RATE,MEMORY,CPU,SERVICE,THREAD_NUM,FRECURENT,R100 from result\"\n\tdevide = [125.0, 1, 100.0, 100.0, 1000.0, 20.0, 1, 1]\n\tresult = [1,2,3,4,5,6,7,8]\n\ttry:\n\t\tcursor.execute(sql)\n\t\tresults = cursor.fetchall()\n\t\tfor element in results:\n\t\t\tfor i in range(len(element)):\n\t\t\t\tresult[i] = element[i]/devide[i]\n\t\t\tsql = \"insert into tags (WIDTH,DROP_RATE,MEMORY,CPU,SERVICE,THREAD_NUM,FRECURENT,R100) values('%f', '%f', '%f', '%f', '%f', '%f', '%f', '%f')\" % (result[0], result[1], result[2],result[3], result[4], result[5], result[6], result[7])\n\t\t\tinsert_data(sql)\n\texcept Exception as msg:\n \tprint \"select from result error\"\n\t print msg\n print str(msg)\n\tdb.close()\nif __name__ == '__main__':\n\ttable = 'tags'\n\tsql = \"\"\"CREATE TABLE %s (\n WIDTH FLOAT(3,2),\n DROP_RATE FLOAT,\n MEMORY FLOAT(3,2),\n CPU FLOAT(3,2),\n SERVICE FLOAT(3,2),\n THREAD_NUM FLOAT,\n FRECURENT FLOAT,\n R100 FLOAT(2, 1))\"\"\" % table\n\tinit_database(table, sql)\n\tset_tags_from_result()\n\t\n\t\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from numpy import empty
import pickle
from dataset import Dataset
from image import Image
f = open("./digitdata/trainingimages", "r")
reader = f.readlines()
labels = open("./digitdata/traininglabels", "r")
lreader = labels.readlines()
trainImageList = []
j = 0
i = 0
while(j < len(reader)):
image_array = empty([28, 28])
for r in range(0, 28):
row = reader[j]
j += 1
for c in range(0, 28):
if row[c] == '#' or row[c] == '+':
image_array[r][c] = 1
else:
image_array[r][c] = 0
label = lreader[i]
i += 1
image = Image(image_array, label)
trainImageList.append(image)
f = open("./digitdata/testimages", "r")
reader = f.readlines()
labels = open("./digitdata/testlabels", "r")
lreader = labels.readlines()
testImageList = []
j = 0
i = 0
while(j < len(reader)):
image_array = empty([28, 28])
for r in range(0, 28):
row = reader[j]
j += 1
for c in range(0, 28):
if row[c] == '#' or row[c] == '+':
image_array[r][c] = 1
else:
image_array[r][c] = 0
label = lreader[i]
i += 1
image = Image(image_array, label)
testImageList.append(image)
f = open("./digitdata/validationimages", "r")
reader = f.readlines()
labels = open("./digitdata/validationlabels", "r")
lreader = labels.readlines()
valImageList = []
j = 0
i = 0
while(j < len(reader)):
image_array = empty([28, 28])
for r in range(0, 28):
row = reader[j]
j += 1
for c in range(0, 28):
if row[c] == '#' or row[c] == '+':
image_array[r][c] = 1
else:
image_array[r][c] = 0
label = lreader[i]
i += 1
image = Image(image_array, label)
valImageList.append(image)
dataset = Dataset(trainImageList, testImageList, valImageList)
output_file = open('digits_dataset', 'wb')
pickle.dump(dataset, output_file)
|
normal
|
{
"blob_id": "aff439361716c35e5f492680a55e7470b4ee0c42",
"index": 5905,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwhile j < len(reader):\n image_array = empty([28, 28])\n for r in range(0, 28):\n row = reader[j]\n j += 1\n for c in range(0, 28):\n if row[c] == '#' or row[c] == '+':\n image_array[r][c] = 1\n else:\n image_array[r][c] = 0\n label = lreader[i]\n i += 1\n image = Image(image_array, label)\n trainImageList.append(image)\n<mask token>\nwhile j < len(reader):\n image_array = empty([28, 28])\n for r in range(0, 28):\n row = reader[j]\n j += 1\n for c in range(0, 28):\n if row[c] == '#' or row[c] == '+':\n image_array[r][c] = 1\n else:\n image_array[r][c] = 0\n label = lreader[i]\n i += 1\n image = Image(image_array, label)\n testImageList.append(image)\n<mask token>\nwhile j < len(reader):\n image_array = empty([28, 28])\n for r in range(0, 28):\n row = reader[j]\n j += 1\n for c in range(0, 28):\n if row[c] == '#' or row[c] == '+':\n image_array[r][c] = 1\n else:\n image_array[r][c] = 0\n label = lreader[i]\n i += 1\n image = Image(image_array, label)\n valImageList.append(image)\n<mask token>\npickle.dump(dataset, output_file)\n",
"step-3": "<mask token>\nf = open('./digitdata/trainingimages', 'r')\nreader = f.readlines()\nlabels = open('./digitdata/traininglabels', 'r')\nlreader = labels.readlines()\ntrainImageList = []\nj = 0\ni = 0\nwhile j < len(reader):\n image_array = empty([28, 28])\n for r in range(0, 28):\n row = reader[j]\n j += 1\n for c in range(0, 28):\n if row[c] == '#' or row[c] == '+':\n image_array[r][c] = 1\n else:\n image_array[r][c] = 0\n label = lreader[i]\n i += 1\n image = Image(image_array, label)\n trainImageList.append(image)\nf = open('./digitdata/testimages', 'r')\nreader = f.readlines()\nlabels = open('./digitdata/testlabels', 'r')\nlreader = labels.readlines()\ntestImageList = []\nj = 0\ni = 0\nwhile j < len(reader):\n image_array = empty([28, 28])\n for r in range(0, 28):\n row = reader[j]\n j += 1\n for c in range(0, 28):\n if row[c] == '#' or row[c] == '+':\n image_array[r][c] = 1\n else:\n image_array[r][c] = 0\n label = lreader[i]\n i += 1\n image = Image(image_array, label)\n testImageList.append(image)\nf = open('./digitdata/validationimages', 'r')\nreader = f.readlines()\nlabels = open('./digitdata/validationlabels', 'r')\nlreader = labels.readlines()\nvalImageList = []\nj = 0\ni = 0\nwhile j < len(reader):\n image_array = empty([28, 28])\n for r in range(0, 28):\n row = reader[j]\n j += 1\n for c in range(0, 28):\n if row[c] == '#' or row[c] == '+':\n image_array[r][c] = 1\n else:\n image_array[r][c] = 0\n label = lreader[i]\n i += 1\n image = Image(image_array, label)\n valImageList.append(image)\ndataset = Dataset(trainImageList, testImageList, valImageList)\noutput_file = open('digits_dataset', 'wb')\npickle.dump(dataset, output_file)\n",
"step-4": "from numpy import empty\nimport pickle\nfrom dataset import Dataset\nfrom image import Image\nf = open('./digitdata/trainingimages', 'r')\nreader = f.readlines()\nlabels = open('./digitdata/traininglabels', 'r')\nlreader = labels.readlines()\ntrainImageList = []\nj = 0\ni = 0\nwhile j < len(reader):\n image_array = empty([28, 28])\n for r in range(0, 28):\n row = reader[j]\n j += 1\n for c in range(0, 28):\n if row[c] == '#' or row[c] == '+':\n image_array[r][c] = 1\n else:\n image_array[r][c] = 0\n label = lreader[i]\n i += 1\n image = Image(image_array, label)\n trainImageList.append(image)\nf = open('./digitdata/testimages', 'r')\nreader = f.readlines()\nlabels = open('./digitdata/testlabels', 'r')\nlreader = labels.readlines()\ntestImageList = []\nj = 0\ni = 0\nwhile j < len(reader):\n image_array = empty([28, 28])\n for r in range(0, 28):\n row = reader[j]\n j += 1\n for c in range(0, 28):\n if row[c] == '#' or row[c] == '+':\n image_array[r][c] = 1\n else:\n image_array[r][c] = 0\n label = lreader[i]\n i += 1\n image = Image(image_array, label)\n testImageList.append(image)\nf = open('./digitdata/validationimages', 'r')\nreader = f.readlines()\nlabels = open('./digitdata/validationlabels', 'r')\nlreader = labels.readlines()\nvalImageList = []\nj = 0\ni = 0\nwhile j < len(reader):\n image_array = empty([28, 28])\n for r in range(0, 28):\n row = reader[j]\n j += 1\n for c in range(0, 28):\n if row[c] == '#' or row[c] == '+':\n image_array[r][c] = 1\n else:\n image_array[r][c] = 0\n label = lreader[i]\n i += 1\n image = Image(image_array, label)\n valImageList.append(image)\ndataset = Dataset(trainImageList, testImageList, valImageList)\noutput_file = open('digits_dataset', 'wb')\npickle.dump(dataset, output_file)\n",
"step-5": "from numpy import empty\nimport pickle\nfrom dataset import Dataset\nfrom image import Image\n\nf = open(\"./digitdata/trainingimages\", \"r\")\nreader = f.readlines()\n\nlabels = open(\"./digitdata/traininglabels\", \"r\")\nlreader = labels.readlines()\n\ntrainImageList = []\n\nj = 0\ni = 0\nwhile(j < len(reader)):\n image_array = empty([28, 28])\n for r in range(0, 28):\n row = reader[j]\n j += 1\n for c in range(0, 28):\n if row[c] == '#' or row[c] == '+':\n image_array[r][c] = 1\n else:\n image_array[r][c] = 0\n label = lreader[i]\n i += 1\n image = Image(image_array, label)\n trainImageList.append(image)\n\n\nf = open(\"./digitdata/testimages\", \"r\")\nreader = f.readlines()\n\nlabels = open(\"./digitdata/testlabels\", \"r\")\nlreader = labels.readlines()\n\ntestImageList = []\n\nj = 0\ni = 0\nwhile(j < len(reader)):\n image_array = empty([28, 28])\n for r in range(0, 28):\n row = reader[j]\n j += 1\n for c in range(0, 28):\n if row[c] == '#' or row[c] == '+':\n image_array[r][c] = 1\n else:\n image_array[r][c] = 0\n label = lreader[i]\n i += 1\n image = Image(image_array, label)\n testImageList.append(image)\n\nf = open(\"./digitdata/validationimages\", \"r\")\nreader = f.readlines()\n\nlabels = open(\"./digitdata/validationlabels\", \"r\")\nlreader = labels.readlines()\n\nvalImageList = []\n\nj = 0\ni = 0\nwhile(j < len(reader)):\n image_array = empty([28, 28])\n for r in range(0, 28):\n row = reader[j]\n j += 1\n for c in range(0, 28):\n if row[c] == '#' or row[c] == '+':\n image_array[r][c] = 1\n else:\n image_array[r][c] = 0\n label = lreader[i]\n i += 1\n image = Image(image_array, label)\n valImageList.append(image)\n\ndataset = Dataset(trainImageList, testImageList, valImageList)\noutput_file = open('digits_dataset', 'wb')\npickle.dump(dataset, output_file)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from disaggregation import DisaggregationManager
import numpy as np
from more_itertools import windowed
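
# Sanity check: slice a random signal into overlapping windows (length 1024,
# hop 128) and verify that overlap-averaging reconstructs the original shape.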
x = np.random.random_sample(10 * 32 * 1024)
w = windowed(x, n=1024, step=128)
z = DisaggregationManager._overlap_average(np.array(list(w)), stride=128)
print(z.shape)
print(x.shape)
assert z.shape == x.shape
|
normal
|
{
"blob_id": "6d4950ca61cd1e2ee7ef8b409577e9df2d65addd",
"index": 4462,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(z.shape)\nprint(x.shape)\nassert z.shape == x.shape\n",
"step-3": "<mask token>\nx = np.random.random_sample(10 * 32 * 1024)\nw = windowed(x, n=1024, step=128)\nz = DisaggregationManager._overlap_average(np.array(list(w)), stride=128)\nprint(z.shape)\nprint(x.shape)\nassert z.shape == x.shape\n",
"step-4": "from disaggregation import DisaggregationManager\nimport numpy as np\nfrom more_itertools import windowed\nx = np.random.random_sample(10 * 32 * 1024)\nw = windowed(x, n=1024, step=128)\nz = DisaggregationManager._overlap_average(np.array(list(w)), stride=128)\nprint(z.shape)\nprint(x.shape)\nassert z.shape == x.shape\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from context import vicemergencyapi
from vicemergencyapi.vicemergency import VicEmergency
from geographiclib.geodesic import Geodesic
from shapely.geometry import Point
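
# Geodesic.WGS84.Inverse takes (lat1, lon1, lat2, lon2) and returns 's12',
# the geodesic distance in metres; shapely Points store x=lon, y=lat,
# hence the (p.y, p.x) swap below.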
def geoDistance(p1, p2):
return Geodesic.WGS84.Inverse(p1.y, p1.x, p2.y, p2.x)['s12']
melbourne = Point(144.962272, -37.812274)
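
# Sort key: a feature's distance from Melbourne's CBD.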
def compare(f):
return geoDistance(f.getLocation(), melbourne)
for i in sorted(VicEmergency.getItems(), key=compare):
print(i.properties["sourceTitle"])
print(i.properties["category1"])
print(i.properties["location"])
print("{:.0f}km".format(geoDistance(i.getLocation(), melbourne) / 1000))
print("============================")
|
normal
|
{
"blob_id": "920f00632599945397364dd0f52f21234e17f9ef",
"index": 9445,
"step-1": "<mask token>\n\n\ndef geoDistance(p1, p2):\n return Geodesic.WGS84.Inverse(p1.y, p1.x, p2.y, p2.x)['s12']\n\n\n<mask token>\n\n\ndef compare(f):\n return geoDistance(f.getLocation(), melbourne)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef geoDistance(p1, p2):\n return Geodesic.WGS84.Inverse(p1.y, p1.x, p2.y, p2.x)['s12']\n\n\n<mask token>\n\n\ndef compare(f):\n return geoDistance(f.getLocation(), melbourne)\n\n\nfor i in sorted(VicEmergency.getItems(), key=compare):\n print(i.properties['sourceTitle'])\n print(i.properties['category1'])\n print(i.properties['location'])\n print('{:.0f}km'.format(geoDistance(i.getLocation(), melbourne) / 1000))\n print('============================')\n",
"step-3": "<mask token>\n\n\ndef geoDistance(p1, p2):\n return Geodesic.WGS84.Inverse(p1.y, p1.x, p2.y, p2.x)['s12']\n\n\nmelbourne = Point(144.962272, -37.812274)\n\n\ndef compare(f):\n return geoDistance(f.getLocation(), melbourne)\n\n\nfor i in sorted(VicEmergency.getItems(), key=compare):\n print(i.properties['sourceTitle'])\n print(i.properties['category1'])\n print(i.properties['location'])\n print('{:.0f}km'.format(geoDistance(i.getLocation(), melbourne) / 1000))\n print('============================')\n",
"step-4": "from context import vicemergencyapi\nfrom vicemergencyapi.vicemergency import VicEmergency\nfrom geographiclib.geodesic import Geodesic\nfrom shapely.geometry import Point\n\n\ndef geoDistance(p1, p2):\n return Geodesic.WGS84.Inverse(p1.y, p1.x, p2.y, p2.x)['s12']\n\n\nmelbourne = Point(144.962272, -37.812274)\n\n\ndef compare(f):\n return geoDistance(f.getLocation(), melbourne)\n\n\nfor i in sorted(VicEmergency.getItems(), key=compare):\n print(i.properties['sourceTitle'])\n print(i.properties['category1'])\n print(i.properties['location'])\n print('{:.0f}km'.format(geoDistance(i.getLocation(), melbourne) / 1000))\n print('============================')\n",
"step-5": "from context import vicemergencyapi\nfrom vicemergencyapi.vicemergency import VicEmergency\n\nfrom geographiclib.geodesic import Geodesic\nfrom shapely.geometry import Point\n\n\ndef geoDistance(p1, p2):\n return Geodesic.WGS84.Inverse(p1.y, p1.x, p2.y, p2.x)['s12']\n\n\nmelbourne = Point(144.962272, -37.812274)\n\ndef compare(f):\n return geoDistance(f.getLocation(), melbourne)\n\nfor i in sorted(VicEmergency.getItems(), key=compare):\n\n print(i.properties[\"sourceTitle\"])\n print(i.properties[\"category1\"])\n print(i.properties[\"location\"])\n print(\"{:.0f}km\".format(geoDistance(i.getLocation(), melbourne) / 1000))\n\n print(\"============================\")\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
__title__ = 'pyaddepar'
__version__ = '0.6.0'
__author__ = 'Thomas Schmelzer'
__license__ = 'MIT'
__copyright__ = 'Copyright 2019 by Lobnek Wealth Management'
|
normal
|
{
"blob_id": "cc985ae061c04696dbf5114273befd62321756ae",
"index": 9569,
"step-1": "<mask token>\n",
"step-2": "__title__ = 'pyaddepar'\n__version__ = '0.6.0'\n__author__ = 'Thomas Schmelzer'\n__license__ = 'MIT'\n__copyright__ = 'Copyright 2019 by Lobnek Wealth Management'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
import unittest
import TicTacToe
class pVpTestCase(unittest.TestCase):
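    """Two human players driven by scripted move lists: covers a player-1 win,
    a player-2 win, and a draw."""
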
def twoplayer_setup(self):
game1 = TicTacToe.Game()
player1 = TicTacToe.Player('X', game1)
player2 = TicTacToe.Player('O', game1)
return (game1, player1, player2)
#Player 1 wins
def test_mock_game1(self):
game1, player1, player2 = self.twoplayer_setup()
p1moves = ['top left', 'middle', 'bottom right']
p2moves = ['top middle', 'bottom left', 'top right']
winner = game1.play_test(player1, player2, p1moves, p2moves)
self.assertEqual(player1, winner)
#Player 2 wins
def test_mock_game2(self):
game1, player1, player2 = self.twoplayer_setup()
p1moves = ['top right', 'middle', 'bottom right']
p2moves = ['top left', 'middle left', 'bottom left']
winner = game1.play_test(player1, player2, p1moves, p2moves)
self.assertEqual(player2, winner)
#Draw
def test_mock_game3(self):
game1, player1, player2 = self.twoplayer_setup()
p1moves = ['top right', 'middle top', 'middle', 'bottom right', 'middle left']
p2moves = ['top left', 'middle right', 'bottom left', 'bottom middle']
winner = game1.play_test(player1, player2, p1moves, p2moves)
self.assertEqual(None, winner)
class CvPTestCase(unittest.TestCase):
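    """Computer vs. human: the AI should complete its own winning line, block
    the opponent's line, prefer the centre, and take the corner opposite an
    opponent-held corner."""
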
def onecompplayer_setup(self):
game1 = TicTacToe.Game()
computer1 = TicTacToe.Computer('X', game1)
player2 = TicTacToe.Player('O', game1)
return (game1, computer1, player2)
def test_place3(self):
game1, computer1, player2 = self.onecompplayer_setup()
game1.game_board = {1:"X", 2:"X",3:"-",
4:"-", 5:"-", 6:"-",
7:"-", 8:"-", 9:"-"}
p2moves = []
winner = game1.play_comp_test(computer1, player2, p2moves)
self.assertEqual(computer1, winner)
self.assertEqual({1:"X", 2:"X",3:"X",
4:"-", 5:"-", 6:"-",
7:"-", 8:"-", 9:"-"},
game1.game_board
)
def test_place2(self):
game1, computer1, player2 = self.onecompplayer_setup()
game1.game_board = {1:"-", 2:"-",3:"-",
4:"-", 5:"X", 6:"-",
7:"-", 8:"X", 9:"-"}
p2moves = []
winner = game1.play_comp_test(computer1, player2, p2moves)
self.assertEqual(computer1, winner)
self.assertEqual({1:"-", 2:"X",3:"-",
4:"-", 5:"X", 6:"-",
7:"-", 8:"X", 9:"-"},
game1.game_board
)
def test_place8(self):
game1, computer1, player2 = self.onecompplayer_setup()
game1.game_board = {1: "-", 2: "-", 3: "-",
4: "-", 5: "-", 6: "-",
7: "X", 8: "-", 9: "X"}
p2moves = []
winner = game1.play_comp_test(computer1, player2, p2moves)
self.assertEqual(computer1, winner)
self.assertEqual({1: "-", 2: "-", 3: "-",
4: "-", 5: "-", 6: "-",
7: "X", 8: "X", 9: "X"},
game1.game_board
)
def test_block5(self):
game1, computer1, player2 = self.onecompplayer_setup()
game1.game_board = {1: "-", 2: "-", 3: "-",
4: "O", 5: "-", 6: "O",
7: "-", 8: "-", 9: "-"}
computer1.auto_move()
self.assertEqual({1: "-", 2: "-", 3: "-",
4: "O", 5: "X", 6: "O",
7: "-", 8: "-", 9: "-"},
game1.game_board
)
def test_block7(self):
game1, computer1, player2 = self.onecompplayer_setup()
game1.game_board = {1: "-", 2: "-", 3: "O",
4: "-", 5: "O", 6: "-",
7: "-", 8: "-", 9: "-"}
computer1.auto_move()
self.assertEqual({1: "-", 2: "-", 3: "O",
4: "-", 5: "O", 6: "-",
7: "X", 8: "-", 9: "-"},
game1.game_board
)
def test_block3(self):
game1, computer1, player2 = self.onecompplayer_setup()
game1.game_board = {1: "-", 2: "-", 3: "-",
4: "-", 5: "-", 6: "O",
7: "-", 8: "-", 9: "O"}
computer1.auto_move()
self.assertEqual({1: "-", 2: "-", 3: "X",
4: "-", 5: "-", 6: "O",
7: "-", 8: "-", 9: "O"},
game1.game_board
)
def test_center_empty(self):
game1, computer1, player2 = self.onecompplayer_setup()
game1.game_board = {1: "-", 2: "-", 3: "-",
4: "-", 5: "-", 6: "-",
7: "-", 8: "-", 9: "-"}
computer1.auto_move()
self.assertEqual({1: "-", 2: "-", 3: "-",
4: "-", 5: "X", 6: "-",
7: "-", 8: "-", 9: "-"},
game1.game_board
)
def test_center_nonempty(self):
game1, computer1, player2 = self.onecompplayer_setup()
game1.game_board = {1: "O", 2: "-", 3: "-",
4: "X", 5: "-", 6: "O",
7: "X", 8: "-", 9: "-"}
computer1.auto_move()
self.assertEqual({1: "O", 2: "-", 3: "-",
4: "X", 5: "X", 6: "O",
7: "X", 8: "-", 9: "-"},
game1.game_board
)
def test_oppcorner7(self):
game1, computer1, player2 = self.onecompplayer_setup()
game1.game_board = {1: "-", 2: "-", 3: "O",
4: "-", 5: "X", 6: "-",
7: "-", 8: "-", 9: "-"}
computer1.auto_move()
self.assertEqual({1: "-", 2: "-", 3: "O",
4: "-", 5: "X", 6: "-",
7: "X", 8: "-", 9: "-"},
game1.game_board
)
def test_oppcorner1(self):
game1, computer1, player2 = self.onecompplayer_setup()
game1.game_board = {1: "-", 2: "-", 3: "-",
4: "O", 5: "X", 6: "X",
7: "-", 8: "-", 9: "O"}
computer1.auto_move()
self.assertEqual({1: "X", 2: "-", 3: "-",
4: "O", 5: "X", 6: "X",
7: "-", 8: "-", 9: "O"},
game1.game_board
)
def test_oppcorner3(self):
game1, computer1, player2 = self.onecompplayer_setup()
game1.game_board = {1: "-", 2: "-", 3: "-",
4: "-", 5: "X", 6: "-",
7: "O", 8: "-", 9: "-"}
computer1.auto_move()
self.assertEqual({1: "-", 2: "-", 3: "X",
4: "-", 5: "X", 6: "-",
7: "O", 8: "-", 9: "-"},
game1.game_board
)
def test_oppcorner9(self):
game1, computer1, player2 = self.onecompplayer_setup()
game1.game_board = {1: "O", 2: "-", 3: "-",
4: "-", 5: "X", 6: "-",
7: "-", 8: "-", 9: "-"}
computer1.auto_move()
self.assertEqual({1: "O", 2: "-", 3: "-",
4: "-", 5: "X", 6: "-",
7: "-", 8: "-", 9: "X"},
game1.game_board
)
if __name__ == '__main__':
unittest.main()
|
normal
|
{
"blob_id": "de0521db3909054c333ac3877ff0adf15ab180fb",
"index": 1732,
"step-1": "<mask token>\n\n\nclass CvPTestCase(unittest.TestCase):\n\n def onecompplayer_setup(self):\n game1 = TicTacToe.Game()\n computer1 = TicTacToe.Computer('X', game1)\n player2 = TicTacToe.Player('O', game1)\n return game1, computer1, player2\n\n def test_place3(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): 'X', (2): 'X', (3): '-', (4): '-', (5):\n '-', (6): '-', (7): '-', (8): '-', (9): '-'}\n p2moves = []\n winner = game1.play_comp_test(computer1, player2, p2moves)\n self.assertEqual(computer1, winner)\n self.assertEqual({(1): 'X', (2): 'X', (3): 'X', (4): '-', (5): '-',\n (6): '-', (7): '-', (8): '-', (9): '-'}, game1.game_board)\n\n def test_place2(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): '-', (4): '-', (5):\n 'X', (6): '-', (7): '-', (8): 'X', (9): '-'}\n p2moves = []\n winner = game1.play_comp_test(computer1, player2, p2moves)\n self.assertEqual(computer1, winner)\n self.assertEqual({(1): '-', (2): 'X', (3): '-', (4): '-', (5): 'X',\n (6): '-', (7): '-', (8): 'X', (9): '-'}, game1.game_board)\n\n def test_place8(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): '-', (4): '-', (5):\n '-', (6): '-', (7): 'X', (8): '-', (9): 'X'}\n p2moves = []\n winner = game1.play_comp_test(computer1, player2, p2moves)\n self.assertEqual(computer1, winner)\n self.assertEqual({(1): '-', (2): '-', (3): '-', (4): '-', (5): '-',\n (6): '-', (7): 'X', (8): 'X', (9): 'X'}, game1.game_board)\n\n def test_block5(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): '-', (4): 'O', (5):\n '-', (6): 'O', (7): '-', (8): '-', (9): '-'}\n computer1.auto_move()\n self.assertEqual({(1): '-', (2): '-', (3): '-', (4): 'O', (5): 'X',\n (6): 'O', (7): '-', (8): '-', (9): '-'}, game1.game_board)\n\n def test_block7(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): 'O', (4): '-', (5):\n 'O', (6): '-', (7): '-', (8): '-', (9): '-'}\n computer1.auto_move()\n self.assertEqual({(1): '-', (2): '-', (3): 'O', (4): '-', (5): 'O',\n (6): '-', (7): 'X', (8): '-', (9): '-'}, game1.game_board)\n\n def test_block3(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): '-', (4): '-', (5):\n '-', (6): 'O', (7): '-', (8): '-', (9): 'O'}\n computer1.auto_move()\n self.assertEqual({(1): '-', (2): '-', (3): 'X', (4): '-', (5): '-',\n (6): 'O', (7): '-', (8): '-', (9): 'O'}, game1.game_board)\n\n def test_center_empty(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): '-', (4): '-', (5):\n '-', (6): '-', (7): '-', (8): '-', (9): '-'}\n computer1.auto_move()\n self.assertEqual({(1): '-', (2): '-', (3): '-', (4): '-', (5): 'X',\n (6): '-', (7): '-', (8): '-', (9): '-'}, game1.game_board)\n\n def test_center_nonempty(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): 'O', (2): '-', (3): '-', (4): 'X', (5):\n '-', (6): 'O', (7): 'X', (8): '-', (9): '-'}\n computer1.auto_move()\n self.assertEqual({(1): 'O', (2): '-', (3): '-', (4): 'X', (5): 'X',\n (6): 'O', (7): 'X', (8): '-', (9): '-'}, game1.game_board)\n <mask token>\n\n def test_oppcorner1(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): '-', (4): 'O', (5):\n 'X', (6): 'X', (7): 
'-', (8): '-', (9): 'O'}\n computer1.auto_move()\n self.assertEqual({(1): 'X', (2): '-', (3): '-', (4): 'O', (5): 'X',\n (6): 'X', (7): '-', (8): '-', (9): 'O'}, game1.game_board)\n\n def test_oppcorner3(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): '-', (4): '-', (5):\n 'X', (6): '-', (7): 'O', (8): '-', (9): '-'}\n computer1.auto_move()\n self.assertEqual({(1): '-', (2): '-', (3): 'X', (4): '-', (5): 'X',\n (6): '-', (7): 'O', (8): '-', (9): '-'}, game1.game_board)\n\n def test_oppcorner9(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): 'O', (2): '-', (3): '-', (4): '-', (5):\n 'X', (6): '-', (7): '-', (8): '-', (9): '-'}\n computer1.auto_move()\n self.assertEqual({(1): 'O', (2): '-', (3): '-', (4): '-', (5): 'X',\n (6): '-', (7): '-', (8): '-', (9): 'X'}, game1.game_board)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass pVpTestCase(unittest.TestCase):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass CvPTestCase(unittest.TestCase):\n\n def onecompplayer_setup(self):\n game1 = TicTacToe.Game()\n computer1 = TicTacToe.Computer('X', game1)\n player2 = TicTacToe.Player('O', game1)\n return game1, computer1, player2\n\n def test_place3(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): 'X', (2): 'X', (3): '-', (4): '-', (5):\n '-', (6): '-', (7): '-', (8): '-', (9): '-'}\n p2moves = []\n winner = game1.play_comp_test(computer1, player2, p2moves)\n self.assertEqual(computer1, winner)\n self.assertEqual({(1): 'X', (2): 'X', (3): 'X', (4): '-', (5): '-',\n (6): '-', (7): '-', (8): '-', (9): '-'}, game1.game_board)\n\n def test_place2(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): '-', (4): '-', (5):\n 'X', (6): '-', (7): '-', (8): 'X', (9): '-'}\n p2moves = []\n winner = game1.play_comp_test(computer1, player2, p2moves)\n self.assertEqual(computer1, winner)\n self.assertEqual({(1): '-', (2): 'X', (3): '-', (4): '-', (5): 'X',\n (6): '-', (7): '-', (8): 'X', (9): '-'}, game1.game_board)\n\n def test_place8(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): '-', (4): '-', (5):\n '-', (6): '-', (7): 'X', (8): '-', (9): 'X'}\n p2moves = []\n winner = game1.play_comp_test(computer1, player2, p2moves)\n self.assertEqual(computer1, winner)\n self.assertEqual({(1): '-', (2): '-', (3): '-', (4): '-', (5): '-',\n (6): '-', (7): 'X', (8): 'X', (9): 'X'}, game1.game_board)\n\n def test_block5(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): '-', (4): 'O', (5):\n '-', (6): 'O', (7): '-', (8): '-', (9): '-'}\n computer1.auto_move()\n self.assertEqual({(1): '-', (2): '-', (3): '-', (4): 'O', (5): 'X',\n (6): 'O', (7): '-', (8): '-', (9): '-'}, game1.game_board)\n\n def test_block7(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): 'O', (4): '-', (5):\n 'O', (6): '-', (7): '-', (8): '-', (9): '-'}\n computer1.auto_move()\n self.assertEqual({(1): '-', (2): '-', (3): 'O', (4): '-', (5): 'O',\n (6): '-', (7): 'X', (8): '-', (9): '-'}, game1.game_board)\n\n def test_block3(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): '-', (4): '-', (5):\n '-', (6): 'O', (7): '-', (8): '-', (9): 'O'}\n computer1.auto_move()\n self.assertEqual({(1): '-', (2): '-', (3): 'X', (4): '-', (5): '-',\n (6): 'O', (7): '-', (8): '-', (9): 'O'}, game1.game_board)\n\n def test_center_empty(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): '-', (4): '-', (5):\n '-', (6): '-', (7): '-', (8): '-', (9): '-'}\n computer1.auto_move()\n self.assertEqual({(1): '-', (2): '-', (3): '-', (4): '-', (5): 'X',\n (6): '-', (7): '-', (8): '-', (9): '-'}, game1.game_board)\n\n def test_center_nonempty(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): 'O', (2): '-', (3): '-', (4): 'X', (5):\n '-', (6): 'O', (7): 'X', (8): '-', (9): '-'}\n computer1.auto_move()\n self.assertEqual({(1): 'O', (2): '-', (3): '-', (4): 'X', (5): 'X',\n (6): 'O', (7): 'X', (8): '-', (9): '-'}, game1.game_board)\n\n def test_oppcorner7(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n 
game1.game_board = {(1): '-', (2): '-', (3): 'O', (4): '-', (5):\n 'X', (6): '-', (7): '-', (8): '-', (9): '-'}\n computer1.auto_move()\n self.assertEqual({(1): '-', (2): '-', (3): 'O', (4): '-', (5): 'X',\n (6): '-', (7): 'X', (8): '-', (9): '-'}, game1.game_board)\n\n def test_oppcorner1(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): '-', (4): 'O', (5):\n 'X', (6): 'X', (7): '-', (8): '-', (9): 'O'}\n computer1.auto_move()\n self.assertEqual({(1): 'X', (2): '-', (3): '-', (4): 'O', (5): 'X',\n (6): 'X', (7): '-', (8): '-', (9): 'O'}, game1.game_board)\n\n def test_oppcorner3(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): '-', (4): '-', (5):\n 'X', (6): '-', (7): 'O', (8): '-', (9): '-'}\n computer1.auto_move()\n self.assertEqual({(1): '-', (2): '-', (3): 'X', (4): '-', (5): 'X',\n (6): '-', (7): 'O', (8): '-', (9): '-'}, game1.game_board)\n\n def test_oppcorner9(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): 'O', (2): '-', (3): '-', (4): '-', (5):\n 'X', (6): '-', (7): '-', (8): '-', (9): '-'}\n computer1.auto_move()\n self.assertEqual({(1): 'O', (2): '-', (3): '-', (4): '-', (5): 'X',\n (6): '-', (7): '-', (8): '-', (9): 'X'}, game1.game_board)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass pVpTestCase(unittest.TestCase):\n <mask token>\n\n def test_mock_game1(self):\n game1, player1, player2 = self.twoplayer_setup()\n p1moves = ['top left', 'middle', 'bottom right']\n p2moves = ['top middle', 'bottom left', 'top right']\n winner = game1.play_test(player1, player2, p1moves, p2moves)\n self.assertEqual(player1, winner)\n\n def test_mock_game2(self):\n game1, player1, player2 = self.twoplayer_setup()\n p1moves = ['top right', 'middle', 'bottom right']\n p2moves = ['top left', 'middle left', 'bottom left']\n winner = game1.play_test(player1, player2, p1moves, p2moves)\n self.assertEqual(player2, winner)\n <mask token>\n\n\nclass CvPTestCase(unittest.TestCase):\n\n def onecompplayer_setup(self):\n game1 = TicTacToe.Game()\n computer1 = TicTacToe.Computer('X', game1)\n player2 = TicTacToe.Player('O', game1)\n return game1, computer1, player2\n\n def test_place3(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): 'X', (2): 'X', (3): '-', (4): '-', (5):\n '-', (6): '-', (7): '-', (8): '-', (9): '-'}\n p2moves = []\n winner = game1.play_comp_test(computer1, player2, p2moves)\n self.assertEqual(computer1, winner)\n self.assertEqual({(1): 'X', (2): 'X', (3): 'X', (4): '-', (5): '-',\n (6): '-', (7): '-', (8): '-', (9): '-'}, game1.game_board)\n\n def test_place2(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): '-', (4): '-', (5):\n 'X', (6): '-', (7): '-', (8): 'X', (9): '-'}\n p2moves = []\n winner = game1.play_comp_test(computer1, player2, p2moves)\n self.assertEqual(computer1, winner)\n self.assertEqual({(1): '-', (2): 'X', (3): '-', (4): '-', (5): 'X',\n (6): '-', (7): '-', (8): 'X', (9): '-'}, game1.game_board)\n\n def test_place8(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): '-', (4): '-', (5):\n '-', (6): '-', (7): 'X', (8): '-', (9): 'X'}\n p2moves = []\n winner = game1.play_comp_test(computer1, player2, p2moves)\n self.assertEqual(computer1, winner)\n self.assertEqual({(1): '-', (2): '-', (3): '-', (4): '-', (5): '-',\n (6): '-', (7): 'X', (8): 'X', (9): 'X'}, game1.game_board)\n\n def test_block5(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): '-', (4): 'O', (5):\n '-', (6): 'O', (7): '-', (8): '-', (9): '-'}\n computer1.auto_move()\n self.assertEqual({(1): '-', (2): '-', (3): '-', (4): 'O', (5): 'X',\n (6): 'O', (7): '-', (8): '-', (9): '-'}, game1.game_board)\n\n def test_block7(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): 'O', (4): '-', (5):\n 'O', (6): '-', (7): '-', (8): '-', (9): '-'}\n computer1.auto_move()\n self.assertEqual({(1): '-', (2): '-', (3): 'O', (4): '-', (5): 'O',\n (6): '-', (7): 'X', (8): '-', (9): '-'}, game1.game_board)\n\n def test_block3(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): '-', (4): '-', (5):\n '-', (6): 'O', (7): '-', (8): '-', (9): 'O'}\n computer1.auto_move()\n self.assertEqual({(1): '-', (2): '-', (3): 'X', (4): '-', (5): '-',\n (6): 'O', (7): '-', (8): '-', (9): 'O'}, game1.game_board)\n\n def test_center_empty(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): '-', (4): '-', (5):\n '-', (6): '-', (7): '-', (8): '-', (9): '-'}\n computer1.auto_move()\n self.assertEqual({(1): '-', (2): '-', 
(3): '-', (4): '-', (5): 'X',\n (6): '-', (7): '-', (8): '-', (9): '-'}, game1.game_board)\n\n def test_center_nonempty(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): 'O', (2): '-', (3): '-', (4): 'X', (5):\n '-', (6): 'O', (7): 'X', (8): '-', (9): '-'}\n computer1.auto_move()\n self.assertEqual({(1): 'O', (2): '-', (3): '-', (4): 'X', (5): 'X',\n (6): 'O', (7): 'X', (8): '-', (9): '-'}, game1.game_board)\n\n def test_oppcorner7(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): 'O', (4): '-', (5):\n 'X', (6): '-', (7): '-', (8): '-', (9): '-'}\n computer1.auto_move()\n self.assertEqual({(1): '-', (2): '-', (3): 'O', (4): '-', (5): 'X',\n (6): '-', (7): 'X', (8): '-', (9): '-'}, game1.game_board)\n\n def test_oppcorner1(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): '-', (4): 'O', (5):\n 'X', (6): 'X', (7): '-', (8): '-', (9): 'O'}\n computer1.auto_move()\n self.assertEqual({(1): 'X', (2): '-', (3): '-', (4): 'O', (5): 'X',\n (6): 'X', (7): '-', (8): '-', (9): 'O'}, game1.game_board)\n\n def test_oppcorner3(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): '-', (4): '-', (5):\n 'X', (6): '-', (7): 'O', (8): '-', (9): '-'}\n computer1.auto_move()\n self.assertEqual({(1): '-', (2): '-', (3): 'X', (4): '-', (5): 'X',\n (6): '-', (7): 'O', (8): '-', (9): '-'}, game1.game_board)\n\n def test_oppcorner9(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): 'O', (2): '-', (3): '-', (4): '-', (5):\n 'X', (6): '-', (7): '-', (8): '-', (9): '-'}\n computer1.auto_move()\n self.assertEqual({(1): 'O', (2): '-', (3): '-', (4): '-', (5): 'X',\n (6): '-', (7): '-', (8): '-', (9): 'X'}, game1.game_board)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass pVpTestCase(unittest.TestCase):\n\n def twoplayer_setup(self):\n game1 = TicTacToe.Game()\n player1 = TicTacToe.Player('X', game1)\n player2 = TicTacToe.Player('O', game1)\n return game1, player1, player2\n\n def test_mock_game1(self):\n game1, player1, player2 = self.twoplayer_setup()\n p1moves = ['top left', 'middle', 'bottom right']\n p2moves = ['top middle', 'bottom left', 'top right']\n winner = game1.play_test(player1, player2, p1moves, p2moves)\n self.assertEqual(player1, winner)\n\n def test_mock_game2(self):\n game1, player1, player2 = self.twoplayer_setup()\n p1moves = ['top right', 'middle', 'bottom right']\n p2moves = ['top left', 'middle left', 'bottom left']\n winner = game1.play_test(player1, player2, p1moves, p2moves)\n self.assertEqual(player2, winner)\n\n def test_mock_game3(self):\n game1, player1, player2 = self.twoplayer_setup()\n p1moves = ['top right', 'middle top', 'middle', 'bottom right',\n 'middle left']\n p2moves = ['top left', 'middle right', 'bottom left', 'bottom middle']\n winner = game1.play_test(player1, player2, p1moves, p2moves)\n self.assertEqual(None, winner)\n\n\nclass CvPTestCase(unittest.TestCase):\n\n def onecompplayer_setup(self):\n game1 = TicTacToe.Game()\n computer1 = TicTacToe.Computer('X', game1)\n player2 = TicTacToe.Player('O', game1)\n return game1, computer1, player2\n\n def test_place3(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): 'X', (2): 'X', (3): '-', (4): '-', (5):\n '-', (6): '-', (7): '-', (8): '-', (9): '-'}\n p2moves = []\n winner = game1.play_comp_test(computer1, player2, p2moves)\n self.assertEqual(computer1, winner)\n self.assertEqual({(1): 'X', (2): 'X', (3): 'X', (4): '-', (5): '-',\n (6): '-', (7): '-', (8): '-', (9): '-'}, game1.game_board)\n\n def test_place2(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): '-', (4): '-', (5):\n 'X', (6): '-', (7): '-', (8): 'X', (9): '-'}\n p2moves = []\n winner = game1.play_comp_test(computer1, player2, p2moves)\n self.assertEqual(computer1, winner)\n self.assertEqual({(1): '-', (2): 'X', (3): '-', (4): '-', (5): 'X',\n (6): '-', (7): '-', (8): 'X', (9): '-'}, game1.game_board)\n\n def test_place8(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): '-', (4): '-', (5):\n '-', (6): '-', (7): 'X', (8): '-', (9): 'X'}\n p2moves = []\n winner = game1.play_comp_test(computer1, player2, p2moves)\n self.assertEqual(computer1, winner)\n self.assertEqual({(1): '-', (2): '-', (3): '-', (4): '-', (5): '-',\n (6): '-', (7): 'X', (8): 'X', (9): 'X'}, game1.game_board)\n\n def test_block5(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): '-', (4): 'O', (5):\n '-', (6): 'O', (7): '-', (8): '-', (9): '-'}\n computer1.auto_move()\n self.assertEqual({(1): '-', (2): '-', (3): '-', (4): 'O', (5): 'X',\n (6): 'O', (7): '-', (8): '-', (9): '-'}, game1.game_board)\n\n def test_block7(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): 'O', (4): '-', (5):\n 'O', (6): '-', (7): '-', (8): '-', (9): '-'}\n computer1.auto_move()\n self.assertEqual({(1): '-', (2): '-', (3): 'O', (4): '-', (5): 'O',\n (6): '-', (7): 'X', (8): '-', (9): '-'}, game1.game_board)\n\n def test_block3(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): '-', (4): 
'-', (5):\n '-', (6): 'O', (7): '-', (8): '-', (9): 'O'}\n computer1.auto_move()\n self.assertEqual({(1): '-', (2): '-', (3): 'X', (4): '-', (5): '-',\n (6): 'O', (7): '-', (8): '-', (9): 'O'}, game1.game_board)\n\n def test_center_empty(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): '-', (4): '-', (5):\n '-', (6): '-', (7): '-', (8): '-', (9): '-'}\n computer1.auto_move()\n self.assertEqual({(1): '-', (2): '-', (3): '-', (4): '-', (5): 'X',\n (6): '-', (7): '-', (8): '-', (9): '-'}, game1.game_board)\n\n def test_center_nonempty(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): 'O', (2): '-', (3): '-', (4): 'X', (5):\n '-', (6): 'O', (7): 'X', (8): '-', (9): '-'}\n computer1.auto_move()\n self.assertEqual({(1): 'O', (2): '-', (3): '-', (4): 'X', (5): 'X',\n (6): 'O', (7): 'X', (8): '-', (9): '-'}, game1.game_board)\n\n def test_oppcorner7(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): 'O', (4): '-', (5):\n 'X', (6): '-', (7): '-', (8): '-', (9): '-'}\n computer1.auto_move()\n self.assertEqual({(1): '-', (2): '-', (3): 'O', (4): '-', (5): 'X',\n (6): '-', (7): 'X', (8): '-', (9): '-'}, game1.game_board)\n\n def test_oppcorner1(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): '-', (4): 'O', (5):\n 'X', (6): 'X', (7): '-', (8): '-', (9): 'O'}\n computer1.auto_move()\n self.assertEqual({(1): 'X', (2): '-', (3): '-', (4): 'O', (5): 'X',\n (6): 'X', (7): '-', (8): '-', (9): 'O'}, game1.game_board)\n\n def test_oppcorner3(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): '-', (2): '-', (3): '-', (4): '-', (5):\n 'X', (6): '-', (7): 'O', (8): '-', (9): '-'}\n computer1.auto_move()\n self.assertEqual({(1): '-', (2): '-', (3): 'X', (4): '-', (5): 'X',\n (6): '-', (7): 'O', (8): '-', (9): '-'}, game1.game_board)\n\n def test_oppcorner9(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {(1): 'O', (2): '-', (3): '-', (4): '-', (5):\n 'X', (6): '-', (7): '-', (8): '-', (9): '-'}\n computer1.auto_move()\n self.assertEqual({(1): 'O', (2): '-', (3): '-', (4): '-', (5): 'X',\n (6): '-', (7): '-', (8): '-', (9): 'X'}, game1.game_board)\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import unittest\nimport TicTacToe\n\nclass pVpTestCase(unittest.TestCase):\n # def test_something(self):\n # self.assertEqual(True, False)\n\n def twoplayer_setup(self):\n game1 = TicTacToe.Game()\n player1 = TicTacToe.Player('X', game1)\n player2 = TicTacToe.Player('O', game1)\n return (game1, player1, player2)\n\n #Player 1 wins\n def test_mock_game1(self):\n game1, player1, player2 = self.twoplayer_setup()\n p1moves = ['top left', 'middle', 'bottom right']\n p2moves = ['top middle', 'bottom left', 'top right']\n winner = game1.play_test(player1, player2, p1moves, p2moves)\n self.assertEqual(player1, winner)\n\n #Player 2 wins\n def test_mock_game2(self):\n game1, player1, player2 = self.twoplayer_setup()\n p1moves = ['top right', 'middle', 'bottom right']\n p2moves = ['top left', 'middle left', 'bottom left']\n winner = game1.play_test(player1, player2, p1moves, p2moves)\n self.assertEqual(player2, winner)\n\n #Draw\n def test_mock_game3(self):\n game1, player1, player2 = self.twoplayer_setup()\n p1moves = ['top right', 'middle top', 'middle', 'bottom right', 'middle left']\n p2moves = ['top left', 'middle right', 'bottom left', 'bottom middle']\n winner = game1.play_test(player1, player2, p1moves, p2moves)\n self.assertEqual(None, winner)\n\nclass CvPTestCase(unittest.TestCase):\n\n def onecompplayer_setup(self):\n game1 = TicTacToe.Game()\n computer1 = TicTacToe.Computer('X', game1)\n player2 = TicTacToe.Player('O', game1)\n return (game1, computer1, player2)\n\n def test_place3(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {1:\"X\", 2:\"X\",3:\"-\",\n 4:\"-\", 5:\"-\", 6:\"-\",\n 7:\"-\", 8:\"-\", 9:\"-\"}\n p2moves = []\n winner = game1.play_comp_test(computer1, player2, p2moves)\n self.assertEqual(computer1, winner)\n self.assertEqual({1:\"X\", 2:\"X\",3:\"X\",\n 4:\"-\", 5:\"-\", 6:\"-\",\n 7:\"-\", 8:\"-\", 9:\"-\"},\n game1.game_board\n )\n\n def test_place2(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {1:\"-\", 2:\"-\",3:\"-\",\n 4:\"-\", 5:\"X\", 6:\"-\",\n 7:\"-\", 8:\"X\", 9:\"-\"}\n p2moves = []\n winner = game1.play_comp_test(computer1, player2, p2moves)\n self.assertEqual(computer1, winner)\n self.assertEqual({1:\"-\", 2:\"X\",3:\"-\",\n 4:\"-\", 5:\"X\", 6:\"-\",\n 7:\"-\", 8:\"X\", 9:\"-\"},\n game1.game_board\n )\n\n def test_place8(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {1: \"-\", 2: \"-\", 3: \"-\",\n 4: \"-\", 5: \"-\", 6: \"-\",\n 7: \"X\", 8: \"-\", 9: \"X\"}\n p2moves = []\n winner = game1.play_comp_test(computer1, player2, p2moves)\n self.assertEqual(computer1, winner)\n self.assertEqual({1: \"-\", 2: \"-\", 3: \"-\",\n 4: \"-\", 5: \"-\", 6: \"-\",\n 7: \"X\", 8: \"X\", 9: \"X\"},\n game1.game_board\n )\n\n def test_block5(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {1: \"-\", 2: \"-\", 3: \"-\",\n 4: \"O\", 5: \"-\", 6: \"O\",\n 7: \"-\", 8: \"-\", 9: \"-\"}\n computer1.auto_move()\n self.assertEqual({1: \"-\", 2: \"-\", 3: \"-\",\n 4: \"O\", 5: \"X\", 6: \"O\",\n 7: \"-\", 8: \"-\", 9: \"-\"},\n game1.game_board\n )\n\n def test_block7(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {1: \"-\", 2: \"-\", 3: \"O\",\n 4: \"-\", 5: \"O\", 6: \"-\",\n 7: \"-\", 8: \"-\", 9: \"-\"}\n computer1.auto_move()\n self.assertEqual({1: \"-\", 2: \"-\", 3: \"O\",\n 4: \"-\", 5: \"O\", 6: \"-\",\n 7: \"X\", 8: \"-\", 9: \"-\"},\n game1.game_board\n )\n\n def 
test_block3(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {1: \"-\", 2: \"-\", 3: \"-\",\n 4: \"-\", 5: \"-\", 6: \"O\",\n 7: \"-\", 8: \"-\", 9: \"O\"}\n computer1.auto_move()\n self.assertEqual({1: \"-\", 2: \"-\", 3: \"X\",\n 4: \"-\", 5: \"-\", 6: \"O\",\n 7: \"-\", 8: \"-\", 9: \"O\"},\n game1.game_board\n )\n\n def test_center_empty(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {1: \"-\", 2: \"-\", 3: \"-\",\n 4: \"-\", 5: \"-\", 6: \"-\",\n 7: \"-\", 8: \"-\", 9: \"-\"}\n computer1.auto_move()\n self.assertEqual({1: \"-\", 2: \"-\", 3: \"-\",\n 4: \"-\", 5: \"X\", 6: \"-\",\n 7: \"-\", 8: \"-\", 9: \"-\"},\n game1.game_board\n )\n\n def test_center_nonempty(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {1: \"O\", 2: \"-\", 3: \"-\",\n 4: \"X\", 5: \"-\", 6: \"O\",\n 7: \"X\", 8: \"-\", 9: \"-\"}\n computer1.auto_move()\n self.assertEqual({1: \"O\", 2: \"-\", 3: \"-\",\n 4: \"X\", 5: \"X\", 6: \"O\",\n 7: \"X\", 8: \"-\", 9: \"-\"},\n game1.game_board\n )\n\n def test_oppcorner7(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {1: \"-\", 2: \"-\", 3: \"O\",\n 4: \"-\", 5: \"X\", 6: \"-\",\n 7: \"-\", 8: \"-\", 9: \"-\"}\n computer1.auto_move()\n self.assertEqual({1: \"-\", 2: \"-\", 3: \"O\",\n 4: \"-\", 5: \"X\", 6: \"-\",\n 7: \"X\", 8: \"-\", 9: \"-\"},\n game1.game_board\n )\n\n def test_oppcorner1(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {1: \"-\", 2: \"-\", 3: \"-\",\n 4: \"O\", 5: \"X\", 6: \"X\",\n 7: \"-\", 8: \"-\", 9: \"O\"}\n computer1.auto_move()\n self.assertEqual({1: \"X\", 2: \"-\", 3: \"-\",\n 4: \"O\", 5: \"X\", 6: \"X\",\n 7: \"-\", 8: \"-\", 9: \"O\"},\n game1.game_board\n )\n\n def test_oppcorner3(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {1: \"-\", 2: \"-\", 3: \"-\",\n 4: \"-\", 5: \"X\", 6: \"-\",\n 7: \"O\", 8: \"-\", 9: \"-\"}\n computer1.auto_move()\n self.assertEqual({1: \"-\", 2: \"-\", 3: \"X\",\n 4: \"-\", 5: \"X\", 6: \"-\",\n 7: \"O\", 8: \"-\", 9: \"-\"},\n game1.game_board\n )\n\n def test_oppcorner9(self):\n game1, computer1, player2 = self.onecompplayer_setup()\n game1.game_board = {1: \"O\", 2: \"-\", 3: \"-\",\n 4: \"-\", 5: \"X\", 6: \"-\",\n 7: \"-\", 8: \"-\", 9: \"-\"}\n computer1.auto_move()\n self.assertEqual({1: \"O\", 2: \"-\", 3: \"-\",\n 4: \"-\", 5: \"X\", 6: \"-\",\n 7: \"-\", 8: \"-\", 9: \"X\"},\n game1.game_board\n )\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-ids": [
13,
15,
17,
20,
22
]
}
|
[
13,
15,
17,
20,
22
] |
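The tests in this record pin down a move priority for TicTacToe.Computer.auto_move: complete a winning line, block the opponent's line, take the center, then answer an opponent's corner with the opposite corner. The implementation itself is not part of the record, so the sketch below only reconstructs that priority order; the names pick_move and WIN_LINES are assumptions, not the original API.

# Sketch of the move priority the tests above pin down; names (pick_move,
# WIN_LINES) are assumptions -- the real TicTacToe.Computer is not shown.
WIN_LINES = [(1, 2, 3), (4, 5, 6), (7, 8, 9),
             (1, 4, 7), (2, 5, 8), (3, 6, 9),
             (1, 5, 9), (3, 5, 7)]

def pick_move(board, me='X', opp='O'):
    """Return the cell to play: win > block > center > opposite corner."""
    for mark in (me, opp):  # complete our own line first, then block theirs
        for line in WIN_LINES:
            vals = [board[c] for c in line]
            if vals.count(mark) == 2 and '-' in vals:
                return line[vals.index('-')]
    if board[5] == '-':  # take the center if it is free
        return 5
    for corner, opposite in ((1, 9), (3, 7), (9, 1), (7, 3)):
        if board[corner] == opp and board[opposite] == '-':
            return opposite  # answer an opponent corner with the opposite one
    return next(c for c in board if board[c] == '-')  # any remaining cell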
from joecceasy import Easy
def main():
paths = ['..','.']
absOfEntries = [ i.abs for i in Easy.WalkAnIter(paths) ]
for i in absOfEntries:
print( i )
if __name__=='__main__':
main()
"""
def main(maxEntries = 99):
i = -1
print( "Walker test, Walking current directory:" )
for entry in Easy.WalkAnIter( ['.'] ):
i += 1 ## because i start at -1, 1st run of line will be 0
if i > maxEntries:
break
print(entry.abs)
print( ' \n ' )
"""
#isFileByPython = os.path.isfile(entry.abs)
# print( 'entry: ', entry.name, 'f', entry.isFile, 'd', entry.isDir,
# 'fa', entry.isFileAt, 'da', entry.isDirAt, 'pf', isFileByPython, se#p=' ')
#end='' )
#print( entry.abs, entry.isFileAt, entry.isDirAt, sep=' ' )
#print( entry.__dict__ )
|
normal
|
{
"blob_id": "b720a52f1c2e6e6be7c0887cd94441d248382242",
"index": 1836,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n paths = ['..', '.']\n absOfEntries = [i.abs for i in Easy.WalkAnIter(paths)]\n for i in absOfEntries:\n print(i)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n paths = ['..', '.']\n absOfEntries = [i.abs for i in Easy.WalkAnIter(paths)]\n for i in absOfEntries:\n print(i)\n\n\nif __name__ == '__main__':\n main()\n<mask token>\n",
"step-4": "from joecceasy import Easy\n\n\ndef main():\n paths = ['..', '.']\n absOfEntries = [i.abs for i in Easy.WalkAnIter(paths)]\n for i in absOfEntries:\n print(i)\n\n\nif __name__ == '__main__':\n main()\n<mask token>\n",
"step-5": "from joecceasy import Easy\r\n\r\ndef main():\r\n \r\n paths = ['..','.']\r\n absOfEntries = [ i.abs for i in Easy.WalkAnIter(paths) ]\r\n for i in absOfEntries:\r\n print( i )\r\n \r\nif __name__=='__main__':\r\n main()\r\n \r\n \r\n\"\"\"\r\ndef main(maxEntries = 99):\r\n i = -1\r\n print( \"Walker test, Walking current directory:\" )\r\n for entry in Easy.WalkAnIter( ['.'] ):\r\n i += 1 ## because i start at -1, 1st run of line will be 0\r\n if i > maxEntries:\r\n break\r\n print(entry.abs)\r\n print( ' \\n ' )\r\n\"\"\"\r\n\r\n#isFileByPython = os.path.isfile(entry.abs)\r\n# print( 'entry: ', entry.name, 'f', entry.isFile, 'd', entry.isDir,\r\n# 'fa', entry.isFileAt, 'da', entry.isDirAt, 'pf', isFileByPython, se#p=' ')\r\n#end='' )\r\n#print( entry.abs, entry.isFileAt, entry.isDirAt, sep=' ' )\r\n#print( entry.__dict__ )",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
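Easy.WalkAnIter is used above as a flat iterator over filesystem entries exposing .abs, .isFile and .isDir attributes. The joecceasy internals are not included in the record, so the generator below is only an assumed equivalent built on os.walk:

import os
from types import SimpleNamespace

def walk_an_iter(paths):
    # Rough stand-in for Easy.WalkAnIter, inferred from the usage above;
    # the real joecceasy behaviour may differ.
    for root in paths:
        for dirpath, dirnames, filenames in os.walk(root):
            for name in dirnames + filenames:
                full = os.path.abspath(os.path.join(dirpath, name))
                yield SimpleNamespace(
                    name=name,
                    abs=full,
                    isFile=os.path.isfile(full),
                    isDir=os.path.isdir(full),
                )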
# -*- coding: utf-8 -*-
from sklearn.feature_extraction.text import TfidfVectorizer
import sentimentAnalysis as sA
import sys
import os
import numpy as np
from sklearn import decomposition
from gensim import corpora, models
if len(sys.argv) > 1:
keyword = sys.argv[1]
else:
keyword = 'data'
locationList = ["","Dallas","NY","SF","LA","Chicago","Washington","Atlanta"]
#Calculating the highest positive and negative comments for all locations and without any location constraint
for location in locationList:
resultPolarityTrump, resultSubjectivityTrump = sA.main("trump",location)
resultPolarityHillary, resultSubjectivityHillary = sA.main("hillary",location)
resultPolarityObama, resultSubjectivityObama = sA.main("obama",location)
print("Trump:",resultPolarityTrump, resultSubjectivityTrump)
print("Hillary:",resultPolarityHillary, resultSubjectivityHillary)
print("Obama:",resultPolarityObama, resultSubjectivityObama)
if resultPolarityObama > resultPolarityTrump and resultPolarityObama > resultPolarityHillary:
highestPol = "Obama"#resultPolarityObama
elif resultPolarityTrump > resultPolarityObama and resultPolarityTrump > resultPolarityHillary:
highestPol = "Trump"#resultPolarityTrump
else:
highestPol = "Hillary"#resultPolarityHillary
if resultSubjectivityObama > resultSubjectivityTrump and resultSubjectivityObama > resultSubjectivityHillary:
highestSub = "Obama"#resultSubjectivityObama
elif resultSubjectivityTrump > resultSubjectivityObama and resultSubjectivityTrump > resultSubjectivityHillary:
highestSub = "Trump"#resultSubjectivityTrump
else:
highestSub = "Hillary"#resultSubjectivityHillary
print("{} has highest positive comments.".format(highestPol))
print("{} has highest negative comments.".format(highestSub))
#JSON Dataset that has tweets
corpus=['tweet_stream_hillary.json','tweet_stream_obama.json','tweet_stream_trump.json']
#Topic Analysis, LDA
fname=[]
corpus=[]
docs=[]
corpus_root='Corpus Data'
for filename in os.listdir(corpus_root):
file = open(os.path.join(corpus_root, filename), "r")
doc = file.read()
words=doc.split()
file.close()
fname.append(filename)
corpus.append(doc)
docs.append(words)
vectorizer = TfidfVectorizer(stop_words='english', min_df=2)
dtm = vectorizer.fit_transform(corpus)
vocab = vectorizer.get_feature_names()
num_topics=3
num_top_words=10
clf = decomposition.NMF(n_components=num_topics, random_state=1)
doctopic = clf.fit_transform(dtm)
    print(num_topics, clf.reconstruction_err_)
topic_words = []
for topic in clf.components_:
word_idx = np.argsort(topic)[::-1][0:num_top_words]
topic_words.append([vocab[i] for i in word_idx])
for t in range(len(topic_words)):
print "Topic {}: {}".format(t, ' '.join(topic_words[t][:15]))
dic = corpora.Dictionary(docs)
corp = [dic.doc2bow(text) for text in docs]
tfidf = models.TfidfModel(corp)
corpus_tfidf = tfidf[corp]
model = models.ldamodel.LdaModel(corpus_tfidf, num_topics=num_topics, id2word=dic, update_every=1, passes=100)
print("LDA model")
topics_found = model.print_topics(20)
counter = 1
for t in topics_found:
print("Topic #{} {}".format(counter, t))
counter += 1
topics_found2 = model.print_topics(50)
counter2 = 1
for t in topics_found2:
print("Topic #{} {}".format(counter2, t))
counter2 += 1
|
normal
|
{
"blob_id": "ee47b60274ed2eb53a05203e0086d7815bcaaa6e",
"index": 7759,
"step-1": "# -*- coding: utf-8 -*-\r\n\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nimport sentimentAnalysis as sA\r\nimport sys\r\nimport os\r\nimport numpy as np\r\nfrom sklearn import decomposition\r\nfrom gensim import corpora, models\r\n\r\nif len(sys.argv) > 1:\r\n keyword = sys.argv[1]\r\nelse:\r\n keyword = 'data'\r\n \r\nlocationList = [\"\",\"Dallas\",\"NY\",\"SF\",\"LA\",\"Chicago\",\"Washington\",\"Atlanta\"]\r\n\r\n#Calculating the highest positive and negative comments for all locations and without any location constraint \r\nfor location in locationList:\r\n resultPolarityTrump, resultSubjectivityTrump = sA.main(\"trump\",location)\r\n resultPolarityHillary, resultSubjectivityHillary = sA.main(\"hillary\",location)\r\n resultPolarityObama, resultSubjectivityObama = sA.main(\"obama\",location)\r\n print(\"Trump:\",resultPolarityTrump, resultSubjectivityTrump)\r\n print(\"Hillary:\",resultPolarityHillary, resultSubjectivityHillary)\r\n print(\"Obama:\",resultPolarityObama, resultSubjectivityObama)\r\n \r\n if resultPolarityObama > resultPolarityTrump and resultPolarityObama > resultPolarityHillary:\r\n highestPol = \"Obama\"#resultPolarityObama\r\n elif resultPolarityTrump > resultPolarityObama and resultPolarityTrump > resultPolarityHillary:\r\n highestPol = \"Trump\"#resultPolarityTrump\r\n else:\r\n highestPol = \"Hillary\"#resultPolarityHillary\r\n \r\n if resultSubjectivityObama > resultSubjectivityTrump and resultSubjectivityObama > resultSubjectivityHillary:\r\n highestSub = \"Obama\"#resultSubjectivityObama\r\n elif resultSubjectivityTrump > resultSubjectivityObama and resultSubjectivityTrump > resultSubjectivityHillary:\r\n highestSub = \"Trump\"#resultSubjectivityTrump\r\n else:\r\n highestSub = \"Hillary\"#resultSubjectivityHillary\r\n \r\n print(\"{} has highest positive comments.\".format(highestPol))\r\n print(\"{} has highest negative comments.\".format(highestSub))\r\n \r\n #JSON Dataset that has tweets \r\n corpus=['tweet_stream_hillary.json','tweet_stream_obama.json','tweet_stream_trump.json']\r\n \r\n #Topic Analysis, LDA \r\n fname=[]\r\n corpus=[]\r\n docs=[]\r\n corpus_root='Corpus Data'\r\n for filename in os.listdir(corpus_root):\r\n file = open(os.path.join(corpus_root, filename), \"r\")\r\n doc = file.read()\r\n words=doc.split()\r\n file.close()\r\n fname.append(filename)\r\n corpus.append(doc)\r\n docs.append(words)\r\n \r\n vectorizer = TfidfVectorizer(stop_words='english', min_df=2)\r\n dtm = vectorizer.fit_transform(corpus)\r\n vocab = vectorizer.get_feature_names()\r\n \r\n num_topics=3\r\n num_top_words=10\r\n clf = decomposition.NMF(n_components=num_topics, random_state=1)\r\n doctopic = clf.fit_transform(dtm)\r\n print num_topics, clf.reconstruction_err_\r\n \r\n topic_words = []\r\n for topic in clf.components_:\r\n word_idx = np.argsort(topic)[::-1][0:num_top_words]\r\n topic_words.append([vocab[i] for i in word_idx])\r\n \r\n for t in range(len(topic_words)):\r\n print \"Topic {}: {}\".format(t, ' '.join(topic_words[t][:15]))\r\n \r\n dic = corpora.Dictionary(docs)\r\n corp = [dic.doc2bow(text) for text in docs]\r\n tfidf = models.TfidfModel(corp)\r\n corpus_tfidf = tfidf[corp]\r\n model = models.ldamodel.LdaModel(corpus_tfidf, num_topics=num_topics, id2word=dic, update_every=1, passes=100)\r\n print(\"LDA model\")\r\n topics_found = model.print_topics(20)\r\n counter = 1\r\n for t in topics_found:\r\n print(\"Topic #{} {}\".format(counter, t))\r\n counter += 1\r\n topics_found2 = model.print_topics(50)\r\n counter2 = 
1\r\n for t in topics_found2:\r\n print(\"Topic #{} {}\".format(counter2, t))\r\n counter2 += 1\r\n ",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
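The top-word loop in this record relies on np.argsort returning ascending indices, reversed and truncated to rank vocabulary terms per topic. A tiny self-contained illustration of that indexing, with made-up weights rather than the record's data:

import numpy as np

vocab = ['tax', 'vote', 'email', 'wall']
topic = np.array([0.1, 0.7, 0.05, 0.15])   # made-up NMF component weights
top_idx = np.argsort(topic)[::-1][:2]      # ascending sort, reversed, top k
print([vocab[i] for i in top_idx])         # -> ['vote', 'wall']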
# Write a function that receives a string as a parameter and returns a dictionary in which the keys are the characters in the character string and the values are the number of occurrences of that character in the given text.
# Example: For string "Ana has apples." given as a parameter the function will return the dictionary: {'A': 1, 'n': 1, 'a': 3, ' ': 2, 'h': 1, 's': 2, 'p': 2, 'l': 1, 'e': 1, '.': 1}.
# variant 1
string=input("Input your string: ")
def funct(string):
dict={}
for i in string:
if i in dict:
dict[i]+=1
else:
dict[i]= 1
return dict
print(funct(string))
# variant 2
from collections import Counter
def counter():
string=input("Input your string :")
result=Counter(string)
return result
print(counter())
|
normal
|
{
"blob_id": "14807568af046594644095a2682e0eba4f445b26",
"index": 8053,
"step-1": "<mask token>\n\n\ndef funct(string):\n dict = {}\n for i in string:\n if i in dict:\n dict[i] += 1\n else:\n dict[i] = 1\n return dict\n\n\n<mask token>\n\n\ndef counter():\n string = input('Input your string :')\n result = Counter(string)\n return result\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef funct(string):\n dict = {}\n for i in string:\n if i in dict:\n dict[i] += 1\n else:\n dict[i] = 1\n return dict\n\n\nprint(funct(string))\n<mask token>\n\n\ndef counter():\n string = input('Input your string :')\n result = Counter(string)\n return result\n\n\nprint(counter())\n",
"step-3": "string = input('Input your string: ')\n\n\ndef funct(string):\n dict = {}\n for i in string:\n if i in dict:\n dict[i] += 1\n else:\n dict[i] = 1\n return dict\n\n\nprint(funct(string))\n<mask token>\n\n\ndef counter():\n string = input('Input your string :')\n result = Counter(string)\n return result\n\n\nprint(counter())\n",
"step-4": "string = input('Input your string: ')\n\n\ndef funct(string):\n dict = {}\n for i in string:\n if i in dict:\n dict[i] += 1\n else:\n dict[i] = 1\n return dict\n\n\nprint(funct(string))\nfrom collections import Counter\n\n\ndef counter():\n string = input('Input your string :')\n result = Counter(string)\n return result\n\n\nprint(counter())\n",
"step-5": "# Write a function that receives a string as a parameter and returns a dictionary in which the keys are the characters in the character string and the values are the number of occurrences of that character in the given text.\n# Example: For string \"Ana has apples.\" given as a parameter the function will return the dictionary: {'A': 1, '': 2, 'n': 1, 'a': 2, 'r': 2, '.': 1}.\n\n# varianta 1\nstring=input(\"Input your string: \")\ndef funct(string):\n dict={}\n for i in string:\n if i in dict:\n dict[i]+=1\n else:\n dict[i]= 1\n return dict\nprint(funct(string))\n\n# varianta 2\nfrom collections import Counter\ndef counter():\n string=input(\"Input your string :\")\n result=Counter(string)\n return result\nprint(counter())",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
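Both variants above build the same mapping; for completeness, the counting can also be written as a single dict comprehension (quadratic in the worst case, since str.count rescans the string, which is fine at this input size):

def char_counts(text):
    # one expression per unique character
    return {ch: text.count(ch) for ch in set(text)}

print(char_counts("Ana has apples."))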
from http import HTTPStatus
#from pytest_chalice.handlers import RequestHandler
import app
from chalice.test import Client
def test_index_with_url():
with Client(app.app) as client:
response = client.http.get('/?url=https://google.com')
assert response.status_code == HTTPStatus.MOVED_PERMANENTLY
assert response.headers['Location'] is not None
def test_index_without_url():
with Client(app.app) as client:
response = client.http.get('/')
assert response.body == b'Invalid or missing url'
def test_link_received_by_sns():
with Client(app.app) as client:
with open('sns_message.txt') as f:
event = client.events.generate_sns_event(message=f.read())
with open('/tmp/event.json', 'w') as f:
import json
f.write(json.dumps(event))
response = client.lambda_.invoke('handle_link_visit', event)
assert response.payload['message'] == 'link visited'
|
normal
|
{
"blob_id": "e7e9a53d4c41448521b324d51641a46827faa692",
"index": 2607,
"step-1": "<mask token>\n\n\ndef test_index_with_url():\n with Client(app.app) as client:\n response = client.http.get('/?url=https://google.com')\n assert response.status_code == HTTPStatus.MOVED_PERMANENTLY\n assert response.headers['Location'] is not None\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_index_with_url():\n with Client(app.app) as client:\n response = client.http.get('/?url=https://google.com')\n assert response.status_code == HTTPStatus.MOVED_PERMANENTLY\n assert response.headers['Location'] is not None\n\n\n<mask token>\n\n\ndef test_link_received_by_sns():\n with Client(app.app) as client:\n with open('sns_message.txt') as f:\n event = client.events.generate_sns_event(message=f.read())\n with open('/tmp/event.json', 'w') as f:\n import json\n f.write(json.dumps(event))\n response = client.lambda_.invoke('handle_link_visit', event)\n assert response.payload['message'] == 'link visited'\n",
"step-3": "<mask token>\n\n\ndef test_index_with_url():\n with Client(app.app) as client:\n response = client.http.get('/?url=https://google.com')\n assert response.status_code == HTTPStatus.MOVED_PERMANENTLY\n assert response.headers['Location'] is not None\n\n\ndef test_index_without_url():\n with Client(app.app) as client:\n response = client.http.get('/')\n assert response.body == b'Invalid or missing url'\n\n\ndef test_link_received_by_sns():\n with Client(app.app) as client:\n with open('sns_message.txt') as f:\n event = client.events.generate_sns_event(message=f.read())\n with open('/tmp/event.json', 'w') as f:\n import json\n f.write(json.dumps(event))\n response = client.lambda_.invoke('handle_link_visit', event)\n assert response.payload['message'] == 'link visited'\n",
"step-4": "from http import HTTPStatus\nimport app\nfrom chalice.test import Client\n\n\ndef test_index_with_url():\n with Client(app.app) as client:\n response = client.http.get('/?url=https://google.com')\n assert response.status_code == HTTPStatus.MOVED_PERMANENTLY\n assert response.headers['Location'] is not None\n\n\ndef test_index_without_url():\n with Client(app.app) as client:\n response = client.http.get('/')\n assert response.body == b'Invalid or missing url'\n\n\ndef test_link_received_by_sns():\n with Client(app.app) as client:\n with open('sns_message.txt') as f:\n event = client.events.generate_sns_event(message=f.read())\n with open('/tmp/event.json', 'w') as f:\n import json\n f.write(json.dumps(event))\n response = client.lambda_.invoke('handle_link_visit', event)\n assert response.payload['message'] == 'link visited'\n",
"step-5": "from http import HTTPStatus\n#from pytest_chalice.handlers import RequestHandler\nimport app\nfrom chalice.test import Client\n\ndef test_index_with_url():\n with Client(app.app) as client:\n response = client.http.get('/?url=https://google.com')\n assert response.status_code == HTTPStatus.MOVED_PERMANENTLY\n assert response.headers['Location'] is not None\n\ndef test_index_without_url():\n with Client(app.app) as client:\n response = client.http.get('/')\n assert response.body == b'Invalid or missing url'\n\ndef test_link_received_by_sns():\n with Client(app.app) as client:\n with open('sns_message.txt') as f:\n event = client.events.generate_sns_event(message=f.read())\n with open('/tmp/event.json', 'w') as f:\n import json\n f.write(json.dumps(event))\n response = client.lambda_.invoke('handle_link_visit', event)\n assert response.payload['message'] == 'link visited'",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
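The tests above generate an SNS event and invoke a lambda named handle_link_visit that must answer {'message': 'link visited'}. The record does not include app.py, so the handler below is only a guess at its shape using Chalice's on_sns_message decorator; the app name and the topic name 'link-visits' are assumptions:

from chalice import Chalice

app = Chalice(app_name='shortener')  # app name assumed

@app.on_sns_message(topic='link-visits')
def handle_link_visit(event):
    # event.message carries the SNS payload the test read from sns_message.txt
    return {'message': 'link visited'}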
import csv
from functools import reduce
class Csvread:
def __init__(self, fpath):
self._path=fpath
with open (fpath) as file:
read_f=csv.reader(file)
print(read_f) #<_csv.reader object at 0x000002A53144DF40>
self._sheet = list(read_f)[1:] #utworzenie listy
def get_sheet(self):
return self._sheet
class Csvcalc:
def __init__(self, cont):
self._cont=cont
def row_count(self):
return len(self._cont)
def get_row (self, row_no):
return self._cont[row_no]
def col_count (self):
return len(self._cont[1])
def get_colum (self,no_col):
return list (x[no_col] for x in self._cont)
def sum_col (self,col_no):
return reduce(lambda x, y: x+y, self.get_colum(col_no))
def mul_col(self, col_no):
        # sum() does not take a function; use reduce for the running product
        return reduce(lambda x, y: x * y, self.get_colum(col_no))
csv1= Csvread('./data.csv')
print(csv1) #<__main__.Csvread object at 0x000002A5312B4040>
|
normal
|
{
"blob_id": "67793c8851e7107c6566da4e0ca5d5ffcf6341ad",
"index": 8867,
"step-1": "<mask token>\n\n\nclass Csvcalc:\n\n def __init__(self, cont):\n self._cont = cont\n\n def row_count(self):\n return len(self._cont)\n\n def get_row(self, row_no):\n return self._cont[row_no]\n\n def col_count(self):\n return len(self._cont[1])\n\n def get_colum(self, no_col):\n return list(x[no_col] for x in self._cont)\n <mask token>\n\n def mul_col(self, col_no):\n return sum(lambda x, y: x * y, self.get_colum(col_no))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Csvread:\n <mask token>\n\n def get_sheet(self):\n return self._sheet\n\n\nclass Csvcalc:\n\n def __init__(self, cont):\n self._cont = cont\n\n def row_count(self):\n return len(self._cont)\n\n def get_row(self, row_no):\n return self._cont[row_no]\n\n def col_count(self):\n return len(self._cont[1])\n\n def get_colum(self, no_col):\n return list(x[no_col] for x in self._cont)\n\n def sum_col(self, col_no):\n return reduce(lambda x, y: x + y, self.get_colum(col_no))\n\n def mul_col(self, col_no):\n return sum(lambda x, y: x * y, self.get_colum(col_no))\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Csvread:\n\n def __init__(self, fpath):\n self._path = fpath\n with open(fpath) as file:\n read_f = csv.reader(file)\n print(read_f)\n self._sheet = list(read_f)[1:]\n\n def get_sheet(self):\n return self._sheet\n\n\nclass Csvcalc:\n\n def __init__(self, cont):\n self._cont = cont\n\n def row_count(self):\n return len(self._cont)\n\n def get_row(self, row_no):\n return self._cont[row_no]\n\n def col_count(self):\n return len(self._cont[1])\n\n def get_colum(self, no_col):\n return list(x[no_col] for x in self._cont)\n\n def sum_col(self, col_no):\n return reduce(lambda x, y: x + y, self.get_colum(col_no))\n\n def mul_col(self, col_no):\n return sum(lambda x, y: x * y, self.get_colum(col_no))\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Csvread:\n\n def __init__(self, fpath):\n self._path = fpath\n with open(fpath) as file:\n read_f = csv.reader(file)\n print(read_f)\n self._sheet = list(read_f)[1:]\n\n def get_sheet(self):\n return self._sheet\n\n\nclass Csvcalc:\n\n def __init__(self, cont):\n self._cont = cont\n\n def row_count(self):\n return len(self._cont)\n\n def get_row(self, row_no):\n return self._cont[row_no]\n\n def col_count(self):\n return len(self._cont[1])\n\n def get_colum(self, no_col):\n return list(x[no_col] for x in self._cont)\n\n def sum_col(self, col_no):\n return reduce(lambda x, y: x + y, self.get_colum(col_no))\n\n def mul_col(self, col_no):\n return sum(lambda x, y: x * y, self.get_colum(col_no))\n\n\ncsv1 = Csvread('./data.csv')\nprint(csv1)\n",
"step-5": "import csv\nfrom functools import reduce\n\nclass Csvread:\n\n def __init__(self, fpath):\n self._path=fpath\n with open (fpath) as file:\n read_f=csv.reader(file)\n print(read_f) #<_csv.reader object at 0x000002A53144DF40>\n\n self._sheet = list(read_f)[1:] #utworzenie listy\n\n\n def get_sheet(self):\n return self._sheet\n\nclass Csvcalc:\n def __init__(self, cont):\n self._cont=cont\n def row_count(self):\n return len(self._cont)\n def get_row (self, row_no):\n return self._cont[row_no]\n def col_count (self):\n return len(self._cont[1])\n def get_colum (self,no_col):\n return list (x[no_col] for x in self._cont)\n def sum_col (self,col_no):\n return reduce(lambda x, y: x+y, self.get_colum(col_no))\n\n def mul_col(self, col_no):\n return sum(lambda x,y: x*y, self.get_colum(col_no))\n\n\n\n\n\ncsv1= Csvread('./data.csv')\nprint(csv1) #<__main__.Csvread object at 0x000002A5312B4040>\n\n\n\n\n",
"step-ids": [
7,
10,
11,
13,
15
]
}
|
[
7,
10,
11,
13,
15
] |
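One caveat with the Csvcalc column reductions above: csv.reader yields strings, so the running sum concatenates text and a product raises a TypeError. A numeric variant under that assumption (the helper name product_of_column is not from the record):

from functools import reduce

def product_of_column(rows, col_no):
    # convert each cell before multiplying; rows come from Csvread.get_sheet()
    return reduce(lambda acc, cell: acc * float(cell),
                  (row[col_no] for row in rows), 1.0)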
# coding:utf-8
from flask_sqlalchemy import SQLAlchemy
from config.manager import app
from config.db import db
class Category(db.Model):
__tablename__ = 'category'
    id = db.Column(db.Integer, primary_key=True)  # ID
    name = db.Column(db.String(20), nullable=False)  # account
    addtime = db.Column(db.DateTime, nullable=False)  # registration time
def __repr__(self):
return "<User %r>" % self.name
if __name__ == '__main__':
db.create_all()
|
normal
|
{
"blob_id": "743aa4ccbb9a131b5ef3d04475789d3d1da1a2fa",
"index": 2407,
"step-1": "<mask token>\n\n\nclass Category(db.Model):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Category(db.Model):\n __tablename__ = 'category'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(20), nullable=False)\n addtime = db.Column(db.DateTime, nullable=False)\n\n def __repr__(self):\n return '<User %r>' % self.name\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Category(db.Model):\n __tablename__ = 'category'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(20), nullable=False)\n addtime = db.Column(db.DateTime, nullable=False)\n\n def __repr__(self):\n return '<User %r>' % self.name\n\n\nif __name__ == '__main__':\n db.create_all()\n",
"step-4": "from flask_sqlalchemy import SQLAlchemy\nfrom config.manager import app\nfrom config.db import db\n\n\nclass Category(db.Model):\n __tablename__ = 'category'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(20), nullable=False)\n addtime = db.Column(db.DateTime, nullable=False)\n\n def __repr__(self):\n return '<User %r>' % self.name\n\n\nif __name__ == '__main__':\n db.create_all()\n",
"step-5": "# coding:utf-8\nfrom flask_sqlalchemy import SQLAlchemy\nfrom config.manager import app\nfrom config.db import db\n\n\nclass Category(db.Model):\n __tablename__ = 'category'\n id = db.Column(db.Integer, primary_key=True) # 编号\n name = db.Column(db.String(20), nullable=False) # 账号\n addtime = db.Column(db.DateTime, nullable=False) # 注册时间\n\n def __repr__(self):\n return \"<User %r>\" % self.name\n\n\nif __name__ == '__main__':\n db.create_all()\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
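A minimal usage sketch for the Category model above, assuming the Flask app and database from config.manager/config.db are already configured; none of these calls appear in the record itself:

from datetime import datetime

food = Category(name='food', addtime=datetime.now())
db.session.add(food)
db.session.commit()
print(Category.query.filter_by(name='food').first())  # -> <Category 'food'>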
from django.views.generic import (ListView, DetailView, CreateView,
DeleteView, UpdateView, TemplateView)
from django.views.generic.edit import ModelFormMixin
from django.urls import reverse_lazy
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from .models import Movie, Actor
from .forms import MovieForm
from django.http import Http404
def my_print(*args, **kwargs):
raise Http404(*args, **kwargs)
class BaseModelApi(TemplateView, ModelFormMixin):
def get_template_names(self):
prefix = self.request.method
if prefix in ['PUT', 'PATCH', 'POST']:
prefix = 'form'
name = self.model
return [f'{name}/{name}_{prefix}.html']
def get(self, request):
pass
def post(self, request):
pass
def put(self, request):
pass
def patch(self, request):
pass
def delete(self, request):
pass
def dispatch(self, request):
pass
def get_context_data(self):
pass
def get_form(self):
pass
def get_form_class(self):
name = f'{self.model}'.title()
# prefix = f'{self.request.method}'.title()
self.form_class = eval(f'{name}Form')
return self.form_class
class MoviesView(ListView):
model = Movie
context_object_name = 'movies'
class MovieView(DetailView):
model = Movie
context_object_name = 'movie'
class ActorView(DetailView):
model = Actor
context_object_name = 'actor'
@method_decorator(login_required, name='dispatch')
class MovieCreateView(CreateView):
form_class = MovieForm
template_name = 'movies/movie_form.html'
success_url = reverse_lazy('movie_all')
@method_decorator(login_required, name='dispatch')
class MovieUpdateView(UpdateView):
model = Movie
form_class = MovieForm
template_name = 'movies/movie_form.html'
success_url = reverse_lazy('movie_all')
@method_decorator(login_required, name='dispatch')
class MovieDelete(DeleteView):
model = Movie
success_url = reverse_lazy('movie_all')
|
normal
|
{
"blob_id": "a63e5186c0eb8b5ae8510b473168db3461166513",
"index": 7784,
"step-1": "<mask token>\n\n\nclass BaseModelApi(TemplateView, ModelFormMixin):\n\n def get_template_names(self):\n prefix = self.request.method\n if prefix in ['PUT', 'PATCH', 'POST']:\n prefix = 'form'\n name = self.model\n return [f'{name}/{name}_{prefix}.html']\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass MoviesView(ListView):\n model = Movie\n context_object_name = 'movies'\n\n\nclass MovieView(DetailView):\n model = Movie\n context_object_name = 'movie'\n\n\nclass ActorView(DetailView):\n model = Actor\n context_object_name = 'actor'\n\n\n@method_decorator(login_required, name='dispatch')\nclass MovieCreateView(CreateView):\n form_class = MovieForm\n template_name = 'movies/movie_form.html'\n success_url = reverse_lazy('movie_all')\n\n\n@method_decorator(login_required, name='dispatch')\nclass MovieUpdateView(UpdateView):\n model = Movie\n form_class = MovieForm\n template_name = 'movies/movie_form.html'\n success_url = reverse_lazy('movie_all')\n\n\n@method_decorator(login_required, name='dispatch')\nclass MovieDelete(DeleteView):\n model = Movie\n success_url = reverse_lazy('movie_all')\n",
"step-2": "<mask token>\n\n\nclass BaseModelApi(TemplateView, ModelFormMixin):\n\n def get_template_names(self):\n prefix = self.request.method\n if prefix in ['PUT', 'PATCH', 'POST']:\n prefix = 'form'\n name = self.model\n return [f'{name}/{name}_{prefix}.html']\n <mask token>\n <mask token>\n <mask token>\n\n def patch(self, request):\n pass\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass MoviesView(ListView):\n model = Movie\n context_object_name = 'movies'\n\n\nclass MovieView(DetailView):\n model = Movie\n context_object_name = 'movie'\n\n\nclass ActorView(DetailView):\n model = Actor\n context_object_name = 'actor'\n\n\n@method_decorator(login_required, name='dispatch')\nclass MovieCreateView(CreateView):\n form_class = MovieForm\n template_name = 'movies/movie_form.html'\n success_url = reverse_lazy('movie_all')\n\n\n@method_decorator(login_required, name='dispatch')\nclass MovieUpdateView(UpdateView):\n model = Movie\n form_class = MovieForm\n template_name = 'movies/movie_form.html'\n success_url = reverse_lazy('movie_all')\n\n\n@method_decorator(login_required, name='dispatch')\nclass MovieDelete(DeleteView):\n model = Movie\n success_url = reverse_lazy('movie_all')\n",
"step-3": "<mask token>\n\n\nclass BaseModelApi(TemplateView, ModelFormMixin):\n\n def get_template_names(self):\n prefix = self.request.method\n if prefix in ['PUT', 'PATCH', 'POST']:\n prefix = 'form'\n name = self.model\n return [f'{name}/{name}_{prefix}.html']\n\n def get(self, request):\n pass\n\n def post(self, request):\n pass\n <mask token>\n\n def patch(self, request):\n pass\n <mask token>\n\n def dispatch(self, request):\n pass\n\n def get_context_data(self):\n pass\n\n def get_form(self):\n pass\n\n def get_form_class(self):\n name = f'{self.model}'.title()\n self.form_class = eval(f'{name}Form')\n return self.form_class\n\n\nclass MoviesView(ListView):\n model = Movie\n context_object_name = 'movies'\n\n\nclass MovieView(DetailView):\n model = Movie\n context_object_name = 'movie'\n\n\nclass ActorView(DetailView):\n model = Actor\n context_object_name = 'actor'\n\n\n@method_decorator(login_required, name='dispatch')\nclass MovieCreateView(CreateView):\n form_class = MovieForm\n template_name = 'movies/movie_form.html'\n success_url = reverse_lazy('movie_all')\n\n\n@method_decorator(login_required, name='dispatch')\nclass MovieUpdateView(UpdateView):\n model = Movie\n form_class = MovieForm\n template_name = 'movies/movie_form.html'\n success_url = reverse_lazy('movie_all')\n\n\n@method_decorator(login_required, name='dispatch')\nclass MovieDelete(DeleteView):\n model = Movie\n success_url = reverse_lazy('movie_all')\n",
"step-4": "from django.views.generic import ListView, DetailView, CreateView, DeleteView, UpdateView, TemplateView\nfrom django.views.generic.edit import ModelFormMixin\nfrom django.urls import reverse_lazy\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Movie, Actor\nfrom .forms import MovieForm\nfrom django.http import Http404\n\n\ndef my_print(*args, **kwargs):\n raise Http404(*args, **kwargs)\n\n\nclass BaseModelApi(TemplateView, ModelFormMixin):\n\n def get_template_names(self):\n prefix = self.request.method\n if prefix in ['PUT', 'PATCH', 'POST']:\n prefix = 'form'\n name = self.model\n return [f'{name}/{name}_{prefix}.html']\n\n def get(self, request):\n pass\n\n def post(self, request):\n pass\n\n def put(self, request):\n pass\n\n def patch(self, request):\n pass\n\n def delete(self, request):\n pass\n\n def dispatch(self, request):\n pass\n\n def get_context_data(self):\n pass\n\n def get_form(self):\n pass\n\n def get_form_class(self):\n name = f'{self.model}'.title()\n self.form_class = eval(f'{name}Form')\n return self.form_class\n\n\nclass MoviesView(ListView):\n model = Movie\n context_object_name = 'movies'\n\n\nclass MovieView(DetailView):\n model = Movie\n context_object_name = 'movie'\n\n\nclass ActorView(DetailView):\n model = Actor\n context_object_name = 'actor'\n\n\n@method_decorator(login_required, name='dispatch')\nclass MovieCreateView(CreateView):\n form_class = MovieForm\n template_name = 'movies/movie_form.html'\n success_url = reverse_lazy('movie_all')\n\n\n@method_decorator(login_required, name='dispatch')\nclass MovieUpdateView(UpdateView):\n model = Movie\n form_class = MovieForm\n template_name = 'movies/movie_form.html'\n success_url = reverse_lazy('movie_all')\n\n\n@method_decorator(login_required, name='dispatch')\nclass MovieDelete(DeleteView):\n model = Movie\n success_url = reverse_lazy('movie_all')\n",
"step-5": "from django.views.generic import (ListView, DetailView, CreateView,\n DeleteView, UpdateView, TemplateView)\nfrom django.views.generic.edit import ModelFormMixin\nfrom django.urls import reverse_lazy\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.decorators import login_required\n\nfrom .models import Movie, Actor\nfrom .forms import MovieForm\nfrom django.http import Http404\n\n\ndef my_print(*args, **kwargs):\n raise Http404(*args, **kwargs)\n\n\nclass BaseModelApi(TemplateView, ModelFormMixin):\n\n def get_template_names(self):\n prefix = self.request.method\n if prefix in ['PUT', 'PATCH', 'POST']:\n prefix = 'form'\n name = self.model\n return [f'{name}/{name}_{prefix}.html']\n\n def get(self, request):\n pass\n\n def post(self, request):\n pass\n\n def put(self, request):\n pass\n\n def patch(self, request):\n pass\n\n def delete(self, request):\n pass\n\n def dispatch(self, request):\n pass\n\n def get_context_data(self):\n pass\n\n def get_form(self):\n pass\n\n def get_form_class(self):\n name = f'{self.model}'.title()\n # prefix = f'{self.request.method}'.title()\n self.form_class = eval(f'{name}Form')\n return self.form_class\n\n\nclass MoviesView(ListView):\n model = Movie\n context_object_name = 'movies'\n\n\nclass MovieView(DetailView):\n model = Movie\n context_object_name = 'movie'\n\n\nclass ActorView(DetailView):\n model = Actor\n context_object_name = 'actor'\n\n\n@method_decorator(login_required, name='dispatch')\nclass MovieCreateView(CreateView):\n form_class = MovieForm\n template_name = 'movies/movie_form.html'\n success_url = reverse_lazy('movie_all')\n\n\n@method_decorator(login_required, name='dispatch')\nclass MovieUpdateView(UpdateView):\n model = Movie\n form_class = MovieForm\n template_name = 'movies/movie_form.html'\n success_url = reverse_lazy('movie_all')\n\n\n@method_decorator(login_required, name='dispatch')\nclass MovieDelete(DeleteView):\n model = Movie\n success_url = reverse_lazy('movie_all')\n",
"step-ids": [
14,
15,
21,
25,
26
]
}
|
[
14,
15,
21,
25,
26
] |
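get_form_class in BaseModelApi above resolves form classes with eval, which works only while every form follows the <Model>Form naming and is importable in that scope. An explicit registry is a safer equivalent; this is a suggested alternative, not part of the original module:

FORM_REGISTRY = {'movie': MovieForm}

def get_form_class(self):
    # drop-in replacement assuming self.model is a lowercase string, as the
    # template-name logic above implies
    self.form_class = FORM_REGISTRY[self.model]
    return self.form_class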
import matplotlib.pyplot as plt
import numpy as np
from tti_explorer.contacts import he_infection_profile
plt.style.use('default')
loc = 0
# taken from He et al
gamma_params = {
'a': 2.11,
'loc': loc,
'scale': 1/0.69
}
t = 10
days = np.arange(t)
mass = he_infection_profile(t, gamma_params)
fig, ax = plt.subplots(1, figsize=(9*0.8, 5*0.8))
xaxis = np.linspace(-2, t, 1000)
ax.bar(
np.arange(5)+0.1,
[1/5, 1/5, 1/5, 1/5, 1/5],
label="Kucharski profile",
align="edge",
color="C1",
zorder=1,
alpha=0.6
)
ax.bar(days, mass, label="Discretised", align="edge", zorder=1)
ax.legend(loc="upper right")
ax.set_axis_on()
ax.set_ylabel('Secondary attack profile')
ax.set_xlabel('Days since start of infectious period')
ax.set_xticks(days)
plt.show()
# fig.savefig('./charts/inf_profile.pdf')
|
normal
|
{
"blob_id": "fa5cbbd03641d2937e4502ce459d64d20b5ee227",
"index": 8630,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.style.use('default')\n<mask token>\nax.bar(np.arange(5) + 0.1, [1 / 5, 1 / 5, 1 / 5, 1 / 5, 1 / 5], label=\n 'Kucharski profile', align='edge', color='C1', zorder=1, alpha=0.6)\nax.bar(days, mass, label='Discretised', align='edge', zorder=1)\nax.legend(loc='upper right')\nax.set_axis_on()\nax.set_ylabel('Secondary attack profile')\nax.set_xlabel('Days since start of infectious period')\nax.set_xticks(days)\nplt.show()\n",
"step-3": "<mask token>\nplt.style.use('default')\nloc = 0\ngamma_params = {'a': 2.11, 'loc': loc, 'scale': 1 / 0.69}\nt = 10\ndays = np.arange(t)\nmass = he_infection_profile(t, gamma_params)\nfig, ax = plt.subplots(1, figsize=(9 * 0.8, 5 * 0.8))\nxaxis = np.linspace(-2, t, 1000)\nax.bar(np.arange(5) + 0.1, [1 / 5, 1 / 5, 1 / 5, 1 / 5, 1 / 5], label=\n 'Kucharski profile', align='edge', color='C1', zorder=1, alpha=0.6)\nax.bar(days, mass, label='Discretised', align='edge', zorder=1)\nax.legend(loc='upper right')\nax.set_axis_on()\nax.set_ylabel('Secondary attack profile')\nax.set_xlabel('Days since start of infectious period')\nax.set_xticks(days)\nplt.show()\n",
"step-4": "import matplotlib.pyplot as plt\nimport numpy as np\nfrom tti_explorer.contacts import he_infection_profile\nplt.style.use('default')\nloc = 0\ngamma_params = {'a': 2.11, 'loc': loc, 'scale': 1 / 0.69}\nt = 10\ndays = np.arange(t)\nmass = he_infection_profile(t, gamma_params)\nfig, ax = plt.subplots(1, figsize=(9 * 0.8, 5 * 0.8))\nxaxis = np.linspace(-2, t, 1000)\nax.bar(np.arange(5) + 0.1, [1 / 5, 1 / 5, 1 / 5, 1 / 5, 1 / 5], label=\n 'Kucharski profile', align='edge', color='C1', zorder=1, alpha=0.6)\nax.bar(days, mass, label='Discretised', align='edge', zorder=1)\nax.legend(loc='upper right')\nax.set_axis_on()\nax.set_ylabel('Secondary attack profile')\nax.set_xlabel('Days since start of infectious period')\nax.set_xticks(days)\nplt.show()\n",
"step-5": "\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom tti_explorer.contacts import he_infection_profile\n\nplt.style.use('default')\nloc = 0\n# taken from He et al\ngamma_params = {\n 'a': 2.11,\n 'loc': loc,\n 'scale': 1/0.69\n}\nt = 10\ndays = np.arange(t)\n\nmass = he_infection_profile(t, gamma_params)\n\nfig, ax = plt.subplots(1, figsize=(9*0.8, 5*0.8))\nxaxis = np.linspace(-2, t, 1000)\nax.bar(\n np.arange(5)+0.1,\n [1/5, 1/5, 1/5, 1/5, 1/5],\n label=\"Kucharski profile\",\n align=\"edge\",\n color=\"C1\",\n zorder=1,\n alpha=0.6\n)\nax.bar(days, mass, label=\"Discretised\", align=\"edge\", zorder=1)\nax.legend(loc=\"upper right\")\nax.set_axis_on()\nax.set_ylabel('Secondary attack profile')\nax.set_xlabel('Days since start of infectious period')\nax.set_xticks(days)\nplt.show()\n# fig.savefig('./charts/inf_profile.pdf')\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
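he_infection_profile is imported from tti_explorer and consumed above as a length-t probability mass over days; its internals are not in the record. Under that reading, an assumed equivalent discretises the He et al. gamma density into day-long bins and normalises:

import numpy as np
from scipy import stats

def he_infection_profile_sketch(period, gamma_params):
    # integrate the gamma density over day bins, then normalise to a pmf;
    # this is an assumption about the library's behaviour, not its source
    dist = stats.gamma(**gamma_params)
    edges = np.arange(period + 1)
    mass = dist.cdf(edges[1:]) - dist.cdf(edges[:-1])
    return mass / mass.sum()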
import random
from datetime import timedelta
from typing import Union, Type, Tuple, List, Dict
from django import http
from django.test import TestCase, Client
from django.utils import timezone
from exam_web import errors
from exam_web.models import Student, AcademyGroup, uuid_str, ExamSession, \
UserSession, Question, Stage, QuestionType, ExamTicket, ExamStatus
class ApiClient(Client):
path: str
def __init__(self, path: str, student: Student = None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.student = student
self.path = path
self.headers = {'content_type': 'application/json'}
if student:
self.cookies['student'] = student.id
def path_params(self, **params):
return ApiClient(self.path.format(**params), self.student)
def get(self, **kwargs):
return super().get(self.path, data=kwargs, **self.headers)
def post(self, **json):
return super().post(self.path, data=json, **self.headers)
def __call__(self, **kwargs):
raise AttributeError('Use `get` or `post` methods instead')
class ApiTestCase(TestCase):
group: AcademyGroup
student: Student
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.group = AcademyGroup.objects.create(name='test_group')
cls.student = Student.objects.create(name='test user', group=cls.group)
@classmethod
def tearDownClass(cls):
cls.student.delete()
cls.group.delete()
super().tearDownClass()
def setup_exam_objects(self):
self.session = ExamSession.objects.create(
start_time=timezone.now(), duration=timedelta(minutes=40))
self.student_session = UserSession.objects.create(
student=self.student, exam_session=self.session)
self.questions = [
Question.objects.create(
stage=Stage.first, type=QuestionType.single, max_score=1,
text='test single question', options=['a', 'b', 'c']
),
Question.objects.create(
stage=Stage.first, type=QuestionType.multi, max_score=1,
text='test multi question', options=['a', 'b', 'c']
),
Question.objects.create(
stage=Stage.second, type=QuestionType.open, max_score=1,
text='test open question', options=None,
),
]
self.tickets = [
ExamTicket.objects.create(
student=self.student, session=self.student_session,
question=question) for question in self.questions
]
self.ticket_map = {x.id: x for x in self.tickets}
def teardown_exam_objects(self):
for ticket in self.tickets:
ticket.delete()
for question in self.questions:
question.delete()
self.student_session.delete()
def assertResponseSuccess(self, response: http.HttpResponse):
content = response.content.decode()
self.assertEqual(response.status_code, 200,
(response.status_code, content))
content = response.json()
self.assertIn('result', content, content)
return content['result']
def assertResponseError(
self, response: http.JsonResponse,
error: Union[errors.APIError, Type[errors.APIError]] = None
) -> Tuple[int, str]:
content = response.json()
self.assertGreaterEqual(response.status_code, 400,
(response.status_code, content))
self.assertIn('error', content, content)
if error is not None:
if isinstance(error, type):
error = error()
self.assertEqual(response.status_code, error.status,
(response.status_code, content))
self.assertEqual(content['error'], error.message,
(response.status_code, content))
return response.status_code, content['error']
class TestAuthorize(ApiTestCase):
authorize: ApiClient
def setUp(self):
super().setUp()
self.authorize = ApiClient('/api/authorize')
def test_authorized(self):
response = self.authorize.post(token=self.student.id)
result = self.assertResponseSuccess(response)
self.assertEqual(response.cookies['student'].value, self.student.id)
self.assertEqual(result['name'], self.student.name)
self.assertEqual(result['group'], self.group.name)
self.assertEqual(result['id'], self.student.id)
def test_authorized_unknown_token(self):
response = self.authorize.post(token=uuid_str())
self.assertResponseError(response, errors.Unauthorized)
def test_authorized_invalid_params(self):
response = self.authorize.post()
self.assertResponseError(response, errors.InvalidParameter('token'))
response = self.authorize.post(token=12345678)
self.assertResponseError(response, errors.InvalidParameter('token'))
response = self.authorize.get()
self.assertEqual(response.status_code, 405)
class TestGetExamSessions(ApiTestCase):
get_exams: ApiClient
session: ExamSession
student_session: UserSession
questions: List[Question]
tickets: List[ExamTicket]
def setUp(self):
super().setUp()
self.get_exams = ApiClient('/api/exams', student=self.student)
self.setup_exam_objects()
def tearDown(self):
self.teardown_exam_objects()
super().tearDown()
def test_get_exams_available(self):
result = self.assertResponseSuccess(self.get_exams.get())
self.assertIsInstance(result, list)
self.assertEqual(len(result), 1)
user_session = result[0]
self.assertEqual(
user_session['started_at'], self.session.start_time.isoformat())
self.assertEqual(user_session['duration'],
self.session.duration.total_seconds() / 60)
self.assertEqual(user_session['checked_in'], False)
self.assertEqual(user_session['finished_at'], None)
self.assertEqual(user_session['status'], ExamStatus.available.value)
self.assertEqual(user_session['score'], None)
def test_get_exams_check_in(self):
self.student_session.started_at = timezone.now()
self.student_session.save()
result = self.assertResponseSuccess(self.get_exams.get())
user_session = result[0]
self.assertEqual(user_session['checked_in'], True)
def test_get_exams_submitted(self):
now = timezone.now()
self.student_session.started_at = timezone.now()
self.student_session.finished_at = now
self.student_session.save()
result = self.assertResponseSuccess(self.get_exams.get())
user_session = result[0]
self.assertEqual(user_session['finished_at'], now.isoformat())
self.assertEqual(user_session['status'], ExamStatus.submitted)
self.assertEqual(user_session['score'], None)
def test_get_exams_non_available(self):
self.session.start_time = timezone.now() + self.session.duration
self.session.save()
result = self.assertResponseSuccess(self.get_exams.get())
user_session = result[0]
self.assertEqual(user_session['started_at'],
self.session.start_time.isoformat())
self.assertEqual(user_session['finished_at'], None)
self.assertEqual(user_session['status'], ExamStatus.not_available)
def test_get_exams_unauthorized(self):
self.get_exams.cookies = {}
self.assertResponseError(self.get_exams.get(), errors.Unauthorized)
response = self.get_exams.post()
self.assertEqual(response.status_code, 405)
def test_get_exams_score(self):
for ticket in self.tickets:
ticket.score = 1.0
ticket.save()
result = self.assertResponseSuccess(self.get_exams.get())
user_session = result[0]
self.assertEqual(user_session['score'],
sum(t.score for t in self.tickets))
self.tickets[0].score = None
self.tickets[0].save()
result = self.assertResponseSuccess(self.get_exams.get())
user_session = result[0]
self.assertEqual(user_session['score'], None)
class TestGetExamTickets(ApiTestCase):
get_exams: ApiClient
session: ExamSession
student_session: UserSession
questions: List[Question]
tickets: List[ExamTicket]
ticket_map: Dict[str, ExamTicket]
def setUp(self):
super().setUp()
self.get_exam_questions = \
ApiClient('/api/tickets', student=self.student)
self.setup_exam_objects()
def tearDown(self):
self.teardown_exam_objects()
super().tearDown()
def test_get_exam_questions(self):
self.assertFalse(self.student_session.check_in)
result = self.assertResponseSuccess(
self.get_exam_questions.post(session_id=self.student_session.id))
self.assertEqual(result['status'], ExamStatus.available)
self.assertEqual(result['score'], None)
self.student_session.refresh_from_db()
self.assertTrue(self.student_session.check_in)
questions = result['questions']
self.assertIsInstance(questions, list)
self.assertEqual(len(questions), len(self.tickets))
self.assertEqual([x['id'] for x in questions], [
x.id for x in sorted(self.tickets, key=lambda x: x.question.stage)
])
for question in questions:
ticket = self.ticket_map[question['id']]
ticket_question = ticket.question
self.assertEqual(question.pop('id'), ticket.id)
view = ticket_question.as_dict
view.pop('id')
self.assertEqual(question, view)
def test_get_exam_questions_already_checked_in(self):
self.student_session.check_in = True
checkin_date = self.student_session.started_at
result = self.assertResponseSuccess(
self.get_exam_questions.post(session_id=self.student_session.id))
self.assertEqual(result['status'], ExamStatus.available)
self.assertEqual(result['score'], None)
self.student_session.refresh_from_db()
self.assertTrue(self.student_session.check_in)
self.assertEqual(self.student_session.started_at, checkin_date)
questions = result['questions']
self.assertIsInstance(questions, list)
self.assertEqual(len(questions), len(self.tickets))
def test_get_exam_questions_not_available(self):
self.session.start_time += self.session.duration
self.session.save()
result = self.assertResponseSuccess(
self.get_exam_questions.post(session_id=self.student_session.id))
self.assertEqual(result['status'], ExamStatus.not_available)
self.assertEqual(result['score'], None)
questions = result['questions']
self.assertIsInstance(questions, list)
self.assertEqual(len(questions), 0)
def test_get_exam_questions_submitted(self):
self.student_session.finished_at = timezone.now()
self.student_session.save()
ANSWER = 'answer'
for ticket in self.tickets:
ticket.answer = ANSWER
ticket.save()
result = self.assertResponseSuccess(
self.get_exam_questions.post(session_id=self.student_session.id))
self.assertEqual(result['status'], ExamStatus.submitted)
self.assertEqual(result['score'], None)
questions = result['questions']
self.assertIsInstance(questions, list)
self.assertEqual(len(questions), len(self.tickets))
for question in questions:
ticket = self.ticket_map[question['id']]
ticket.refresh_from_db()
answer = question.pop('answer')
self.assertEqual(answer, ticket.answer)
self.assertEqual(question['score'], None)
def test_get_exam_questions_submitted_and_scored(self):
self.student_session.finished_at = timezone.now()
self.student_session.save()
ANSWER = 'answer'
for ticket in self.tickets:
ticket.answer = ANSWER
ticket.score = 1.0
ticket.save()
result = self.assertResponseSuccess(
self.get_exam_questions.post(session_id=self.student_session.id))
self.assertEqual(result['status'], ExamStatus.submitted)
self.assertEqual(result['score'], sum(t.score for t in self.tickets))
questions = result['questions']
self.assertIsInstance(questions, list)
self.assertEqual(len(questions), len(self.tickets))
for question in questions:
ticket = self.ticket_map[question['id']]
ticket.refresh_from_db()
self.assertEqual(question['score'], ticket.score)
def test_get_exam_questions_invalid_params(self):
self.assertResponseError(self.get_exam_questions.post(),
errors.InvalidParameter('session_id'))
self.assertResponseError(
self.get_exam_questions.post(session_id=uuid_str()),
errors.ExamNotFound)
self.get_exam_questions.cookies = {}
self.assertResponseError(
self.get_exam_questions.post(session_id=self.student_session.id),
errors.Unauthorized)
response = self.get_exam_questions.get()
self.assertEqual(response.status_code, 405)
class TestSubmitExam(ApiTestCase):
def setUp(self):
super().setUp()
self.submit_exam = ApiClient('/api/submit', student=self.student)
self.setup_exam_objects()
def tearDown(self):
self.teardown_exam_objects()
super().tearDown()
def test_submit_exam(self):
answers = {}
ANSWER = 'answer'
for ticket in self.tickets:
if ticket.question.type == QuestionType.single:
answers[ticket.id] = \
random.randint(0, len(ticket.question.options)-1)
elif ticket.question.type == QuestionType.multi:
answers[ticket.id] = random.sample(
list(range(0, len(ticket.question.options))),
k=random.randint(0, len(ticket.question.options))
)
else:
answers[ticket.id] = ANSWER
result = self.assertResponseSuccess(self.submit_exam.post(
session_id=self.student_session.id, answers=answers))
self.assertEqual(result, True)
self.student_session.refresh_from_db()
self.assertEqual(self.student_session.status, ExamStatus.submitted)
for ticket in self.tickets:
ticket.refresh_from_db()
if ticket.question.type == QuestionType.single:
self.assertEqual(
ticket.answer, ticket.question.options[answers[ticket.id]])
elif ticket.question.type == QuestionType.multi:
self.assertEqual(ticket.answer, ';'.join([
ticket.question.options[x]
for x in sorted(answers[ticket.id])
]))
self.assertIsNotNone(ticket.answered_at)
def test_submit_without_any_answer(self):
result = self.assertResponseSuccess(self.submit_exam.post(
session_id=self.student_session.id, answers={}))
self.assertEqual(result, True)
self.student_session.refresh_from_db()
self.assertEqual(self.student_session.status, ExamStatus.submitted)
for ticket in self.tickets:
ticket.refresh_from_db()
self.assertIsNone(ticket.answered_at)
self.assertIsNone(ticket.answer)
def test_submit_partial_answer_errors(self):
ANSWER = 'answer'
answers = {
            # invalid answer option index
            self.tickets[0].id: len(self.tickets[0].question.options),
            # wrong answer type
            self.tickets[1].id: 0,
            # valid answer
            self.tickets[2].id: ANSWER,
            # unknown ticket id
            uuid_str(): ANSWER,
            # nonexistent ticket
            self.tickets[2].id + 1: ANSWER,
}
result = self.assertResponseSuccess(self.submit_exam.post(
session_id=self.student_session.id, answers=answers))
self.assertEqual(result, True)
self.student_session.refresh_from_db()
self.assertEqual(self.student_session.status, ExamStatus.submitted)
for ticket in self.tickets:
ticket.refresh_from_db()
self.assertIsNone(self.tickets[0].answer)
self.assertIsNone(self.tickets[0].answered_at)
self.assertIsNone(self.tickets[1].answer)
self.assertIsNone(self.tickets[1].answered_at)
self.assertEqual(self.tickets[2].answer, ANSWER)
self.assertIsNotNone(self.tickets[2].answered_at)
def test_submit_errors(self):
self.assertResponseError(self.submit_exam.post(),
errors.InvalidParameter('session_id'))
self.assertResponseError(self.submit_exam.post(session_id=123),
errors.InvalidParameter('session_id'))
self.assertResponseError(self.submit_exam.post(session_id=uuid_str()),
errors.InvalidParameter('answers'))
self.assertResponseError(
self.submit_exam.post(session_id=uuid_str(), answers=[]),
errors.InvalidParameter('answers'))
self.assertResponseError(
self.submit_exam.post(session_id=uuid_str(), answers={}),
errors.ExamNotFound)
self.session.start_time += self.session.duration
self.session.save()
self.assertResponseError(self.submit_exam.post(
session_id=self.student_session.id, answers={}),
errors.ExamNotAvailable)
self.student_session.start_time = timezone.now()
self.student_session.save()
self.assertResponseError(self.submit_exam.post(
session_id=self.student_session.id, answers={}),
errors.ExamNotAvailable)
|
normal
|
{
"blob_id": "44e4151279884ce7c5d5a9e5c82916ce2d3ccbc2",
"index": 9789,
"step-1": "<mask token>\n\n\nclass TestGetExamTickets(ApiTestCase):\n get_exams: ApiClient\n session: ExamSession\n student_session: UserSession\n questions: List[Question]\n tickets: List[ExamTicket]\n ticket_map: Dict[str, ExamTicket]\n\n def setUp(self):\n super().setUp()\n self.get_exam_questions = ApiClient('/api/tickets', student=self.\n student)\n self.setup_exam_objects()\n\n def tearDown(self):\n self.teardown_exam_objects()\n super().tearDown()\n\n def test_get_exam_questions(self):\n self.assertFalse(self.student_session.check_in)\n result = self.assertResponseSuccess(self.get_exam_questions.post(\n session_id=self.student_session.id))\n self.assertEqual(result['status'], ExamStatus.available)\n self.assertEqual(result['score'], None)\n self.student_session.refresh_from_db()\n self.assertTrue(self.student_session.check_in)\n questions = result['questions']\n self.assertIsInstance(questions, list)\n self.assertEqual(len(questions), len(self.tickets))\n self.assertEqual([x['id'] for x in questions], [x.id for x in\n sorted(self.tickets, key=lambda x: x.question.stage)])\n for question in questions:\n ticket = self.ticket_map[question['id']]\n ticket_question = ticket.question\n self.assertEqual(question.pop('id'), ticket.id)\n view = ticket_question.as_dict\n view.pop('id')\n self.assertEqual(question, view)\n\n def test_get_exam_questions_already_checked_in(self):\n self.student_session.check_in = True\n checkin_date = self.student_session.started_at\n result = self.assertResponseSuccess(self.get_exam_questions.post(\n session_id=self.student_session.id))\n self.assertEqual(result['status'], ExamStatus.available)\n self.assertEqual(result['score'], None)\n self.student_session.refresh_from_db()\n self.assertTrue(self.student_session.check_in)\n self.assertEqual(self.student_session.started_at, checkin_date)\n questions = result['questions']\n self.assertIsInstance(questions, list)\n self.assertEqual(len(questions), len(self.tickets))\n <mask token>\n\n def test_get_exam_questions_submitted(self):\n self.student_session.finished_at = timezone.now()\n self.student_session.save()\n ANSWER = 'answer'\n for ticket in self.tickets:\n ticket.answer = ANSWER\n ticket.save()\n result = self.assertResponseSuccess(self.get_exam_questions.post(\n session_id=self.student_session.id))\n self.assertEqual(result['status'], ExamStatus.submitted)\n self.assertEqual(result['score'], None)\n questions = result['questions']\n self.assertIsInstance(questions, list)\n self.assertEqual(len(questions), len(self.tickets))\n for question in questions:\n ticket = self.ticket_map[question['id']]\n ticket.refresh_from_db()\n answer = question.pop('answer')\n self.assertEqual(answer, ticket.answer)\n self.assertEqual(question['score'], None)\n\n def test_get_exam_questions_submitted_and_scored(self):\n self.student_session.finished_at = timezone.now()\n self.student_session.save()\n ANSWER = 'answer'\n for ticket in self.tickets:\n ticket.answer = ANSWER\n ticket.score = 1.0\n ticket.save()\n result = self.assertResponseSuccess(self.get_exam_questions.post(\n session_id=self.student_session.id))\n self.assertEqual(result['status'], ExamStatus.submitted)\n self.assertEqual(result['score'], sum(t.score for t in self.tickets))\n questions = result['questions']\n self.assertIsInstance(questions, list)\n self.assertEqual(len(questions), len(self.tickets))\n for question in questions:\n ticket = self.ticket_map[question['id']]\n ticket.refresh_from_db()\n self.assertEqual(question['score'], ticket.score)\n\n def 
test_get_exam_questions_invalid_params(self):\n self.assertResponseError(self.get_exam_questions.post(), errors.\n InvalidParameter('session_id'))\n self.assertResponseError(self.get_exam_questions.post(session_id=\n uuid_str()), errors.ExamNotFound)\n self.get_exam_questions.cookies = {}\n self.assertResponseError(self.get_exam_questions.post(session_id=\n self.student_session.id), errors.Unauthorized)\n response = self.get_exam_questions.get()\n self.assertEqual(response.status_code, 405)\n\n\nclass TestSubmitExam(ApiTestCase):\n\n def setUp(self):\n super().setUp()\n self.submit_exam = ApiClient('/api/submit', student=self.student)\n self.setup_exam_objects()\n\n def tearDown(self):\n self.teardown_exam_objects()\n super().tearDown()\n\n def test_submit_exam(self):\n answers = {}\n ANSWER = 'answer'\n for ticket in self.tickets:\n if ticket.question.type == QuestionType.single:\n answers[ticket.id] = random.randint(0, len(ticket.question.\n options) - 1)\n elif ticket.question.type == QuestionType.multi:\n answers[ticket.id] = random.sample(list(range(0, len(ticket\n .question.options))), k=random.randint(0, len(ticket.\n question.options)))\n else:\n answers[ticket.id] = ANSWER\n result = self.assertResponseSuccess(self.submit_exam.post(\n session_id=self.student_session.id, answers=answers))\n self.assertEqual(result, True)\n self.student_session.refresh_from_db()\n self.assertEqual(self.student_session.status, ExamStatus.submitted)\n for ticket in self.tickets:\n ticket.refresh_from_db()\n if ticket.question.type == QuestionType.single:\n self.assertEqual(ticket.answer, ticket.question.options[\n answers[ticket.id]])\n elif ticket.question.type == QuestionType.multi:\n self.assertEqual(ticket.answer, ';'.join([ticket.question.\n options[x] for x in sorted(answers[ticket.id])]))\n self.assertIsNotNone(ticket.answered_at)\n\n def test_submit_without_any_answer(self):\n result = self.assertResponseSuccess(self.submit_exam.post(\n session_id=self.student_session.id, answers={}))\n self.assertEqual(result, True)\n self.student_session.refresh_from_db()\n self.assertEqual(self.student_session.status, ExamStatus.submitted)\n for ticket in self.tickets:\n ticket.refresh_from_db()\n self.assertIsNone(ticket.answered_at)\n self.assertIsNone(ticket.answer)\n\n def test_submit_partial_answer_errors(self):\n ANSWER = 'answer'\n answers = {self.tickets[0].id: len(self.tickets[0].question.options\n ), self.tickets[1].id: 0, self.tickets[2].id: ANSWER, uuid_str(\n ): ANSWER, (self.tickets[2].id + 1): ANSWER}\n result = self.assertResponseSuccess(self.submit_exam.post(\n session_id=self.student_session.id, answers=answers))\n self.assertEqual(result, True)\n self.student_session.refresh_from_db()\n self.assertEqual(self.student_session.status, ExamStatus.submitted)\n for ticket in self.tickets:\n ticket.refresh_from_db()\n self.assertIsNone(self.tickets[0].answer)\n self.assertIsNone(self.tickets[0].answered_at)\n self.assertIsNone(self.tickets[1].answer)\n self.assertIsNone(self.tickets[1].answered_at)\n self.assertEqual(self.tickets[2].answer, ANSWER)\n self.assertIsNotNone(self.tickets[2].answered_at)\n\n def test_submit_errors(self):\n self.assertResponseError(self.submit_exam.post(), errors.\n InvalidParameter('session_id'))\n self.assertResponseError(self.submit_exam.post(session_id=123),\n errors.InvalidParameter('session_id'))\n self.assertResponseError(self.submit_exam.post(session_id=uuid_str(\n )), errors.InvalidParameter('answers'))\n 
self.assertResponseError(self.submit_exam.post(session_id=uuid_str(\n ), answers=[]), errors.InvalidParameter('answers'))\n self.assertResponseError(self.submit_exam.post(session_id=uuid_str(\n ), answers={}), errors.ExamNotFound)\n self.session.start_time += self.session.duration\n self.session.save()\n self.assertResponseError(self.submit_exam.post(session_id=self.\n student_session.id, answers={}), errors.ExamNotAvailable)\n self.student_session.start_time = timezone.now()\n self.student_session.save()\n self.assertResponseError(self.submit_exam.post(session_id=self.\n student_session.id, answers={}), errors.ExamNotAvailable)\n",
"step-2": "<mask token>\n\n\nclass ApiTestCase(TestCase):\n group: AcademyGroup\n student: Student\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.group = AcademyGroup.objects.create(name='test_group')\n cls.student = Student.objects.create(name='test user', group=cls.group)\n\n @classmethod\n def tearDownClass(cls):\n cls.student.delete()\n cls.group.delete()\n super().tearDownClass()\n <mask token>\n\n def teardown_exam_objects(self):\n for ticket in self.tickets:\n ticket.delete()\n for question in self.questions:\n question.delete()\n self.student_session.delete()\n <mask token>\n <mask token>\n\n\nclass TestAuthorize(ApiTestCase):\n authorize: ApiClient\n\n def setUp(self):\n super().setUp()\n self.authorize = ApiClient('/api/authorize')\n\n def test_authorized(self):\n response = self.authorize.post(token=self.student.id)\n result = self.assertResponseSuccess(response)\n self.assertEqual(response.cookies['student'].value, self.student.id)\n self.assertEqual(result['name'], self.student.name)\n self.assertEqual(result['group'], self.group.name)\n self.assertEqual(result['id'], self.student.id)\n\n def test_authorized_unknown_token(self):\n response = self.authorize.post(token=uuid_str())\n self.assertResponseError(response, errors.Unauthorized)\n\n def test_authorized_invalid_params(self):\n response = self.authorize.post()\n self.assertResponseError(response, errors.InvalidParameter('token'))\n response = self.authorize.post(token=12345678)\n self.assertResponseError(response, errors.InvalidParameter('token'))\n response = self.authorize.get()\n self.assertEqual(response.status_code, 405)\n\n\nclass TestGetExamSessions(ApiTestCase):\n get_exams: ApiClient\n session: ExamSession\n student_session: UserSession\n questions: List[Question]\n tickets: List[ExamTicket]\n\n def setUp(self):\n super().setUp()\n self.get_exams = ApiClient('/api/exams', student=self.student)\n self.setup_exam_objects()\n\n def tearDown(self):\n self.teardown_exam_objects()\n super().tearDown()\n\n def test_get_exams_available(self):\n result = self.assertResponseSuccess(self.get_exams.get())\n self.assertIsInstance(result, list)\n self.assertEqual(len(result), 1)\n user_session = result[0]\n self.assertEqual(user_session['started_at'], self.session.\n start_time.isoformat())\n self.assertEqual(user_session['duration'], self.session.duration.\n total_seconds() / 60)\n self.assertEqual(user_session['checked_in'], False)\n self.assertEqual(user_session['finished_at'], None)\n self.assertEqual(user_session['status'], ExamStatus.available.value)\n self.assertEqual(user_session['score'], None)\n\n def test_get_exams_check_in(self):\n self.student_session.started_at = timezone.now()\n self.student_session.save()\n result = self.assertResponseSuccess(self.get_exams.get())\n user_session = result[0]\n self.assertEqual(user_session['checked_in'], True)\n\n def test_get_exams_submitted(self):\n now = timezone.now()\n self.student_session.started_at = timezone.now()\n self.student_session.finished_at = now\n self.student_session.save()\n result = self.assertResponseSuccess(self.get_exams.get())\n user_session = result[0]\n self.assertEqual(user_session['finished_at'], now.isoformat())\n self.assertEqual(user_session['status'], ExamStatus.submitted)\n self.assertEqual(user_session['score'], None)\n\n def test_get_exams_non_available(self):\n self.session.start_time = timezone.now() + self.session.duration\n self.session.save()\n result = self.assertResponseSuccess(self.get_exams.get())\n user_session = 
result[0]\n self.assertEqual(user_session['started_at'], self.session.\n start_time.isoformat())\n self.assertEqual(user_session['finished_at'], None)\n self.assertEqual(user_session['status'], ExamStatus.not_available)\n\n def test_get_exams_unauthorized(self):\n self.get_exams.cookies = {}\n self.assertResponseError(self.get_exams.get(), errors.Unauthorized)\n response = self.get_exams.post()\n self.assertEqual(response.status_code, 405)\n\n def test_get_exams_score(self):\n for ticket in self.tickets:\n ticket.score = 1.0\n ticket.save()\n result = self.assertResponseSuccess(self.get_exams.get())\n user_session = result[0]\n self.assertEqual(user_session['score'], sum(t.score for t in self.\n tickets))\n self.tickets[0].score = None\n self.tickets[0].save()\n result = self.assertResponseSuccess(self.get_exams.get())\n user_session = result[0]\n self.assertEqual(user_session['score'], None)\n\n\nclass TestGetExamTickets(ApiTestCase):\n get_exams: ApiClient\n session: ExamSession\n student_session: UserSession\n questions: List[Question]\n tickets: List[ExamTicket]\n ticket_map: Dict[str, ExamTicket]\n\n def setUp(self):\n super().setUp()\n self.get_exam_questions = ApiClient('/api/tickets', student=self.\n student)\n self.setup_exam_objects()\n\n def tearDown(self):\n self.teardown_exam_objects()\n super().tearDown()\n\n def test_get_exam_questions(self):\n self.assertFalse(self.student_session.check_in)\n result = self.assertResponseSuccess(self.get_exam_questions.post(\n session_id=self.student_session.id))\n self.assertEqual(result['status'], ExamStatus.available)\n self.assertEqual(result['score'], None)\n self.student_session.refresh_from_db()\n self.assertTrue(self.student_session.check_in)\n questions = result['questions']\n self.assertIsInstance(questions, list)\n self.assertEqual(len(questions), len(self.tickets))\n self.assertEqual([x['id'] for x in questions], [x.id for x in\n sorted(self.tickets, key=lambda x: x.question.stage)])\n for question in questions:\n ticket = self.ticket_map[question['id']]\n ticket_question = ticket.question\n self.assertEqual(question.pop('id'), ticket.id)\n view = ticket_question.as_dict\n view.pop('id')\n self.assertEqual(question, view)\n\n def test_get_exam_questions_already_checked_in(self):\n self.student_session.check_in = True\n checkin_date = self.student_session.started_at\n result = self.assertResponseSuccess(self.get_exam_questions.post(\n session_id=self.student_session.id))\n self.assertEqual(result['status'], ExamStatus.available)\n self.assertEqual(result['score'], None)\n self.student_session.refresh_from_db()\n self.assertTrue(self.student_session.check_in)\n self.assertEqual(self.student_session.started_at, checkin_date)\n questions = result['questions']\n self.assertIsInstance(questions, list)\n self.assertEqual(len(questions), len(self.tickets))\n\n def test_get_exam_questions_not_available(self):\n self.session.start_time += self.session.duration\n self.session.save()\n result = self.assertResponseSuccess(self.get_exam_questions.post(\n session_id=self.student_session.id))\n self.assertEqual(result['status'], ExamStatus.not_available)\n self.assertEqual(result['score'], None)\n questions = result['questions']\n self.assertIsInstance(questions, list)\n self.assertEqual(len(questions), 0)\n\n def test_get_exam_questions_submitted(self):\n self.student_session.finished_at = timezone.now()\n self.student_session.save()\n ANSWER = 'answer'\n for ticket in self.tickets:\n ticket.answer = ANSWER\n ticket.save()\n result = 
self.assertResponseSuccess(self.get_exam_questions.post(\n session_id=self.student_session.id))\n self.assertEqual(result['status'], ExamStatus.submitted)\n self.assertEqual(result['score'], None)\n questions = result['questions']\n self.assertIsInstance(questions, list)\n self.assertEqual(len(questions), len(self.tickets))\n for question in questions:\n ticket = self.ticket_map[question['id']]\n ticket.refresh_from_db()\n answer = question.pop('answer')\n self.assertEqual(answer, ticket.answer)\n self.assertEqual(question['score'], None)\n\n def test_get_exam_questions_submitted_and_scored(self):\n self.student_session.finished_at = timezone.now()\n self.student_session.save()\n ANSWER = 'answer'\n for ticket in self.tickets:\n ticket.answer = ANSWER\n ticket.score = 1.0\n ticket.save()\n result = self.assertResponseSuccess(self.get_exam_questions.post(\n session_id=self.student_session.id))\n self.assertEqual(result['status'], ExamStatus.submitted)\n self.assertEqual(result['score'], sum(t.score for t in self.tickets))\n questions = result['questions']\n self.assertIsInstance(questions, list)\n self.assertEqual(len(questions), len(self.tickets))\n for question in questions:\n ticket = self.ticket_map[question['id']]\n ticket.refresh_from_db()\n self.assertEqual(question['score'], ticket.score)\n\n def test_get_exam_questions_invalid_params(self):\n self.assertResponseError(self.get_exam_questions.post(), errors.\n InvalidParameter('session_id'))\n self.assertResponseError(self.get_exam_questions.post(session_id=\n uuid_str()), errors.ExamNotFound)\n self.get_exam_questions.cookies = {}\n self.assertResponseError(self.get_exam_questions.post(session_id=\n self.student_session.id), errors.Unauthorized)\n response = self.get_exam_questions.get()\n self.assertEqual(response.status_code, 405)\n\n\nclass TestSubmitExam(ApiTestCase):\n\n def setUp(self):\n super().setUp()\n self.submit_exam = ApiClient('/api/submit', student=self.student)\n self.setup_exam_objects()\n\n def tearDown(self):\n self.teardown_exam_objects()\n super().tearDown()\n\n def test_submit_exam(self):\n answers = {}\n ANSWER = 'answer'\n for ticket in self.tickets:\n if ticket.question.type == QuestionType.single:\n answers[ticket.id] = random.randint(0, len(ticket.question.\n options) - 1)\n elif ticket.question.type == QuestionType.multi:\n answers[ticket.id] = random.sample(list(range(0, len(ticket\n .question.options))), k=random.randint(0, len(ticket.\n question.options)))\n else:\n answers[ticket.id] = ANSWER\n result = self.assertResponseSuccess(self.submit_exam.post(\n session_id=self.student_session.id, answers=answers))\n self.assertEqual(result, True)\n self.student_session.refresh_from_db()\n self.assertEqual(self.student_session.status, ExamStatus.submitted)\n for ticket in self.tickets:\n ticket.refresh_from_db()\n if ticket.question.type == QuestionType.single:\n self.assertEqual(ticket.answer, ticket.question.options[\n answers[ticket.id]])\n elif ticket.question.type == QuestionType.multi:\n self.assertEqual(ticket.answer, ';'.join([ticket.question.\n options[x] for x in sorted(answers[ticket.id])]))\n self.assertIsNotNone(ticket.answered_at)\n\n def test_submit_without_any_answer(self):\n result = self.assertResponseSuccess(self.submit_exam.post(\n session_id=self.student_session.id, answers={}))\n self.assertEqual(result, True)\n self.student_session.refresh_from_db()\n self.assertEqual(self.student_session.status, ExamStatus.submitted)\n for ticket in self.tickets:\n ticket.refresh_from_db()\n 
self.assertIsNone(ticket.answered_at)\n self.assertIsNone(ticket.answer)\n\n def test_submit_partial_answer_errors(self):\n ANSWER = 'answer'\n answers = {self.tickets[0].id: len(self.tickets[0].question.options\n ), self.tickets[1].id: 0, self.tickets[2].id: ANSWER, uuid_str(\n ): ANSWER, (self.tickets[2].id + 1): ANSWER}\n result = self.assertResponseSuccess(self.submit_exam.post(\n session_id=self.student_session.id, answers=answers))\n self.assertEqual(result, True)\n self.student_session.refresh_from_db()\n self.assertEqual(self.student_session.status, ExamStatus.submitted)\n for ticket in self.tickets:\n ticket.refresh_from_db()\n self.assertIsNone(self.tickets[0].answer)\n self.assertIsNone(self.tickets[0].answered_at)\n self.assertIsNone(self.tickets[1].answer)\n self.assertIsNone(self.tickets[1].answered_at)\n self.assertEqual(self.tickets[2].answer, ANSWER)\n self.assertIsNotNone(self.tickets[2].answered_at)\n\n def test_submit_errors(self):\n self.assertResponseError(self.submit_exam.post(), errors.\n InvalidParameter('session_id'))\n self.assertResponseError(self.submit_exam.post(session_id=123),\n errors.InvalidParameter('session_id'))\n self.assertResponseError(self.submit_exam.post(session_id=uuid_str(\n )), errors.InvalidParameter('answers'))\n self.assertResponseError(self.submit_exam.post(session_id=uuid_str(\n ), answers=[]), errors.InvalidParameter('answers'))\n self.assertResponseError(self.submit_exam.post(session_id=uuid_str(\n ), answers={}), errors.ExamNotFound)\n self.session.start_time += self.session.duration\n self.session.save()\n self.assertResponseError(self.submit_exam.post(session_id=self.\n student_session.id, answers={}), errors.ExamNotAvailable)\n self.student_session.start_time = timezone.now()\n self.student_session.save()\n self.assertResponseError(self.submit_exam.post(session_id=self.\n student_session.id, answers={}), errors.ExamNotAvailable)\n",
"step-3": "<mask token>\n\n\nclass ApiClient(Client):\n path: str\n\n def __init__(self, path: str, student: Student=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.student = student\n self.path = path\n self.headers = {'content_type': 'application/json'}\n if student:\n self.cookies['student'] = student.id\n\n def path_params(self, **params):\n return ApiClient(self.path.format(**params), self.student)\n\n def get(self, **kwargs):\n return super().get(self.path, data=kwargs, **self.headers)\n\n def post(self, **json):\n return super().post(self.path, data=json, **self.headers)\n <mask token>\n\n\nclass ApiTestCase(TestCase):\n group: AcademyGroup\n student: Student\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.group = AcademyGroup.objects.create(name='test_group')\n cls.student = Student.objects.create(name='test user', group=cls.group)\n\n @classmethod\n def tearDownClass(cls):\n cls.student.delete()\n cls.group.delete()\n super().tearDownClass()\n\n def setup_exam_objects(self):\n self.session = ExamSession.objects.create(start_time=timezone.now(),\n duration=timedelta(minutes=40))\n self.student_session = UserSession.objects.create(student=self.\n student, exam_session=self.session)\n self.questions = [Question.objects.create(stage=Stage.first, type=\n QuestionType.single, max_score=1, text='test single question',\n options=['a', 'b', 'c']), Question.objects.create(stage=Stage.\n first, type=QuestionType.multi, max_score=1, text=\n 'test multi question', options=['a', 'b', 'c']), Question.\n objects.create(stage=Stage.second, type=QuestionType.open,\n max_score=1, text='test open question', options=None)]\n self.tickets = [ExamTicket.objects.create(student=self.student,\n session=self.student_session, question=question) for question in\n self.questions]\n self.ticket_map = {x.id: x for x in self.tickets}\n\n def teardown_exam_objects(self):\n for ticket in self.tickets:\n ticket.delete()\n for question in self.questions:\n question.delete()\n self.student_session.delete()\n\n def assertResponseSuccess(self, response: http.HttpResponse):\n content = response.content.decode()\n self.assertEqual(response.status_code, 200, (response.status_code,\n content))\n content = response.json()\n self.assertIn('result', content, content)\n return content['result']\n\n def assertResponseError(self, response: http.JsonResponse, error: Union\n [errors.APIError, Type[errors.APIError]]=None) ->Tuple[int, str]:\n content = response.json()\n self.assertGreaterEqual(response.status_code, 400, (response.\n status_code, content))\n self.assertIn('error', content, content)\n if error is not None:\n if isinstance(error, type):\n error = error()\n self.assertEqual(response.status_code, error.status, (response.\n status_code, content))\n self.assertEqual(content['error'], error.message, (response.\n status_code, content))\n return response.status_code, content['error']\n\n\nclass TestAuthorize(ApiTestCase):\n authorize: ApiClient\n\n def setUp(self):\n super().setUp()\n self.authorize = ApiClient('/api/authorize')\n\n def test_authorized(self):\n response = self.authorize.post(token=self.student.id)\n result = self.assertResponseSuccess(response)\n self.assertEqual(response.cookies['student'].value, self.student.id)\n self.assertEqual(result['name'], self.student.name)\n self.assertEqual(result['group'], self.group.name)\n self.assertEqual(result['id'], self.student.id)\n\n def test_authorized_unknown_token(self):\n response = self.authorize.post(token=uuid_str())\n 
self.assertResponseError(response, errors.Unauthorized)\n\n def test_authorized_invalid_params(self):\n response = self.authorize.post()\n self.assertResponseError(response, errors.InvalidParameter('token'))\n response = self.authorize.post(token=12345678)\n self.assertResponseError(response, errors.InvalidParameter('token'))\n response = self.authorize.get()\n self.assertEqual(response.status_code, 405)\n\n\nclass TestGetExamSessions(ApiTestCase):\n get_exams: ApiClient\n session: ExamSession\n student_session: UserSession\n questions: List[Question]\n tickets: List[ExamTicket]\n\n def setUp(self):\n super().setUp()\n self.get_exams = ApiClient('/api/exams', student=self.student)\n self.setup_exam_objects()\n\n def tearDown(self):\n self.teardown_exam_objects()\n super().tearDown()\n\n def test_get_exams_available(self):\n result = self.assertResponseSuccess(self.get_exams.get())\n self.assertIsInstance(result, list)\n self.assertEqual(len(result), 1)\n user_session = result[0]\n self.assertEqual(user_session['started_at'], self.session.\n start_time.isoformat())\n self.assertEqual(user_session['duration'], self.session.duration.\n total_seconds() / 60)\n self.assertEqual(user_session['checked_in'], False)\n self.assertEqual(user_session['finished_at'], None)\n self.assertEqual(user_session['status'], ExamStatus.available.value)\n self.assertEqual(user_session['score'], None)\n\n def test_get_exams_check_in(self):\n self.student_session.started_at = timezone.now()\n self.student_session.save()\n result = self.assertResponseSuccess(self.get_exams.get())\n user_session = result[0]\n self.assertEqual(user_session['checked_in'], True)\n\n def test_get_exams_submitted(self):\n now = timezone.now()\n self.student_session.started_at = timezone.now()\n self.student_session.finished_at = now\n self.student_session.save()\n result = self.assertResponseSuccess(self.get_exams.get())\n user_session = result[0]\n self.assertEqual(user_session['finished_at'], now.isoformat())\n self.assertEqual(user_session['status'], ExamStatus.submitted)\n self.assertEqual(user_session['score'], None)\n\n def test_get_exams_non_available(self):\n self.session.start_time = timezone.now() + self.session.duration\n self.session.save()\n result = self.assertResponseSuccess(self.get_exams.get())\n user_session = result[0]\n self.assertEqual(user_session['started_at'], self.session.\n start_time.isoformat())\n self.assertEqual(user_session['finished_at'], None)\n self.assertEqual(user_session['status'], ExamStatus.not_available)\n\n def test_get_exams_unauthorized(self):\n self.get_exams.cookies = {}\n self.assertResponseError(self.get_exams.get(), errors.Unauthorized)\n response = self.get_exams.post()\n self.assertEqual(response.status_code, 405)\n\n def test_get_exams_score(self):\n for ticket in self.tickets:\n ticket.score = 1.0\n ticket.save()\n result = self.assertResponseSuccess(self.get_exams.get())\n user_session = result[0]\n self.assertEqual(user_session['score'], sum(t.score for t in self.\n tickets))\n self.tickets[0].score = None\n self.tickets[0].save()\n result = self.assertResponseSuccess(self.get_exams.get())\n user_session = result[0]\n self.assertEqual(user_session['score'], None)\n\n\nclass TestGetExamTickets(ApiTestCase):\n get_exams: ApiClient\n session: ExamSession\n student_session: UserSession\n questions: List[Question]\n tickets: List[ExamTicket]\n ticket_map: Dict[str, ExamTicket]\n\n def setUp(self):\n super().setUp()\n self.get_exam_questions = ApiClient('/api/tickets', student=self.\n 
student)\n self.setup_exam_objects()\n\n def tearDown(self):\n self.teardown_exam_objects()\n super().tearDown()\n\n def test_get_exam_questions(self):\n self.assertFalse(self.student_session.check_in)\n result = self.assertResponseSuccess(self.get_exam_questions.post(\n session_id=self.student_session.id))\n self.assertEqual(result['status'], ExamStatus.available)\n self.assertEqual(result['score'], None)\n self.student_session.refresh_from_db()\n self.assertTrue(self.student_session.check_in)\n questions = result['questions']\n self.assertIsInstance(questions, list)\n self.assertEqual(len(questions), len(self.tickets))\n self.assertEqual([x['id'] for x in questions], [x.id for x in\n sorted(self.tickets, key=lambda x: x.question.stage)])\n for question in questions:\n ticket = self.ticket_map[question['id']]\n ticket_question = ticket.question\n self.assertEqual(question.pop('id'), ticket.id)\n view = ticket_question.as_dict\n view.pop('id')\n self.assertEqual(question, view)\n\n def test_get_exam_questions_already_checked_in(self):\n self.student_session.check_in = True\n checkin_date = self.student_session.started_at\n result = self.assertResponseSuccess(self.get_exam_questions.post(\n session_id=self.student_session.id))\n self.assertEqual(result['status'], ExamStatus.available)\n self.assertEqual(result['score'], None)\n self.student_session.refresh_from_db()\n self.assertTrue(self.student_session.check_in)\n self.assertEqual(self.student_session.started_at, checkin_date)\n questions = result['questions']\n self.assertIsInstance(questions, list)\n self.assertEqual(len(questions), len(self.tickets))\n\n def test_get_exam_questions_not_available(self):\n self.session.start_time += self.session.duration\n self.session.save()\n result = self.assertResponseSuccess(self.get_exam_questions.post(\n session_id=self.student_session.id))\n self.assertEqual(result['status'], ExamStatus.not_available)\n self.assertEqual(result['score'], None)\n questions = result['questions']\n self.assertIsInstance(questions, list)\n self.assertEqual(len(questions), 0)\n\n def test_get_exam_questions_submitted(self):\n self.student_session.finished_at = timezone.now()\n self.student_session.save()\n ANSWER = 'answer'\n for ticket in self.tickets:\n ticket.answer = ANSWER\n ticket.save()\n result = self.assertResponseSuccess(self.get_exam_questions.post(\n session_id=self.student_session.id))\n self.assertEqual(result['status'], ExamStatus.submitted)\n self.assertEqual(result['score'], None)\n questions = result['questions']\n self.assertIsInstance(questions, list)\n self.assertEqual(len(questions), len(self.tickets))\n for question in questions:\n ticket = self.ticket_map[question['id']]\n ticket.refresh_from_db()\n answer = question.pop('answer')\n self.assertEqual(answer, ticket.answer)\n self.assertEqual(question['score'], None)\n\n def test_get_exam_questions_submitted_and_scored(self):\n self.student_session.finished_at = timezone.now()\n self.student_session.save()\n ANSWER = 'answer'\n for ticket in self.tickets:\n ticket.answer = ANSWER\n ticket.score = 1.0\n ticket.save()\n result = self.assertResponseSuccess(self.get_exam_questions.post(\n session_id=self.student_session.id))\n self.assertEqual(result['status'], ExamStatus.submitted)\n self.assertEqual(result['score'], sum(t.score for t in self.tickets))\n questions = result['questions']\n self.assertIsInstance(questions, list)\n self.assertEqual(len(questions), len(self.tickets))\n for question in questions:\n ticket = 
self.ticket_map[question['id']]\n ticket.refresh_from_db()\n self.assertEqual(question['score'], ticket.score)\n\n def test_get_exam_questions_invalid_params(self):\n self.assertResponseError(self.get_exam_questions.post(), errors.\n InvalidParameter('session_id'))\n self.assertResponseError(self.get_exam_questions.post(session_id=\n uuid_str()), errors.ExamNotFound)\n self.get_exam_questions.cookies = {}\n self.assertResponseError(self.get_exam_questions.post(session_id=\n self.student_session.id), errors.Unauthorized)\n response = self.get_exam_questions.get()\n self.assertEqual(response.status_code, 405)\n\n\nclass TestSubmitExam(ApiTestCase):\n\n def setUp(self):\n super().setUp()\n self.submit_exam = ApiClient('/api/submit', student=self.student)\n self.setup_exam_objects()\n\n def tearDown(self):\n self.teardown_exam_objects()\n super().tearDown()\n\n def test_submit_exam(self):\n answers = {}\n ANSWER = 'answer'\n for ticket in self.tickets:\n if ticket.question.type == QuestionType.single:\n answers[ticket.id] = random.randint(0, len(ticket.question.\n options) - 1)\n elif ticket.question.type == QuestionType.multi:\n answers[ticket.id] = random.sample(list(range(0, len(ticket\n .question.options))), k=random.randint(0, len(ticket.\n question.options)))\n else:\n answers[ticket.id] = ANSWER\n result = self.assertResponseSuccess(self.submit_exam.post(\n session_id=self.student_session.id, answers=answers))\n self.assertEqual(result, True)\n self.student_session.refresh_from_db()\n self.assertEqual(self.student_session.status, ExamStatus.submitted)\n for ticket in self.tickets:\n ticket.refresh_from_db()\n if ticket.question.type == QuestionType.single:\n self.assertEqual(ticket.answer, ticket.question.options[\n answers[ticket.id]])\n elif ticket.question.type == QuestionType.multi:\n self.assertEqual(ticket.answer, ';'.join([ticket.question.\n options[x] for x in sorted(answers[ticket.id])]))\n self.assertIsNotNone(ticket.answered_at)\n\n def test_submit_without_any_answer(self):\n result = self.assertResponseSuccess(self.submit_exam.post(\n session_id=self.student_session.id, answers={}))\n self.assertEqual(result, True)\n self.student_session.refresh_from_db()\n self.assertEqual(self.student_session.status, ExamStatus.submitted)\n for ticket in self.tickets:\n ticket.refresh_from_db()\n self.assertIsNone(ticket.answered_at)\n self.assertIsNone(ticket.answer)\n\n def test_submit_partial_answer_errors(self):\n ANSWER = 'answer'\n answers = {self.tickets[0].id: len(self.tickets[0].question.options\n ), self.tickets[1].id: 0, self.tickets[2].id: ANSWER, uuid_str(\n ): ANSWER, (self.tickets[2].id + 1): ANSWER}\n result = self.assertResponseSuccess(self.submit_exam.post(\n session_id=self.student_session.id, answers=answers))\n self.assertEqual(result, True)\n self.student_session.refresh_from_db()\n self.assertEqual(self.student_session.status, ExamStatus.submitted)\n for ticket in self.tickets:\n ticket.refresh_from_db()\n self.assertIsNone(self.tickets[0].answer)\n self.assertIsNone(self.tickets[0].answered_at)\n self.assertIsNone(self.tickets[1].answer)\n self.assertIsNone(self.tickets[1].answered_at)\n self.assertEqual(self.tickets[2].answer, ANSWER)\n self.assertIsNotNone(self.tickets[2].answered_at)\n\n def test_submit_errors(self):\n self.assertResponseError(self.submit_exam.post(), errors.\n InvalidParameter('session_id'))\n self.assertResponseError(self.submit_exam.post(session_id=123),\n errors.InvalidParameter('session_id'))\n 
self.assertResponseError(self.submit_exam.post(session_id=uuid_str(\n )), errors.InvalidParameter('answers'))\n self.assertResponseError(self.submit_exam.post(session_id=uuid_str(\n ), answers=[]), errors.InvalidParameter('answers'))\n self.assertResponseError(self.submit_exam.post(session_id=uuid_str(\n ), answers={}), errors.ExamNotFound)\n self.session.start_time += self.session.duration\n self.session.save()\n self.assertResponseError(self.submit_exam.post(session_id=self.\n student_session.id, answers={}), errors.ExamNotAvailable)\n self.student_session.start_time = timezone.now()\n self.student_session.save()\n self.assertResponseError(self.submit_exam.post(session_id=self.\n student_session.id, answers={}), errors.ExamNotAvailable)\n",
"step-4": "import random\nfrom datetime import timedelta\nfrom typing import Union, Type, Tuple, List, Dict\nfrom django import http\nfrom django.test import TestCase, Client\nfrom django.utils import timezone\nfrom exam_web import errors\nfrom exam_web.models import Student, AcademyGroup, uuid_str, ExamSession, UserSession, Question, Stage, QuestionType, ExamTicket, ExamStatus\n\n\nclass ApiClient(Client):\n path: str\n\n def __init__(self, path: str, student: Student=None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.student = student\n self.path = path\n self.headers = {'content_type': 'application/json'}\n if student:\n self.cookies['student'] = student.id\n\n def path_params(self, **params):\n return ApiClient(self.path.format(**params), self.student)\n\n def get(self, **kwargs):\n return super().get(self.path, data=kwargs, **self.headers)\n\n def post(self, **json):\n return super().post(self.path, data=json, **self.headers)\n\n def __call__(self, **kwargs):\n raise AttributeError('Use `get` or `post` methods instead')\n\n\nclass ApiTestCase(TestCase):\n group: AcademyGroup\n student: Student\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.group = AcademyGroup.objects.create(name='test_group')\n cls.student = Student.objects.create(name='test user', group=cls.group)\n\n @classmethod\n def tearDownClass(cls):\n cls.student.delete()\n cls.group.delete()\n super().tearDownClass()\n\n def setup_exam_objects(self):\n self.session = ExamSession.objects.create(start_time=timezone.now(),\n duration=timedelta(minutes=40))\n self.student_session = UserSession.objects.create(student=self.\n student, exam_session=self.session)\n self.questions = [Question.objects.create(stage=Stage.first, type=\n QuestionType.single, max_score=1, text='test single question',\n options=['a', 'b', 'c']), Question.objects.create(stage=Stage.\n first, type=QuestionType.multi, max_score=1, text=\n 'test multi question', options=['a', 'b', 'c']), Question.\n objects.create(stage=Stage.second, type=QuestionType.open,\n max_score=1, text='test open question', options=None)]\n self.tickets = [ExamTicket.objects.create(student=self.student,\n session=self.student_session, question=question) for question in\n self.questions]\n self.ticket_map = {x.id: x for x in self.tickets}\n\n def teardown_exam_objects(self):\n for ticket in self.tickets:\n ticket.delete()\n for question in self.questions:\n question.delete()\n self.student_session.delete()\n\n def assertResponseSuccess(self, response: http.HttpResponse):\n content = response.content.decode()\n self.assertEqual(response.status_code, 200, (response.status_code,\n content))\n content = response.json()\n self.assertIn('result', content, content)\n return content['result']\n\n def assertResponseError(self, response: http.JsonResponse, error: Union\n [errors.APIError, Type[errors.APIError]]=None) ->Tuple[int, str]:\n content = response.json()\n self.assertGreaterEqual(response.status_code, 400, (response.\n status_code, content))\n self.assertIn('error', content, content)\n if error is not None:\n if isinstance(error, type):\n error = error()\n self.assertEqual(response.status_code, error.status, (response.\n status_code, content))\n self.assertEqual(content['error'], error.message, (response.\n status_code, content))\n return response.status_code, content['error']\n\n\nclass TestAuthorize(ApiTestCase):\n authorize: ApiClient\n\n def setUp(self):\n super().setUp()\n self.authorize = ApiClient('/api/authorize')\n\n def 
test_authorized(self):\n response = self.authorize.post(token=self.student.id)\n result = self.assertResponseSuccess(response)\n self.assertEqual(response.cookies['student'].value, self.student.id)\n self.assertEqual(result['name'], self.student.name)\n self.assertEqual(result['group'], self.group.name)\n self.assertEqual(result['id'], self.student.id)\n\n def test_authorized_unknown_token(self):\n response = self.authorize.post(token=uuid_str())\n self.assertResponseError(response, errors.Unauthorized)\n\n def test_authorized_invalid_params(self):\n response = self.authorize.post()\n self.assertResponseError(response, errors.InvalidParameter('token'))\n response = self.authorize.post(token=12345678)\n self.assertResponseError(response, errors.InvalidParameter('token'))\n response = self.authorize.get()\n self.assertEqual(response.status_code, 405)\n\n\nclass TestGetExamSessions(ApiTestCase):\n get_exams: ApiClient\n session: ExamSession\n student_session: UserSession\n questions: List[Question]\n tickets: List[ExamTicket]\n\n def setUp(self):\n super().setUp()\n self.get_exams = ApiClient('/api/exams', student=self.student)\n self.setup_exam_objects()\n\n def tearDown(self):\n self.teardown_exam_objects()\n super().tearDown()\n\n def test_get_exams_available(self):\n result = self.assertResponseSuccess(self.get_exams.get())\n self.assertIsInstance(result, list)\n self.assertEqual(len(result), 1)\n user_session = result[0]\n self.assertEqual(user_session['started_at'], self.session.\n start_time.isoformat())\n self.assertEqual(user_session['duration'], self.session.duration.\n total_seconds() / 60)\n self.assertEqual(user_session['checked_in'], False)\n self.assertEqual(user_session['finished_at'], None)\n self.assertEqual(user_session['status'], ExamStatus.available.value)\n self.assertEqual(user_session['score'], None)\n\n def test_get_exams_check_in(self):\n self.student_session.started_at = timezone.now()\n self.student_session.save()\n result = self.assertResponseSuccess(self.get_exams.get())\n user_session = result[0]\n self.assertEqual(user_session['checked_in'], True)\n\n def test_get_exams_submitted(self):\n now = timezone.now()\n self.student_session.started_at = timezone.now()\n self.student_session.finished_at = now\n self.student_session.save()\n result = self.assertResponseSuccess(self.get_exams.get())\n user_session = result[0]\n self.assertEqual(user_session['finished_at'], now.isoformat())\n self.assertEqual(user_session['status'], ExamStatus.submitted)\n self.assertEqual(user_session['score'], None)\n\n def test_get_exams_non_available(self):\n self.session.start_time = timezone.now() + self.session.duration\n self.session.save()\n result = self.assertResponseSuccess(self.get_exams.get())\n user_session = result[0]\n self.assertEqual(user_session['started_at'], self.session.\n start_time.isoformat())\n self.assertEqual(user_session['finished_at'], None)\n self.assertEqual(user_session['status'], ExamStatus.not_available)\n\n def test_get_exams_unauthorized(self):\n self.get_exams.cookies = {}\n self.assertResponseError(self.get_exams.get(), errors.Unauthorized)\n response = self.get_exams.post()\n self.assertEqual(response.status_code, 405)\n\n def test_get_exams_score(self):\n for ticket in self.tickets:\n ticket.score = 1.0\n ticket.save()\n result = self.assertResponseSuccess(self.get_exams.get())\n user_session = result[0]\n self.assertEqual(user_session['score'], sum(t.score for t in self.\n tickets))\n self.tickets[0].score = None\n self.tickets[0].save()\n result = 
self.assertResponseSuccess(self.get_exams.get())\n user_session = result[0]\n self.assertEqual(user_session['score'], None)\n\n\nclass TestGetExamTickets(ApiTestCase):\n get_exams: ApiClient\n session: ExamSession\n student_session: UserSession\n questions: List[Question]\n tickets: List[ExamTicket]\n ticket_map: Dict[str, ExamTicket]\n\n def setUp(self):\n super().setUp()\n self.get_exam_questions = ApiClient('/api/tickets', student=self.\n student)\n self.setup_exam_objects()\n\n def tearDown(self):\n self.teardown_exam_objects()\n super().tearDown()\n\n def test_get_exam_questions(self):\n self.assertFalse(self.student_session.check_in)\n result = self.assertResponseSuccess(self.get_exam_questions.post(\n session_id=self.student_session.id))\n self.assertEqual(result['status'], ExamStatus.available)\n self.assertEqual(result['score'], None)\n self.student_session.refresh_from_db()\n self.assertTrue(self.student_session.check_in)\n questions = result['questions']\n self.assertIsInstance(questions, list)\n self.assertEqual(len(questions), len(self.tickets))\n self.assertEqual([x['id'] for x in questions], [x.id for x in\n sorted(self.tickets, key=lambda x: x.question.stage)])\n for question in questions:\n ticket = self.ticket_map[question['id']]\n ticket_question = ticket.question\n self.assertEqual(question.pop('id'), ticket.id)\n view = ticket_question.as_dict\n view.pop('id')\n self.assertEqual(question, view)\n\n def test_get_exam_questions_already_checked_in(self):\n self.student_session.check_in = True\n checkin_date = self.student_session.started_at\n result = self.assertResponseSuccess(self.get_exam_questions.post(\n session_id=self.student_session.id))\n self.assertEqual(result['status'], ExamStatus.available)\n self.assertEqual(result['score'], None)\n self.student_session.refresh_from_db()\n self.assertTrue(self.student_session.check_in)\n self.assertEqual(self.student_session.started_at, checkin_date)\n questions = result['questions']\n self.assertIsInstance(questions, list)\n self.assertEqual(len(questions), len(self.tickets))\n\n def test_get_exam_questions_not_available(self):\n self.session.start_time += self.session.duration\n self.session.save()\n result = self.assertResponseSuccess(self.get_exam_questions.post(\n session_id=self.student_session.id))\n self.assertEqual(result['status'], ExamStatus.not_available)\n self.assertEqual(result['score'], None)\n questions = result['questions']\n self.assertIsInstance(questions, list)\n self.assertEqual(len(questions), 0)\n\n def test_get_exam_questions_submitted(self):\n self.student_session.finished_at = timezone.now()\n self.student_session.save()\n ANSWER = 'answer'\n for ticket in self.tickets:\n ticket.answer = ANSWER\n ticket.save()\n result = self.assertResponseSuccess(self.get_exam_questions.post(\n session_id=self.student_session.id))\n self.assertEqual(result['status'], ExamStatus.submitted)\n self.assertEqual(result['score'], None)\n questions = result['questions']\n self.assertIsInstance(questions, list)\n self.assertEqual(len(questions), len(self.tickets))\n for question in questions:\n ticket = self.ticket_map[question['id']]\n ticket.refresh_from_db()\n answer = question.pop('answer')\n self.assertEqual(answer, ticket.answer)\n self.assertEqual(question['score'], None)\n\n def test_get_exam_questions_submitted_and_scored(self):\n self.student_session.finished_at = timezone.now()\n self.student_session.save()\n ANSWER = 'answer'\n for ticket in self.tickets:\n ticket.answer = ANSWER\n ticket.score = 1.0\n 
ticket.save()\n result = self.assertResponseSuccess(self.get_exam_questions.post(\n session_id=self.student_session.id))\n self.assertEqual(result['status'], ExamStatus.submitted)\n self.assertEqual(result['score'], sum(t.score for t in self.tickets))\n questions = result['questions']\n self.assertIsInstance(questions, list)\n self.assertEqual(len(questions), len(self.tickets))\n for question in questions:\n ticket = self.ticket_map[question['id']]\n ticket.refresh_from_db()\n self.assertEqual(question['score'], ticket.score)\n\n def test_get_exam_questions_invalid_params(self):\n self.assertResponseError(self.get_exam_questions.post(), errors.\n InvalidParameter('session_id'))\n self.assertResponseError(self.get_exam_questions.post(session_id=\n uuid_str()), errors.ExamNotFound)\n self.get_exam_questions.cookies = {}\n self.assertResponseError(self.get_exam_questions.post(session_id=\n self.student_session.id), errors.Unauthorized)\n response = self.get_exam_questions.get()\n self.assertEqual(response.status_code, 405)\n\n\nclass TestSubmitExam(ApiTestCase):\n\n def setUp(self):\n super().setUp()\n self.submit_exam = ApiClient('/api/submit', student=self.student)\n self.setup_exam_objects()\n\n def tearDown(self):\n self.teardown_exam_objects()\n super().tearDown()\n\n def test_submit_exam(self):\n answers = {}\n ANSWER = 'answer'\n for ticket in self.tickets:\n if ticket.question.type == QuestionType.single:\n answers[ticket.id] = random.randint(0, len(ticket.question.\n options) - 1)\n elif ticket.question.type == QuestionType.multi:\n answers[ticket.id] = random.sample(list(range(0, len(ticket\n .question.options))), k=random.randint(0, len(ticket.\n question.options)))\n else:\n answers[ticket.id] = ANSWER\n result = self.assertResponseSuccess(self.submit_exam.post(\n session_id=self.student_session.id, answers=answers))\n self.assertEqual(result, True)\n self.student_session.refresh_from_db()\n self.assertEqual(self.student_session.status, ExamStatus.submitted)\n for ticket in self.tickets:\n ticket.refresh_from_db()\n if ticket.question.type == QuestionType.single:\n self.assertEqual(ticket.answer, ticket.question.options[\n answers[ticket.id]])\n elif ticket.question.type == QuestionType.multi:\n self.assertEqual(ticket.answer, ';'.join([ticket.question.\n options[x] for x in sorted(answers[ticket.id])]))\n self.assertIsNotNone(ticket.answered_at)\n\n def test_submit_without_any_answer(self):\n result = self.assertResponseSuccess(self.submit_exam.post(\n session_id=self.student_session.id, answers={}))\n self.assertEqual(result, True)\n self.student_session.refresh_from_db()\n self.assertEqual(self.student_session.status, ExamStatus.submitted)\n for ticket in self.tickets:\n ticket.refresh_from_db()\n self.assertIsNone(ticket.answered_at)\n self.assertIsNone(ticket.answer)\n\n def test_submit_partial_answer_errors(self):\n ANSWER = 'answer'\n answers = {self.tickets[0].id: len(self.tickets[0].question.options\n ), self.tickets[1].id: 0, self.tickets[2].id: ANSWER, uuid_str(\n ): ANSWER, (self.tickets[2].id + 1): ANSWER}\n result = self.assertResponseSuccess(self.submit_exam.post(\n session_id=self.student_session.id, answers=answers))\n self.assertEqual(result, True)\n self.student_session.refresh_from_db()\n self.assertEqual(self.student_session.status, ExamStatus.submitted)\n for ticket in self.tickets:\n ticket.refresh_from_db()\n self.assertIsNone(self.tickets[0].answer)\n self.assertIsNone(self.tickets[0].answered_at)\n self.assertIsNone(self.tickets[1].answer)\n 
self.assertIsNone(self.tickets[1].answered_at)\n self.assertEqual(self.tickets[2].answer, ANSWER)\n self.assertIsNotNone(self.tickets[2].answered_at)\n\n def test_submit_errors(self):\n self.assertResponseError(self.submit_exam.post(), errors.\n InvalidParameter('session_id'))\n self.assertResponseError(self.submit_exam.post(session_id=123),\n errors.InvalidParameter('session_id'))\n self.assertResponseError(self.submit_exam.post(session_id=uuid_str(\n )), errors.InvalidParameter('answers'))\n self.assertResponseError(self.submit_exam.post(session_id=uuid_str(\n ), answers=[]), errors.InvalidParameter('answers'))\n self.assertResponseError(self.submit_exam.post(session_id=uuid_str(\n ), answers={}), errors.ExamNotFound)\n self.session.start_time += self.session.duration\n self.session.save()\n self.assertResponseError(self.submit_exam.post(session_id=self.\n student_session.id, answers={}), errors.ExamNotAvailable)\n self.student_session.start_time = timezone.now()\n self.student_session.save()\n self.assertResponseError(self.submit_exam.post(session_id=self.\n student_session.id, answers={}), errors.ExamNotAvailable)\n",
"step-5": "import random\nfrom datetime import timedelta\nfrom typing import Union, Type, Tuple, List, Dict\n\nfrom django import http\nfrom django.test import TestCase, Client\nfrom django.utils import timezone\n\nfrom exam_web import errors\nfrom exam_web.models import Student, AcademyGroup, uuid_str, ExamSession, \\\n UserSession, Question, Stage, QuestionType, ExamTicket, ExamStatus\n\n\nclass ApiClient(Client):\n path: str\n\n def __init__(self, path: str, student: Student = None, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.student = student\n self.path = path\n self.headers = {'content_type': 'application/json'}\n if student:\n self.cookies['student'] = student.id\n\n def path_params(self, **params):\n return ApiClient(self.path.format(**params), self.student)\n\n def get(self, **kwargs):\n return super().get(self.path, data=kwargs, **self.headers)\n\n def post(self, **json):\n return super().post(self.path, data=json, **self.headers)\n\n def __call__(self, **kwargs):\n raise AttributeError('Use `get` or `post` methods instead')\n\n\nclass ApiTestCase(TestCase):\n group: AcademyGroup\n student: Student\n\n @classmethod\n def setUpClass(cls):\n super().setUpClass()\n cls.group = AcademyGroup.objects.create(name='test_group')\n cls.student = Student.objects.create(name='test user', group=cls.group)\n\n @classmethod\n def tearDownClass(cls):\n cls.student.delete()\n cls.group.delete()\n super().tearDownClass()\n\n def setup_exam_objects(self):\n self.session = ExamSession.objects.create(\n start_time=timezone.now(), duration=timedelta(minutes=40))\n self.student_session = UserSession.objects.create(\n student=self.student, exam_session=self.session)\n self.questions = [\n Question.objects.create(\n stage=Stage.first, type=QuestionType.single, max_score=1,\n text='test single question', options=['a', 'b', 'c']\n ),\n Question.objects.create(\n stage=Stage.first, type=QuestionType.multi, max_score=1,\n text='test multi question', options=['a', 'b', 'c']\n ),\n Question.objects.create(\n stage=Stage.second, type=QuestionType.open, max_score=1,\n text='test open question', options=None,\n ),\n ]\n self.tickets = [\n ExamTicket.objects.create(\n student=self.student, session=self.student_session,\n question=question) for question in self.questions\n ]\n self.ticket_map = {x.id: x for x in self.tickets}\n\n def teardown_exam_objects(self):\n for ticket in self.tickets:\n ticket.delete()\n for question in self.questions:\n question.delete()\n self.student_session.delete()\n\n def assertResponseSuccess(self, response: http.HttpResponse):\n content = response.content.decode()\n self.assertEqual(response.status_code, 200,\n (response.status_code, content))\n content = response.json()\n self.assertIn('result', content, content)\n return content['result']\n\n def assertResponseError(\n self, response: http.JsonResponse,\n error: Union[errors.APIError, Type[errors.APIError]] = None\n ) -> Tuple[int, str]:\n content = response.json()\n self.assertGreaterEqual(response.status_code, 400,\n (response.status_code, content))\n self.assertIn('error', content, content)\n if error is not None:\n if isinstance(error, type):\n error = error()\n self.assertEqual(response.status_code, error.status,\n (response.status_code, content))\n self.assertEqual(content['error'], error.message,\n (response.status_code, content))\n return response.status_code, content['error']\n\n\nclass TestAuthorize(ApiTestCase):\n authorize: ApiClient\n\n def setUp(self):\n super().setUp()\n self.authorize = 
ApiClient('/api/authorize')\n\n def test_authorized(self):\n response = self.authorize.post(token=self.student.id)\n result = self.assertResponseSuccess(response)\n\n self.assertEqual(response.cookies['student'].value, self.student.id)\n\n self.assertEqual(result['name'], self.student.name)\n self.assertEqual(result['group'], self.group.name)\n self.assertEqual(result['id'], self.student.id)\n\n def test_authorized_unknown_token(self):\n response = self.authorize.post(token=uuid_str())\n self.assertResponseError(response, errors.Unauthorized)\n\n def test_authorized_invalid_params(self):\n response = self.authorize.post()\n self.assertResponseError(response, errors.InvalidParameter('token'))\n\n response = self.authorize.post(token=12345678)\n self.assertResponseError(response, errors.InvalidParameter('token'))\n\n response = self.authorize.get()\n self.assertEqual(response.status_code, 405)\n\n\nclass TestGetExamSessions(ApiTestCase):\n get_exams: ApiClient\n session: ExamSession\n student_session: UserSession\n questions: List[Question]\n tickets: List[ExamTicket]\n\n def setUp(self):\n super().setUp()\n self.get_exams = ApiClient('/api/exams', student=self.student)\n self.setup_exam_objects()\n\n def tearDown(self):\n self.teardown_exam_objects()\n super().tearDown()\n\n def test_get_exams_available(self):\n result = self.assertResponseSuccess(self.get_exams.get())\n self.assertIsInstance(result, list)\n self.assertEqual(len(result), 1)\n user_session = result[0]\n self.assertEqual(\n user_session['started_at'], self.session.start_time.isoformat())\n self.assertEqual(user_session['duration'],\n self.session.duration.total_seconds() / 60)\n self.assertEqual(user_session['checked_in'], False)\n self.assertEqual(user_session['finished_at'], None)\n self.assertEqual(user_session['status'], ExamStatus.available.value)\n self.assertEqual(user_session['score'], None)\n\n def test_get_exams_check_in(self):\n self.student_session.started_at = timezone.now()\n self.student_session.save()\n\n result = self.assertResponseSuccess(self.get_exams.get())\n user_session = result[0]\n self.assertEqual(user_session['checked_in'], True)\n\n def test_get_exams_submitted(self):\n now = timezone.now()\n self.student_session.started_at = timezone.now()\n self.student_session.finished_at = now\n self.student_session.save()\n\n result = self.assertResponseSuccess(self.get_exams.get())\n user_session = result[0]\n self.assertEqual(user_session['finished_at'], now.isoformat())\n self.assertEqual(user_session['status'], ExamStatus.submitted)\n self.assertEqual(user_session['score'], None)\n\n def test_get_exams_non_available(self):\n self.session.start_time = timezone.now() + self.session.duration\n self.session.save()\n\n result = self.assertResponseSuccess(self.get_exams.get())\n user_session = result[0]\n self.assertEqual(user_session['started_at'],\n self.session.start_time.isoformat())\n self.assertEqual(user_session['finished_at'], None)\n self.assertEqual(user_session['status'], ExamStatus.not_available)\n\n def test_get_exams_unauthorized(self):\n self.get_exams.cookies = {}\n self.assertResponseError(self.get_exams.get(), errors.Unauthorized)\n\n response = self.get_exams.post()\n self.assertEqual(response.status_code, 405)\n\n def test_get_exams_score(self):\n for ticket in self.tickets:\n ticket.score = 1.0\n ticket.save()\n result = self.assertResponseSuccess(self.get_exams.get())\n user_session = result[0]\n self.assertEqual(user_session['score'],\n sum(t.score for t in self.tickets))\n\n 
self.tickets[0].score = None\n self.tickets[0].save()\n result = self.assertResponseSuccess(self.get_exams.get())\n user_session = result[0]\n self.assertEqual(user_session['score'], None)\n\n\nclass TestGetExamTickets(ApiTestCase):\n get_exams: ApiClient\n session: ExamSession\n student_session: UserSession\n questions: List[Question]\n tickets: List[ExamTicket]\n ticket_map: Dict[str, ExamTicket]\n\n def setUp(self):\n super().setUp()\n self.get_exam_questions = \\\n ApiClient('/api/tickets', student=self.student)\n self.setup_exam_objects()\n\n def tearDown(self):\n self.teardown_exam_objects()\n super().tearDown()\n\n def test_get_exam_questions(self):\n self.assertFalse(self.student_session.check_in)\n\n result = self.assertResponseSuccess(\n self.get_exam_questions.post(session_id=self.student_session.id))\n self.assertEqual(result['status'], ExamStatus.available)\n self.assertEqual(result['score'], None)\n self.student_session.refresh_from_db()\n self.assertTrue(self.student_session.check_in)\n\n questions = result['questions']\n self.assertIsInstance(questions, list)\n self.assertEqual(len(questions), len(self.tickets))\n self.assertEqual([x['id'] for x in questions], [\n x.id for x in sorted(self.tickets, key=lambda x: x.question.stage)\n ])\n for question in questions:\n ticket = self.ticket_map[question['id']]\n ticket_question = ticket.question\n self.assertEqual(question.pop('id'), ticket.id)\n view = ticket_question.as_dict\n view.pop('id')\n self.assertEqual(question, view)\n\n def test_get_exam_questions_already_checked_in(self):\n self.student_session.check_in = True\n checkin_date = self.student_session.started_at\n\n result = self.assertResponseSuccess(\n self.get_exam_questions.post(session_id=self.student_session.id))\n self.assertEqual(result['status'], ExamStatus.available)\n self.assertEqual(result['score'], None)\n self.student_session.refresh_from_db()\n self.assertTrue(self.student_session.check_in)\n self.assertEqual(self.student_session.started_at, checkin_date)\n\n questions = result['questions']\n self.assertIsInstance(questions, list)\n self.assertEqual(len(questions), len(self.tickets))\n\n def test_get_exam_questions_not_available(self):\n self.session.start_time += self.session.duration\n self.session.save()\n\n result = self.assertResponseSuccess(\n self.get_exam_questions.post(session_id=self.student_session.id))\n self.assertEqual(result['status'], ExamStatus.not_available)\n self.assertEqual(result['score'], None)\n\n questions = result['questions']\n self.assertIsInstance(questions, list)\n self.assertEqual(len(questions), 0)\n\n def test_get_exam_questions_submitted(self):\n self.student_session.finished_at = timezone.now()\n self.student_session.save()\n ANSWER = 'answer'\n for ticket in self.tickets:\n ticket.answer = ANSWER\n ticket.save()\n\n result = self.assertResponseSuccess(\n self.get_exam_questions.post(session_id=self.student_session.id))\n self.assertEqual(result['status'], ExamStatus.submitted)\n self.assertEqual(result['score'], None)\n\n questions = result['questions']\n self.assertIsInstance(questions, list)\n self.assertEqual(len(questions), len(self.tickets))\n for question in questions:\n ticket = self.ticket_map[question['id']]\n ticket.refresh_from_db()\n answer = question.pop('answer')\n self.assertEqual(answer, ticket.answer)\n self.assertEqual(question['score'], None)\n\n def test_get_exam_questions_submitted_and_scored(self):\n self.student_session.finished_at = timezone.now()\n self.student_session.save()\n ANSWER = 
'answer'\n for ticket in self.tickets:\n ticket.answer = ANSWER\n ticket.score = 1.0\n ticket.save()\n\n result = self.assertResponseSuccess(\n self.get_exam_questions.post(session_id=self.student_session.id))\n self.assertEqual(result['status'], ExamStatus.submitted)\n self.assertEqual(result['score'], sum(t.score for t in self.tickets))\n\n questions = result['questions']\n self.assertIsInstance(questions, list)\n self.assertEqual(len(questions), len(self.tickets))\n for question in questions:\n ticket = self.ticket_map[question['id']]\n ticket.refresh_from_db()\n self.assertEqual(question['score'], ticket.score)\n\n def test_get_exam_questions_invalid_params(self):\n self.assertResponseError(self.get_exam_questions.post(),\n errors.InvalidParameter('session_id'))\n self.assertResponseError(\n self.get_exam_questions.post(session_id=uuid_str()),\n errors.ExamNotFound)\n self.get_exam_questions.cookies = {}\n self.assertResponseError(\n self.get_exam_questions.post(session_id=self.student_session.id),\n errors.Unauthorized)\n\n response = self.get_exam_questions.get()\n self.assertEqual(response.status_code, 405)\n\n\nclass TestSubmitExam(ApiTestCase):\n def setUp(self):\n super().setUp()\n self.submit_exam = ApiClient('/api/submit', student=self.student)\n self.setup_exam_objects()\n\n def tearDown(self):\n self.teardown_exam_objects()\n super().tearDown()\n\n def test_submit_exam(self):\n answers = {}\n ANSWER = 'answer'\n for ticket in self.tickets:\n if ticket.question.type == QuestionType.single:\n answers[ticket.id] = \\\n random.randint(0, len(ticket.question.options)-1)\n elif ticket.question.type == QuestionType.multi:\n answers[ticket.id] = random.sample(\n list(range(0, len(ticket.question.options))),\n k=random.randint(0, len(ticket.question.options))\n )\n else:\n answers[ticket.id] = ANSWER\n result = self.assertResponseSuccess(self.submit_exam.post(\n session_id=self.student_session.id, answers=answers))\n self.assertEqual(result, True)\n\n self.student_session.refresh_from_db()\n self.assertEqual(self.student_session.status, ExamStatus.submitted)\n for ticket in self.tickets:\n ticket.refresh_from_db()\n if ticket.question.type == QuestionType.single:\n self.assertEqual(\n ticket.answer, ticket.question.options[answers[ticket.id]])\n elif ticket.question.type == QuestionType.multi:\n self.assertEqual(ticket.answer, ';'.join([\n ticket.question.options[x]\n for x in sorted(answers[ticket.id])\n ]))\n self.assertIsNotNone(ticket.answered_at)\n\n def test_submit_without_any_answer(self):\n result = self.assertResponseSuccess(self.submit_exam.post(\n session_id=self.student_session.id, answers={}))\n self.assertEqual(result, True)\n self.student_session.refresh_from_db()\n self.assertEqual(self.student_session.status, ExamStatus.submitted)\n for ticket in self.tickets:\n ticket.refresh_from_db()\n self.assertIsNone(ticket.answered_at)\n self.assertIsNone(ticket.answer)\n\n def test_submit_partial_answer_errors(self):\n ANSWER = 'answer'\n answers = {\n # неверный порядковый индекс ответа\n self.tickets[0].id: len(self.tickets[0].question.options),\n # неверный тип ответа\n self.tickets[1].id: 0,\n # корректный ответ\n self.tickets[2].id: ANSWER,\n # неверный ид билета\n uuid_str(): ANSWER,\n # несуществующий тикет\n self.tickets[2].id + 1: ANSWER,\n }\n result = self.assertResponseSuccess(self.submit_exam.post(\n session_id=self.student_session.id, answers=answers))\n self.assertEqual(result, True)\n self.student_session.refresh_from_db()\n 
self.assertEqual(self.student_session.status, ExamStatus.submitted)\n for ticket in self.tickets:\n ticket.refresh_from_db()\n self.assertIsNone(self.tickets[0].answer)\n self.assertIsNone(self.tickets[0].answered_at)\n self.assertIsNone(self.tickets[1].answer)\n self.assertIsNone(self.tickets[1].answered_at)\n self.assertEqual(self.tickets[2].answer, ANSWER)\n self.assertIsNotNone(self.tickets[2].answered_at)\n\n def test_submit_errors(self):\n self.assertResponseError(self.submit_exam.post(),\n errors.InvalidParameter('session_id'))\n self.assertResponseError(self.submit_exam.post(session_id=123),\n errors.InvalidParameter('session_id'))\n self.assertResponseError(self.submit_exam.post(session_id=uuid_str()),\n errors.InvalidParameter('answers'))\n self.assertResponseError(\n self.submit_exam.post(session_id=uuid_str(), answers=[]),\n errors.InvalidParameter('answers'))\n self.assertResponseError(\n self.submit_exam.post(session_id=uuid_str(), answers={}),\n errors.ExamNotFound)\n\n self.session.start_time += self.session.duration\n self.session.save()\n self.assertResponseError(self.submit_exam.post(\n session_id=self.student_session.id, answers={}),\n errors.ExamNotAvailable)\n self.student_session.start_time = timezone.now()\n self.student_session.save()\n self.assertResponseError(self.submit_exam.post(\n session_id=self.student_session.id, answers={}),\n errors.ExamNotAvailable)\n",
"step-ids": [
15,
34,
42,
44,
45
]
}
|
[
15,
34,
42,
44,
45
] |
# -*- coding: utf-8 -*-
import io
import urllib.request
from pymarc import MARCReader
class Item:
"""
Represents an item from our
Library catalogue (https://www-lib.soton.ac.uk)
Usage:
#>>> import findbooks
#>>> item = findbooks.Item('12345678')
    #>>> item.get_marc_fields('short')
#>>> print(item.title)
"""
webcat = "http://lms.soton.ac.uk/cgi-bin/goobi_marc.cgi?itemid="
def __init__(self, barcode):
self.barcode = barcode
self.marc = None
self.record = None
self.title = None
self.author = None
self.year = None
def _get_marc(self):
with urllib.request.urlopen(Item.webcat + self.barcode) as response:
html = response.read().decode("utf-8")
marc = html[html.find(">")+1:html.rfind("<")].strip('''
''')
if "Barcode not found" not in marc:
self.marc = marc
def _get_title(self):
if self.record['245']:
title = self.record['245']['a'].strip(' /:,.')
return title
def _get_long_title(self):
title = self.record.title().strip(' /:,.')
return title
def _get_author(self):
if self.record['100']:
return self.record['100']['a']
elif self.record['110']:
return self.record['110']['a']
elif self.record['111']:
return self.record['111']['a']
else:
return None
def _get_year(self):
date = self.record.pubyear()
if date:
# dates should only have numbers
nums = '1234567890'
new_date = ''
for ch in date:
if ch in nums:
new_date += ch
# dates should have '1' as the first char
if not new_date[0] == "1":
return None
            # dates should be 4 chars long
if not len(new_date) == 4:
return None
return new_date
else:
return None
def get_marc_fields(self, len_title):
self._get_marc()
if self.marc:
with io.BytesIO(self.marc.encode('utf-8')) as fh:
reader = MARCReader(fh)
for record in reader:
self.record = record
self.title = self._get_title() if len_title == "short" else self._get_long_title()
self.author = self._get_author()
self.year = self._get_year()
# item = Item('59571478')
# item.get_marc_fields('short')
# print(item.title)
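# Hedged usage sketch (the barcode is illustrative and assumes network access
# to the webcat service; 'short' picks the 245$a title, any other value falls
# back to pymarc's longer title):
#
# item = Item('12345678')
# item.get_marc_fields('short')
# if item.record is not None:
#     print(item.title, item.author, item.year)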
|
normal
|
{
"blob_id": "abfff0901e5f825a473119c93f53cba206609428",
"index": 7482,
"step-1": "<mask token>\n\n\nclass Item:\n <mask token>\n <mask token>\n\n def __init__(self, barcode):\n self.barcode = barcode\n self.marc = None\n self.record = None\n self.title = None\n self.author = None\n self.year = None\n\n def _get_marc(self):\n with urllib.request.urlopen(Item.webcat + self.barcode) as response:\n html = response.read().decode('utf-8')\n marc = html[html.find('>') + 1:html.rfind('<')].strip('\\n\\n ')\n if 'Barcode not found' not in marc:\n self.marc = marc\n\n def _get_title(self):\n if self.record['245']:\n title = self.record['245']['a'].strip(' /:,.')\n return title\n <mask token>\n <mask token>\n\n def _get_year(self):\n date = self.record.pubyear()\n if date:\n nums = '1234567890'\n new_date = ''\n for ch in date:\n if ch in nums:\n new_date += ch\n if not new_date[0] == '1':\n return None\n if not len(new_date) == 4:\n return None\n return new_date\n else:\n return None\n\n def get_marc_fields(self, len_title):\n self._get_marc()\n if self.marc:\n with io.BytesIO(self.marc.encode('utf-8')) as fh:\n reader = MARCReader(fh)\n for record in reader:\n self.record = record\n self.title = self._get_title(\n ) if len_title == 'short' else self._get_long_title()\n self.author = self._get_author()\n self.year = self._get_year()\n",
"step-2": "<mask token>\n\n\nclass Item:\n <mask token>\n <mask token>\n\n def __init__(self, barcode):\n self.barcode = barcode\n self.marc = None\n self.record = None\n self.title = None\n self.author = None\n self.year = None\n\n def _get_marc(self):\n with urllib.request.urlopen(Item.webcat + self.barcode) as response:\n html = response.read().decode('utf-8')\n marc = html[html.find('>') + 1:html.rfind('<')].strip('\\n\\n ')\n if 'Barcode not found' not in marc:\n self.marc = marc\n\n def _get_title(self):\n if self.record['245']:\n title = self.record['245']['a'].strip(' /:,.')\n return title\n <mask token>\n\n def _get_author(self):\n if self.record['100']:\n return self.record['100']['a']\n elif self.record['110']:\n return self.record['110']['a']\n elif self.record['111']:\n return self.record['111']['a']\n else:\n return None\n\n def _get_year(self):\n date = self.record.pubyear()\n if date:\n nums = '1234567890'\n new_date = ''\n for ch in date:\n if ch in nums:\n new_date += ch\n if not new_date[0] == '1':\n return None\n if not len(new_date) == 4:\n return None\n return new_date\n else:\n return None\n\n def get_marc_fields(self, len_title):\n self._get_marc()\n if self.marc:\n with io.BytesIO(self.marc.encode('utf-8')) as fh:\n reader = MARCReader(fh)\n for record in reader:\n self.record = record\n self.title = self._get_title(\n ) if len_title == 'short' else self._get_long_title()\n self.author = self._get_author()\n self.year = self._get_year()\n",
"step-3": "<mask token>\n\n\nclass Item:\n <mask token>\n <mask token>\n\n def __init__(self, barcode):\n self.barcode = barcode\n self.marc = None\n self.record = None\n self.title = None\n self.author = None\n self.year = None\n\n def _get_marc(self):\n with urllib.request.urlopen(Item.webcat + self.barcode) as response:\n html = response.read().decode('utf-8')\n marc = html[html.find('>') + 1:html.rfind('<')].strip('\\n\\n ')\n if 'Barcode not found' not in marc:\n self.marc = marc\n\n def _get_title(self):\n if self.record['245']:\n title = self.record['245']['a'].strip(' /:,.')\n return title\n\n def _get_long_title(self):\n title = self.record.title().strip(' /:,.')\n return title\n\n def _get_author(self):\n if self.record['100']:\n return self.record['100']['a']\n elif self.record['110']:\n return self.record['110']['a']\n elif self.record['111']:\n return self.record['111']['a']\n else:\n return None\n\n def _get_year(self):\n date = self.record.pubyear()\n if date:\n nums = '1234567890'\n new_date = ''\n for ch in date:\n if ch in nums:\n new_date += ch\n if not new_date[0] == '1':\n return None\n if not len(new_date) == 4:\n return None\n return new_date\n else:\n return None\n\n def get_marc_fields(self, len_title):\n self._get_marc()\n if self.marc:\n with io.BytesIO(self.marc.encode('utf-8')) as fh:\n reader = MARCReader(fh)\n for record in reader:\n self.record = record\n self.title = self._get_title(\n ) if len_title == 'short' else self._get_long_title()\n self.author = self._get_author()\n self.year = self._get_year()\n",
"step-4": "import io\nimport urllib.request\nfrom pymarc import MARCReader\n\n\nclass Item:\n \"\"\"\n Represents an item from our\n Library catalogue (https://www-lib.soton.ac.uk)\n Usage:\n\n #>>> import findbooks\n #>>> item = findbooks.Item('12345678')\n #>>> item.getMarcFields()\n #>>> print(item.title)\n\n \"\"\"\n webcat = 'http://lms.soton.ac.uk/cgi-bin/goobi_marc.cgi?itemid='\n\n def __init__(self, barcode):\n self.barcode = barcode\n self.marc = None\n self.record = None\n self.title = None\n self.author = None\n self.year = None\n\n def _get_marc(self):\n with urllib.request.urlopen(Item.webcat + self.barcode) as response:\n html = response.read().decode('utf-8')\n marc = html[html.find('>') + 1:html.rfind('<')].strip('\\n\\n ')\n if 'Barcode not found' not in marc:\n self.marc = marc\n\n def _get_title(self):\n if self.record['245']:\n title = self.record['245']['a'].strip(' /:,.')\n return title\n\n def _get_long_title(self):\n title = self.record.title().strip(' /:,.')\n return title\n\n def _get_author(self):\n if self.record['100']:\n return self.record['100']['a']\n elif self.record['110']:\n return self.record['110']['a']\n elif self.record['111']:\n return self.record['111']['a']\n else:\n return None\n\n def _get_year(self):\n date = self.record.pubyear()\n if date:\n nums = '1234567890'\n new_date = ''\n for ch in date:\n if ch in nums:\n new_date += ch\n if not new_date[0] == '1':\n return None\n if not len(new_date) == 4:\n return None\n return new_date\n else:\n return None\n\n def get_marc_fields(self, len_title):\n self._get_marc()\n if self.marc:\n with io.BytesIO(self.marc.encode('utf-8')) as fh:\n reader = MARCReader(fh)\n for record in reader:\n self.record = record\n self.title = self._get_title(\n ) if len_title == 'short' else self._get_long_title()\n self.author = self._get_author()\n self.year = self._get_year()\n",
"step-5": "# -*- coding: utf-8 -*-\nimport io\nimport urllib.request\nfrom pymarc import MARCReader\n\n\nclass Item:\n \"\"\"\n Represents an item from our\n Library catalogue (https://www-lib.soton.ac.uk)\n Usage:\n\n #>>> import findbooks\n #>>> item = findbooks.Item('12345678')\n #>>> item.getMarcFields()\n #>>> print(item.title)\n\n \"\"\"\n webcat = \"http://lms.soton.ac.uk/cgi-bin/goobi_marc.cgi?itemid=\"\n\n def __init__(self, barcode):\n self.barcode = barcode\n self.marc = None\n self.record = None\n self.title = None\n self.author = None\n self.year = None\n\n def _get_marc(self):\n with urllib.request.urlopen(Item.webcat + self.barcode) as response:\n html = response.read().decode(\"utf-8\")\n marc = html[html.find(\">\")+1:html.rfind(\"<\")].strip('''\n\n ''')\n if \"Barcode not found\" not in marc:\n self.marc = marc\n\n def _get_title(self):\n if self.record['245']:\n title = self.record['245']['a'].strip(' /:,.')\n return title\n\n def _get_long_title(self):\n title = self.record.title().strip(' /:,.')\n return title\n\n def _get_author(self):\n if self.record['100']:\n return self.record['100']['a']\n elif self.record['110']:\n return self.record['110']['a']\n elif self.record['111']:\n return self.record['111']['a']\n else:\n return None\n\n def _get_year(self):\n date = self.record.pubyear()\n if date:\n # dates should only have numbers\n nums = '1234567890'\n new_date = ''\n for ch in date:\n if ch in nums:\n new_date += ch\n # dates should have '1' as the first char\n if not new_date[0] == \"1\":\n return None\n # dates should eb 4 chars long\n if not len(new_date) == 4:\n return None\n return new_date\n else:\n return None\n\n def get_marc_fields(self, len_title):\n self._get_marc()\n if self.marc:\n with io.BytesIO(self.marc.encode('utf-8')) as fh:\n reader = MARCReader(fh)\n for record in reader:\n self.record = record\n self.title = self._get_title() if len_title == \"short\" else self._get_long_title()\n self.author = self._get_author()\n self.year = self._get_year()\n\n# item = Item('59571478')\n# item.get_marc_fields()\n# print(item.title)\n",
"step-ids": [
6,
7,
8,
11,
12
]
}
|
[
6,
7,
8,
11,
12
] |
"""
Task. Given two integers a and b, find their greatest common divisor.
Input Format. The two integers a, b are given in the same line separated by space.
Constraints. 1 <= a, b <= 2·10^9.
Output Format. Output GCD(a, b).
"""
def EuclidGCD(a, b):
if b == 0:
return a
else:
a = a%b
return EuclidGCD(b, a)
in_ = [int(n) for n in input().split()]
print(EuclidGCD(in_[0], in_[1]))
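# Worked example of the recursion, assuming the input line "18 27":
#   EuclidGCD(18, 27) -> EuclidGCD(27, 18) -> EuclidGCD(18, 9) -> EuclidGCD(9, 0) -> 9
# so the program prints 9, the GCD of 18 and 27.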
|
normal
|
{
"blob_id": "39d82267f966ca106ee384e540c31a3e5e433318",
"index": 2248,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef EuclidGCD(a, b):\n if b == 0:\n return a\n else:\n a = a % b\n return EuclidGCD(b, a)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef EuclidGCD(a, b):\n if b == 0:\n return a\n else:\n a = a % b\n return EuclidGCD(b, a)\n\n\n<mask token>\nprint(EuclidGCD(in_[0], in_[1]))\n",
"step-4": "<mask token>\n\n\ndef EuclidGCD(a, b):\n if b == 0:\n return a\n else:\n a = a % b\n return EuclidGCD(b, a)\n\n\nin_ = [int(n) for n in input().split()]\nprint(EuclidGCD(in_[0], in_[1]))\n",
"step-5": "\"\"\"\nTask. Given two integers a and b, find their greatest common divisor.\nInput Format. The two integers a, b are given in the same line separated by space.\nConstraints. 1<=a,b<=2·109.\nOutput Format. Output GCD(a, b).\n\"\"\"\n\ndef EuclidGCD(a, b):\n if b == 0:\n return a\n else:\n a = a%b\n return EuclidGCD(b, a)\n\nin_ = [int(n) for n in input().split()]\nprint(EuclidGCD(in_[0], in_[1]))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# CSE 415 Winter 2019
# Assignment 1
# Jichun Li 1531264
# Part A
# 1
def five_x_cubed_plus_1(x):
return 5 * (x ** 3) + 1
#2
def pair_off(ary):
result = []
for i in range(0, int(len(ary) / 2 * 2), 2):
result.append([ary[i], ary[i + 1]])
if (int (len(ary) % 2) == 1):
result.append([ary[-1]])
return result
#3
def mystery_code(input_string):
result = ''
for c in input_string:
next_char = c
if str.isalpha(c):
if c.upper() < 'H':
if c.islower():
next_char = chr(ord(c) + 19).upper()
else:
next_char = chr(ord(c) + 19).lower()
else:
if c.islower():
next_char = chr(ord(c) - 7).upper()
else:
next_char = chr(ord(c) - 7).lower()
result = result + next_char
return result
#4
def past_tense(words):
result = []
irregular_dict = {'have':'had',
'be':'was',
'eat':'ate',
'go':'went'}
for word in words:
word = str.lower(word)
if word in irregular_dict.keys():
result.append(irregular_dict[word])
        elif word[-1] == 'e':
result.append(word + 'd')
        elif word[-1] == 'y' and word[-2] not in 'aeiou':
result.append(word[:-1] + 'ied')
elif word[-2] in 'aeiou' and word[-1] not in 'aeiouwy' and word[-3] not in 'aeiou':
result.append(word + word[-1] + 'ed')
else:
result.append(word + 'ed')
return result
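# Quick sanity checks, with expected values worked out from the definitions above:
# five_x_cubed_plus_1(2) -> 41 (5 * 2**3 + 1)
# pair_off([1, 2, 3, 4, 5]) -> [[1, 2], [3, 4], [5]]
# mystery_code('a') -> 'T' (alpha below 'H': shift by 19 and flip case)
# past_tense(['walk', 'hope', 'try', 'stop', 'go'])
# -> ['walked', 'hoped', 'tried', 'stopped', 'went']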
|
normal
|
{
"blob_id": "681788ffe7672458e8d334316aa87936746352b1",
"index": 4054,
"step-1": "def five_x_cubed_plus_1(x):\n return 5 * x ** 3 + 1\n\n\n<mask token>\n",
"step-2": "def five_x_cubed_plus_1(x):\n return 5 * x ** 3 + 1\n\n\ndef pair_off(ary):\n result = []\n for i in range(0, int(len(ary) / 2 * 2), 2):\n result.append([ary[i], ary[i + 1]])\n if int(len(ary) % 2) == 1:\n result.append([ary[-1]])\n return result\n\n\n<mask token>\n",
"step-3": "def five_x_cubed_plus_1(x):\n return 5 * x ** 3 + 1\n\n\ndef pair_off(ary):\n result = []\n for i in range(0, int(len(ary) / 2 * 2), 2):\n result.append([ary[i], ary[i + 1]])\n if int(len(ary) % 2) == 1:\n result.append([ary[-1]])\n return result\n\n\ndef mystery_code(input_string):\n result = ''\n for c in input_string:\n next_char = c\n if str.isalpha(c):\n if c.upper() < 'H':\n if c.islower():\n next_char = chr(ord(c) + 19).upper()\n else:\n next_char = chr(ord(c) + 19).lower()\n elif c.islower():\n next_char = chr(ord(c) - 7).upper()\n else:\n next_char = chr(ord(c) - 7).lower()\n result = result + next_char\n return result\n\n\n<mask token>\n",
"step-4": "def five_x_cubed_plus_1(x):\n return 5 * x ** 3 + 1\n\n\ndef pair_off(ary):\n result = []\n for i in range(0, int(len(ary) / 2 * 2), 2):\n result.append([ary[i], ary[i + 1]])\n if int(len(ary) % 2) == 1:\n result.append([ary[-1]])\n return result\n\n\ndef mystery_code(input_string):\n result = ''\n for c in input_string:\n next_char = c\n if str.isalpha(c):\n if c.upper() < 'H':\n if c.islower():\n next_char = chr(ord(c) + 19).upper()\n else:\n next_char = chr(ord(c) + 19).lower()\n elif c.islower():\n next_char = chr(ord(c) - 7).upper()\n else:\n next_char = chr(ord(c) - 7).lower()\n result = result + next_char\n return result\n\n\ndef past_tense(words):\n result = []\n irregular_dict = {'have': 'had', 'be': 'was', 'eat': 'ate', 'go': 'went'}\n for word in words:\n word = str.lower(word)\n if word in irregular_dict.keys():\n result.append(irregular_dict[word])\n elif word[-1] is 'e':\n result.append(word + 'd')\n elif word[-1] is 'y' and word[-2] not in 'aeiou':\n result.append(word[:-1] + 'ied')\n elif word[-2] in 'aeiou' and word[-1] not in 'aeiouwy' and word[-3\n ] not in 'aeiou':\n result.append(word + word[-1] + 'ed')\n else:\n result.append(word + 'ed')\n return result\n",
"step-5": "# CSE 415 Winter 2019\n# Assignment 1\n# Jichun Li 1531264\n\n# Part A\n# 1\ndef five_x_cubed_plus_1(x):\n\treturn 5 * (x ** 3) + 1\n\n#2\ndef pair_off(ary):\n result = []\n \n for i in range(0, int(len(ary) / 2 * 2), 2):\n result.append([ary[i], ary[i + 1]])\n if (int (len(ary) % 2) == 1):\n result.append([ary[-1]])\n return result\n\n#3\ndef mystery_code(input_string):\n\tresult = ''\n\tfor c in input_string:\n\t\tnext_char = c\n\t\tif str.isalpha(c):\n\t\t\tif c.upper() < 'H':\n\t\t\t\tif c.islower():\n\t\t\t\t\tnext_char = chr(ord(c) + 19).upper()\n\t\t\t\telse:\n\t\t\t\t\tnext_char = chr(ord(c) + 19).lower()\n\t\t\telse:\n\t\t\t\tif c.islower():\n\t\t\t\t\tnext_char = chr(ord(c) - 7).upper()\n\t\t\t\telse:\n\t\t\t\t\tnext_char = chr(ord(c) - 7).lower()\n\t\tresult = result + next_char\n\treturn result\n\n#4\ndef past_tense(words):\n\tresult = []\n\tirregular_dict = {'have':'had',\n\t\t\t 'be':'was',\n\t\t\t 'eat':'ate',\n\t\t\t 'go':'went'}\n\tfor word in words:\n\t\tword = str.lower(word)\n\t\tif word in irregular_dict.keys():\n\t\t\tresult.append(irregular_dict[word])\n\t\telif word[-1] is 'e':\n\t\t\tresult.append(word + 'd')\n\t\telif word[-1] is 'y' and word[-2] not in 'aeiou':\n\t\t\tresult.append(word[:-1] + 'ied')\n\t\telif word[-2] in 'aeiou' and word[-1] not in 'aeiouwy' and word[-3] not in 'aeiou':\n\t\t\tresult.append(word + word[-1] + 'ed')\n\t\telse:\n\t\t\tresult.append(word + 'ed')\n\treturn result\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#!/usr/bin/env python3
# ---------------------------------------------------
# SSHSploit Framework
# ---------------------------------------------------
# Copyright (C) <2020> <Entynetproject>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
os.system("printf '\033]2;SSHSploit Framework\a'")
import sys
import subprocess
import readline
import time
Q = '\033[1;77m[?] \033[0m'
G = '\033[1;34m[*] \033[0m'
S = '\033[1;32m[+] \033[0m'
W = '\033[1;33m[!] \033[0m'
E = '\033[1;31m[-] \033[0m'
rhost = ""
rport = ""
cmd = ""
attack = ""
pwd = 0
location = []
readline.parse_and_bind("tab: complete")
def banner():
os.system("clear")
os.system("cat banner/banner.txt")
print("")
print("SSHSploit Framework v1.0")
print("------------------------")
print("")
def main():
    global rhost, rport, cmd, attack, pwd, location  # state defined at module scope
ui = input('\033[4msshsploit\033[0m> ').strip(" ")
ui = ui.split()
while True:
if ui == []:
pass
elif ui[0] == "exit":
sys.exit()
elif ui[0] == "clear":
os.system("clear")
elif ui[0] == "update":
os.system("chmod +x etc/update.sh && etc/update.sh")
elif ui[0] == "help":
print("")
print("Core Commands")
print("=============")
os.system("cat data/cmds/core_cmds.txt")
print("")
elif ui[0] == "modules":
print("")
print("Modules")
print("=======")
print("")
os.system("cat data/modules/modules.txt")
print("")
elif ui[0] == "use":
if len(ui) < 2:
print("Usage: use <module>")
else:
attack = ui[1]
if attack == "libssh_rce_noauth" or attack == "libssh_shell_noauth":
                    location.append(attack)  # push the selected module onto the location stack
mod = input('\033[4msshsploit\033[0m(\033[1;31m'+attack+'\033[0m)> ').strip(" ")
mod = mod.split()
while True:
if mod == []:
pass
elif mod[0] == "back":
pwd -= 1
location = location[0:-1]
if location == []:
pwd = 0
break
elif mod[0] == "set":
if len(mod) < 3:
print("Usage: set <option> <value>")
else:
if attack == "libssh_rce_noauth":
if mod[1].lower() == "rhost":
rhost = mod[2]
elif mod[1].lower() == "rport":
rport = mod[2]
elif mod[1].lower() == "cmd":
cmd = mod[2]
else:
print(E+"Options is not found!")
else:
if mod[1].lower() == "rhost":
rhost = mod[2]
elif mod[1].lower() == "rport":
rport = mod[2]
else:
print(E+"Options is not found!")
elif mod[0] == "options":
if attack == "libssh_rce_noauth":
os.system("ruby data/options/options.rb libssh_rce_noauth "+rhost+" "+rport+" "+cmd)
else:
os.system("ruby data/options/options.rb libssh_shell_noauth "+rhost+" "+rport)
elif mod[0] == "use":
if len(mod) < 2:
print("Usage: use <module>")
else:
attack = mod[1]
if attack == "libssh_rce_noauth" or attack == "libssh_shell_noauth":
pwd += 1
                                location.append(mod[1])
else:
print(E+"Module is not found!")
elif mod[0] == "run":
if rhost == "" or rport == "":
print(E+"Target is not specified!")
else:
if attack == "libssh_rce_noauth":
if cmd == "":
print(E+"Command for RCE is not specified!")
else:
print(G+"Starting libssh_rce_noauth attack...")
os.system("python3 modules/libssh_rce_noauth.py "+rhost+" -p "+rport+" -v '"+cmd+"'")
elif attack == "libssh_shell_noauth":
print(G+"Starting libssh_shell_noauth attack...")
os.system("python3 modules/libssh_shell_noauth.py "+rhost+" -p "+rport+" -v --shell")
elif mod[0] == "clear":
os.system("clear")
elif mod[0] == "exit":
sys.exit()
elif mod[0] == "update":
os.system("chmod +x etc/update.sh && etc/update.sh")
elif mod[0] == "help":
print("")
print("Core Commands")
print("=============")
os.system("cat data/cmds/core_cmds.txt")
print("")
print("Module Commands")
print("===============")
os.system("cat data/cmds/module_cmds.txt")
print("")
else:
print(E+"Unrecognized command!")
mod = input('\033[4msshsploit\033[0m(\033[1;31m'+attack+'\033[0m)> ').strip(" ")
mod = mod.split()
else:
print(E+"Module is not found!")
else:
print(E+"Unrecognized command!")
ui = input('\033[4msshsploit\033[0m> ').strip(" ")
ui = ui.split()
banner()
main()
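# Illustrative interactive session (the target address below is a placeholder
# from the TEST-NET range, not a real host):
#
#   sshsploit> use libssh_shell_noauth
#   sshsploit(libssh_shell_noauth)> set rhost 192.0.2.10
#   sshsploit(libssh_shell_noauth)> set rport 22
#   sshsploit(libssh_shell_noauth)> run
#   [*] Starting libssh_shell_noauth attack...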
|
normal
|
{
"blob_id": "caf83d35ce6e0bd4e92f3de3a32221705a529ec1",
"index": 9467,
"step-1": "<mask token>\n\n\ndef banner():\n os.system('clear')\n os.system('cat banner/banner.txt')\n print('')\n print('SSHSploit Framework v1.0')\n print('------------------------')\n print('')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef banner():\n os.system('clear')\n os.system('cat banner/banner.txt')\n print('')\n print('SSHSploit Framework v1.0')\n print('------------------------')\n print('')\n\n\ndef main():\n ui = input('\\x1b[4msshsploit\\x1b[0m> ').strip(' ')\n ui = ui.split()\n while True:\n if ui == []:\n pass\n elif ui[0] == 'exit':\n sys.exit()\n elif ui[0] == 'clear':\n os.system('clear')\n elif ui[0] == 'update':\n os.system('chmod +x etc/update.sh && etc/update.sh')\n elif ui[0] == 'help':\n print('')\n print('Core Commands')\n print('=============')\n os.system('cat data/cmds/core_cmds.txt')\n print('')\n elif ui[0] == 'modules':\n print('')\n print('Modules')\n print('=======')\n print('')\n os.system('cat data/modules/modules.txt')\n print('')\n elif ui[0] == 'use':\n if len(ui) < 2:\n print('Usage: use <module>')\n else:\n attack = ui[1]\n if (attack == 'libssh_rce_noauth' or attack ==\n 'libssh_shell_noauth'):\n location[pwd] = c[1]\n mod = input('\\x1b[4msshsploit\\x1b[0m(\\x1b[1;31m' +\n attack + '\\x1b[0m)> ').strip(' ')\n mod = mod.split()\n while True:\n if mod == []:\n pass\n elif mod[0] == 'back':\n pwd -= 1\n location = location[0:-1]\n if location == []:\n pwd = 0\n break\n elif mod[0] == 'set':\n if len(mod) < 3:\n print('Usage: set <option> <value>')\n elif attack == 'libssh_rce_noauth':\n if mod[1].lower() == 'rhost':\n rhost = mod[2]\n elif mod[1].lower() == 'rport':\n rport = mod[2]\n elif mod[1].lower() == 'cmd':\n cmd = mod[2]\n else:\n print(E + 'Options is not found!')\n elif mod[1].lower() == 'rhost':\n rhost = mod[2]\n elif mod[1].lower() == 'rport':\n rport = mod[2]\n else:\n print(E + 'Options is not found!')\n elif mod[0] == 'options':\n if attack == 'libssh_rce_noauth':\n os.system(\n 'ruby data/options/options.rb libssh_rce_noauth '\n + rhost + ' ' + rport + ' ' + cmd)\n else:\n os.system(\n 'ruby data/options/options.rb libssh_shell_noauth '\n + rhost + ' ' + rport)\n elif mod[0] == 'use':\n if len(mod) < 2:\n print('Usage: use <module>')\n else:\n attack = mod[1]\n if (attack == 'libssh_rce_noauth' or attack ==\n 'libssh_shell_noauth'):\n pwd += 1\n location[pwd] = mod[1]\n else:\n print(E + 'Module is not found!')\n elif mod[0] == 'run':\n if rhost == '' or rport == '':\n print(E + 'Target is not specified!')\n elif attack == 'libssh_rce_noauth':\n if cmd == '':\n print(E +\n 'Command for RCE is not specified!')\n else:\n print(G +\n 'Starting libssh_rce_noauth attack...')\n os.system(\n 'python3 modules/libssh_rce_noauth.py '\n + rhost + ' -p ' + rport + \" -v '\" +\n cmd + \"'\")\n elif attack == 'libssh_shell_noauth':\n print(G +\n 'Starting libssh_shell_noauth attack...')\n os.system(\n 'python3 modules/libssh_shell_noauth.py ' +\n rhost + ' -p ' + rport + ' -v --shell')\n elif mod[0] == 'clear':\n os.system('clear')\n elif mod[0] == 'exit':\n sys.exit()\n elif mod[0] == 'update':\n os.system('chmod +x etc/update.sh && etc/update.sh'\n )\n elif mod[0] == 'help':\n print('')\n print('Core Commands')\n print('=============')\n os.system('cat data/cmds/core_cmds.txt')\n print('')\n print('Module Commands')\n print('===============')\n os.system('cat data/cmds/module_cmds.txt')\n print('')\n else:\n print(E + 'Unrecognized command!')\n mod = input('\\x1b[4msshsploit\\x1b[0m(\\x1b[1;31m' +\n attack + '\\x1b[0m)> ').strip(' ')\n mod = mod.split()\n else:\n print(E + 'Module is not found!')\n else:\n print(E + 'Unrecognized command!')\n ui = input('\\x1b[4msshsploit\\x1b[0m> ').strip(' ')\n ui = ui.split()\n\n\n<mask token>\n",
"step-3": "<mask token>\nos.system(\"printf '\\x1b]2;SSHSploit Framework\\x07'\")\n<mask token>\nQ = '\\x1b[1;77m[?] \\x1b[0m'\nG = '\\x1b[1;34m[*] \\x1b[0m'\nS = '\\x1b[1;32m[+] \\x1b[0m'\nW = '\\x1b[1;33m[!] \\x1b[0m'\nE = '\\x1b[1;31m[-] \\x1b[0m'\nrhost = ''\nrport = ''\ncmd = ''\nattack = ''\npwd = 0\nlocation = []\nreadline.parse_and_bind('tab: complete')\n\n\ndef banner():\n os.system('clear')\n os.system('cat banner/banner.txt')\n print('')\n print('SSHSploit Framework v1.0')\n print('------------------------')\n print('')\n\n\ndef main():\n ui = input('\\x1b[4msshsploit\\x1b[0m> ').strip(' ')\n ui = ui.split()\n while True:\n if ui == []:\n pass\n elif ui[0] == 'exit':\n sys.exit()\n elif ui[0] == 'clear':\n os.system('clear')\n elif ui[0] == 'update':\n os.system('chmod +x etc/update.sh && etc/update.sh')\n elif ui[0] == 'help':\n print('')\n print('Core Commands')\n print('=============')\n os.system('cat data/cmds/core_cmds.txt')\n print('')\n elif ui[0] == 'modules':\n print('')\n print('Modules')\n print('=======')\n print('')\n os.system('cat data/modules/modules.txt')\n print('')\n elif ui[0] == 'use':\n if len(ui) < 2:\n print('Usage: use <module>')\n else:\n attack = ui[1]\n if (attack == 'libssh_rce_noauth' or attack ==\n 'libssh_shell_noauth'):\n location[pwd] = c[1]\n mod = input('\\x1b[4msshsploit\\x1b[0m(\\x1b[1;31m' +\n attack + '\\x1b[0m)> ').strip(' ')\n mod = mod.split()\n while True:\n if mod == []:\n pass\n elif mod[0] == 'back':\n pwd -= 1\n location = location[0:-1]\n if location == []:\n pwd = 0\n break\n elif mod[0] == 'set':\n if len(mod) < 3:\n print('Usage: set <option> <value>')\n elif attack == 'libssh_rce_noauth':\n if mod[1].lower() == 'rhost':\n rhost = mod[2]\n elif mod[1].lower() == 'rport':\n rport = mod[2]\n elif mod[1].lower() == 'cmd':\n cmd = mod[2]\n else:\n print(E + 'Options is not found!')\n elif mod[1].lower() == 'rhost':\n rhost = mod[2]\n elif mod[1].lower() == 'rport':\n rport = mod[2]\n else:\n print(E + 'Options is not found!')\n elif mod[0] == 'options':\n if attack == 'libssh_rce_noauth':\n os.system(\n 'ruby data/options/options.rb libssh_rce_noauth '\n + rhost + ' ' + rport + ' ' + cmd)\n else:\n os.system(\n 'ruby data/options/options.rb libssh_shell_noauth '\n + rhost + ' ' + rport)\n elif mod[0] == 'use':\n if len(mod) < 2:\n print('Usage: use <module>')\n else:\n attack = mod[1]\n if (attack == 'libssh_rce_noauth' or attack ==\n 'libssh_shell_noauth'):\n pwd += 1\n location[pwd] = mod[1]\n else:\n print(E + 'Module is not found!')\n elif mod[0] == 'run':\n if rhost == '' or rport == '':\n print(E + 'Target is not specified!')\n elif attack == 'libssh_rce_noauth':\n if cmd == '':\n print(E +\n 'Command for RCE is not specified!')\n else:\n print(G +\n 'Starting libssh_rce_noauth attack...')\n os.system(\n 'python3 modules/libssh_rce_noauth.py '\n + rhost + ' -p ' + rport + \" -v '\" +\n cmd + \"'\")\n elif attack == 'libssh_shell_noauth':\n print(G +\n 'Starting libssh_shell_noauth attack...')\n os.system(\n 'python3 modules/libssh_shell_noauth.py ' +\n rhost + ' -p ' + rport + ' -v --shell')\n elif mod[0] == 'clear':\n os.system('clear')\n elif mod[0] == 'exit':\n sys.exit()\n elif mod[0] == 'update':\n os.system('chmod +x etc/update.sh && etc/update.sh'\n )\n elif mod[0] == 'help':\n print('')\n print('Core Commands')\n print('=============')\n os.system('cat data/cmds/core_cmds.txt')\n print('')\n print('Module Commands')\n print('===============')\n os.system('cat data/cmds/module_cmds.txt')\n print('')\n else:\n print(E 
+ 'Unrecognized command!')\n mod = input('\\x1b[4msshsploit\\x1b[0m(\\x1b[1;31m' +\n attack + '\\x1b[0m)> ').strip(' ')\n mod = mod.split()\n else:\n print(E + 'Module is not found!')\n else:\n print(E + 'Unrecognized command!')\n ui = input('\\x1b[4msshsploit\\x1b[0m> ').strip(' ')\n ui = ui.split()\n\n\nbanner()\nmain()\n",
"step-4": "import os\nos.system(\"printf '\\x1b]2;SSHSploit Framework\\x07'\")\nimport sys\nimport subprocess\nimport readline\nimport time\nQ = '\\x1b[1;77m[?] \\x1b[0m'\nG = '\\x1b[1;34m[*] \\x1b[0m'\nS = '\\x1b[1;32m[+] \\x1b[0m'\nW = '\\x1b[1;33m[!] \\x1b[0m'\nE = '\\x1b[1;31m[-] \\x1b[0m'\nrhost = ''\nrport = ''\ncmd = ''\nattack = ''\npwd = 0\nlocation = []\nreadline.parse_and_bind('tab: complete')\n\n\ndef banner():\n os.system('clear')\n os.system('cat banner/banner.txt')\n print('')\n print('SSHSploit Framework v1.0')\n print('------------------------')\n print('')\n\n\ndef main():\n ui = input('\\x1b[4msshsploit\\x1b[0m> ').strip(' ')\n ui = ui.split()\n while True:\n if ui == []:\n pass\n elif ui[0] == 'exit':\n sys.exit()\n elif ui[0] == 'clear':\n os.system('clear')\n elif ui[0] == 'update':\n os.system('chmod +x etc/update.sh && etc/update.sh')\n elif ui[0] == 'help':\n print('')\n print('Core Commands')\n print('=============')\n os.system('cat data/cmds/core_cmds.txt')\n print('')\n elif ui[0] == 'modules':\n print('')\n print('Modules')\n print('=======')\n print('')\n os.system('cat data/modules/modules.txt')\n print('')\n elif ui[0] == 'use':\n if len(ui) < 2:\n print('Usage: use <module>')\n else:\n attack = ui[1]\n if (attack == 'libssh_rce_noauth' or attack ==\n 'libssh_shell_noauth'):\n location[pwd] = c[1]\n mod = input('\\x1b[4msshsploit\\x1b[0m(\\x1b[1;31m' +\n attack + '\\x1b[0m)> ').strip(' ')\n mod = mod.split()\n while True:\n if mod == []:\n pass\n elif mod[0] == 'back':\n pwd -= 1\n location = location[0:-1]\n if location == []:\n pwd = 0\n break\n elif mod[0] == 'set':\n if len(mod) < 3:\n print('Usage: set <option> <value>')\n elif attack == 'libssh_rce_noauth':\n if mod[1].lower() == 'rhost':\n rhost = mod[2]\n elif mod[1].lower() == 'rport':\n rport = mod[2]\n elif mod[1].lower() == 'cmd':\n cmd = mod[2]\n else:\n print(E + 'Options is not found!')\n elif mod[1].lower() == 'rhost':\n rhost = mod[2]\n elif mod[1].lower() == 'rport':\n rport = mod[2]\n else:\n print(E + 'Options is not found!')\n elif mod[0] == 'options':\n if attack == 'libssh_rce_noauth':\n os.system(\n 'ruby data/options/options.rb libssh_rce_noauth '\n + rhost + ' ' + rport + ' ' + cmd)\n else:\n os.system(\n 'ruby data/options/options.rb libssh_shell_noauth '\n + rhost + ' ' + rport)\n elif mod[0] == 'use':\n if len(mod) < 2:\n print('Usage: use <module>')\n else:\n attack = mod[1]\n if (attack == 'libssh_rce_noauth' or attack ==\n 'libssh_shell_noauth'):\n pwd += 1\n location[pwd] = mod[1]\n else:\n print(E + 'Module is not found!')\n elif mod[0] == 'run':\n if rhost == '' or rport == '':\n print(E + 'Target is not specified!')\n elif attack == 'libssh_rce_noauth':\n if cmd == '':\n print(E +\n 'Command for RCE is not specified!')\n else:\n print(G +\n 'Starting libssh_rce_noauth attack...')\n os.system(\n 'python3 modules/libssh_rce_noauth.py '\n + rhost + ' -p ' + rport + \" -v '\" +\n cmd + \"'\")\n elif attack == 'libssh_shell_noauth':\n print(G +\n 'Starting libssh_shell_noauth attack...')\n os.system(\n 'python3 modules/libssh_shell_noauth.py ' +\n rhost + ' -p ' + rport + ' -v --shell')\n elif mod[0] == 'clear':\n os.system('clear')\n elif mod[0] == 'exit':\n sys.exit()\n elif mod[0] == 'update':\n os.system('chmod +x etc/update.sh && etc/update.sh'\n )\n elif mod[0] == 'help':\n print('')\n print('Core Commands')\n print('=============')\n os.system('cat data/cmds/core_cmds.txt')\n print('')\n print('Module Commands')\n print('===============')\n os.system('cat 
data/cmds/module_cmds.txt')\n print('')\n else:\n print(E + 'Unrecognized command!')\n mod = input('\\x1b[4msshsploit\\x1b[0m(\\x1b[1;31m' +\n attack + '\\x1b[0m)> ').strip(' ')\n mod = mod.split()\n else:\n print(E + 'Module is not found!')\n else:\n print(E + 'Unrecognized command!')\n ui = input('\\x1b[4msshsploit\\x1b[0m> ').strip(' ')\n ui = ui.split()\n\n\nbanner()\nmain()\n",
"step-5": "#!/usr/bin/env python3\n\n# ---------------------------------------------------\n# SSHSploit Framework \n# ---------------------------------------------------\n# Copyright (C) <2020> <Entynetproject> \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\nimport os\n\nos.system(\"printf '\\033]2;SSHSploit Framework\\a'\")\n\nimport sys\nimport subprocess\nimport readline\nimport time\n\nQ = '\\033[1;77m[?] \\033[0m'\nG = '\\033[1;34m[*] \\033[0m'\nS = '\\033[1;32m[+] \\033[0m'\nW = '\\033[1;33m[!] \\033[0m'\nE = '\\033[1;31m[-] \\033[0m'\n\nrhost = \"\"\nrport = \"\"\ncmd = \"\"\n\nattack = \"\"\npwd = 0\nlocation = []\n\nreadline.parse_and_bind(\"tab: complete\")\n\ndef banner():\n os.system(\"clear\")\n os.system(\"cat banner/banner.txt\")\n print(\"\")\n print(\"SSHSploit Framework v1.0\")\n print(\"------------------------\")\n print(\"\")\n\ndef main():\n ui = input('\\033[4msshsploit\\033[0m> ').strip(\" \")\n ui = ui.split()\n while True:\n if ui == []:\n pass\n elif ui[0] == \"exit\":\n sys.exit()\n elif ui[0] == \"clear\":\n os.system(\"clear\")\n elif ui[0] == \"update\":\n os.system(\"chmod +x etc/update.sh && etc/update.sh\")\n elif ui[0] == \"help\":\n print(\"\")\n print(\"Core Commands\")\n print(\"=============\")\n os.system(\"cat data/cmds/core_cmds.txt\")\n print(\"\")\n elif ui[0] == \"modules\":\n print(\"\")\n print(\"Modules\")\n print(\"=======\")\n print(\"\")\n os.system(\"cat data/modules/modules.txt\")\n print(\"\")\n elif ui[0] == \"use\":\n if len(ui) < 2:\n print(\"Usage: use <module>\")\n else:\n attack = ui[1]\n if attack == \"libssh_rce_noauth\" or attack == \"libssh_shell_noauth\":\n location[pwd] = c[1]\n mod = input('\\033[4msshsploit\\033[0m(\\033[1;31m'+attack+'\\033[0m)> ').strip(\" \")\n mod = mod.split()\n while True:\n if mod == []:\n pass\n elif mod[0] == \"back\":\n pwd -= 1\n location = location[0:-1]\n if location == []:\n pwd = 0\n break\n elif mod[0] == \"set\":\n if len(mod) < 3:\n print(\"Usage: set <option> <value>\")\n else:\n if attack == \"libssh_rce_noauth\":\n if mod[1].lower() == \"rhost\":\n rhost = mod[2]\n elif mod[1].lower() == \"rport\":\n rport = mod[2]\n elif mod[1].lower() == \"cmd\":\n cmd = mod[2]\n else:\n print(E+\"Options is not found!\")\n else:\n if mod[1].lower() == \"rhost\":\n rhost = mod[2]\n elif mod[1].lower() == \"rport\":\n rport = mod[2]\n else:\n print(E+\"Options is not found!\")\n elif mod[0] == \"options\":\n if attack == \"libssh_rce_noauth\":\n os.system(\"ruby data/options/options.rb libssh_rce_noauth \"+rhost+\" \"+rport+\" \"+cmd)\n else:\n os.system(\"ruby data/options/options.rb libssh_shell_noauth \"+rhost+\" \"+rport)\n elif mod[0] == \"use\":\n if len(mod) < 2:\n print(\"Usage: use <module>\")\n else:\n attack = mod[1]\n if attack == \"libssh_rce_noauth\" or attack == \"libssh_shell_noauth\":\n pwd += 1\n location[pwd] = mod[1]\n else:\n print(E+\"Module is not found!\")\n elif mod[0] == \"run\":\n if rhost == \"\" or 
rport == \"\":\n print(E+\"Target is not specified!\")\n else:\n if attack == \"libssh_rce_noauth\":\n if cmd == \"\":\n print(E+\"Command for RCE is not specified!\")\n else:\n print(G+\"Starting libssh_rce_noauth attack...\")\n os.system(\"python3 modules/libssh_rce_noauth.py \"+rhost+\" -p \"+rport+\" -v '\"+cmd+\"'\")\n elif attack == \"libssh_shell_noauth\":\n print(G+\"Starting libssh_shell_noauth attack...\")\n os.system(\"python3 modules/libssh_shell_noauth.py \"+rhost+\" -p \"+rport+\" -v --shell\")\n elif mod[0] == \"clear\":\n os.system(\"clear\")\n elif mod[0] == \"exit\":\n sys.exit()\n elif mod[0] == \"update\":\n os.system(\"chmod +x etc/update.sh && etc/update.sh\")\n elif mod[0] == \"help\":\n print(\"\")\n print(\"Core Commands\")\n print(\"=============\")\n os.system(\"cat data/cmds/core_cmds.txt\")\n print(\"\")\n print(\"Module Commands\")\n print(\"===============\")\n os.system(\"cat data/cmds/module_cmds.txt\")\n print(\"\")\n else:\n print(E+\"Unrecognized command!\")\n mod = input('\\033[4msshsploit\\033[0m(\\033[1;31m'+attack+'\\033[0m)> ').strip(\" \")\n mod = mod.split()\n else:\n print(E+\"Module is not found!\")\n else:\n print(E+\"Unrecognized command!\")\n ui = input('\\033[4msshsploit\\033[0m> ').strip(\" \")\n ui = ui.split()\n \nbanner()\nmain()\n",
"step-ids": [
1,
2,
4,
5,
6
]
}
|
[
1,
2,
4,
5,
6
] |
# $Header: //depot/cs/s/ajax_support.wsgi#10 $
from werkzeug.wrappers import Response
from p.DRequest import DRequest
from db.Support import SupportSession
from db.Exceptions import DbError, SupportSessionExpired
import db.Db as Db
import db.Support
import cgi
import simplejson as json
def application(environ, start_response):
"""AJAX scripts for email templates."""
request = DRequest(environ)
resp = None
try :
Db.start_transaction()
form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)
args = form['args'].value
req = json.loads(args)
support = SupportSession(key=request.support_key())
handler = handlers[req['command']]
resp = Response(json.dumps(handler(request, req)))
Db.finish_transaction()
except SupportSessionExpired:
Db.cancel_transaction()
resp = Response(json.dumps({ 'Error': 'Session Expired' }))
except DbError as e:
Db.cancel_transaction()
resp = Response(json.dumps({ 'Error': e.args[0]}))
except Exception as e:
Db.cancel_transaction()
import traceback
traceback.print_exc()
resp = Response(json.dumps({ 'Error': "Internal Error"}))
request.cookie_freshen(resp)
resp.headers['content-type'] = 'application/json'
resp.headers['cache-control'] = 'no-cache, must-revalidate, no-store'
return resp(environ, start_response)
def get(request, req):
return db.Support.get_all()
def edit(request, req):
    return db.Support.edit(req)
def delete(request, req):
return db.Support.delete(req['support_id'])
def add(request, req):
return db.Support.new()
handlers = { 'get': get, 'edit': edit, 'delete': delete, 'add': add }
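# Illustrative request/response shape (field names come from the handlers
# above; the support_id value is made up, and a valid support-session cookie
# is assumed):
#
# POST body: args={"command": "delete", "support_id": 42}
# -> dispatches to delete(), returning db.Support.delete(42) serialized as
#    JSON with content-type application/json.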
|
normal
|
{
"blob_id": "be58862b66708c9de8cf7642c9de52ec744b079e",
"index": 805,
"step-1": "<mask token>\n\n\ndef application(environ, start_response):\n \"\"\"AJAX scripts for email templates.\"\"\"\n request = DRequest(environ)\n resp = None\n try:\n Db.start_transaction()\n form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)\n args = form['args'].value\n req = json.loads(args)\n support = SupportSession(key=request.support_key())\n handler = handlers[req['command']]\n resp = Response(json.dumps(handler(request, req)))\n Db.finish_transaction()\n except SupportSessionExpired:\n Db.cancel_transaction()\n resp = Response(json.dumps({'Error': 'Session Expired'}))\n except DbError as e:\n Db.cancel_transaction()\n resp = Response(json.dumps({'Error': e.args[0]}))\n except Exception as e:\n Db.cancel_transaction()\n import traceback\n traceback.print_exc()\n resp = Response(json.dumps({'Error': 'Internal Error'}))\n request.cookie_freshen(resp)\n resp.headers['content-type'] = 'application/json'\n resp.headers['cache-control'] = 'no-cache, must-revalidate, no-store'\n return resp(environ, start_response)\n\n\ndef get(request, req):\n return db.Support.get_all()\n\n\n<mask token>\n\n\ndef delete(request, req):\n return db.Support.delete(req['support_id'])\n\n\ndef add(request, req):\n return db.Support.new()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef application(environ, start_response):\n \"\"\"AJAX scripts for email templates.\"\"\"\n request = DRequest(environ)\n resp = None\n try:\n Db.start_transaction()\n form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)\n args = form['args'].value\n req = json.loads(args)\n support = SupportSession(key=request.support_key())\n handler = handlers[req['command']]\n resp = Response(json.dumps(handler(request, req)))\n Db.finish_transaction()\n except SupportSessionExpired:\n Db.cancel_transaction()\n resp = Response(json.dumps({'Error': 'Session Expired'}))\n except DbError as e:\n Db.cancel_transaction()\n resp = Response(json.dumps({'Error': e.args[0]}))\n except Exception as e:\n Db.cancel_transaction()\n import traceback\n traceback.print_exc()\n resp = Response(json.dumps({'Error': 'Internal Error'}))\n request.cookie_freshen(resp)\n resp.headers['content-type'] = 'application/json'\n resp.headers['cache-control'] = 'no-cache, must-revalidate, no-store'\n return resp(environ, start_response)\n\n\ndef get(request, req):\n return db.Support.get_all()\n\n\ndef edit(request, req):\n return db.Support.edit(req)\n\n\ndef delete(request, req):\n return db.Support.delete(req['support_id'])\n\n\ndef add(request, req):\n return db.Support.new()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef application(environ, start_response):\n \"\"\"AJAX scripts for email templates.\"\"\"\n request = DRequest(environ)\n resp = None\n try:\n Db.start_transaction()\n form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)\n args = form['args'].value\n req = json.loads(args)\n support = SupportSession(key=request.support_key())\n handler = handlers[req['command']]\n resp = Response(json.dumps(handler(request, req)))\n Db.finish_transaction()\n except SupportSessionExpired:\n Db.cancel_transaction()\n resp = Response(json.dumps({'Error': 'Session Expired'}))\n except DbError as e:\n Db.cancel_transaction()\n resp = Response(json.dumps({'Error': e.args[0]}))\n except Exception as e:\n Db.cancel_transaction()\n import traceback\n traceback.print_exc()\n resp = Response(json.dumps({'Error': 'Internal Error'}))\n request.cookie_freshen(resp)\n resp.headers['content-type'] = 'application/json'\n resp.headers['cache-control'] = 'no-cache, must-revalidate, no-store'\n return resp(environ, start_response)\n\n\ndef get(request, req):\n return db.Support.get_all()\n\n\ndef edit(request, req):\n return db.Support.edit(req)\n\n\ndef delete(request, req):\n return db.Support.delete(req['support_id'])\n\n\ndef add(request, req):\n return db.Support.new()\n\n\nhandlers = {'get': get, 'edit': edit, 'delete': delete, 'add': add}\n",
"step-4": "from werkzeug.wrappers import Response\nfrom p.DRequest import DRequest\nfrom db.Support import SupportSession\nfrom db.Exceptions import DbError, SupportSessionExpired\nimport db.Db as Db\nimport db.Support\nimport cgi\nimport simplejson as json\n\n\ndef application(environ, start_response):\n \"\"\"AJAX scripts for email templates.\"\"\"\n request = DRequest(environ)\n resp = None\n try:\n Db.start_transaction()\n form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)\n args = form['args'].value\n req = json.loads(args)\n support = SupportSession(key=request.support_key())\n handler = handlers[req['command']]\n resp = Response(json.dumps(handler(request, req)))\n Db.finish_transaction()\n except SupportSessionExpired:\n Db.cancel_transaction()\n resp = Response(json.dumps({'Error': 'Session Expired'}))\n except DbError as e:\n Db.cancel_transaction()\n resp = Response(json.dumps({'Error': e.args[0]}))\n except Exception as e:\n Db.cancel_transaction()\n import traceback\n traceback.print_exc()\n resp = Response(json.dumps({'Error': 'Internal Error'}))\n request.cookie_freshen(resp)\n resp.headers['content-type'] = 'application/json'\n resp.headers['cache-control'] = 'no-cache, must-revalidate, no-store'\n return resp(environ, start_response)\n\n\ndef get(request, req):\n return db.Support.get_all()\n\n\ndef edit(request, req):\n return db.Support.edit(req)\n\n\ndef delete(request, req):\n return db.Support.delete(req['support_id'])\n\n\ndef add(request, req):\n return db.Support.new()\n\n\nhandlers = {'get': get, 'edit': edit, 'delete': delete, 'add': add}\n",
"step-5": "# $Header: //depot/cs/s/ajax_support.wsgi#10 $\nfrom werkzeug.wrappers import Response\nfrom p.DRequest import DRequest\nfrom db.Support import SupportSession\nfrom db.Exceptions import DbError, SupportSessionExpired\nimport db.Db as Db\nimport db.Support\n\nimport cgi\nimport simplejson as json\n\ndef application(environ, start_response):\n \"\"\"AJAX scripts for email templates.\"\"\"\n\n request = DRequest(environ)\n\n resp = None\n\n try :\n Db.start_transaction()\n form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ)\n args = form['args'].value\n req = json.loads(args)\n\n support = SupportSession(key=request.support_key())\n handler = handlers[req['command']]\n resp = Response(json.dumps(handler(request, req)))\n Db.finish_transaction()\n\n except SupportSessionExpired:\n Db.cancel_transaction()\n resp = Response(json.dumps({ 'Error': 'Session Expired' }))\n except DbError as e:\n Db.cancel_transaction()\n resp = Response(json.dumps({ 'Error': e.args[0]}))\n except Exception as e:\n Db.cancel_transaction()\n import traceback\n traceback.print_exc()\n resp = Response(json.dumps({ 'Error': \"Internal Error\"}))\n\n request.cookie_freshen(resp)\n resp.headers['content-type'] = 'application/json'\n resp.headers['cache-control'] = 'no-cache, must-revalidate, no-store'\n return resp(environ, start_response)\n\n\ndef get(request, req):\n return db.Support.get_all()\n\ndef edit(request, req):\n return db.Support.edit(req);\n\ndef delete(request, req):\n return db.Support.delete(req['support_id'])\n\ndef add(request, req):\n return db.Support.new()\n\n\nhandlers = { 'get': get, 'edit': edit, 'delete': delete, 'add': add }\n\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import datetime
import time
import rfc822
from django.conf import settings
from urllib2 import Request, urlopen, URLError, HTTPError
from urllib import urlencode
import re
import string
try:
import django.utils.simplejson as json
except:
import json
from django.core.cache import cache
from tagging.models import Tag
from foodtruck.models import *
from foodtruck.tokens import *
import oauth2 as oauth
def fetch_json(url, service, list_key=None):
fetched = urlopen(url).read()
data = json.loads(fetched)
if list_key:
data = data[list_key]
return data
def oauth_req(url, key, secret, http_method="GET", post_body=None,http_headers=None):
consumer = oauth.Consumer(key=CONSUMER_KEY, secret=CONSUMER_SECRET)
token = oauth.Token(key=key, secret=secret)
client = oauth.Client(consumer, token)
resp, content = client.request(
url,
method=http_method,
body=post_body,
headers=http_headers,
force_auth_header=True
)
return content
def get_all_tweets():
from dateutil.parser import parse, tz
url = LIST_URL
HERE = tz.tzlocal()
if cache.get('truck_tweets'):
tweets = cache.get('truck_tweets')
else:
tweets = []
all_tweets = oauth_req(url, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
data = json.loads(all_tweets)
for t in data:
m = dict(
name = t['user']['screen_name'],
pic_url = t['user']['profile_image_url'],
text = t['text'],
timestamp = parse(t['created_at']).astimezone(HERE),
url = 'http://twitter.com/'+t['user']['screen_name']+'/statuses/'+str(t['id']),
)
tweets += [m]
cache.set('truck_tweets',tweets, 62)
return tweets
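# Each cached tweet is a plain dict shaped like (values illustrative):
# {'name': 'sometruck', 'pic_url': 'http://...', 'text': 'Lunch at 5th & Main',
#  'timestamp': <datetime in the local tz>,
#  'url': 'http://twitter.com/sometruck/statuses/123'}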
def filter_trucks(hood):
tweets = get_all_tweets()
n = Hood.objects.get(id=hood)
tags = n.tags.all()
filtered = {'hood':n.name, 'tags':tags}
filtered['tweets'] = []
for t in tweets:
for w in tags:
if string.find(t['text'].lower(), w.name.lower()) > 0:
filtered['tweets'] += [t]
break
cache.set((('filtered_%s' % hood)), filtered, 62)
return filtered
def get_truck_names():
p = open('truck.cursor','r')
try: last_cursor = int(p.read())
except: last_cursor=1353949495935930905 # this is just the last cursor number i looked up, to save on API calls -- can change.
p.close()
url = LIST_MEMBERS_URL
get_truck_list = oauth_req(url, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
truck_list = json.loads(get_truck_list)
all_trucks = truck_list['users']
cursor = truck_list['next_cursor']
f = open('truck.cursor','w')
f.write(str(cursor))
    f.close()
while cursor > last_cursor:
truck_url = LIST_MEMBERS_URL +'?cursor=' + str(cursor)
get_truck_list = oauth_req(truck_url,OAUTH_TOKEN,OAUTH_TOKEN_SECRET)
truck_list = json.loads(get_truck_list)
all_trucks += truck_list['users']
cursor = truck_list['next_cursor']
for truck in all_trucks:
description=truck['description'] or ''
truck_url= truck['url'] or 'http://twitter.com/'+truck['screen_name']
profile_icon= truck['profile_image_url'] or ''
real_name=truck['name'] or truck['screen_name']
t = Truck.objects.get_or_create(id_str__exact=truck['id_str'], defaults = {'name':truck['screen_name'], 'description':description, 'profile_icon':profile_icon, 'truck_url':truck_url, 'geo_enabled':truck['geo_enabled'], 'real_name':real_name, 'id_str':truck['id_str']})
if __name__=='__main__':
import sys
try:
func = sys.argv[1]
except: func = None
if func:
try:
exec 'print %s' % func
except:
print "Error: incorrect syntax '%s'" % func
else: print "Please name your function"
|
normal
|
{
"blob_id": "f720eaf1ea96ccc70730e8ba1513e1a2bb95d29d",
"index": 4842,
"step-1": "import datetime\nimport time\nimport rfc822\nfrom django.conf import settings\nfrom urllib2 import Request, urlopen, URLError, HTTPError\nfrom urllib import urlencode\nimport re \nimport string\ntry:\n import django.utils.simplejson as json\nexcept:\n import json\nfrom django.core.cache import cache\n\nfrom tagging.models import Tag\n\nfrom foodtruck.models import *\nfrom foodtruck.tokens import *\n\nimport oauth2 as oauth\n\ndef fetch_json(url, service, list_key=None):\n fetched = urlopen(url).read()\n data = json.loads(fetched)\n if list_key:\n data = data[list_key]\n return data\n \ndef oauth_req(url, key, secret, http_method=\"GET\", post_body=None,http_headers=None):\n\tconsumer = oauth.Consumer(key=CONSUMER_KEY, secret=CONSUMER_SECRET)\n\ttoken = oauth.Token(key=key, secret=secret)\n\tclient = oauth.Client(consumer, token)\n\tresp, content = client.request(\n\t\turl,\n\t\tmethod=http_method,\n\t\tbody=post_body,\n\t\theaders=http_headers,\n\t\tforce_auth_header=True\n\t)\n\treturn content\n\ndef get_all_tweets():\n from dateutil.parser import parse, tz\n url = LIST_URL\n HERE = tz.tzlocal()\n if cache.get('truck_tweets'):\n tweets = cache.get('truck_tweets')\n else:\n tweets = []\n all_tweets = oauth_req(url, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)\n data = json.loads(all_tweets)\n for t in data:\n m = dict(\n name = t['user']['screen_name'],\n pic_url = t['user']['profile_image_url'],\n text = t['text'],\n timestamp = parse(t['created_at']).astimezone(HERE),\n url = 'http://twitter.com/'+t['user']['screen_name']+'/statuses/'+str(t['id']),\n ) \n tweets += [m]\n cache.set('truck_tweets',tweets, 62)\n return tweets \n\n\ndef filter_trucks(hood):\n tweets = get_all_tweets() \n n = Hood.objects.get(id=hood)\n tags = n.tags.all()\n filtered = {'hood':n.name, 'tags':tags}\n filtered['tweets'] = []\n for t in tweets:\n for w in tags:\n if string.find(t['text'].lower(), w.name.lower()) > 0: \n filtered['tweets'] += [t]\n break\n cache.set((('filtered_%s' % hood)), filtered, 62)\n return filtered\n \n \ndef get_truck_names():\n p = open('truck.cursor','r')\n try: last_cursor = int(p.read())\n except: last_cursor=1353949495935930905 # this is just the last cursor number i looked up, to save on API calls -- can change.\n p.close()\n\n url = LIST_MEMBERS_URL\n get_truck_list = oauth_req(url, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)\n truck_list = json.loads(get_truck_list)\n all_trucks = truck_list['users']\n cursor = truck_list['next_cursor']\n f = open('truck.cursor','w')\n f.write(str(cursor))\n f.close\n\n while cursor > last_cursor:\n truck_url = LIST_MEMBERS_URL +'?cursor=' + str(cursor)\n get_truck_list = oauth_req(truck_url,OAUTH_TOKEN,OAUTH_TOKEN_SECRET)\n truck_list = json.loads(get_truck_list)\n all_trucks += truck_list['users']\n cursor = truck_list['next_cursor']\n for truck in all_trucks:\n description=truck['description'] or ''\n truck_url= truck['url'] or 'http://twitter.com/'+truck['screen_name']\n profile_icon= truck['profile_image_url'] or ''\n real_name=truck['name'] or truck['screen_name']\n t = Truck.objects.get_or_create(id_str__exact=truck['id_str'], defaults = {'name':truck['screen_name'], 'description':description, 'profile_icon':profile_icon, 'truck_url':truck_url, 'geo_enabled':truck['geo_enabled'], 'real_name':real_name, 'id_str':truck['id_str']})\n\n\nif __name__=='__main__':\n import sys\n try:\n func = sys.argv[1]\n except: func = None\n if func:\n try:\n exec 'print %s' % func\n except:\n print \"Error: incorrect syntax '%s'\" % func\n else: print \"Please name your 
function\"\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Generate some obstacle patterns and save them in JSON format
import json
import math
import random
from obstacle import *

def main(game_map):
    # build 34 obstacles at random positions inside the map bounds
    # (the first positional Obstacle argument is drawn from the map height,
    # as in the original code)
    obs = []
    for _ in range(1, 35):
        obs.append(Obstacle(random.randint(0, game_map.getHeight()),
                            y=random.randint(0, game_map.getWidth()),
                            radius=20).toJsonObject())
    jsonOb = {'map': {'obstacle': obs}}
    print(jsonOb)
    with open('testDump.json', 'w') as F:
        json.dump(jsonOb, F, indent=4, separators=(',', ': '))

if __name__ == '__main__':
    # BUG in the original: main() takes a required map object exposing
    # getHeight()/getWidth(); supply one here before running.
    main()
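
# A minimal readback sketch (assuming testDump.json was written by main()
# above with a valid map object):
#
#     with open('testDump.json') as f:
#         loaded = json.load(f)
#     print(len(loaded['map']['obstacle']), 'obstacles restored')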
|
normal
|
{
"blob_id": "b849a2902c8596daa2c6da4de7b9d1c07b34d136",
"index": 7883,
"step-1": "# Generate some object patterns as save as JSON format\nimport json\nimport math\nimport random\nfrom obstacle import *\n\ndef main(map):\n\tobs = []\n\tfor x in range(1,35):\n\t\tobs.append(Obstacle(random.randint(0,map.getHeight()), y=random.randint(0,map.getWidth()), radius=20).toJsonObject())\n\n\tjsonOb={'map': {'obstacle': obs}}\n\t\n\tprint jsonOb\n\tF = open('testDump.json', 'w')\n\tjson.dump(jsonOb, F, indent=4, separators=(',', ': '))\n\tF.close()\n\nif __name__ == '__main__':\n\tmain()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# MASTER IN BIG DATA AND BUSINESS ANALYTICS
# MOD 1 - FINAL EVALUATION - EX. 2: given a file that contains on each line
# a word or set of words followed by a numeric value called the "sentiment",
# plus a set of tweets, compute the sentiment of those words or sets of
# words that have no value associated in the "sentiments" file. Different
# strategies can be used to assign a value. For example, the assigned value
# could be the "sentiment" of the tweet in which the valueless word or set
# of words appears, or the mean "sentiment" of that tweet.
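
# A tiny worked example of the mean-sentiment strategy implemented below
# (the words and scores are invented for illustration): for the tweet
# "good cheap tacos" with good=+3, cheap=+2 and "tacos" absent from the
# sentiment file, the tweet scores 3 + 2 + 0 = 5 over 3 words, so "tacos"
# is assigned 5/3 ≈ 1.67.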
import json
import pandas as pd
# ---- FUNCTIONS ---------------------------------------------------------------
def get_tweets(filename):
""" Process a json formatted file with tweets using pandas read_json """
try:
tweets = []
pd_tweets = pd.read_json(filename, lines=True) # use parameter lines=True to read the file as a json object per line
pd_tweets = pd_tweets[pd_tweets.text.notnull()]['text']
tweets = pd_tweets.to_list()
return tweets
except:
print("Something went wrong parsing the file " + filename)
def get_sentiments(filename):
""" Process a file that contains in each line a word or
set of words followed by a numerical value, called "feeling
- returns a dictionary with pairs of words and sentiments
"""
valores = {}
for linea in open(filename, 'r'):
termino, valor = linea.split('\t')
valores[termino] = int(valor)
return valores
# ---- MAIN PROGRAM -------------------------------------------------------------------------------------------------
# ---- Filenames (including path)
file_tweet = 'Tweets.txt'
file_sentimientos = 'Sentimientos.txt'
# -- PROCESS TWEETS FILE WITH PANDAS READ_JSON
list_of_tweets = get_tweets(file_tweet)
# -- PROCESS SENTIMIENTOS FILE TO A DICTIONARY
valores = get_sentiments(file_sentimientos)
# -- PROCESS TWEETS SENTIMENT AND PRINT
for tweet in list_of_tweets:
tweet_sentimiento = 0
words_without_sent = []
number_of_words = 0
for word in tweet.split(" "):
tweet_sentimiento += valores.get(word.lower(),0)
number_of_words += 1
        if valores.get(word.lower(), 0) == 0:
            # word missing from the sentiment file (note: this also matches a true 0 score)
            words_without_sent.append(word)

    # assign the tweet's mean "sentiment" to each word that lacks a value
for item in words_without_sent:
print(item + ': ' + str(tweet_sentimiento/number_of_words))
print("\n")
print("--- THE END ---")
|
normal
|
{
"blob_id": "acd2d84529e197d6f9d134e8d7e25a51a442f3ae",
"index": 8615,
"step-1": "<mask token>\n\n\ndef get_tweets(filename):\n \"\"\" Process a json formatted file with tweets using pandas read_json \"\"\"\n try:\n tweets = []\n pd_tweets = pd.read_json(filename, lines=True)\n pd_tweets = pd_tweets[pd_tweets.text.notnull()]['text']\n tweets = pd_tweets.to_list()\n return tweets\n except:\n print('Something went wrong parsing the file ' + filename)\n\n\ndef get_sentiments(filename):\n \"\"\" Process a file that contains in each line a word or\n set of words followed by a numerical value, called \"feeling\n - returns a dictionary with pairs of words and sentiments\n \"\"\"\n valores = {}\n for linea in open(filename, 'r'):\n termino, valor = linea.split('\\t')\n valores[termino] = int(valor)\n return valores\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_tweets(filename):\n \"\"\" Process a json formatted file with tweets using pandas read_json \"\"\"\n try:\n tweets = []\n pd_tweets = pd.read_json(filename, lines=True)\n pd_tweets = pd_tweets[pd_tweets.text.notnull()]['text']\n tweets = pd_tweets.to_list()\n return tweets\n except:\n print('Something went wrong parsing the file ' + filename)\n\n\ndef get_sentiments(filename):\n \"\"\" Process a file that contains in each line a word or\n set of words followed by a numerical value, called \"feeling\n - returns a dictionary with pairs of words and sentiments\n \"\"\"\n valores = {}\n for linea in open(filename, 'r'):\n termino, valor = linea.split('\\t')\n valores[termino] = int(valor)\n return valores\n\n\n<mask token>\nfor tweet in list_of_tweets:\n tweet_sentimiento = 0\n words_without_sent = []\n number_of_words = 0\n for word in tweet.split(' '):\n tweet_sentimiento += valores.get(word.lower(), 0)\n number_of_words += 1\n if valores.get(word.lower(), 0) == 0:\n words_without_sent.append(word)\n for item in words_without_sent:\n print(item + ': ' + str(tweet_sentimiento / number_of_words))\n print('\\n')\nprint('--- THE END ---')\n",
"step-3": "<mask token>\n\n\ndef get_tweets(filename):\n \"\"\" Process a json formatted file with tweets using pandas read_json \"\"\"\n try:\n tweets = []\n pd_tweets = pd.read_json(filename, lines=True)\n pd_tweets = pd_tweets[pd_tweets.text.notnull()]['text']\n tweets = pd_tweets.to_list()\n return tweets\n except:\n print('Something went wrong parsing the file ' + filename)\n\n\ndef get_sentiments(filename):\n \"\"\" Process a file that contains in each line a word or\n set of words followed by a numerical value, called \"feeling\n - returns a dictionary with pairs of words and sentiments\n \"\"\"\n valores = {}\n for linea in open(filename, 'r'):\n termino, valor = linea.split('\\t')\n valores[termino] = int(valor)\n return valores\n\n\nfile_tweet = 'Tweets.txt'\nfile_sentimientos = 'Sentimientos.txt'\nlist_of_tweets = get_tweets(file_tweet)\nvalores = get_sentiments(file_sentimientos)\nfor tweet in list_of_tweets:\n tweet_sentimiento = 0\n words_without_sent = []\n number_of_words = 0\n for word in tweet.split(' '):\n tweet_sentimiento += valores.get(word.lower(), 0)\n number_of_words += 1\n if valores.get(word.lower(), 0) == 0:\n words_without_sent.append(word)\n for item in words_without_sent:\n print(item + ': ' + str(tweet_sentimiento / number_of_words))\n print('\\n')\nprint('--- THE END ---')\n",
"step-4": "import json\nimport pandas as pd\n\n\ndef get_tweets(filename):\n \"\"\" Process a json formatted file with tweets using pandas read_json \"\"\"\n try:\n tweets = []\n pd_tweets = pd.read_json(filename, lines=True)\n pd_tweets = pd_tweets[pd_tweets.text.notnull()]['text']\n tweets = pd_tweets.to_list()\n return tweets\n except:\n print('Something went wrong parsing the file ' + filename)\n\n\ndef get_sentiments(filename):\n \"\"\" Process a file that contains in each line a word or\n set of words followed by a numerical value, called \"feeling\n - returns a dictionary with pairs of words and sentiments\n \"\"\"\n valores = {}\n for linea in open(filename, 'r'):\n termino, valor = linea.split('\\t')\n valores[termino] = int(valor)\n return valores\n\n\nfile_tweet = 'Tweets.txt'\nfile_sentimientos = 'Sentimientos.txt'\nlist_of_tweets = get_tweets(file_tweet)\nvalores = get_sentiments(file_sentimientos)\nfor tweet in list_of_tweets:\n tweet_sentimiento = 0\n words_without_sent = []\n number_of_words = 0\n for word in tweet.split(' '):\n tweet_sentimiento += valores.get(word.lower(), 0)\n number_of_words += 1\n if valores.get(word.lower(), 0) == 0:\n words_without_sent.append(word)\n for item in words_without_sent:\n print(item + ': ' + str(tweet_sentimiento / number_of_words))\n print('\\n')\nprint('--- THE END ---')\n",
"step-5": "# MÁSTER EN BIG DATA Y BUSINESS ANALYTICS\n# MOD 1 - FINAL EVALUATION - EX. 2: dado un archivo que contiene en cada línea\n# una palabra o conjunto de palabras seguido de un valor numérico denominado\n# “sentimiento” y un conjunto de tweets, se pide calcular el sentimiento de\n# aquellas palabras o conjunto de palabras que no tienen un valor asociado en el\n# archivo de “sentimientos”. Se pueden seguir distintas estrategias para asignar\n# un valor. Por ejemplo, se podría asignar como valor el valor del “sentimiento”\n# del tweet en que se encuentra la palabra o conjunto de palabras sin valor, o\n# el valor medio del “sentimiento” del tweet.\n\nimport json\nimport pandas as pd\n\n\n# ---- FUNCTIONS ---------------------------------------------------------------\ndef get_tweets(filename):\n \"\"\" Process a json formatted file with tweets using pandas read_json \"\"\"\n try:\n tweets = []\n pd_tweets = pd.read_json(filename, lines=True) # use parameter lines=True to read the file as a json object per line\n pd_tweets = pd_tweets[pd_tweets.text.notnull()]['text']\n tweets = pd_tweets.to_list()\n return tweets\n except:\n print(\"Something went wrong parsing the file \" + filename)\n\ndef get_sentiments(filename):\n \"\"\" Process a file that contains in each line a word or\n set of words followed by a numerical value, called \"feeling\n - returns a dictionary with pairs of words and sentiments\n \"\"\"\n valores = {}\n for linea in open(filename, 'r'):\n termino, valor = linea.split('\\t')\n valores[termino] = int(valor)\n return valores\n\n# ---- MAIN PROGRAM -------------------------------------------------------------------------------------------------\n\n# ---- Filenames (including path)\nfile_tweet = 'Tweets.txt'\nfile_sentimientos = 'Sentimientos.txt'\n\n\n# -- PROCESS TWEETS FILE WITH PANDAS READ_JSON\nlist_of_tweets = get_tweets(file_tweet)\n\n# -- PROCESS SENTIMIENTOS FILE TO A DICITIONARY\nvalores = get_sentiments(file_sentimientos)\n\n# -- PROCESS TWEETS SENTIMENT AND PRINT\nfor tweet in list_of_tweets:\n tweet_sentimiento = 0\n words_without_sent = []\n number_of_words = 0\n for word in tweet.split(\" \"):\n tweet_sentimiento += valores.get(word.lower(),0)\n number_of_words += 1\n if valores.get(word.lower(),0)==0:\n words_without_sent.append(word)\n\n # asignar como valor el valor medio del “sentimiento” del tweet\n for item in words_without_sent:\n print(item + ': ' + str(tweet_sentimiento/number_of_words))\n print(\"\\n\")\n\nprint(\"--- THE END ---\")\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import os
from typing import List, Optional, Sequence
import boto3
from google.cloud import storage
from ..globals import GLOBALS, LOGGER
def set_gcs_credentials():
if os.path.exists(GLOBALS.google_application_credentials):
return
secrets_client = boto3.client(
"secretsmanager",
region_name=GLOBALS.aws_region,
endpoint_url=GLOBALS.aws_endpoint_uri,
)
response = secrets_client.get_secret_value(SecretId=GLOBALS.gcs_key_secret_arn)
os.makedirs(
os.path.dirname(GLOBALS.google_application_credentials),
exist_ok=True,
)
with open(GLOBALS.google_application_credentials, "w") as f:
f.write(response["SecretString"])
def get_gs_files(
bucket: str,
prefix: str,
limit: Optional[int] = None,
exit_after_max: Optional[int] = None,
extensions: Sequence[str] = tuple(),
) -> List[str]:
"""Get all matching files in GCS.
Adapted from data API.
"""
set_gcs_credentials()
storage_client = storage.Client.from_service_account_json(
GLOBALS.google_application_credentials
)
matches: List[str] = list()
num_matches: int = 0
blobs = list(storage_client.list_blobs(bucket, prefix=prefix, max_results=limit))
LOGGER.info(f"Found files under gs://{bucket}/{prefix}: {blobs}")
for blob in blobs:
if not extensions or any(blob.name.endswith(ext) for ext in extensions):
matches.append(blob.name)
num_matches += 1
if exit_after_max and num_matches >= exit_after_max:
break
return matches
def get_gs_subfolders(
bucket: str,
prefix: str,
) -> List[str]:
set_gcs_credentials()
storage_client = storage.Client.from_service_account_json(
GLOBALS.google_application_credentials
)
delimiter = "/"
if not prefix.endswith(delimiter):
prefix = prefix + delimiter
blobs = storage_client.list_blobs(bucket, prefix=prefix, delimiter=delimiter)
    # Consume one result so that blobs.prefixes gets populated; with a
    # delimiter, the "subfolder" prefixes only appear once iteration begins.
    try:
        _ = next(blobs)
    except StopIteration:
        pass
    # Note: str.lstrip(prefix) strips any leading *characters* found in
    # prefix, not the prefix substring itself, so slice the prefix off instead.
    found_prefixes = [
        found_prefix[len(prefix):].strip("/") for found_prefix in blobs.prefixes
    ]
return found_prefixes
def get_gs_file_as_text(
bucket: str,
key: str,
) -> str:
"""
Get contents of a file as a string
"""
set_gcs_credentials()
storage_client = storage.Client.from_service_account_json(
GLOBALS.google_application_credentials
)
blob = storage_client.get_bucket(bucket).get_blob(key)
return blob.download_as_text(encoding="utf-8")
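

# A minimal usage sketch (assumes GLOBALS is configured and the AWS secret
# holding the GCS service-account key exists; bucket and prefix names are
# illustrative):
#
#     csvs = get_gs_files("my-bucket", "exports/2021/", extensions=(".csv",))
#     subdirs = get_gs_subfolders("my-bucket", "exports/")
#     readme = get_gs_file_as_text("my-bucket", "exports/2021/README.txt")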
|
normal
|
{
"blob_id": "a5eeafef694db04770833a4063358e8f32f467b0",
"index": 8310,
"step-1": "<mask token>\n\n\ndef set_gcs_credentials():\n if os.path.exists(GLOBALS.google_application_credentials):\n return\n secrets_client = boto3.client('secretsmanager', region_name=GLOBALS.\n aws_region, endpoint_url=GLOBALS.aws_endpoint_uri)\n response = secrets_client.get_secret_value(SecretId=GLOBALS.\n gcs_key_secret_arn)\n os.makedirs(os.path.dirname(GLOBALS.google_application_credentials),\n exist_ok=True)\n with open(GLOBALS.google_application_credentials, 'w') as f:\n f.write(response['SecretString'])\n\n\ndef get_gs_files(bucket: str, prefix: str, limit: Optional[int]=None,\n exit_after_max: Optional[int]=None, extensions: Sequence[str]=tuple()\n ) ->List[str]:\n \"\"\"Get all matching files in GCS.\n Adapted from data API.\n \"\"\"\n set_gcs_credentials()\n storage_client = storage.Client.from_service_account_json(GLOBALS.\n google_application_credentials)\n matches: List[str] = list()\n num_matches: int = 0\n blobs = list(storage_client.list_blobs(bucket, prefix=prefix,\n max_results=limit))\n LOGGER.info(f'Found files under gs://{bucket}/{prefix}: {blobs}')\n for blob in blobs:\n if not extensions or any(blob.name.endswith(ext) for ext in extensions\n ):\n matches.append(blob.name)\n num_matches += 1\n if exit_after_max and num_matches >= exit_after_max:\n break\n return matches\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef set_gcs_credentials():\n if os.path.exists(GLOBALS.google_application_credentials):\n return\n secrets_client = boto3.client('secretsmanager', region_name=GLOBALS.\n aws_region, endpoint_url=GLOBALS.aws_endpoint_uri)\n response = secrets_client.get_secret_value(SecretId=GLOBALS.\n gcs_key_secret_arn)\n os.makedirs(os.path.dirname(GLOBALS.google_application_credentials),\n exist_ok=True)\n with open(GLOBALS.google_application_credentials, 'w') as f:\n f.write(response['SecretString'])\n\n\ndef get_gs_files(bucket: str, prefix: str, limit: Optional[int]=None,\n exit_after_max: Optional[int]=None, extensions: Sequence[str]=tuple()\n ) ->List[str]:\n \"\"\"Get all matching files in GCS.\n Adapted from data API.\n \"\"\"\n set_gcs_credentials()\n storage_client = storage.Client.from_service_account_json(GLOBALS.\n google_application_credentials)\n matches: List[str] = list()\n num_matches: int = 0\n blobs = list(storage_client.list_blobs(bucket, prefix=prefix,\n max_results=limit))\n LOGGER.info(f'Found files under gs://{bucket}/{prefix}: {blobs}')\n for blob in blobs:\n if not extensions or any(blob.name.endswith(ext) for ext in extensions\n ):\n matches.append(blob.name)\n num_matches += 1\n if exit_after_max and num_matches >= exit_after_max:\n break\n return matches\n\n\ndef get_gs_subfolders(bucket: str, prefix: str) ->List[str]:\n set_gcs_credentials()\n storage_client = storage.Client.from_service_account_json(GLOBALS.\n google_application_credentials)\n delimiter = '/'\n if not prefix.endswith(delimiter):\n prefix = prefix + delimiter\n blobs = storage_client.list_blobs(bucket, prefix=prefix, delimiter=\n delimiter)\n try:\n _ = next(blobs)\n except StopIteration:\n pass\n found_prefixes = [found_prefix.lstrip(prefix).strip('/') for\n found_prefix in blobs.prefixes]\n return found_prefixes\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef set_gcs_credentials():\n if os.path.exists(GLOBALS.google_application_credentials):\n return\n secrets_client = boto3.client('secretsmanager', region_name=GLOBALS.\n aws_region, endpoint_url=GLOBALS.aws_endpoint_uri)\n response = secrets_client.get_secret_value(SecretId=GLOBALS.\n gcs_key_secret_arn)\n os.makedirs(os.path.dirname(GLOBALS.google_application_credentials),\n exist_ok=True)\n with open(GLOBALS.google_application_credentials, 'w') as f:\n f.write(response['SecretString'])\n\n\ndef get_gs_files(bucket: str, prefix: str, limit: Optional[int]=None,\n exit_after_max: Optional[int]=None, extensions: Sequence[str]=tuple()\n ) ->List[str]:\n \"\"\"Get all matching files in GCS.\n Adapted from data API.\n \"\"\"\n set_gcs_credentials()\n storage_client = storage.Client.from_service_account_json(GLOBALS.\n google_application_credentials)\n matches: List[str] = list()\n num_matches: int = 0\n blobs = list(storage_client.list_blobs(bucket, prefix=prefix,\n max_results=limit))\n LOGGER.info(f'Found files under gs://{bucket}/{prefix}: {blobs}')\n for blob in blobs:\n if not extensions or any(blob.name.endswith(ext) for ext in extensions\n ):\n matches.append(blob.name)\n num_matches += 1\n if exit_after_max and num_matches >= exit_after_max:\n break\n return matches\n\n\ndef get_gs_subfolders(bucket: str, prefix: str) ->List[str]:\n set_gcs_credentials()\n storage_client = storage.Client.from_service_account_json(GLOBALS.\n google_application_credentials)\n delimiter = '/'\n if not prefix.endswith(delimiter):\n prefix = prefix + delimiter\n blobs = storage_client.list_blobs(bucket, prefix=prefix, delimiter=\n delimiter)\n try:\n _ = next(blobs)\n except StopIteration:\n pass\n found_prefixes = [found_prefix.lstrip(prefix).strip('/') for\n found_prefix in blobs.prefixes]\n return found_prefixes\n\n\ndef get_gs_file_as_text(bucket: str, key: str) ->str:\n \"\"\"\n Get contents of a file as a string\n \"\"\"\n set_gcs_credentials()\n storage_client = storage.Client.from_service_account_json(GLOBALS.\n google_application_credentials)\n blob = storage_client.get_bucket(bucket).get_blob(key)\n return blob.download_as_text(encoding='utf-8')\n",
"step-4": "import os\nfrom typing import List, Optional, Sequence\nimport boto3\nfrom google.cloud import storage\nfrom ..globals import GLOBALS, LOGGER\n\n\ndef set_gcs_credentials():\n if os.path.exists(GLOBALS.google_application_credentials):\n return\n secrets_client = boto3.client('secretsmanager', region_name=GLOBALS.\n aws_region, endpoint_url=GLOBALS.aws_endpoint_uri)\n response = secrets_client.get_secret_value(SecretId=GLOBALS.\n gcs_key_secret_arn)\n os.makedirs(os.path.dirname(GLOBALS.google_application_credentials),\n exist_ok=True)\n with open(GLOBALS.google_application_credentials, 'w') as f:\n f.write(response['SecretString'])\n\n\ndef get_gs_files(bucket: str, prefix: str, limit: Optional[int]=None,\n exit_after_max: Optional[int]=None, extensions: Sequence[str]=tuple()\n ) ->List[str]:\n \"\"\"Get all matching files in GCS.\n Adapted from data API.\n \"\"\"\n set_gcs_credentials()\n storage_client = storage.Client.from_service_account_json(GLOBALS.\n google_application_credentials)\n matches: List[str] = list()\n num_matches: int = 0\n blobs = list(storage_client.list_blobs(bucket, prefix=prefix,\n max_results=limit))\n LOGGER.info(f'Found files under gs://{bucket}/{prefix}: {blobs}')\n for blob in blobs:\n if not extensions or any(blob.name.endswith(ext) for ext in extensions\n ):\n matches.append(blob.name)\n num_matches += 1\n if exit_after_max and num_matches >= exit_after_max:\n break\n return matches\n\n\ndef get_gs_subfolders(bucket: str, prefix: str) ->List[str]:\n set_gcs_credentials()\n storage_client = storage.Client.from_service_account_json(GLOBALS.\n google_application_credentials)\n delimiter = '/'\n if not prefix.endswith(delimiter):\n prefix = prefix + delimiter\n blobs = storage_client.list_blobs(bucket, prefix=prefix, delimiter=\n delimiter)\n try:\n _ = next(blobs)\n except StopIteration:\n pass\n found_prefixes = [found_prefix.lstrip(prefix).strip('/') for\n found_prefix in blobs.prefixes]\n return found_prefixes\n\n\ndef get_gs_file_as_text(bucket: str, key: str) ->str:\n \"\"\"\n Get contents of a file as a string\n \"\"\"\n set_gcs_credentials()\n storage_client = storage.Client.from_service_account_json(GLOBALS.\n google_application_credentials)\n blob = storage_client.get_bucket(bucket).get_blob(key)\n return blob.download_as_text(encoding='utf-8')\n",
"step-5": "import os\nfrom typing import List, Optional, Sequence\n\nimport boto3\nfrom google.cloud import storage\n\nfrom ..globals import GLOBALS, LOGGER\n\n\ndef set_gcs_credentials():\n if os.path.exists(GLOBALS.google_application_credentials):\n return\n\n secrets_client = boto3.client(\n \"secretsmanager\",\n region_name=GLOBALS.aws_region,\n endpoint_url=GLOBALS.aws_endpoint_uri,\n )\n\n response = secrets_client.get_secret_value(SecretId=GLOBALS.gcs_key_secret_arn)\n\n os.makedirs(\n os.path.dirname(GLOBALS.google_application_credentials),\n exist_ok=True,\n )\n\n with open(GLOBALS.google_application_credentials, \"w\") as f:\n f.write(response[\"SecretString\"])\n\n\ndef get_gs_files(\n bucket: str,\n prefix: str,\n limit: Optional[int] = None,\n exit_after_max: Optional[int] = None,\n extensions: Sequence[str] = tuple(),\n) -> List[str]:\n \"\"\"Get all matching files in GCS.\n Adapted from data API.\n \"\"\"\n set_gcs_credentials()\n\n storage_client = storage.Client.from_service_account_json(\n GLOBALS.google_application_credentials\n )\n\n matches: List[str] = list()\n num_matches: int = 0\n\n blobs = list(storage_client.list_blobs(bucket, prefix=prefix, max_results=limit))\n\n LOGGER.info(f\"Found files under gs://{bucket}/{prefix}: {blobs}\")\n for blob in blobs:\n if not extensions or any(blob.name.endswith(ext) for ext in extensions):\n matches.append(blob.name)\n num_matches += 1\n if exit_after_max and num_matches >= exit_after_max:\n break\n\n return matches\n\n\ndef get_gs_subfolders(\n bucket: str,\n prefix: str,\n) -> List[str]:\n set_gcs_credentials()\n\n storage_client = storage.Client.from_service_account_json(\n GLOBALS.google_application_credentials\n )\n\n delimiter = \"/\"\n if not prefix.endswith(delimiter):\n prefix = prefix + delimiter\n\n blobs = storage_client.list_blobs(bucket, prefix=prefix, delimiter=delimiter)\n\n try:\n _ = next(blobs)\n except StopIteration:\n pass\n\n found_prefixes = [\n found_prefix.lstrip(prefix).strip(\"/\") for found_prefix in blobs.prefixes\n ]\n\n return found_prefixes\n\n\ndef get_gs_file_as_text(\n bucket: str,\n key: str,\n) -> str:\n \"\"\"\n Get contents of a file as a string\n \"\"\"\n set_gcs_credentials()\n\n storage_client = storage.Client.from_service_account_json(\n GLOBALS.google_application_credentials\n )\n\n blob = storage_client.get_bucket(bucket).get_blob(key)\n return blob.download_as_text(encoding=\"utf-8\")\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import math

def normal(data, mean, variance):
    # log-density of a Gaussian N(mean, variance) evaluated at data
    return -(((data - mean) ** 2) / (2 * variance)) - (0.5 * math.log(2 * math.pi * variance))

# log joint of each class: log prior (roughly uniform, 1/3) + Gaussian log-likelihood
a = math.log(0.33333) + normal(67.7854, 6.0998, 13.5408)
b = math.log(0.33333) + normal(67.7854, 119.3287, 9.4803)
c = math.log(0.33333) + normal(67.7854, 65.7801, 12.6203)

# evidence: sum of the unnormalised joint probabilities
d = math.exp(a) + math.exp(b) + math.exp(c)

print(math.exp(a))
print(math.exp(b))
print(math.exp(c))
print(math.exp(a) / d)  # posterior probability of the first class
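
# A small extension sketch reusing a, b, c and d from above: the posterior
# for every class, which should sum to 1.
for label, logp in (('class 1', a), ('class 2', b), ('class 3', c)):
    print(label, math.exp(logp) / d)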
|
normal
|
{
"blob_id": "0edca9893d62eea6513543a1d3dd960e9e95d573",
"index": 7505,
"step-1": "import math\n\ndef normal(data,mean,variance):\n\t# print data-mean\n\treturn -1*(((data-mean)**2)/(2*variance)) - (0.5 * math.log(2*3.1415*variance))\n\na = math.log(0.33333) + normal(67.7854,6.0998,13.5408)\nb = math.log(0.33333) + normal(67.7854,119.3287,9.4803)\nc = math.log(0.33333) + normal(67.7854,65.7801,12.6203)\n\nd = math.exp(a) + math.exp(b) + math.exp(c)\n\nprint math.exp(a)\nprint math.exp(b)\nprint math.exp(c)\n\nprint math.exp(a)/d",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from django.db import models
# choices for the factory type: (stored value, human-readable label)
ch = [
    ('Garment', 'Garment'),
    ('Hardgoods', 'Hardgoods'),
    ('Home Furnishing', 'Home Furnishing'),
]
class Factory(models.Model):
name = models.CharField(max_length=30,choices=ch)
def __str__(self):
return self.name
class Fabric(models.Model):
name = models.ForeignKey(Factory, on_delete=models.CASCADE,null=True,blank=True)
fabric = models.CharField(unique=True,max_length=100,null=True,blank=True)
def __str__(self):
return self.fabric
class Wash(models.Model):
name=models.ForeignKey(Fabric,on_delete=models.CASCADE,null=True,blank=True)
wash = models.CharField(unique=True,max_length=100,null=True,blank=True)
def __str__(self):
return self.wash
class Category(models.Model):
cat=models.ForeignKey(Factory,on_delete=models.CASCADE,blank=True)
name = models.ForeignKey(Wash, on_delete=models.CASCADE,null=True,blank=True)
category = models.CharField(unique=True,max_length=100,null=True,blank=True)
def __str__(self):
return self.category
class Subcategory(models.Model):
name = models.ForeignKey(Category, on_delete=models.CASCADE,null=True,blank=True)
subcategory = models.CharField(unique=True,max_length=100,null=True,blank=True)
def __str__(self):
return self.subcategory
class Department(models.Model):
name = models.ForeignKey(Subcategory, on_delete=models.CASCADE,null=True,blank=True)
department = models.CharField(unique=True,max_length=100,null=True,blank=True)
def __str__(self):
return self.department
class Sections(models.Model):
name = models.ForeignKey(Department, on_delete=models.CASCADE,null=True,blank=True)
section = models.CharField(unique=True,max_length=100,null=True,blank=True)
def __str__(self):
return self.section
class Subsection(models.Model):
name = models.ForeignKey(Sections, on_delete=models.CASCADE,null=True,blank=True)
subsection = models.CharField(unique=True,max_length=500,null=True,blank=True)
def __str__(self):
return self.subsection
class Person(models.Model):
name=models.CharField(max_length=30)
fact=models.ForeignKey(Factory,on_delete=models.CASCADE)
fab=models.ForeignKey(Fabric,on_delete=models.CASCADE,null=True)
was= models.ForeignKey(Wash, on_delete=models.CASCADE,null=True)
cate = models.ForeignKey(Category, on_delete=models.CASCADE)
subcat=models.ForeignKey(Subcategory,on_delete=models.CASCADE)
dept = models.ForeignKey(Department, on_delete=models.CASCADE,null=True)
sect=models.ForeignKey(Sections,on_delete=models.CASCADE,null=True)
subsect=models.ForeignKey(Subsection,on_delete=models.CASCADE,null=True)
def __str__(self):
return str(self.name)
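
# A hypothetical shell session showing the foreign-key chain (object names
# are invented for the example; run inside `python manage.py shell`):
#
#     f = Factory.objects.create(name='Garment')
#     fab = Fabric.objects.create(name=f, fabric='Denim')
#     w = Wash.objects.create(name=fab, wash='Stone Wash')
#     c = Category.objects.create(cat=f, name=w, category='Jeans')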
|
normal
|
{
"blob_id": "a0dcfb738451c11ed4ff1428629c3f7bbf5c52c9",
"index": 5649,
"step-1": "<mask token>\n\n\nclass Category(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.category\n\n\nclass Subcategory(models.Model):\n name = models.ForeignKey(Category, on_delete=models.CASCADE, null=True,\n blank=True)\n subcategory = models.CharField(unique=True, max_length=100, null=True,\n blank=True)\n\n def __str__(self):\n return self.subcategory\n\n\nclass Department(models.Model):\n name = models.ForeignKey(Subcategory, on_delete=models.CASCADE, null=\n True, blank=True)\n department = models.CharField(unique=True, max_length=100, null=True,\n blank=True)\n\n def __str__(self):\n return self.department\n\n\nclass Sections(models.Model):\n name = models.ForeignKey(Department, on_delete=models.CASCADE, null=\n True, blank=True)\n section = models.CharField(unique=True, max_length=100, null=True,\n blank=True)\n\n def __str__(self):\n return self.section\n\n\nclass Subsection(models.Model):\n name = models.ForeignKey(Sections, on_delete=models.CASCADE, null=True,\n blank=True)\n subsection = models.CharField(unique=True, max_length=500, null=True,\n blank=True)\n\n def __str__(self):\n return self.subsection\n\n\nclass Person(models.Model):\n name = models.CharField(max_length=30)\n fact = models.ForeignKey(Factory, on_delete=models.CASCADE)\n fab = models.ForeignKey(Fabric, on_delete=models.CASCADE, null=True)\n was = models.ForeignKey(Wash, on_delete=models.CASCADE, null=True)\n cate = models.ForeignKey(Category, on_delete=models.CASCADE)\n subcat = models.ForeignKey(Subcategory, on_delete=models.CASCADE)\n dept = models.ForeignKey(Department, on_delete=models.CASCADE, null=True)\n sect = models.ForeignKey(Sections, on_delete=models.CASCADE, null=True)\n subsect = models.ForeignKey(Subsection, on_delete=models.CASCADE, null=True\n )\n\n def __str__(self):\n return str(self.name)\n",
"step-2": "<mask token>\n\n\nclass Wash(models.Model):\n <mask token>\n <mask token>\n\n def __str__(self):\n return self.wash\n\n\nclass Category(models.Model):\n cat = models.ForeignKey(Factory, on_delete=models.CASCADE, blank=True)\n name = models.ForeignKey(Wash, on_delete=models.CASCADE, null=True,\n blank=True)\n category = models.CharField(unique=True, max_length=100, null=True,\n blank=True)\n\n def __str__(self):\n return self.category\n\n\nclass Subcategory(models.Model):\n name = models.ForeignKey(Category, on_delete=models.CASCADE, null=True,\n blank=True)\n subcategory = models.CharField(unique=True, max_length=100, null=True,\n blank=True)\n\n def __str__(self):\n return self.subcategory\n\n\nclass Department(models.Model):\n name = models.ForeignKey(Subcategory, on_delete=models.CASCADE, null=\n True, blank=True)\n department = models.CharField(unique=True, max_length=100, null=True,\n blank=True)\n\n def __str__(self):\n return self.department\n\n\nclass Sections(models.Model):\n name = models.ForeignKey(Department, on_delete=models.CASCADE, null=\n True, blank=True)\n section = models.CharField(unique=True, max_length=100, null=True,\n blank=True)\n\n def __str__(self):\n return self.section\n\n\nclass Subsection(models.Model):\n name = models.ForeignKey(Sections, on_delete=models.CASCADE, null=True,\n blank=True)\n subsection = models.CharField(unique=True, max_length=500, null=True,\n blank=True)\n\n def __str__(self):\n return self.subsection\n\n\nclass Person(models.Model):\n name = models.CharField(max_length=30)\n fact = models.ForeignKey(Factory, on_delete=models.CASCADE)\n fab = models.ForeignKey(Fabric, on_delete=models.CASCADE, null=True)\n was = models.ForeignKey(Wash, on_delete=models.CASCADE, null=True)\n cate = models.ForeignKey(Category, on_delete=models.CASCADE)\n subcat = models.ForeignKey(Subcategory, on_delete=models.CASCADE)\n dept = models.ForeignKey(Department, on_delete=models.CASCADE, null=True)\n sect = models.ForeignKey(Sections, on_delete=models.CASCADE, null=True)\n subsect = models.ForeignKey(Subsection, on_delete=models.CASCADE, null=True\n )\n\n def __str__(self):\n return str(self.name)\n",
"step-3": "<mask token>\n\n\nclass Fabric(models.Model):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Wash(models.Model):\n name = models.ForeignKey(Fabric, on_delete=models.CASCADE, null=True,\n blank=True)\n wash = models.CharField(unique=True, max_length=100, null=True, blank=True)\n\n def __str__(self):\n return self.wash\n\n\nclass Category(models.Model):\n cat = models.ForeignKey(Factory, on_delete=models.CASCADE, blank=True)\n name = models.ForeignKey(Wash, on_delete=models.CASCADE, null=True,\n blank=True)\n category = models.CharField(unique=True, max_length=100, null=True,\n blank=True)\n\n def __str__(self):\n return self.category\n\n\nclass Subcategory(models.Model):\n name = models.ForeignKey(Category, on_delete=models.CASCADE, null=True,\n blank=True)\n subcategory = models.CharField(unique=True, max_length=100, null=True,\n blank=True)\n\n def __str__(self):\n return self.subcategory\n\n\nclass Department(models.Model):\n name = models.ForeignKey(Subcategory, on_delete=models.CASCADE, null=\n True, blank=True)\n department = models.CharField(unique=True, max_length=100, null=True,\n blank=True)\n\n def __str__(self):\n return self.department\n\n\nclass Sections(models.Model):\n name = models.ForeignKey(Department, on_delete=models.CASCADE, null=\n True, blank=True)\n section = models.CharField(unique=True, max_length=100, null=True,\n blank=True)\n\n def __str__(self):\n return self.section\n\n\nclass Subsection(models.Model):\n name = models.ForeignKey(Sections, on_delete=models.CASCADE, null=True,\n blank=True)\n subsection = models.CharField(unique=True, max_length=500, null=True,\n blank=True)\n\n def __str__(self):\n return self.subsection\n\n\nclass Person(models.Model):\n name = models.CharField(max_length=30)\n fact = models.ForeignKey(Factory, on_delete=models.CASCADE)\n fab = models.ForeignKey(Fabric, on_delete=models.CASCADE, null=True)\n was = models.ForeignKey(Wash, on_delete=models.CASCADE, null=True)\n cate = models.ForeignKey(Category, on_delete=models.CASCADE)\n subcat = models.ForeignKey(Subcategory, on_delete=models.CASCADE)\n dept = models.ForeignKey(Department, on_delete=models.CASCADE, null=True)\n sect = models.ForeignKey(Sections, on_delete=models.CASCADE, null=True)\n subsect = models.ForeignKey(Subsection, on_delete=models.CASCADE, null=True\n )\n\n def __str__(self):\n return str(self.name)\n",
"step-4": "<mask token>\n\n\nclass Fabric(models.Model):\n name = models.ForeignKey(Factory, on_delete=models.CASCADE, null=True,\n blank=True)\n fabric = models.CharField(unique=True, max_length=100, null=True, blank\n =True)\n\n def __str__(self):\n return self.fabric\n\n\nclass Wash(models.Model):\n name = models.ForeignKey(Fabric, on_delete=models.CASCADE, null=True,\n blank=True)\n wash = models.CharField(unique=True, max_length=100, null=True, blank=True)\n\n def __str__(self):\n return self.wash\n\n\nclass Category(models.Model):\n cat = models.ForeignKey(Factory, on_delete=models.CASCADE, blank=True)\n name = models.ForeignKey(Wash, on_delete=models.CASCADE, null=True,\n blank=True)\n category = models.CharField(unique=True, max_length=100, null=True,\n blank=True)\n\n def __str__(self):\n return self.category\n\n\nclass Subcategory(models.Model):\n name = models.ForeignKey(Category, on_delete=models.CASCADE, null=True,\n blank=True)\n subcategory = models.CharField(unique=True, max_length=100, null=True,\n blank=True)\n\n def __str__(self):\n return self.subcategory\n\n\nclass Department(models.Model):\n name = models.ForeignKey(Subcategory, on_delete=models.CASCADE, null=\n True, blank=True)\n department = models.CharField(unique=True, max_length=100, null=True,\n blank=True)\n\n def __str__(self):\n return self.department\n\n\nclass Sections(models.Model):\n name = models.ForeignKey(Department, on_delete=models.CASCADE, null=\n True, blank=True)\n section = models.CharField(unique=True, max_length=100, null=True,\n blank=True)\n\n def __str__(self):\n return self.section\n\n\nclass Subsection(models.Model):\n name = models.ForeignKey(Sections, on_delete=models.CASCADE, null=True,\n blank=True)\n subsection = models.CharField(unique=True, max_length=500, null=True,\n blank=True)\n\n def __str__(self):\n return self.subsection\n\n\nclass Person(models.Model):\n name = models.CharField(max_length=30)\n fact = models.ForeignKey(Factory, on_delete=models.CASCADE)\n fab = models.ForeignKey(Fabric, on_delete=models.CASCADE, null=True)\n was = models.ForeignKey(Wash, on_delete=models.CASCADE, null=True)\n cate = models.ForeignKey(Category, on_delete=models.CASCADE)\n subcat = models.ForeignKey(Subcategory, on_delete=models.CASCADE)\n dept = models.ForeignKey(Department, on_delete=models.CASCADE, null=True)\n sect = models.ForeignKey(Sections, on_delete=models.CASCADE, null=True)\n subsect = models.ForeignKey(Subsection, on_delete=models.CASCADE, null=True\n )\n\n def __str__(self):\n return str(self.name)\n",
"step-5": "from django.db import models\r\n\r\n\r\nch=[\r\n ('Garment','Garment'),\r\n ('Hardgoods','Hardgoods'),\r\n ('Home Furnishing','Home Furnishing'),\r\n]\r\nclass Factory(models.Model):\r\n name = models.CharField(max_length=30,choices=ch)\r\n\r\n def __str__(self):\r\n return self.name\r\n\r\nclass Fabric(models.Model):\r\n name = models.ForeignKey(Factory, on_delete=models.CASCADE,null=True,blank=True)\r\n fabric = models.CharField(unique=True,max_length=100,null=True,blank=True)\r\n\r\n def __str__(self):\r\n return self.fabric\r\n\r\nclass Wash(models.Model):\r\n name=models.ForeignKey(Fabric,on_delete=models.CASCADE,null=True,blank=True)\r\n wash = models.CharField(unique=True,max_length=100,null=True,blank=True)\r\n\r\n\r\n def __str__(self):\r\n return self.wash\r\n\r\nclass Category(models.Model):\r\n cat=models.ForeignKey(Factory,on_delete=models.CASCADE,blank=True)\r\n name = models.ForeignKey(Wash, on_delete=models.CASCADE,null=True,blank=True)\r\n category = models.CharField(unique=True,max_length=100,null=True,blank=True)\r\n\r\n def __str__(self):\r\n return self.category\r\n\r\nclass Subcategory(models.Model):\r\n name = models.ForeignKey(Category, on_delete=models.CASCADE,null=True,blank=True)\r\n subcategory = models.CharField(unique=True,max_length=100,null=True,blank=True)\r\n\r\n def __str__(self):\r\n return self.subcategory\r\n\r\nclass Department(models.Model):\r\n name = models.ForeignKey(Subcategory, on_delete=models.CASCADE,null=True,blank=True)\r\n department = models.CharField(unique=True,max_length=100,null=True,blank=True)\r\n\r\n def __str__(self):\r\n return self.department\r\n\r\nclass Sections(models.Model):\r\n name = models.ForeignKey(Department, on_delete=models.CASCADE,null=True,blank=True)\r\n section = models.CharField(unique=True,max_length=100,null=True,blank=True)\r\n\r\n def __str__(self):\r\n return self.section\r\n\r\nclass Subsection(models.Model):\r\n name = models.ForeignKey(Sections, on_delete=models.CASCADE,null=True,blank=True)\r\n subsection = models.CharField(unique=True,max_length=500,null=True,blank=True)\r\n\r\n def __str__(self):\r\n return self.subsection\r\n\r\nclass Person(models.Model):\r\n name=models.CharField(max_length=30)\r\n fact=models.ForeignKey(Factory,on_delete=models.CASCADE)\r\n fab=models.ForeignKey(Fabric,on_delete=models.CASCADE,null=True)\r\n was= models.ForeignKey(Wash, on_delete=models.CASCADE,null=True)\r\n cate = models.ForeignKey(Category, on_delete=models.CASCADE)\r\n subcat=models.ForeignKey(Subcategory,on_delete=models.CASCADE)\r\n dept = models.ForeignKey(Department, on_delete=models.CASCADE,null=True)\r\n sect=models.ForeignKey(Sections,on_delete=models.CASCADE,null=True)\r\n subsect=models.ForeignKey(Subsection,on_delete=models.CASCADE,null=True)\r\n\r\n def __str__(self):\r\n return str(self.name)\r\n",
"step-ids": [
17,
20,
22,
24,
30
]
}
|
[
17,
20,
22,
24,
30
] |
#from getData import getRatings
import numpy as np
num_factors = 10
num_iter = 75
regularization = 0.05
lr = 0.005
folds=5
#to make sure you are able to repeat results, set the random seed to something:
np.random.seed(17)
def split_matrix(ratings, num_users, num_movies):
#Convert data into (IxJ) matrix
X= np.zeros((num_users, num_movies))
for r in np.arange(len(ratings)):
X[ratings[r,0]-1,ratings[r,1]-1] = ratings[r,2]
#print(X.shape)
return X
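
# For example (toy numbers): a ratings row (userID=1, movieID=2, rating=5)
# lands at X[0, 1] = 5, since the IDs are 1-based but the matrix is 0-indexed.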
def mf_gd(ratings, num_users, num_movies):
X_data= split_matrix(ratings, num_users, num_movies)
    # np.zeros expects a single shape tuple, not two positional sizes
    X_hat = np.zeros((num_users, num_movies)) #predicted rating matrix
    err = np.zeros((num_users, num_movies)) #error values
# Randomly initialize weights in U and M
U = np.random.rand(num_users, num_factors)
M = np.random.rand(num_factors, num_movies)
    # copies are needed: plain assignment would alias U_prime with U
    U_prime = U.copy()
    M_prime = M.copy()
for nr in np.arange(num_iter):
for i in np.arange(len(ratings)):
userID = ratings[i,0]-1
movieID = ratings[i,1]-1
actual = ratings[i,2]
            prediction = np.sum(U[userID,:]*M[:,movieID]) # dot product of user and item latent factors
error = actual - prediction #compute e(i,j)
            # update U and M with the gradient step:
            #   U'(i,k) = U(i,k) + lr * (2*e*M(k,j) - lambda*U(i,k))
            #   M'(k,j) = M(k,j) + lr * (2*e*U(i,k) - lambda*M(k,j))
for k in np.arange(num_factors):
U_prime[userID,k] = U[userID,k]+ lr * (2*error*M[k,movieID] - regularization * U[userID,k])
M_prime[k,movieID] = M[k,movieID] + lr * (2*error*U[userID,k] - regularization * M[k,movieID])
U = U_prime
M = M_prime
#Intermediate RMSE
X_hat = np.dot(U,M)
err = X_data-X_hat
e = err[np.where(np.isnan(err)==False)]
ir = np.sqrt(np.mean(e**2))
print ("Error for iteration #", nr, ":", ir)
#Return the result
X_hat = np.dot(U,M)
return X_hat
def mf():
#Read dataset
#ratings = getRatings()
ratings = np.genfromtxt("D:/Leiden/Semester 1_Sept/Assignment1/AiDM/ml-1m/ratings.dat", usecols=(0,1,2), delimiter='::',dtype='int')
#number of users and movies in data.
num_users= np.max(ratings[:,0])
num_movies= np.max(ratings[:,1])
print(num_users, num_movies)
print(len(ratings))
#5-fold cross validation
for f in np.arange(folds):
print ("Fold #", f)
#shuffle data for train and test
np.random.shuffle(ratings)
train_set = np.array([ratings[x] for x in np.arange(len(ratings)) if (x%folds) !=f])
test_set = np.array([ratings[x] for x in np.arange(len(ratings)) if (x%folds) == f])
#Matrix fact
X_hat = mf_gd(train_set, num_users, num_movies)
X_train = split_matrix(train_set, num_users, num_movies)
X_test = split_matrix(test_set, num_users, num_movies)
err_train = X_train- X_hat
err_test = X_test - X_hat
#RMSE
e_mf = err_train[np.where(np.isnan(err_train)==False)]
error_train_mf = np.sqrt(np.mean(e_mf**2))
e2_mf = err_test[np.where(np.isnan(err_test)==False)]
error_test_mf = np.sqrt(np.mean(e2_mf**2))
print ('Matrix Factorization Error -> training set: ', error_train_mf)
print ('Matrix Factorization Error -> test set: ', error_test_mf)
mf()
#Still getting a high error rate, not comparable to the website mentioned in the assignment doc.
# I need to check the logic again.
#https://medium.com/coinmonks/recommendation-engine-python-401c080c583e; followed this blogpost
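
# Illustration of the modulo fold split in mf() (numbers invented): with
# folds=5, row index x is a test row for fold f exactly when x % 5 == f, so
# fold 0 tests on rows {0, 5, 10, ...} and trains on all remaining rows.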
|
normal
|
{
"blob_id": "b4267612e7939b635542099e1ba31e661720607a",
"index": 3129,
"step-1": "<mask token>\n\n\ndef split_matrix(ratings, num_users, num_movies):\n X = np.zeros((num_users, num_movies))\n for r in np.arange(len(ratings)):\n X[ratings[r, 0] - 1, ratings[r, 1] - 1] = ratings[r, 2]\n return X\n\n\ndef mf_gd(ratings, num_users, num_movies):\n X_data = split_matrix(ratings, num_users, num_movies)\n X_hat = np.zeros(num_users, num_movies)\n err = np.zeros(num_users, num_movies)\n U = np.random.rand(num_users, num_factors)\n M = np.random.rand(num_factors, num_movies)\n U_prime = U\n M_prime = M\n for nr in np.arange(num_iter):\n for i in np.arange(len(ratings)):\n userID = ratings[i, 0] - 1\n movieID = ratings[i, 1] - 1\n actual = ratings[i, 2]\n prediction = np.sum(U[userID, :] * M[:, movieID])\n error = actual - prediction\n for k in np.arange(num_factors):\n U_prime[userID, k] = U[userID, k] + lr * (2 * error * M[k,\n movieID] - regularization * U[userID, k])\n M_prime[k, movieID] = M[k, movieID] + lr * (2 * error * U[\n userID, k] - regularization * M[k, movieID])\n U = U_prime\n M = M_prime\n X_hat = np.dot(U, M)\n err = X_data - X_hat\n e = err[np.where(np.isnan(err) == False)]\n ir = np.sqrt(np.mean(e ** 2))\n print('Error for iteration #', nr, ':', ir)\n X_hat = np.dot(U, M)\n return X_hat\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef split_matrix(ratings, num_users, num_movies):\n X = np.zeros((num_users, num_movies))\n for r in np.arange(len(ratings)):\n X[ratings[r, 0] - 1, ratings[r, 1] - 1] = ratings[r, 2]\n return X\n\n\ndef mf_gd(ratings, num_users, num_movies):\n X_data = split_matrix(ratings, num_users, num_movies)\n X_hat = np.zeros(num_users, num_movies)\n err = np.zeros(num_users, num_movies)\n U = np.random.rand(num_users, num_factors)\n M = np.random.rand(num_factors, num_movies)\n U_prime = U\n M_prime = M\n for nr in np.arange(num_iter):\n for i in np.arange(len(ratings)):\n userID = ratings[i, 0] - 1\n movieID = ratings[i, 1] - 1\n actual = ratings[i, 2]\n prediction = np.sum(U[userID, :] * M[:, movieID])\n error = actual - prediction\n for k in np.arange(num_factors):\n U_prime[userID, k] = U[userID, k] + lr * (2 * error * M[k,\n movieID] - regularization * U[userID, k])\n M_prime[k, movieID] = M[k, movieID] + lr * (2 * error * U[\n userID, k] - regularization * M[k, movieID])\n U = U_prime\n M = M_prime\n X_hat = np.dot(U, M)\n err = X_data - X_hat\n e = err[np.where(np.isnan(err) == False)]\n ir = np.sqrt(np.mean(e ** 2))\n print('Error for iteration #', nr, ':', ir)\n X_hat = np.dot(U, M)\n return X_hat\n\n\ndef mf():\n ratings = np.genfromtxt(\n 'D:/Leiden/Semester 1_Sept/Assignment1/AiDM/ml-1m/ratings.dat',\n usecols=(0, 1, 2), delimiter='::', dtype='int')\n num_users = np.max(ratings[:, 0])\n num_movies = np.max(ratings[:, 1])\n print(num_users, num_movies)\n print(len(ratings))\n for f in np.arange(folds):\n print('Fold #', f)\n np.random.shuffle(ratings)\n train_set = np.array([ratings[x] for x in np.arange(len(ratings)) if\n x % folds != f])\n test_set = np.array([ratings[x] for x in np.arange(len(ratings)) if\n x % folds == f])\n X_hat = mf_gd(train_set, num_users, num_movies)\n X_train = split_matrix(train_set, num_users, num_movies)\n X_test = split_matrix(test_set, num_users, num_movies)\n err_train = X_train - X_hat\n err_test = X_test - X_hat\n e_mf = err_train[np.where(np.isnan(err_train) == False)]\n error_train_mf = np.sqrt(np.mean(e_mf ** 2))\n e2_mf = err_test[np.where(np.isnan(err_test) == False)]\n error_test_mf = np.sqrt(np.mean(e2_mf ** 2))\n print('Matrix Factorization Error -> training set: ', error_train_mf)\n print('Matrix Factorization Error -> test set: ', error_test_mf)\n\n\n<mask token>\n",
"step-3": "<mask token>\nnp.random.seed(17)\n\n\ndef split_matrix(ratings, num_users, num_movies):\n X = np.zeros((num_users, num_movies))\n for r in np.arange(len(ratings)):\n X[ratings[r, 0] - 1, ratings[r, 1] - 1] = ratings[r, 2]\n return X\n\n\ndef mf_gd(ratings, num_users, num_movies):\n X_data = split_matrix(ratings, num_users, num_movies)\n X_hat = np.zeros(num_users, num_movies)\n err = np.zeros(num_users, num_movies)\n U = np.random.rand(num_users, num_factors)\n M = np.random.rand(num_factors, num_movies)\n U_prime = U\n M_prime = M\n for nr in np.arange(num_iter):\n for i in np.arange(len(ratings)):\n userID = ratings[i, 0] - 1\n movieID = ratings[i, 1] - 1\n actual = ratings[i, 2]\n prediction = np.sum(U[userID, :] * M[:, movieID])\n error = actual - prediction\n for k in np.arange(num_factors):\n U_prime[userID, k] = U[userID, k] + lr * (2 * error * M[k,\n movieID] - regularization * U[userID, k])\n M_prime[k, movieID] = M[k, movieID] + lr * (2 * error * U[\n userID, k] - regularization * M[k, movieID])\n U = U_prime\n M = M_prime\n X_hat = np.dot(U, M)\n err = X_data - X_hat\n e = err[np.where(np.isnan(err) == False)]\n ir = np.sqrt(np.mean(e ** 2))\n print('Error for iteration #', nr, ':', ir)\n X_hat = np.dot(U, M)\n return X_hat\n\n\ndef mf():\n ratings = np.genfromtxt(\n 'D:/Leiden/Semester 1_Sept/Assignment1/AiDM/ml-1m/ratings.dat',\n usecols=(0, 1, 2), delimiter='::', dtype='int')\n num_users = np.max(ratings[:, 0])\n num_movies = np.max(ratings[:, 1])\n print(num_users, num_movies)\n print(len(ratings))\n for f in np.arange(folds):\n print('Fold #', f)\n np.random.shuffle(ratings)\n train_set = np.array([ratings[x] for x in np.arange(len(ratings)) if\n x % folds != f])\n test_set = np.array([ratings[x] for x in np.arange(len(ratings)) if\n x % folds == f])\n X_hat = mf_gd(train_set, num_users, num_movies)\n X_train = split_matrix(train_set, num_users, num_movies)\n X_test = split_matrix(test_set, num_users, num_movies)\n err_train = X_train - X_hat\n err_test = X_test - X_hat\n e_mf = err_train[np.where(np.isnan(err_train) == False)]\n error_train_mf = np.sqrt(np.mean(e_mf ** 2))\n e2_mf = err_test[np.where(np.isnan(err_test) == False)]\n error_test_mf = np.sqrt(np.mean(e2_mf ** 2))\n print('Matrix Factorization Error -> training set: ', error_train_mf)\n print('Matrix Factorization Error -> test set: ', error_test_mf)\n\n\nmf()\n",
"step-4": "<mask token>\nnum_factors = 10\nnum_iter = 75\nregularization = 0.05\nlr = 0.005\nfolds = 5\nnp.random.seed(17)\n\n\ndef split_matrix(ratings, num_users, num_movies):\n X = np.zeros((num_users, num_movies))\n for r in np.arange(len(ratings)):\n X[ratings[r, 0] - 1, ratings[r, 1] - 1] = ratings[r, 2]\n return X\n\n\ndef mf_gd(ratings, num_users, num_movies):\n X_data = split_matrix(ratings, num_users, num_movies)\n X_hat = np.zeros(num_users, num_movies)\n err = np.zeros(num_users, num_movies)\n U = np.random.rand(num_users, num_factors)\n M = np.random.rand(num_factors, num_movies)\n U_prime = U\n M_prime = M\n for nr in np.arange(num_iter):\n for i in np.arange(len(ratings)):\n userID = ratings[i, 0] - 1\n movieID = ratings[i, 1] - 1\n actual = ratings[i, 2]\n prediction = np.sum(U[userID, :] * M[:, movieID])\n error = actual - prediction\n for k in np.arange(num_factors):\n U_prime[userID, k] = U[userID, k] + lr * (2 * error * M[k,\n movieID] - regularization * U[userID, k])\n M_prime[k, movieID] = M[k, movieID] + lr * (2 * error * U[\n userID, k] - regularization * M[k, movieID])\n U = U_prime\n M = M_prime\n X_hat = np.dot(U, M)\n err = X_data - X_hat\n e = err[np.where(np.isnan(err) == False)]\n ir = np.sqrt(np.mean(e ** 2))\n print('Error for iteration #', nr, ':', ir)\n X_hat = np.dot(U, M)\n return X_hat\n\n\ndef mf():\n ratings = np.genfromtxt(\n 'D:/Leiden/Semester 1_Sept/Assignment1/AiDM/ml-1m/ratings.dat',\n usecols=(0, 1, 2), delimiter='::', dtype='int')\n num_users = np.max(ratings[:, 0])\n num_movies = np.max(ratings[:, 1])\n print(num_users, num_movies)\n print(len(ratings))\n for f in np.arange(folds):\n print('Fold #', f)\n np.random.shuffle(ratings)\n train_set = np.array([ratings[x] for x in np.arange(len(ratings)) if\n x % folds != f])\n test_set = np.array([ratings[x] for x in np.arange(len(ratings)) if\n x % folds == f])\n X_hat = mf_gd(train_set, num_users, num_movies)\n X_train = split_matrix(train_set, num_users, num_movies)\n X_test = split_matrix(test_set, num_users, num_movies)\n err_train = X_train - X_hat\n err_test = X_test - X_hat\n e_mf = err_train[np.where(np.isnan(err_train) == False)]\n error_train_mf = np.sqrt(np.mean(e_mf ** 2))\n e2_mf = err_test[np.where(np.isnan(err_test) == False)]\n error_test_mf = np.sqrt(np.mean(e2_mf ** 2))\n print('Matrix Factorization Error -> training set: ', error_train_mf)\n print('Matrix Factorization Error -> test set: ', error_test_mf)\n\n\nmf()\n",
"step-5": "#from getData import getRatings\r\nimport numpy as np \r\n\r\n\r\nnum_factors = 10\r\nnum_iter = 75\r\nregularization = 0.05\r\nlr = 0.005\r\nfolds=5\r\n\r\n#to make sure you are able to repeat results, set the random seed to something:\r\nnp.random.seed(17)\r\n\r\n\r\ndef split_matrix(ratings, num_users, num_movies):\r\n #Convert data into (IxJ) matrix\r\n X= np.zeros((num_users, num_movies))\r\n for r in np.arange(len(ratings)):\r\n X[ratings[r,0]-1,ratings[r,1]-1] = ratings[r,2]\r\n\r\n #print(X.shape)\r\n return X\r\n\r\n\r\ndef mf_gd(ratings, num_users, num_movies):\r\n X_data= split_matrix(ratings, num_users, num_movies)\r\n\r\n X_hat = np.zeros(num_users, num_movies) #predicted rating matrix\r\n err = np.zeros(num_users, num_movies) #error values\r\n\r\n # Randomly initialize weights in U and M \r\n U = np.random.rand(num_users, num_factors)\r\n M = np.random.rand(num_factors, num_movies)\r\n U_prime = U\r\n M_prime = M\r\n\r\n for nr in np.arange(num_iter):\r\n for i in np.arange(len(ratings)):\r\n userID = ratings[i,0]-1\r\n movieID = ratings[i,1]-1\r\n actual = ratings[i,2]\r\n prediction = np.sum(U[userID,:]*M[:,movieID]) #SVD\r\n error = actual - prediction #compute e(i,j)\r\n\r\n \r\n #update U and M using following equations:\r\n #Uprime(i,k) = u(i,k) + lr(2e*m(k,j)-lamda.u(i,k))\r\n #Mprime(k,j) = m(k,j) + lr(2e*u(i,k)-lamda.m(k,j))\r\n for k in np.arange(num_factors):\r\n U_prime[userID,k] = U[userID,k]+ lr * (2*error*M[k,movieID] - regularization * U[userID,k])\r\n M_prime[k,movieID] = M[k,movieID] + lr * (2*error*U[userID,k] - regularization * M[k,movieID])\r\n\r\n U = U_prime\r\n M = M_prime\r\n\r\n #Intermediate RMSE\r\n X_hat = np.dot(U,M)\r\n err = X_data-X_hat\r\n e = err[np.where(np.isnan(err)==False)]\r\n ir = np.sqrt(np.mean(e**2))\r\n\r\n print (\"Error for iteration #\", nr, \":\", ir)\r\n\r\n \r\n #Return the result \r\n X_hat = np.dot(U,M)\r\n return X_hat\r\n\r\n\r\ndef mf():\r\n #Read dataset \r\n #ratings = getRatings()\r\n ratings = np.genfromtxt(\"D:/Leiden/Semester 1_Sept/Assignment1/AiDM/ml-1m/ratings.dat\", usecols=(0,1,2), delimiter='::',dtype='int')\r\n\r\n #number of users and movies in data. \r\n num_users= np.max(ratings[:,0])\r\n num_movies= np.max(ratings[:,1])\r\n\r\n print(num_users, num_movies)\r\n print(len(ratings))\r\n \r\n #5-fold cross validation\r\n for f in np.arange(folds):\r\n print (\"Fold #\", f)\r\n\r\n #shuffle data for train and test\r\n np.random.shuffle(ratings)\r\n train_set = np.array([ratings[x] for x in np.arange(len(ratings)) if (x%folds) !=f])\r\n test_set = np.array([ratings[x] for x in np.arange(len(ratings)) if (x%folds) == f])\r\n\r\n \r\n #Matrix fact\r\n X_hat = mf_gd(train_set, num_users, num_movies)\r\n X_train = split_matrix(train_set, num_users, num_movies)\r\n X_test = split_matrix(test_set, num_users, num_movies)\r\n\r\n err_train = X_train- X_hat\r\n err_test = X_test - X_hat\r\n\r\n #RMSE\r\n e_mf = err_train[np.where(np.isnan(err_train)==False)]\r\n error_train_mf = np.sqrt(np.mean(e_mf**2))\r\n\r\n e2_mf = err_test[np.where(np.isnan(err_test)==False)]\r\n error_test_mf = np.sqrt(np.mean(e2_mf**2))\r\n \r\n\r\n print ('Matrix Factorization Error -> training set: ', error_train_mf)\r\n print ('Matrix Factorization Error -> test set: ', error_test_mf)\r\n\r\nmf()\r\n\r\n#Still getting a high error rate, not comparable to the website mentioned in the assignment doc. \r\n# I need to check the logic again. 
\r\n#https://medium.com/coinmonks/recommendation-engine-python-401c080c583e; followed this blogpost ",
"step-ids": [
2,
3,
4,
5,
7
]
}
|
[
2,
3,
4,
5,
7
] |
# coding: utf-8
"""
MailSlurp API
MailSlurp is an API for sending and receiving emails from dynamically allocated email addresses. It's designed for developers and QA teams to test applications, process inbound emails, send templated notifications, attachments, and more. ## Resources - [Homepage](https://www.mailslurp.com) - Get an [API KEY](https://app.mailslurp.com/sign-up/) - Generated [SDK Clients](https://docs.mailslurp.com/) - [Examples](https://github.com/mailslurp/examples) repository # noqa: E501
The version of the OpenAPI document: 6.5.2
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from mailslurp_client.api_client import ApiClient
from mailslurp_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class FormControllerApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def submit_form(self, **kwargs): # noqa: E501
"""Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501
This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `[email protected]` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\"https://python.api.mailslurp.com/forms\" method=\"post\" > <input name=\"_to\" type=\"hidden\" value=\"[email protected]\"/> <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` #### URL Example ```html <form action=\"https://python.api.mailslurp.com/[email protected]\" method=\"post\" > <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\"multipart/form-data\">`. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.submit_form(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str to: The email address that submitted form should be sent to.
:param str subject: Optional subject of the email that will be sent.
:param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.
:param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.
:param str success_message: Optional success message to display if no _redirectTo present.
:param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.
:param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: str
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.submit_form_with_http_info(**kwargs) # noqa: E501
def submit_form_with_http_info(self, **kwargs): # noqa: E501
"""Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501
This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `[email protected]` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\"https://python.api.mailslurp.com/forms\" method=\"post\" > <input name=\"_to\" type=\"hidden\" value=\"[email protected]\"/> <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` #### URL Example ```html <form action=\"https://python.api.mailslurp.com/[email protected]\" method=\"post\" > <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\"multipart/form-data\">`. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.submit_form_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str to: The email address that submitted form should be sent to.
:param str subject: Optional subject of the email that will be sent.
:param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.
:param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.
:param str success_message: Optional success message to display if no _redirectTo present.
:param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.
:param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(str, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'to',
'subject',
'redirect_to',
'email_address',
'success_message',
'spam_check',
'other_parameters'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method submit_form" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'to' in local_var_params and local_var_params['to'] is not None: # noqa: E501
query_params.append(('_to', local_var_params['to'])) # noqa: E501
if 'subject' in local_var_params and local_var_params['subject'] is not None: # noqa: E501
query_params.append(('_subject', local_var_params['subject'])) # noqa: E501
if 'redirect_to' in local_var_params and local_var_params['redirect_to'] is not None: # noqa: E501
query_params.append(('_redirectTo', local_var_params['redirect_to'])) # noqa: E501
if 'email_address' in local_var_params and local_var_params['email_address'] is not None: # noqa: E501
query_params.append(('_emailAddress', local_var_params['email_address'])) # noqa: E501
if 'success_message' in local_var_params and local_var_params['success_message'] is not None: # noqa: E501
query_params.append(('_successMessage', local_var_params['success_message'])) # noqa: E501
if 'spam_check' in local_var_params and local_var_params['spam_check'] is not None: # noqa: E501
query_params.append(('_spamCheck', local_var_params['spam_check'])) # noqa: E501
if 'other_parameters' in local_var_params and local_var_params['other_parameters'] is not None: # noqa: E501
query_params.append(('otherParameters', local_var_params['other_parameters'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['API_KEY'] # noqa: E501
return self.api_client.call_api(
'/forms', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='str', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
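# Usage sketch (an assumption, not part of the generated client): one way this
# controller might be called. The API key value and email addresses below are
# placeholders, and the Configuration/api_key pattern is the usual
# OpenAPI-generated one.
#
# import mailslurp_client
#
# configuration = mailslurp_client.Configuration()
# configuration.api_key['x-api-key'] = 'YOUR_API_KEY'
# with mailslurp_client.ApiClient(configuration) as api_client:
#     form_api = FormControllerApi(api_client)
#     # Sends the submitted fields as an email to the `_to` address
#     result = form_api.submit_form(
#         to='[email protected]',
#         subject='Contact form submission',
#         spam_check='')  # honeypot field: leave empty
#     print(result)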
|
normal
|
{
"blob_id": "a4ccf373695b7df60039bc8f6440a6ad43d265c1",
"index": 3750,
"step-1": "<mask token>\n\n\nclass FormControllerApi(object):\n <mask token>\n <mask token>\n <mask token>\n\n def submit_form_with_http_info(self, **kwargs):\n \"\"\"Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501\n\n This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `[email protected]` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\"https://python.api.mailslurp.com/forms\" method=\"post\" > <input name=\"_to\" type=\"hidden\" value=\"[email protected]\"/> <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` #### URL Example ```html <form action=\"https://python.api.mailslurp.com/[email protected]\" method=\"post\" > <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\"multipart/form-data\">`. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.submit_form_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str to: The email address that submitted form should be sent to.\n :param str subject: Optional subject of the email that will be sent.\n :param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.\n :param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.\n :param str success_message: Optional success message to display if no _redirectTo present.\n :param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.\n :param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. 
It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(str, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n local_var_params = locals()\n all_params = ['to', 'subject', 'redirect_to', 'email_address',\n 'success_message', 'spam_check', 'other_parameters']\n all_params.extend(['async_req', '_return_http_data_only',\n '_preload_content', '_request_timeout'])\n for key, val in six.iteritems(local_var_params['kwargs']):\n if key not in all_params:\n raise ApiTypeError(\n \"Got an unexpected keyword argument '%s' to method submit_form\"\n % key)\n local_var_params[key] = val\n del local_var_params['kwargs']\n collection_formats = {}\n path_params = {}\n query_params = []\n if 'to' in local_var_params and local_var_params['to'] is not None:\n query_params.append(('_to', local_var_params['to']))\n if 'subject' in local_var_params and local_var_params['subject'\n ] is not None:\n query_params.append(('_subject', local_var_params['subject']))\n if 'redirect_to' in local_var_params and local_var_params['redirect_to'\n ] is not None:\n query_params.append(('_redirectTo', local_var_params[\n 'redirect_to']))\n if 'email_address' in local_var_params and local_var_params[\n 'email_address'] is not None:\n query_params.append(('_emailAddress', local_var_params[\n 'email_address']))\n if 'success_message' in local_var_params and local_var_params[\n 'success_message'] is not None:\n query_params.append(('_successMessage', local_var_params[\n 'success_message']))\n if 'spam_check' in local_var_params and local_var_params['spam_check'\n ] is not None:\n query_params.append(('_spamCheck', local_var_params['spam_check']))\n if 'other_parameters' in local_var_params and local_var_params[\n 'other_parameters'] is not None:\n query_params.append(('otherParameters', local_var_params[\n 'other_parameters']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n auth_settings = ['API_KEY']\n return self.api_client.call_api('/forms', 'POST', path_params,\n query_params, header_params, body=body_params, post_params=\n form_params, files=local_var_files, response_type='str',\n auth_settings=auth_settings, async_req=local_var_params.get(\n 'async_req'), _return_http_data_only=local_var_params.get(\n '_return_http_data_only'), _preload_content=local_var_params.\n get('_preload_content', True), _request_timeout=\n local_var_params.get('_request_timeout'), collection_formats=\n collection_formats)\n",
"step-2": "<mask token>\n\n\nclass FormControllerApi(object):\n <mask token>\n <mask token>\n\n def submit_form(self, **kwargs):\n \"\"\"Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501\n\n This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `[email protected]` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\"https://python.api.mailslurp.com/forms\" method=\"post\" > <input name=\"_to\" type=\"hidden\" value=\"[email protected]\"/> <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` #### URL Example ```html <form action=\"https://python.api.mailslurp.com/[email protected]\" method=\"post\" > <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\"multipart/form-data\">`. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.submit_form(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str to: The email address that submitted form should be sent to.\n :param str subject: Optional subject of the email that will be sent.\n :param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.\n :param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.\n :param str success_message: Optional success message to display if no _redirectTo present.\n :param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.\n :param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. 
It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: str\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n return self.submit_form_with_http_info(**kwargs)\n\n def submit_form_with_http_info(self, **kwargs):\n \"\"\"Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501\n\n This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `[email protected]` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\"https://python.api.mailslurp.com/forms\" method=\"post\" > <input name=\"_to\" type=\"hidden\" value=\"[email protected]\"/> <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` #### URL Example ```html <form action=\"https://python.api.mailslurp.com/[email protected]\" method=\"post\" > <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\"multipart/form-data\">`. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.submit_form_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str to: The email address that submitted form should be sent to.\n :param str subject: Optional subject of the email that will be sent.\n :param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.\n :param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.\n :param str success_message: Optional success message to display if no _redirectTo present.\n :param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.\n :param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. 
Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(str, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n local_var_params = locals()\n all_params = ['to', 'subject', 'redirect_to', 'email_address',\n 'success_message', 'spam_check', 'other_parameters']\n all_params.extend(['async_req', '_return_http_data_only',\n '_preload_content', '_request_timeout'])\n for key, val in six.iteritems(local_var_params['kwargs']):\n if key not in all_params:\n raise ApiTypeError(\n \"Got an unexpected keyword argument '%s' to method submit_form\"\n % key)\n local_var_params[key] = val\n del local_var_params['kwargs']\n collection_formats = {}\n path_params = {}\n query_params = []\n if 'to' in local_var_params and local_var_params['to'] is not None:\n query_params.append(('_to', local_var_params['to']))\n if 'subject' in local_var_params and local_var_params['subject'\n ] is not None:\n query_params.append(('_subject', local_var_params['subject']))\n if 'redirect_to' in local_var_params and local_var_params['redirect_to'\n ] is not None:\n query_params.append(('_redirectTo', local_var_params[\n 'redirect_to']))\n if 'email_address' in local_var_params and local_var_params[\n 'email_address'] is not None:\n query_params.append(('_emailAddress', local_var_params[\n 'email_address']))\n if 'success_message' in local_var_params and local_var_params[\n 'success_message'] is not None:\n query_params.append(('_successMessage', local_var_params[\n 'success_message']))\n if 'spam_check' in local_var_params and local_var_params['spam_check'\n ] is not None:\n query_params.append(('_spamCheck', local_var_params['spam_check']))\n if 'other_parameters' in local_var_params and local_var_params[\n 'other_parameters'] is not None:\n query_params.append(('otherParameters', local_var_params[\n 'other_parameters']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n auth_settings = ['API_KEY']\n return self.api_client.call_api('/forms', 'POST', path_params,\n query_params, header_params, body=body_params, post_params=\n form_params, files=local_var_files, response_type='str',\n auth_settings=auth_settings, async_req=local_var_params.get(\n 'async_req'), _return_http_data_only=local_var_params.get(\n '_return_http_data_only'), _preload_content=local_var_params.\n get('_preload_content', True), _request_timeout=\n local_var_params.get('_request_timeout'), collection_formats=\n collection_formats)\n",
"step-3": "<mask token>\n\n\nclass FormControllerApi(object):\n <mask token>\n\n def __init__(self, api_client=None):\n if api_client is None:\n api_client = ApiClient()\n self.api_client = api_client\n\n def submit_form(self, **kwargs):\n \"\"\"Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501\n\n This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `[email protected]` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\"https://python.api.mailslurp.com/forms\" method=\"post\" > <input name=\"_to\" type=\"hidden\" value=\"[email protected]\"/> <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` #### URL Example ```html <form action=\"https://python.api.mailslurp.com/[email protected]\" method=\"post\" > <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\"multipart/form-data\">`. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.submit_form(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str to: The email address that submitted form should be sent to.\n :param str subject: Optional subject of the email that will be sent.\n :param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.\n :param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.\n :param str success_message: Optional success message to display if no _redirectTo present.\n :param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.\n :param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. 
It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: str\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n return self.submit_form_with_http_info(**kwargs)\n\n def submit_form_with_http_info(self, **kwargs):\n \"\"\"Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501\n\n This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `[email protected]` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\"https://python.api.mailslurp.com/forms\" method=\"post\" > <input name=\"_to\" type=\"hidden\" value=\"[email protected]\"/> <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` #### URL Example ```html <form action=\"https://python.api.mailslurp.com/[email protected]\" method=\"post\" > <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\"multipart/form-data\">`. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.submit_form_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str to: The email address that submitted form should be sent to.\n :param str subject: Optional subject of the email that will be sent.\n :param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.\n :param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.\n :param str success_message: Optional success message to display if no _redirectTo present.\n :param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.\n :param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. 
Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(str, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n local_var_params = locals()\n all_params = ['to', 'subject', 'redirect_to', 'email_address',\n 'success_message', 'spam_check', 'other_parameters']\n all_params.extend(['async_req', '_return_http_data_only',\n '_preload_content', '_request_timeout'])\n for key, val in six.iteritems(local_var_params['kwargs']):\n if key not in all_params:\n raise ApiTypeError(\n \"Got an unexpected keyword argument '%s' to method submit_form\"\n % key)\n local_var_params[key] = val\n del local_var_params['kwargs']\n collection_formats = {}\n path_params = {}\n query_params = []\n if 'to' in local_var_params and local_var_params['to'] is not None:\n query_params.append(('_to', local_var_params['to']))\n if 'subject' in local_var_params and local_var_params['subject'\n ] is not None:\n query_params.append(('_subject', local_var_params['subject']))\n if 'redirect_to' in local_var_params and local_var_params['redirect_to'\n ] is not None:\n query_params.append(('_redirectTo', local_var_params[\n 'redirect_to']))\n if 'email_address' in local_var_params and local_var_params[\n 'email_address'] is not None:\n query_params.append(('_emailAddress', local_var_params[\n 'email_address']))\n if 'success_message' in local_var_params and local_var_params[\n 'success_message'] is not None:\n query_params.append(('_successMessage', local_var_params[\n 'success_message']))\n if 'spam_check' in local_var_params and local_var_params['spam_check'\n ] is not None:\n query_params.append(('_spamCheck', local_var_params['spam_check']))\n if 'other_parameters' in local_var_params and local_var_params[\n 'other_parameters'] is not None:\n query_params.append(('otherParameters', local_var_params[\n 'other_parameters']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n auth_settings = ['API_KEY']\n return self.api_client.call_api('/forms', 'POST', path_params,\n query_params, header_params, body=body_params, post_params=\n form_params, files=local_var_files, response_type='str',\n auth_settings=auth_settings, async_req=local_var_params.get(\n 'async_req'), _return_http_data_only=local_var_params.get(\n '_return_http_data_only'), _preload_content=local_var_params.\n get('_preload_content', True), _request_timeout=\n local_var_params.get('_request_timeout'), collection_formats=\n collection_formats)\n",
"step-4": "<mask token>\n\n\nclass FormControllerApi(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n def __init__(self, api_client=None):\n if api_client is None:\n api_client = ApiClient()\n self.api_client = api_client\n\n def submit_form(self, **kwargs):\n \"\"\"Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501\n\n This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `[email protected]` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\"https://python.api.mailslurp.com/forms\" method=\"post\" > <input name=\"_to\" type=\"hidden\" value=\"[email protected]\"/> <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` #### URL Example ```html <form action=\"https://python.api.mailslurp.com/[email protected]\" method=\"post\" > <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\"multipart/form-data\">`. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.submit_form(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str to: The email address that submitted form should be sent to.\n :param str subject: Optional subject of the email that will be sent.\n :param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.\n :param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.\n :param str success_message: Optional success message to display if no _redirectTo present.\n :param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.\n :param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. 
Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: str\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n return self.submit_form_with_http_info(**kwargs)\n\n def submit_form_with_http_info(self, **kwargs):\n \"\"\"Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501\n\n This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `[email protected]` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\"https://python.api.mailslurp.com/forms\" method=\"post\" > <input name=\"_to\" type=\"hidden\" value=\"[email protected]\"/> <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` #### URL Example ```html <form action=\"https://python.api.mailslurp.com/[email protected]\" method=\"post\" > <textarea name=\"feedback\"></textarea> <button type=\"submit\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\"multipart/form-data\">`. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.submit_form_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str to: The email address that submitted form should be sent to.\n :param str subject: Optional subject of the email that will be sent.\n :param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.\n :param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.\n :param str success_message: Optional success message to display if no _redirectTo present.\n :param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.\n :param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. 
These fields will become the body of the email that is sent.\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(str, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n local_var_params = locals()\n all_params = ['to', 'subject', 'redirect_to', 'email_address',\n 'success_message', 'spam_check', 'other_parameters']\n all_params.extend(['async_req', '_return_http_data_only',\n '_preload_content', '_request_timeout'])\n for key, val in six.iteritems(local_var_params['kwargs']):\n if key not in all_params:\n raise ApiTypeError(\n \"Got an unexpected keyword argument '%s' to method submit_form\"\n % key)\n local_var_params[key] = val\n del local_var_params['kwargs']\n collection_formats = {}\n path_params = {}\n query_params = []\n if 'to' in local_var_params and local_var_params['to'] is not None:\n query_params.append(('_to', local_var_params['to']))\n if 'subject' in local_var_params and local_var_params['subject'\n ] is not None:\n query_params.append(('_subject', local_var_params['subject']))\n if 'redirect_to' in local_var_params and local_var_params['redirect_to'\n ] is not None:\n query_params.append(('_redirectTo', local_var_params[\n 'redirect_to']))\n if 'email_address' in local_var_params and local_var_params[\n 'email_address'] is not None:\n query_params.append(('_emailAddress', local_var_params[\n 'email_address']))\n if 'success_message' in local_var_params and local_var_params[\n 'success_message'] is not None:\n query_params.append(('_successMessage', local_var_params[\n 'success_message']))\n if 'spam_check' in local_var_params and local_var_params['spam_check'\n ] is not None:\n query_params.append(('_spamCheck', local_var_params['spam_check']))\n if 'other_parameters' in local_var_params and local_var_params[\n 'other_parameters'] is not None:\n query_params.append(('otherParameters', local_var_params[\n 'other_parameters']))\n header_params = {}\n form_params = []\n local_var_files = {}\n body_params = None\n header_params['Accept'] = self.api_client.select_header_accept(['*/*'])\n auth_settings = ['API_KEY']\n return self.api_client.call_api('/forms', 'POST', path_params,\n query_params, header_params, body=body_params, post_params=\n form_params, files=local_var_files, response_type='str',\n auth_settings=auth_settings, async_req=local_var_params.get(\n 'async_req'), _return_http_data_only=local_var_params.get(\n '_return_http_data_only'), _preload_content=local_var_params.\n get('_preload_content', True), _request_timeout=\n local_var_params.get('_request_timeout'), collection_formats=\n collection_formats)\n",
"step-5": "# coding: utf-8\n\n\"\"\"\n MailSlurp API\n\n MailSlurp is an API for sending and receiving emails from dynamically allocated email addresses. It's designed for developers and QA teams to test applications, process inbound emails, send templated notifications, attachments, and more. ## Resources - [Homepage](https://www.mailslurp.com) - Get an [API KEY](https://app.mailslurp.com/sign-up/) - Generated [SDK Clients](https://docs.mailslurp.com/) - [Examples](https://github.com/mailslurp/examples) repository # noqa: E501\n\n The version of the OpenAPI document: 6.5.2\n Contact: [email protected]\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nfrom __future__ import absolute_import\n\nimport re # noqa: F401\n\n# python 2 and python 3 compatibility library\nimport six\n\nfrom mailslurp_client.api_client import ApiClient\nfrom mailslurp_client.exceptions import ( # noqa: F401\n ApiTypeError,\n ApiValueError\n)\n\n\nclass FormControllerApi(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n def __init__(self, api_client=None):\n if api_client is None:\n api_client = ApiClient()\n self.api_client = api_client\n\n def submit_form(self, **kwargs): # noqa: E501\n \"\"\"Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501\n\n This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `[email protected]` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\\\"https://python.api.mailslurp.com/forms\\\" method=\\\"post\\\" > <input name=\\\"_to\\\" type=\\\"hidden\\\" value=\\\"[email protected]\\\"/> <textarea name=\\\"feedback\\\"></textarea> <button type=\\\"submit\\\">Submit</button> </form> ``` #### URL Example ```html <form action=\\\"https://python.api.mailslurp.com/[email protected]\\\" method=\\\"post\\\" > <textarea name=\\\"feedback\\\"></textarea> <button type=\\\"submit\\\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\\\"multipart/form-data\\\">`. # noqa: E501\n This method makes a synchronous HTTP request by default. 
To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.submit_form(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str to: The email address that submitted form should be sent to.\n :param str subject: Optional subject of the email that will be sent.\n :param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.\n :param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.\n :param str success_message: Optional success message to display if no _redirectTo present.\n :param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.\n :param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: str\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n kwargs['_return_http_data_only'] = True\n return self.submit_form_with_http_info(**kwargs) # noqa: E501\n\n def submit_form_with_http_info(self, **kwargs): # noqa: E501\n \"\"\"Submit a form to be parsed and sent as an email to an address determined by the form fields # noqa: E501\n\n This endpoint allows you to submit HTML forms and receive the field values and files via email. #### Parameters The endpoint looks for special meta parameters in the form fields OR in the URL request parameters. The meta parameters can be used to specify the behaviour of the email. You must provide at-least a `_to` email address to tell the endpoint where the form should be emailed. These can be submitted as hidden HTML input fields with the corresponding `name` attributes or as URL query parameters such as `[email protected]` The endpoint takes all other form fields that are named and includes them in the message body of the email. Files are sent as attachments. #### Submitting This endpoint accepts form submission via POST method. It accepts `application/x-www-form-urlencoded`, and `multipart/form-data` content-types. #### HTML Example ```html <form action=\\\"https://python.api.mailslurp.com/forms\\\" method=\\\"post\\\" > <input name=\\\"_to\\\" type=\\\"hidden\\\" value=\\\"[email protected]\\\"/> <textarea name=\\\"feedback\\\"></textarea> <button type=\\\"submit\\\">Submit</button> </form> ``` #### URL Example ```html <form action=\\\"https://python.api.mailslurp.com/[email protected]\\\" method=\\\"post\\\" > <textarea name=\\\"feedback\\\"></textarea> <button type=\\\"submit\\\">Submit</button> </form> ``` The email address is specified by a `_to` field OR is extracted from an email alias specified by a `_toAlias` field (see the alias controller for more information). Endpoint accepts . 
You can specify a content type in HTML forms using the `enctype` attribute, for instance: `<form enctype=\\\"multipart/form-data\\\">`. # noqa: E501\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n >>> thread = api.submit_form_with_http_info(async_req=True)\n >>> result = thread.get()\n\n :param async_req bool: execute request asynchronously\n :param str to: The email address that submitted form should be sent to.\n :param str subject: Optional subject of the email that will be sent.\n :param str redirect_to: Optional URL to redirect form submitter to after submission. If not present user will see a success message.\n :param str email_address: Email address of the submitting user. Include this if you wish to record the submitters email address and reply to it later.\n :param str success_message: Optional success message to display if no _redirectTo present.\n :param str spam_check: Optional but recommended field that catches spammers out. Include as a hidden form field but LEAVE EMPTY. Spam-bots will usually fill every field. If the _spamCheck field is filled the form submission will be ignored.\n :param str other_parameters: All other parameters or fields will be accepted and attached to the sent email. This includes files and any HTML form field with a name. These fields will become the body of the email that is sent.\n :param _return_http_data_only: response data without head status code\n and headers\n :param _preload_content: if False, the urllib3.HTTPResponse object will\n be returned without reading/decoding response\n data. Default is True.\n :param _request_timeout: timeout setting for this request. If one\n number provided, it will be total request\n timeout. It can also be a pair (tuple) of\n (connection, read) timeouts.\n :return: tuple(str, status_code(int), headers(HTTPHeaderDict))\n If the method is called asynchronously,\n returns the request thread.\n \"\"\"\n\n local_var_params = locals()\n\n all_params = [\n 'to',\n 'subject',\n 'redirect_to',\n 'email_address',\n 'success_message',\n 'spam_check',\n 'other_parameters'\n ]\n all_params.extend(\n [\n 'async_req',\n '_return_http_data_only',\n '_preload_content',\n '_request_timeout'\n ]\n )\n\n for key, val in six.iteritems(local_var_params['kwargs']):\n if key not in all_params:\n raise ApiTypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method submit_form\" % key\n )\n local_var_params[key] = val\n del local_var_params['kwargs']\n\n collection_formats = {}\n\n path_params = {}\n\n query_params = []\n if 'to' in local_var_params and local_var_params['to'] is not None: # noqa: E501\n query_params.append(('_to', local_var_params['to'])) # noqa: E501\n if 'subject' in local_var_params and local_var_params['subject'] is not None: # noqa: E501\n query_params.append(('_subject', local_var_params['subject'])) # noqa: E501\n if 'redirect_to' in local_var_params and local_var_params['redirect_to'] is not None: # noqa: E501\n query_params.append(('_redirectTo', local_var_params['redirect_to'])) # noqa: E501\n if 'email_address' in local_var_params and local_var_params['email_address'] is not None: # noqa: E501\n query_params.append(('_emailAddress', local_var_params['email_address'])) # noqa: E501\n if 'success_message' in local_var_params and local_var_params['success_message'] is not None: # noqa: E501\n query_params.append(('_successMessage', local_var_params['success_message'])) # noqa: E501\n if 'spam_check' in local_var_params and 
local_var_params['spam_check'] is not None: # noqa: E501\n query_params.append(('_spamCheck', local_var_params['spam_check'])) # noqa: E501\n if 'other_parameters' in local_var_params and local_var_params['other_parameters'] is not None: # noqa: E501\n query_params.append(('otherParameters', local_var_params['other_parameters'])) # noqa: E501\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.select_header_accept(\n ['*/*']) # noqa: E501\n\n # Authentication setting\n auth_settings = ['API_KEY'] # noqa: E501\n\n return self.api_client.call_api(\n '/forms', 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='str', # noqa: E501\n auth_settings=auth_settings,\n async_req=local_var_params.get('async_req'),\n _return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501\n _preload_content=local_var_params.get('_preload_content', True),\n _request_timeout=local_var_params.get('_request_timeout'),\n collection_formats=collection_formats)\n",
"step-ids": [
2,
3,
4,
5,
7
]
}
|
[
2,
3,
4,
5,
7
] |
#!/usr/bin/python
import sys
# SGR escape codes: reset attributes first ('0'), then set the foreground color
BLACK = '\033[0;30m'
RED = '\033[0;31m'
GREEN = '\033[0;32m'
YELLOW = '\033[0;33m'
BLUE = '\033[0;34m'
PINK = '\033[0;35m'
CBLUE = '\033[0;36m'
WHITE = '\033[0;37m'
def colorPrint(color, str):
    # Print the text in the given color, then reset terminal attributes
    print(color + str + '\033[0m')
def main():
    if len(sys.argv) < 2:
print('Wrong usage, exit')
return
colorPrint(YELLOW, sys.argv[1])
if __name__ == '__main__':
main()
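# Usage sketch (assuming the script is saved as colorprint.py):
#
#   $ python colorprint.py "hello"    # prints "hello" in yellow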
|
normal
|
{
"blob_id": "a49c00dab8d445ce0b08fd31a4a41d6c8976d662",
"index": 2263,
"step-1": "<mask token>\n\n\ndef colorPrint(color, str):\n print(color + str + '\\x1b[0m')\n\n\ndef main():\n if sys.argv.__len__() < 2:\n print('Wrong usage, exit')\n return\n colorPrint(YELLOW, sys.argv[1])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef colorPrint(color, str):\n print(color + str + '\\x1b[0m')\n\n\ndef main():\n if sys.argv.__len__() < 2:\n print('Wrong usage, exit')\n return\n colorPrint(YELLOW, sys.argv[1])\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nBLACK = '\\x1b[30;0m'\nRED = '\\x1b[31;0m'\nGREEN = '\\x1b[32;0m'\nYELLOW = '\\x1b[33;0m'\nBLUE = '\\x1b[34;0m'\nPINK = '\\x1b[35;0m'\nCBLUE = '\\x1b[36;0m'\nWHITE = '\\x1b[37;0m'\n\n\ndef colorPrint(color, str):\n print(color + str + '\\x1b[0m')\n\n\ndef main():\n if sys.argv.__len__() < 2:\n print('Wrong usage, exit')\n return\n colorPrint(YELLOW, sys.argv[1])\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import sys\nBLACK = '\\x1b[30;0m'\nRED = '\\x1b[31;0m'\nGREEN = '\\x1b[32;0m'\nYELLOW = '\\x1b[33;0m'\nBLUE = '\\x1b[34;0m'\nPINK = '\\x1b[35;0m'\nCBLUE = '\\x1b[36;0m'\nWHITE = '\\x1b[37;0m'\n\n\ndef colorPrint(color, str):\n print(color + str + '\\x1b[0m')\n\n\ndef main():\n if sys.argv.__len__() < 2:\n print('Wrong usage, exit')\n return\n colorPrint(YELLOW, sys.argv[1])\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/python\nimport sys\n\nBLACK = '\\033[30;0m'\nRED = '\\033[31;0m'\nGREEN = '\\033[32;0m'\nYELLOW = '\\033[33;0m'\nBLUE = '\\033[34;0m'\nPINK = '\\033[35;0m'\nCBLUE = '\\033[36;0m'\nWHITE = '\\033[37;0m'\n\ndef colorPrint(color, str):\n print(color + str + '\\033[0m');\n\ndef main():\n if sys.argv.__len__() < 2:\n print('Wrong usage, exit')\n return\n colorPrint(YELLOW, sys.argv[1])\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from urllib import request
from urllib import error
from urllib.request import urlretrieve
import os, re
from bs4 import BeautifulSoup
import configparser
from apng2gif import apng2gif
config = configparser.ConfigParser()
config.read('crawler.config')
# Download destination
directoryLocation = os.getcwd() + '\\img'
# Pages to crawl
urlList = config['lineStoreUrl']['url'].split(',')
downLoadType = 'stickers'
# Set the User-Agent header (the header name takes a hyphen, not an underscore)
headers = ("User-Agent",
           "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0")
# Build a custom opener so every request carries the header
opener = request.build_opener()
opener.addheaders = [headers]
request.install_opener(opener)
def saveImg(imgurl, downLoadType):
    # os.path.join keeps the separators portable instead of hard-coding Windows backslashes
    fileLocation = os.path.join(directoryLocation, downLoadType, title)
    if not os.path.exists(fileLocation):
        os.makedirs(fileLocation)
    file = os.path.join(fileLocation, str(count + 1) + ".png")
    urlretrieve(imgurl, filename=file)
    return file
def getTitle(content):
soup = BeautifulSoup(content, 'html.parser')
title = soup.find('p', 'mdCMN38Item01Ttl').text
return title
def downloadImageList(imgurl):
# if animationUrl download animation png ,else download imageurl
animationUrl = imgurl[:-7] + '[email protected]'
try:
        file = saveImg(animationUrl, 'animated')
apng2gif(file)
except error.URLError as err:
saveImg(imgurl, downLoadType)
for i in range(0, len(urlList)):
    downLoadType = 'stickers'
    content = request.urlopen(urlList[i]).read().decode("utf-8", "ignore")
    rule = r'(https.*sticker@2x\.png)' # regex for static sticker images
    ruleEmoji = r'(https.*/\d{3}\.png)'
    title = getTitle(content)
    title = re.sub(r'\s', '', title)
    title = re.sub(r'[\W_]+', '', title)
    print('Start downloading ' + title)
    imglist = re.compile(rule).findall(content) # collect image URL list
    if len(imglist) == 0:
        imglist = re.compile(ruleEmoji).findall(content) # fall back to the emoji pattern
        downLoadType = 'emoji'
    for count in range(0, len(imglist)):
        downloadImageList(imglist[count])
        print('Image', count + 1, 'downloaded!')
print("All downloads complete")
|
normal
|
{
"blob_id": "7bcdd6c5c6e41b076e476e1db35b663e34d74a67",
"index": 1885,
"step-1": "<mask token>\n\n\ndef saveImg(imgurl, downLoadType):\n fileLocation = directoryLocation + '\\\\' + downLoadType + '\\\\' + title\n if not os.path.exists(fileLocation):\n os.makedirs(fileLocation)\n file = fileLocation + '\\\\' + str(count + 1) + '.png'\n urlretrieve(imgurl, filename=file)\n return file\n\n\ndef getTitle(content):\n soup = BeautifulSoup(content, 'html.parser')\n title = soup.find('p', 'mdCMN38Item01Ttl').text\n return title\n\n\ndef downloadImageList(imgurl):\n animationUrl = imgurl[:-7] + '[email protected]'\n try:\n file = saveImg(animationUrl, '動圖')\n apng2gif(file)\n except error.URLError as err:\n saveImg(imgurl, downLoadType)\n\n\n<mask token>\n",
"step-2": "<mask token>\nconfig.read('crawler.config')\n<mask token>\nrequest.install_opener(opener)\n\n\ndef saveImg(imgurl, downLoadType):\n fileLocation = directoryLocation + '\\\\' + downLoadType + '\\\\' + title\n if not os.path.exists(fileLocation):\n os.makedirs(fileLocation)\n file = fileLocation + '\\\\' + str(count + 1) + '.png'\n urlretrieve(imgurl, filename=file)\n return file\n\n\ndef getTitle(content):\n soup = BeautifulSoup(content, 'html.parser')\n title = soup.find('p', 'mdCMN38Item01Ttl').text\n return title\n\n\ndef downloadImageList(imgurl):\n animationUrl = imgurl[:-7] + '[email protected]'\n try:\n file = saveImg(animationUrl, '動圖')\n apng2gif(file)\n except error.URLError as err:\n saveImg(imgurl, downLoadType)\n\n\nfor i in range(0, len(urlList)):\n downLoadType = '貼圖'\n content = request.urlopen(urlList[i]).read().decode('utf-8', 'ignore')\n rule = '(https.*sticker@2x\\\\.png)'\n ruleEmoji = '(https.*/\\\\d{3}\\\\.png)'\n title = getTitle(content)\n title = re.sub('\\\\s', '', title)\n title = re.sub('[\\\\W_]+', '', title)\n print('開始下載 ' + title)\n imglist = re.compile(rule).findall(content)\n if len(imglist) == 0:\n imglist = re.compile(ruleEmoji).findall(content)\n downLoadType = '小表情'\n for count in range(0, len(imglist)):\n imgurl = downloadImageList(imglist[count])\n print('第', count + 1, '張下載完成!')\nprint('已全部下載完成')\n",
"step-3": "<mask token>\nconfig = configparser.ConfigParser()\nconfig.read('crawler.config')\ndirectoryLocation = os.getcwd() + '\\\\img'\nurlList = config['lineStoreUrl']['url'].split(',')\ndownLoadType = '貼圖'\nheaders = ('User_Agent',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0'\n )\nopener = request.build_opener()\nopener.addheaders = [headers]\nrequest.install_opener(opener)\n\n\ndef saveImg(imgurl, downLoadType):\n fileLocation = directoryLocation + '\\\\' + downLoadType + '\\\\' + title\n if not os.path.exists(fileLocation):\n os.makedirs(fileLocation)\n file = fileLocation + '\\\\' + str(count + 1) + '.png'\n urlretrieve(imgurl, filename=file)\n return file\n\n\ndef getTitle(content):\n soup = BeautifulSoup(content, 'html.parser')\n title = soup.find('p', 'mdCMN38Item01Ttl').text\n return title\n\n\ndef downloadImageList(imgurl):\n animationUrl = imgurl[:-7] + '[email protected]'\n try:\n file = saveImg(animationUrl, '動圖')\n apng2gif(file)\n except error.URLError as err:\n saveImg(imgurl, downLoadType)\n\n\nfor i in range(0, len(urlList)):\n downLoadType = '貼圖'\n content = request.urlopen(urlList[i]).read().decode('utf-8', 'ignore')\n rule = '(https.*sticker@2x\\\\.png)'\n ruleEmoji = '(https.*/\\\\d{3}\\\\.png)'\n title = getTitle(content)\n title = re.sub('\\\\s', '', title)\n title = re.sub('[\\\\W_]+', '', title)\n print('開始下載 ' + title)\n imglist = re.compile(rule).findall(content)\n if len(imglist) == 0:\n imglist = re.compile(ruleEmoji).findall(content)\n downLoadType = '小表情'\n for count in range(0, len(imglist)):\n imgurl = downloadImageList(imglist[count])\n print('第', count + 1, '張下載完成!')\nprint('已全部下載完成')\n",
"step-4": "from urllib import request\nfrom urllib import error\nfrom urllib.request import urlretrieve\nimport os, re\nfrom bs4 import BeautifulSoup\nimport configparser\nfrom apng2gif import apng2gif\nconfig = configparser.ConfigParser()\nconfig.read('crawler.config')\ndirectoryLocation = os.getcwd() + '\\\\img'\nurlList = config['lineStoreUrl']['url'].split(',')\ndownLoadType = '貼圖'\nheaders = ('User_Agent',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0'\n )\nopener = request.build_opener()\nopener.addheaders = [headers]\nrequest.install_opener(opener)\n\n\ndef saveImg(imgurl, downLoadType):\n fileLocation = directoryLocation + '\\\\' + downLoadType + '\\\\' + title\n if not os.path.exists(fileLocation):\n os.makedirs(fileLocation)\n file = fileLocation + '\\\\' + str(count + 1) + '.png'\n urlretrieve(imgurl, filename=file)\n return file\n\n\ndef getTitle(content):\n soup = BeautifulSoup(content, 'html.parser')\n title = soup.find('p', 'mdCMN38Item01Ttl').text\n return title\n\n\ndef downloadImageList(imgurl):\n animationUrl = imgurl[:-7] + '[email protected]'\n try:\n file = saveImg(animationUrl, '動圖')\n apng2gif(file)\n except error.URLError as err:\n saveImg(imgurl, downLoadType)\n\n\nfor i in range(0, len(urlList)):\n downLoadType = '貼圖'\n content = request.urlopen(urlList[i]).read().decode('utf-8', 'ignore')\n rule = '(https.*sticker@2x\\\\.png)'\n ruleEmoji = '(https.*/\\\\d{3}\\\\.png)'\n title = getTitle(content)\n title = re.sub('\\\\s', '', title)\n title = re.sub('[\\\\W_]+', '', title)\n print('開始下載 ' + title)\n imglist = re.compile(rule).findall(content)\n if len(imglist) == 0:\n imglist = re.compile(ruleEmoji).findall(content)\n downLoadType = '小表情'\n for count in range(0, len(imglist)):\n imgurl = downloadImageList(imglist[count])\n print('第', count + 1, '張下載完成!')\nprint('已全部下載完成')\n",
"step-5": "from urllib import request\nfrom urllib import error\nfrom urllib.request import urlretrieve\nimport os, re\nfrom bs4 import BeautifulSoup\nimport configparser\nfrom apng2gif import apng2gif\n\nconfig = configparser.ConfigParser()\nconfig.read('crawler.config')\n# 下載儲存位置\ndirectoryLocation = os.getcwd() + '\\\\img'\n# 設置要爬的頁面\nurlList = config['lineStoreUrl']['url'].split(',')\ndownLoadType = '貼圖'\n\n# 設置User-Agent\nheaders = (\"User_Agent\",\n \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0\")\n# 自定義opener\nopener = request.build_opener()\nopener.addheaders = [headers]\nrequest.install_opener(opener)\n\n\ndef saveImg(imgurl, downLoadType):\n fileLocation = directoryLocation + \"\\\\\" + downLoadType + \"\\\\\" + title\n if not os.path.exists(fileLocation):\n os.makedirs(fileLocation)\n file = fileLocation + \"\\\\\" + str(count + 1) + \".png\"\n urlretrieve(imgurl, filename=file)\n return file\n\n\ndef getTitle(content):\n soup = BeautifulSoup(content, 'html.parser')\n title = soup.find('p', 'mdCMN38Item01Ttl').text\n return title\n\n\ndef downloadImageList(imgurl):\n # if animationUrl download animation png ,else download imageurl\n animationUrl = imgurl[:-7] + '[email protected]'\n try:\n file = saveImg(animationUrl, '動圖')\n apng2gif(file)\n except error.URLError as err:\n saveImg(imgurl, downLoadType)\n\n\nfor i in range(0, len(urlList)):\n downLoadType = '貼圖'\n content = request.urlopen(urlList[i]).read().decode(\"utf-8\", \"ignore\")\n rule = '(https.*sticker@2x\\.png)' # 正則匹配\n ruleEmoji = '(https.*/\\d{3}\\.png)'\n title = getTitle(content)\n title = re.sub('\\s', '', title)\n title = re.sub('[\\W_]+', '', title)\n\n print('開始下載 ' + title)\n imglist = re.compile(rule).findall(content) # 獲取圖片列表\n if len(imglist) == 0:\n imglist = re.compile(ruleEmoji).findall(content) # 小表情規則\n downLoadType = '小表情'\n for count in range(0, len(imglist)):\n imgurl = downloadImageList(imglist[count])\n\n print('第', count + 1, '張下載完成!')\nprint(\"已全部下載完成\")\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
from pythonforandroid.recipe import CompiledComponentsPythonRecipe
from multiprocessing import cpu_count
from os.path import join
class NumpyRecipe(CompiledComponentsPythonRecipe):
version = '1.18.1'
url = 'https://pypi.python.org/packages/source/n/numpy/numpy-{version}.zip'
site_packages_name = 'numpy'
depends = ['setuptools', 'cython']
patches = [
join('patches', 'add_libm_explicitly_to_build.patch'),
join('patches', 'do_not_use_system_libs.patch'),
join('patches', 'remove_unittest_call.patch'),
]
call_hostpython_via_targetpython = False
def build_compiled_components(self, arch):
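        # Forward "-j <ncpus>" to numpy's setup.py so the C extensions compile in parallel.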
self.setup_extra_args = ['-j', str(cpu_count())]
super().build_compiled_components(arch)
self.setup_extra_args = []
def rebuild_compiled_components(self, arch, env):
self.setup_extra_args = ['-j', str(cpu_count())]
super().rebuild_compiled_components(arch, env)
self.setup_extra_args = []
recipe = NumpyRecipe()
|
normal
|
{
"blob_id": "610610e7e49fc98927a4894efe62686e26e0cb83",
"index": 3502,
"step-1": "<mask token>\n\n\nclass NumpyRecipe(CompiledComponentsPythonRecipe):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def build_compiled_components(self, arch):\n self.setup_extra_args = ['-j', str(cpu_count())]\n super().build_compiled_components(arch)\n self.setup_extra_args = []\n\n def rebuild_compiled_components(self, arch, env):\n self.setup_extra_args = ['-j', str(cpu_count())]\n super().rebuild_compiled_components(arch, env)\n self.setup_extra_args = []\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass NumpyRecipe(CompiledComponentsPythonRecipe):\n version = '1.18.1'\n url = 'https://pypi.python.org/packages/source/n/numpy/numpy-{version}.zip'\n site_packages_name = 'numpy'\n depends = ['setuptools', 'cython']\n patches = [join('patches', 'add_libm_explicitly_to_build.patch'), join(\n 'patches', 'do_not_use_system_libs.patch'), join('patches',\n 'remove_unittest_call.patch')]\n call_hostpython_via_targetpython = False\n\n def build_compiled_components(self, arch):\n self.setup_extra_args = ['-j', str(cpu_count())]\n super().build_compiled_components(arch)\n self.setup_extra_args = []\n\n def rebuild_compiled_components(self, arch, env):\n self.setup_extra_args = ['-j', str(cpu_count())]\n super().rebuild_compiled_components(arch, env)\n self.setup_extra_args = []\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass NumpyRecipe(CompiledComponentsPythonRecipe):\n version = '1.18.1'\n url = 'https://pypi.python.org/packages/source/n/numpy/numpy-{version}.zip'\n site_packages_name = 'numpy'\n depends = ['setuptools', 'cython']\n patches = [join('patches', 'add_libm_explicitly_to_build.patch'), join(\n 'patches', 'do_not_use_system_libs.patch'), join('patches',\n 'remove_unittest_call.patch')]\n call_hostpython_via_targetpython = False\n\n def build_compiled_components(self, arch):\n self.setup_extra_args = ['-j', str(cpu_count())]\n super().build_compiled_components(arch)\n self.setup_extra_args = []\n\n def rebuild_compiled_components(self, arch, env):\n self.setup_extra_args = ['-j', str(cpu_count())]\n super().rebuild_compiled_components(arch, env)\n self.setup_extra_args = []\n\n\nrecipe = NumpyRecipe()\n",
"step-4": "from pythonforandroid.recipe import CompiledComponentsPythonRecipe\nfrom multiprocessing import cpu_count\nfrom os.path import join\n\n\nclass NumpyRecipe(CompiledComponentsPythonRecipe):\n version = '1.18.1'\n url = 'https://pypi.python.org/packages/source/n/numpy/numpy-{version}.zip'\n site_packages_name = 'numpy'\n depends = ['setuptools', 'cython']\n patches = [join('patches', 'add_libm_explicitly_to_build.patch'), join(\n 'patches', 'do_not_use_system_libs.patch'), join('patches',\n 'remove_unittest_call.patch')]\n call_hostpython_via_targetpython = False\n\n def build_compiled_components(self, arch):\n self.setup_extra_args = ['-j', str(cpu_count())]\n super().build_compiled_components(arch)\n self.setup_extra_args = []\n\n def rebuild_compiled_components(self, arch, env):\n self.setup_extra_args = ['-j', str(cpu_count())]\n super().rebuild_compiled_components(arch, env)\n self.setup_extra_args = []\n\n\nrecipe = NumpyRecipe()\n",
"step-5": "from pythonforandroid.recipe import CompiledComponentsPythonRecipe\nfrom multiprocessing import cpu_count\nfrom os.path import join\n\n\nclass NumpyRecipe(CompiledComponentsPythonRecipe):\n\n version = '1.18.1'\n url = 'https://pypi.python.org/packages/source/n/numpy/numpy-{version}.zip'\n site_packages_name = 'numpy'\n depends = ['setuptools', 'cython']\n\n patches = [\n join('patches', 'add_libm_explicitly_to_build.patch'),\n join('patches', 'do_not_use_system_libs.patch'),\n join('patches', 'remove_unittest_call.patch'),\n ]\n\n call_hostpython_via_targetpython = False\n\n def build_compiled_components(self, arch):\n self.setup_extra_args = ['-j', str(cpu_count())]\n super().build_compiled_components(arch)\n self.setup_extra_args = []\n\n def rebuild_compiled_components(self, arch, env):\n self.setup_extra_args = ['-j', str(cpu_count())]\n super().rebuild_compiled_components(arch, env)\n self.setup_extra_args = []\n\n\nrecipe = NumpyRecipe()\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import os.path as path
from googleapiclient.discovery import build
from google.oauth2 import service_account
# Read-only Sheets scope requested for the service account.
SCOPES = ['https://www.googleapis.com/auth/spreadsheets.readonly']
# The ID and range of a sample spreadsheet.
SAMPLE_SPREADSHEET_ID = '1FSMATLJUNCbV8-XYM8h7yHoWRSGA8JFsaECOZy_i2T8'
def main():
service_account_json = path.join(path.dirname(
path.abspath(__file__)), 'service_account.json')
credentials = service_account.Credentials.from_service_account_file(
service_account_json, scopes=SCOPES)
service = build('sheets', 'v4', credentials=credentials)
sheet_service = service.spreadsheets()
print('Getting pie chart information')
get_pie_chart_info(sheet_service)
print('Getting line chart information')
get_line_chart_info(sheet_service)
print('Getting boolean information')
get_bool_info(sheet_service)
def get_pie_chart_info(sheet_service):
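    # A1 notation: columns F and G of the "data" sheet hold the race label and its breakdown.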
sample_range_name = 'data!F:G'
result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,
range=sample_range_name).execute()
values = result.get('values', [])
if not values:
print('No data found.')
else:
print('Race, Breakdown:')
for row in values:
            # Print columns F and G (race label and breakdown value).
print('%s, %s' % (row[0], row[1]))
def get_line_chart_info(sheet_service):
sample_range_name = 'data!D:D'
result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,
range=sample_range_name).execute()
values = result.get('values', [])
if not values:
print('No data found.')
else:
print('Time series information:')
for row in values:
print('%s' % row[0])
def get_bool_info(sheet_service):
sample_range_name = 'data!B1'
result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,
range=sample_range_name).execute()
values = result.get('values', [])
if not values:
print('No data found.')
else:
        print('Boolean information:')
for row in values:
print(row[0] == 'TRUE')
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "f9261c1844cc629c91043d1221d0b76f6e22fef6",
"index": 6157,
"step-1": "<mask token>\n\n\ndef main():\n service_account_json = path.join(path.dirname(path.abspath(__file__)),\n 'service_account.json')\n credentials = service_account.Credentials.from_service_account_file(\n service_account_json, scopes=SCOPES)\n service = build('sheets', 'v4', credentials=credentials)\n sheet_service = service.spreadsheets()\n print('Getting pie chart information')\n get_pie_chart_info(sheet_service)\n print('Getting line chart information')\n get_line_chart_info(sheet_service)\n print('Getting boolean information')\n get_bool_info(sheet_service)\n\n\ndef get_pie_chart_info(sheet_service):\n sample_range_name = 'data!F:G'\n result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=sample_range_name).execute()\n values = result.get('values', [])\n if not values:\n print('No data found.')\n else:\n print('Race, Breakdown:')\n for row in values:\n print('%s, %s' % (row[0], row[1]))\n\n\ndef get_line_chart_info(sheet_service):\n sample_range_name = 'data!D:D'\n result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=sample_range_name).execute()\n values = result.get('values', [])\n if not values:\n print('No data found.')\n else:\n print('Time series information:')\n for row in values:\n print('%s' % row[0])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n service_account_json = path.join(path.dirname(path.abspath(__file__)),\n 'service_account.json')\n credentials = service_account.Credentials.from_service_account_file(\n service_account_json, scopes=SCOPES)\n service = build('sheets', 'v4', credentials=credentials)\n sheet_service = service.spreadsheets()\n print('Getting pie chart information')\n get_pie_chart_info(sheet_service)\n print('Getting line chart information')\n get_line_chart_info(sheet_service)\n print('Getting boolean information')\n get_bool_info(sheet_service)\n\n\ndef get_pie_chart_info(sheet_service):\n sample_range_name = 'data!F:G'\n result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=sample_range_name).execute()\n values = result.get('values', [])\n if not values:\n print('No data found.')\n else:\n print('Race, Breakdown:')\n for row in values:\n print('%s, %s' % (row[0], row[1]))\n\n\ndef get_line_chart_info(sheet_service):\n sample_range_name = 'data!D:D'\n result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=sample_range_name).execute()\n values = result.get('values', [])\n if not values:\n print('No data found.')\n else:\n print('Time series information:')\n for row in values:\n print('%s' % row[0])\n\n\ndef get_bool_info(sheet_service):\n sample_range_name = 'data!B1'\n result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=sample_range_name).execute()\n values = result.get('values', [])\n if not values:\n print('No data found.')\n else:\n print('Time series information:')\n for row in values:\n print(row[0] == 'TRUE')\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nSCOPES = ['https://www.googleapis.com/auth/spreadsheets.readonly']\nSAMPLE_SPREADSHEET_ID = '1FSMATLJUNCbV8-XYM8h7yHoWRSGA8JFsaECOZy_i2T8'\n\n\ndef main():\n service_account_json = path.join(path.dirname(path.abspath(__file__)),\n 'service_account.json')\n credentials = service_account.Credentials.from_service_account_file(\n service_account_json, scopes=SCOPES)\n service = build('sheets', 'v4', credentials=credentials)\n sheet_service = service.spreadsheets()\n print('Getting pie chart information')\n get_pie_chart_info(sheet_service)\n print('Getting line chart information')\n get_line_chart_info(sheet_service)\n print('Getting boolean information')\n get_bool_info(sheet_service)\n\n\ndef get_pie_chart_info(sheet_service):\n sample_range_name = 'data!F:G'\n result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=sample_range_name).execute()\n values = result.get('values', [])\n if not values:\n print('No data found.')\n else:\n print('Race, Breakdown:')\n for row in values:\n print('%s, %s' % (row[0], row[1]))\n\n\ndef get_line_chart_info(sheet_service):\n sample_range_name = 'data!D:D'\n result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=sample_range_name).execute()\n values = result.get('values', [])\n if not values:\n print('No data found.')\n else:\n print('Time series information:')\n for row in values:\n print('%s' % row[0])\n\n\ndef get_bool_info(sheet_service):\n sample_range_name = 'data!B1'\n result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=sample_range_name).execute()\n values = result.get('values', [])\n if not values:\n print('No data found.')\n else:\n print('Time series information:')\n for row in values:\n print(row[0] == 'TRUE')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import os.path as path\nfrom googleapiclient.discovery import build\nfrom google.oauth2 import service_account\nSCOPES = ['https://www.googleapis.com/auth/spreadsheets.readonly']\nSAMPLE_SPREADSHEET_ID = '1FSMATLJUNCbV8-XYM8h7yHoWRSGA8JFsaECOZy_i2T8'\n\n\ndef main():\n service_account_json = path.join(path.dirname(path.abspath(__file__)),\n 'service_account.json')\n credentials = service_account.Credentials.from_service_account_file(\n service_account_json, scopes=SCOPES)\n service = build('sheets', 'v4', credentials=credentials)\n sheet_service = service.spreadsheets()\n print('Getting pie chart information')\n get_pie_chart_info(sheet_service)\n print('Getting line chart information')\n get_line_chart_info(sheet_service)\n print('Getting boolean information')\n get_bool_info(sheet_service)\n\n\ndef get_pie_chart_info(sheet_service):\n sample_range_name = 'data!F:G'\n result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=sample_range_name).execute()\n values = result.get('values', [])\n if not values:\n print('No data found.')\n else:\n print('Race, Breakdown:')\n for row in values:\n print('%s, %s' % (row[0], row[1]))\n\n\ndef get_line_chart_info(sheet_service):\n sample_range_name = 'data!D:D'\n result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=sample_range_name).execute()\n values = result.get('values', [])\n if not values:\n print('No data found.')\n else:\n print('Time series information:')\n for row in values:\n print('%s' % row[0])\n\n\ndef get_bool_info(sheet_service):\n sample_range_name = 'data!B1'\n result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=sample_range_name).execute()\n values = result.get('values', [])\n if not values:\n print('No data found.')\n else:\n print('Time series information:')\n for row in values:\n print(row[0] == 'TRUE')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import os.path as path\nfrom googleapiclient.discovery import build\nfrom google.oauth2 import service_account\n\n# If modifying these scopes, delete the file token.pickle.\nSCOPES = ['https://www.googleapis.com/auth/spreadsheets.readonly']\n\n# The ID and range of a sample spreadsheet.\nSAMPLE_SPREADSHEET_ID = '1FSMATLJUNCbV8-XYM8h7yHoWRSGA8JFsaECOZy_i2T8'\n\n\ndef main():\n service_account_json = path.join(path.dirname(\n path.abspath(__file__)), 'service_account.json')\n credentials = service_account.Credentials.from_service_account_file(\n service_account_json, scopes=SCOPES)\n service = build('sheets', 'v4', credentials=credentials)\n sheet_service = service.spreadsheets()\n\n print('Getting pie chart information')\n get_pie_chart_info(sheet_service)\n\n print('Getting line chart information')\n get_line_chart_info(sheet_service)\n\n print('Getting boolean information')\n get_bool_info(sheet_service)\n\n\ndef get_pie_chart_info(sheet_service):\n sample_range_name = 'data!F:G'\n result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=sample_range_name).execute()\n values = result.get('values', [])\n\n if not values:\n print('No data found.')\n else:\n print('Race, Breakdown:')\n for row in values:\n # Print columns A and E, which correspond to indices 0 and 4.\n print('%s, %s' % (row[0], row[1]))\n\n\ndef get_line_chart_info(sheet_service):\n sample_range_name = 'data!D:D'\n result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=sample_range_name).execute()\n values = result.get('values', [])\n\n if not values:\n print('No data found.')\n else:\n print('Time series information:')\n for row in values:\n print('%s' % row[0])\n\n\ndef get_bool_info(sheet_service):\n sample_range_name = 'data!B1'\n result = sheet_service.values().get(spreadsheetId=SAMPLE_SPREADSHEET_ID,\n range=sample_range_name).execute()\n values = result.get('values', [])\n\n if not values:\n print('No data found.')\n else:\n print('Time series information:')\n for row in values:\n print(row[0] == 'TRUE')\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
#################################################
### THIS FILE WAS AUTOGENERATED! DO NOT EDIT! ###
#################################################
# file to edit: dev_nb/10_DogcatcherFlatten.ipynb
import pandas as pd
import argparse
import csv
import os
import numpy as np
import string
def FivePrimeArea(df):
df = df.sort_values(by=["chr","end"],ascending=True)
df["FA_start"] = df["gene_start"]
df_exon = df[df["type"]=="exon"].copy()
df_exon = df_exon.drop_duplicates(subset=['name'],keep="first")
df_exon["FA_end"] = df_exon["end"]
df_exon = df_exon[["name","FA_end"]]
df = pd.merge(df,df_exon,how="left",on="name")
df["FA_length"] = df["FA_end"] - df["FA_start"]
df = df.drop_duplicates(subset=['name'],keep="first")
return df
def ThreePrimeArea(df):
df = df.sort_values(by=["chr","end"],ascending=False)
df["LA_end"] = df["gene_end"]
df_exon = df[df["type"]=="exon"].copy()
# Keep first exon
df_exon = df_exon.drop_duplicates(subset=['name'],keep="first")
df_exon["LA_start"] = df_exon["start"]
df_exon = df_exon[["name","LA_start"]]
df = pd.merge(df,df_exon,how="left",on="name")
df["LA_length"] = df["LA_end"] - df["LA_start"]
df = df.drop_duplicates(subset=['name'],keep="first")
return df
def getAreas(df):
"""
    Get the first and last exons for the plus and minus strands.
    We call it an "area" because the region is not necessarily a single exon.
"""
df_plu = df[df["strand"]=="+"]
df_min = df[df["strand"]=="-"]
df_plu_FA = FivePrimeArea(df_plu)
df_min_FA = FivePrimeArea(df_min)
df_plu_LA = ThreePrimeArea(df_plu)[["name","LA_start","LA_end","LA_length"]]
df_min_LA = ThreePrimeArea(df_min)[["name","LA_start","LA_end","LA_length"]]
df_plu = pd.merge(df_plu_FA,df_plu_LA,on="name")
df_min = pd.merge(df_min_FA,df_min_LA,on="name")
df = pd.concat([df_plu,df_min])
return df
def chrDIC(df):
    """Take a gtf and return a dictionary mapping each chromosome to its rows (called once per strand)."""
    chr_names = df['chr'].unique().tolist()
    d_chr = {chrom: df[df["chr"] == chrom] for chrom in chr_names}
    return d_chr
def countInside(df, start, end):
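    # Return a comma-joined list of gene names strictly inside (start, end), or NaN if none.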
rows_df = df[ (start < df["start"]) & (df["end"] < end) ]
names = rows_df['name'].unique().tolist()
names = ",".join(names)
    if len(names) > 0:
return names
else:
return np.nan
def removeInside(df):
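    # Drop genes that lie entirely within another gene (caller passes one strand at a time).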
d_chr = chrDIC(df)
df['genes_inside'] = df.apply(lambda row: countInside(d_chr[row['chr']], row["start"], row["end"]), axis=1)
df2 = df.dropna(subset=['genes_inside'])
all_names = []
for i in range(len(df2)):
names = df2["genes_inside"].iloc[i]
names = names.split(",")
all_names = all_names + names
inside_genes = list(set(all_names))
l = len(inside_genes)
print(f"Removing {l} genes that are inside other genes")
df_inside = pd.DataFrame(inside_genes,columns=['name'])
df = df[~df["name"].isin(df_inside["name"])].copy()
del df["genes_inside"]
return df, df_inside
def flattenGTF(file_in,file_type,NEXTFLOW=True):
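    # Flatten the annotation to one row per gene with first/last-area coordinates, then drop nested and duplicate genes.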
if file_type == "ENSEMBL":
print(f"Flattening ENSEMBL like genome {file_in}")
my_col = ["chr","source","type","start","end","dot","strand","dot2","gene_id"]
df = pd.read_csv(file_in, sep="\t",header=None,names=my_col, comment="#",low_memory=False)
df["chr"] = df["chr"].astype(str)
df = df[~df["chr"].str.contains("\.") ] # Take out patches
df.sort_values(by=["chr","start"], inplace=True, ascending=True)
fout = f"{file_in[:-4]}_sort.gtf"
df.to_csv(fout,sep="\t", index=None,quoting=csv.QUOTE_NONE, header=None)
df["name"] = df["gene_id"].str.split(';',expand=True)[0]
df["name"] = df["name"].str.replace("gene_id ","")
df["name"] = df["name"].str.replace("\"","")
df["type"] = df["type"].astype(str)
df_gene = df[df["type"]=="gene"].copy()
df_gene["gene_start"] = df_gene["start"]
df_gene["gene_end"] = df_gene["end"]
df_gene = df_gene[["name","gene_start","gene_end"]].copy()
df = pd.merge(df,df_gene,how="left",on="name")
df = getAreas(df)
df["start"] = df["gene_start"]
df["end"] = df["gene_end"]
# df = df[["chr","start","end","strand","name","type"]].copy()
if file_type == "BED":
my_col = ["chr","start","end","name","strand"]
df = pd.read_csv(file_in, sep="\t",header=None,names=my_col, comment="#",low_memory=False)
df["FA_start"] = df["start"]
df["FA_end"] = df["end"]
df["LA_start"] = df["start"]
df["LA_end"] = df["end"]
df["dot"] = "."
df["dot2"] = "."
df["source"] = "NA"
df["type"] = "NA"
df["gene_id"] = df["name"]
if file_type == "REFSEQGFF":
        # Chromosome names are RefSeq accessions; map them back to chr1 etc.
# https://www.ncbi.nlm.nih.gov/assembly/GCF_000001405.39#/def_asm_Primary_Assembly
print(f"Flattening REFSEQGFF like genome")
# https://ftp.ncbi.nlm.nih.gov/genomes/refseq/vertebrate_mammalian/Homo_sapiens/reference/
#download this GCF_000001405.39_GRCh38.p13_genomic.gtf.gz
# sort and index in IGV
# NC_000001.11 BestRefSeq gene 11874 14409 . + . gene_id "DDX11L1"; transcript_id ""; db_xref "GeneID:100287102"; db_xref "HGNC:HGNC:37102"; description "DEAD/H-box helicase 11 like 1 (pseudogene)"; gbkey "Gene"; gene "DDX11L1"; gene_biotype "transcribed_pseudogene"; pseudo "true";
my_col = ["chr","source","type","start","end","dot","strand","dot2","gene_id"]
replace_list = [("chr1","NC_000001.11"),
("chr2","NC_000002.12"),
("chr3","NC_000003.12"),
("chr4","NC_000004.12"),
("chr5","NC_000005.10"),
("chr6","NC_000006.12"),
("chr7","NC_000007.14"),
("chr8","NC_000008.11"),
("chr9","NC_000009.12"),
("chr10","NC_000010.11"),
("chr11","NC_000011.10"),
("chr12","NC_000012.12"),
("chr13","NC_000013.11"),
("chr14","NC_000014.9"),
("chr15","NC_000015.10"),
("chr16","NC_000016.10"),
("chr17","NC_000017.11"),
("chr18","NC_000018.10"),
("chr19","NC_000019.10"),
("chr20","NC_000020.11"),
("chr21","NC_000021.9"),
("chr22","NC_000022.11"),
("chrX","NC_000023.11"),
("chrY","NC_000024.10")]
df = pd.read_csv(file_in, sep="\t",header=None,names=my_col, comment="#",low_memory=False)
df = df[df["type"]=="gene"].copy()
# Change NC names to chr
for l in replace_list:
df["chr"] = np.where(df["chr"]==l[1],l[0],df["chr"])
df = df[~df["chr"].str.contains("\.") ] # Take out patches
df["name"] = df["gene_id"].str.split(';',expand=True)[0]
df["name"] = df["name"].str.replace("ID=gene-","")
df["type"] = df["type"].astype(str)
df_gene = df[df["type"]=="gene"].copy()
df_gene["gene_start"] = df_gene["start"]
df_gene["gene_end"] = df_gene["end"]
df_gene = df_gene[["name","gene_start","gene_end"]].copy()
df = pd.merge(df,df_gene,how="left",on="name")
df = getAreas(df)
df["start"] = df["gene_start"]
df["end"] = df["gene_end"]
# df = df[["chr","start","end","strand","name","type"]].copy()
if file_type == "REFSEQBED":
# chr1 11873 14409 NR_046018 0 +
# 14409 14409 0 3 354,109,1189, 0,739,1347,
my_col = ["chr","start","end","name","dot","strand","start1","start2","dot2","dot3","gene_id","gene_id2"]
df = pd.read_csv(file_in, sep="\t",header=None,names=my_col, comment="#",low_memory=False)
df = df[["chr","start","end","name","strand"]]
df["FA_start"] = df["start"]
df["FA_end"] = df["end"]
df["LA_start"] = df["start"]
df["LA_end"] = df["end"]
df["dot"] = "."
df["dot2"] = "."
df["source"] = "NA"
df["type"] = "NA"
df["gene_id"] = df["name"]
df_plu = df[df["strand"]=="+"].copy()
df_min = df[df["strand"]=="-"].copy()
df_plu, df_plu_inside = removeInside(df_plu)
df_min, df_min_inside = removeInside(df_min)
df_plu.sort_values(by=["chr","end"], inplace=True, ascending=False)
df_plu.drop_duplicates(subset=["start","chr"], keep='first', inplace=True)
df_min.sort_values(by=["chr","start"], inplace=True, ascending=True)
df_min.drop_duplicates(subset=["end","chr"], keep='first', inplace=True)
df = pd.concat([df_plu,df_min])
df = df.sort_values(by=["chr","end"],ascending=False)
gtf = df[["chr","source","type","start","end","dot","strand","dot2","gene_id"] ]
df = df[["chr","start","end","name","strand","FA_start","FA_end","LA_start","LA_end"]]
if NEXTFLOW:
file_in = os.path.basename(file_in)
fout = f"{file_in[:-4]}_flat.txt"
fout2 = f"{file_in[:-4]}_flat.gtf"
fout3 = f"{file_in[:-4]}_flat_CHROMNAMES.txt"
print(f"Outputting flat file {fout}")
df.to_csv(fout,sep="\t",index=None)
gtf.to_csv(fout2,sep="\t", index=None,quoting=csv.QUOTE_NONE, header=None)
gtf_names = gtf[["chr"]].copy()
gtf_names.drop_duplicates(subset=["chr"], keep='first', inplace=True)
gtf_names.to_csv(fout3,sep="\t", index=None)
return df
def parse_arguments():
parser = argparse.ArgumentParser(description='Flatten gtf or bed to first and last exon file. Options in currently are ENSEMBL, BED')
    parser.add_argument('--annotation_in', action='store', metavar='annotation_in')
    parser.add_argument('--file_type', action='store', metavar='file_type', default="ENSEMBL")
args = parser.parse_args()
return args
if __name__=="__main__":
args = parse_arguments()
file_in = args.annotation_in
file_type = args.file_type
flattenGTF(file_in,file_type)
|
normal
|
{
"blob_id": "5c5922fd3a7a5eec121d94e69bc972089e435175",
"index": 9406,
"step-1": "<mask token>\n\n\ndef FivePrimeArea(df):\n df = df.sort_values(by=['chr', 'end'], ascending=True)\n df['FA_start'] = df['gene_start']\n df_exon = df[df['type'] == 'exon'].copy()\n df_exon = df_exon.drop_duplicates(subset=['name'], keep='first')\n df_exon['FA_end'] = df_exon['end']\n df_exon = df_exon[['name', 'FA_end']]\n df = pd.merge(df, df_exon, how='left', on='name')\n df['FA_length'] = df['FA_end'] - df['FA_start']\n df = df.drop_duplicates(subset=['name'], keep='first')\n return df\n\n\n<mask token>\n\n\ndef getAreas(df):\n \"\"\"\n This function will get the first and last exons for plu and min strand.\n Call it area because not necessarily exon.\n \"\"\"\n df_plu = df[df['strand'] == '+']\n df_min = df[df['strand'] == '-']\n df_plu_FA = FivePrimeArea(df_plu)\n df_min_FA = FivePrimeArea(df_min)\n df_plu_LA = ThreePrimeArea(df_plu)[['name', 'LA_start', 'LA_end',\n 'LA_length']]\n df_min_LA = ThreePrimeArea(df_min)[['name', 'LA_start', 'LA_end',\n 'LA_length']]\n df_plu = pd.merge(df_plu_FA, df_plu_LA, on='name')\n df_min = pd.merge(df_min_FA, df_min_LA, on='name')\n df = pd.concat([df_plu, df_min])\n return df\n\n\ndef chrDIC(df):\n \"\"\"This function will take a gtf and return strand specific dictionary of different chrm\"\"\"\n chr_names = df['chr'].unique().tolist()\n d_chr = d_gtf_chr = {chrom: df[df['chr'] == chrom] for chrom in chr_names}\n return d_chr\n\n\ndef countInside(df, start, end):\n rows_df = df[(start < df['start']) & (df['end'] < end)]\n names = rows_df['name'].unique().tolist()\n names = ','.join(names)\n if len(names) > 0:\n return names\n else:\n return np.nan\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef FivePrimeArea(df):\n df = df.sort_values(by=['chr', 'end'], ascending=True)\n df['FA_start'] = df['gene_start']\n df_exon = df[df['type'] == 'exon'].copy()\n df_exon = df_exon.drop_duplicates(subset=['name'], keep='first')\n df_exon['FA_end'] = df_exon['end']\n df_exon = df_exon[['name', 'FA_end']]\n df = pd.merge(df, df_exon, how='left', on='name')\n df['FA_length'] = df['FA_end'] - df['FA_start']\n df = df.drop_duplicates(subset=['name'], keep='first')\n return df\n\n\n<mask token>\n\n\ndef getAreas(df):\n \"\"\"\n This function will get the first and last exons for plu and min strand.\n Call it area because not necessarily exon.\n \"\"\"\n df_plu = df[df['strand'] == '+']\n df_min = df[df['strand'] == '-']\n df_plu_FA = FivePrimeArea(df_plu)\n df_min_FA = FivePrimeArea(df_min)\n df_plu_LA = ThreePrimeArea(df_plu)[['name', 'LA_start', 'LA_end',\n 'LA_length']]\n df_min_LA = ThreePrimeArea(df_min)[['name', 'LA_start', 'LA_end',\n 'LA_length']]\n df_plu = pd.merge(df_plu_FA, df_plu_LA, on='name')\n df_min = pd.merge(df_min_FA, df_min_LA, on='name')\n df = pd.concat([df_plu, df_min])\n return df\n\n\ndef chrDIC(df):\n \"\"\"This function will take a gtf and return strand specific dictionary of different chrm\"\"\"\n chr_names = df['chr'].unique().tolist()\n d_chr = d_gtf_chr = {chrom: df[df['chr'] == chrom] for chrom in chr_names}\n return d_chr\n\n\ndef countInside(df, start, end):\n rows_df = df[(start < df['start']) & (df['end'] < end)]\n names = rows_df['name'].unique().tolist()\n names = ','.join(names)\n if len(names) > 0:\n return names\n else:\n return np.nan\n\n\n<mask token>\n\n\ndef flattenGTF(file_in, file_type, NEXTFLOW=True):\n if file_type == 'ENSEMBL':\n print(f'Flattening ENSEMBL like genome {file_in}')\n my_col = ['chr', 'source', 'type', 'start', 'end', 'dot', 'strand',\n 'dot2', 'gene_id']\n df = pd.read_csv(file_in, sep='\\t', header=None, names=my_col,\n comment='#', low_memory=False)\n df['chr'] = df['chr'].astype(str)\n df = df[~df['chr'].str.contains('\\\\.')]\n df.sort_values(by=['chr', 'start'], inplace=True, ascending=True)\n fout = f'{file_in[:-4]}_sort.gtf'\n df.to_csv(fout, sep='\\t', index=None, quoting=csv.QUOTE_NONE,\n header=None)\n df['name'] = df['gene_id'].str.split(';', expand=True)[0]\n df['name'] = df['name'].str.replace('gene_id ', '')\n df['name'] = df['name'].str.replace('\"', '')\n df['type'] = df['type'].astype(str)\n df_gene = df[df['type'] == 'gene'].copy()\n df_gene['gene_start'] = df_gene['start']\n df_gene['gene_end'] = df_gene['end']\n df_gene = df_gene[['name', 'gene_start', 'gene_end']].copy()\n df = pd.merge(df, df_gene, how='left', on='name')\n df = getAreas(df)\n df['start'] = df['gene_start']\n df['end'] = df['gene_end']\n if file_type == 'BED':\n my_col = ['chr', 'start', 'end', 'name', 'strand']\n df = pd.read_csv(file_in, sep='\\t', header=None, names=my_col,\n comment='#', low_memory=False)\n df['FA_start'] = df['start']\n df['FA_end'] = df['end']\n df['LA_start'] = df['start']\n df['LA_end'] = df['end']\n df['dot'] = '.'\n df['dot2'] = '.'\n df['source'] = 'NA'\n df['type'] = 'NA'\n df['gene_id'] = df['name']\n if file_type == 'REFSEQGFF':\n print(f'Flattening REFSEQGFF like genome')\n my_col = ['chr', 'source', 'type', 'start', 'end', 'dot', 'strand',\n 'dot2', 'gene_id']\n replace_list = [('chr1', 'NC_000001.11'), ('chr2', 'NC_000002.12'),\n ('chr3', 'NC_000003.12'), ('chr4', 'NC_000004.12'), ('chr5',\n 'NC_000005.10'), ('chr6', 'NC_000006.12'), ('chr7',\n 'NC_000007.14'), ('chr8', 
'NC_000008.11'), ('chr9',\n 'NC_000009.12'), ('chr10', 'NC_000010.11'), ('chr11',\n 'NC_000011.10'), ('chr12', 'NC_000012.12'), ('chr13',\n 'NC_000013.11'), ('chr14', 'NC_000014.9'), ('chr15',\n 'NC_000015.10'), ('chr16', 'NC_000016.10'), ('chr17',\n 'NC_000017.11'), ('chr18', 'NC_000018.10'), ('chr19',\n 'NC_000019.10'), ('chr20', 'NC_000020.11'), ('chr21',\n 'NC_000021.9'), ('chr22', 'NC_000022.11'), ('chrX',\n 'NC_000023.11'), ('chrY', 'NC_000024.10')]\n df = pd.read_csv(file_in, sep='\\t', header=None, names=my_col,\n comment='#', low_memory=False)\n df = df[df['type'] == 'gene'].copy()\n for l in replace_list:\n df['chr'] = np.where(df['chr'] == l[1], l[0], df['chr'])\n df = df[~df['chr'].str.contains('\\\\.')]\n df['name'] = df['gene_id'].str.split(';', expand=True)[0]\n df['name'] = df['name'].str.replace('ID=gene-', '')\n df['type'] = df['type'].astype(str)\n df_gene = df[df['type'] == 'gene'].copy()\n df_gene['gene_start'] = df_gene['start']\n df_gene['gene_end'] = df_gene['end']\n df_gene = df_gene[['name', 'gene_start', 'gene_end']].copy()\n df = pd.merge(df, df_gene, how='left', on='name')\n df = getAreas(df)\n df['start'] = df['gene_start']\n df['end'] = df['gene_end']\n if file_type == 'REFSEQBED':\n my_col = ['chr', 'start', 'end', 'name', 'dot', 'strand', 'start1',\n 'start2', 'dot2', 'dot3', 'gene_id', 'gene_id2']\n df = pd.read_csv(file_in, sep='\\t', header=None, names=my_col,\n comment='#', low_memory=False)\n df = df[['chr', 'start', 'end', 'name', 'strand']]\n df['FA_start'] = df['start']\n df['FA_end'] = df['end']\n df['LA_start'] = df['start']\n df['LA_end'] = df['end']\n df['dot'] = '.'\n df['dot2'] = '.'\n df['source'] = 'NA'\n df['type'] = 'NA'\n df['gene_id'] = df['name']\n df_plu = df[df['strand'] == '+'].copy()\n df_min = df[df['strand'] == '-'].copy()\n df_plu, df_plu_inside = removeInside(df_plu)\n df_min, df_min_inside = removeInside(df_min)\n df_plu.sort_values(by=['chr', 'end'], inplace=True, ascending=False)\n df_plu.drop_duplicates(subset=['start', 'chr'], keep='first', inplace=True)\n df_min.sort_values(by=['chr', 'start'], inplace=True, ascending=True)\n df_min.drop_duplicates(subset=['end', 'chr'], keep='first', inplace=True)\n df = pd.concat([df_plu, df_min])\n df = df.sort_values(by=['chr', 'end'], ascending=False)\n gtf = df[['chr', 'source', 'type', 'start', 'end', 'dot', 'strand',\n 'dot2', 'gene_id']]\n df = df[['chr', 'start', 'end', 'name', 'strand', 'FA_start', 'FA_end',\n 'LA_start', 'LA_end']]\n if NEXTFLOW:\n file_in = os.path.basename(file_in)\n fout = f'{file_in[:-4]}_flat.txt'\n fout2 = f'{file_in[:-4]}_flat.gtf'\n fout3 = f'{file_in[:-4]}_flat_CHROMNAMES.txt'\n print(f'Outputting flat file {fout}')\n df.to_csv(fout, sep='\\t', index=None)\n gtf.to_csv(fout2, sep='\\t', index=None, quoting=csv.QUOTE_NONE, header=None\n )\n gtf_names = gtf[['chr']].copy()\n gtf_names.drop_duplicates(subset=['chr'], keep='first', inplace=True)\n gtf_names.to_csv(fout3, sep='\\t', index=None)\n return df\n\n\n<mask token>\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(description=\n 'Flatten gtf or bed to first and last exon file. Options in currently are ENSEMBL, BED'\n )\n parser.add_argument('--annotation_in', action='store', metavar=\n 'annotation_in')\n parser.add_argument('--file_type', action='store', metavar='file_type',\n default='ENSEMBL')\n args = parser.parse_args()\n return args\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef FivePrimeArea(df):\n df = df.sort_values(by=['chr', 'end'], ascending=True)\n df['FA_start'] = df['gene_start']\n df_exon = df[df['type'] == 'exon'].copy()\n df_exon = df_exon.drop_duplicates(subset=['name'], keep='first')\n df_exon['FA_end'] = df_exon['end']\n df_exon = df_exon[['name', 'FA_end']]\n df = pd.merge(df, df_exon, how='left', on='name')\n df['FA_length'] = df['FA_end'] - df['FA_start']\n df = df.drop_duplicates(subset=['name'], keep='first')\n return df\n\n\ndef ThreePrimeArea(df):\n df = df.sort_values(by=['chr', 'end'], ascending=False)\n df['LA_end'] = df['gene_end']\n df_exon = df[df['type'] == 'exon'].copy()\n df_exon = df_exon.drop_duplicates(subset=['name'], keep='first')\n df_exon['LA_start'] = df_exon['start']\n df_exon = df_exon[['name', 'LA_start']]\n df = pd.merge(df, df_exon, how='left', on='name')\n df['LA_length'] = df['LA_end'] - df['LA_start']\n df = df.drop_duplicates(subset=['name'], keep='first')\n return df\n\n\ndef getAreas(df):\n \"\"\"\n This function will get the first and last exons for plu and min strand.\n Call it area because not necessarily exon.\n \"\"\"\n df_plu = df[df['strand'] == '+']\n df_min = df[df['strand'] == '-']\n df_plu_FA = FivePrimeArea(df_plu)\n df_min_FA = FivePrimeArea(df_min)\n df_plu_LA = ThreePrimeArea(df_plu)[['name', 'LA_start', 'LA_end',\n 'LA_length']]\n df_min_LA = ThreePrimeArea(df_min)[['name', 'LA_start', 'LA_end',\n 'LA_length']]\n df_plu = pd.merge(df_plu_FA, df_plu_LA, on='name')\n df_min = pd.merge(df_min_FA, df_min_LA, on='name')\n df = pd.concat([df_plu, df_min])\n return df\n\n\ndef chrDIC(df):\n \"\"\"This function will take a gtf and return strand specific dictionary of different chrm\"\"\"\n chr_names = df['chr'].unique().tolist()\n d_chr = d_gtf_chr = {chrom: df[df['chr'] == chrom] for chrom in chr_names}\n return d_chr\n\n\ndef countInside(df, start, end):\n rows_df = df[(start < df['start']) & (df['end'] < end)]\n names = rows_df['name'].unique().tolist()\n names = ','.join(names)\n if len(names) > 0:\n return names\n else:\n return np.nan\n\n\ndef removeInside(df):\n d_chr = chrDIC(df)\n df['genes_inside'] = df.apply(lambda row: countInside(d_chr[row['chr']],\n row['start'], row['end']), axis=1)\n df2 = df.dropna(subset=['genes_inside'])\n all_names = []\n for i in range(len(df2)):\n names = df2['genes_inside'].iloc[i]\n names = names.split(',')\n all_names = all_names + names\n inside_genes = list(set(all_names))\n l = len(inside_genes)\n print(f'Removing {l} genes that are inside other genes')\n df_inside = pd.DataFrame(inside_genes, columns=['name'])\n df = df[~df['name'].isin(df_inside['name'])].copy()\n del df['genes_inside']\n return df, df_inside\n\n\ndef flattenGTF(file_in, file_type, NEXTFLOW=True):\n if file_type == 'ENSEMBL':\n print(f'Flattening ENSEMBL like genome {file_in}')\n my_col = ['chr', 'source', 'type', 'start', 'end', 'dot', 'strand',\n 'dot2', 'gene_id']\n df = pd.read_csv(file_in, sep='\\t', header=None, names=my_col,\n comment='#', low_memory=False)\n df['chr'] = df['chr'].astype(str)\n df = df[~df['chr'].str.contains('\\\\.')]\n df.sort_values(by=['chr', 'start'], inplace=True, ascending=True)\n fout = f'{file_in[:-4]}_sort.gtf'\n df.to_csv(fout, sep='\\t', index=None, quoting=csv.QUOTE_NONE,\n header=None)\n df['name'] = df['gene_id'].str.split(';', expand=True)[0]\n df['name'] = df['name'].str.replace('gene_id ', '')\n df['name'] = df['name'].str.replace('\"', '')\n df['type'] = df['type'].astype(str)\n df_gene = df[df['type'] == 
'gene'].copy()\n df_gene['gene_start'] = df_gene['start']\n df_gene['gene_end'] = df_gene['end']\n df_gene = df_gene[['name', 'gene_start', 'gene_end']].copy()\n df = pd.merge(df, df_gene, how='left', on='name')\n df = getAreas(df)\n df['start'] = df['gene_start']\n df['end'] = df['gene_end']\n if file_type == 'BED':\n my_col = ['chr', 'start', 'end', 'name', 'strand']\n df = pd.read_csv(file_in, sep='\\t', header=None, names=my_col,\n comment='#', low_memory=False)\n df['FA_start'] = df['start']\n df['FA_end'] = df['end']\n df['LA_start'] = df['start']\n df['LA_end'] = df['end']\n df['dot'] = '.'\n df['dot2'] = '.'\n df['source'] = 'NA'\n df['type'] = 'NA'\n df['gene_id'] = df['name']\n if file_type == 'REFSEQGFF':\n print(f'Flattening REFSEQGFF like genome')\n my_col = ['chr', 'source', 'type', 'start', 'end', 'dot', 'strand',\n 'dot2', 'gene_id']\n replace_list = [('chr1', 'NC_000001.11'), ('chr2', 'NC_000002.12'),\n ('chr3', 'NC_000003.12'), ('chr4', 'NC_000004.12'), ('chr5',\n 'NC_000005.10'), ('chr6', 'NC_000006.12'), ('chr7',\n 'NC_000007.14'), ('chr8', 'NC_000008.11'), ('chr9',\n 'NC_000009.12'), ('chr10', 'NC_000010.11'), ('chr11',\n 'NC_000011.10'), ('chr12', 'NC_000012.12'), ('chr13',\n 'NC_000013.11'), ('chr14', 'NC_000014.9'), ('chr15',\n 'NC_000015.10'), ('chr16', 'NC_000016.10'), ('chr17',\n 'NC_000017.11'), ('chr18', 'NC_000018.10'), ('chr19',\n 'NC_000019.10'), ('chr20', 'NC_000020.11'), ('chr21',\n 'NC_000021.9'), ('chr22', 'NC_000022.11'), ('chrX',\n 'NC_000023.11'), ('chrY', 'NC_000024.10')]\n df = pd.read_csv(file_in, sep='\\t', header=None, names=my_col,\n comment='#', low_memory=False)\n df = df[df['type'] == 'gene'].copy()\n for l in replace_list:\n df['chr'] = np.where(df['chr'] == l[1], l[0], df['chr'])\n df = df[~df['chr'].str.contains('\\\\.')]\n df['name'] = df['gene_id'].str.split(';', expand=True)[0]\n df['name'] = df['name'].str.replace('ID=gene-', '')\n df['type'] = df['type'].astype(str)\n df_gene = df[df['type'] == 'gene'].copy()\n df_gene['gene_start'] = df_gene['start']\n df_gene['gene_end'] = df_gene['end']\n df_gene = df_gene[['name', 'gene_start', 'gene_end']].copy()\n df = pd.merge(df, df_gene, how='left', on='name')\n df = getAreas(df)\n df['start'] = df['gene_start']\n df['end'] = df['gene_end']\n if file_type == 'REFSEQBED':\n my_col = ['chr', 'start', 'end', 'name', 'dot', 'strand', 'start1',\n 'start2', 'dot2', 'dot3', 'gene_id', 'gene_id2']\n df = pd.read_csv(file_in, sep='\\t', header=None, names=my_col,\n comment='#', low_memory=False)\n df = df[['chr', 'start', 'end', 'name', 'strand']]\n df['FA_start'] = df['start']\n df['FA_end'] = df['end']\n df['LA_start'] = df['start']\n df['LA_end'] = df['end']\n df['dot'] = '.'\n df['dot2'] = '.'\n df['source'] = 'NA'\n df['type'] = 'NA'\n df['gene_id'] = df['name']\n df_plu = df[df['strand'] == '+'].copy()\n df_min = df[df['strand'] == '-'].copy()\n df_plu, df_plu_inside = removeInside(df_plu)\n df_min, df_min_inside = removeInside(df_min)\n df_plu.sort_values(by=['chr', 'end'], inplace=True, ascending=False)\n df_plu.drop_duplicates(subset=['start', 'chr'], keep='first', inplace=True)\n df_min.sort_values(by=['chr', 'start'], inplace=True, ascending=True)\n df_min.drop_duplicates(subset=['end', 'chr'], keep='first', inplace=True)\n df = pd.concat([df_plu, df_min])\n df = df.sort_values(by=['chr', 'end'], ascending=False)\n gtf = df[['chr', 'source', 'type', 'start', 'end', 'dot', 'strand',\n 'dot2', 'gene_id']]\n df = df[['chr', 'start', 'end', 'name', 'strand', 'FA_start', 'FA_end',\n 'LA_start', 
'LA_end']]\n if NEXTFLOW:\n file_in = os.path.basename(file_in)\n fout = f'{file_in[:-4]}_flat.txt'\n fout2 = f'{file_in[:-4]}_flat.gtf'\n fout3 = f'{file_in[:-4]}_flat_CHROMNAMES.txt'\n print(f'Outputting flat file {fout}')\n df.to_csv(fout, sep='\\t', index=None)\n gtf.to_csv(fout2, sep='\\t', index=None, quoting=csv.QUOTE_NONE, header=None\n )\n gtf_names = gtf[['chr']].copy()\n gtf_names.drop_duplicates(subset=['chr'], keep='first', inplace=True)\n gtf_names.to_csv(fout3, sep='\\t', index=None)\n return df\n\n\n<mask token>\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(description=\n 'Flatten gtf or bed to first and last exon file. Options in currently are ENSEMBL, BED'\n )\n parser.add_argument('--annotation_in', action='store', metavar=\n 'annotation_in')\n parser.add_argument('--file_type', action='store', metavar='file_type',\n default='ENSEMBL')\n args = parser.parse_args()\n return args\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef FivePrimeArea(df):\n df = df.sort_values(by=['chr', 'end'], ascending=True)\n df['FA_start'] = df['gene_start']\n df_exon = df[df['type'] == 'exon'].copy()\n df_exon = df_exon.drop_duplicates(subset=['name'], keep='first')\n df_exon['FA_end'] = df_exon['end']\n df_exon = df_exon[['name', 'FA_end']]\n df = pd.merge(df, df_exon, how='left', on='name')\n df['FA_length'] = df['FA_end'] - df['FA_start']\n df = df.drop_duplicates(subset=['name'], keep='first')\n return df\n\n\ndef ThreePrimeArea(df):\n df = df.sort_values(by=['chr', 'end'], ascending=False)\n df['LA_end'] = df['gene_end']\n df_exon = df[df['type'] == 'exon'].copy()\n df_exon = df_exon.drop_duplicates(subset=['name'], keep='first')\n df_exon['LA_start'] = df_exon['start']\n df_exon = df_exon[['name', 'LA_start']]\n df = pd.merge(df, df_exon, how='left', on='name')\n df['LA_length'] = df['LA_end'] - df['LA_start']\n df = df.drop_duplicates(subset=['name'], keep='first')\n return df\n\n\ndef getAreas(df):\n \"\"\"\n This function will get the first and last exons for plu and min strand.\n Call it area because not necessarily exon.\n \"\"\"\n df_plu = df[df['strand'] == '+']\n df_min = df[df['strand'] == '-']\n df_plu_FA = FivePrimeArea(df_plu)\n df_min_FA = FivePrimeArea(df_min)\n df_plu_LA = ThreePrimeArea(df_plu)[['name', 'LA_start', 'LA_end',\n 'LA_length']]\n df_min_LA = ThreePrimeArea(df_min)[['name', 'LA_start', 'LA_end',\n 'LA_length']]\n df_plu = pd.merge(df_plu_FA, df_plu_LA, on='name')\n df_min = pd.merge(df_min_FA, df_min_LA, on='name')\n df = pd.concat([df_plu, df_min])\n return df\n\n\ndef chrDIC(df):\n \"\"\"This function will take a gtf and return strand specific dictionary of different chrm\"\"\"\n chr_names = df['chr'].unique().tolist()\n d_chr = d_gtf_chr = {chrom: df[df['chr'] == chrom] for chrom in chr_names}\n return d_chr\n\n\ndef countInside(df, start, end):\n rows_df = df[(start < df['start']) & (df['end'] < end)]\n names = rows_df['name'].unique().tolist()\n names = ','.join(names)\n if len(names) > 0:\n return names\n else:\n return np.nan\n\n\ndef removeInside(df):\n d_chr = chrDIC(df)\n df['genes_inside'] = df.apply(lambda row: countInside(d_chr[row['chr']],\n row['start'], row['end']), axis=1)\n df2 = df.dropna(subset=['genes_inside'])\n all_names = []\n for i in range(len(df2)):\n names = df2['genes_inside'].iloc[i]\n names = names.split(',')\n all_names = all_names + names\n inside_genes = list(set(all_names))\n l = len(inside_genes)\n print(f'Removing {l} genes that are inside other genes')\n df_inside = pd.DataFrame(inside_genes, columns=['name'])\n df = df[~df['name'].isin(df_inside['name'])].copy()\n del df['genes_inside']\n return df, df_inside\n\n\ndef flattenGTF(file_in, file_type, NEXTFLOW=True):\n if file_type == 'ENSEMBL':\n print(f'Flattening ENSEMBL like genome {file_in}')\n my_col = ['chr', 'source', 'type', 'start', 'end', 'dot', 'strand',\n 'dot2', 'gene_id']\n df = pd.read_csv(file_in, sep='\\t', header=None, names=my_col,\n comment='#', low_memory=False)\n df['chr'] = df['chr'].astype(str)\n df = df[~df['chr'].str.contains('\\\\.')]\n df.sort_values(by=['chr', 'start'], inplace=True, ascending=True)\n fout = f'{file_in[:-4]}_sort.gtf'\n df.to_csv(fout, sep='\\t', index=None, quoting=csv.QUOTE_NONE,\n header=None)\n df['name'] = df['gene_id'].str.split(';', expand=True)[0]\n df['name'] = df['name'].str.replace('gene_id ', '')\n df['name'] = df['name'].str.replace('\"', '')\n df['type'] = df['type'].astype(str)\n df_gene = df[df['type'] == 
'gene'].copy()\n df_gene['gene_start'] = df_gene['start']\n df_gene['gene_end'] = df_gene['end']\n df_gene = df_gene[['name', 'gene_start', 'gene_end']].copy()\n df = pd.merge(df, df_gene, how='left', on='name')\n df = getAreas(df)\n df['start'] = df['gene_start']\n df['end'] = df['gene_end']\n if file_type == 'BED':\n my_col = ['chr', 'start', 'end', 'name', 'strand']\n df = pd.read_csv(file_in, sep='\\t', header=None, names=my_col,\n comment='#', low_memory=False)\n df['FA_start'] = df['start']\n df['FA_end'] = df['end']\n df['LA_start'] = df['start']\n df['LA_end'] = df['end']\n df['dot'] = '.'\n df['dot2'] = '.'\n df['source'] = 'NA'\n df['type'] = 'NA'\n df['gene_id'] = df['name']\n if file_type == 'REFSEQGFF':\n print(f'Flattening REFSEQGFF like genome')\n my_col = ['chr', 'source', 'type', 'start', 'end', 'dot', 'strand',\n 'dot2', 'gene_id']\n replace_list = [('chr1', 'NC_000001.11'), ('chr2', 'NC_000002.12'),\n ('chr3', 'NC_000003.12'), ('chr4', 'NC_000004.12'), ('chr5',\n 'NC_000005.10'), ('chr6', 'NC_000006.12'), ('chr7',\n 'NC_000007.14'), ('chr8', 'NC_000008.11'), ('chr9',\n 'NC_000009.12'), ('chr10', 'NC_000010.11'), ('chr11',\n 'NC_000011.10'), ('chr12', 'NC_000012.12'), ('chr13',\n 'NC_000013.11'), ('chr14', 'NC_000014.9'), ('chr15',\n 'NC_000015.10'), ('chr16', 'NC_000016.10'), ('chr17',\n 'NC_000017.11'), ('chr18', 'NC_000018.10'), ('chr19',\n 'NC_000019.10'), ('chr20', 'NC_000020.11'), ('chr21',\n 'NC_000021.9'), ('chr22', 'NC_000022.11'), ('chrX',\n 'NC_000023.11'), ('chrY', 'NC_000024.10')]\n df = pd.read_csv(file_in, sep='\\t', header=None, names=my_col,\n comment='#', low_memory=False)\n df = df[df['type'] == 'gene'].copy()\n for l in replace_list:\n df['chr'] = np.where(df['chr'] == l[1], l[0], df['chr'])\n df = df[~df['chr'].str.contains('\\\\.')]\n df['name'] = df['gene_id'].str.split(';', expand=True)[0]\n df['name'] = df['name'].str.replace('ID=gene-', '')\n df['type'] = df['type'].astype(str)\n df_gene = df[df['type'] == 'gene'].copy()\n df_gene['gene_start'] = df_gene['start']\n df_gene['gene_end'] = df_gene['end']\n df_gene = df_gene[['name', 'gene_start', 'gene_end']].copy()\n df = pd.merge(df, df_gene, how='left', on='name')\n df = getAreas(df)\n df['start'] = df['gene_start']\n df['end'] = df['gene_end']\n if file_type == 'REFSEQBED':\n my_col = ['chr', 'start', 'end', 'name', 'dot', 'strand', 'start1',\n 'start2', 'dot2', 'dot3', 'gene_id', 'gene_id2']\n df = pd.read_csv(file_in, sep='\\t', header=None, names=my_col,\n comment='#', low_memory=False)\n df = df[['chr', 'start', 'end', 'name', 'strand']]\n df['FA_start'] = df['start']\n df['FA_end'] = df['end']\n df['LA_start'] = df['start']\n df['LA_end'] = df['end']\n df['dot'] = '.'\n df['dot2'] = '.'\n df['source'] = 'NA'\n df['type'] = 'NA'\n df['gene_id'] = df['name']\n df_plu = df[df['strand'] == '+'].copy()\n df_min = df[df['strand'] == '-'].copy()\n df_plu, df_plu_inside = removeInside(df_plu)\n df_min, df_min_inside = removeInside(df_min)\n df_plu.sort_values(by=['chr', 'end'], inplace=True, ascending=False)\n df_plu.drop_duplicates(subset=['start', 'chr'], keep='first', inplace=True)\n df_min.sort_values(by=['chr', 'start'], inplace=True, ascending=True)\n df_min.drop_duplicates(subset=['end', 'chr'], keep='first', inplace=True)\n df = pd.concat([df_plu, df_min])\n df = df.sort_values(by=['chr', 'end'], ascending=False)\n gtf = df[['chr', 'source', 'type', 'start', 'end', 'dot', 'strand',\n 'dot2', 'gene_id']]\n df = df[['chr', 'start', 'end', 'name', 'strand', 'FA_start', 'FA_end',\n 'LA_start', 
'LA_end']]\n if NEXTFLOW:\n file_in = os.path.basename(file_in)\n fout = f'{file_in[:-4]}_flat.txt'\n fout2 = f'{file_in[:-4]}_flat.gtf'\n fout3 = f'{file_in[:-4]}_flat_CHROMNAMES.txt'\n print(f'Outputting flat file {fout}')\n df.to_csv(fout, sep='\\t', index=None)\n gtf.to_csv(fout2, sep='\\t', index=None, quoting=csv.QUOTE_NONE, header=None\n )\n gtf_names = gtf[['chr']].copy()\n gtf_names.drop_duplicates(subset=['chr'], keep='first', inplace=True)\n gtf_names.to_csv(fout3, sep='\\t', index=None)\n return df\n\n\n<mask token>\n\n\ndef parse_arguments():\n parser = argparse.ArgumentParser(description=\n 'Flatten gtf or bed to first and last exon file. Options in currently are ENSEMBL, BED'\n )\n parser.add_argument('--annotation_in', action='store', metavar=\n 'annotation_in')\n parser.add_argument('--file_type', action='store', metavar='file_type',\n default='ENSEMBL')\n args = parser.parse_args()\n return args\n\n\nif __name__ == '__main__':\n args = parse_arguments()\n file_in = args.annotation_in\n file_type = args.file_type\n flattenGTF(file_in, file_type)\n",
"step-5": "\n#################################################\n### THIS FILE WAS AUTOGENERATED! DO NOT EDIT! ###\n#################################################\n# file to edit: dev_nb/10_DogcatcherFlatten.ipynb\nimport pandas as pd\nimport argparse\nimport csv\nimport os\n\nimport numpy as np\nimport string\n\ndef FivePrimeArea(df):\n df = df.sort_values(by=[\"chr\",\"end\"],ascending=True)\n df[\"FA_start\"] = df[\"gene_start\"]\n df_exon = df[df[\"type\"]==\"exon\"].copy()\n df_exon = df_exon.drop_duplicates(subset=['name'],keep=\"first\")\n df_exon[\"FA_end\"] = df_exon[\"end\"]\n df_exon = df_exon[[\"name\",\"FA_end\"]]\n df = pd.merge(df,df_exon,how=\"left\",on=\"name\")\n df[\"FA_length\"] = df[\"FA_end\"] - df[\"FA_start\"]\n df = df.drop_duplicates(subset=['name'],keep=\"first\")\n return df\n\n\ndef ThreePrimeArea(df):\n df = df.sort_values(by=[\"chr\",\"end\"],ascending=False)\n df[\"LA_end\"] = df[\"gene_end\"]\n df_exon = df[df[\"type\"]==\"exon\"].copy()\n # Keep first exon\n df_exon = df_exon.drop_duplicates(subset=['name'],keep=\"first\")\n df_exon[\"LA_start\"] = df_exon[\"start\"]\n df_exon = df_exon[[\"name\",\"LA_start\"]]\n df = pd.merge(df,df_exon,how=\"left\",on=\"name\")\n df[\"LA_length\"] = df[\"LA_end\"] - df[\"LA_start\"]\n df = df.drop_duplicates(subset=['name'],keep=\"first\")\n return df\n\n\ndef getAreas(df):\n \"\"\"\n This function will get the first and last exons for plu and min strand.\n Call it area because not necessarily exon.\n \"\"\"\n\n df_plu = df[df[\"strand\"]==\"+\"]\n df_min = df[df[\"strand\"]==\"-\"]\n df_plu_FA = FivePrimeArea(df_plu)\n df_min_FA = FivePrimeArea(df_min)\n df_plu_LA = ThreePrimeArea(df_plu)[[\"name\",\"LA_start\",\"LA_end\",\"LA_length\"]]\n df_min_LA = ThreePrimeArea(df_min)[[\"name\",\"LA_start\",\"LA_end\",\"LA_length\"]]\n df_plu = pd.merge(df_plu_FA,df_plu_LA,on=\"name\")\n df_min = pd.merge(df_min_FA,df_min_LA,on=\"name\")\n df = pd.concat([df_plu,df_min])\n return df\n\n\ndef chrDIC(df):\n \"\"\"This function will take a gtf and return strand specific dictionary of different chrm\"\"\"\n chr_names=df['chr'].unique().tolist()\n d_chr = d_gtf_chr = {chrom : df[df[\"chr\"]==chrom] for chrom in chr_names}\n return d_chr\n\ndef countInside(df, start, end):\n rows_df = df[ (start < df[\"start\"]) & (df[\"end\"] < end) ]\n names = rows_df['name'].unique().tolist()\n names = \",\".join(names)\n if len(names) >0:\n return names\n else:\n return np.nan\n\ndef removeInside(df):\n d_chr = chrDIC(df)\n\n df['genes_inside'] = df.apply(lambda row: countInside(d_chr[row['chr']], row[\"start\"], row[\"end\"]), axis=1)\n df2 = df.dropna(subset=['genes_inside'])\n all_names = []\n for i in range(len(df2)):\n names = df2[\"genes_inside\"].iloc[i]\n names = names.split(\",\")\n all_names = all_names + names\n\n inside_genes = list(set(all_names))\n l = len(inside_genes)\n print(f\"Removing {l} genes that are inside other genes\")\n\n df_inside = pd.DataFrame(inside_genes,columns=['name'])\n df = df[~df[\"name\"].isin(df_inside[\"name\"])].copy()\n del df[\"genes_inside\"]\n\n return df, df_inside\n\ndef flattenGTF(file_in,file_type,NEXTFLOW=True):\n if file_type == \"ENSEMBL\":\n print(f\"Flattening ENSEMBL like genome {file_in}\")\n my_col = [\"chr\",\"source\",\"type\",\"start\",\"end\",\"dot\",\"strand\",\"dot2\",\"gene_id\"]\n\n df = pd.read_csv(file_in, sep=\"\\t\",header=None,names=my_col, comment=\"#\",low_memory=False)\n\n df[\"chr\"] = df[\"chr\"].astype(str)\n df = df[~df[\"chr\"].str.contains(\"\\.\") ] # Take out 
patches\n\n df.sort_values(by=[\"chr\",\"start\"], inplace=True, ascending=True)\n fout = f\"{file_in[:-4]}_sort.gtf\"\n df.to_csv(fout,sep=\"\\t\", index=None,quoting=csv.QUOTE_NONE, header=None)\n\n\n df[\"name\"] = df[\"gene_id\"].str.split(';',expand=True)[0]\n df[\"name\"] = df[\"name\"].str.replace(\"gene_id \",\"\")\n df[\"name\"] = df[\"name\"].str.replace(\"\\\"\",\"\")\n\n df[\"type\"] = df[\"type\"].astype(str)\n\n df_gene = df[df[\"type\"]==\"gene\"].copy()\n df_gene[\"gene_start\"] = df_gene[\"start\"]\n df_gene[\"gene_end\"] = df_gene[\"end\"]\n\n df_gene = df_gene[[\"name\",\"gene_start\",\"gene_end\"]].copy()\n df = pd.merge(df,df_gene,how=\"left\",on=\"name\")\n df = getAreas(df)\n df[\"start\"] = df[\"gene_start\"]\n df[\"end\"] = df[\"gene_end\"]\n# df = df[[\"chr\",\"start\",\"end\",\"strand\",\"name\",\"type\"]].copy()\n\n\n if file_type == \"BED\":\n my_col = [\"chr\",\"start\",\"end\",\"name\",\"strand\"]\n df = pd.read_csv(file_in, sep=\"\\t\",header=None,names=my_col, comment=\"#\",low_memory=False)\n df[\"FA_start\"] = df[\"start\"]\n df[\"FA_end\"] = df[\"end\"]\n df[\"LA_start\"] = df[\"start\"]\n df[\"LA_end\"] = df[\"end\"]\n df[\"dot\"] = \".\"\n df[\"dot2\"] = \".\"\n df[\"source\"] = \"NA\"\n df[\"type\"] = \"NA\"\n df[\"gene_id\"] = df[\"name\"]\n\n\n\n\n if file_type == \"REFSEQGFF\":\n\n # Chrome numbers are changed. Need to change back to chr1 etc.\n# https://www.ncbi.nlm.nih.gov/assembly/GCF_000001405.39#/def_asm_Primary_Assembly\n print(f\"Flattening REFSEQGFF like genome\")\n# https://ftp.ncbi.nlm.nih.gov/genomes/refseq/vertebrate_mammalian/Homo_sapiens/reference/\n #download this GCF_000001405.39_GRCh38.p13_genomic.gtf.gz\n # sort and index in IGV\n# NC_000001.11\tBestRefSeq\tgene\t11874\t14409\t.\t+\t.\tgene_id \"DDX11L1\"; transcript_id \"\"; db_xref \"GeneID:100287102\"; db_xref \"HGNC:HGNC:37102\"; description \"DEAD/H-box helicase 11 like 1 (pseudogene)\"; gbkey \"Gene\"; gene \"DDX11L1\"; gene_biotype \"transcribed_pseudogene\"; pseudo \"true\";\n\n\n\n my_col = [\"chr\",\"source\",\"type\",\"start\",\"end\",\"dot\",\"strand\",\"dot2\",\"gene_id\"]\n\n replace_list = [(\"chr1\",\"NC_000001.11\"),\n (\"chr2\",\"NC_000002.12\"),\n (\"chr3\",\"NC_000003.12\"),\n (\"chr4\",\"NC_000004.12\"),\n (\"chr5\",\"NC_000005.10\"),\n (\"chr6\",\"NC_000006.12\"),\n (\"chr7\",\"NC_000007.14\"),\n (\"chr8\",\"NC_000008.11\"),\n (\"chr9\",\"NC_000009.12\"),\n (\"chr10\",\"NC_000010.11\"),\n (\"chr11\",\"NC_000011.10\"),\n (\"chr12\",\"NC_000012.12\"),\n (\"chr13\",\"NC_000013.11\"),\n (\"chr14\",\"NC_000014.9\"),\n (\"chr15\",\"NC_000015.10\"),\n (\"chr16\",\"NC_000016.10\"),\n (\"chr17\",\"NC_000017.11\"),\n (\"chr18\",\"NC_000018.10\"),\n (\"chr19\",\"NC_000019.10\"),\n (\"chr20\",\"NC_000020.11\"),\n (\"chr21\",\"NC_000021.9\"),\n (\"chr22\",\"NC_000022.11\"),\n (\"chrX\",\"NC_000023.11\"),\n (\"chrY\",\"NC_000024.10\")]\n\n\n df = pd.read_csv(file_in, sep=\"\\t\",header=None,names=my_col, comment=\"#\",low_memory=False)\n\n df = df[df[\"type\"]==\"gene\"].copy()\n\n # Change NC names to chr\n for l in replace_list:\n df[\"chr\"] = np.where(df[\"chr\"]==l[1],l[0],df[\"chr\"])\n\n df = df[~df[\"chr\"].str.contains(\"\\.\") ] # Take out patches\n\n\n df[\"name\"] = df[\"gene_id\"].str.split(';',expand=True)[0]\n df[\"name\"] = df[\"name\"].str.replace(\"ID=gene-\",\"\")\n\n df[\"type\"] = df[\"type\"].astype(str)\n\n df_gene = df[df[\"type\"]==\"gene\"].copy()\n df_gene[\"gene_start\"] = df_gene[\"start\"]\n df_gene[\"gene_end\"] = df_gene[\"end\"]\n\n df_gene 
= df_gene[[\"name\",\"gene_start\",\"gene_end\"]].copy()\n df = pd.merge(df,df_gene,how=\"left\",on=\"name\")\n df = getAreas(df)\n df[\"start\"] = df[\"gene_start\"]\n df[\"end\"] = df[\"gene_end\"]\n# df = df[[\"chr\",\"start\",\"end\",\"strand\",\"name\",\"type\"]].copy()\n\n\n\n\n\n\n\n if file_type == \"REFSEQBED\":\n\n# chr1\t11873\t14409\tNR_046018\t0\t+\t\n# 14409\t14409\t0\t3\t354,109,1189,\t0,739,1347,\n\n\n my_col = [\"chr\",\"start\",\"end\",\"name\",\"dot\",\"strand\",\"start1\",\"start2\",\"dot2\",\"dot3\",\"gene_id\",\"gene_id2\"]\n\n df = pd.read_csv(file_in, sep=\"\\t\",header=None,names=my_col, comment=\"#\",low_memory=False)\n df = df[[\"chr\",\"start\",\"end\",\"name\",\"strand\"]]\n df[\"FA_start\"] = df[\"start\"]\n df[\"FA_end\"] = df[\"end\"]\n df[\"LA_start\"] = df[\"start\"]\n df[\"LA_end\"] = df[\"end\"]\n df[\"dot\"] = \".\"\n df[\"dot2\"] = \".\"\n df[\"source\"] = \"NA\"\n df[\"type\"] = \"NA\"\n df[\"gene_id\"] = df[\"name\"]\n\n\n\n df_plu = df[df[\"strand\"]==\"+\"].copy()\n df_min = df[df[\"strand\"]==\"-\"].copy()\n\n df_plu, df_plu_inside = removeInside(df_plu)\n df_min, df_min_inside = removeInside(df_min)\n\n df_plu.sort_values(by=[\"chr\",\"end\"], inplace=True, ascending=False)\n df_plu.drop_duplicates(subset=[\"start\",\"chr\"], keep='first', inplace=True)\n\n df_min.sort_values(by=[\"chr\",\"start\"], inplace=True, ascending=True)\n df_min.drop_duplicates(subset=[\"end\",\"chr\"], keep='first', inplace=True)\n\n\n df = pd.concat([df_plu,df_min])\n df = df.sort_values(by=[\"chr\",\"end\"],ascending=False)\n\n\n gtf = df[[\"chr\",\"source\",\"type\",\"start\",\"end\",\"dot\",\"strand\",\"dot2\",\"gene_id\"] ]\n df = df[[\"chr\",\"start\",\"end\",\"name\",\"strand\",\"FA_start\",\"FA_end\",\"LA_start\",\"LA_end\"]]\n\n\n if NEXTFLOW:\n file_in = os.path.basename(file_in)\n\n fout = f\"{file_in[:-4]}_flat.txt\"\n fout2 = f\"{file_in[:-4]}_flat.gtf\"\n fout3 = f\"{file_in[:-4]}_flat_CHROMNAMES.txt\"\n\n\n\n print(f\"Outputting flat file {fout}\")\n df.to_csv(fout,sep=\"\\t\",index=None)\n\n\n gtf.to_csv(fout2,sep=\"\\t\", index=None,quoting=csv.QUOTE_NONE, header=None)\n\n gtf_names = gtf[[\"chr\"]].copy()\n gtf_names.drop_duplicates(subset=[\"chr\"], keep='first', inplace=True)\n\n gtf_names.to_csv(fout3,sep=\"\\t\", index=None)\n\n return df\n\n\nimport argparse\ndef parse_arguments():\n parser = argparse.ArgumentParser(description='Flatten gtf or bed to first and last exon file. Options in currently are ENSEMBL, BED')\n parser.add_argument('--annotation_in', action= 'store', metavar='annotation_in')\n parser.add_argument('--file_type', action= 'store', metavar='file_type',default=\"ENSEMBL\")\n args = parser.parse_args()\n return args\n\nif __name__==\"__main__\":\n args = parse_arguments()\n file_in = args.annotation_in\n file_type = args.file_type\n\n flattenGTF(file_in,file_type)\n",
"step-ids": [
4,
6,
8,
9,
11
]
}
|
[
4,
6,
8,
9,
11
] |
from collections import deque
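# Print the most frequent letter of the input (case-insensitive),
# or '?' if the top count is tied.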
s = list(input().upper())
new = list(set(s))  # count over the de-duplicated letters, otherwise the per-character counting exceeds the time limit
n = {}
for i in new:
n[i] = s.count(i)
cnt = deque()
for k, v in n.items():
cnt.append(v)
if cnt.count(max(cnt)) > 1:
print('?')
else:
print(max(n, key=n.get))
|
normal
|
{
"blob_id": "5dcb20f52b5041d5f9ea028b383e0f2f10104af9",
"index": 9486,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in new:\n n[i] = s.count(i)\n<mask token>\nfor k, v in n.items():\n cnt.append(v)\nif cnt.count(max(cnt)) > 1:\n print('?')\nelse:\n print(max(n, key=n.get))\n",
"step-3": "<mask token>\ns = list(input().upper())\nnew = list(set(s))\nn = {}\nfor i in new:\n n[i] = s.count(i)\ncnt = deque()\nfor k, v in n.items():\n cnt.append(v)\nif cnt.count(max(cnt)) > 1:\n print('?')\nelse:\n print(max(n, key=n.get))\n",
"step-4": "from collections import deque\ns = list(input().upper())\nnew = list(set(s))\nn = {}\nfor i in new:\n n[i] = s.count(i)\ncnt = deque()\nfor k, v in n.items():\n cnt.append(v)\nif cnt.count(max(cnt)) > 1:\n print('?')\nelse:\n print(max(n, key=n.get))\n",
"step-5": "from collections import deque\ns = list(input().upper())\nnew = list(set(s)) # 중복 제거 한 알파벳 리스트로 카운트 해줘야 시간초과 안남\nn = {}\nfor i in new:\n n[i] = s.count(i)\n\ncnt = deque()\nfor k, v in n.items():\n cnt.append(v)\n\nif cnt.count(max(cnt)) >1:\n print('?')\nelse:\n print(max(n, key=n.get))\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from datetime import datetime
import pytz
from pytz import timezone
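# The commented block below is a naive fixed-offset approach; hard-coded
# offsets break under daylight saving time, hence the pytz version that follows.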
##PDXtime = datetime.now()
##print(PDXtime.hour)
##
##NYCtime = PDXtime.hour + 3
##print(NYCtime)
##
##Londontime = PDXtime.hour + 8
##print(Londontime)
Londontz = timezone('Europe/London')
Londonlocaltime = datetime.now(Londontz)
print(Londonlocaltime)
print(Londonlocaltime.strftime('%H'))  # just the hour, zero-padded, 24-hour format
PDXtz = timezone('America/Los_Angeles')
PDXlocaltime = datetime.now(PDXtz)
print(PDXlocaltime)
print(PDXlocaltime.strftime('%H'))
NYCtz = timezone('America/New_York')
NYClocaltime = datetime.now(NYCtz)
print(NYClocaltime)
print(NYClocaltime.strftime('%H'))
|
normal
|
{
"blob_id": "d8cfd9de95e1f47fc41a5389f5137b4af90dc0f1",
"index": 3949,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(Londonlocaltime)\nprint(Londonlocaltime.strftime('%H'))\n<mask token>\nprint(PDXlocaltime)\nprint(PDXlocaltime.strftime('%H'))\n<mask token>\nprint(NYClocaltime)\nprint(NYClocaltime.strftime('%H'))\n",
"step-3": "<mask token>\nLondontz = timezone('Europe/London')\nLondonlocaltime = datetime.now(Londontz)\nprint(Londonlocaltime)\nprint(Londonlocaltime.strftime('%H'))\nPDXtz = timezone('America/Los_Angeles')\nPDXlocaltime = datetime.now(PDXtz)\nprint(PDXlocaltime)\nprint(PDXlocaltime.strftime('%H'))\nNYCtz = timezone('America/New_York')\nNYClocaltime = datetime.now(NYCtz)\nprint(NYClocaltime)\nprint(NYClocaltime.strftime('%H'))\n",
"step-4": "from datetime import datetime\nimport pytz\nfrom pytz import timezone\nLondontz = timezone('Europe/London')\nLondonlocaltime = datetime.now(Londontz)\nprint(Londonlocaltime)\nprint(Londonlocaltime.strftime('%H'))\nPDXtz = timezone('America/Los_Angeles')\nPDXlocaltime = datetime.now(PDXtz)\nprint(PDXlocaltime)\nprint(PDXlocaltime.strftime('%H'))\nNYCtz = timezone('America/New_York')\nNYClocaltime = datetime.now(NYCtz)\nprint(NYClocaltime)\nprint(NYClocaltime.strftime('%H'))\n",
"step-5": "from datetime import datetime\nimport pytz\nfrom pytz import timezone \n\n\n\n##PDXtime = datetime.now()\n##print(PDXtime.hour)\n##\n##NYCtime = PDXtime.hour + 3\n##print(NYCtime)\n##\n##Londontime = PDXtime.hour + 8\n##print(Londontime)\n\n\n\nLondontz = timezone('Europe/London')\nLondonlocaltime = datetime.now(Londontz)\nprint(Londonlocaltime)\nprint(Londonlocaltime.strftime('%H')) #just the hour in 24 hr format\n\n\nPDXtz = timezone('America/Los_Angeles')\nPDXlocaltime = datetime.now(PDXtz)\nprint(PDXlocaltime)\nprint(PDXlocaltime.strftime('%H'))\n\nNYCtz = timezone('America/New_York')\nNYClocaltime = datetime.now(NYCtz)\nprint(NYClocaltime)\nprint(NYClocaltime.strftime('%H'))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from collections import OrderedDict
import copy
import numpy as np
from scipy.optimize import curve_fit
from ... import Operation as opmod
from ...Operation import Operation
from ....tools import saxstools
class SpectrumFit(Operation):
"""
Use a measured SAXS spectrum (I(q) vs. q),
to optimize the parameters of a theoretical SAXS spectrum
for one or several populations of scatterers.
Works by minimizing an objective function that compares
the measured spectrum against the theoretical result.
TODO: document the algorithm here.
Input arrays of q and I(q),
a string indicating choice of objective function,
a dict of features describing the spectrum,
and a list of strings indicating which keys in the dict
should be used as optimization parameters.
The input features dict includes initial fit parameters
as well as the flags indicating which populations to include.
The features dict is of the same format as
SpectrumProfiler and SpectrumParameterization outputs.
Outputs a return code and the features dict,
with entries updated for the optimized parameters.
Also returns the theoretical result for I(q),
and a renormalized measured spectrum for visual comparison.
"""
def __init__(self):
input_names = ['q','I','flags','params','fit_params','objfun']
output_names = ['params','q_I_opt']
super(SpectrumFit, self).__init__(input_names, output_names)
self.input_doc['q'] = '1d array of wave vector values in 1/Angstrom units'
self.input_doc['I'] = '1d array of intensity values I(q)'
self.input_doc['flags'] = 'dict of flags indicating what populations to fit'
self.input_doc['params'] = 'dict of initial values for the scattering equation parameters '\
'for each of the populations specified in the input flags'
self.input_doc['fit_params'] = 'list of strings (keys) indicating which parameters to optimize'
self.input_doc['objfun'] = 'string indicating objective function for optimization: '\
+ 'see documentation of saxstools.fit_spectrum() for supported objective functions'
self.output_doc['params'] = 'dict of scattering equation parameters copied from inputs, '\
'with values optimized for all keys specified in fit_params'
self.output_doc['q_I_opt'] = 'n-by-2 array of q and the optimized computed intensity spectrum'
self.input_type['q'] = opmod.workflow_item
self.input_type['I'] = opmod.workflow_item
self.input_type['flags'] = opmod.workflow_item
self.input_type['params'] = opmod.workflow_item
self.inputs['objfun'] = 'chi2log'
def run(self):
f = self.inputs['flags']
if f['bad_data'] or not any([f['precursor_scattering'],f['form_factor_scattering'],f['diffraction_peaks']]):
self.outputs['params'] = {}
return
if f['diffraction_peaks']:
self.outputs['params'] = {'ERROR_MESSAGE':'diffraction peak fitting not yet supported'}
return
q, I = self.inputs['q'], self.inputs['I']
m = self.inputs['objfun']
p = self.inputs['params']
fitkeys = self.inputs['fit_params']
#p_opt = copy.deepcopy(p)
# Set up constraints as needed
c = []
if f['form_factor_scattering'] or f['diffraction_peaks']:
c = ['fix_I0']
# Fitting happens here
p_opt = saxstools.fit_spectrum(q,I,m,f,p,fitkeys,c)
I_opt = saxstools.compute_saxs(q,f,p_opt)
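        # Compare measured and optimized intensities on a standardized log
        # scale, restricted to strictly positive values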
nz = ((I>0)&(I_opt>0))
logI_nz = np.log(I[nz])
logIopt_nz = np.log(I_opt[nz])
Imean = np.mean(logI_nz)
Istd = np.std(logI_nz)
logI_nz_s = (logI_nz - Imean) / Istd
logIopt_nz_s = (logIopt_nz - Imean) / Istd
f['R2log_fit'] = saxstools.compute_Rsquared(np.log(I[nz]),np.log(I_opt[nz]))
f['chi2log_fit'] = saxstools.compute_chi2(logI_nz_s,logIopt_nz_s)
q_I_opt = np.array([q,I_opt]).T
        self.outputs['params'] = p_opt  # 'params' is the declared output name; fit-quality metrics were written onto the flags dict above
self.outputs['q_I_opt'] = q_I_opt
|
normal
|
{
"blob_id": "7b5713c9a5afa911df1c2939751de30412162f15",
"index": 446,
"step-1": "<mask token>\n\n\nclass SpectrumFit(Operation):\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass SpectrumFit(Operation):\n <mask token>\n\n def __init__(self):\n input_names = ['q', 'I', 'flags', 'params', 'fit_params', 'objfun']\n output_names = ['params', 'q_I_opt']\n super(SpectrumFit, self).__init__(input_names, output_names)\n self.input_doc['q'\n ] = '1d array of wave vector values in 1/Angstrom units'\n self.input_doc['I'] = '1d array of intensity values I(q)'\n self.input_doc['flags'\n ] = 'dict of flags indicating what populations to fit'\n self.input_doc['params'] = (\n 'dict of initial values for the scattering equation parameters for each of the populations specified in the input flags'\n )\n self.input_doc['fit_params'\n ] = 'list of strings (keys) indicating which parameters to optimize'\n self.input_doc['objfun'] = (\n 'string indicating objective function for optimization: ' +\n 'see documentation of saxstools.fit_spectrum() for supported objective functions'\n )\n self.output_doc['params'] = (\n 'dict of scattering equation parameters copied from inputs, with values optimized for all keys specified in fit_params'\n )\n self.output_doc['q_I_opt'\n ] = 'n-by-2 array of q and the optimized computed intensity spectrum'\n self.input_type['q'] = opmod.workflow_item\n self.input_type['I'] = opmod.workflow_item\n self.input_type['flags'] = opmod.workflow_item\n self.input_type['params'] = opmod.workflow_item\n self.inputs['objfun'] = 'chi2log'\n\n def run(self):\n f = self.inputs['flags']\n if f['bad_data'] or not any([f['precursor_scattering'], f[\n 'form_factor_scattering'], f['diffraction_peaks']]):\n self.outputs['params'] = {}\n return\n if f['diffraction_peaks']:\n self.outputs['params'] = {'ERROR_MESSAGE':\n 'diffraction peak fitting not yet supported'}\n return\n q, I = self.inputs['q'], self.inputs['I']\n m = self.inputs['objfun']\n p = self.inputs['params']\n fitkeys = self.inputs['fit_params']\n c = []\n if f['form_factor_scattering'] or f['diffraction_peaks']:\n c = ['fix_I0']\n p_opt = saxstools.fit_spectrum(q, I, m, f, p, fitkeys, c)\n I_opt = saxstools.compute_saxs(q, f, p_opt)\n nz = (I > 0) & (I_opt > 0)\n logI_nz = np.log(I[nz])\n logIopt_nz = np.log(I_opt[nz])\n Imean = np.mean(logI_nz)\n Istd = np.std(logI_nz)\n logI_nz_s = (logI_nz - Imean) / Istd\n logIopt_nz_s = (logIopt_nz - Imean) / Istd\n f['R2log_fit'] = saxstools.compute_Rsquared(np.log(I[nz]), np.log(\n I_opt[nz]))\n f['chi2log_fit'] = saxstools.compute_chi2(logI_nz_s, logIopt_nz_s)\n q_I_opt = np.array([q, I_opt]).T\n self.outputs['features'] = f\n self.outputs['q_I_opt'] = q_I_opt\n",
"step-3": "<mask token>\n\n\nclass SpectrumFit(Operation):\n \"\"\"\n Use a measured SAXS spectrum (I(q) vs. q),\n to optimize the parameters of a theoretical SAXS spectrum\n for one or several populations of scatterers. \n Works by minimizing an objective function that compares\n the measured spectrum against the theoretical result.\n TODO: document the algorithm here.\n\n Input arrays of q and I(q), \n a string indicating choice of objective function, \n a dict of features describing the spectrum,\n and a list of strings indicating which keys in the dict \n should be used as optimization parameters.\n The input features dict includes initial fit parameters\n as well as the flags indicating which populations to include.\n The features dict is of the same format as\n SpectrumProfiler and SpectrumParameterization outputs.\n\n Outputs a return code and the features dict,\n with entries updated for the optimized parameters.\n Also returns the theoretical result for I(q),\n and a renormalized measured spectrum for visual comparison.\n \"\"\"\n\n def __init__(self):\n input_names = ['q', 'I', 'flags', 'params', 'fit_params', 'objfun']\n output_names = ['params', 'q_I_opt']\n super(SpectrumFit, self).__init__(input_names, output_names)\n self.input_doc['q'\n ] = '1d array of wave vector values in 1/Angstrom units'\n self.input_doc['I'] = '1d array of intensity values I(q)'\n self.input_doc['flags'\n ] = 'dict of flags indicating what populations to fit'\n self.input_doc['params'] = (\n 'dict of initial values for the scattering equation parameters for each of the populations specified in the input flags'\n )\n self.input_doc['fit_params'\n ] = 'list of strings (keys) indicating which parameters to optimize'\n self.input_doc['objfun'] = (\n 'string indicating objective function for optimization: ' +\n 'see documentation of saxstools.fit_spectrum() for supported objective functions'\n )\n self.output_doc['params'] = (\n 'dict of scattering equation parameters copied from inputs, with values optimized for all keys specified in fit_params'\n )\n self.output_doc['q_I_opt'\n ] = 'n-by-2 array of q and the optimized computed intensity spectrum'\n self.input_type['q'] = opmod.workflow_item\n self.input_type['I'] = opmod.workflow_item\n self.input_type['flags'] = opmod.workflow_item\n self.input_type['params'] = opmod.workflow_item\n self.inputs['objfun'] = 'chi2log'\n\n def run(self):\n f = self.inputs['flags']\n if f['bad_data'] or not any([f['precursor_scattering'], f[\n 'form_factor_scattering'], f['diffraction_peaks']]):\n self.outputs['params'] = {}\n return\n if f['diffraction_peaks']:\n self.outputs['params'] = {'ERROR_MESSAGE':\n 'diffraction peak fitting not yet supported'}\n return\n q, I = self.inputs['q'], self.inputs['I']\n m = self.inputs['objfun']\n p = self.inputs['params']\n fitkeys = self.inputs['fit_params']\n c = []\n if f['form_factor_scattering'] or f['diffraction_peaks']:\n c = ['fix_I0']\n p_opt = saxstools.fit_spectrum(q, I, m, f, p, fitkeys, c)\n I_opt = saxstools.compute_saxs(q, f, p_opt)\n nz = (I > 0) & (I_opt > 0)\n logI_nz = np.log(I[nz])\n logIopt_nz = np.log(I_opt[nz])\n Imean = np.mean(logI_nz)\n Istd = np.std(logI_nz)\n logI_nz_s = (logI_nz - Imean) / Istd\n logIopt_nz_s = (logIopt_nz - Imean) / Istd\n f['R2log_fit'] = saxstools.compute_Rsquared(np.log(I[nz]), np.log(\n I_opt[nz]))\n f['chi2log_fit'] = saxstools.compute_chi2(logI_nz_s, logIopt_nz_s)\n q_I_opt = np.array([q, I_opt]).T\n self.outputs['features'] = f\n self.outputs['q_I_opt'] = q_I_opt\n",
"step-4": "from collections import OrderedDict\nimport copy\nimport numpy as np\nfrom scipy.optimize import curve_fit\nfrom ... import Operation as opmod\nfrom ...Operation import Operation\nfrom ....tools import saxstools\n\n\nclass SpectrumFit(Operation):\n \"\"\"\n Use a measured SAXS spectrum (I(q) vs. q),\n to optimize the parameters of a theoretical SAXS spectrum\n for one or several populations of scatterers. \n Works by minimizing an objective function that compares\n the measured spectrum against the theoretical result.\n TODO: document the algorithm here.\n\n Input arrays of q and I(q), \n a string indicating choice of objective function, \n a dict of features describing the spectrum,\n and a list of strings indicating which keys in the dict \n should be used as optimization parameters.\n The input features dict includes initial fit parameters\n as well as the flags indicating which populations to include.\n The features dict is of the same format as\n SpectrumProfiler and SpectrumParameterization outputs.\n\n Outputs a return code and the features dict,\n with entries updated for the optimized parameters.\n Also returns the theoretical result for I(q),\n and a renormalized measured spectrum for visual comparison.\n \"\"\"\n\n def __init__(self):\n input_names = ['q', 'I', 'flags', 'params', 'fit_params', 'objfun']\n output_names = ['params', 'q_I_opt']\n super(SpectrumFit, self).__init__(input_names, output_names)\n self.input_doc['q'\n ] = '1d array of wave vector values in 1/Angstrom units'\n self.input_doc['I'] = '1d array of intensity values I(q)'\n self.input_doc['flags'\n ] = 'dict of flags indicating what populations to fit'\n self.input_doc['params'] = (\n 'dict of initial values for the scattering equation parameters for each of the populations specified in the input flags'\n )\n self.input_doc['fit_params'\n ] = 'list of strings (keys) indicating which parameters to optimize'\n self.input_doc['objfun'] = (\n 'string indicating objective function for optimization: ' +\n 'see documentation of saxstools.fit_spectrum() for supported objective functions'\n )\n self.output_doc['params'] = (\n 'dict of scattering equation parameters copied from inputs, with values optimized for all keys specified in fit_params'\n )\n self.output_doc['q_I_opt'\n ] = 'n-by-2 array of q and the optimized computed intensity spectrum'\n self.input_type['q'] = opmod.workflow_item\n self.input_type['I'] = opmod.workflow_item\n self.input_type['flags'] = opmod.workflow_item\n self.input_type['params'] = opmod.workflow_item\n self.inputs['objfun'] = 'chi2log'\n\n def run(self):\n f = self.inputs['flags']\n if f['bad_data'] or not any([f['precursor_scattering'], f[\n 'form_factor_scattering'], f['diffraction_peaks']]):\n self.outputs['params'] = {}\n return\n if f['diffraction_peaks']:\n self.outputs['params'] = {'ERROR_MESSAGE':\n 'diffraction peak fitting not yet supported'}\n return\n q, I = self.inputs['q'], self.inputs['I']\n m = self.inputs['objfun']\n p = self.inputs['params']\n fitkeys = self.inputs['fit_params']\n c = []\n if f['form_factor_scattering'] or f['diffraction_peaks']:\n c = ['fix_I0']\n p_opt = saxstools.fit_spectrum(q, I, m, f, p, fitkeys, c)\n I_opt = saxstools.compute_saxs(q, f, p_opt)\n nz = (I > 0) & (I_opt > 0)\n logI_nz = np.log(I[nz])\n logIopt_nz = np.log(I_opt[nz])\n Imean = np.mean(logI_nz)\n Istd = np.std(logI_nz)\n logI_nz_s = (logI_nz - Imean) / Istd\n logIopt_nz_s = (logIopt_nz - Imean) / Istd\n f['R2log_fit'] = saxstools.compute_Rsquared(np.log(I[nz]), np.log(\n 
I_opt[nz]))\n f['chi2log_fit'] = saxstools.compute_chi2(logI_nz_s, logIopt_nz_s)\n q_I_opt = np.array([q, I_opt]).T\n self.outputs['features'] = f\n self.outputs['q_I_opt'] = q_I_opt\n",
"step-5": "from collections import OrderedDict\nimport copy\n\nimport numpy as np\nfrom scipy.optimize import curve_fit\n\nfrom ... import Operation as opmod \nfrom ...Operation import Operation\nfrom ....tools import saxstools\n\nclass SpectrumFit(Operation):\n \"\"\"\n Use a measured SAXS spectrum (I(q) vs. q),\n to optimize the parameters of a theoretical SAXS spectrum\n for one or several populations of scatterers. \n Works by minimizing an objective function that compares\n the measured spectrum against the theoretical result.\n TODO: document the algorithm here.\n\n Input arrays of q and I(q), \n a string indicating choice of objective function, \n a dict of features describing the spectrum,\n and a list of strings indicating which keys in the dict \n should be used as optimization parameters.\n The input features dict includes initial fit parameters\n as well as the flags indicating which populations to include.\n The features dict is of the same format as\n SpectrumProfiler and SpectrumParameterization outputs.\n\n Outputs a return code and the features dict,\n with entries updated for the optimized parameters.\n Also returns the theoretical result for I(q),\n and a renormalized measured spectrum for visual comparison.\n \"\"\"\n\n def __init__(self):\n input_names = ['q','I','flags','params','fit_params','objfun']\n output_names = ['params','q_I_opt']\n super(SpectrumFit, self).__init__(input_names, output_names)\n self.input_doc['q'] = '1d array of wave vector values in 1/Angstrom units'\n self.input_doc['I'] = '1d array of intensity values I(q)'\n self.input_doc['flags'] = 'dict of flags indicating what populations to fit'\n self.input_doc['params'] = 'dict of initial values for the scattering equation parameters '\\\n 'for each of the populations specified in the input flags'\n self.input_doc['fit_params'] = 'list of strings (keys) indicating which parameters to optimize'\n self.input_doc['objfun'] = 'string indicating objective function for optimization: '\\\n + 'see documentation of saxstools.fit_spectrum() for supported objective functions'\n self.output_doc['params'] = 'dict of scattering equation parameters copied from inputs, '\\\n 'with values optimized for all keys specified in fit_params'\n self.output_doc['q_I_opt'] = 'n-by-2 array of q and the optimized computed intensity spectrum'\n self.input_type['q'] = opmod.workflow_item\n self.input_type['I'] = opmod.workflow_item\n self.input_type['flags'] = opmod.workflow_item\n self.input_type['params'] = opmod.workflow_item\n self.inputs['objfun'] = 'chi2log' \n\n def run(self):\n f = self.inputs['flags']\n if f['bad_data'] or not any([f['precursor_scattering'],f['form_factor_scattering'],f['diffraction_peaks']]):\n self.outputs['params'] = {} \n return\n if f['diffraction_peaks']:\n self.outputs['params'] = {'ERROR_MESSAGE':'diffraction peak fitting not yet supported'}\n return\n q, I = self.inputs['q'], self.inputs['I']\n m = self.inputs['objfun']\n p = self.inputs['params']\n fitkeys = self.inputs['fit_params']\n #p_opt = copy.deepcopy(p)\n\n # Set up constraints as needed\n c = []\n if f['form_factor_scattering'] or f['diffraction_peaks']:\n c = ['fix_I0']\n\n # Fitting happens here\n p_opt = saxstools.fit_spectrum(q,I,m,f,p,fitkeys,c)\n\n I_opt = saxstools.compute_saxs(q,f,p_opt)\n\n nz = ((I>0)&(I_opt>0))\n logI_nz = np.log(I[nz])\n logIopt_nz = np.log(I_opt[nz])\n Imean = np.mean(logI_nz)\n Istd = np.std(logI_nz)\n logI_nz_s = (logI_nz - Imean) / Istd\n logIopt_nz_s = (logIopt_nz - Imean) / Istd\n f['R2log_fit'] = 
saxstools.compute_Rsquared(np.log(I[nz]),np.log(I_opt[nz]))\n f['chi2log_fit'] = saxstools.compute_chi2(logI_nz_s,logIopt_nz_s)\n\n q_I_opt = np.array([q,I_opt]).T\n self.outputs['features'] = f \n self.outputs['q_I_opt'] = q_I_opt\n\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
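# Monthly counts (rows Jun through May) across five labeled columns,
# rendered as a heatmap with a colorbar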
month = ['Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec', 'Jan', 'Feb',
'Mar', 'Apr', 'May']
df = pd.DataFrame([[53, 0, 5, 3, 3], [51, 0, 1, 3, 2], [70, 4, 7, 5, 1], [
66, 4, 1, 4, 2], [64, 4, 4, 3, 2], [69, 4, 7, 8, 2], [45, 2, 8, 4, 2],
[29, 1, 6, 6, 1], [56, 4, 4, 2, 2], [41, 2, 2, 2, 1], [3, 0, 0, 0, 0],
[8, 0, 0, 0, 0]], columns=['1000', '2000', '3000', '4000', '5000'],
index=month)
plt.pcolor(df)
plt.colorbar()
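# offset the ticks by 0.5 so the labels sit at the centers of the cells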
plt.yticks(np.arange(0.5, len(df.index), 1), df.index)
plt.xticks(np.arange(0.5, len(df.columns), 1), df.columns)
plt.show()
|
normal
|
{
"blob_id": "f5c277da2b22debe26327464ae736892360059b4",
"index": 781,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.pcolor(df)\nplt.colorbar()\nplt.yticks(np.arange(0.5, len(df.index), 1), df.index)\nplt.xticks(np.arange(0.5, len(df.columns), 1), df.columns)\nplt.show()\n",
"step-3": "<mask token>\nmonth = ['Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec', 'Jan', 'Feb',\n 'Mar', 'Apr', 'May']\ndf = pd.DataFrame([[53, 0, 5, 3, 3], [51, 0, 1, 3, 2], [70, 4, 7, 5, 1], [\n 66, 4, 1, 4, 2], [64, 4, 4, 3, 2], [69, 4, 7, 8, 2], [45, 2, 8, 4, 2],\n [29, 1, 6, 6, 1], [56, 4, 4, 2, 2], [41, 2, 2, 2, 1], [3, 0, 0, 0, 0],\n [8, 0, 0, 0, 0]], columns=['1000', '2000', '3000', '4000', '5000'],\n index=month)\nplt.pcolor(df)\nplt.colorbar()\nplt.yticks(np.arange(0.5, len(df.index), 1), df.index)\nplt.xticks(np.arange(0.5, len(df.columns), 1), df.columns)\nplt.show()\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nmonth = ['Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec', 'Jan', 'Feb',\n 'Mar', 'Apr', 'May']\ndf = pd.DataFrame([[53, 0, 5, 3, 3], [51, 0, 1, 3, 2], [70, 4, 7, 5, 1], [\n 66, 4, 1, 4, 2], [64, 4, 4, 3, 2], [69, 4, 7, 8, 2], [45, 2, 8, 4, 2],\n [29, 1, 6, 6, 1], [56, 4, 4, 2, 2], [41, 2, 2, 2, 1], [3, 0, 0, 0, 0],\n [8, 0, 0, 0, 0]], columns=['1000', '2000', '3000', '4000', '5000'],\n index=month)\nplt.pcolor(df)\nplt.colorbar()\nplt.yticks(np.arange(0.5, len(df.index), 1), df.index)\nplt.xticks(np.arange(0.5, len(df.columns), 1), df.columns)\nplt.show()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- encoding:utf-8 -*-
import os
import unittest
from HTMLTestRunner_cn import HTMLTestRunner
from time import sleep
from framework.SunFlower import SunFlower
from testcase.TestCRM import TestCRM
class TestCRMcreateCustomer(TestCRM):
    # Create a customer
def createCustomer(self):
        # Tap the Customer icon
        self.driver.click("text= 客户 ")
        # Tap the add-customer button (the locator appears to be the tail of a base64-encoded icon)
        self.driver.click("text=sYVInwAAAABJRU5ErkJggg==")
        # Enter the customer name ("请输入" is the "please enter" placeholder)
        self.driver.send_keys("xpath=//*[@text=\"请输入\"][1]","crm000001")
        # Enter the customer number
        self.driver.send_keys("xpath=//*[@text=\"请输入\"][1]","c000001")
        # Select the customer's lead source
        self.driver.click_index("class=android.view.View",59)
        self.driver.click("text=电话营销")  # "电话营销" = telemarketing
        # Save
        self.driver.click("text=保存")  # "保存" = Save
        # Tap back
        self.driver.click_index("class=android.view.View",10)
        # sleep(5)
        # # # Swipe the screen up
        # # self.driver.swipe_up(n=3)
def test_weiChat(self):
self.login()
self.createCustomer()
self.logout()
if __name__ == "__main__":
report_path = os.path.dirname(__file__) + "/report/" + "TestCRM_report.html"
    suite = unittest.TestLoader().loadTestsFromTestCase(TestCRMcreateCustomer)  # load this module's test case rather than the imported base class
    runner = HTMLTestRunner(title="悟空CRM测试报告", description="登录", stream=open(report_path, "wb"),
                            verbosity=2, retry=0, save_last_try=True)  # title: "WuKong CRM test report"; description: "login"
    runner.run(suite)
|
normal
|
{
"blob_id": "74bc530d53cd86c52c44ba8e98d4d8f502032340",
"index": 2423,
"step-1": "<mask token>\n\n\nclass TestCRMcreateCustomer(TestCRM):\n <mask token>\n\n def test_weiChat(self):\n self.login()\n self.createCustomer()\n self.logout()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestCRMcreateCustomer(TestCRM):\n\n def createCustomer(self):\n self.driver.click('text= 客户 ')\n self.driver.click('text=sYVInwAAAABJRU5ErkJggg==')\n self.driver.send_keys('xpath=//*[@text=\"请输入\"][1]', 'crm000001')\n self.driver.send_keys('xpath=//*[@text=\"请输入\"][1]', 'c000001')\n self.driver.click_index('class=android.view.View', 59)\n self.driver.click('text=电话营销')\n self.driver.click('text=保存')\n self.driver.click_index('class=android.view.View', 10)\n\n def test_weiChat(self):\n self.login()\n self.createCustomer()\n self.logout()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass TestCRMcreateCustomer(TestCRM):\n\n def createCustomer(self):\n self.driver.click('text= 客户 ')\n self.driver.click('text=sYVInwAAAABJRU5ErkJggg==')\n self.driver.send_keys('xpath=//*[@text=\"请输入\"][1]', 'crm000001')\n self.driver.send_keys('xpath=//*[@text=\"请输入\"][1]', 'c000001')\n self.driver.click_index('class=android.view.View', 59)\n self.driver.click('text=电话营销')\n self.driver.click('text=保存')\n self.driver.click_index('class=android.view.View', 10)\n\n def test_weiChat(self):\n self.login()\n self.createCustomer()\n self.logout()\n\n\nif __name__ == '__main__':\n report_path = os.path.dirname(__file__\n ) + '/report/' + 'TestCRM_report.html'\n suite = unittest.TestLoader().loadTestsFromTestCase(TestCRM)\n runer = HTMLTestRunner(title='悟空CRM测试报告', description='登录', stream=open\n (report_path, 'wb'), verbosity=2, retry=0, save_last_try=True)\n runer.run(suite)\n",
"step-4": "import os\nimport unittest\nfrom HTMLTestRunner_cn import HTMLTestRunner\nfrom time import sleep\nfrom framework.SunFlower import SunFlower\nfrom testcase.TestCRM import TestCRM\n\n\nclass TestCRMcreateCustomer(TestCRM):\n\n def createCustomer(self):\n self.driver.click('text= 客户 ')\n self.driver.click('text=sYVInwAAAABJRU5ErkJggg==')\n self.driver.send_keys('xpath=//*[@text=\"请输入\"][1]', 'crm000001')\n self.driver.send_keys('xpath=//*[@text=\"请输入\"][1]', 'c000001')\n self.driver.click_index('class=android.view.View', 59)\n self.driver.click('text=电话营销')\n self.driver.click('text=保存')\n self.driver.click_index('class=android.view.View', 10)\n\n def test_weiChat(self):\n self.login()\n self.createCustomer()\n self.logout()\n\n\nif __name__ == '__main__':\n report_path = os.path.dirname(__file__\n ) + '/report/' + 'TestCRM_report.html'\n suite = unittest.TestLoader().loadTestsFromTestCase(TestCRM)\n runer = HTMLTestRunner(title='悟空CRM测试报告', description='登录', stream=open\n (report_path, 'wb'), verbosity=2, retry=0, save_last_try=True)\n runer.run(suite)\n",
"step-5": "# -*- encoding:utf-8 -*-\nimport os\nimport unittest\nfrom HTMLTestRunner_cn import HTMLTestRunner\nfrom time import sleep\n\nfrom framework.SunFlower import SunFlower\nfrom testcase.TestCRM import TestCRM\n\n\nclass TestCRMcreateCustomer(TestCRM):\n\n # 创建客户\n def createCustomer(self):\n\n # 点击客户图标\n self.driver.click(\"text= 客户 \")\n # 点击添加客户按钮\n self.driver.click(\"text=sYVInwAAAABJRU5ErkJggg==\")\n #输入客户名称\n self.driver.send_keys(\"xpath=//*[@text=\\\"请输入\\\"][1]\",\"crm000001\")\n\n #输入客户编号\n self.driver.send_keys(\"xpath=//*[@text=\\\"请输入\\\"][1]\",\"c000001\")\n #选择客户信息来源\n self.driver.click_index(\"class=android.view.View\",59)\n self.driver.click(\"text=电话营销\")\n #保存\n self.driver.click(\"text=保存\")\n #点击返回\n self.driver.click_index(\"class=android.view.View\",10)\n # sleep(5)\n # # # 向上滑动屏幕\n # # self.driver.swipe_up(n=3)\n\n def test_weiChat(self):\n self.login()\n self.createCustomer()\n self.logout()\n\n\nif __name__ == \"__main__\":\n report_path = os.path.dirname(__file__) + \"/report/\" + \"TestCRM_report.html\"\n suite = unittest.TestLoader().loadTestsFromTestCase(TestCRM)\n runer = HTMLTestRunner(title=\"悟空CRM测试报告\", description=\"登录\", stream=open(report_path, \"wb\"),\n verbosity=2, retry=0, save_last_try=True)\n runer.run(suite)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import os
from unittest import TestCase
class TestMixin(TestCase):
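    """Mixin providing shared fixture paths (10x v2 BAM and FASTQ files)."""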
@classmethod
def setUpClass(cls):
cls.base_dir = os.path.dirname(os.path.abspath(__file__))
cls.fixtures_dir = os.path.join(cls.base_dir, 'fixtures')
cls.bam_10xv2_path = os.path.join(cls.fixtures_dir, '10xv2.bam')
cls.fastq_10xv2_paths = [os.path.join(cls.fixtures_dir,
'10xv2_1.fastq.gz'), os.path.join(cls.fixtures_dir,
'10xv2_2.fastq.gz')]
|
normal
|
{
"blob_id": "268a8252f74a2bdafdadae488f98997c91f5607c",
"index": 2686,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass TestMixin(TestCase):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TestMixin(TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.base_dir = os.path.dirname(os.path.abspath(__file__))\n cls.fixtures_dir = os.path.join(cls.base_dir, 'fixtures')\n cls.bam_10xv2_path = os.path.join(cls.fixtures_dir, '10xv2.bam')\n cls.fastq_10xv2_paths = [os.path.join(cls.fixtures_dir,\n '10xv2_1.fastq.gz'), os.path.join(cls.fixtures_dir,\n '10xv2_2.fastq.gz')]\n",
"step-4": "import os\nfrom unittest import TestCase\n\n\nclass TestMixin(TestCase):\n\n @classmethod\n def setUpClass(cls):\n cls.base_dir = os.path.dirname(os.path.abspath(__file__))\n cls.fixtures_dir = os.path.join(cls.base_dir, 'fixtures')\n cls.bam_10xv2_path = os.path.join(cls.fixtures_dir, '10xv2.bam')\n cls.fastq_10xv2_paths = [os.path.join(cls.fixtures_dir,\n '10xv2_1.fastq.gz'), os.path.join(cls.fixtures_dir,\n '10xv2_2.fastq.gz')]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import sys
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from uraeus.nmbd.python import simulation
from uraeus.nmbd.python.engine.numerics.math_funcs import A, B
database_directory = os.path.abspath('../../')
sys.path.append(database_directory)
from uraeus_fsae.simenv.assemblies import asurt_FS17_v1 as num_assm
from controllers import speed_controller, stanley_controller
num_model = num_assm.num_model
dt = num_assm.dt
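# TR is presumably the tire radius in mm (10 in = 254 mm), used below to turn wheel force into drive torque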
TR = 254
def generate_circular_path(radius, offset):
theta = np.deg2rad(np.linspace(0, 360, 360))
x_data = radius * np.sin(theta) + offset[0]
y_data = radius * np.cos(theta) + offset[1]
radii = radius * np.ones((360,))
return x_data, y_data, radii
x_data, y_data, radii = generate_circular_path(10.5, (0, -10.5))
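# pack the path as (x, y, radius) columns and scale from meters to
# millimeters, which appears to be the model's length unit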
path_data = np.zeros((360, 3))
path_data[:, 0] = -1e3 * x_data
path_data[:, 1] = 1e3 * y_data
path_data[:, 2] = 1e3 * radii
plt.figure(figsize=(10, 5))
plt.plot(path_data[:, 0], path_data[:, 1])
plt.grid()
plt.show()
logitudinal_controller = speed_controller(35, dt)
lateral_controller = stanley_controller(path_data, 25)
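# the speed setpoint (35) and Stanley gain (25) are tuning choices of this
# script; their exact units depend on the controllers module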
def terrain_state(x, y):
local_normal = np.array([[0],[0],[1]], dtype=np.float64)
    height = 0
    return [local_normal, height]
def torque_function(t):
P_ch = num_model.Subsystems.CH.P_rbs_chassis
Rd = num_model.Subsystems.CH.Rd_rbs_chassis
factor = logitudinal_controller.get_torque_factor(P_ch, Rd)
return factor
def RR_Torque(t):
factor = torque_function(t)
torque = -factor*(70*9.81)*1e6*TR
return torque
def RL_Torque(t):
factor = torque_function(t)
torque = -factor*(70*9.81)*1e6*TR
return torque
def steering_function(t):
R_ch = num_model.Subsystems.CH.R_rbs_chassis
P_ch = num_model.Subsystems.CH.P_rbs_chassis
Rd_ch = num_model.Subsystems.CH.Rd_rbs_chassis
Pd_ch = num_model.Subsystems.CH.Pd_rbs_chassis
rbar_ax1 = np.array([[-800], [0], [0]], dtype=np.float64)
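    # world position of the front-axle reference point and its longitudinal
    # velocity expressed in the chassis frame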
r_ax1 = R_ch + A(P_ch)@rbar_ax1
vel = (A(P_ch).T @ (Rd_ch + B(P_ch, rbar_ax1)@Pd_ch))[0,0]
delta = lateral_controller.get_steer_factor(r_ax1, P_ch, Pd_ch, vel)
travel = delta * 18
#print('Travel = %s'%travel)
return travel
def zero_func(t):
return np.zeros((3,1), dtype=np.float64)
num_assm.terrain_data.get_state = terrain_state
num_assm.ST1_config.UF_mcs_rack_act = steering_function
num_assm.AX1_config.UF_far_drive = RR_Torque
num_assm.AX1_config.UF_fal_drive = RL_Torque
#num_assm.DR2_config.UF_far_drive = RR_Torque
#num_assm.DR2_config.UF_fal_drive = RL_Torque
num_assm.CH_config.UF_fas_aero_drag_F = zero_func
num_assm.CH_config.UF_fas_aero_drag_T = zero_func
# =============================================================================
# Setting and Starting Simulation
# =============================================================================
sim = simulation('sim', num_model, 'dds')
sim.set_time_array(15, dt)
# Getting Equilibrium results as initial conditions to this simulation
# ====================================================================
sim.set_initial_states('results/equilibrium_v4.npz')
sim.solve()
sim.save_as_csv('results', 'constant_radius_v8', 'pos')
sim.save_as_npz('results', 'constant_radius_v8')
# =============================================================================
# Plotting Simulation Results
# =============================================================================
sim.soln.pos_dataframe.plot(x='CH.rbs_chassis.x', y='CH.rbs_chassis.y', grid=True)
sim.soln.vel_dataframe.plot(x='time', y='CH.rbs_chassis.x', grid=True)
sim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)
sim.soln.vel_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)
sim.soln.acc_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)
sim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e0', grid=True)
sim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e1', grid=True)
sim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e2', grid=True)
sim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e3', grid=True)
plt.show()
|
normal
|
{
"blob_id": "e0541c377eb6631e4ef5eb79b1204612ce8af48c",
"index": 6107,
"step-1": "<mask token>\n\n\ndef generate_circular_path(radius, offset):\n theta = np.deg2rad(np.linspace(0, 360, 360))\n x_data = radius * np.sin(theta) + offset[0]\n y_data = radius * np.cos(theta) + offset[1]\n radii = radius * np.ones((360,))\n return x_data, y_data, radii\n\n\n<mask token>\n\n\ndef terrain_state(x, y):\n local_normal = np.array([[0], [0], [1]], dtype=np.float64)\n hieght = 0\n return [local_normal, hieght]\n\n\n<mask token>\n\n\ndef steering_function(t):\n R_ch = num_model.Subsystems.CH.R_rbs_chassis\n P_ch = num_model.Subsystems.CH.P_rbs_chassis\n Rd_ch = num_model.Subsystems.CH.Rd_rbs_chassis\n Pd_ch = num_model.Subsystems.CH.Pd_rbs_chassis\n rbar_ax1 = np.array([[-800], [0], [0]], dtype=np.float64)\n r_ax1 = R_ch + A(P_ch) @ rbar_ax1\n vel = (A(P_ch).T @ (Rd_ch + B(P_ch, rbar_ax1) @ Pd_ch))[0, 0]\n delta = lateral_controller.get_steer_factor(r_ax1, P_ch, Pd_ch, vel)\n travel = delta * 18\n return travel\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.append(database_directory)\n<mask token>\n\n\ndef generate_circular_path(radius, offset):\n theta = np.deg2rad(np.linspace(0, 360, 360))\n x_data = radius * np.sin(theta) + offset[0]\n y_data = radius * np.cos(theta) + offset[1]\n radii = radius * np.ones((360,))\n return x_data, y_data, radii\n\n\n<mask token>\nplt.figure(figsize=(10, 5))\nplt.plot(path_data[:, 0], path_data[:, 1])\nplt.grid()\nplt.show()\n<mask token>\n\n\ndef terrain_state(x, y):\n local_normal = np.array([[0], [0], [1]], dtype=np.float64)\n hieght = 0\n return [local_normal, hieght]\n\n\ndef torque_function(t):\n P_ch = num_model.Subsystems.CH.P_rbs_chassis\n Rd = num_model.Subsystems.CH.Rd_rbs_chassis\n factor = logitudinal_controller.get_torque_factor(P_ch, Rd)\n return factor\n\n\ndef RR_Torque(t):\n factor = torque_function(t)\n torque = -factor * (70 * 9.81) * 1000000.0 * TR\n return torque\n\n\ndef RL_Torque(t):\n factor = torque_function(t)\n torque = -factor * (70 * 9.81) * 1000000.0 * TR\n return torque\n\n\ndef steering_function(t):\n R_ch = num_model.Subsystems.CH.R_rbs_chassis\n P_ch = num_model.Subsystems.CH.P_rbs_chassis\n Rd_ch = num_model.Subsystems.CH.Rd_rbs_chassis\n Pd_ch = num_model.Subsystems.CH.Pd_rbs_chassis\n rbar_ax1 = np.array([[-800], [0], [0]], dtype=np.float64)\n r_ax1 = R_ch + A(P_ch) @ rbar_ax1\n vel = (A(P_ch).T @ (Rd_ch + B(P_ch, rbar_ax1) @ Pd_ch))[0, 0]\n delta = lateral_controller.get_steer_factor(r_ax1, P_ch, Pd_ch, vel)\n travel = delta * 18\n return travel\n\n\ndef zero_func(t):\n return np.zeros((3, 1), dtype=np.float64)\n\n\n<mask token>\nsim.set_time_array(15, dt)\nsim.set_initial_states('results/equilibrium_v4.npz')\nsim.solve()\nsim.save_as_csv('results', 'constant_radius_v8', 'pos')\nsim.save_as_npz('results', 'constant_radius_v8')\n<mask token>\nsim.soln.pos_dataframe.plot(x='CH.rbs_chassis.x', y='CH.rbs_chassis.y',\n grid=True)\nsim.soln.vel_dataframe.plot(x='time', y='CH.rbs_chassis.x', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)\nsim.soln.vel_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)\nsim.soln.acc_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e0', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e1', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e2', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e3', grid=True)\nplt.show()\n",
"step-3": "<mask token>\ndatabase_directory = os.path.abspath('../../')\nsys.path.append(database_directory)\n<mask token>\nnum_model = num_assm.num_model\ndt = num_assm.dt\nTR = 254\n\n\ndef generate_circular_path(radius, offset):\n theta = np.deg2rad(np.linspace(0, 360, 360))\n x_data = radius * np.sin(theta) + offset[0]\n y_data = radius * np.cos(theta) + offset[1]\n radii = radius * np.ones((360,))\n return x_data, y_data, radii\n\n\nx_data, y_data, radii = generate_circular_path(10.5, (0, -10.5))\npath_data = np.zeros((360, 3))\npath_data[:, 0] = -1000.0 * x_data\npath_data[:, 1] = 1000.0 * y_data\npath_data[:, 2] = 1000.0 * radii\nplt.figure(figsize=(10, 5))\nplt.plot(path_data[:, 0], path_data[:, 1])\nplt.grid()\nplt.show()\nlogitudinal_controller = speed_controller(35, dt)\nlateral_controller = stanley_controller(path_data, 25)\n\n\ndef terrain_state(x, y):\n local_normal = np.array([[0], [0], [1]], dtype=np.float64)\n hieght = 0\n return [local_normal, hieght]\n\n\ndef torque_function(t):\n P_ch = num_model.Subsystems.CH.P_rbs_chassis\n Rd = num_model.Subsystems.CH.Rd_rbs_chassis\n factor = logitudinal_controller.get_torque_factor(P_ch, Rd)\n return factor\n\n\ndef RR_Torque(t):\n factor = torque_function(t)\n torque = -factor * (70 * 9.81) * 1000000.0 * TR\n return torque\n\n\ndef RL_Torque(t):\n factor = torque_function(t)\n torque = -factor * (70 * 9.81) * 1000000.0 * TR\n return torque\n\n\ndef steering_function(t):\n R_ch = num_model.Subsystems.CH.R_rbs_chassis\n P_ch = num_model.Subsystems.CH.P_rbs_chassis\n Rd_ch = num_model.Subsystems.CH.Rd_rbs_chassis\n Pd_ch = num_model.Subsystems.CH.Pd_rbs_chassis\n rbar_ax1 = np.array([[-800], [0], [0]], dtype=np.float64)\n r_ax1 = R_ch + A(P_ch) @ rbar_ax1\n vel = (A(P_ch).T @ (Rd_ch + B(P_ch, rbar_ax1) @ Pd_ch))[0, 0]\n delta = lateral_controller.get_steer_factor(r_ax1, P_ch, Pd_ch, vel)\n travel = delta * 18\n return travel\n\n\ndef zero_func(t):\n return np.zeros((3, 1), dtype=np.float64)\n\n\nnum_assm.terrain_data.get_state = terrain_state\nnum_assm.ST1_config.UF_mcs_rack_act = steering_function\nnum_assm.AX1_config.UF_far_drive = RR_Torque\nnum_assm.AX1_config.UF_fal_drive = RL_Torque\nnum_assm.CH_config.UF_fas_aero_drag_F = zero_func\nnum_assm.CH_config.UF_fas_aero_drag_T = zero_func\nsim = simulation('sim', num_model, 'dds')\nsim.set_time_array(15, dt)\nsim.set_initial_states('results/equilibrium_v4.npz')\nsim.solve()\nsim.save_as_csv('results', 'constant_radius_v8', 'pos')\nsim.save_as_npz('results', 'constant_radius_v8')\n<mask token>\nsim.soln.pos_dataframe.plot(x='CH.rbs_chassis.x', y='CH.rbs_chassis.y',\n grid=True)\nsim.soln.vel_dataframe.plot(x='time', y='CH.rbs_chassis.x', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)\nsim.soln.vel_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)\nsim.soln.acc_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e0', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e1', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e2', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e3', grid=True)\nplt.show()\n",
"step-4": "import sys\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom uraeus.nmbd.python import simulation\nfrom uraeus.nmbd.python.engine.numerics.math_funcs import A, B\ndatabase_directory = os.path.abspath('../../')\nsys.path.append(database_directory)\nfrom uraeus_fsae.simenv.assemblies import asurt_FS17_v1 as num_assm\nfrom controllers import speed_controller, stanley_controller\nnum_model = num_assm.num_model\ndt = num_assm.dt\nTR = 254\n\n\ndef generate_circular_path(radius, offset):\n theta = np.deg2rad(np.linspace(0, 360, 360))\n x_data = radius * np.sin(theta) + offset[0]\n y_data = radius * np.cos(theta) + offset[1]\n radii = radius * np.ones((360,))\n return x_data, y_data, radii\n\n\nx_data, y_data, radii = generate_circular_path(10.5, (0, -10.5))\npath_data = np.zeros((360, 3))\npath_data[:, 0] = -1000.0 * x_data\npath_data[:, 1] = 1000.0 * y_data\npath_data[:, 2] = 1000.0 * radii\nplt.figure(figsize=(10, 5))\nplt.plot(path_data[:, 0], path_data[:, 1])\nplt.grid()\nplt.show()\nlogitudinal_controller = speed_controller(35, dt)\nlateral_controller = stanley_controller(path_data, 25)\n\n\ndef terrain_state(x, y):\n local_normal = np.array([[0], [0], [1]], dtype=np.float64)\n hieght = 0\n return [local_normal, hieght]\n\n\ndef torque_function(t):\n P_ch = num_model.Subsystems.CH.P_rbs_chassis\n Rd = num_model.Subsystems.CH.Rd_rbs_chassis\n factor = logitudinal_controller.get_torque_factor(P_ch, Rd)\n return factor\n\n\ndef RR_Torque(t):\n factor = torque_function(t)\n torque = -factor * (70 * 9.81) * 1000000.0 * TR\n return torque\n\n\ndef RL_Torque(t):\n factor = torque_function(t)\n torque = -factor * (70 * 9.81) * 1000000.0 * TR\n return torque\n\n\ndef steering_function(t):\n R_ch = num_model.Subsystems.CH.R_rbs_chassis\n P_ch = num_model.Subsystems.CH.P_rbs_chassis\n Rd_ch = num_model.Subsystems.CH.Rd_rbs_chassis\n Pd_ch = num_model.Subsystems.CH.Pd_rbs_chassis\n rbar_ax1 = np.array([[-800], [0], [0]], dtype=np.float64)\n r_ax1 = R_ch + A(P_ch) @ rbar_ax1\n vel = (A(P_ch).T @ (Rd_ch + B(P_ch, rbar_ax1) @ Pd_ch))[0, 0]\n delta = lateral_controller.get_steer_factor(r_ax1, P_ch, Pd_ch, vel)\n travel = delta * 18\n return travel\n\n\ndef zero_func(t):\n return np.zeros((3, 1), dtype=np.float64)\n\n\nnum_assm.terrain_data.get_state = terrain_state\nnum_assm.ST1_config.UF_mcs_rack_act = steering_function\nnum_assm.AX1_config.UF_far_drive = RR_Torque\nnum_assm.AX1_config.UF_fal_drive = RL_Torque\nnum_assm.CH_config.UF_fas_aero_drag_F = zero_func\nnum_assm.CH_config.UF_fas_aero_drag_T = zero_func\nsim = simulation('sim', num_model, 'dds')\nsim.set_time_array(15, dt)\nsim.set_initial_states('results/equilibrium_v4.npz')\nsim.solve()\nsim.save_as_csv('results', 'constant_radius_v8', 'pos')\nsim.save_as_npz('results', 'constant_radius_v8')\nimport matplotlib.pyplot as plt\nsim.soln.pos_dataframe.plot(x='CH.rbs_chassis.x', y='CH.rbs_chassis.y',\n grid=True)\nsim.soln.vel_dataframe.plot(x='time', y='CH.rbs_chassis.x', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)\nsim.soln.vel_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)\nsim.soln.acc_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e0', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e1', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e2', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e3', grid=True)\nplt.show()\n",
"step-5": "import sys\nimport os\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom uraeus.nmbd.python import simulation\nfrom uraeus.nmbd.python.engine.numerics.math_funcs import A, B\n\ndatabase_directory = os.path.abspath('../../')\nsys.path.append(database_directory)\n\nfrom uraeus_fsae.simenv.assemblies import asurt_FS17_v1 as num_assm\nfrom controllers import speed_controller, stanley_controller\n\nnum_model = num_assm.num_model\n\ndt = num_assm.dt\nTR = 254\n\ndef generate_circular_path(radius, offset):\n theta = np.deg2rad(np.linspace(0, 360, 360))\n x_data = radius * np.sin(theta) + offset[0]\n y_data = radius * np.cos(theta) + offset[1]\n radii = radius * np.ones((360,))\n return x_data, y_data, radii\n\n\nx_data, y_data, radii = generate_circular_path(10.5, (0, -10.5))\n\npath_data = np.zeros((360, 3))\npath_data[:, 0] = -1e3 * x_data\npath_data[:, 1] = 1e3 * y_data\npath_data[:, 2] = 1e3 * radii\n\nplt.figure(figsize=(10, 5))\nplt.plot(path_data[:, 0], path_data[:, 1])\nplt.grid()\nplt.show()\n\nlogitudinal_controller = speed_controller(35, dt)\nlateral_controller = stanley_controller(path_data, 25)\n\n\ndef terrain_state(x, y):\n local_normal = np.array([[0],[0],[1]], dtype=np.float64)\n hieght = 0\n return [local_normal, hieght]\n\n\ndef torque_function(t):\n P_ch = num_model.Subsystems.CH.P_rbs_chassis\n Rd = num_model.Subsystems.CH.Rd_rbs_chassis\n factor = logitudinal_controller.get_torque_factor(P_ch, Rd)\n return factor\n\ndef RR_Torque(t):\n factor = torque_function(t)\n torque = -factor*(70*9.81)*1e6*TR\n return torque\n\ndef RL_Torque(t):\n factor = torque_function(t)\n torque = -factor*(70*9.81)*1e6*TR\n return torque\n\ndef steering_function(t):\n R_ch = num_model.Subsystems.CH.R_rbs_chassis\n P_ch = num_model.Subsystems.CH.P_rbs_chassis\n Rd_ch = num_model.Subsystems.CH.Rd_rbs_chassis\n Pd_ch = num_model.Subsystems.CH.Pd_rbs_chassis\n\n rbar_ax1 = np.array([[-800], [0], [0]], dtype=np.float64)\n r_ax1 = R_ch + A(P_ch)@rbar_ax1\n vel = (A(P_ch).T @ (Rd_ch + B(P_ch, rbar_ax1)@Pd_ch))[0,0]\n\n delta = lateral_controller.get_steer_factor(r_ax1, P_ch, Pd_ch, vel)\n\n travel = delta * 18\n #print('Travel = %s'%travel)\n return travel\n\n\ndef zero_func(t):\n return np.zeros((3,1), dtype=np.float64)\n\n\nnum_assm.terrain_data.get_state = terrain_state\n\nnum_assm.ST1_config.UF_mcs_rack_act = steering_function\n\nnum_assm.AX1_config.UF_far_drive = RR_Torque\nnum_assm.AX1_config.UF_fal_drive = RL_Torque\n\n#num_assm.DR2_config.UF_far_drive = RR_Torque\n#num_assm.DR2_config.UF_fal_drive = RL_Torque\n\nnum_assm.CH_config.UF_fas_aero_drag_F = zero_func\nnum_assm.CH_config.UF_fas_aero_drag_T = zero_func\n# =============================================================================\n# Setting and Starting Simulation\n# =============================================================================\n\nsim = simulation('sim', num_model, 'dds')\nsim.set_time_array(15, dt)\n\n# Getting Equilibrium results as initial conditions to this simulation\n# ====================================================================\nsim.set_initial_states('results/equilibrium_v4.npz')\n\nsim.solve()\n\nsim.save_as_csv('results', 'constant_radius_v8', 'pos')\nsim.save_as_npz('results', 'constant_radius_v8')\n\n#=============================================================================\n# Plotting Simulation Results\n# =============================================================================\n\nimport matplotlib.pyplot as 
plt\n\nsim.soln.pos_dataframe.plot(x='CH.rbs_chassis.x', y='CH.rbs_chassis.y', grid=True)\n\nsim.soln.vel_dataframe.plot(x='time', y='CH.rbs_chassis.x', grid=True)\n\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)\nsim.soln.vel_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)\nsim.soln.acc_dataframe.plot(x='time', y='CH.rbs_chassis.z', grid=True)\n\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e0', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e1', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e2', grid=True)\nsim.soln.pos_dataframe.plot(x='time', y='CH.rbs_chassis.e3', grid=True)\n\nplt.show()\n",
"step-ids": [
3,
8,
9,
10,
11
]
}
|
[
3,
8,
9,
10,
11
] |
# Imports
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils import data
from torch.utils.data import DataLoader
import torchvision.datasets as datasets
import torchvision.transforms as transforms
# Create Fully Connected Network
class NN(nn.Module):
def __init__(self, input_size,num_classes):
super(NN,self).__init__()
self.fc1 = nn.Linear(input_size,50)
self.fc2 = nn.Linear(50,num_classes)
def forward(self,x):
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
# Set device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyperparameters
input_size =784
num_classes = 10
learning_rate = 0.001
batch_size = 64
num_epochs = 10
# Load Data
train_dataset = datasets.MNIST(
root='dataset/',
train=True,
transform=transforms.ToTensor(),
download=True,
)
train_loader = DataLoader(
dataset=train_dataset,
batch_size=batch_size,
shuffle=True,
)
test_dataset = datasets.MNIST(
root='dataset/',
train=False,
transform=transforms.ToTensor(),
download=True,
)
test_loader = DataLoader(
dataset=train_dataset,
batch_size=batch_size,
shuffle=True,
)
# Initialize network
model = NN(input_size,num_classes).to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(),lr=learning_rate)
# Train network
for epoch in range(num_epochs):
print("Epoch: "+str(epoch+1))
for batch_idx, (data, targets) in enumerate(train_loader):
data = data.to(device=device)
targets = targets.to(device=device)
# Get to correct shape
data = data.reshape(data.shape[0],-1)
scores = model(data)
loss = criterion(scores,targets)
# backward
optimizer.zero_grad()
loss.backward()
# gradient descent or adam step
optimizer.step()
# Check accuracy on training and test to see how good our model
def check_accuracy(loader, model):
if loader.dataset.train:
print("Checking accuracy on training data")
else:
print("Checking accuracy on test data")
num_correct = 0
num_samples = 0
model.eval()
with torch.no_grad():
for x,y in loader:
x = x.to(device=device)
y = y.to(device=device)
x = x.reshape(x.shape[0],-1)
scores = model(x)
_, predictions = scores.max(1)
num_correct += (predictions == y).sum()
num_samples += predictions.size(0)
print(f'Got {num_correct} / {num_samples} with accuracy {float(num_correct)/float(num_samples)*100:.2f}')
model.train()
check_accuracy(train_loader,model)
check_accuracy(test_loader,model)
|
normal
|
{
"blob_id": "1edb92a4905048f3961e3067c67ef892d7b8a034",
"index": 9154,
"step-1": "<mask token>\n\n\nclass NN(nn.Module):\n\n def __init__(self, input_size, num_classes):\n super(NN, self).__init__()\n self.fc1 = nn.Linear(input_size, 50)\n self.fc2 = nn.Linear(50, num_classes)\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n\n\n<mask token>\n\n\ndef check_accuracy(loader, model):\n if loader.dataset.train:\n print('Checking accuracy on training data')\n else:\n print('Checking accuracy on test data')\n num_correct = 0\n num_samples = 0\n model.eval()\n with torch.no_grad():\n for x, y in loader:\n x = x.to(device=device)\n y = y.to(device=device)\n x = x.reshape(x.shape[0], -1)\n scores = model(x)\n _, predictions = scores.max(1)\n num_correct += (predictions == y).sum()\n num_samples += predictions.size(0)\n print(\n f'Got {num_correct} / {num_samples} with accuracy {float(num_correct) / float(num_samples) * 100:.2f}'\n )\n model.train()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass NN(nn.Module):\n\n def __init__(self, input_size, num_classes):\n super(NN, self).__init__()\n self.fc1 = nn.Linear(input_size, 50)\n self.fc2 = nn.Linear(50, num_classes)\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n\n\n<mask token>\nfor epoch in range(num_epochs):\n print('Epoch: ' + str(epoch + 1))\n for batch_idx, (data, targets) in enumerate(train_loader):\n data = data.to(device=device)\n targets = targets.to(device=device)\n data = data.reshape(data.shape[0], -1)\n scores = model(data)\n loss = criterion(scores, targets)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n\ndef check_accuracy(loader, model):\n if loader.dataset.train:\n print('Checking accuracy on training data')\n else:\n print('Checking accuracy on test data')\n num_correct = 0\n num_samples = 0\n model.eval()\n with torch.no_grad():\n for x, y in loader:\n x = x.to(device=device)\n y = y.to(device=device)\n x = x.reshape(x.shape[0], -1)\n scores = model(x)\n _, predictions = scores.max(1)\n num_correct += (predictions == y).sum()\n num_samples += predictions.size(0)\n print(\n f'Got {num_correct} / {num_samples} with accuracy {float(num_correct) / float(num_samples) * 100:.2f}'\n )\n model.train()\n\n\ncheck_accuracy(train_loader, model)\ncheck_accuracy(test_loader, model)\n",
"step-3": "<mask token>\n\n\nclass NN(nn.Module):\n\n def __init__(self, input_size, num_classes):\n super(NN, self).__init__()\n self.fc1 = nn.Linear(input_size, 50)\n self.fc2 = nn.Linear(50, num_classes)\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ninput_size = 784\nnum_classes = 10\nlearning_rate = 0.001\nbatch_size = 64\nnum_epochs = 10\ntrain_dataset = datasets.MNIST(root='dataset/', train=True, transform=\n transforms.ToTensor(), download=True)\ntrain_loader = DataLoader(dataset=train_dataset, batch_size=batch_size,\n shuffle=True)\ntest_dataset = datasets.MNIST(root='dataset/', train=False, transform=\n transforms.ToTensor(), download=True)\ntest_loader = DataLoader(dataset=train_dataset, batch_size=batch_size,\n shuffle=True)\nmodel = NN(input_size, num_classes).to(device)\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(model.parameters(), lr=learning_rate)\nfor epoch in range(num_epochs):\n print('Epoch: ' + str(epoch + 1))\n for batch_idx, (data, targets) in enumerate(train_loader):\n data = data.to(device=device)\n targets = targets.to(device=device)\n data = data.reshape(data.shape[0], -1)\n scores = model(data)\n loss = criterion(scores, targets)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n\ndef check_accuracy(loader, model):\n if loader.dataset.train:\n print('Checking accuracy on training data')\n else:\n print('Checking accuracy on test data')\n num_correct = 0\n num_samples = 0\n model.eval()\n with torch.no_grad():\n for x, y in loader:\n x = x.to(device=device)\n y = y.to(device=device)\n x = x.reshape(x.shape[0], -1)\n scores = model(x)\n _, predictions = scores.max(1)\n num_correct += (predictions == y).sum()\n num_samples += predictions.size(0)\n print(\n f'Got {num_correct} / {num_samples} with accuracy {float(num_correct) / float(num_samples) * 100:.2f}'\n )\n model.train()\n\n\ncheck_accuracy(train_loader, model)\ncheck_accuracy(test_loader, model)\n",
"step-4": "import torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.utils import data\nfrom torch.utils.data import DataLoader\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\n\n\nclass NN(nn.Module):\n\n def __init__(self, input_size, num_classes):\n super(NN, self).__init__()\n self.fc1 = nn.Linear(input_size, 50)\n self.fc2 = nn.Linear(50, num_classes)\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\ninput_size = 784\nnum_classes = 10\nlearning_rate = 0.001\nbatch_size = 64\nnum_epochs = 10\ntrain_dataset = datasets.MNIST(root='dataset/', train=True, transform=\n transforms.ToTensor(), download=True)\ntrain_loader = DataLoader(dataset=train_dataset, batch_size=batch_size,\n shuffle=True)\ntest_dataset = datasets.MNIST(root='dataset/', train=False, transform=\n transforms.ToTensor(), download=True)\ntest_loader = DataLoader(dataset=train_dataset, batch_size=batch_size,\n shuffle=True)\nmodel = NN(input_size, num_classes).to(device)\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(model.parameters(), lr=learning_rate)\nfor epoch in range(num_epochs):\n print('Epoch: ' + str(epoch + 1))\n for batch_idx, (data, targets) in enumerate(train_loader):\n data = data.to(device=device)\n targets = targets.to(device=device)\n data = data.reshape(data.shape[0], -1)\n scores = model(data)\n loss = criterion(scores, targets)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n\ndef check_accuracy(loader, model):\n if loader.dataset.train:\n print('Checking accuracy on training data')\n else:\n print('Checking accuracy on test data')\n num_correct = 0\n num_samples = 0\n model.eval()\n with torch.no_grad():\n for x, y in loader:\n x = x.to(device=device)\n y = y.to(device=device)\n x = x.reshape(x.shape[0], -1)\n scores = model(x)\n _, predictions = scores.max(1)\n num_correct += (predictions == y).sum()\n num_samples += predictions.size(0)\n print(\n f'Got {num_correct} / {num_samples} with accuracy {float(num_correct) / float(num_samples) * 100:.2f}'\n )\n model.train()\n\n\ncheck_accuracy(train_loader, model)\ncheck_accuracy(test_loader, model)\n",
"step-5": "# Imports\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.utils import data\nfrom torch.utils.data import DataLoader\nimport torchvision.datasets as datasets\nimport torchvision.transforms as transforms\n\n# Create Fully Connected Network\nclass NN(nn.Module):\n def __init__(self, input_size,num_classes):\n super(NN,self).__init__()\n self.fc1 = nn.Linear(input_size,50)\n self.fc2 = nn.Linear(50,num_classes)\n\n def forward(self,x):\n x = F.relu(self.fc1(x))\n x = self.fc2(x)\n return x\n\n# Set device\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n\n# Hyperparameters\ninput_size =784\nnum_classes = 10\nlearning_rate = 0.001\nbatch_size = 64\nnum_epochs = 10\n\n# Load Data\ntrain_dataset = datasets.MNIST(\n root='dataset/',\n train=True,\n transform=transforms.ToTensor(),\n download=True,\n)\ntrain_loader = DataLoader(\n dataset=train_dataset,\n batch_size=batch_size,\n shuffle=True,\n)\ntest_dataset = datasets.MNIST(\n root='dataset/',\n train=False,\n transform=transforms.ToTensor(),\n download=True,\n)\ntest_loader = DataLoader(\n dataset=train_dataset,\n batch_size=batch_size,\n shuffle=True,\n)\n# Initialize network\nmodel = NN(input_size,num_classes).to(device)\n\n# Loss and optimizer\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(model.parameters(),lr=learning_rate)\n\n# Train network\nfor epoch in range(num_epochs):\n print(\"Epoch: \"+str(epoch+1))\n for batch_idx, (data, targets) in enumerate(train_loader):\n data = data.to(device=device)\n targets = targets.to(device=device)\n\n # Get to correct shape\n data = data.reshape(data.shape[0],-1)\n scores = model(data)\n loss = criterion(scores,targets)\n\n # backward\n optimizer.zero_grad()\n loss.backward()\n\n # gradient descent or adam step\n optimizer.step()\n\n# Check accuracy on training and test to see how good our model\n\ndef check_accuracy(loader, model):\n\n if loader.dataset.train:\n print(\"Checking accuracy on training data\")\n else:\n print(\"Checking accuracy on test data\")\n\n num_correct = 0\n num_samples = 0\n model.eval()\n with torch.no_grad():\n for x,y in loader:\n x = x.to(device=device)\n y = y.to(device=device)\n x = x.reshape(x.shape[0],-1)\n\n scores = model(x)\n\n _, predictions = scores.max(1)\n num_correct += (predictions == y).sum()\n num_samples += predictions.size(0)\n\n print(f'Got {num_correct} / {num_samples} with accuracy {float(num_correct)/float(num_samples)*100:.2f}')\n model.train()\n\n\ncheck_accuracy(train_loader,model)\ncheck_accuracy(test_loader,model)",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import logging
from typing import Dict
import numpy as np
from meshkit import Mesh
from rendkit.materials import DepthMaterial
from vispy import gloo, app
from vispy.gloo import gl
logger = logging.getLogger(__name__)
class Renderable:
def __init__(self,
material_name: str,
attributes: Dict[str, np.ndarray],
model_mat=np.eye(4),
uv_scale=1.0):
self.model_mat = model_mat
self.material_name = material_name
self._attributes = attributes
self._uv_scale = uv_scale
self._current_scene = None
self._program = None
self._scene_version = -1
def set_uv_scale(self, scale):
self._uv_scale = scale
if 'a_uv' in self._attributes:
if self._program is not None:
self._program['u_uv_scale'] = self._uv_scale
def scale_uv_scale(self, v):
self._uv_scale *= v
if 'a_uv' in self._attributes:
if self._program is not None:
self._program['u_uv_scale'] = self._uv_scale
def activate(self, scene, camera):
material = scene.get_material(self.material_name)
if self._program is None or scene != self._current_scene:
self._current_scene = scene
self._scene_version = -1
if self._scene_version != scene.version:
self._current_scene = scene
self._scene_version = scene.version
self._program = material.compile(
num_lights=len(scene.lights),
num_shadow_sources=len(scene.shadow_sources),
use_radiance_map=scene.radiance_map is not None)
material.upload_attributes(self._program, self._attributes, self._uv_scale)
material.upload_radmap(self._program, scene.radiance_map)
material.upload_shadow_sources(self._program, scene.shadow_sources)
material.upload_lights(self._program, scene.lights)
material.upload_camera(self._program, camera)
self._program['u_model'] = self.model_mat.T
return self._program
def mesh_to_renderables(mesh: Mesh, model_mat):
renderables = []
# For now each renderable represents a submesh with the same materials.
for material_id, material_name in enumerate(mesh.materials):
filter = {'material': material_id}
vertex_positions = mesh.expand_face_vertices(filter)
vertex_normals = mesh.expand_face_normals(filter)
vertex_tangents, vertex_bitangents = mesh.expand_tangents(
filter)
vertex_uvs = mesh.expand_face_uvs(filter)
if len(vertex_positions) < 3:
logger.warning('Material {} not visible.'.format(material_name))
continue
attributes = dict(
a_position=vertex_positions,
a_normal=vertex_normals,
a_tangent=vertex_tangents,
a_bitangent=vertex_bitangents,
a_uv=vertex_uvs
)
renderables.append(Renderable(material_name, attributes, model_mat,
uv_scale=mesh.uv_scale))
return renderables
class DummyRenderer(app.Canvas):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
gloo.set_viewport(0, 0, *self.size)
def __enter__(self):
self._backend._vispy_warmup()
return self
class ContextProvider:
def __init__(self, size):
self.size = size
canvas = gloo.get_current_canvas()
self.context_exists = canvas is not None and not canvas._closed
if self.context_exists:
logger.debug("Using existing OpenGL context.")
self.provider = gloo.get_current_canvas()
self.previous_size = self.provider.size
else:
logger.debug("Providing temporary context with DummyRenderer.")
self.provider = DummyRenderer(size=size)
def __enter__(self):
gloo.set_viewport(0, 0, *self.size)
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.context_exists:
self.provider.__exit__(exc_type, exc_val, exc_tb)
else:
gloo.set_viewport(0, 0, *self.previous_size)
def draw_depth(camera, renderables, rend_target):
rendfb, rendtex, _ = rend_target
material = DepthMaterial()
program = DepthMaterial().compile()
with rendfb:
gloo.clear(color=camera.clear_color)
gloo.set_state(depth_test=True)
gl.glEnable(gl.GL_CULL_FACE)
gl.glCullFace(gl.GL_FRONT)
for renderable in renderables:
material.upload_camera(program, camera)
material.upload_attributes(program, renderable._attributes)
program['u_model'] = renderable.model_mat.T
program.draw(gl.GL_TRIANGLES)
gl.glCullFace(gl.GL_BACK)
gl.glDisable(gl.GL_CULL_FACE)
|
normal
|
{
"blob_id": "061c287d5f0a5feeeaedc80eea6b3fc4ff02286e",
"index": 7191,
"step-1": "<mask token>\n\n\nclass Renderable:\n\n def __init__(self, material_name: str, attributes: Dict[str, np.ndarray\n ], model_mat=np.eye(4), uv_scale=1.0):\n self.model_mat = model_mat\n self.material_name = material_name\n self._attributes = attributes\n self._uv_scale = uv_scale\n self._current_scene = None\n self._program = None\n self._scene_version = -1\n <mask token>\n\n def scale_uv_scale(self, v):\n self._uv_scale *= v\n if 'a_uv' in self._attributes:\n if self._program is not None:\n self._program['u_uv_scale'] = self._uv_scale\n <mask token>\n\n\n<mask token>\n\n\nclass DummyRenderer(app.Canvas):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n gloo.set_viewport(0, 0, *self.size)\n\n def __enter__(self):\n self._backend._vispy_warmup()\n return self\n\n\nclass ContextProvider:\n\n def __init__(self, size):\n self.size = size\n canvas = gloo.get_current_canvas()\n self.context_exists = canvas is not None and not canvas._closed\n if self.context_exists:\n logger.debug('Using existing OpenGL context.')\n self.provider = gloo.get_current_canvas()\n self.previous_size = self.provider.size\n else:\n logger.debug('Providing temporary context with DummyRenderer.')\n self.provider = DummyRenderer(size=size)\n\n def __enter__(self):\n gloo.set_viewport(0, 0, *self.size)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if not self.context_exists:\n self.provider.__exit__(exc_type, exc_val, exc_tb)\n else:\n gloo.set_viewport(0, 0, *self.previous_size)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Renderable:\n\n def __init__(self, material_name: str, attributes: Dict[str, np.ndarray\n ], model_mat=np.eye(4), uv_scale=1.0):\n self.model_mat = model_mat\n self.material_name = material_name\n self._attributes = attributes\n self._uv_scale = uv_scale\n self._current_scene = None\n self._program = None\n self._scene_version = -1\n <mask token>\n\n def scale_uv_scale(self, v):\n self._uv_scale *= v\n if 'a_uv' in self._attributes:\n if self._program is not None:\n self._program['u_uv_scale'] = self._uv_scale\n\n def activate(self, scene, camera):\n material = scene.get_material(self.material_name)\n if self._program is None or scene != self._current_scene:\n self._current_scene = scene\n self._scene_version = -1\n if self._scene_version != scene.version:\n self._current_scene = scene\n self._scene_version = scene.version\n self._program = material.compile(num_lights=len(scene.lights),\n num_shadow_sources=len(scene.shadow_sources),\n use_radiance_map=scene.radiance_map is not None)\n material.upload_attributes(self._program, self._attributes,\n self._uv_scale)\n material.upload_radmap(self._program, scene.radiance_map)\n material.upload_shadow_sources(self._program, scene.shadow_sources)\n material.upload_lights(self._program, scene.lights)\n material.upload_camera(self._program, camera)\n self._program['u_model'] = self.model_mat.T\n return self._program\n\n\n<mask token>\n\n\nclass DummyRenderer(app.Canvas):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n gloo.set_viewport(0, 0, *self.size)\n\n def __enter__(self):\n self._backend._vispy_warmup()\n return self\n\n\nclass ContextProvider:\n\n def __init__(self, size):\n self.size = size\n canvas = gloo.get_current_canvas()\n self.context_exists = canvas is not None and not canvas._closed\n if self.context_exists:\n logger.debug('Using existing OpenGL context.')\n self.provider = gloo.get_current_canvas()\n self.previous_size = self.provider.size\n else:\n logger.debug('Providing temporary context with DummyRenderer.')\n self.provider = DummyRenderer(size=size)\n\n def __enter__(self):\n gloo.set_viewport(0, 0, *self.size)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if not self.context_exists:\n self.provider.__exit__(exc_type, exc_val, exc_tb)\n else:\n gloo.set_viewport(0, 0, *self.previous_size)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Renderable:\n\n def __init__(self, material_name: str, attributes: Dict[str, np.ndarray\n ], model_mat=np.eye(4), uv_scale=1.0):\n self.model_mat = model_mat\n self.material_name = material_name\n self._attributes = attributes\n self._uv_scale = uv_scale\n self._current_scene = None\n self._program = None\n self._scene_version = -1\n\n def set_uv_scale(self, scale):\n self._uv_scale = scale\n if 'a_uv' in self._attributes:\n if self._program is not None:\n self._program['u_uv_scale'] = self._uv_scale\n\n def scale_uv_scale(self, v):\n self._uv_scale *= v\n if 'a_uv' in self._attributes:\n if self._program is not None:\n self._program['u_uv_scale'] = self._uv_scale\n\n def activate(self, scene, camera):\n material = scene.get_material(self.material_name)\n if self._program is None or scene != self._current_scene:\n self._current_scene = scene\n self._scene_version = -1\n if self._scene_version != scene.version:\n self._current_scene = scene\n self._scene_version = scene.version\n self._program = material.compile(num_lights=len(scene.lights),\n num_shadow_sources=len(scene.shadow_sources),\n use_radiance_map=scene.radiance_map is not None)\n material.upload_attributes(self._program, self._attributes,\n self._uv_scale)\n material.upload_radmap(self._program, scene.radiance_map)\n material.upload_shadow_sources(self._program, scene.shadow_sources)\n material.upload_lights(self._program, scene.lights)\n material.upload_camera(self._program, camera)\n self._program['u_model'] = self.model_mat.T\n return self._program\n\n\n<mask token>\n\n\nclass DummyRenderer(app.Canvas):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n gloo.set_viewport(0, 0, *self.size)\n\n def __enter__(self):\n self._backend._vispy_warmup()\n return self\n\n\nclass ContextProvider:\n\n def __init__(self, size):\n self.size = size\n canvas = gloo.get_current_canvas()\n self.context_exists = canvas is not None and not canvas._closed\n if self.context_exists:\n logger.debug('Using existing OpenGL context.')\n self.provider = gloo.get_current_canvas()\n self.previous_size = self.provider.size\n else:\n logger.debug('Providing temporary context with DummyRenderer.')\n self.provider = DummyRenderer(size=size)\n\n def __enter__(self):\n gloo.set_viewport(0, 0, *self.size)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if not self.context_exists:\n self.provider.__exit__(exc_type, exc_val, exc_tb)\n else:\n gloo.set_viewport(0, 0, *self.previous_size)\n\n\ndef draw_depth(camera, renderables, rend_target):\n rendfb, rendtex, _ = rend_target\n material = DepthMaterial()\n program = DepthMaterial().compile()\n with rendfb:\n gloo.clear(color=camera.clear_color)\n gloo.set_state(depth_test=True)\n gl.glEnable(gl.GL_CULL_FACE)\n gl.glCullFace(gl.GL_FRONT)\n for renderable in renderables:\n material.upload_camera(program, camera)\n material.upload_attributes(program, renderable._attributes)\n program['u_model'] = renderable.model_mat.T\n program.draw(gl.GL_TRIANGLES)\n gl.glCullFace(gl.GL_BACK)\n gl.glDisable(gl.GL_CULL_FACE)\n",
"step-4": "import logging\nfrom typing import Dict\nimport numpy as np\nfrom meshkit import Mesh\nfrom rendkit.materials import DepthMaterial\nfrom vispy import gloo, app\nfrom vispy.gloo import gl\nlogger = logging.getLogger(__name__)\n\n\nclass Renderable:\n\n def __init__(self, material_name: str, attributes: Dict[str, np.ndarray\n ], model_mat=np.eye(4), uv_scale=1.0):\n self.model_mat = model_mat\n self.material_name = material_name\n self._attributes = attributes\n self._uv_scale = uv_scale\n self._current_scene = None\n self._program = None\n self._scene_version = -1\n\n def set_uv_scale(self, scale):\n self._uv_scale = scale\n if 'a_uv' in self._attributes:\n if self._program is not None:\n self._program['u_uv_scale'] = self._uv_scale\n\n def scale_uv_scale(self, v):\n self._uv_scale *= v\n if 'a_uv' in self._attributes:\n if self._program is not None:\n self._program['u_uv_scale'] = self._uv_scale\n\n def activate(self, scene, camera):\n material = scene.get_material(self.material_name)\n if self._program is None or scene != self._current_scene:\n self._current_scene = scene\n self._scene_version = -1\n if self._scene_version != scene.version:\n self._current_scene = scene\n self._scene_version = scene.version\n self._program = material.compile(num_lights=len(scene.lights),\n num_shadow_sources=len(scene.shadow_sources),\n use_radiance_map=scene.radiance_map is not None)\n material.upload_attributes(self._program, self._attributes,\n self._uv_scale)\n material.upload_radmap(self._program, scene.radiance_map)\n material.upload_shadow_sources(self._program, scene.shadow_sources)\n material.upload_lights(self._program, scene.lights)\n material.upload_camera(self._program, camera)\n self._program['u_model'] = self.model_mat.T\n return self._program\n\n\ndef mesh_to_renderables(mesh: Mesh, model_mat):\n renderables = []\n for material_id, material_name in enumerate(mesh.materials):\n filter = {'material': material_id}\n vertex_positions = mesh.expand_face_vertices(filter)\n vertex_normals = mesh.expand_face_normals(filter)\n vertex_tangents, vertex_bitangents = mesh.expand_tangents(filter)\n vertex_uvs = mesh.expand_face_uvs(filter)\n if len(vertex_positions) < 3:\n logger.warning('Material {} not visible.'.format(material_name))\n continue\n attributes = dict(a_position=vertex_positions, a_normal=\n vertex_normals, a_tangent=vertex_tangents, a_bitangent=\n vertex_bitangents, a_uv=vertex_uvs)\n renderables.append(Renderable(material_name, attributes, model_mat,\n uv_scale=mesh.uv_scale))\n return renderables\n\n\nclass DummyRenderer(app.Canvas):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n gloo.set_viewport(0, 0, *self.size)\n\n def __enter__(self):\n self._backend._vispy_warmup()\n return self\n\n\nclass ContextProvider:\n\n def __init__(self, size):\n self.size = size\n canvas = gloo.get_current_canvas()\n self.context_exists = canvas is not None and not canvas._closed\n if self.context_exists:\n logger.debug('Using existing OpenGL context.')\n self.provider = gloo.get_current_canvas()\n self.previous_size = self.provider.size\n else:\n logger.debug('Providing temporary context with DummyRenderer.')\n self.provider = DummyRenderer(size=size)\n\n def __enter__(self):\n gloo.set_viewport(0, 0, *self.size)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if not self.context_exists:\n self.provider.__exit__(exc_type, exc_val, exc_tb)\n else:\n gloo.set_viewport(0, 0, *self.previous_size)\n\n\ndef draw_depth(camera, renderables, rend_target):\n 
rendfb, rendtex, _ = rend_target\n material = DepthMaterial()\n program = DepthMaterial().compile()\n with rendfb:\n gloo.clear(color=camera.clear_color)\n gloo.set_state(depth_test=True)\n gl.glEnable(gl.GL_CULL_FACE)\n gl.glCullFace(gl.GL_FRONT)\n for renderable in renderables:\n material.upload_camera(program, camera)\n material.upload_attributes(program, renderable._attributes)\n program['u_model'] = renderable.model_mat.T\n program.draw(gl.GL_TRIANGLES)\n gl.glCullFace(gl.GL_BACK)\n gl.glDisable(gl.GL_CULL_FACE)\n",
"step-5": "import logging\nfrom typing import Dict\n\nimport numpy as np\n\nfrom meshkit import Mesh\nfrom rendkit.materials import DepthMaterial\nfrom vispy import gloo, app\nfrom vispy.gloo import gl\n\nlogger = logging.getLogger(__name__)\n\n\nclass Renderable:\n def __init__(self,\n material_name: str,\n attributes: Dict[str, np.ndarray],\n model_mat=np.eye(4),\n uv_scale=1.0):\n self.model_mat = model_mat\n self.material_name = material_name\n self._attributes = attributes\n self._uv_scale = uv_scale\n\n self._current_scene = None\n self._program = None\n self._scene_version = -1\n\n def set_uv_scale(self, scale):\n self._uv_scale = scale\n if 'a_uv' in self._attributes:\n if self._program is not None:\n self._program['u_uv_scale'] = self._uv_scale\n\n def scale_uv_scale(self, v):\n self._uv_scale *= v\n if 'a_uv' in self._attributes:\n if self._program is not None:\n self._program['u_uv_scale'] = self._uv_scale\n\n def activate(self, scene, camera):\n material = scene.get_material(self.material_name)\n if self._program is None or scene != self._current_scene:\n self._current_scene = scene\n self._scene_version = -1\n if self._scene_version != scene.version:\n self._current_scene = scene\n self._scene_version = scene.version\n self._program = material.compile(\n num_lights=len(scene.lights),\n num_shadow_sources=len(scene.shadow_sources),\n use_radiance_map=scene.radiance_map is not None)\n material.upload_attributes(self._program, self._attributes, self._uv_scale)\n material.upload_radmap(self._program, scene.radiance_map)\n material.upload_shadow_sources(self._program, scene.shadow_sources)\n material.upload_lights(self._program, scene.lights)\n\n material.upload_camera(self._program, camera)\n self._program['u_model'] = self.model_mat.T\n\n return self._program\n\n\ndef mesh_to_renderables(mesh: Mesh, model_mat):\n renderables = []\n # For now each renderable represents a submesh with the same materials.\n for material_id, material_name in enumerate(mesh.materials):\n filter = {'material': material_id}\n vertex_positions = mesh.expand_face_vertices(filter)\n vertex_normals = mesh.expand_face_normals(filter)\n vertex_tangents, vertex_bitangents = mesh.expand_tangents(\n filter)\n vertex_uvs = mesh.expand_face_uvs(filter)\n if len(vertex_positions) < 3:\n logger.warning('Material {} not visible.'.format(material_name))\n continue\n attributes = dict(\n a_position=vertex_positions,\n a_normal=vertex_normals,\n a_tangent=vertex_tangents,\n a_bitangent=vertex_bitangents,\n a_uv=vertex_uvs\n )\n renderables.append(Renderable(material_name, attributes, model_mat,\n uv_scale=mesh.uv_scale))\n return renderables\n\n\nclass DummyRenderer(app.Canvas):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n gloo.set_viewport(0, 0, *self.size)\n\n def __enter__(self):\n self._backend._vispy_warmup()\n return self\n\n\nclass ContextProvider:\n def __init__(self, size):\n self.size = size\n canvas = gloo.get_current_canvas()\n self.context_exists = canvas is not None and not canvas._closed\n if self.context_exists:\n logger.debug(\"Using existing OpenGL context.\")\n self.provider = gloo.get_current_canvas()\n self.previous_size = self.provider.size\n else:\n logger.debug(\"Providing temporary context with DummyRenderer.\")\n self.provider = DummyRenderer(size=size)\n\n def __enter__(self):\n gloo.set_viewport(0, 0, *self.size)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if not self.context_exists:\n self.provider.__exit__(exc_type, exc_val, exc_tb)\n else:\n 
gloo.set_viewport(0, 0, *self.previous_size)\n\n\ndef draw_depth(camera, renderables, rend_target):\n rendfb, rendtex, _ = rend_target\n\n material = DepthMaterial()\n program = DepthMaterial().compile()\n\n with rendfb:\n gloo.clear(color=camera.clear_color)\n gloo.set_state(depth_test=True)\n gl.glEnable(gl.GL_CULL_FACE)\n gl.glCullFace(gl.GL_FRONT)\n for renderable in renderables:\n material.upload_camera(program, camera)\n material.upload_attributes(program, renderable._attributes)\n program['u_model'] = renderable.model_mat.T\n program.draw(gl.GL_TRIANGLES)\n gl.glCullFace(gl.GL_BACK)\n gl.glDisable(gl.GL_CULL_FACE)\n",
"step-ids": [
10,
11,
13,
16,
17
]
}
|
[
10,
11,
13,
16,
17
] |
import numpy as np
import cv2 as cv
import methods as meth
from numpy.fft import fft2, fftshift, ifft2, ifftshift
import pandas
import os
import noGPU as h
import matplotlib.pyplot as plt
class fullSys():
def __init__(self, dir, file, size, line):
csv_reader = pandas.read_csv(file, index_col='Objective')
self.Params = {}
self.Params['mag'] = csv_reader['Magnification'][line]
self.Params['NA'] = csv_reader['NA'][line]
self.Params['ps'] = [csv_reader['Pixel Size x'][line], csv_reader['Pixel Size y'][line]]
self.Params['distance'] = csv_reader['Screen Distance'][line]
self.Params['LEDSpace'] = csv_reader['LED Spacing'][line]
self.Params['LEDNum'] = [csv_reader['Num LED x'][line], csv_reader['Num LED x'][line]]
self.Params['dir'] = dir
self.Params['images'] = os.listdir(dir)
self.Params['numImgs'] = len(self.Params['images'])
self.Params['smallSize'] = meth.readImage(dir, self.Params['images'][0], colour=1, getsize=True)
self.Params['fResolution'] = self.fRes(self.Params['mag'], self.Params['smallSize'], self.Params['ps'])
print("fullSys")
## Instantiate sub Objects ##
splitSize, self.Params['lc'] = self.getSS()
img = meth.readImage(self.Params['dir'], self.Params['images'][0])
print("fullSys2")
numFiles, divisor = self.getDivisor(img, splitSize)
print("fullSys2")
self.Params['numFiles'] = numFiles
self.Params['divisor'] = divisor
self.Params['size'] = self.getSize(size, numFiles)
self.subObjs = np.empty([numFiles, numFiles], dtype=section)
print("fullSys1")
for i in range(numFiles):
for j in range(numFiles):
subImg = img[i * divisor:(i + 1) * divisor, j * divisor:(j + 1) * divisor]
self.subObjs[i, j] = section(i, j, subImg, self.Params)
h.progbar(i, numFiles, 'Initializing')
def getSS(self):
""" Determines the required subsection size based on Cittert Zernike theorem """
rho = 300e-6 # LED size
lc = 0.61*R*530/rho
size = lc*slef.Params['mag'] / self.Params['ps']
return size, lc
def getDivisor(self, img, splitSize):
imgSize = img.shape[0]
while True:
if imgSize % splitSize == 0:
divisor = splitSize
break
splitSize += 1
numFiles = int(imgSize / divisor)
return numFiles, divisor
def getSize(self, size, numSplits):
while True:
if size[0] % numSplits == 0:
break
size[0] += 1
return size[0]
def fRes(self, mag, size, ps):
""" Determines the change in spatial frequency across one pixel in F-space """
x = 2 * np.pi * mag / (size[0] * ps[0])
y = 2 * np.pi * mag / (size[1] * ps[1])
return [x, y]
class section():
def __init__(self, i0, j0, subImg, Params):
self.Params = Params
self.subParams = {}
self.subParams['wLen'] = [630e-9, 530e-9, 430e-9]
self.subParams['subSize'] = subImg.shape
self.subParams['bigSize'] = [np.int(Params['size'] / Params['numFiles'])] * 2
self.S = np.empty([self.subParams['bigSize'][0], self.subParams['bigSize'][1], 3], dtype=np.complex64)
self.P = np.empty([self.subParams['subSize'][0], self.subParams['subSize'][1], 3], dtype=np.complex64)
self.meanFFT = np.zeros([self.subParams['subSize'][0], self.subParams['subSize'][1], 3], dtype=np.complex64)
self.meanNum = 0
self.subParams['fRApprox'] = np.empty([3], dtype=int)
self.subParams['coords'] = np.empty([3, 16, 16, 2])
self.subParams['isBF'] = np.empty([3, 16, 16])
for i in range(0, 3):
self.S[:, :, i] = self.initS0(subImg[:, :, i], self.subParams['bigSize'])
self.subParams['fRApprox'][i] = self.fRad(Params['fResolution'],
Params['NA'], self.subParams['wLen'][i])
print(Params['NA'], self.subParams['wLen'][i], Params['mag'], Params['ps'], Params['smallSize'])
self.P[:, :, i] = self.initP0(self.subParams['subSize'], self.subParams['fRApprox'][i])
self.subParams['coords'][i, :, :, :], self.subParams['isBF'][i, :, :] =\
self.initCoords(i0, j0, self.subParams['wLen'][i], self.subParams['fRApprox'][i])
self.bayer = np.empty([Params['divisor'], Params['divisor'], 3])
self.invBayer = np.empty([Params['divisor'], Params['divisor'], 3])
for i in range(3):
self.bayer[:, :, i], self.invBayer[:, :, i] = h.genBayer([Params['divisor'], Params['divisor']], i)
def initS0(self, img, size):
""" Initialises the FT of the high res image by linear interpolation of a low res image """
I0 = cv.resize(img, (size[1], size[0]),
interpolation=cv.INTER_LINEAR) # Bilinear interpolated upsampled image
amplitude = np.sqrt(I0)
FI0 = fft2(ifftshift(amplitude))
FI0 = fftshift(FI0) # FI0.shape[0]
S = np.array(FI0, dtype=np.complex64)
return S
def initP0(self, size, radius):
""" Initialises the pupil function as a real circular step function of value 1 """
return h.circle(size, radius)[:, :, 0]
def fRad(self, fDu, NA, wLen):
""" Determines the approximate radius in F-space in pixels of the pupil function """
x = 2 * np.pi * NA / (wLen * fDu[0])
y = 2 * np.pi * NA / (wLen * fDu[1])
avr = np.int32(np.average([x, y]))
return avr
def initCoords(self, i, j, wLen, Rad):
""" Returns 2D array where LED coords relate to fourier centre positions """
segmentPos = [i, j]
n = self.Params['numFiles']
w = self.subParams['subSize'][0]
c = w / (2 * n)
centre = (segmentPos[0] * 2 * c + c - w) * self.Params['ps'][0]/self.Params['mag']
self.Params['centre'] = centre
coords = np.empty((self.Params['LEDNum'][0], self.Params['LEDNum'][1], 2), dtype=np.int32)
isBF = np.zeros((self.Params['LEDNum'][0], self.Params['LEDNum'][1]), dtype=np.int32)
numImgs = int(len(self.Params['images']) ** 0.5)
for i, img in enumerate(self.Params['images']):
LED = meth.getLED(img)
LEDPixelPos = self.getLEDPos(LED[0], LED[1], centre, wLen)
#print("LED:", LED, "LEDPixelPos:", LEDPixelPos)
#print("LEDPos:", [LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1])
coords[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1] = LEDPixelPos
if ((LEDPixelPos[0]-w/2)**2 + (LEDPixelPos[1]-w/2)**2 < Rad):
isBF[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1] = 1
return coords, isBF
def getLEDPos(self, nx, ny, centre, wLen):
""" Determines the location of the centre of the fourier pattern in pixels """
ax = np.arctan((centre - nx * self.Params['LEDSpace']) / self.Params['distance']) # Angle to x axis
ay = np.arctan((centre - ny * self.Params['LEDSpace']) / self.Params['distance']) # Angle to y axis
dx = ax / (wLen * self.Params['fResolution'][0])
dy = ay / (wLen * self.Params['fResolution'][1])
pos = [int(dx + self.subParams['subSize'][0] / 2), int(dy + self.subParams['subSize'][0] / 2)]
return pos
class splitImage():
def __init__(self, dir, imgName, numSplits, splitSize):
self.LEDPos = meth.getLED(imgName)
self.subImg = np.empty([numSplits, numSplits], dtype=subImage)
for i in range(numSplits):
for j in range(numSplits):
self.subImg[i, j] = subImage(dir, splitSize, imgName, self.LEDPos, i, j)
class subImage():
def __init__(self, dir, splitSize, imgName, LEDPos, i, j):
img = meth.readImage(dir, imgName)
self.image = img[i * splitSize:(i + 1) * splitSize, j * splitSize:(j + 1) * splitSize]
self.imgPos = [i, j]
self.LEDPos = LEDPos
########################################################################################################################
'''
class preProcess(objective):
def __init__(self, dir, file, size, line, colour=1):
""" Slices images into sections """
super().__init__(dir, file, size, line, colour=1)
numFiles, devisor = self.getDevisor(150)
self.genFiles(numFiles)
self.split(devisor, numFiles)
def genFiles(self, numFiles):
path = os.path.join(os.getcwd(), 'temp')
if os.path.isdir(path):
shutil.rmtree(path)
time.sleep(0.01)
os.mkdir(path)
for i in range(numFiles):
for j in range(numFiles):
folder = '%s_%s' % (str(i), str(j))
path1 = os.path.join(path, folder)
os.mkdir(path1)
def getDevisor(self, splitSize):
imgName = self.images[0]
img = self.readImage(self.dir, imgName)
imgSize = img.shape[0]
while True:
if imgSize % splitSize == 0:
devisor = splitSize
break
splitSize += 1
numFiles = int(imgSize / devisor)
return numFiles, devisor
def split(self, devisor, numFiles):
path0 = os.path.join(os.getcwd(), 'temp')
for i0, file in enumerate(self.images):
LED = self.getLED(file)
img = self.readImage(self.dir, file)
for i in range(numFiles):
for j in range(numFiles):
folder = '%s_%s' % (str(i), str(j))
path1 = os.path.join(path0, folder)
file = 'img_%s_%s_.jpg' % (str(LED[0]), str(LED[1]))
path = os.path.join(path1, file)
subImg = img[i * devisor:(i + 1) * devisor, j * devisor:(j + 1) * devisor]
cv.imwrite(path, subImg)
h.progbar(i0 * numFiles ** 2 + i * numFiles + j,
len(self.images) * numFiles ** 2, 'Slicing Images')
def initCoords(self, dir):
""" Returns 2D array where LED coords relate to fourier centre positions """
dirName = os.path.basename(dir)
segmentPos = self.getSegment(dirName)
N = len(os.listdir(dir))
n = np.sqrt(N)
w = self.smallSize[0]
c = w / (2 * n)
centre = (segmentPos[0] * 2 * c + c - w) * self.ps[0]/self.mag
coords = np.empty((self.LEDNum[0], self.LEDNum[1], 2), dtype=np.int32)
for i, img in enumerate(self.images):
LED = self.getLED(img)
LEDPixelPos = self.getLEDPos(LED[0], LED[1], centre)
coords[LED[0], LED[1]] = LEDPixelPos
return coords
'''
|
normal
|
{
"blob_id": "e3c9487f3221ca89b9014b2e6470ca9d4dbc925a",
"index": 2239,
"step-1": "<mask token>\n\n\nclass section:\n\n def __init__(self, i0, j0, subImg, Params):\n self.Params = Params\n self.subParams = {}\n self.subParams['wLen'] = [6.3e-07, 5.3e-07, 4.3e-07]\n self.subParams['subSize'] = subImg.shape\n self.subParams['bigSize'] = [np.int(Params['size'] / Params[\n 'numFiles'])] * 2\n self.S = np.empty([self.subParams['bigSize'][0], self.subParams[\n 'bigSize'][1], 3], dtype=np.complex64)\n self.P = np.empty([self.subParams['subSize'][0], self.subParams[\n 'subSize'][1], 3], dtype=np.complex64)\n self.meanFFT = np.zeros([self.subParams['subSize'][0], self.\n subParams['subSize'][1], 3], dtype=np.complex64)\n self.meanNum = 0\n self.subParams['fRApprox'] = np.empty([3], dtype=int)\n self.subParams['coords'] = np.empty([3, 16, 16, 2])\n self.subParams['isBF'] = np.empty([3, 16, 16])\n for i in range(0, 3):\n self.S[:, :, i] = self.initS0(subImg[:, :, i], self.subParams[\n 'bigSize'])\n self.subParams['fRApprox'][i] = self.fRad(Params['fResolution'],\n Params['NA'], self.subParams['wLen'][i])\n print(Params['NA'], self.subParams['wLen'][i], Params['mag'],\n Params['ps'], Params['smallSize'])\n self.P[:, :, i] = self.initP0(self.subParams['subSize'], self.\n subParams['fRApprox'][i])\n self.subParams['coords'][i, :, :, :], self.subParams['isBF'][i,\n :, :] = self.initCoords(i0, j0, self.subParams['wLen'][i],\n self.subParams['fRApprox'][i])\n self.bayer = np.empty([Params['divisor'], Params['divisor'], 3])\n self.invBayer = np.empty([Params['divisor'], Params['divisor'], 3])\n for i in range(3):\n self.bayer[:, :, i], self.invBayer[:, :, i] = h.genBayer([\n Params['divisor'], Params['divisor']], i)\n\n def initS0(self, img, size):\n \"\"\" Initialises the FT of the high res image by linear interpolation of a low res image \"\"\"\n I0 = cv.resize(img, (size[1], size[0]), interpolation=cv.INTER_LINEAR)\n amplitude = np.sqrt(I0)\n FI0 = fft2(ifftshift(amplitude))\n FI0 = fftshift(FI0)\n S = np.array(FI0, dtype=np.complex64)\n return S\n <mask token>\n <mask token>\n\n def initCoords(self, i, j, wLen, Rad):\n \"\"\" Returns 2D array where LED coords relate to fourier centre positions \"\"\"\n segmentPos = [i, j]\n n = self.Params['numFiles']\n w = self.subParams['subSize'][0]\n c = w / (2 * n)\n centre = (segmentPos[0] * 2 * c + c - w) * self.Params['ps'][0\n ] / self.Params['mag']\n self.Params['centre'] = centre\n coords = np.empty((self.Params['LEDNum'][0], self.Params['LEDNum'][\n 1], 2), dtype=np.int32)\n isBF = np.zeros((self.Params['LEDNum'][0], self.Params['LEDNum'][1]\n ), dtype=np.int32)\n numImgs = int(len(self.Params['images']) ** 0.5)\n for i, img in enumerate(self.Params['images']):\n LED = meth.getLED(img)\n LEDPixelPos = self.getLEDPos(LED[0], LED[1], centre, wLen)\n coords[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1\n ] = LEDPixelPos\n if (LEDPixelPos[0] - w / 2) ** 2 + (LEDPixelPos[1] - w / 2\n ) ** 2 < Rad:\n isBF[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / \n 2) - 1] = 1\n return coords, isBF\n\n def getLEDPos(self, nx, ny, centre, wLen):\n \"\"\" Determines the location of the centre of the fourier pattern in pixels \"\"\"\n ax = np.arctan((centre - nx * self.Params['LEDSpace']) / self.\n Params['distance'])\n ay = np.arctan((centre - ny * self.Params['LEDSpace']) / self.\n Params['distance'])\n dx = ax / (wLen * self.Params['fResolution'][0])\n dy = ay / (wLen * self.Params['fResolution'][1])\n pos = [int(dx + self.subParams['subSize'][0] / 2), int(dy + self.\n subParams['subSize'][0] / 2)]\n return pos\n\n\nclass 
splitImage:\n\n def __init__(self, dir, imgName, numSplits, splitSize):\n self.LEDPos = meth.getLED(imgName)\n self.subImg = np.empty([numSplits, numSplits], dtype=subImage)\n for i in range(numSplits):\n for j in range(numSplits):\n self.subImg[i, j] = subImage(dir, splitSize, imgName, self.\n LEDPos, i, j)\n\n\nclass subImage:\n\n def __init__(self, dir, splitSize, imgName, LEDPos, i, j):\n img = meth.readImage(dir, imgName)\n self.image = img[i * splitSize:(i + 1) * splitSize, j * splitSize:(\n j + 1) * splitSize]\n self.imgPos = [i, j]\n self.LEDPos = LEDPos\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass section:\n\n def __init__(self, i0, j0, subImg, Params):\n self.Params = Params\n self.subParams = {}\n self.subParams['wLen'] = [6.3e-07, 5.3e-07, 4.3e-07]\n self.subParams['subSize'] = subImg.shape\n self.subParams['bigSize'] = [np.int(Params['size'] / Params[\n 'numFiles'])] * 2\n self.S = np.empty([self.subParams['bigSize'][0], self.subParams[\n 'bigSize'][1], 3], dtype=np.complex64)\n self.P = np.empty([self.subParams['subSize'][0], self.subParams[\n 'subSize'][1], 3], dtype=np.complex64)\n self.meanFFT = np.zeros([self.subParams['subSize'][0], self.\n subParams['subSize'][1], 3], dtype=np.complex64)\n self.meanNum = 0\n self.subParams['fRApprox'] = np.empty([3], dtype=int)\n self.subParams['coords'] = np.empty([3, 16, 16, 2])\n self.subParams['isBF'] = np.empty([3, 16, 16])\n for i in range(0, 3):\n self.S[:, :, i] = self.initS0(subImg[:, :, i], self.subParams[\n 'bigSize'])\n self.subParams['fRApprox'][i] = self.fRad(Params['fResolution'],\n Params['NA'], self.subParams['wLen'][i])\n print(Params['NA'], self.subParams['wLen'][i], Params['mag'],\n Params['ps'], Params['smallSize'])\n self.P[:, :, i] = self.initP0(self.subParams['subSize'], self.\n subParams['fRApprox'][i])\n self.subParams['coords'][i, :, :, :], self.subParams['isBF'][i,\n :, :] = self.initCoords(i0, j0, self.subParams['wLen'][i],\n self.subParams['fRApprox'][i])\n self.bayer = np.empty([Params['divisor'], Params['divisor'], 3])\n self.invBayer = np.empty([Params['divisor'], Params['divisor'], 3])\n for i in range(3):\n self.bayer[:, :, i], self.invBayer[:, :, i] = h.genBayer([\n Params['divisor'], Params['divisor']], i)\n\n def initS0(self, img, size):\n \"\"\" Initialises the FT of the high res image by linear interpolation of a low res image \"\"\"\n I0 = cv.resize(img, (size[1], size[0]), interpolation=cv.INTER_LINEAR)\n amplitude = np.sqrt(I0)\n FI0 = fft2(ifftshift(amplitude))\n FI0 = fftshift(FI0)\n S = np.array(FI0, dtype=np.complex64)\n return S\n\n def initP0(self, size, radius):\n \"\"\" Initialises the pupil function as a real circular step function of value 1 \"\"\"\n return h.circle(size, radius)[:, :, 0]\n\n def fRad(self, fDu, NA, wLen):\n \"\"\" Determines the approximate radius in F-space in pixels of the pupil function \"\"\"\n x = 2 * np.pi * NA / (wLen * fDu[0])\n y = 2 * np.pi * NA / (wLen * fDu[1])\n avr = np.int32(np.average([x, y]))\n return avr\n\n def initCoords(self, i, j, wLen, Rad):\n \"\"\" Returns 2D array where LED coords relate to fourier centre positions \"\"\"\n segmentPos = [i, j]\n n = self.Params['numFiles']\n w = self.subParams['subSize'][0]\n c = w / (2 * n)\n centre = (segmentPos[0] * 2 * c + c - w) * self.Params['ps'][0\n ] / self.Params['mag']\n self.Params['centre'] = centre\n coords = np.empty((self.Params['LEDNum'][0], self.Params['LEDNum'][\n 1], 2), dtype=np.int32)\n isBF = np.zeros((self.Params['LEDNum'][0], self.Params['LEDNum'][1]\n ), dtype=np.int32)\n numImgs = int(len(self.Params['images']) ** 0.5)\n for i, img in enumerate(self.Params['images']):\n LED = meth.getLED(img)\n LEDPixelPos = self.getLEDPos(LED[0], LED[1], centre, wLen)\n coords[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1\n ] = LEDPixelPos\n if (LEDPixelPos[0] - w / 2) ** 2 + (LEDPixelPos[1] - w / 2\n ) ** 2 < Rad:\n isBF[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / \n 2) - 1] = 1\n return coords, isBF\n\n def getLEDPos(self, nx, ny, centre, wLen):\n \"\"\" Determines the location of the centre of the fourier pattern in pixels \"\"\"\n ax = 
np.arctan((centre - nx * self.Params['LEDSpace']) / self.\n Params['distance'])\n ay = np.arctan((centre - ny * self.Params['LEDSpace']) / self.\n Params['distance'])\n dx = ax / (wLen * self.Params['fResolution'][0])\n dy = ay / (wLen * self.Params['fResolution'][1])\n pos = [int(dx + self.subParams['subSize'][0] / 2), int(dy + self.\n subParams['subSize'][0] / 2)]\n return pos\n\n\nclass splitImage:\n\n def __init__(self, dir, imgName, numSplits, splitSize):\n self.LEDPos = meth.getLED(imgName)\n self.subImg = np.empty([numSplits, numSplits], dtype=subImage)\n for i in range(numSplits):\n for j in range(numSplits):\n self.subImg[i, j] = subImage(dir, splitSize, imgName, self.\n LEDPos, i, j)\n\n\nclass subImage:\n\n def __init__(self, dir, splitSize, imgName, LEDPos, i, j):\n img = meth.readImage(dir, imgName)\n self.image = img[i * splitSize:(i + 1) * splitSize, j * splitSize:(\n j + 1) * splitSize]\n self.imgPos = [i, j]\n self.LEDPos = LEDPos\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass fullSys:\n <mask token>\n <mask token>\n\n def getDivisor(self, img, splitSize):\n imgSize = img.shape[0]\n while True:\n if imgSize % splitSize == 0:\n divisor = splitSize\n break\n splitSize += 1\n numFiles = int(imgSize / divisor)\n return numFiles, divisor\n <mask token>\n <mask token>\n\n\nclass section:\n\n def __init__(self, i0, j0, subImg, Params):\n self.Params = Params\n self.subParams = {}\n self.subParams['wLen'] = [6.3e-07, 5.3e-07, 4.3e-07]\n self.subParams['subSize'] = subImg.shape\n self.subParams['bigSize'] = [np.int(Params['size'] / Params[\n 'numFiles'])] * 2\n self.S = np.empty([self.subParams['bigSize'][0], self.subParams[\n 'bigSize'][1], 3], dtype=np.complex64)\n self.P = np.empty([self.subParams['subSize'][0], self.subParams[\n 'subSize'][1], 3], dtype=np.complex64)\n self.meanFFT = np.zeros([self.subParams['subSize'][0], self.\n subParams['subSize'][1], 3], dtype=np.complex64)\n self.meanNum = 0\n self.subParams['fRApprox'] = np.empty([3], dtype=int)\n self.subParams['coords'] = np.empty([3, 16, 16, 2])\n self.subParams['isBF'] = np.empty([3, 16, 16])\n for i in range(0, 3):\n self.S[:, :, i] = self.initS0(subImg[:, :, i], self.subParams[\n 'bigSize'])\n self.subParams['fRApprox'][i] = self.fRad(Params['fResolution'],\n Params['NA'], self.subParams['wLen'][i])\n print(Params['NA'], self.subParams['wLen'][i], Params['mag'],\n Params['ps'], Params['smallSize'])\n self.P[:, :, i] = self.initP0(self.subParams['subSize'], self.\n subParams['fRApprox'][i])\n self.subParams['coords'][i, :, :, :], self.subParams['isBF'][i,\n :, :] = self.initCoords(i0, j0, self.subParams['wLen'][i],\n self.subParams['fRApprox'][i])\n self.bayer = np.empty([Params['divisor'], Params['divisor'], 3])\n self.invBayer = np.empty([Params['divisor'], Params['divisor'], 3])\n for i in range(3):\n self.bayer[:, :, i], self.invBayer[:, :, i] = h.genBayer([\n Params['divisor'], Params['divisor']], i)\n\n def initS0(self, img, size):\n \"\"\" Initialises the FT of the high res image by linear interpolation of a low res image \"\"\"\n I0 = cv.resize(img, (size[1], size[0]), interpolation=cv.INTER_LINEAR)\n amplitude = np.sqrt(I0)\n FI0 = fft2(ifftshift(amplitude))\n FI0 = fftshift(FI0)\n S = np.array(FI0, dtype=np.complex64)\n return S\n\n def initP0(self, size, radius):\n \"\"\" Initialises the pupil function as a real circular step function of value 1 \"\"\"\n return h.circle(size, radius)[:, :, 0]\n\n def fRad(self, fDu, NA, wLen):\n \"\"\" Determines the approximate radius in F-space in pixels of the pupil function \"\"\"\n x = 2 * np.pi * NA / (wLen * fDu[0])\n y = 2 * np.pi * NA / (wLen * fDu[1])\n avr = np.int32(np.average([x, y]))\n return avr\n\n def initCoords(self, i, j, wLen, Rad):\n \"\"\" Returns 2D array where LED coords relate to fourier centre positions \"\"\"\n segmentPos = [i, j]\n n = self.Params['numFiles']\n w = self.subParams['subSize'][0]\n c = w / (2 * n)\n centre = (segmentPos[0] * 2 * c + c - w) * self.Params['ps'][0\n ] / self.Params['mag']\n self.Params['centre'] = centre\n coords = np.empty((self.Params['LEDNum'][0], self.Params['LEDNum'][\n 1], 2), dtype=np.int32)\n isBF = np.zeros((self.Params['LEDNum'][0], self.Params['LEDNum'][1]\n ), dtype=np.int32)\n numImgs = int(len(self.Params['images']) ** 0.5)\n for i, img in enumerate(self.Params['images']):\n LED = meth.getLED(img)\n LEDPixelPos = self.getLEDPos(LED[0], LED[1], centre, wLen)\n coords[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1\n ] = LEDPixelPos\n if 
(LEDPixelPos[0] - w / 2) ** 2 + (LEDPixelPos[1] - w / 2\n ) ** 2 < Rad:\n isBF[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / \n 2) - 1] = 1\n return coords, isBF\n\n def getLEDPos(self, nx, ny, centre, wLen):\n \"\"\" Determines the location of the centre of the fourier pattern in pixels \"\"\"\n ax = np.arctan((centre - nx * self.Params['LEDSpace']) / self.\n Params['distance'])\n ay = np.arctan((centre - ny * self.Params['LEDSpace']) / self.\n Params['distance'])\n dx = ax / (wLen * self.Params['fResolution'][0])\n dy = ay / (wLen * self.Params['fResolution'][1])\n pos = [int(dx + self.subParams['subSize'][0] / 2), int(dy + self.\n subParams['subSize'][0] / 2)]\n return pos\n\n\nclass splitImage:\n\n def __init__(self, dir, imgName, numSplits, splitSize):\n self.LEDPos = meth.getLED(imgName)\n self.subImg = np.empty([numSplits, numSplits], dtype=subImage)\n for i in range(numSplits):\n for j in range(numSplits):\n self.subImg[i, j] = subImage(dir, splitSize, imgName, self.\n LEDPos, i, j)\n\n\nclass subImage:\n\n def __init__(self, dir, splitSize, imgName, LEDPos, i, j):\n img = meth.readImage(dir, imgName)\n self.image = img[i * splitSize:(i + 1) * splitSize, j * splitSize:(\n j + 1) * splitSize]\n self.imgPos = [i, j]\n self.LEDPos = LEDPos\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass fullSys:\n\n def __init__(self, dir, file, size, line):\n csv_reader = pandas.read_csv(file, index_col='Objective')\n self.Params = {}\n self.Params['mag'] = csv_reader['Magnification'][line]\n self.Params['NA'] = csv_reader['NA'][line]\n self.Params['ps'] = [csv_reader['Pixel Size x'][line], csv_reader[\n 'Pixel Size y'][line]]\n self.Params['distance'] = csv_reader['Screen Distance'][line]\n self.Params['LEDSpace'] = csv_reader['LED Spacing'][line]\n self.Params['LEDNum'] = [csv_reader['Num LED x'][line], csv_reader[\n 'Num LED x'][line]]\n self.Params['dir'] = dir\n self.Params['images'] = os.listdir(dir)\n self.Params['numImgs'] = len(self.Params['images'])\n self.Params['smallSize'] = meth.readImage(dir, self.Params['images'\n ][0], colour=1, getsize=True)\n self.Params['fResolution'] = self.fRes(self.Params['mag'], self.\n Params['smallSize'], self.Params['ps'])\n print('fullSys')\n splitSize, self.Params['lc'] = self.getSS()\n img = meth.readImage(self.Params['dir'], self.Params['images'][0])\n print('fullSys2')\n numFiles, divisor = self.getDivisor(img, splitSize)\n print('fullSys2')\n self.Params['numFiles'] = numFiles\n self.Params['divisor'] = divisor\n self.Params['size'] = self.getSize(size, numFiles)\n self.subObjs = np.empty([numFiles, numFiles], dtype=section)\n print('fullSys1')\n for i in range(numFiles):\n for j in range(numFiles):\n subImg = img[i * divisor:(i + 1) * divisor, j * divisor:(j +\n 1) * divisor]\n self.subObjs[i, j] = section(i, j, subImg, self.Params)\n h.progbar(i, numFiles, 'Initializing')\n\n def getSS(self):\n \"\"\" Determines the required subsection size based on Cittert Zernike theorem \"\"\"\n rho = 0.0003\n lc = 0.61 * R * 530 / rho\n size = lc * slef.Params['mag'] / self.Params['ps']\n return size, lc\n\n def getDivisor(self, img, splitSize):\n imgSize = img.shape[0]\n while True:\n if imgSize % splitSize == 0:\n divisor = splitSize\n break\n splitSize += 1\n numFiles = int(imgSize / divisor)\n return numFiles, divisor\n <mask token>\n <mask token>\n\n\nclass section:\n\n def __init__(self, i0, j0, subImg, Params):\n self.Params = Params\n self.subParams = {}\n self.subParams['wLen'] = [6.3e-07, 5.3e-07, 4.3e-07]\n self.subParams['subSize'] = subImg.shape\n self.subParams['bigSize'] = [np.int(Params['size'] / Params[\n 'numFiles'])] * 2\n self.S = np.empty([self.subParams['bigSize'][0], self.subParams[\n 'bigSize'][1], 3], dtype=np.complex64)\n self.P = np.empty([self.subParams['subSize'][0], self.subParams[\n 'subSize'][1], 3], dtype=np.complex64)\n self.meanFFT = np.zeros([self.subParams['subSize'][0], self.\n subParams['subSize'][1], 3], dtype=np.complex64)\n self.meanNum = 0\n self.subParams['fRApprox'] = np.empty([3], dtype=int)\n self.subParams['coords'] = np.empty([3, 16, 16, 2])\n self.subParams['isBF'] = np.empty([3, 16, 16])\n for i in range(0, 3):\n self.S[:, :, i] = self.initS0(subImg[:, :, i], self.subParams[\n 'bigSize'])\n self.subParams['fRApprox'][i] = self.fRad(Params['fResolution'],\n Params['NA'], self.subParams['wLen'][i])\n print(Params['NA'], self.subParams['wLen'][i], Params['mag'],\n Params['ps'], Params['smallSize'])\n self.P[:, :, i] = self.initP0(self.subParams['subSize'], self.\n subParams['fRApprox'][i])\n self.subParams['coords'][i, :, :, :], self.subParams['isBF'][i,\n :, :] = self.initCoords(i0, j0, self.subParams['wLen'][i],\n self.subParams['fRApprox'][i])\n self.bayer = np.empty([Params['divisor'], Params['divisor'], 3])\n self.invBayer = np.empty([Params['divisor'], 
Params['divisor'], 3])\n for i in range(3):\n self.bayer[:, :, i], self.invBayer[:, :, i] = h.genBayer([\n Params['divisor'], Params['divisor']], i)\n\n def initS0(self, img, size):\n \"\"\" Initialises the FT of the high res image by linear interpolation of a low res image \"\"\"\n I0 = cv.resize(img, (size[1], size[0]), interpolation=cv.INTER_LINEAR)\n amplitude = np.sqrt(I0)\n FI0 = fft2(ifftshift(amplitude))\n FI0 = fftshift(FI0)\n S = np.array(FI0, dtype=np.complex64)\n return S\n\n def initP0(self, size, radius):\n \"\"\" Initialises the pupil function as a real circular step function of value 1 \"\"\"\n return h.circle(size, radius)[:, :, 0]\n\n def fRad(self, fDu, NA, wLen):\n \"\"\" Determines the approximate radius in F-space in pixels of the pupil function \"\"\"\n x = 2 * np.pi * NA / (wLen * fDu[0])\n y = 2 * np.pi * NA / (wLen * fDu[1])\n avr = np.int32(np.average([x, y]))\n return avr\n\n def initCoords(self, i, j, wLen, Rad):\n \"\"\" Returns 2D array where LED coords relate to fourier centre positions \"\"\"\n segmentPos = [i, j]\n n = self.Params['numFiles']\n w = self.subParams['subSize'][0]\n c = w / (2 * n)\n centre = (segmentPos[0] * 2 * c + c - w) * self.Params['ps'][0\n ] / self.Params['mag']\n self.Params['centre'] = centre\n coords = np.empty((self.Params['LEDNum'][0], self.Params['LEDNum'][\n 1], 2), dtype=np.int32)\n isBF = np.zeros((self.Params['LEDNum'][0], self.Params['LEDNum'][1]\n ), dtype=np.int32)\n numImgs = int(len(self.Params['images']) ** 0.5)\n for i, img in enumerate(self.Params['images']):\n LED = meth.getLED(img)\n LEDPixelPos = self.getLEDPos(LED[0], LED[1], centre, wLen)\n coords[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1\n ] = LEDPixelPos\n if (LEDPixelPos[0] - w / 2) ** 2 + (LEDPixelPos[1] - w / 2\n ) ** 2 < Rad:\n isBF[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / \n 2) - 1] = 1\n return coords, isBF\n\n def getLEDPos(self, nx, ny, centre, wLen):\n \"\"\" Determines the location of the centre of the fourier pattern in pixels \"\"\"\n ax = np.arctan((centre - nx * self.Params['LEDSpace']) / self.\n Params['distance'])\n ay = np.arctan((centre - ny * self.Params['LEDSpace']) / self.\n Params['distance'])\n dx = ax / (wLen * self.Params['fResolution'][0])\n dy = ay / (wLen * self.Params['fResolution'][1])\n pos = [int(dx + self.subParams['subSize'][0] / 2), int(dy + self.\n subParams['subSize'][0] / 2)]\n return pos\n\n\nclass splitImage:\n\n def __init__(self, dir, imgName, numSplits, splitSize):\n self.LEDPos = meth.getLED(imgName)\n self.subImg = np.empty([numSplits, numSplits], dtype=subImage)\n for i in range(numSplits):\n for j in range(numSplits):\n self.subImg[i, j] = subImage(dir, splitSize, imgName, self.\n LEDPos, i, j)\n\n\nclass subImage:\n\n def __init__(self, dir, splitSize, imgName, LEDPos, i, j):\n img = meth.readImage(dir, imgName)\n self.image = img[i * splitSize:(i + 1) * splitSize, j * splitSize:(\n j + 1) * splitSize]\n self.imgPos = [i, j]\n self.LEDPos = LEDPos\n\n\n<mask token>\n",
"step-5": "import numpy as np\nimport cv2 as cv\nimport methods as meth\nfrom numpy.fft import fft2, fftshift, ifft2, ifftshift\nimport pandas\nimport os\nimport noGPU as h\nimport matplotlib.pyplot as plt\n\nclass fullSys():\n def __init__(self, dir, file, size, line):\n csv_reader = pandas.read_csv(file, index_col='Objective')\n self.Params = {}\n self.Params['mag'] = csv_reader['Magnification'][line]\n self.Params['NA'] = csv_reader['NA'][line]\n self.Params['ps'] = [csv_reader['Pixel Size x'][line], csv_reader['Pixel Size y'][line]]\n self.Params['distance'] = csv_reader['Screen Distance'][line]\n self.Params['LEDSpace'] = csv_reader['LED Spacing'][line]\n self.Params['LEDNum'] = [csv_reader['Num LED x'][line], csv_reader['Num LED x'][line]]\n self.Params['dir'] = dir\n self.Params['images'] = os.listdir(dir)\n self.Params['numImgs'] = len(self.Params['images'])\n self.Params['smallSize'] = meth.readImage(dir, self.Params['images'][0], colour=1, getsize=True)\n self.Params['fResolution'] = self.fRes(self.Params['mag'], self.Params['smallSize'], self.Params['ps'])\n print(\"fullSys\")\n\n ## Instantiate sub Objects ##\n\n splitSize, self.Params['lc'] = self.getSS()\n img = meth.readImage(self.Params['dir'], self.Params['images'][0])\n print(\"fullSys2\")\n\n numFiles, divisor = self.getDivisor(img, splitSize)\n print(\"fullSys2\")\n\n self.Params['numFiles'] = numFiles\n self.Params['divisor'] = divisor\n self.Params['size'] = self.getSize(size, numFiles)\n\n self.subObjs = np.empty([numFiles, numFiles], dtype=section)\n print(\"fullSys1\")\n\n for i in range(numFiles):\n for j in range(numFiles):\n subImg = img[i * divisor:(i + 1) * divisor, j * divisor:(j + 1) * divisor]\n self.subObjs[i, j] = section(i, j, subImg, self.Params)\n h.progbar(i, numFiles, 'Initializing')\n\n\n def getSS(self):\n \"\"\" Determines the required subsection size based on Cittert Zernike theorem \"\"\"\n rho = 300e-6 # LED size\n lc = 0.61*R*530/rho\n size = lc*slef.Params['mag'] / self.Params['ps']\n return size, lc\n\n\n def getDivisor(self, img, splitSize):\n imgSize = img.shape[0]\n while True:\n if imgSize % splitSize == 0:\n divisor = splitSize\n break\n splitSize += 1\n numFiles = int(imgSize / divisor)\n return numFiles, divisor\n\n\n def getSize(self, size, numSplits):\n while True:\n if size[0] % numSplits == 0:\n break\n size[0] += 1\n return size[0]\n\n\n def fRes(self, mag, size, ps):\n \"\"\" Determines the change in spatial frequency across one pixel in F-space \"\"\"\n x = 2 * np.pi * mag / (size[0] * ps[0])\n y = 2 * np.pi * mag / (size[1] * ps[1])\n return [x, y]\n\n\nclass section():\n def __init__(self, i0, j0, subImg, Params):\n self.Params = Params\n self.subParams = {}\n self.subParams['wLen'] = [630e-9, 530e-9, 430e-9]\n self.subParams['subSize'] = subImg.shape\n self.subParams['bigSize'] = [np.int(Params['size'] / Params['numFiles'])] * 2\n self.S = np.empty([self.subParams['bigSize'][0], self.subParams['bigSize'][1], 3], dtype=np.complex64)\n self.P = np.empty([self.subParams['subSize'][0], self.subParams['subSize'][1], 3], dtype=np.complex64)\n self.meanFFT = np.zeros([self.subParams['subSize'][0], self.subParams['subSize'][1], 3], dtype=np.complex64)\n self.meanNum = 0\n self.subParams['fRApprox'] = np.empty([3], dtype=int)\n self.subParams['coords'] = np.empty([3, 16, 16, 2])\n self.subParams['isBF'] = np.empty([3, 16, 16])\n for i in range(0, 3):\n self.S[:, :, i] = self.initS0(subImg[:, :, i], self.subParams['bigSize'])\n self.subParams['fRApprox'][i] = 
self.fRad(Params['fResolution'],\n Params['NA'], self.subParams['wLen'][i])\n print(Params['NA'], self.subParams['wLen'][i], Params['mag'], Params['ps'], Params['smallSize'])\n self.P[:, :, i] = self.initP0(self.subParams['subSize'], self.subParams['fRApprox'][i])\n self.subParams['coords'][i, :, :, :], self.subParams['isBF'][i, :, :] =\\\n self.initCoords(i0, j0, self.subParams['wLen'][i], self.subParams['fRApprox'][i])\n self.bayer = np.empty([Params['divisor'], Params['divisor'], 3])\n self.invBayer = np.empty([Params['divisor'], Params['divisor'], 3])\n for i in range(3):\n self.bayer[:, :, i], self.invBayer[:, :, i] = h.genBayer([Params['divisor'], Params['divisor']], i)\n\n\n def initS0(self, img, size):\n \"\"\" Initialises the FT of the high res image by linear interpolation of a low res image \"\"\"\n\n I0 = cv.resize(img, (size[1], size[0]),\n interpolation=cv.INTER_LINEAR) # Bilinear interpolated upsampled image\n\n amplitude = np.sqrt(I0)\n\n FI0 = fft2(ifftshift(amplitude))\n FI0 = fftshift(FI0) # FI0.shape[0]\n S = np.array(FI0, dtype=np.complex64)\n return S\n\n\n def initP0(self, size, radius):\n \"\"\" Initialises the pupil function as a real circular step function of value 1 \"\"\"\n return h.circle(size, radius)[:, :, 0]\n\n\n def fRad(self, fDu, NA, wLen):\n \"\"\" Determines the approximate radius in F-space in pixels of the pupil function \"\"\"\n x = 2 * np.pi * NA / (wLen * fDu[0])\n y = 2 * np.pi * NA / (wLen * fDu[1])\n avr = np.int32(np.average([x, y]))\n return avr\n\n\n def initCoords(self, i, j, wLen, Rad):\n \"\"\" Returns 2D array where LED coords relate to fourier centre positions \"\"\"\n segmentPos = [i, j]\n n = self.Params['numFiles']\n w = self.subParams['subSize'][0]\n c = w / (2 * n)\n centre = (segmentPos[0] * 2 * c + c - w) * self.Params['ps'][0]/self.Params['mag']\n self.Params['centre'] = centre\n coords = np.empty((self.Params['LEDNum'][0], self.Params['LEDNum'][1], 2), dtype=np.int32)\n isBF = np.zeros((self.Params['LEDNum'][0], self.Params['LEDNum'][1]), dtype=np.int32)\n numImgs = int(len(self.Params['images']) ** 0.5)\n for i, img in enumerate(self.Params['images']):\n LED = meth.getLED(img)\n LEDPixelPos = self.getLEDPos(LED[0], LED[1], centre, wLen)\n #print(\"LED:\", LED, \"LEDPixelPos:\", LEDPixelPos)\n #print(\"LEDPos:\", [LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1])\n coords[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1] = LEDPixelPos\n if ((LEDPixelPos[0]-w/2)**2 + (LEDPixelPos[1]-w/2)**2 < Rad):\n isBF[LED[0] + int(numImgs / 2) - 1, LED[1] + int(numImgs / 2) - 1] = 1\n return coords, isBF\n\n\n def getLEDPos(self, nx, ny, centre, wLen):\n \"\"\" Determines the location of the centre of the fourier pattern in pixels \"\"\"\n ax = np.arctan((centre - nx * self.Params['LEDSpace']) / self.Params['distance']) # Angle to x axis\n ay = np.arctan((centre - ny * self.Params['LEDSpace']) / self.Params['distance']) # Angle to y axis\n dx = ax / (wLen * self.Params['fResolution'][0])\n dy = ay / (wLen * self.Params['fResolution'][1])\n pos = [int(dx + self.subParams['subSize'][0] / 2), int(dy + self.subParams['subSize'][0] / 2)]\n return pos\n\n\nclass splitImage():\n def __init__(self, dir, imgName, numSplits, splitSize):\n self.LEDPos = meth.getLED(imgName)\n self.subImg = np.empty([numSplits, numSplits], dtype=subImage)\n for i in range(numSplits):\n for j in range(numSplits):\n self.subImg[i, j] = subImage(dir, splitSize, imgName, self.LEDPos, i, j)\n\n\nclass subImage():\n def __init__(self, dir, splitSize, 
imgName, LEDPos, i, j):\n img = meth.readImage(dir, imgName)\n self.image = img[i * splitSize:(i + 1) * splitSize, j * splitSize:(j + 1) * splitSize]\n self.imgPos = [i, j]\n self.LEDPos = LEDPos\n\n\n\n\n\n\n########################################################################################################################\n'''\nclass preProcess(objective):\n def __init__(self, dir, file, size, line, colour=1):\n \"\"\" Slices images into sections \"\"\"\n super().__init__(dir, file, size, line, colour=1)\n numFiles, devisor = self.getDevisor(150)\n self.genFiles(numFiles)\n self.split(devisor, numFiles)\n\n\n def genFiles(self, numFiles):\n path = os.path.join(os.getcwd(), 'temp')\n if os.path.isdir(path):\n shutil.rmtree(path)\n time.sleep(0.01)\n os.mkdir(path)\n for i in range(numFiles):\n for j in range(numFiles):\n folder = '%s_%s' % (str(i), str(j))\n path1 = os.path.join(path, folder)\n os.mkdir(path1)\n\n\n def getDevisor(self, splitSize):\n imgName = self.images[0]\n img = self.readImage(self.dir, imgName)\n imgSize = img.shape[0]\n while True:\n if imgSize % splitSize == 0:\n devisor = splitSize\n break\n splitSize += 1\n numFiles = int(imgSize / devisor)\n return numFiles, devisor\n\n\n def split(self, devisor, numFiles):\n path0 = os.path.join(os.getcwd(), 'temp')\n for i0, file in enumerate(self.images):\n LED = self.getLED(file)\n img = self.readImage(self.dir, file)\n for i in range(numFiles):\n for j in range(numFiles):\n folder = '%s_%s' % (str(i), str(j))\n path1 = os.path.join(path0, folder)\n file = 'img_%s_%s_.jpg' % (str(LED[0]), str(LED[1]))\n path = os.path.join(path1, file)\n subImg = img[i * devisor:(i + 1) * devisor, j * devisor:(j + 1) * devisor]\n cv.imwrite(path, subImg)\n h.progbar(i0 * numFiles ** 2 + i * numFiles + j,\n len(self.images) * numFiles ** 2, 'Slicing Images')\n\n\n\n def initCoords(self, dir):\n \"\"\" Returns 2D array where LED coords relate to fourier centre positions \"\"\"\n dirName = os.path.basename(dir)\n segmentPos = self.getSegment(dirName)\n N = len(os.listdir(dir))\n n = np.sqrt(N)\n w = self.smallSize[0]\n c = w / (2 * n)\n centre = (segmentPos[0] * 2 * c + c - w) * self.ps[0]/self.mag\n coords = np.empty((self.LEDNum[0], self.LEDNum[1], 2), dtype=np.int32)\n for i, img in enumerate(self.images):\n LED = self.getLED(img)\n LEDPixelPos = self.getLEDPos(LED[0], LED[1], centre)\n coords[LED[0], LED[1]] = LEDPixelPos\n return coords\n'''",
"step-ids": [
9,
11,
13,
15,
19
]
}
|
[
9,
11,
13,
15,
19
] |
import time
import random
import math
people = [('Seymour', 'BOS'),
('Franny', 'DAL'),
('Zooey', 'CAK'),
('Walt', 'MIA'),
('Buddy', 'ORD'),
('Les', 'OMA')]
destination = 'LGA'
flights = dict()
for line in file('schedule.txt'):
origin, dest, depart, arrive, price = line.strip().split(',')
flights.setdefault((origin, dest), [])
flights[(origin, dest)].append((depart, arrive, int(price)))
def getMinutes(t):
x = time.strptime(t, '%H:%M')
return x[3] * 60 + x[4]
def printSchedule(r):
for d in range(len(r) / 2):
name = people[d][0]
origin = people[d][1]
out = flights[(origin, destination)][r[2 * d]]
ret = flights[(origin, destination)][r[2 * d + 1]]
print "%10s%10s %5s-%5s $%3s %5s-%5s $%3s" % (name, origin,
out[0], out[1], out[2],
ret[0], ret[1], ret[2])
def scheduleCost(sol):
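    # Total cost = everyone's ticket prices + total minutes spent waiting at
    # the airport, plus a flat 50 penalty if the latest arrival falls after
    # the earliest departure (presumably an extra rental-car day).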
totalPrice = 0
totalWait = 0
latestArrival = 0
earliestDepart = 24 * 60
for d in range(len(sol) / 2):
origin = people[d][1]
out = flights[(origin, destination)][int(sol[2 * d])]
ret = flights[(origin, destination)][int(sol[2 * d + 1])]
totalPrice += out[2] + ret[2]
if latestArrival < getMinutes(out[1]): latestArrival = getMinutes(out[1])
if earliestDepart > getMinutes(ret[0]): earliestDepart = getMinutes(ret[0])
for d in range(len(sol) / 2):
origin = people[d][1]
out = flights[(origin, destination)][int(sol[2 * d])]
ret = flights[(origin, destination)][int(sol[2 * d + 1])]
totalWait += latestArrival - getMinutes(out[1])
totalWait += getMinutes(ret[0]) - earliestDepart
if latestArrival > earliestDepart: totalPrice += 50
return totalWait + totalPrice
def randomOptimize(domain, costf = scheduleCost):
best = 999999999999
bestr = None
for i in range(1000):
r = [random.randint(domain[i][0], domain[i][1]) for i in range(len(domain))]
cost = costf(r)
if cost < best:
best = cost
bestr = r
    return bestr  # return the best solution found, not the last random draw
def hillClimb(domain, costf = scheduleCost):
sol = [random.randint(domain[i][0], domain[i][1]) for i in range(len(domain))]
while 1:
neighbors = list()
for j in range(len(domain)):
if sol[j] > domain[j][0]:
neighbors.append(sol[0:j] + [sol[j] - 1] + sol[j + 1 :])
if sol[j] < domain[j][1]:
neighbors.append(sol[0:j] + [sol[j] + 1] + sol[j + 1 :])
current = costf(sol)
best = current
for j in neighbors:
cost = costf(j)
if cost < best:
best = cost
sol = j
if best == current:
break
return sol
def annealingOptimize(domain, costf = scheduleCost, T = 10000.0, cool = 0.95, step = 1):
sol = [random.randint(domain[i][0], domain[i][1]) for i in range(len(domain))]
while T > 0.1:
i = random.randint(0, len(domain) - 1)
dir = random.randint(-step, step)
vec = sol[:]
vec[i] += dir
if vec[i] < domain[i][0]: vec[i] = domain[i][0]
elif vec[i] > domain[i][1]: vec[i] = domain[i][1]
ca = costf(sol)
cb = costf(vec)
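        # Metropolis criterion: improvements are always accepted; a worse move
        # is accepted with probability e^(-(cb - ca) / T), which shrinks as
        # the temperature T cools.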
if cb < ca or random.random() < pow(math.e, -(cb - ca) / T):
sol = vec
T = T * cool
return sol
def geneticOptimize(domain, costf = scheduleCost, popSize = 50, step = 1,
mutProb = 0.2, elite = 0.2, maxIter = 100):
def mutate(vec):
i = random.randint(0, len(domain) - 1)
        if random.random() < 0.5 and vec[i] > domain[i][0]:
            return vec[0 : i] + [vec[i] - step] + vec[i + 1 :]
        elif vec[i] < domain[i][1]:
            return vec[0 : i] + [vec[i] + step] + vec[i + 1 :]
        return vec  # no move possible at this index; keep the vector unchanged
def crossOver(r1, r2):
i = random.randint(1, len(domain) - 2)
return r1[0 : i] + r2[i :]
pop = list()
for i in range(popSize):
vec = [random.randint(domain[i][0], domain[i][1])
for i in range(len(domain))]
pop.append(vec)
topElite = int(elite * popSize)
for i in range(maxIter):
scores = [(costf(v), v) for v in pop if v != None]
scores.sort()
ranked = [v for (s, v) in scores]
pop = ranked[0 : topElite]
while len(pop) < popSize:
if random.random() < mutProb:
pop.append(mutate(ranked[random.randint(0, topElite)]))
else:
c1 = random.randint(0, topElite)
c2 = random.randint(0, topElite)
pop.append(crossOver(ranked[c1], ranked[c2]))
print scores[0][0]
return scores[0][1]
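
# Usage sketch (hypothetical domain bounds; assumes schedule.txt lists ten
# flights per route, so each of the 2 * len(people) outbound/return indices
# ranges over 0..9):
#   domain = [(0, 9)] * (len(people) * 2)
#   s = geneticOptimize(domain)
#   printSchedule(s)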
|
normal
|
{
"blob_id": "bd5f298027f82edf5451f5297d577005674de4c3",
"index": 3577,
"step-1": "import time\nimport random\nimport math\n\npeople = [('Seymour', 'BOS'),\n ('Franny', 'DAL'),\n ('Zooey', 'CAK'),\n ('Walt', 'MIA'),\n ('Buddy', 'ORD'),\n ('Les', 'OMA')]\n\ndestination = 'LGA'\n\nflights = dict()\n\nfor line in file('schedule.txt'):\n origin, dest, depart, arrive, price = line.strip().split(',')\n flights.setdefault((origin, dest), [])\n flights[(origin, dest)].append((depart, arrive, int(price)))\n\n\ndef getMinutes(t):\n x = time.strptime(t, '%H:%M')\n return x[3] * 60 + x[4]\n\ndef printSchedule(r):\n for d in range(len(r) / 2):\n name = people[d][0]\n origin = people[d][1]\n out = flights[(origin, destination)][r[2 * d]]\n ret = flights[(origin, destination)][r[2 * d + 1]]\n print \"%10s%10s %5s-%5s $%3s %5s-%5s $%3s\" % (name, origin,\n out[0], out[1], out[2],\n ret[0], ret[1], ret[2])\ndef scheduleCost(sol):\n totalPrice = 0\n totalWait = 0\n latestArrival = 0\n earliestDepart = 24 * 60\n\n for d in range(len(sol) / 2):\n origin = people[d][1]\n out = flights[(origin, destination)][int(sol[2 * d])]\n ret = flights[(origin, destination)][int(sol[2 * d + 1])]\n\n totalPrice += out[2] + ret[2]\n\n if latestArrival < getMinutes(out[1]): latestArrival = getMinutes(out[1])\n if earliestDepart > getMinutes(ret[0]): earliestDepart = getMinutes(ret[0])\n\n for d in range(len(sol) / 2):\n origin = people[d][1]\n out = flights[(origin, destination)][int(sol[2 * d])]\n ret = flights[(origin, destination)][int(sol[2 * d + 1])]\n totalWait += latestArrival - getMinutes(out[1])\n totalWait += getMinutes(ret[0]) - earliestDepart\n\n if latestArrival > earliestDepart: totalPrice += 50\n\n return totalWait + totalPrice\n\ndef randomOptimize(domain, costf = scheduleCost):\n best = 999999999999\n bestr = None\n\n for i in range(1000):\n r = [random.randint(domain[i][0], domain[i][1]) for i in range(len(domain))]\n cost = costf(r)\n if cost < best:\n best = cost\n bestr = r\n\n return r\n\ndef hillClimb(domain, costf = scheduleCost):\n sol = [random.randint(domain[i][0], domain[i][1]) for i in range(len(domain))]\n\n while 1:\n neighbors = list()\n for j in range(len(domain)):\n if sol[j] > domain[j][0]:\n neighbors.append(sol[0:j] + [sol[j] - 1] + sol[j + 1 :])\n if sol[j] < domain[j][1]:\n neighbors.append(sol[0:j] + [sol[j] + 1] + sol[j + 1 :])\n\n current = costf(sol)\n best = current\n for j in neighbors:\n cost = costf(j)\n if cost < best:\n best = cost\n sol = j\n\n if best == current:\n break\n\n return sol\n\ndef annealingOptimize(domain, costf = scheduleCost, T = 10000.0, cool = 0.95, step = 1):\n sol = [random.randint(domain[i][0], domain[i][1]) for i in range(len(domain))]\n\n while T > 0.1:\n i = random.randint(0, len(domain) - 1)\n dir = random.randint(-step, step)\n vec = sol[:]\n vec[i] += dir\n if vec[i] < domain[i][0]: vec[i] = domain[i][0]\n elif vec[i] > domain[i][1]: vec[i] = domain[i][1]\n\n ca = costf(sol)\n cb = costf(vec)\n\n if cb < ca or random.random() < pow(math.e, -(cb - ca) / T):\n sol = vec\n\n T = T * cool\n\n return sol\n\ndef geneticOptimize(domain, costf = scheduleCost, popSize = 50, step = 1,\n mutProb = 0.2, elite = 0.2, maxIter = 100):\n def mutate(vec):\n i = random.randint(0, len(domain) - 1)\n if random.random < 0.5 and vec[i] > domain[i][0]:\n return vec[0 : i] + [vec[i] - step] + vec[i + 1 :]\n elif vec[i] < domain[i][1]:\n return vec[0 : i] + [vec[i] + step] + vec[i + 1 :]\n\n def crossOver(r1, r2):\n i = random.randint(1, len(domain) - 2)\n return r1[0 : i] + r2[i :]\n\n pop = list()\n for i in range(popSize):\n vec = 
[random.randint(domain[i][0], domain[i][1])\n for i in range(len(domain))]\n pop.append(vec)\n\n topElite = int(elite * popSize)\n for i in range(maxIter):\n scores = [(costf(v), v) for v in pop if v != None]\n scores.sort()\n ranked = [v for (s, v) in scores]\n pop = ranked[0 : topElite]\n while len(pop) < popSize:\n if random.random() < mutProb:\n pop.append(mutate(ranked[random.randint(0, topElite)]))\n else:\n c1 = random.randint(0, topElite)\n c2 = random.randint(0, topElite)\n pop.append(crossOver(ranked[c1], ranked[c2]))\n print scores[0][0]\n\n return scores[0][1]\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from math import *
import math
import re
import numpy as np
class atom:
aid=0
atype=''
x=0.0
y=0.0
z=0.0
rid=0
rtype=''
model=[]
chainid=''
def getlen(atm1,atm2):
dist=sqrt(pow(atm1.x-atm2.x,2)+pow(atm1.y-atm2.y,2)+pow(atm1.z-atm2.z,2))
return dist
def getangle(atm1,atm2,atm3):
dist1=sqrt(pow(atm1.x-atm2.x,2)+pow(atm1.y-atm2.y,2)+pow(atm1.z-atm2.z,2))
dist2=sqrt(pow(atm3.x-atm2.x,2)+pow(atm3.y-atm2.y,2)+pow(atm3.z-atm2.z,2))
dotp=(atm1.x-atm2.x)*(atm3.x-atm2.x)+(atm1.y-atm2.y)*(atm3.y-atm2.y)+(atm1.z-atm2.z)*(atm3.z-atm2.z)
angle=acos(dotp/(dist1*dist2))*180/pi
return angle
def getangledihedral(atm1,atm2,atm3,atm4):
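    # Dihedral angle between the planes spanned by (ab, bc) and (bc, cd):
    # p = ab x bc and q = bc x cd are the plane normals; the sign comes from
    # the orientation of p relative to cd.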
ab=np.zeros(3)
bc=np.zeros(3)
cd=np.zeros(3)
p=[]
q=[]
ab[0]=atm2.x-atm1.x
ab[1]=atm2.y-atm1.y
ab[2]=atm2.z-atm1.z
bc[0]=atm3.x-atm2.x
bc[1]=atm3.y-atm2.y
bc[2]=atm3.z-atm2.z
cd[0]=atm4.x-atm3.x
cd[1]=atm4.y-atm3.y
cd[2]=atm4.z-atm3.z
p.append(ab[1]*bc[2]-ab[2]*bc[1])
p.append(ab[2]*bc[0]-ab[0]*bc[2])
p.append(ab[0]*bc[1]-ab[1]*bc[0])
q.append(bc[1]*cd[2]-bc[2]*cd[1])
q.append(bc[2]*cd[0]-bc[0]*cd[2])
q.append(bc[0]*cd[1]-bc[1]*cd[0])
r1=0
r2=0
dp=0
dpcd=0
for i in range(0,3):
r1 += math.pow(p[i],2)
r2 += math.pow(q[i],2)
dp += p[i]*q[i]
dpcd += p[i]*cd[i]
dih=(dpcd/abs(dpcd))*math.acos(dp/(math.sqrt(r1)*math.sqrt(r2)))*180/math.pi
return dih
def getdihedralstrain(a1,a2,a3,a4,a5):
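    # Torsional strain energy of a disulfide from its five dihedrals
    # (chi1, chi2, chi3, chi2', chi1'). The coefficients match the commonly
    # cited Katz-Kossiakoff form, presumably in kJ/mol.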
dse=8.37*(1+math.cos(3*a1*math.pi/180))+8.37*(1+math.cos(3*a5*math.pi/180))+4.18*(1+math.cos(3*a2*math.pi/180))+4.18*(1+math.cos(3*a4*math.pi/180))+14.64*(1+math.cos(2*a3*math.pi/180))+2.51*(1+math.cos(3*a3*math.pi/180))
return dse
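
# Plausible S-S bond-length window (in Angstrom) used below to pair SG atoms
# into disulfide bridges.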
s_s_l=1.6
s_s_u=2.5
filetxt=open('filelist.txt')
txt_lines=filetxt.read().split('\n')
filetxt.close()
fileout=open('out_C-S-S-C_BACKBONE_scan.txt','w')
f1=open('error_C-S-S-C_scan.txt','w')
intr=[]
lenlines=len(txt_lines)
for ppp in range(lenlines):
filename=txt_lines[ppp]
if filename=='':
continue
print('%.2f'%((ppp+1)*100.0/(lenlines-1))+'% ('+str(ppp+1)+'/'+str(lenlines-1)+') Executing for:'+filename)
file=open(filename,'r')
lines=file.read().split('\n')
file.close()
T=[]
D=[]
S=[]
C=[]
SX=[]
TX=[]
A=[]
B=[]
E=[]
F=[]
modelno=[]
try:
for ln in lines:
if len(ln)>=6 and (ln[0:4]=='ATOM' or ln[0:6]=='HETATM'):
atm=atom()
atm.aid=int(ln[6:11])
atm.atype=ln[12:16].strip()
atm.rtype=ln[17:20].strip()
atm.chainid=ln[21]
atm.rid=int(ln[22:26])
atm.x=float(ln[30:38])
atm.y=float(ln[38:46])
                atm.z=float(ln[46:54])  # PDB z occupies columns 47-54, i.e. ln[46:54]
atm.model=modelno
symb=ln[13].strip()
if atm.atype=='CB' and (modelno==1 or modelno==A or modelno==[]) :
if atm.rtype=='CYS' :
C.append(atm)
D.append(atm)
if atm.atype=='SG'and (modelno==1 or modelno==A or modelno==[]) :
if atm.rtype=='CYS':
SX.append(atm)
TX.append(atm)
if atm.atype=='CA' and (modelno==1 or modelno==A or modelno==[]) :
if atm.rtype=='CYS':
B.append(atm)
E.append(atm)
if atm.atype=='N' and (modelno==1 or modelno==A or modelno==[]) :
if atm.rtype=='CYS' :
A.append(atm)
F.append(atm)
elif len(ln)>=5 and ln[0:5]=='MODEL':
modelno=int(ln[12:])
    except Exception:
f1.write(filename+'\n')
for k in SX:
for k1 in SX:
if k1.chainid==k.chainid:
if k1.rid==k.rid and k1.aid!=k.aid :
break
else:
S.append(k)
for m in TX:
for m1 in TX:
if m1.chainid==m.chainid:
if m1.rid==m.rid and m1.aid!=m.aid :
break
else:
T.append(m)
for a in range(len(A)):
for b in range(len(B)):
if A[a].rid==B[b].rid:
for j in range(len(C)):
for k in range(len(S)):
if C[j].rid==S[k].rid and C[j].rid==B[b].rid and C[j].chainid==B[b].chainid==S[k].chainid==A[a].chainid :
for m in range(len(T)):
if getlen(S[k],T[m])>=s_s_l and getlen(S[k],T[m])<=s_s_u and S[k].rid<T[m].rid :
for n in range(len(D)):
for e in range(len(E)):
if E[e].rid==D[n].rid:
for f in range(len(F)):
if D[n].rid==T[m].rid and E[e].rid==F[f].rid and D[n].chainid==T[m].chainid==E[e].chainid==F[f].chainid :
a1=getangledihedral(A[a],B[b],C[j],S[k])
a2=getangledihedral(B[b],C[j],S[k],T[m])
a3=getangledihedral(C[j],S[k],T[m],D[n])
a4=getangledihedral(S[k],T[m],D[n],E[e])
a5=getangledihedral(T[m],D[n],E[e],F[f])
dse=getdihedralstrain(a1,a2,a3,a4,a5)
intr.append([])
intr[len(intr)-1].append(filename)
intr[len(intr)-1].append(C[j].chainid)
intr[len(intr)-1].append(C[j].rid)
intr[len(intr)-1].append(T[m].rid)
intr[len(intr)-1].append(T[m].chainid)
intr[len(intr)-1].append(getlen(C[j],S[k]))
intr[len(intr)-1].append(getlen(T[m],S[k]))
intr[len(intr)-1].append(getlen(T[m],D[n]))
intr[len(intr)-1].append(a1)
intr[len(intr)-1].append(a2)
intr[len(intr)-1].append(a3)
intr[len(intr)-1].append(a4)
intr[len(intr)-1].append(a5)
intr[len(intr)-1].append(dse)
C=[]
T=[]
D=[]
S=[]
SX=[]
TX=[]
A=[]
B=[]
E=[]
F=[]
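    # Each output row: file, chain/residue ids of the two CYS, the CB-SG,
    # SG-SG' and SG'-CB' distances, the five dihedrals, and the strain energy.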
for line in intr:
for xxd in line:
fileout.write(str(xxd))
fileout.write('\t')
fileout.write('\n')
intr=[]
fileout.close()
fileout=open('out_C-S-S-C_BACKBONE_scan.txt','a')
fileout.close()
f1.close()
|
normal
|
{
"blob_id": "78123c806e5a8c0cc7511a5024769f8c61621efa",
"index": 9877,
"step-1": "<mask token>\n\n\nclass atom:\n aid = 0\n atype = ''\n x = 0.0\n y = 0.0\n z = 0.0\n rid = 0\n rtype = ''\n model = []\n chainid = ''\n\n\ndef getlen(atm1, atm2):\n dist = sqrt(pow(atm1.x - atm2.x, 2) + pow(atm1.y - atm2.y, 2) + pow(\n atm1.z - atm2.z, 2))\n return dist\n\n\n<mask token>\n\n\ndef getangledihedral(atm1, atm2, atm3, atm4):\n ab = np.zeros(3)\n bc = np.zeros(3)\n cd = np.zeros(3)\n p = []\n q = []\n ab[0] = atm2.x - atm1.x\n ab[1] = atm2.y - atm1.y\n ab[2] = atm2.z - atm1.z\n bc[0] = atm3.x - atm2.x\n bc[1] = atm3.y - atm2.y\n bc[2] = atm3.z - atm2.z\n cd[0] = atm4.x - atm3.x\n cd[1] = atm4.y - atm3.y\n cd[2] = atm4.z - atm3.z\n p.append(ab[1] * bc[2] - ab[2] * bc[1])\n p.append(ab[2] * bc[0] - ab[0] * bc[2])\n p.append(ab[0] * bc[1] - ab[1] * bc[0])\n q.append(bc[1] * cd[2] - bc[2] * cd[1])\n q.append(bc[2] * cd[0] - bc[0] * cd[2])\n q.append(bc[0] * cd[1] - bc[1] * cd[0])\n r1 = 0\n r2 = 0\n dp = 0\n dpcd = 0\n for i in range(0, 3):\n r1 += math.pow(p[i], 2)\n r2 += math.pow(q[i], 2)\n dp += p[i] * q[i]\n dpcd += p[i] * cd[i]\n dih = dpcd / abs(dpcd) * math.acos(dp / (math.sqrt(r1) * math.sqrt(r2))\n ) * 180 / math.pi\n return dih\n\n\ndef getdihedralstrain(a1, a2, a3, a4, a5):\n dse = 8.37 * (1 + math.cos(3 * a1 * math.pi / 180)) + 8.37 * (1 + math.\n cos(3 * a5 * math.pi / 180)) + 4.18 * (1 + math.cos(3 * a2 * math.\n pi / 180)) + 4.18 * (1 + math.cos(3 * a4 * math.pi / 180)) + 14.64 * (\n 1 + math.cos(2 * a3 * math.pi / 180)) + 2.51 * (1 + math.cos(3 * a3 *\n math.pi / 180))\n return dse\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass atom:\n aid = 0\n atype = ''\n x = 0.0\n y = 0.0\n z = 0.0\n rid = 0\n rtype = ''\n model = []\n chainid = ''\n\n\ndef getlen(atm1, atm2):\n dist = sqrt(pow(atm1.x - atm2.x, 2) + pow(atm1.y - atm2.y, 2) + pow(\n atm1.z - atm2.z, 2))\n return dist\n\n\ndef getangle(atm1, atm2, atm3):\n dist1 = sqrt(pow(atm1.x - atm2.x, 2) + pow(atm1.y - atm2.y, 2) + pow(\n atm1.z - atm2.z, 2))\n dist2 = sqrt(pow(atm3.x - atm2.x, 2) + pow(atm3.y - atm2.y, 2) + pow(\n atm3.z - atm2.z, 2))\n dotp = (atm1.x - atm2.x) * (atm3.x - atm2.x) + (atm1.y - atm2.y) * (atm3\n .y - atm2.y) + (atm1.z - atm2.z) * (atm3.z - atm2.z)\n angle = acos(dotp / (dist1 * dist2)) * 180 / pi\n return angle\n\n\ndef getangledihedral(atm1, atm2, atm3, atm4):\n ab = np.zeros(3)\n bc = np.zeros(3)\n cd = np.zeros(3)\n p = []\n q = []\n ab[0] = atm2.x - atm1.x\n ab[1] = atm2.y - atm1.y\n ab[2] = atm2.z - atm1.z\n bc[0] = atm3.x - atm2.x\n bc[1] = atm3.y - atm2.y\n bc[2] = atm3.z - atm2.z\n cd[0] = atm4.x - atm3.x\n cd[1] = atm4.y - atm3.y\n cd[2] = atm4.z - atm3.z\n p.append(ab[1] * bc[2] - ab[2] * bc[1])\n p.append(ab[2] * bc[0] - ab[0] * bc[2])\n p.append(ab[0] * bc[1] - ab[1] * bc[0])\n q.append(bc[1] * cd[2] - bc[2] * cd[1])\n q.append(bc[2] * cd[0] - bc[0] * cd[2])\n q.append(bc[0] * cd[1] - bc[1] * cd[0])\n r1 = 0\n r2 = 0\n dp = 0\n dpcd = 0\n for i in range(0, 3):\n r1 += math.pow(p[i], 2)\n r2 += math.pow(q[i], 2)\n dp += p[i] * q[i]\n dpcd += p[i] * cd[i]\n dih = dpcd / abs(dpcd) * math.acos(dp / (math.sqrt(r1) * math.sqrt(r2))\n ) * 180 / math.pi\n return dih\n\n\ndef getdihedralstrain(a1, a2, a3, a4, a5):\n dse = 8.37 * (1 + math.cos(3 * a1 * math.pi / 180)) + 8.37 * (1 + math.\n cos(3 * a5 * math.pi / 180)) + 4.18 * (1 + math.cos(3 * a2 * math.\n pi / 180)) + 4.18 * (1 + math.cos(3 * a4 * math.pi / 180)) + 14.64 * (\n 1 + math.cos(2 * a3 * math.pi / 180)) + 2.51 * (1 + math.cos(3 * a3 *\n math.pi / 180))\n return dse\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass atom:\n aid = 0\n atype = ''\n x = 0.0\n y = 0.0\n z = 0.0\n rid = 0\n rtype = ''\n model = []\n chainid = ''\n\n\ndef getlen(atm1, atm2):\n dist = sqrt(pow(atm1.x - atm2.x, 2) + pow(atm1.y - atm2.y, 2) + pow(\n atm1.z - atm2.z, 2))\n return dist\n\n\ndef getangle(atm1, atm2, atm3):\n dist1 = sqrt(pow(atm1.x - atm2.x, 2) + pow(atm1.y - atm2.y, 2) + pow(\n atm1.z - atm2.z, 2))\n dist2 = sqrt(pow(atm3.x - atm2.x, 2) + pow(atm3.y - atm2.y, 2) + pow(\n atm3.z - atm2.z, 2))\n dotp = (atm1.x - atm2.x) * (atm3.x - atm2.x) + (atm1.y - atm2.y) * (atm3\n .y - atm2.y) + (atm1.z - atm2.z) * (atm3.z - atm2.z)\n angle = acos(dotp / (dist1 * dist2)) * 180 / pi\n return angle\n\n\ndef getangledihedral(atm1, atm2, atm3, atm4):\n ab = np.zeros(3)\n bc = np.zeros(3)\n cd = np.zeros(3)\n p = []\n q = []\n ab[0] = atm2.x - atm1.x\n ab[1] = atm2.y - atm1.y\n ab[2] = atm2.z - atm1.z\n bc[0] = atm3.x - atm2.x\n bc[1] = atm3.y - atm2.y\n bc[2] = atm3.z - atm2.z\n cd[0] = atm4.x - atm3.x\n cd[1] = atm4.y - atm3.y\n cd[2] = atm4.z - atm3.z\n p.append(ab[1] * bc[2] - ab[2] * bc[1])\n p.append(ab[2] * bc[0] - ab[0] * bc[2])\n p.append(ab[0] * bc[1] - ab[1] * bc[0])\n q.append(bc[1] * cd[2] - bc[2] * cd[1])\n q.append(bc[2] * cd[0] - bc[0] * cd[2])\n q.append(bc[0] * cd[1] - bc[1] * cd[0])\n r1 = 0\n r2 = 0\n dp = 0\n dpcd = 0\n for i in range(0, 3):\n r1 += math.pow(p[i], 2)\n r2 += math.pow(q[i], 2)\n dp += p[i] * q[i]\n dpcd += p[i] * cd[i]\n dih = dpcd / abs(dpcd) * math.acos(dp / (math.sqrt(r1) * math.sqrt(r2))\n ) * 180 / math.pi\n return dih\n\n\ndef getdihedralstrain(a1, a2, a3, a4, a5):\n dse = 8.37 * (1 + math.cos(3 * a1 * math.pi / 180)) + 8.37 * (1 + math.\n cos(3 * a5 * math.pi / 180)) + 4.18 * (1 + math.cos(3 * a2 * math.\n pi / 180)) + 4.18 * (1 + math.cos(3 * a4 * math.pi / 180)) + 14.64 * (\n 1 + math.cos(2 * a3 * math.pi / 180)) + 2.51 * (1 + math.cos(3 * a3 *\n math.pi / 180))\n return dse\n\n\ns_s_l = 1.6\ns_s_u = 2.5\nfiletxt = open('filelist.txt')\ntxt_lines = filetxt.read().split('\\n')\nfiletxt.close()\nfileout = open('out_C-S-S-C_BACKBONE_scan.txt', 'w')\nf1 = open('error_C-S-S-C_scan.txt', 'w')\nintr = []\nlenlines = len(txt_lines)\nfor ppp in range(lenlines):\n filename = txt_lines[ppp]\n if filename == '':\n continue\n print('%.2f' % ((ppp + 1) * 100.0 / (lenlines - 1)) + '% (' + str(ppp +\n 1) + '/' + str(lenlines - 1) + ') Executing for:' + filename)\n file = open(filename, 'r')\n lines = file.read().split('\\n')\n file.close()\n T = []\n D = []\n S = []\n C = []\n SX = []\n TX = []\n A = []\n B = []\n E = []\n F = []\n modelno = []\n try:\n for ln in lines:\n if len(ln) >= 6 and (ln[0:4] == 'ATOM' or ln[0:6] == 'HETATM'):\n atm = atom()\n atm.aid = int(ln[6:11])\n atm.atype = ln[12:16].strip()\n atm.rtype = ln[17:20].strip()\n atm.chainid = ln[21]\n atm.rid = int(ln[22:26])\n atm.x = float(ln[30:38])\n atm.y = float(ln[38:46])\n atm.z = float(ln[47:54])\n atm.model = modelno\n symb = ln[13].strip()\n if atm.atype == 'CB' and (modelno == 1 or modelno == A or \n modelno == []):\n if atm.rtype == 'CYS':\n C.append(atm)\n D.append(atm)\n if atm.atype == 'SG' and (modelno == 1 or modelno == A or \n modelno == []):\n if atm.rtype == 'CYS':\n SX.append(atm)\n TX.append(atm)\n if atm.atype == 'CA' and (modelno == 1 or modelno == A or \n modelno == []):\n if atm.rtype == 'CYS':\n B.append(atm)\n E.append(atm)\n if atm.atype == 'N' and (modelno == 1 or modelno == A or \n modelno == []):\n if atm.rtype == 'CYS':\n A.append(atm)\n F.append(atm)\n elif len(ln) >= 5 
and ln[0:5] == 'MODEL':\n modelno = int(ln[12:])\n except:\n f1.write(filename + '\\n')\n for k in SX:\n for k1 in SX:\n if k1.chainid == k.chainid:\n if k1.rid == k.rid and k1.aid != k.aid:\n break\n else:\n S.append(k)\n for m in TX:\n for m1 in TX:\n if m1.chainid == m.chainid:\n if m1.rid == m.rid and m1.aid != m.aid:\n break\n else:\n T.append(m)\n for a in range(len(A)):\n for b in range(len(B)):\n if A[a].rid == B[b].rid:\n for j in range(len(C)):\n for k in range(len(S)):\n if C[j].rid == S[k].rid and C[j].rid == B[b].rid and C[\n j].chainid == B[b].chainid == S[k].chainid == A[a\n ].chainid:\n for m in range(len(T)):\n if getlen(S[k], T[m]) >= s_s_l and getlen(S\n [k], T[m]) <= s_s_u and S[k].rid < T[m\n ].rid:\n for n in range(len(D)):\n for e in range(len(E)):\n if E[e].rid == D[n].rid:\n for f in range(len(F)):\n if D[n].rid == T[m].rid and E[e].rid == F[f\n ].rid and D[n].chainid == T[m\n ].chainid == E[e].chainid == F[f\n ].chainid:\n a1 = getangledihedral(A[a], B[b], C[j],\n S[k])\n a2 = getangledihedral(B[b], C[j], S[k],\n T[m])\n a3 = getangledihedral(C[j], S[k], T[m],\n D[n])\n a4 = getangledihedral(S[k], T[m], D[n],\n E[e])\n a5 = getangledihedral(T[m], D[n], E[e],\n F[f])\n dse = (getdihedralstrain(a1, a2, a3, a4,\n a5))\n intr.append([])\n intr[len(intr) - 1].append(filename)\n intr[len(intr) - 1].append(C[j].chainid)\n intr[len(intr) - 1].append(C[j].rid)\n intr[len(intr) - 1].append(T[m].rid)\n intr[len(intr) - 1].append(T[m].chainid)\n intr[len(intr) - 1].append(getlen(C[j],\n S[k]))\n intr[len(intr) - 1].append(getlen(T[m],\n S[k]))\n intr[len(intr) - 1].append(getlen(T[m],\n D[n]))\n intr[len(intr) - 1].append(a1)\n intr[len(intr) - 1].append(a2)\n intr[len(intr) - 1].append(a3)\n intr[len(intr) - 1].append(a4)\n intr[len(intr) - 1].append(a5)\n intr[len(intr) - 1].append(dse)\n C = []\n T = []\n D = []\n S = []\n SX = []\n TX = []\n A = []\n B = []\n E = []\n F = []\n for line in intr:\n for xxd in line:\n fileout.write(str(xxd))\n fileout.write('\\t')\n fileout.write('\\n')\n intr = []\n fileout.close()\n fileout = open('out_C-S-S-C_BACKBONE_scan.txt', 'a')\nfileout.close()\nf1.close()\n",
"step-4": "from math import *\nimport math\nimport re\nimport numpy as np\n\n\nclass atom:\n aid = 0\n atype = ''\n x = 0.0\n y = 0.0\n z = 0.0\n rid = 0\n rtype = ''\n model = []\n chainid = ''\n\n\ndef getlen(atm1, atm2):\n dist = sqrt(pow(atm1.x - atm2.x, 2) + pow(atm1.y - atm2.y, 2) + pow(\n atm1.z - atm2.z, 2))\n return dist\n\n\ndef getangle(atm1, atm2, atm3):\n dist1 = sqrt(pow(atm1.x - atm2.x, 2) + pow(atm1.y - atm2.y, 2) + pow(\n atm1.z - atm2.z, 2))\n dist2 = sqrt(pow(atm3.x - atm2.x, 2) + pow(atm3.y - atm2.y, 2) + pow(\n atm3.z - atm2.z, 2))\n dotp = (atm1.x - atm2.x) * (atm3.x - atm2.x) + (atm1.y - atm2.y) * (atm3\n .y - atm2.y) + (atm1.z - atm2.z) * (atm3.z - atm2.z)\n angle = acos(dotp / (dist1 * dist2)) * 180 / pi\n return angle\n\n\ndef getangledihedral(atm1, atm2, atm3, atm4):\n ab = np.zeros(3)\n bc = np.zeros(3)\n cd = np.zeros(3)\n p = []\n q = []\n ab[0] = atm2.x - atm1.x\n ab[1] = atm2.y - atm1.y\n ab[2] = atm2.z - atm1.z\n bc[0] = atm3.x - atm2.x\n bc[1] = atm3.y - atm2.y\n bc[2] = atm3.z - atm2.z\n cd[0] = atm4.x - atm3.x\n cd[1] = atm4.y - atm3.y\n cd[2] = atm4.z - atm3.z\n p.append(ab[1] * bc[2] - ab[2] * bc[1])\n p.append(ab[2] * bc[0] - ab[0] * bc[2])\n p.append(ab[0] * bc[1] - ab[1] * bc[0])\n q.append(bc[1] * cd[2] - bc[2] * cd[1])\n q.append(bc[2] * cd[0] - bc[0] * cd[2])\n q.append(bc[0] * cd[1] - bc[1] * cd[0])\n r1 = 0\n r2 = 0\n dp = 0\n dpcd = 0\n for i in range(0, 3):\n r1 += math.pow(p[i], 2)\n r2 += math.pow(q[i], 2)\n dp += p[i] * q[i]\n dpcd += p[i] * cd[i]\n dih = dpcd / abs(dpcd) * math.acos(dp / (math.sqrt(r1) * math.sqrt(r2))\n ) * 180 / math.pi\n return dih\n\n\ndef getdihedralstrain(a1, a2, a3, a4, a5):\n dse = 8.37 * (1 + math.cos(3 * a1 * math.pi / 180)) + 8.37 * (1 + math.\n cos(3 * a5 * math.pi / 180)) + 4.18 * (1 + math.cos(3 * a2 * math.\n pi / 180)) + 4.18 * (1 + math.cos(3 * a4 * math.pi / 180)) + 14.64 * (\n 1 + math.cos(2 * a3 * math.pi / 180)) + 2.51 * (1 + math.cos(3 * a3 *\n math.pi / 180))\n return dse\n\n\ns_s_l = 1.6\ns_s_u = 2.5\nfiletxt = open('filelist.txt')\ntxt_lines = filetxt.read().split('\\n')\nfiletxt.close()\nfileout = open('out_C-S-S-C_BACKBONE_scan.txt', 'w')\nf1 = open('error_C-S-S-C_scan.txt', 'w')\nintr = []\nlenlines = len(txt_lines)\nfor ppp in range(lenlines):\n filename = txt_lines[ppp]\n if filename == '':\n continue\n print('%.2f' % ((ppp + 1) * 100.0 / (lenlines - 1)) + '% (' + str(ppp +\n 1) + '/' + str(lenlines - 1) + ') Executing for:' + filename)\n file = open(filename, 'r')\n lines = file.read().split('\\n')\n file.close()\n T = []\n D = []\n S = []\n C = []\n SX = []\n TX = []\n A = []\n B = []\n E = []\n F = []\n modelno = []\n try:\n for ln in lines:\n if len(ln) >= 6 and (ln[0:4] == 'ATOM' or ln[0:6] == 'HETATM'):\n atm = atom()\n atm.aid = int(ln[6:11])\n atm.atype = ln[12:16].strip()\n atm.rtype = ln[17:20].strip()\n atm.chainid = ln[21]\n atm.rid = int(ln[22:26])\n atm.x = float(ln[30:38])\n atm.y = float(ln[38:46])\n atm.z = float(ln[47:54])\n atm.model = modelno\n symb = ln[13].strip()\n if atm.atype == 'CB' and (modelno == 1 or modelno == A or \n modelno == []):\n if atm.rtype == 'CYS':\n C.append(atm)\n D.append(atm)\n if atm.atype == 'SG' and (modelno == 1 or modelno == A or \n modelno == []):\n if atm.rtype == 'CYS':\n SX.append(atm)\n TX.append(atm)\n if atm.atype == 'CA' and (modelno == 1 or modelno == A or \n modelno == []):\n if atm.rtype == 'CYS':\n B.append(atm)\n E.append(atm)\n if atm.atype == 'N' and (modelno == 1 or modelno == A or \n modelno == []):\n if atm.rtype == 'CYS':\n 
A.append(atm)\n F.append(atm)\n elif len(ln) >= 5 and ln[0:5] == 'MODEL':\n modelno = int(ln[12:])\n except:\n f1.write(filename + '\\n')\n for k in SX:\n for k1 in SX:\n if k1.chainid == k.chainid:\n if k1.rid == k.rid and k1.aid != k.aid:\n break\n else:\n S.append(k)\n for m in TX:\n for m1 in TX:\n if m1.chainid == m.chainid:\n if m1.rid == m.rid and m1.aid != m.aid:\n break\n else:\n T.append(m)\n for a in range(len(A)):\n for b in range(len(B)):\n if A[a].rid == B[b].rid:\n for j in range(len(C)):\n for k in range(len(S)):\n if C[j].rid == S[k].rid and C[j].rid == B[b].rid and C[\n j].chainid == B[b].chainid == S[k].chainid == A[a\n ].chainid:\n for m in range(len(T)):\n if getlen(S[k], T[m]) >= s_s_l and getlen(S\n [k], T[m]) <= s_s_u and S[k].rid < T[m\n ].rid:\n for n in range(len(D)):\n for e in range(len(E)):\n if E[e].rid == D[n].rid:\n for f in range(len(F)):\n if D[n].rid == T[m].rid and E[e].rid == F[f\n ].rid and D[n].chainid == T[m\n ].chainid == E[e].chainid == F[f\n ].chainid:\n a1 = getangledihedral(A[a], B[b], C[j],\n S[k])\n a2 = getangledihedral(B[b], C[j], S[k],\n T[m])\n a3 = getangledihedral(C[j], S[k], T[m],\n D[n])\n a4 = getangledihedral(S[k], T[m], D[n],\n E[e])\n a5 = getangledihedral(T[m], D[n], E[e],\n F[f])\n dse = (getdihedralstrain(a1, a2, a3, a4,\n a5))\n intr.append([])\n intr[len(intr) - 1].append(filename)\n intr[len(intr) - 1].append(C[j].chainid)\n intr[len(intr) - 1].append(C[j].rid)\n intr[len(intr) - 1].append(T[m].rid)\n intr[len(intr) - 1].append(T[m].chainid)\n intr[len(intr) - 1].append(getlen(C[j],\n S[k]))\n intr[len(intr) - 1].append(getlen(T[m],\n S[k]))\n intr[len(intr) - 1].append(getlen(T[m],\n D[n]))\n intr[len(intr) - 1].append(a1)\n intr[len(intr) - 1].append(a2)\n intr[len(intr) - 1].append(a3)\n intr[len(intr) - 1].append(a4)\n intr[len(intr) - 1].append(a5)\n intr[len(intr) - 1].append(dse)\n C = []\n T = []\n D = []\n S = []\n SX = []\n TX = []\n A = []\n B = []\n E = []\n F = []\n for line in intr:\n for xxd in line:\n fileout.write(str(xxd))\n fileout.write('\\t')\n fileout.write('\\n')\n intr = []\n fileout.close()\n fileout = open('out_C-S-S-C_BACKBONE_scan.txt', 'a')\nfileout.close()\nf1.close()\n",
"step-5": "from math import *\nimport math\nimport re\nimport numpy as np\nclass atom:\n aid=0 \n atype='' \n x=0.0 \n y=0.0 \n z=0.0 \n rid=0 \n rtype='' \n model=[]\n chainid=''\n\ndef getlen(atm1,atm2):\n dist=sqrt(pow(atm1.x-atm2.x,2)+pow(atm1.y-atm2.y,2)+pow(atm1.z-atm2.z,2)) \n return dist\n\ndef getangle(atm1,atm2,atm3):\n dist1=sqrt(pow(atm1.x-atm2.x,2)+pow(atm1.y-atm2.y,2)+pow(atm1.z-atm2.z,2)) \n dist2=sqrt(pow(atm3.x-atm2.x,2)+pow(atm3.y-atm2.y,2)+pow(atm3.z-atm2.z,2)) \n dotp=(atm1.x-atm2.x)*(atm3.x-atm2.x)+(atm1.y-atm2.y)*(atm3.y-atm2.y)+(atm1.z-atm2.z)*(atm3.z-atm2.z) \n angle=acos(dotp/(dist1*dist2))*180/pi \n return angle\n\ndef getangledihedral(atm1,atm2,atm3,atm4):\n ab=np.zeros(3)\n bc=np.zeros(3)\n cd=np.zeros(3)\n p=[]\n q=[]\n ab[0]=atm2.x-atm1.x\n ab[1]=atm2.y-atm1.y\n ab[2]=atm2.z-atm1.z\n bc[0]=atm3.x-atm2.x\n bc[1]=atm3.y-atm2.y\n bc[2]=atm3.z-atm2.z\n cd[0]=atm4.x-atm3.x\n cd[1]=atm4.y-atm3.y\n cd[2]=atm4.z-atm3.z\n p.append(ab[1]*bc[2]-ab[2]*bc[1])\n p.append(ab[2]*bc[0]-ab[0]*bc[2])\n p.append(ab[0]*bc[1]-ab[1]*bc[0])\n q.append(bc[1]*cd[2]-bc[2]*cd[1])\n q.append(bc[2]*cd[0]-bc[0]*cd[2])\n q.append(bc[0]*cd[1]-bc[1]*cd[0])\n\n\n r1=0\n r2=0\n dp=0\n dpcd=0\n for i in range(0,3):\n r1 += math.pow(p[i],2)\n r2 += math.pow(q[i],2)\n dp += p[i]*q[i]\n dpcd += p[i]*cd[i]\n\n dih=(dpcd/abs(dpcd))*math.acos(dp/(math.sqrt(r1)*math.sqrt(r2)))*180/math.pi\n \n\n return dih\n\ndef getdihedralstrain(a1,a2,a3,a4,a5):\n dse=8.37*(1+math.cos(3*a1*math.pi/180))+8.37*(1+math.cos(3*a5*math.pi/180))+4.18*(1+math.cos(3*a2*math.pi/180))+4.18*(1+math.cos(3*a4*math.pi/180))+14.64*(1+math.cos(2*a3*math.pi/180))+2.51*(1+math.cos(3*a3*math.pi/180))\n return dse\n\ns_s_l=1.6\ns_s_u=2.5\n\nfiletxt=open('filelist.txt') \ntxt_lines=filetxt.read().split('\\n') \nfiletxt.close()\nfileout=open('out_C-S-S-C_BACKBONE_scan.txt','w')\nf1=open('error_C-S-S-C_scan.txt','w')\nintr=[]\nlenlines=len(txt_lines)\nfor ppp in range(lenlines):\n filename=txt_lines[ppp]\n if filename=='':\n continue\n print('%.2f'%((ppp+1)*100.0/(lenlines-1))+'% ('+str(ppp+1)+'/'+str(lenlines-1)+') Executing for:'+filename)\n file=open(filename,'r')\n lines=file.read().split('\\n')\n file.close()\n T=[]\n D=[]\n S=[] \n C=[]\n SX=[]\n TX=[]\n A=[]\n B=[]\n E=[]\n F=[]\n modelno=[]\n\n \n try:\n for ln in lines:\n if len(ln)>=6 and (ln[0:4]=='ATOM' or ln[0:6]=='HETATM'):\n atm=atom()\n atm.aid=int(ln[6:11]) \n atm.atype=ln[12:16].strip() \n atm.rtype=ln[17:20].strip() \n atm.chainid=ln[21]\n atm.rid=int(ln[22:26]) \n atm.x=float(ln[30:38]) \n atm.y=float(ln[38:46]) \n atm.z=float(ln[47:54]) \n atm.model=modelno\n symb=ln[13].strip()\n if atm.atype=='CB' and (modelno==1 or modelno==A or modelno==[]) :\n if atm.rtype=='CYS' : \n C.append(atm)\n D.append(atm)\n if atm.atype=='SG'and (modelno==1 or modelno==A or modelno==[]) :\n if atm.rtype=='CYS': \n SX.append(atm)\n TX.append(atm)\n if atm.atype=='CA' and (modelno==1 or modelno==A or modelno==[]) :\n if atm.rtype=='CYS':\n B.append(atm)\n E.append(atm)\n if atm.atype=='N' and (modelno==1 or modelno==A or modelno==[]) :\n if atm.rtype=='CYS' :\n A.append(atm)\n F.append(atm)\n elif len(ln)>=5 and ln[0:5]=='MODEL':\n modelno=int(ln[12:])\n\n except:\n f1.write(filename+'\\n')\n\n\n for k in SX:\n for k1 in SX:\n if k1.chainid==k.chainid: \n if k1.rid==k.rid and k1.aid!=k.aid :\n break\n else:\n S.append(k)\n for m in TX:\n for m1 in TX:\n if m1.chainid==m.chainid:\n if m1.rid==m.rid and m1.aid!=m.aid :\n break\n else:\n T.append(m)\n \n for a in range(len(A)):\n for b in 
range(len(B)):\n if A[a].rid==B[b].rid:\n for j in range(len(C)):\n for k in range(len(S)):\n if C[j].rid==S[k].rid and C[j].rid==B[b].rid and C[j].chainid==B[b].chainid==S[k].chainid==A[a].chainid :\n for m in range(len(T)):\n if getlen(S[k],T[m])>=s_s_l and getlen(S[k],T[m])<=s_s_u and S[k].rid<T[m].rid :\n for n in range(len(D)):\n for e in range(len(E)):\n if E[e].rid==D[n].rid:\n for f in range(len(F)):\n if D[n].rid==T[m].rid and E[e].rid==F[f].rid and D[n].chainid==T[m].chainid==E[e].chainid==F[f].chainid :\n a1=getangledihedral(A[a],B[b],C[j],S[k])\n a2=getangledihedral(B[b],C[j],S[k],T[m])\n a3=getangledihedral(C[j],S[k],T[m],D[n])\n a4=getangledihedral(S[k],T[m],D[n],E[e])\n a5=getangledihedral(T[m],D[n],E[e],F[f])\n dse=getdihedralstrain(a1,a2,a3,a4,a5)\n intr.append([])\n intr[len(intr)-1].append(filename) \n intr[len(intr)-1].append(C[j].chainid)\n intr[len(intr)-1].append(C[j].rid) \n intr[len(intr)-1].append(T[m].rid)\n intr[len(intr)-1].append(T[m].chainid)\n intr[len(intr)-1].append(getlen(C[j],S[k])) \n intr[len(intr)-1].append(getlen(T[m],S[k])) \n intr[len(intr)-1].append(getlen(T[m],D[n])) \n intr[len(intr)-1].append(a1)\n intr[len(intr)-1].append(a2)\n intr[len(intr)-1].append(a3)\n intr[len(intr)-1].append(a4)\n intr[len(intr)-1].append(a5)\n intr[len(intr)-1].append(dse)\n\n \n C=[]\n T=[]\n D=[]\n S=[]\n SX=[]\n TX=[]\n A=[]\n B=[]\n E=[]\n F=[]\n for line in intr:\n for xxd in line:\n fileout.write(str(xxd))\n fileout.write('\\t')\n fileout.write('\\n')\n intr=[]\n fileout.close()\n fileout=open('out_C-S-S-C_BACKBONE_scan.txt','a')\nfileout.close()\nf1.close()\n",
"step-ids": [
5,
6,
8,
9,
10
]
}
|
[
5,
6,
8,
9,
10
] |
"""Ex026 Faça um programa que leia uma frase pelo teclado e mostre:
Quantas vezes aparece a letra "A".
Em que posição ela aparece a primeira vez.
Em que posição ela aparece pela última vez."""
frase = str(input('Digite uma frase: ')).strip().lower()
n_a = frase.count('a')
f_a = frase.find('a')+1
l_a= frase.rfind('a')-1
print(f'Sua frase tem {n_a} letras a')
print(f'A letra A aparece pela primeira vez na {f_a}° posição')
print(f'A letra A apaerece pela ultima vez na {l_a}° posição')
|
normal
|
{
"blob_id": "58f3b8c5470c765c81f27d39d9c28751a8c2b719",
"index": 277,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(f'Sua frase tem {n_a} letras a')\nprint(f'A letra A aparece pela primeira vez na {f_a}° posição')\nprint(f'A letra A apaerece pela ultima vez na {l_a}° posição')\n",
"step-3": "<mask token>\nfrase = str(input('Digite uma frase: ')).strip().lower()\nn_a = frase.count('a')\nf_a = frase.find('a') + 1\nl_a = frase.rfind('a') - 1\nprint(f'Sua frase tem {n_a} letras a')\nprint(f'A letra A aparece pela primeira vez na {f_a}° posição')\nprint(f'A letra A apaerece pela ultima vez na {l_a}° posição')\n",
"step-4": "\"\"\"Ex026 Faça um programa que leia uma frase pelo teclado e mostre:\nQuantas vezes aparece a letra \"A\".\nEm que posição ela aparece a primeira vez.\nEm que posição ela aparece pela última vez.\"\"\"\nfrase = str(input('Digite uma frase: ')).strip().lower()\nn_a = frase.count('a')\nf_a = frase.find('a')+1\nl_a= frase.rfind('a')-1\nprint(f'Sua frase tem {n_a} letras a')\nprint(f'A letra A aparece pela primeira vez na {f_a}° posição')\nprint(f'A letra A apaerece pela ultima vez na {l_a}° posição')\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
"""
Created on Tue May 15 11:34:22 2018
@author: Diogo Leite
"""
from SQL_obj_new.Dataset_config_dataset_new_sql import _DS_config_DS_SQL
class Dataset_conf_ds(object):
"""
    This class handles the dataset-configuration connection objects as they exist in the DATASET_CONF_DS database table
    NOTE: It is a connection (N-to-N) class recording, for each dataset, the value of a given configuration
    By default, all FKs come last in the parameter declarations
"""
def __init__(self, id_ds_conf_ds = -1, value_configuration = -1, FK_id_configuration_DCT_DCD = -1, FK_id_dataset_DS_DCD = -1):
"""
        Constructor of the Dataset_conf_ds object. All the parameters have a default value
:param id_ds_conf_ds: id of the configurations dataset - -1 if unknown
:param value_configuration: value of the bins - -1 if unknown
:param FK_id_configuration_DCT_DCD: FK of the configurations (see table DATASET_CONFIGURATIONS_TYPES)- -1 if unknown
:param FK_id_dataset_DS_DCD: FK of the dataset (see table DATASETS)
:type id_ds_conf_ds: int - not required
:type value_configuration: int - not required
        :type FK_id_configuration_DCT_DCD: int - required
:type FK_id_dataset_DS_DCD: int - required
"""
self.id_ds_conf_ds = id_ds_conf_ds
self.value_configuration = value_configuration
self.FK_id_configuration_DCT_DCD = FK_id_configuration_DCT_DCD
self.FK_id_dataset_DS_DCD = FK_id_dataset_DS_DCD
    @staticmethod
    def get_all_datasets_conf_ds():
"""
        Return an array with all the dataset configurations in the database
        :return: array of dataset configurations
        :rtype: array(Dataset_conf_ds)
"""
listOfDatasetDSConfig = []
sqlObj = _DS_config_DS_SQL()
results = sqlObj.select_all_DDI_DB()
for element in results:
listOfDatasetDSConfig.append(Dataset_conf_ds(element[0], element[1], element[2], element[3]))
return listOfDatasetDSConfig
def create_ds_config_ds(self):
"""
        Insert a dataset configuration in the database and return its id
        The ds_conf_ds contains:
        - value of the configuration
        - FK of the configuration
        - FK of the dataset
        :return: id of the Dataset_conf_ds
        :rtype: int
"""
sqlObj = _DS_config_DS_SQL()
value_id_ds_conf_ds = sqlObj.insert_DS_conf_DS_return_id_if_not_exists(self.value_configuration, self.FK_id_configuration_DCT_DCD, self.FK_id_dataset_DS_DCD)
self.id_ds_conf_ds = value_id_ds_conf_ds
return value_id_ds_conf_ds
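
# Usage sketch (hypothetical ids; assumes the referenced configuration and
# dataset rows already exist in DATASET_CONFIGURATIONS_TYPES and DATASETS):
#   conf = Dataset_conf_ds(value_configuration=10,
#                          FK_id_configuration_DCT_DCD=1,
#                          FK_id_dataset_DS_DCD=3)
#   new_id = conf.create_ds_config_ds()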
|
normal
|
{
"blob_id": "76d2c3f74e8fae160396b4015ccec478dba97b87",
"index": 7422,
"step-1": "<mask token>\n\n\nclass Dataset_conf_ds(object):\n <mask token>\n\n def __init__(self, id_ds_conf_ds=-1, value_configuration=-1,\n FK_id_configuration_DCT_DCD=-1, FK_id_dataset_DS_DCD=-1):\n \"\"\"\n Constructor of the DDI_interactionDB object. All the parameters have a default value\n\n :param id_ds_conf_ds: id of the configurations dataset - -1 if unknown\n :param value_configuration: value of the bins - -1 if unknown\n :param FK_id_configuration_DCT_DCD: FK of the configurations (see table DATASET_CONFIGURATIONS_TYPES)- -1 if unknown\n :param FK_id_dataset_DS_DCD: FK of the dataset (see table DATASETS)\n\n :type id_ds_conf_ds: int - not required\n :type value_configuration: int - not required\n :type FK_id_configuration_DCT_DCD: text (date format) - required \n :type FK_id_dataset_DS_DCD: int - required \n \"\"\"\n self.id_ds_conf_ds = id_ds_conf_ds\n self.value_configuration = value_configuration\n self.FK_id_configuration_DCT_DCD = FK_id_configuration_DCT_DCD\n self.FK_id_dataset_DS_DCD = FK_id_dataset_DS_DCD\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Dataset_conf_ds(object):\n <mask token>\n\n def __init__(self, id_ds_conf_ds=-1, value_configuration=-1,\n FK_id_configuration_DCT_DCD=-1, FK_id_dataset_DS_DCD=-1):\n \"\"\"\n Constructor of the DDI_interactionDB object. All the parameters have a default value\n\n :param id_ds_conf_ds: id of the configurations dataset - -1 if unknown\n :param value_configuration: value of the bins - -1 if unknown\n :param FK_id_configuration_DCT_DCD: FK of the configurations (see table DATASET_CONFIGURATIONS_TYPES)- -1 if unknown\n :param FK_id_dataset_DS_DCD: FK of the dataset (see table DATASETS)\n\n :type id_ds_conf_ds: int - not required\n :type value_configuration: int - not required\n :type FK_id_configuration_DCT_DCD: text (date format) - required \n :type FK_id_dataset_DS_DCD: int - required \n \"\"\"\n self.id_ds_conf_ds = id_ds_conf_ds\n self.value_configuration = value_configuration\n self.FK_id_configuration_DCT_DCD = FK_id_configuration_DCT_DCD\n self.FK_id_dataset_DS_DCD = FK_id_dataset_DS_DCD\n <mask token>\n\n def create_ds_config_ds(self):\n \"\"\"\n Insert a dataset configuration of Dataset in the database return it id\n The ds_conf_ds contain:\n - value of the creation\n - FK of the configuration\n - FK of the dataset\n\n :return: id Dataset_conf_ds\n :rtype int\n \"\"\"\n sqlObj = _DS_config_DS_SQL()\n value_id_ds_conf_ds = sqlObj.insert_DS_conf_DS_return_id_if_not_exists(\n self.value_configuration, self.FK_id_configuration_DCT_DCD,\n self.FK_id_dataset_DS_DCD)\n self.id_ds_conf_ds = value_id_ds_conf_ds\n return value_id_ds_conf_ds\n",
"step-3": "<mask token>\n\n\nclass Dataset_conf_ds(object):\n \"\"\"\n This class treat the datasets configuration connection tables object has it exists in DATASET_CONF_DS table database\n\n NOTE: It consistes on a conection class (N to N) to know for each dataset with a given configuration\n\n By default, all FK are in the lasts positions in the parameters declaration\n \"\"\"\n\n def __init__(self, id_ds_conf_ds=-1, value_configuration=-1,\n FK_id_configuration_DCT_DCD=-1, FK_id_dataset_DS_DCD=-1):\n \"\"\"\n Constructor of the DDI_interactionDB object. All the parameters have a default value\n\n :param id_ds_conf_ds: id of the configurations dataset - -1 if unknown\n :param value_configuration: value of the bins - -1 if unknown\n :param FK_id_configuration_DCT_DCD: FK of the configurations (see table DATASET_CONFIGURATIONS_TYPES)- -1 if unknown\n :param FK_id_dataset_DS_DCD: FK of the dataset (see table DATASETS)\n\n :type id_ds_conf_ds: int - not required\n :type value_configuration: int - not required\n :type FK_id_configuration_DCT_DCD: text (date format) - required \n :type FK_id_dataset_DS_DCD: int - required \n \"\"\"\n self.id_ds_conf_ds = id_ds_conf_ds\n self.value_configuration = value_configuration\n self.FK_id_configuration_DCT_DCD = FK_id_configuration_DCT_DCD\n self.FK_id_dataset_DS_DCD = FK_id_dataset_DS_DCD\n\n def get_all_datasets_conf_ds():\n \"\"\"\n return an array with all the configurations of datasets in the database\n\n :return: array of datasets configurations\n :rtype: array(DDI_interaction_DB)\n \"\"\"\n listOfDatasetDSConfig = []\n sqlObj = _DS_config_DS_SQL()\n results = sqlObj.select_all_DDI_DB()\n for element in results:\n listOfDatasetDSConfig.append(Dataset_conf_ds(element[0],\n element[1], element[2], element[3]))\n return listOfDatasetDSConfig\n\n def create_ds_config_ds(self):\n \"\"\"\n Insert a dataset configuration of Dataset in the database return it id\n The ds_conf_ds contain:\n - value of the creation\n - FK of the configuration\n - FK of the dataset\n\n :return: id Dataset_conf_ds\n :rtype int\n \"\"\"\n sqlObj = _DS_config_DS_SQL()\n value_id_ds_conf_ds = sqlObj.insert_DS_conf_DS_return_id_if_not_exists(\n self.value_configuration, self.FK_id_configuration_DCT_DCD,\n self.FK_id_dataset_DS_DCD)\n self.id_ds_conf_ds = value_id_ds_conf_ds\n return value_id_ds_conf_ds\n",
"step-4": "<mask token>\nfrom SQL_obj_new.Dataset_config_dataset_new_sql import _DS_config_DS_SQL\n\n\nclass Dataset_conf_ds(object):\n \"\"\"\n This class treat the datasets configuration connection tables object has it exists in DATASET_CONF_DS table database\n\n NOTE: It consistes on a conection class (N to N) to know for each dataset with a given configuration\n\n By default, all FK are in the lasts positions in the parameters declaration\n \"\"\"\n\n def __init__(self, id_ds_conf_ds=-1, value_configuration=-1,\n FK_id_configuration_DCT_DCD=-1, FK_id_dataset_DS_DCD=-1):\n \"\"\"\n Constructor of the DDI_interactionDB object. All the parameters have a default value\n\n :param id_ds_conf_ds: id of the configurations dataset - -1 if unknown\n :param value_configuration: value of the bins - -1 if unknown\n :param FK_id_configuration_DCT_DCD: FK of the configurations (see table DATASET_CONFIGURATIONS_TYPES)- -1 if unknown\n :param FK_id_dataset_DS_DCD: FK of the dataset (see table DATASETS)\n\n :type id_ds_conf_ds: int - not required\n :type value_configuration: int - not required\n :type FK_id_configuration_DCT_DCD: text (date format) - required \n :type FK_id_dataset_DS_DCD: int - required \n \"\"\"\n self.id_ds_conf_ds = id_ds_conf_ds\n self.value_configuration = value_configuration\n self.FK_id_configuration_DCT_DCD = FK_id_configuration_DCT_DCD\n self.FK_id_dataset_DS_DCD = FK_id_dataset_DS_DCD\n\n def get_all_datasets_conf_ds():\n \"\"\"\n return an array with all the configurations of datasets in the database\n\n :return: array of datasets configurations\n :rtype: array(DDI_interaction_DB)\n \"\"\"\n listOfDatasetDSConfig = []\n sqlObj = _DS_config_DS_SQL()\n results = sqlObj.select_all_DDI_DB()\n for element in results:\n listOfDatasetDSConfig.append(Dataset_conf_ds(element[0],\n element[1], element[2], element[3]))\n return listOfDatasetDSConfig\n\n def create_ds_config_ds(self):\n \"\"\"\n Insert a dataset configuration of Dataset in the database return it id\n The ds_conf_ds contain:\n - value of the creation\n - FK of the configuration\n - FK of the dataset\n\n :return: id Dataset_conf_ds\n :rtype int\n \"\"\"\n sqlObj = _DS_config_DS_SQL()\n value_id_ds_conf_ds = sqlObj.insert_DS_conf_DS_return_id_if_not_exists(\n self.value_configuration, self.FK_id_configuration_DCT_DCD,\n self.FK_id_dataset_DS_DCD)\n self.id_ds_conf_ds = value_id_ds_conf_ds\n return value_id_ds_conf_ds\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mai 15 11:34:22 2018\n\n@author: Diogo Leite\n\"\"\"\n\nfrom SQL_obj_new.Dataset_config_dataset_new_sql import _DS_config_DS_SQL\n\nclass Dataset_conf_ds(object):\n \"\"\"\n This class treat the datasets configuration connection tables object has it exists in DATASET_CONF_DS table database\n\n NOTE: It consistes on a conection class (N to N) to know for each dataset with a given configuration\n\n By default, all FK are in the lasts positions in the parameters declaration\n \"\"\" \n\n def __init__(self, id_ds_conf_ds = -1, value_configuration = -1, FK_id_configuration_DCT_DCD = -1, FK_id_dataset_DS_DCD = -1):\n \"\"\"\n Constructor of the DDI_interactionDB object. All the parameters have a default value\n\n :param id_ds_conf_ds: id of the configurations dataset - -1 if unknown\n :param value_configuration: value of the bins - -1 if unknown\n :param FK_id_configuration_DCT_DCD: FK of the configurations (see table DATASET_CONFIGURATIONS_TYPES)- -1 if unknown\n :param FK_id_dataset_DS_DCD: FK of the dataset (see table DATASETS)\n\n :type id_ds_conf_ds: int - not required\n :type value_configuration: int - not required\n :type FK_id_configuration_DCT_DCD: text (date format) - required \n :type FK_id_dataset_DS_DCD: int - required \n \"\"\"\n\n self.id_ds_conf_ds = id_ds_conf_ds\n self.value_configuration = value_configuration\n self.FK_id_configuration_DCT_DCD = FK_id_configuration_DCT_DCD\n self.FK_id_dataset_DS_DCD = FK_id_dataset_DS_DCD\n\n def get_all_datasets_conf_ds():\n \"\"\"\n return an array with all the configurations of datasets in the database\n\n :return: array of datasets configurations\n :rtype: array(DDI_interaction_DB)\n \"\"\"\n listOfDatasetDSConfig = []\n sqlObj = _DS_config_DS_SQL()\n results = sqlObj.select_all_DDI_DB()\n for element in results:\n listOfDatasetDSConfig.append(Dataset_conf_ds(element[0], element[1], element[2], element[3]))\n return listOfDatasetDSConfig\n\n def create_ds_config_ds(self):\n \"\"\"\n Insert a dataset configuration of Dataset in the database return it id\n The ds_conf_ds contain:\n - value of the creation\n - FK of the configuration\n - FK of the dataset\n\n :return: id Dataset_conf_ds\n :rtype int\n \"\"\"\n\n\n sqlObj = _DS_config_DS_SQL()\n value_id_ds_conf_ds = sqlObj.insert_DS_conf_DS_return_id_if_not_exists(self.value_configuration, self.FK_id_configuration_DCT_DCD, self.FK_id_dataset_DS_DCD)\n \n self.id_ds_conf_ds = value_id_ds_conf_ds\n return value_id_ds_conf_ds",
"step-ids": [
2,
3,
5,
6,
7
]
}
|
[
2,
3,
5,
6,
7
] |
from flask import Flask
from raven.contrib.flask import Sentry
from flask.signals import got_request_exception
app = Flask(__name__)
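# Assumes SENTRY_DSN has already been placed in the Flask config; building the
# client before the config is loaded would raise a KeyError on the next line.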
sentry = Sentry(dsn=app.config['SENTRY_DSN'])
@got_request_exception.connect
def log_exception_to_sentry(app, exception=None, **kwargs):
"""
Logs an exception to sentry.
:param app: The current application
:param exception: The exception that occurred
"""
sentry.captureException(exception)
|
normal
|
{
"blob_id": "f739fb56eae1ada2409ef7d75958bad2018f5134",
"index": 2743,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\n@got_request_exception.connect\ndef log_exception_to_sentry(app, exception=None, **kwargs):\n \"\"\"\n Logs an exception to sentry.\n\n :param app: The current application\n :param exception: The exception that occurred\n \"\"\"\n sentry.captureException(exception)\n",
"step-3": "<mask token>\napp = Flask(__name__)\nsentry = Sentry(dsn=app.config['SENTRY_DSN'])\n\n\n@got_request_exception.connect\ndef log_exception_to_sentry(app, exception=None, **kwargs):\n \"\"\"\n Logs an exception to sentry.\n\n :param app: The current application\n :param exception: The exception that occurred\n \"\"\"\n sentry.captureException(exception)\n",
"step-4": "from flask import Flask\nfrom raven.contrib.flask import Sentry\nfrom flask.signals import got_request_exception\napp = Flask(__name__)\nsentry = Sentry(dsn=app.config['SENTRY_DSN'])\n\n\n@got_request_exception.connect\ndef log_exception_to_sentry(app, exception=None, **kwargs):\n \"\"\"\n Logs an exception to sentry.\n\n :param app: The current application\n :param exception: The exception that occurred\n \"\"\"\n sentry.captureException(exception)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import time
import itertools
import re
from pyspark import SparkContext, SparkConf
from pyspark.rdd import portable_hash
from datetime import datetime
APP_NAME = 'in-shuffle-secondary-sort-compute'
INPUT_FILE = '/data/Taxi_Trips.csv.xsmall'
OUTPUT_DIR = '/data/output-in-shuffle-sort-compute-{timestamp}.txt'
COMMA_DELIMITER = re.compile(''',(?=(?:[^"]*"[^"]*")*[^"]*$)''')
FIRST_KEY = 1
SECOND_KEY = 2
TRIP_END_TIMESTAMP = 3
TIMESTAMP = int(time.time())
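# Secondary-sort plumbing: partition on the first key component only, so all
# records sharing that key land in one partition, while the full composite key
# (first, second) drives the within-partition sort order.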
def partition_func(key):
return portable_hash(key[0])
def key_func(entry):
return entry[0], entry[1]
def make_pair(entry):
key = (entry[FIRST_KEY], entry[SECOND_KEY])
return key, entry
def unpair(entry):
return entry[0][0], entry[1][0], entry[1][1]
def create_pair_rdd(ctx):
rawRDD = ctx.textFile(INPUT_FILE)
headerlessRDD = rawRDD.filter(lambda x: not x.startswith('Trip ID'))
rdd = headerlessRDD.map(lambda x: COMMA_DELIMITER.split(x))
validRDD = rdd.filter(lambda x: len(x[FIRST_KEY]) > 0 and len(x[SECOND_KEY]) > 0 and len(x[TRIP_END_TIMESTAMP]) > 0)
pairRDD = validRDD.map(make_pair)
compressedRDD = pairRDD.mapValues(lambda x: (x[SECOND_KEY], x[TRIP_END_TIMESTAMP]))
return compressedRDD
def sorted_group(lines):
return itertools.groupby(lines, key=lambda x: x[0])
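# Per key, sum the idle seconds between one record's end timestamp and the next
# record's start timestamp; only positive gaps contribute to the "loss".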
def calculate_loss(entry):
key, group = entry
loss = 0
_, _, prev_end = next(group)
for item in group:
_, start, end = item
delta = datetime.strptime(start, '%m/%d/%Y %I:%M:%S %p').timestamp() \
- datetime.strptime(prev_end, '%m/%d/%Y %I:%M:%S %p').timestamp()
if delta > 0:
loss += delta
prev_end = end
return key, loss
if __name__ == "__main__":
conf = SparkConf()
ctx = SparkContext(master="local[*]", appName=APP_NAME, conf=conf)
ctx.setLogLevel('INFO')
rdd = create_pair_rdd(ctx)
sortedRDD = rdd.repartitionAndSortWithinPartitions(partitionFunc=partition_func,
numPartitions=4,
keyfunc=key_func,
ascending=True)
unpairedRDD = sortedRDD.map(unpair, preservesPartitioning=True)
groupedRDD = unpairedRDD.mapPartitions(sorted_group, preservesPartitioning=True)
lossRDD = groupedRDD.map(calculate_loss)
lossRDD.saveAsTextFile(OUTPUT_DIR.format(timestamp=TIMESTAMP))
ctx.stop()
|
normal
|
{
"blob_id": "05d6f15102be41937febeb63ed66a77d3b0a678e",
"index": 8517,
"step-1": "<mask token>\n\n\ndef key_func(entry):\n return entry[0], entry[1]\n\n\ndef make_pair(entry):\n key = entry[FIRST_KEY], entry[SECOND_KEY]\n return key, entry\n\n\ndef unpair(entry):\n return entry[0][0], entry[1][0], entry[1][1]\n\n\n<mask token>\n\n\ndef sorted_group(lines):\n return itertools.groupby(lines, key=lambda x: x[0])\n\n\ndef calculate_loss(entry):\n key, group = entry\n loss = 0\n _, _, prev_end = next(group)\n for item in group:\n _, start, end = item\n delta = datetime.strptime(start, '%m/%d/%Y %I:%M:%S %p').timestamp(\n ) - datetime.strptime(prev_end, '%m/%d/%Y %I:%M:%S %p').timestamp()\n if delta > 0:\n loss += delta\n prev_end = end\n return key, loss\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef key_func(entry):\n return entry[0], entry[1]\n\n\ndef make_pair(entry):\n key = entry[FIRST_KEY], entry[SECOND_KEY]\n return key, entry\n\n\ndef unpair(entry):\n return entry[0][0], entry[1][0], entry[1][1]\n\n\ndef create_pair_rdd(ctx):\n rawRDD = ctx.textFile(INPUT_FILE)\n headerlessRDD = rawRDD.filter(lambda x: not x.startswith('Trip ID'))\n rdd = headerlessRDD.map(lambda x: COMMA_DELIMITER.split(x))\n validRDD = rdd.filter(lambda x: len(x[FIRST_KEY]) > 0 and len(x[\n SECOND_KEY]) > 0 and len(x[TRIP_END_TIMESTAMP]) > 0)\n pairRDD = validRDD.map(make_pair)\n compressedRDD = pairRDD.mapValues(lambda x: (x[SECOND_KEY], x[\n TRIP_END_TIMESTAMP]))\n return compressedRDD\n\n\ndef sorted_group(lines):\n return itertools.groupby(lines, key=lambda x: x[0])\n\n\ndef calculate_loss(entry):\n key, group = entry\n loss = 0\n _, _, prev_end = next(group)\n for item in group:\n _, start, end = item\n delta = datetime.strptime(start, '%m/%d/%Y %I:%M:%S %p').timestamp(\n ) - datetime.strptime(prev_end, '%m/%d/%Y %I:%M:%S %p').timestamp()\n if delta > 0:\n loss += delta\n prev_end = end\n return key, loss\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef partition_func(key):\n return portable_hash(key[0])\n\n\ndef key_func(entry):\n return entry[0], entry[1]\n\n\ndef make_pair(entry):\n key = entry[FIRST_KEY], entry[SECOND_KEY]\n return key, entry\n\n\ndef unpair(entry):\n return entry[0][0], entry[1][0], entry[1][1]\n\n\ndef create_pair_rdd(ctx):\n rawRDD = ctx.textFile(INPUT_FILE)\n headerlessRDD = rawRDD.filter(lambda x: not x.startswith('Trip ID'))\n rdd = headerlessRDD.map(lambda x: COMMA_DELIMITER.split(x))\n validRDD = rdd.filter(lambda x: len(x[FIRST_KEY]) > 0 and len(x[\n SECOND_KEY]) > 0 and len(x[TRIP_END_TIMESTAMP]) > 0)\n pairRDD = validRDD.map(make_pair)\n compressedRDD = pairRDD.mapValues(lambda x: (x[SECOND_KEY], x[\n TRIP_END_TIMESTAMP]))\n return compressedRDD\n\n\ndef sorted_group(lines):\n return itertools.groupby(lines, key=lambda x: x[0])\n\n\ndef calculate_loss(entry):\n key, group = entry\n loss = 0\n _, _, prev_end = next(group)\n for item in group:\n _, start, end = item\n delta = datetime.strptime(start, '%m/%d/%Y %I:%M:%S %p').timestamp(\n ) - datetime.strptime(prev_end, '%m/%d/%Y %I:%M:%S %p').timestamp()\n if delta > 0:\n loss += delta\n prev_end = end\n return key, loss\n\n\nif __name__ == '__main__':\n conf = SparkConf()\n ctx = SparkContext(master='local[*]', appName=APP_NAME, conf=conf)\n ctx.setLogLevel('INFO')\n rdd = create_pair_rdd(ctx)\n sortedRDD = rdd.repartitionAndSortWithinPartitions(partitionFunc=\n partition_func, numPartitions=4, keyfunc=key_func, ascending=True)\n unpairedRDD = sortedRDD.map(unpair, preservesPartitioning=True)\n groupedRDD = unpairedRDD.mapPartitions(sorted_group,\n preservesPartitioning=True)\n lossRDD = groupedRDD.map(calculate_loss)\n lossRDD.saveAsTextFile(OUTPUT_DIR.format(timestamp=TIMESTAMP))\n ctx.stop()\n",
"step-4": "import time\nimport itertools\nimport re\nfrom pyspark import SparkContext, SparkConf\nfrom pyspark.rdd import portable_hash\nfrom datetime import datetime\nAPP_NAME = 'in-shuffle-secondary-sort-compute'\nINPUT_FILE = '/data/Taxi_Trips.csv.xsmall'\nOUTPUT_DIR = '/data/output-in-shuffle-sort-compute-{timestamp}.txt'\nCOMMA_DELIMITER = re.compile(',(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)')\nFIRST_KEY = 1\nSECOND_KEY = 2\nTRIP_END_TIMESTAMP = 3\nTIMESTAMP = int(time.time())\n\n\ndef partition_func(key):\n return portable_hash(key[0])\n\n\ndef key_func(entry):\n return entry[0], entry[1]\n\n\ndef make_pair(entry):\n key = entry[FIRST_KEY], entry[SECOND_KEY]\n return key, entry\n\n\ndef unpair(entry):\n return entry[0][0], entry[1][0], entry[1][1]\n\n\ndef create_pair_rdd(ctx):\n rawRDD = ctx.textFile(INPUT_FILE)\n headerlessRDD = rawRDD.filter(lambda x: not x.startswith('Trip ID'))\n rdd = headerlessRDD.map(lambda x: COMMA_DELIMITER.split(x))\n validRDD = rdd.filter(lambda x: len(x[FIRST_KEY]) > 0 and len(x[\n SECOND_KEY]) > 0 and len(x[TRIP_END_TIMESTAMP]) > 0)\n pairRDD = validRDD.map(make_pair)\n compressedRDD = pairRDD.mapValues(lambda x: (x[SECOND_KEY], x[\n TRIP_END_TIMESTAMP]))\n return compressedRDD\n\n\ndef sorted_group(lines):\n return itertools.groupby(lines, key=lambda x: x[0])\n\n\ndef calculate_loss(entry):\n key, group = entry\n loss = 0\n _, _, prev_end = next(group)\n for item in group:\n _, start, end = item\n delta = datetime.strptime(start, '%m/%d/%Y %I:%M:%S %p').timestamp(\n ) - datetime.strptime(prev_end, '%m/%d/%Y %I:%M:%S %p').timestamp()\n if delta > 0:\n loss += delta\n prev_end = end\n return key, loss\n\n\nif __name__ == '__main__':\n conf = SparkConf()\n ctx = SparkContext(master='local[*]', appName=APP_NAME, conf=conf)\n ctx.setLogLevel('INFO')\n rdd = create_pair_rdd(ctx)\n sortedRDD = rdd.repartitionAndSortWithinPartitions(partitionFunc=\n partition_func, numPartitions=4, keyfunc=key_func, ascending=True)\n unpairedRDD = sortedRDD.map(unpair, preservesPartitioning=True)\n groupedRDD = unpairedRDD.mapPartitions(sorted_group,\n preservesPartitioning=True)\n lossRDD = groupedRDD.map(calculate_loss)\n lossRDD.saveAsTextFile(OUTPUT_DIR.format(timestamp=TIMESTAMP))\n ctx.stop()\n",
"step-5": "import time\nimport itertools\nimport re\n\nfrom pyspark import SparkContext, SparkConf\nfrom pyspark.rdd import portable_hash\nfrom datetime import datetime\n\nAPP_NAME = 'in-shuffle-secondary-sort-compute'\nINPUT_FILE = '/data/Taxi_Trips.csv.xsmall'\nOUTPUT_DIR = '/data/output-in-shuffle-sort-compute-{timestamp}.txt'\n\nCOMMA_DELIMITER = re.compile(''',(?=(?:[^\"]*\"[^\"]*\")*[^\"]*$)''')\n\nFIRST_KEY = 1\nSECOND_KEY = 2\nTRIP_END_TIMESTAMP = 3\n\n\nTIMESTAMP = int(time.time())\n\n\ndef partition_func(key):\n return portable_hash(key[0])\n\n\ndef key_func(entry):\n return entry[0], entry[1]\n\n\ndef make_pair(entry):\n key = (entry[FIRST_KEY], entry[SECOND_KEY])\n return key, entry\n\n\ndef unpair(entry):\n return entry[0][0], entry[1][0], entry[1][1]\n\n\ndef create_pair_rdd(ctx):\n rawRDD = ctx.textFile(INPUT_FILE)\n headerlessRDD = rawRDD.filter(lambda x: not x.startswith('Trip ID'))\n rdd = headerlessRDD.map(lambda x: COMMA_DELIMITER.split(x))\n validRDD = rdd.filter(lambda x: len(x[FIRST_KEY]) > 0 and len(x[SECOND_KEY]) > 0 and len(x[TRIP_END_TIMESTAMP]) > 0)\n pairRDD = validRDD.map(make_pair)\n compressedRDD = pairRDD.mapValues(lambda x: (x[SECOND_KEY], x[TRIP_END_TIMESTAMP]))\n\n return compressedRDD\n\n\ndef sorted_group(lines):\n return itertools.groupby(lines, key=lambda x: x[0])\n\n\ndef calculate_loss(entry):\n key, group = entry\n loss = 0\n _, _, prev_end = next(group)\n\n for item in group:\n _, start, end = item\n delta = datetime.strptime(start, '%m/%d/%Y %I:%M:%S %p').timestamp() \\\n - datetime.strptime(prev_end, '%m/%d/%Y %I:%M:%S %p').timestamp()\n if delta > 0:\n loss += delta\n prev_end = end\n\n return key, loss\n\n\nif __name__ == \"__main__\":\n conf = SparkConf()\n ctx = SparkContext(master=\"local[*]\", appName=APP_NAME, conf=conf)\n ctx.setLogLevel('INFO')\n\n rdd = create_pair_rdd(ctx)\n\n sortedRDD = rdd.repartitionAndSortWithinPartitions(partitionFunc=partition_func,\n numPartitions=4,\n keyfunc=key_func,\n ascending=True)\n unpairedRDD = sortedRDD.map(unpair, preservesPartitioning=True)\n groupedRDD = unpairedRDD.mapPartitions(sorted_group, preservesPartitioning=True)\n\n lossRDD = groupedRDD.map(calculate_loss)\n lossRDD.saveAsTextFile(OUTPUT_DIR.format(timestamp=TIMESTAMP))\n\n ctx.stop()\n",
"step-ids": [
5,
6,
8,
10,
11
]
}
|
[
5,
6,
8,
10,
11
] |
def solution(A):
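    # Assumes A holds distinct integers from 1..n+1 with exactly one value
    # missing; returns that value (n + 1 when A is the complete run 1..n).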
if not A:
return 1
elif len(A) == 1:
if A[0] == 1:
return 2
else:
return 1
A.sort()
prev = 0
for i in A:
if i != (prev + 1):
return i - 1
else:
prev = i
return prev + 1
|
normal
|
{
"blob_id": "8c3c066ed37fe0f67acfd2d5dc9d57ec2b996275",
"index": 5640,
"step-1": "<mask token>\n",
"step-2": "def solution(A):\n if not A:\n return 1\n elif len(A) == 1:\n if A[0] == 1:\n return 2\n else:\n return 1\n A.sort()\n prev = 0\n for i in A:\n if i != prev + 1:\n return i - 1\n else:\n prev = i\n return prev + 1\n",
"step-3": "def solution(A):\n if not A:\n return 1\n elif len(A) == 1:\n if A[0] == 1:\n return 2\n else:\n return 1\n\n A.sort()\n prev = 0\n for i in A:\n if i != (prev + 1):\n return i - 1\n else:\n prev = i\n\n return prev + 1\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import visgraph.dbcore as vg_dbcore
dbinfo = {
'user':'visgraph',
'password':'ohhai!',
'database':'vg_test',
}
def vgtest_basic_database():
#vg_dbcore.initGraphDb(dbinfo)
gstore = vg_dbcore.DbGraphStore(dbinfo)
n1 = gstore.addNode(ninfo={'name':'foo', 'size':20})
n2 = gstore.addNode(ninfo={'name':'bar', 'size':300})
n3 = gstore.addNode(ninfo={'name':'baz'})
n4 = gstore.addNode(ninfo={'name':'faz'})
n5 = gstore.addNode(ninfo={'name':'yer'})
n6 = gstore.addNode(ninfo={'name':'mom'})
gstore.addEdge(n3, n4)
gstore.addEdge(n4, n5)
gstore.addEdge(n5, n6)
    print(gstore.getNodeInfo(n1, 'name'))
    print(gstore.getNodeInfo(n1, 'size'))
    print(gstore.getNodeInfo(n1, 'owoot', 20))
eid = gstore.addEdge(n1, n2, einfo={'etype':'FooEdge'})
    print(eid)
gstore.setEdgeInfo(eid, 'name', 'asdf')
gstore.setEdgeInfo(eid, 'size', 20)
    print(gstore.getEdgeInfo(eid, 'size'))
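    # Build a subgraph view limited (via useEdges) to edges whose size == 20.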
sg = gstore.buildSubGraph()
sg.useEdges(size=20)
#n3 = sg.addNode(ninfo={'name':'Tom Jones'})
#sg.addEdge(n2, n3, einfo={'etype':'FBFriend'})
    #print(sg.getRefsFrom(n2))
for eid, fromid, toid, einfo in sg.getRefsFrom(n2):
        print('NAMES: %s -> %s' % (sg.getNodeInfo(fromid, 'name', 'unknown'), sg.getNodeInfo(toid, 'name', 'unknown')))
sg.expandNode(n3, maxdepth=1)
|
normal
|
{
"blob_id": "ffee0b0e00b4cebecefc3671332af3e2ffe7491b",
"index": 8155,
"step-1": "import visgraph.dbcore as vg_dbcore\n\ndbinfo = {\n'user':'visgraph',\n'password':'ohhai!',\n'database':'vg_test',\n}\n\ndef vgtest_basic_database():\n#vg_dbcore.initGraphDb(dbinfo)\n\n gstore = vg_dbcore.DbGraphStore(dbinfo)\n\n n1 = gstore.addNode(ninfo={'name':'foo', 'size':20})\n n2 = gstore.addNode(ninfo={'name':'bar', 'size':300})\n n3 = gstore.addNode(ninfo={'name':'baz'})\n n4 = gstore.addNode(ninfo={'name':'faz'})\n n5 = gstore.addNode(ninfo={'name':'yer'})\n n6 = gstore.addNode(ninfo={'name':'mom'})\n\n gstore.addEdge(n3, n4)\n gstore.addEdge(n4, n5)\n gstore.addEdge(n5, n6)\n\n print gstore.getNodeInfo(n1, 'name')\n print gstore.getNodeInfo(n1, 'size')\n print gstore.getNodeInfo(n1, 'owoot', 20)\n\n eid = gstore.addEdge(n1, n2, einfo={'etype':'FooEdge'})\n print eid\n gstore.setEdgeInfo(eid, 'name', 'asdf')\n gstore.setEdgeInfo(eid, 'size', 20)\n print gstore.getEdgeInfo(eid, 'size')\n\n sg = gstore.buildSubGraph()\n\n sg.useEdges(size=20)\n #n3 = sg.addNode(ninfo={'name':'Tom Jones'})\n #sg.addEdge(n2, n3, einfo={'etype':'FBFriend'})\n\n #print sg.getRefsFrom(n2)\n\n for eid, fromid, toid, einfo in sg.getRefsFrom(n2):\n print 'NAMES: %s -> %s' % (sg.getNodeInfo(fromid, 'name', 'unknown'), sg.getNodeInfo(toid, 'name', 'unknown'))\n\n sg.expandNode(n3, maxdepth=1)\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import os
from celery import Celery
import django
from django.conf import settings
from django.apps import apps
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'nightcrawler.settings')
#celery_app = Celery('nightcrawler.tasks.keep_it', broker=settings.CELERY_BROKER_URL)
celery_app = Celery('nightcrawler', broker=settings.CELERY_BROKER_URL)
celery_app.config_from_object('django.conf:settings')
celery_app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
#celery_app.autodiscover_tasks()
@celery_app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
|
normal
|
{
"blob_id": "d4bc6bfe6bef730273db38f3c99352bbc3f48a5f",
"index": 7604,
"step-1": "<mask token>\n\n\n@celery_app.task(bind=True)\ndef debug_task(self):\n print('Request: {0!r}'.format(self.request))\n",
"step-2": "<mask token>\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'nightcrawler.settings')\n<mask token>\ncelery_app.config_from_object('django.conf:settings')\ncelery_app.autodiscover_tasks(lambda : settings.INSTALLED_APPS)\n\n\n@celery_app.task(bind=True)\ndef debug_task(self):\n print('Request: {0!r}'.format(self.request))\n",
"step-3": "<mask token>\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'nightcrawler.settings')\ncelery_app = Celery('nightcrawler', broker=settings.CELERY_BROKER_URL)\ncelery_app.config_from_object('django.conf:settings')\ncelery_app.autodiscover_tasks(lambda : settings.INSTALLED_APPS)\n\n\n@celery_app.task(bind=True)\ndef debug_task(self):\n print('Request: {0!r}'.format(self.request))\n",
"step-4": "import os\nfrom celery import Celery\nimport django\nfrom django.conf import settings\nfrom django.apps import apps\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'nightcrawler.settings')\ncelery_app = Celery('nightcrawler', broker=settings.CELERY_BROKER_URL)\ncelery_app.config_from_object('django.conf:settings')\ncelery_app.autodiscover_tasks(lambda : settings.INSTALLED_APPS)\n\n\n@celery_app.task(bind=True)\ndef debug_task(self):\n print('Request: {0!r}'.format(self.request))\n",
"step-5": "import os\nfrom celery import Celery\nimport django\nfrom django.conf import settings\nfrom django.apps import apps\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'nightcrawler.settings')\n\n#celery_app = Celery('nightcrawler.tasks.keep_it', broker=settings.CELERY_BROKER_URL)\ncelery_app = Celery('nightcrawler', broker=settings.CELERY_BROKER_URL)\ncelery_app.config_from_object('django.conf:settings')\n\ncelery_app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)\n#celery_app.autodiscover_tasks()\n\n@celery_app.task(bind=True)\ndef debug_task(self):\n print('Request: {0!r}'.format(self.request))\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from turtle import *
def drawSquare():
for i in range(4):
forward(100)
left(90)
if __name__ == '__main__':
drawSquare()
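    # Lift the pen, move 200 units right, then draw a second square.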
up()
forward(200)
down()
drawSquare()
mainloop()
|
normal
|
{
"blob_id": "1ce5b97148885950983e39b7e99d0cdfafe4bc16",
"index": 5382,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef drawSquare():\n for i in range(4):\n forward(100)\n left(90)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef drawSquare():\n for i in range(4):\n forward(100)\n left(90)\n\n\nif __name__ == '__main__':\n drawSquare()\nup()\nforward(200)\ndown()\ndrawSquare()\nmainloop()\n",
"step-4": "from turtle import *\n\n\ndef drawSquare():\n for i in range(4):\n forward(100)\n left(90)\n\n\nif __name__ == '__main__':\n drawSquare()\nup()\nforward(200)\ndown()\ndrawSquare()\nmainloop()\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import os
import pandas as pd
import numpy as np
from dataloader import *
from keras.optimizers import Adam, SGD
from mylib.models.misc import set_gpu_usage
set_gpu_usage()
from mylib.models import densesharp, metrics, losses
from keras.callbacks import ModelCheckpoint, CSVLogger, TensorBoard, EarlyStopping, ReduceLROnPlateau, \
LearningRateScheduler
os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # expects a device index, not a TF device string like '/gpu:0'
def main(batch_size, crop_size, learning_rate, segmentation_task_ratio, weight_decay, save_folder, epochs,
alpha):
print(learning_rate)
print(alpha)
print(weight_decay)
train_dataset = ClfSegDataset(subset=[0, 1])
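    # Mixup loader: alpha parameterises the Beta(alpha, alpha) distribution
    # conventionally used to blend pairs of samples and labels.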
train_loader = get_mixup_loader(train_dataset, batch_size=batch_size, alpha=alpha)
val_dataset = ClfvalSegDataset(crop_size=crop_size, move=None, subset=[2])
val_loader = get_loader(val_dataset, batch_size=batch_size)
model = densesharp.get_compiled(output_size=1,
optimizer=Adam(lr=learning_rate),
loss={"clf": 'binary_crossentropy',
"seg": losses.DiceLoss()},
metrics={'clf': ['accuracy', metrics.precision, metrics.recall, metrics.fmeasure,
metrics.auc],
'seg': [metrics.precision, metrics.recall, metrics.fmeasure]},
loss_weights={"clf": 1., "seg": segmentation_task_ratio},
weight_decay=weight_decay, weights='tmp/test/weights42_222639.h5')
checkpointer = ModelCheckpoint(filepath='tmp/%s/weights.{epoch:02d}.h5' % save_folder, verbose=1,
period=1, save_weights_only=True)
csv_logger = CSVLogger('tmp/%s/training.csv' % save_folder)
tensorboard = TensorBoard(log_dir='tmp/%s/logs/' % save_folder)
best_keeper = ModelCheckpoint(filepath='tmp/%s/best.h5' % save_folder, verbose=1, save_weights_only=True,
monitor='val_clf_acc', save_best_only=True, period=1, mode='max')
early_stopping = EarlyStopping(monitor='val_clf_acc', min_delta=0, mode='max',
patience=20, verbose=1)
lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=0.334, patience=10,
verbose=1, mode='min', epsilon=1.e-5, cooldown=2, min_lr=0)
model.fit_generator(generator=train_loader, steps_per_epoch=50, max_queue_size=10, workers=1,
validation_data=val_loader, epochs=epochs, validation_steps=50,
callbacks=[checkpointer, csv_logger, best_keeper, early_stopping, lr_reducer, tensorboard])
if __name__ == '__main__':
main(batch_size=32,
crop_size=[32, 32, 32],
learning_rate=1.e-5,
segmentation_task_ratio=0.2,
weight_decay=0.0,
save_folder='test',
epochs=10,
alpha=1.0)
|
normal
|
{
"blob_id": "94b3fa700d7da0ca913adeb0ad5324d1fec0be50",
"index": 7104,
"step-1": "<mask token>\n\n\ndef main(batch_size, crop_size, learning_rate, segmentation_task_ratio,\n weight_decay, save_folder, epochs, alpha):\n print(learning_rate)\n print(alpha)\n print(weight_decay)\n train_dataset = ClfSegDataset(subset=[0, 1])\n train_loader = get_mixup_loader(train_dataset, batch_size=batch_size,\n alpha=alpha)\n val_dataset = ClfvalSegDataset(crop_size=crop_size, move=None, subset=[2])\n val_loader = get_loader(val_dataset, batch_size=batch_size)\n model = densesharp.get_compiled(output_size=1, optimizer=Adam(lr=\n learning_rate), loss={'clf': 'binary_crossentropy', 'seg': losses.\n DiceLoss()}, metrics={'clf': ['accuracy', metrics.precision,\n metrics.recall, metrics.fmeasure, metrics.auc], 'seg': [metrics.\n precision, metrics.recall, metrics.fmeasure]}, loss_weights={'clf':\n 1.0, 'seg': segmentation_task_ratio}, weight_decay=weight_decay,\n weights='tmp/test/weights42_222639.h5')\n checkpointer = ModelCheckpoint(filepath='tmp/%s/weights.{epoch:02d}.h5' %\n save_folder, verbose=1, period=1, save_weights_only=True)\n csv_logger = CSVLogger('tmp/%s/training.csv' % save_folder)\n tensorboard = TensorBoard(log_dir='tmp/%s/logs/' % save_folder)\n best_keeper = ModelCheckpoint(filepath='tmp/%s/best.h5' % save_folder,\n verbose=1, save_weights_only=True, monitor='val_clf_acc',\n save_best_only=True, period=1, mode='max')\n early_stopping = EarlyStopping(monitor='val_clf_acc', min_delta=0, mode\n ='max', patience=20, verbose=1)\n lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=0.334,\n patience=10, verbose=1, mode='min', epsilon=1e-05, cooldown=2, min_lr=0\n )\n model.fit_generator(generator=train_loader, steps_per_epoch=50,\n max_queue_size=10, workers=1, validation_data=val_loader, epochs=\n epochs, validation_steps=50, callbacks=[checkpointer, csv_logger,\n best_keeper, early_stopping, lr_reducer, tensorboard])\n\n\n<mask token>\n",
"step-2": "<mask token>\nset_gpu_usage()\n<mask token>\n\n\ndef main(batch_size, crop_size, learning_rate, segmentation_task_ratio,\n weight_decay, save_folder, epochs, alpha):\n print(learning_rate)\n print(alpha)\n print(weight_decay)\n train_dataset = ClfSegDataset(subset=[0, 1])\n train_loader = get_mixup_loader(train_dataset, batch_size=batch_size,\n alpha=alpha)\n val_dataset = ClfvalSegDataset(crop_size=crop_size, move=None, subset=[2])\n val_loader = get_loader(val_dataset, batch_size=batch_size)\n model = densesharp.get_compiled(output_size=1, optimizer=Adam(lr=\n learning_rate), loss={'clf': 'binary_crossentropy', 'seg': losses.\n DiceLoss()}, metrics={'clf': ['accuracy', metrics.precision,\n metrics.recall, metrics.fmeasure, metrics.auc], 'seg': [metrics.\n precision, metrics.recall, metrics.fmeasure]}, loss_weights={'clf':\n 1.0, 'seg': segmentation_task_ratio}, weight_decay=weight_decay,\n weights='tmp/test/weights42_222639.h5')\n checkpointer = ModelCheckpoint(filepath='tmp/%s/weights.{epoch:02d}.h5' %\n save_folder, verbose=1, period=1, save_weights_only=True)\n csv_logger = CSVLogger('tmp/%s/training.csv' % save_folder)\n tensorboard = TensorBoard(log_dir='tmp/%s/logs/' % save_folder)\n best_keeper = ModelCheckpoint(filepath='tmp/%s/best.h5' % save_folder,\n verbose=1, save_weights_only=True, monitor='val_clf_acc',\n save_best_only=True, period=1, mode='max')\n early_stopping = EarlyStopping(monitor='val_clf_acc', min_delta=0, mode\n ='max', patience=20, verbose=1)\n lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=0.334,\n patience=10, verbose=1, mode='min', epsilon=1e-05, cooldown=2, min_lr=0\n )\n model.fit_generator(generator=train_loader, steps_per_epoch=50,\n max_queue_size=10, workers=1, validation_data=val_loader, epochs=\n epochs, validation_steps=50, callbacks=[checkpointer, csv_logger,\n best_keeper, early_stopping, lr_reducer, tensorboard])\n\n\nif __name__ == '__main__':\n main(batch_size=32, crop_size=[32, 32, 32], learning_rate=1e-05,\n segmentation_task_ratio=0.2, weight_decay=0.0, save_folder='test',\n epochs=10, alpha=1.0)\n",
"step-3": "<mask token>\nset_gpu_usage()\n<mask token>\nos.environ['CUDA_VISIBLE_DEVICES'] = '/gpu:0'\n\n\ndef main(batch_size, crop_size, learning_rate, segmentation_task_ratio,\n weight_decay, save_folder, epochs, alpha):\n print(learning_rate)\n print(alpha)\n print(weight_decay)\n train_dataset = ClfSegDataset(subset=[0, 1])\n train_loader = get_mixup_loader(train_dataset, batch_size=batch_size,\n alpha=alpha)\n val_dataset = ClfvalSegDataset(crop_size=crop_size, move=None, subset=[2])\n val_loader = get_loader(val_dataset, batch_size=batch_size)\n model = densesharp.get_compiled(output_size=1, optimizer=Adam(lr=\n learning_rate), loss={'clf': 'binary_crossentropy', 'seg': losses.\n DiceLoss()}, metrics={'clf': ['accuracy', metrics.precision,\n metrics.recall, metrics.fmeasure, metrics.auc], 'seg': [metrics.\n precision, metrics.recall, metrics.fmeasure]}, loss_weights={'clf':\n 1.0, 'seg': segmentation_task_ratio}, weight_decay=weight_decay,\n weights='tmp/test/weights42_222639.h5')\n checkpointer = ModelCheckpoint(filepath='tmp/%s/weights.{epoch:02d}.h5' %\n save_folder, verbose=1, period=1, save_weights_only=True)\n csv_logger = CSVLogger('tmp/%s/training.csv' % save_folder)\n tensorboard = TensorBoard(log_dir='tmp/%s/logs/' % save_folder)\n best_keeper = ModelCheckpoint(filepath='tmp/%s/best.h5' % save_folder,\n verbose=1, save_weights_only=True, monitor='val_clf_acc',\n save_best_only=True, period=1, mode='max')\n early_stopping = EarlyStopping(monitor='val_clf_acc', min_delta=0, mode\n ='max', patience=20, verbose=1)\n lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=0.334,\n patience=10, verbose=1, mode='min', epsilon=1e-05, cooldown=2, min_lr=0\n )\n model.fit_generator(generator=train_loader, steps_per_epoch=50,\n max_queue_size=10, workers=1, validation_data=val_loader, epochs=\n epochs, validation_steps=50, callbacks=[checkpointer, csv_logger,\n best_keeper, early_stopping, lr_reducer, tensorboard])\n\n\nif __name__ == '__main__':\n main(batch_size=32, crop_size=[32, 32, 32], learning_rate=1e-05,\n segmentation_task_ratio=0.2, weight_decay=0.0, save_folder='test',\n epochs=10, alpha=1.0)\n",
"step-4": "import os\nimport pandas as pd\nimport numpy as np\nfrom dataloader import *\nfrom keras.optimizers import Adam, SGD\nfrom mylib.models.misc import set_gpu_usage\nset_gpu_usage()\nfrom mylib.models import densesharp, metrics, losses\nfrom keras.callbacks import ModelCheckpoint, CSVLogger, TensorBoard, EarlyStopping, ReduceLROnPlateau, LearningRateScheduler\nos.environ['CUDA_VISIBLE_DEVICES'] = '/gpu:0'\n\n\ndef main(batch_size, crop_size, learning_rate, segmentation_task_ratio,\n weight_decay, save_folder, epochs, alpha):\n print(learning_rate)\n print(alpha)\n print(weight_decay)\n train_dataset = ClfSegDataset(subset=[0, 1])\n train_loader = get_mixup_loader(train_dataset, batch_size=batch_size,\n alpha=alpha)\n val_dataset = ClfvalSegDataset(crop_size=crop_size, move=None, subset=[2])\n val_loader = get_loader(val_dataset, batch_size=batch_size)\n model = densesharp.get_compiled(output_size=1, optimizer=Adam(lr=\n learning_rate), loss={'clf': 'binary_crossentropy', 'seg': losses.\n DiceLoss()}, metrics={'clf': ['accuracy', metrics.precision,\n metrics.recall, metrics.fmeasure, metrics.auc], 'seg': [metrics.\n precision, metrics.recall, metrics.fmeasure]}, loss_weights={'clf':\n 1.0, 'seg': segmentation_task_ratio}, weight_decay=weight_decay,\n weights='tmp/test/weights42_222639.h5')\n checkpointer = ModelCheckpoint(filepath='tmp/%s/weights.{epoch:02d}.h5' %\n save_folder, verbose=1, period=1, save_weights_only=True)\n csv_logger = CSVLogger('tmp/%s/training.csv' % save_folder)\n tensorboard = TensorBoard(log_dir='tmp/%s/logs/' % save_folder)\n best_keeper = ModelCheckpoint(filepath='tmp/%s/best.h5' % save_folder,\n verbose=1, save_weights_only=True, monitor='val_clf_acc',\n save_best_only=True, period=1, mode='max')\n early_stopping = EarlyStopping(monitor='val_clf_acc', min_delta=0, mode\n ='max', patience=20, verbose=1)\n lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=0.334,\n patience=10, verbose=1, mode='min', epsilon=1e-05, cooldown=2, min_lr=0\n )\n model.fit_generator(generator=train_loader, steps_per_epoch=50,\n max_queue_size=10, workers=1, validation_data=val_loader, epochs=\n epochs, validation_steps=50, callbacks=[checkpointer, csv_logger,\n best_keeper, early_stopping, lr_reducer, tensorboard])\n\n\nif __name__ == '__main__':\n main(batch_size=32, crop_size=[32, 32, 32], learning_rate=1e-05,\n segmentation_task_ratio=0.2, weight_decay=0.0, save_folder='test',\n epochs=10, alpha=1.0)\n",
"step-5": "import os\nimport pandas as pd\nimport numpy as np\n\nfrom dataloader import *\nfrom keras.optimizers import Adam, SGD\nfrom mylib.models.misc import set_gpu_usage\n\nset_gpu_usage()\n\nfrom mylib.models import densesharp, metrics, losses\nfrom keras.callbacks import ModelCheckpoint, CSVLogger, TensorBoard, EarlyStopping, ReduceLROnPlateau, \\\n LearningRateScheduler\n\nos.environ['CUDA_VISIBLE_DEVICES'] = '/gpu:0'\n\n\ndef main(batch_size, crop_size, learning_rate, segmentation_task_ratio, weight_decay, save_folder, epochs,\n alpha):\n\n print(learning_rate)\n print(alpha)\n print(weight_decay)\n\n train_dataset = ClfSegDataset(subset=[0, 1])\n train_loader = get_mixup_loader(train_dataset, batch_size=batch_size, alpha=alpha)\n\n val_dataset = ClfvalSegDataset(crop_size=crop_size, move=None, subset=[2])\n val_loader = get_loader(val_dataset, batch_size=batch_size)\n\n model = densesharp.get_compiled(output_size=1,\n optimizer=Adam(lr=learning_rate),\n loss={\"clf\": 'binary_crossentropy',\n \"seg\": losses.DiceLoss()},\n metrics={'clf': ['accuracy', metrics.precision, metrics.recall, metrics.fmeasure,\n metrics.auc],\n 'seg': [metrics.precision, metrics.recall, metrics.fmeasure]},\n loss_weights={\"clf\": 1., \"seg\": segmentation_task_ratio},\n weight_decay=weight_decay, weights='tmp/test/weights42_222639.h5')\n\n checkpointer = ModelCheckpoint(filepath='tmp/%s/weights.{epoch:02d}.h5' % save_folder, verbose=1,\n period=1, save_weights_only=True)\n csv_logger = CSVLogger('tmp/%s/training.csv' % save_folder)\n tensorboard = TensorBoard(log_dir='tmp/%s/logs/' % save_folder)\n\n best_keeper = ModelCheckpoint(filepath='tmp/%s/best.h5' % save_folder, verbose=1, save_weights_only=True,\n monitor='val_clf_acc', save_best_only=True, period=1, mode='max')\n\n early_stopping = EarlyStopping(monitor='val_clf_acc', min_delta=0, mode='max',\n patience=20, verbose=1)\n\n lr_reducer = ReduceLROnPlateau(monitor='val_loss', factor=0.334, patience=10,\n verbose=1, mode='min', epsilon=1.e-5, cooldown=2, min_lr=0)\n\n model.fit_generator(generator=train_loader, steps_per_epoch=50, max_queue_size=10, workers=1,\n validation_data=val_loader, epochs=epochs, validation_steps=50,\n callbacks=[checkpointer, csv_logger, best_keeper, early_stopping, lr_reducer, tensorboard])\n\n\nif __name__ == '__main__':\n main(batch_size=32,\n crop_size=[32, 32, 32],\n learning_rate=1.e-5,\n segmentation_task_ratio=0.2,\n weight_decay=0.0,\n save_folder='test',\n epochs=10,\n alpha=1.0)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/',methods=["GET","POST"])
def inicio():
nombre = "jose"
return render_template("inicio.html",nombre=nombre)
app.run(debug=True)
|
normal
|
{
"blob_id": "caa28bd64141c8d2f3212b5e4e77129d81d24c71",
"index": 2290,
"step-1": "<mask token>\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef inicio():\n nombre = 'jose'\n return render_template('inicio.html', nombre=nombre)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef inicio():\n nombre = 'jose'\n return render_template('inicio.html', nombre=nombre)\n\n\napp.run(debug=True)\n",
"step-3": "<mask token>\napp = Flask(__name__)\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef inicio():\n nombre = 'jose'\n return render_template('inicio.html', nombre=nombre)\n\n\napp.run(debug=True)\n",
"step-4": "from flask import Flask, render_template\napp = Flask(__name__)\n\n\[email protected]('/', methods=['GET', 'POST'])\ndef inicio():\n nombre = 'jose'\n return render_template('inicio.html', nombre=nombre)\n\n\napp.run(debug=True)\n",
"step-5": "from flask import Flask, render_template\napp = Flask(__name__)\t\n\[email protected]('/',methods=[\"GET\",\"POST\"])\ndef inicio():\n\tnombre = \"jose\"\n\treturn render_template(\"inicio.html\",nombre=nombre)\n\napp.run(debug=True)",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#coding=utf-8
'Initialize the package, load the URL routes, and create the app object'
import web
from myapp.urls import urls
app = web.application(urls, globals())
|
normal
|
{
"blob_id": "4480b305a6f71ff64022f2b890998326bf402bf0",
"index": 1669,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp = web.application(urls, globals())\n",
"step-3": "<mask token>\nimport web\nfrom myapp.urls import urls\napp = web.application(urls, globals())\n",
"step-4": "#coding=utf-8\r\n'初始化Package,加载url,生成app对象'\r\nimport web\r\nfrom myapp.urls import urls\r\n\r\napp = web.application(urls, globals())\r\n\r\n\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import cv2
import numpy as np
import os
from tqdm import tqdm
DIR = '/home/nghiatruong/Desktop'
INPUT_1 = os.path.join(DIR, 'GOPR1806.MP4')
INPUT_2 = os.path.join(DIR, '20190715_180940.mp4')
INPUT_3 = os.path.join(DIR, '20190715_181200.mp4')
RIGHT_SYNC_1 = 1965
LEFT_SYNC_1 = 1700
RIGHT_SYNC_2 = 5765
LEFT_SYNC_2 = 1282
def add_frame_id(video, output_dir):
reader = cv2.VideoCapture(video)
if not reader.isOpened():
return -1
os.makedirs(output_dir, exist_ok=True)
frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))
for frame_id in tqdm(range(frame_count)):
has_frame, frame = reader.read()
if not has_frame:
break
cv2.imwrite(os.path.join(output_dir, f'{frame_id}.jpg'), frame)
reader.release()
return 0
def get_meta(video):
reader = cv2.VideoCapture(video)
if not reader.isOpened():
return None, None, None
width = int(reader.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))
return width, height, frame_count
w1, h1, fc1 = get_meta(INPUT_1)
h2, w2, fc2 = get_meta(INPUT_2)
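# The phone footage is portrait, so height/width come back swapped; scale the
# (later transposed) phone frames to match the GoPro height.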
ratio = h1 / h2
w2 = int(w2*ratio)+1
fourcc = cv2.VideoWriter_fourcc(*'MJPG')
writer = cv2.VideoWriter(os.path.join(DIR, 'output.avi'), fourcc, 29.97, (w1+w2+10, h1))
border = np.zeros((h1, 10, 3), dtype='uint8')
filler = np.zeros((h1, w2, 3), dtype='uint8')
reader1 = cv2.VideoCapture(INPUT_1)
reader2 = cv2.VideoCapture(INPUT_2)
reader3 = cv2.VideoCapture(INPUT_3)
last_shape = (h1, w1+w2+10, 3)
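# Stitch in three phases: (1) GoPro plus the first phone clip (black filler until
# it syncs in), (2) GoPro alone between clips, (3) GoPro plus the second clip.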
for fid in tqdm(range(fc2+RIGHT_SYNC_1-LEFT_SYNC_1)):
_, right_frame = reader1.read()
if fid < RIGHT_SYNC_1-LEFT_SYNC_1:
left_frame = filler
else:
_, left_frame = reader2.read()
left_frame = cv2.transpose(left_frame)
left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)
left_frame = cv2.flip(left_frame, 1)
new_frame = np.concatenate([left_frame, border, right_frame], axis=1)
# cv2.imshow('out', new_frame)
writer.write(new_frame)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
for fid in tqdm(range(fc2+RIGHT_SYNC_1-LEFT_SYNC_1, RIGHT_SYNC_2-LEFT_SYNC_2)):
_, right_frame = reader1.read()
new_frame = np.concatenate([filler, border, right_frame], axis=1)
# cv2.imshow('out', new_frame)
writer.write(new_frame)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
for fid in tqdm(range(RIGHT_SYNC_2-LEFT_SYNC_2, fc1)):
r1, right_frame = reader1.read()
if not r1:
break
r3, left_frame = reader3.read()
if not r3:
left_frame = filler
else:
left_frame = cv2.transpose(left_frame)
left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)
left_frame = cv2.flip(left_frame, 1)
new_frame = np.concatenate([left_frame, border, right_frame], axis=1)
# cv2.imshow('out', new_frame)
writer.write(new_frame)
# if cv2.waitKey(1) & 0xFF == ord('q'):
# break
reader1.release()
reader2.release()
writer.release()
cv2.destroyAllWindows()
|
normal
|
{
"blob_id": "f8f538773693b9d9530775094d9948626247a3bb",
"index": 6950,
"step-1": "<mask token>\n\n\ndef add_frame_id(video, output_dir):\n reader = cv2.VideoCapture(video)\n if not reader.isOpened():\n return -1\n os.makedirs(output_dir, exist_ok=True)\n frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n for frame_id in tqdm(range(frame_count)):\n has_frame, frame = reader.read()\n if not has_frame:\n break\n cv2.imwrite(os.path.join(output_dir, f'{frame_id}.jpg'), frame)\n reader.release()\n return 0\n\n\ndef get_meta(video):\n reader = cv2.VideoCapture(video)\n if not reader.isOpened():\n return None, None, None\n width = int(reader.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT))\n frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n return width, height, frame_count\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef add_frame_id(video, output_dir):\n reader = cv2.VideoCapture(video)\n if not reader.isOpened():\n return -1\n os.makedirs(output_dir, exist_ok=True)\n frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n for frame_id in tqdm(range(frame_count)):\n has_frame, frame = reader.read()\n if not has_frame:\n break\n cv2.imwrite(os.path.join(output_dir, f'{frame_id}.jpg'), frame)\n reader.release()\n return 0\n\n\ndef get_meta(video):\n reader = cv2.VideoCapture(video)\n if not reader.isOpened():\n return None, None, None\n width = int(reader.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT))\n frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n return width, height, frame_count\n\n\n<mask token>\nfor fid in tqdm(range(fc2 + RIGHT_SYNC_1 - LEFT_SYNC_1)):\n _, right_frame = reader1.read()\n if fid < RIGHT_SYNC_1 - LEFT_SYNC_1:\n left_frame = filler\n else:\n _, left_frame = reader2.read()\n left_frame = cv2.transpose(left_frame)\n left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)\n left_frame = cv2.flip(left_frame, 1)\n new_frame = np.concatenate([left_frame, border, right_frame], axis=1)\n writer.write(new_frame)\nfor fid in tqdm(range(fc2 + RIGHT_SYNC_1 - LEFT_SYNC_1, RIGHT_SYNC_2 -\n LEFT_SYNC_2)):\n _, right_frame = reader1.read()\n new_frame = np.concatenate([filler, border, right_frame], axis=1)\n writer.write(new_frame)\nfor fid in tqdm(range(RIGHT_SYNC_2 - LEFT_SYNC_2, fc1)):\n r1, right_frame = reader1.read()\n if not r1:\n break\n r3, left_frame = reader3.read()\n if not r3:\n left_frame = filler\n else:\n left_frame = cv2.transpose(left_frame)\n left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)\n left_frame = cv2.flip(left_frame, 1)\n new_frame = np.concatenate([left_frame, border, right_frame], axis=1)\n writer.write(new_frame)\nreader1.release()\nreader2.release()\nwriter.release()\ncv2.destroyAllWindows()\n",
"step-3": "<mask token>\nDIR = '/home/nghiatruong/Desktop'\nINPUT_1 = os.path.join(DIR, 'GOPR1806.MP4')\nINPUT_2 = os.path.join(DIR, '20190715_180940.mp4')\nINPUT_3 = os.path.join(DIR, '20190715_181200.mp4')\nRIGHT_SYNC_1 = 1965\nLEFT_SYNC_1 = 1700\nRIGHT_SYNC_2 = 5765\nLEFT_SYNC_2 = 1282\n\n\ndef add_frame_id(video, output_dir):\n reader = cv2.VideoCapture(video)\n if not reader.isOpened():\n return -1\n os.makedirs(output_dir, exist_ok=True)\n frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n for frame_id in tqdm(range(frame_count)):\n has_frame, frame = reader.read()\n if not has_frame:\n break\n cv2.imwrite(os.path.join(output_dir, f'{frame_id}.jpg'), frame)\n reader.release()\n return 0\n\n\ndef get_meta(video):\n reader = cv2.VideoCapture(video)\n if not reader.isOpened():\n return None, None, None\n width = int(reader.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT))\n frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n return width, height, frame_count\n\n\nw1, h1, fc1 = get_meta(INPUT_1)\nh2, w2, fc2 = get_meta(INPUT_2)\nratio = h1 / h2\nw2 = int(w2 * ratio) + 1\nfourcc = cv2.VideoWriter_fourcc(*'MJPG')\nwriter = cv2.VideoWriter(os.path.join(DIR, 'output.avi'), fourcc, 29.97, (\n w1 + w2 + 10, h1))\nborder = np.zeros((h1, 10, 3), dtype='uint8')\nfiller = np.zeros((h1, w2, 3), dtype='uint8')\nreader1 = cv2.VideoCapture(INPUT_1)\nreader2 = cv2.VideoCapture(INPUT_2)\nreader3 = cv2.VideoCapture(INPUT_3)\nlast_shape = h1, w1 + w2 + 10, 3\nfor fid in tqdm(range(fc2 + RIGHT_SYNC_1 - LEFT_SYNC_1)):\n _, right_frame = reader1.read()\n if fid < RIGHT_SYNC_1 - LEFT_SYNC_1:\n left_frame = filler\n else:\n _, left_frame = reader2.read()\n left_frame = cv2.transpose(left_frame)\n left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)\n left_frame = cv2.flip(left_frame, 1)\n new_frame = np.concatenate([left_frame, border, right_frame], axis=1)\n writer.write(new_frame)\nfor fid in tqdm(range(fc2 + RIGHT_SYNC_1 - LEFT_SYNC_1, RIGHT_SYNC_2 -\n LEFT_SYNC_2)):\n _, right_frame = reader1.read()\n new_frame = np.concatenate([filler, border, right_frame], axis=1)\n writer.write(new_frame)\nfor fid in tqdm(range(RIGHT_SYNC_2 - LEFT_SYNC_2, fc1)):\n r1, right_frame = reader1.read()\n if not r1:\n break\n r3, left_frame = reader3.read()\n if not r3:\n left_frame = filler\n else:\n left_frame = cv2.transpose(left_frame)\n left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)\n left_frame = cv2.flip(left_frame, 1)\n new_frame = np.concatenate([left_frame, border, right_frame], axis=1)\n writer.write(new_frame)\nreader1.release()\nreader2.release()\nwriter.release()\ncv2.destroyAllWindows()\n",
"step-4": "import cv2\nimport numpy as np\nimport os\nfrom tqdm import tqdm\nDIR = '/home/nghiatruong/Desktop'\nINPUT_1 = os.path.join(DIR, 'GOPR1806.MP4')\nINPUT_2 = os.path.join(DIR, '20190715_180940.mp4')\nINPUT_3 = os.path.join(DIR, '20190715_181200.mp4')\nRIGHT_SYNC_1 = 1965\nLEFT_SYNC_1 = 1700\nRIGHT_SYNC_2 = 5765\nLEFT_SYNC_2 = 1282\n\n\ndef add_frame_id(video, output_dir):\n reader = cv2.VideoCapture(video)\n if not reader.isOpened():\n return -1\n os.makedirs(output_dir, exist_ok=True)\n frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n for frame_id in tqdm(range(frame_count)):\n has_frame, frame = reader.read()\n if not has_frame:\n break\n cv2.imwrite(os.path.join(output_dir, f'{frame_id}.jpg'), frame)\n reader.release()\n return 0\n\n\ndef get_meta(video):\n reader = cv2.VideoCapture(video)\n if not reader.isOpened():\n return None, None, None\n width = int(reader.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT))\n frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n return width, height, frame_count\n\n\nw1, h1, fc1 = get_meta(INPUT_1)\nh2, w2, fc2 = get_meta(INPUT_2)\nratio = h1 / h2\nw2 = int(w2 * ratio) + 1\nfourcc = cv2.VideoWriter_fourcc(*'MJPG')\nwriter = cv2.VideoWriter(os.path.join(DIR, 'output.avi'), fourcc, 29.97, (\n w1 + w2 + 10, h1))\nborder = np.zeros((h1, 10, 3), dtype='uint8')\nfiller = np.zeros((h1, w2, 3), dtype='uint8')\nreader1 = cv2.VideoCapture(INPUT_1)\nreader2 = cv2.VideoCapture(INPUT_2)\nreader3 = cv2.VideoCapture(INPUT_3)\nlast_shape = h1, w1 + w2 + 10, 3\nfor fid in tqdm(range(fc2 + RIGHT_SYNC_1 - LEFT_SYNC_1)):\n _, right_frame = reader1.read()\n if fid < RIGHT_SYNC_1 - LEFT_SYNC_1:\n left_frame = filler\n else:\n _, left_frame = reader2.read()\n left_frame = cv2.transpose(left_frame)\n left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)\n left_frame = cv2.flip(left_frame, 1)\n new_frame = np.concatenate([left_frame, border, right_frame], axis=1)\n writer.write(new_frame)\nfor fid in tqdm(range(fc2 + RIGHT_SYNC_1 - LEFT_SYNC_1, RIGHT_SYNC_2 -\n LEFT_SYNC_2)):\n _, right_frame = reader1.read()\n new_frame = np.concatenate([filler, border, right_frame], axis=1)\n writer.write(new_frame)\nfor fid in tqdm(range(RIGHT_SYNC_2 - LEFT_SYNC_2, fc1)):\n r1, right_frame = reader1.read()\n if not r1:\n break\n r3, left_frame = reader3.read()\n if not r3:\n left_frame = filler\n else:\n left_frame = cv2.transpose(left_frame)\n left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)\n left_frame = cv2.flip(left_frame, 1)\n new_frame = np.concatenate([left_frame, border, right_frame], axis=1)\n writer.write(new_frame)\nreader1.release()\nreader2.release()\nwriter.release()\ncv2.destroyAllWindows()\n",
"step-5": "import cv2\nimport numpy as np\nimport os\nfrom tqdm import tqdm\n\n\nDIR = '/home/nghiatruong/Desktop'\nINPUT_1 = os.path.join(DIR, 'GOPR1806.MP4')\nINPUT_2 = os.path.join(DIR, '20190715_180940.mp4')\nINPUT_3 = os.path.join(DIR, '20190715_181200.mp4')\nRIGHT_SYNC_1 = 1965\nLEFT_SYNC_1 = 1700\nRIGHT_SYNC_2 = 5765\nLEFT_SYNC_2 = 1282\n\n\ndef add_frame_id(video, output_dir):\n reader = cv2.VideoCapture(video)\n if not reader.isOpened():\n return -1\n os.makedirs(output_dir, exist_ok=True)\n frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n\n for frame_id in tqdm(range(frame_count)):\n has_frame, frame = reader.read()\n if not has_frame:\n break\n cv2.imwrite(os.path.join(output_dir, f'{frame_id}.jpg'), frame)\n\n reader.release()\n return 0\n\n\ndef get_meta(video):\n reader = cv2.VideoCapture(video)\n if not reader.isOpened():\n return None, None, None\n width = int(reader.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT))\n frame_count = int(reader.get(cv2.CAP_PROP_FRAME_COUNT))\n return width, height, frame_count\n\n\nw1, h1, fc1 = get_meta(INPUT_1)\nh2, w2, fc2 = get_meta(INPUT_2)\nratio = h1 / h2\nw2 = int(w2*ratio)+1\nfourcc = cv2.VideoWriter_fourcc(*'MJPG')\nwriter = cv2.VideoWriter(os.path.join(DIR, 'output.avi'), fourcc, 29.97, (w1+w2+10, h1))\nborder = np.zeros((h1, 10, 3), dtype='uint8')\nfiller = np.zeros((h1, w2, 3), dtype='uint8')\n\nreader1 = cv2.VideoCapture(INPUT_1)\nreader2 = cv2.VideoCapture(INPUT_2)\nreader3 = cv2.VideoCapture(INPUT_3)\n\nlast_shape = (h1, w1+w2+10, 3)\nfor fid in tqdm(range(fc2+RIGHT_SYNC_1-LEFT_SYNC_1)):\n _, right_frame = reader1.read()\n if fid < RIGHT_SYNC_1-LEFT_SYNC_1:\n left_frame = filler\n else:\n _, left_frame = reader2.read()\n left_frame = cv2.transpose(left_frame)\n left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)\n left_frame = cv2.flip(left_frame, 1)\n new_frame = np.concatenate([left_frame, border, right_frame], axis=1)\n # cv2.imshow('out', new_frame)\n writer.write(new_frame)\n # if cv2.waitKey(1) & 0xFF == ord('q'):\n # break\n\nfor fid in tqdm(range(fc2+RIGHT_SYNC_1-LEFT_SYNC_1, RIGHT_SYNC_2-LEFT_SYNC_2)):\n _, right_frame = reader1.read()\n new_frame = np.concatenate([filler, border, right_frame], axis=1)\n # cv2.imshow('out', new_frame)\n writer.write(new_frame)\n # if cv2.waitKey(1) & 0xFF == ord('q'):\n # break\n\nfor fid in tqdm(range(RIGHT_SYNC_2-LEFT_SYNC_2, fc1)):\n r1, right_frame = reader1.read()\n if not r1:\n break\n r3, left_frame = reader3.read()\n if not r3:\n left_frame = filler\n else:\n left_frame = cv2.transpose(left_frame)\n left_frame = cv2.resize(left_frame, None, fx=ratio, fy=ratio)\n left_frame = cv2.flip(left_frame, 1)\n new_frame = np.concatenate([left_frame, border, right_frame], axis=1)\n # cv2.imshow('out', new_frame)\n writer.write(new_frame)\n # if cv2.waitKey(1) & 0xFF == ord('q'):\n # break\n\n\nreader1.release()\nreader2.release()\nwriter.release()\ncv2.destroyAllWindows()\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from models.bearing_registry import BearingRegistry
from models.faction import Faction
from models.maneuver import Maneuver
import time
class Activation:
"""
This class represents the Activation phase of a turn
"""
def __init__(self, game):
"""
Constructor
game: The game under way
"""
self._game = game
def execute(self):
"""
Run the Activation phase
"""
for pilot in self._game.pilots_by_skill():
pilot.active = True
# Apply this pilot's maneuver
pilot.chosen_maneuver.apply(pilot)
# Choose an action to perform
if pilot.can_perform_action():
chosen_action = self._game.player(pilot.faction).choose_action(pilot)
# TODO: Do something with this
pilot.active = False
|
normal
|
{
"blob_id": "0774bad4082e0eb04ae3f7aa898c0376147e9779",
"index": 2645,
"step-1": "<mask token>\n\n\nclass Activation:\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Activation:\n <mask token>\n\n def __init__(self, game):\n \"\"\"\n Constructor\n game: The game under way\n \"\"\"\n self._game = game\n\n def execute(self):\n \"\"\"\n Run the Activation phase\n \"\"\"\n for pilot in self._game.pilots_by_skill():\n pilot.active = True\n pilot.chosen_maneuver.apply(pilot)\n if pilot.can_perform_action():\n chosen_action = self._game.player(pilot.faction).choose_action(\n pilot)\n pilot.active = False\n",
"step-3": "<mask token>\n\n\nclass Activation:\n \"\"\"\n This class represents the Activation phase of a turn\n \"\"\"\n\n def __init__(self, game):\n \"\"\"\n Constructor\n game: The game under way\n \"\"\"\n self._game = game\n\n def execute(self):\n \"\"\"\n Run the Activation phase\n \"\"\"\n for pilot in self._game.pilots_by_skill():\n pilot.active = True\n pilot.chosen_maneuver.apply(pilot)\n if pilot.can_perform_action():\n chosen_action = self._game.player(pilot.faction).choose_action(\n pilot)\n pilot.active = False\n",
"step-4": "from models.bearing_registry import BearingRegistry\nfrom models.faction import Faction\nfrom models.maneuver import Maneuver\nimport time\n\n\nclass Activation:\n \"\"\"\n This class represents the Activation phase of a turn\n \"\"\"\n\n def __init__(self, game):\n \"\"\"\n Constructor\n game: The game under way\n \"\"\"\n self._game = game\n\n def execute(self):\n \"\"\"\n Run the Activation phase\n \"\"\"\n for pilot in self._game.pilots_by_skill():\n pilot.active = True\n pilot.chosen_maneuver.apply(pilot)\n if pilot.can_perform_action():\n chosen_action = self._game.player(pilot.faction).choose_action(\n pilot)\n pilot.active = False\n",
"step-5": "from models.bearing_registry import BearingRegistry\r\nfrom models.faction import Faction\r\nfrom models.maneuver import Maneuver\r\n\r\nimport time\r\n\r\nclass Activation:\r\n \"\"\"\r\n This class represents the Activation phase of a turn\r\n \"\"\"\r\n def __init__(self, game):\r\n \"\"\"\r\n Constructor\r\n game: The game under way\r\n \"\"\"\r\n self._game = game\r\n\r\n def execute(self):\r\n \"\"\"\r\n Run the Activation phase\r\n \"\"\"\r\n for pilot in self._game.pilots_by_skill():\r\n pilot.active = True\r\n\r\n # Apply this pilot's maneuver\r\n pilot.chosen_maneuver.apply(pilot)\r\n\r\n # Choose an action to perform\r\n if pilot.can_perform_action():\r\n chosen_action = self._game.player(pilot.faction).choose_action(pilot)\r\n\r\n # TODO: Do something with this\r\n\r\n pilot.active = False",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
from django.contrib.auth import get_user_model
from django.test import TestCase
from .models import Order
from markets.models import Market
from tickers.models import Ticker
from trades.models import Trade
USER_MODEL = get_user_model()
class Matching:
@staticmethod
    def get_bid_ask(market: Market):
        bid = market.order_set.filter(
            side=Order.SIDES_BUY,
            status__in=[Order.STATUS_NEW, Order.STATUS_UPDATED, Order.STATUS_PARTIALLUY_FILLED],
        ).exclude(price=0).order_by("-price")
        ask = market.order_set.filter(
            side=Order.SIDES_SELL,
            status__in=[Order.STATUS_NEW, Order.STATUS_UPDATED, Order.STATUS_PARTIALLUY_FILLED],
        ).exclude(price=0).order_by("price")
bid_price = None
ask_price = None
if len(bid) > 0:
bid_price = bid[0].price
if len(ask) > 0:
ask_price = ask[0].price
return bid_price, ask_price
@staticmethod
def take(order: Order):
depth = []
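        # Walk the opposite side of the book from the best price outward;
        # price == 0 marks a market order, which never rests in the book.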
        if order.side == Order.SIDES_SELL:
            depth = order.market.order_set.filter(
                side=Order.SIDES_BUY,
                status__in=[Order.STATUS_NEW, Order.STATUS_UPDATED, Order.STATUS_PARTIALLUY_FILLED],
            ).exclude(price=0).order_by("-price")
        if order.side == Order.SIDES_BUY:
            depth = order.market.order_set.filter(
                side=Order.SIDES_SELL,
                status__in=[Order.STATUS_NEW, Order.STATUS_UPDATED, Order.STATUS_PARTIALLUY_FILLED],
            ).exclude(price=0).order_by("price")
for o in depth:
            if (order.side == Order.SIDES_SELL and order.price != 0 and order.price > o.price) \
                    or (order.side == Order.SIDES_BUY and order.price != 0 and order.price < o.price):
break
if order.size - order.filled > o.size - o.filled:
fill_size = o.size - o.filled
else:
fill_size = order.size - order.filled
o.fill( fill_size )
order.fill( fill_size )
o.save()
order.save()
if order.side == Order.SIDES_SELL:
order_buy = o
order_sell = order
else:
order_buy = order
order_sell = o
# Creating trade object
Trade.objects.create(order_buy=order_buy, order_sell=order_sell, price=o.price,side=order.side)
if order.status == Order.STATUS_FILLED:
break
@staticmethod
def process_order(self, order: Order ):
if order.status == Order.STATUS_WAITING_NEW:
order.status = Order.STATUS_NEW
order.save()
# best_bid_price, best_ask_price = self.get_bid_ask(order.market)
if order.price == 0:
Matching.take(order)
order.status = Order.STATUS_FILLED
order.save()
if order.price != 0:
Matching.take(order)
class TestOrder(TestCase):
def setUp(self) -> None:
self.ticker1 = Ticker.objects.create(name="USD")
self.ticker2 = Ticker.objects.create(name="EUR")
self.market = Market.objects.create(name="USD/EUR",
base_currency=self.ticker1,
quote_currency=self.ticker2)
self.Alice = USER_MODEL.objects.create_user(username="Alice", email="[email protected]", password="hhhh")
self.Bob = USER_MODEL.objects.create_user(username="Bob", email="[email protected]", password="hhhh")
# Creating Alice orders
for i in range(20):
Order.objects.create(sender=self.Alice,
side=Order.SIDES_SELL,
price=10000 + i*100,
size=100000+i*10000,
filled=0,
status=Order.STATUS_WAITING_NEW,
hash_signature="SIGA",
market=self.market)
# Creating Alice orders
for i in range(20):
Order.objects.create(sender=self.Alice,
side=Order.SIDES_BUY,
price=10000 - i*100,
size=100000+i*10000,
filled=0,
status=Order.STATUS_WAITING_NEW,
hash_signature="SIGA",
market=self.market)
# Creating Bob orders
for i in range(2):
Order.objects.create(sender=self.Bob,
side=Order.SIDES_BUY,
price=0,
size=100000+i*10000,
filled=0,
status=Order.STATUS_WAITING_NEW,
hash_signature="SIGA",
market=self.market)
def test_market_exists(self):
assert Market.objects.all().count() == 1
def test_orders_created(self):
#assert Market.objects.all()[0].order_set.count() == 40
print("---BID----")
for order in Market.objects.all()[0].order_set.filter(side=Order.SIDES_BUY).exclude(price=0).order_by("-price", "created_at"):
print(order.price, order.size)
print("---ASK----")
for order in Market.objects.all()[0].order_set.filter(side=Order.SIDES_SELL).exclude(price=0).order_by("price", "created_at"):
print(order.price, order.size)
def test_get_level_1(self):
bid = Market.objects.all()[0].order_set.filter(side=Order.SIDES_BUY, status__in=[Order.STATUS_WAITING_NEW]).exclude(price=0).order_by("-price")
ask = Market.objects.all()[0].order_set.filter(side=Order.SIDES_SELL, status__in=[Order.STATUS_WAITING_NEW]).exclude(price=0).order_by("price")
bid_price = None
ask_price = None
if len(bid) > 0:
bid_price = bid[0].price
if len(ask) > 0:
ask_price = ask[0].price
print(f'Bid {bid_price} Ask {ask_price}')
|
normal
|
{
"blob_id": "866ee2c4fa52bf9bda4730c7a9d46bb4798adcd4",
"index": 1775,
"step-1": "<mask token>\n\n\nclass Matching:\n <mask token>\n <mask token>\n\n @staticmethod\n def process_order(self, order: Order):\n if order.status == Order.STATUS_WAITING_NEW:\n order.status = Order.STATUS_NEW\n order.save()\n if order.price == 0:\n Matching.take(order)\n order.status = Order.STATUS_FILLED\n order.save()\n if order.price != 0:\n Matching.take(order)\n\n\nclass TestOrder(TestCase):\n\n def setUp(self) ->None:\n self.ticker1 = Ticker.objects.create(name='USD')\n self.ticker2 = Ticker.objects.create(name='EUR')\n self.market = Market.objects.create(name='USD/EUR', base_currency=\n self.ticker1, quote_currency=self.ticker2)\n self.Alice = USER_MODEL.objects.create_user(username='Alice', email\n ='[email protected]', password='hhhh')\n self.Bob = USER_MODEL.objects.create_user(username='Bob', email=\n '[email protected]', password='hhhh')\n for i in range(20):\n Order.objects.create(sender=self.Alice, side=Order.SIDES_SELL,\n price=10000 + i * 100, size=100000 + i * 10000, filled=0,\n status=Order.STATUS_WAITING_NEW, hash_signature='SIGA',\n market=self.market)\n for i in range(20):\n Order.objects.create(sender=self.Alice, side=Order.SIDES_BUY,\n price=10000 - i * 100, size=100000 + i * 10000, filled=0,\n status=Order.STATUS_WAITING_NEW, hash_signature='SIGA',\n market=self.market)\n for i in range(2):\n Order.objects.create(sender=self.Bob, side=Order.SIDES_BUY,\n price=0, size=100000 + i * 10000, filled=0, status=Order.\n STATUS_WAITING_NEW, hash_signature='SIGA', market=self.market)\n\n def test_market_exists(self):\n assert Market.objects.all().count() == 1\n\n def test_orders_created(self):\n print('---BID----')\n for order in Market.objects.all()[0].order_set.filter(side=Order.\n SIDES_BUY).exclude(price=0).order_by('-price', 'created_at'):\n print(order.price, order.size)\n print('---ASK----')\n for order in Market.objects.all()[0].order_set.filter(side=Order.\n SIDES_SELL).exclude(price=0).order_by('price', 'created_at'):\n print(order.price, order.size)\n\n def test_get_level_1(self):\n bid = Market.objects.all()[0].order_set.filter(side=Order.SIDES_BUY,\n status__in=[Order.STATUS_WAITING_NEW]).exclude(price=0).order_by(\n '-price')\n ask = Market.objects.all()[0].order_set.filter(side=Order.\n SIDES_SELL, status__in=[Order.STATUS_WAITING_NEW]).exclude(price=0\n ).order_by('price')\n bid_price = None\n ask_price = None\n if len(bid) > 0:\n bid_price = bid[0].price\n if len(ask) > 0:\n ask_price = ask[0].price\n print(f'Bid {bid_price} Ask {ask_price}')\n",
"step-2": "<mask token>\n\n\nclass Matching:\n\n @staticmethod\n def get_bid_ask(market: Market):\n bid = market.order_set.filter(side=Order.SIDES_BUY, status__in=[\n Order.STATUS_NEW, Order.STATUS_UPDATED, Order.\n STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by('-price')\n ask = market.order_set.filter(side=Order.SIDES_SELL, status__in=[\n Order.STATUS_NEW, Order.STATUS_UPDATED, Order.\n STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by('price')\n bid_price = None\n ask_price = None\n if len(bid) > 0:\n bid_price = bid[0].price\n if len(ask) > 0:\n ask_price = ask[0].price\n return bid_price, ask_price\n <mask token>\n\n @staticmethod\n def process_order(self, order: Order):\n if order.status == Order.STATUS_WAITING_NEW:\n order.status = Order.STATUS_NEW\n order.save()\n if order.price == 0:\n Matching.take(order)\n order.status = Order.STATUS_FILLED\n order.save()\n if order.price != 0:\n Matching.take(order)\n\n\nclass TestOrder(TestCase):\n\n def setUp(self) ->None:\n self.ticker1 = Ticker.objects.create(name='USD')\n self.ticker2 = Ticker.objects.create(name='EUR')\n self.market = Market.objects.create(name='USD/EUR', base_currency=\n self.ticker1, quote_currency=self.ticker2)\n self.Alice = USER_MODEL.objects.create_user(username='Alice', email\n ='[email protected]', password='hhhh')\n self.Bob = USER_MODEL.objects.create_user(username='Bob', email=\n '[email protected]', password='hhhh')\n for i in range(20):\n Order.objects.create(sender=self.Alice, side=Order.SIDES_SELL,\n price=10000 + i * 100, size=100000 + i * 10000, filled=0,\n status=Order.STATUS_WAITING_NEW, hash_signature='SIGA',\n market=self.market)\n for i in range(20):\n Order.objects.create(sender=self.Alice, side=Order.SIDES_BUY,\n price=10000 - i * 100, size=100000 + i * 10000, filled=0,\n status=Order.STATUS_WAITING_NEW, hash_signature='SIGA',\n market=self.market)\n for i in range(2):\n Order.objects.create(sender=self.Bob, side=Order.SIDES_BUY,\n price=0, size=100000 + i * 10000, filled=0, status=Order.\n STATUS_WAITING_NEW, hash_signature='SIGA', market=self.market)\n\n def test_market_exists(self):\n assert Market.objects.all().count() == 1\n\n def test_orders_created(self):\n print('---BID----')\n for order in Market.objects.all()[0].order_set.filter(side=Order.\n SIDES_BUY).exclude(price=0).order_by('-price', 'created_at'):\n print(order.price, order.size)\n print('---ASK----')\n for order in Market.objects.all()[0].order_set.filter(side=Order.\n SIDES_SELL).exclude(price=0).order_by('price', 'created_at'):\n print(order.price, order.size)\n\n def test_get_level_1(self):\n bid = Market.objects.all()[0].order_set.filter(side=Order.SIDES_BUY,\n status__in=[Order.STATUS_WAITING_NEW]).exclude(price=0).order_by(\n '-price')\n ask = Market.objects.all()[0].order_set.filter(side=Order.\n SIDES_SELL, status__in=[Order.STATUS_WAITING_NEW]).exclude(price=0\n ).order_by('price')\n bid_price = None\n ask_price = None\n if len(bid) > 0:\n bid_price = bid[0].price\n if len(ask) > 0:\n ask_price = ask[0].price\n print(f'Bid {bid_price} Ask {ask_price}')\n",
"step-3": "<mask token>\n\n\nclass Matching:\n\n @staticmethod\n def get_bid_ask(market: Market):\n bid = market.order_set.filter(side=Order.SIDES_BUY, status__in=[\n Order.STATUS_NEW, Order.STATUS_UPDATED, Order.\n STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by('-price')\n ask = market.order_set.filter(side=Order.SIDES_SELL, status__in=[\n Order.STATUS_NEW, Order.STATUS_UPDATED, Order.\n STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by('price')\n bid_price = None\n ask_price = None\n if len(bid) > 0:\n bid_price = bid[0].price\n if len(ask) > 0:\n ask_price = ask[0].price\n return bid_price, ask_price\n\n @staticmethod\n def take(order: Order):\n depth = []\n if order.side == Order.SIDES_SELL:\n depth = order.market.order_set.filter(side=Order.SIDES_BUY,\n status__in=[Order.STATUS_NEW, Order.STATUS_UPDATED, Order.\n STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by('-price')\n if order.side == Order.SIDES_BUY:\n depth = order.market.order_set.filter(side=Order.SIDES_SELL,\n status__in=[Order.STATUS_NEW, Order.STATUS_UPDATED, Order.\n STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by('price')\n for o in depth:\n if (order.side == Order.SIDES_SELL and order.price != 0 and \n order.price > o.price or order.side == Order.SIDES_BUY and \n order.price != 0 and order.price < o.price):\n break\n if order.size - order.filled > o.size - o.filled:\n fill_size = o.size - o.filled\n else:\n fill_size = order.size - order.filled\n o.fill(fill_size)\n order.fill(fill_size)\n o.save()\n order.save()\n if order.side == Order.SIDES_SELL:\n order_buy = o\n order_sell = order\n else:\n order_buy = order\n order_sell = o\n Trade.objects.create(order_buy=order_buy, order_sell=order_sell,\n price=o.price, side=order.side)\n if order.status == Order.STATUS_FILLED:\n break\n\n @staticmethod\n def process_order(self, order: Order):\n if order.status == Order.STATUS_WAITING_NEW:\n order.status = Order.STATUS_NEW\n order.save()\n if order.price == 0:\n Matching.take(order)\n order.status = Order.STATUS_FILLED\n order.save()\n if order.price != 0:\n Matching.take(order)\n\n\nclass TestOrder(TestCase):\n\n def setUp(self) ->None:\n self.ticker1 = Ticker.objects.create(name='USD')\n self.ticker2 = Ticker.objects.create(name='EUR')\n self.market = Market.objects.create(name='USD/EUR', base_currency=\n self.ticker1, quote_currency=self.ticker2)\n self.Alice = USER_MODEL.objects.create_user(username='Alice', email\n ='[email protected]', password='hhhh')\n self.Bob = USER_MODEL.objects.create_user(username='Bob', email=\n '[email protected]', password='hhhh')\n for i in range(20):\n Order.objects.create(sender=self.Alice, side=Order.SIDES_SELL,\n price=10000 + i * 100, size=100000 + i * 10000, filled=0,\n status=Order.STATUS_WAITING_NEW, hash_signature='SIGA',\n market=self.market)\n for i in range(20):\n Order.objects.create(sender=self.Alice, side=Order.SIDES_BUY,\n price=10000 - i * 100, size=100000 + i * 10000, filled=0,\n status=Order.STATUS_WAITING_NEW, hash_signature='SIGA',\n market=self.market)\n for i in range(2):\n Order.objects.create(sender=self.Bob, side=Order.SIDES_BUY,\n price=0, size=100000 + i * 10000, filled=0, status=Order.\n STATUS_WAITING_NEW, hash_signature='SIGA', market=self.market)\n\n def test_market_exists(self):\n assert Market.objects.all().count() == 1\n\n def test_orders_created(self):\n print('---BID----')\n for order in Market.objects.all()[0].order_set.filter(side=Order.\n SIDES_BUY).exclude(price=0).order_by('-price', 'created_at'):\n print(order.price, order.size)\n 
print('---ASK----')\n for order in Market.objects.all()[0].order_set.filter(side=Order.\n SIDES_SELL).exclude(price=0).order_by('price', 'created_at'):\n print(order.price, order.size)\n\n def test_get_level_1(self):\n bid = Market.objects.all()[0].order_set.filter(side=Order.SIDES_BUY,\n status__in=[Order.STATUS_WAITING_NEW]).exclude(price=0).order_by(\n '-price')\n ask = Market.objects.all()[0].order_set.filter(side=Order.\n SIDES_SELL, status__in=[Order.STATUS_WAITING_NEW]).exclude(price=0\n ).order_by('price')\n bid_price = None\n ask_price = None\n if len(bid) > 0:\n bid_price = bid[0].price\n if len(ask) > 0:\n ask_price = ask[0].price\n print(f'Bid {bid_price} Ask {ask_price}')\n",
"step-4": "from django.contrib.auth import get_user_model\nfrom django.test import TestCase\nfrom .models import Order\nfrom markets.models import Market\nfrom tickers.models import Ticker\nfrom trades.models import Trade\nUSER_MODEL = get_user_model()\n\n\nclass Matching:\n\n @staticmethod\n def get_bid_ask(market: Market):\n bid = market.order_set.filter(side=Order.SIDES_BUY, status__in=[\n Order.STATUS_NEW, Order.STATUS_UPDATED, Order.\n STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by('-price')\n ask = market.order_set.filter(side=Order.SIDES_SELL, status__in=[\n Order.STATUS_NEW, Order.STATUS_UPDATED, Order.\n STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by('price')\n bid_price = None\n ask_price = None\n if len(bid) > 0:\n bid_price = bid[0].price\n if len(ask) > 0:\n ask_price = ask[0].price\n return bid_price, ask_price\n\n @staticmethod\n def take(order: Order):\n depth = []\n if order.side == Order.SIDES_SELL:\n depth = order.market.order_set.filter(side=Order.SIDES_BUY,\n status__in=[Order.STATUS_NEW, Order.STATUS_UPDATED, Order.\n STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by('-price')\n if order.side == Order.SIDES_BUY:\n depth = order.market.order_set.filter(side=Order.SIDES_SELL,\n status__in=[Order.STATUS_NEW, Order.STATUS_UPDATED, Order.\n STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by('price')\n for o in depth:\n if (order.side == Order.SIDES_SELL and order.price != 0 and \n order.price > o.price or order.side == Order.SIDES_BUY and \n order.price != 0 and order.price < o.price):\n break\n if order.size - order.filled > o.size - o.filled:\n fill_size = o.size - o.filled\n else:\n fill_size = order.size - order.filled\n o.fill(fill_size)\n order.fill(fill_size)\n o.save()\n order.save()\n if order.side == Order.SIDES_SELL:\n order_buy = o\n order_sell = order\n else:\n order_buy = order\n order_sell = o\n Trade.objects.create(order_buy=order_buy, order_sell=order_sell,\n price=o.price, side=order.side)\n if order.status == Order.STATUS_FILLED:\n break\n\n @staticmethod\n def process_order(self, order: Order):\n if order.status == Order.STATUS_WAITING_NEW:\n order.status = Order.STATUS_NEW\n order.save()\n if order.price == 0:\n Matching.take(order)\n order.status = Order.STATUS_FILLED\n order.save()\n if order.price != 0:\n Matching.take(order)\n\n\nclass TestOrder(TestCase):\n\n def setUp(self) ->None:\n self.ticker1 = Ticker.objects.create(name='USD')\n self.ticker2 = Ticker.objects.create(name='EUR')\n self.market = Market.objects.create(name='USD/EUR', base_currency=\n self.ticker1, quote_currency=self.ticker2)\n self.Alice = USER_MODEL.objects.create_user(username='Alice', email\n ='[email protected]', password='hhhh')\n self.Bob = USER_MODEL.objects.create_user(username='Bob', email=\n '[email protected]', password='hhhh')\n for i in range(20):\n Order.objects.create(sender=self.Alice, side=Order.SIDES_SELL,\n price=10000 + i * 100, size=100000 + i * 10000, filled=0,\n status=Order.STATUS_WAITING_NEW, hash_signature='SIGA',\n market=self.market)\n for i in range(20):\n Order.objects.create(sender=self.Alice, side=Order.SIDES_BUY,\n price=10000 - i * 100, size=100000 + i * 10000, filled=0,\n status=Order.STATUS_WAITING_NEW, hash_signature='SIGA',\n market=self.market)\n for i in range(2):\n Order.objects.create(sender=self.Bob, side=Order.SIDES_BUY,\n price=0, size=100000 + i * 10000, filled=0, status=Order.\n STATUS_WAITING_NEW, hash_signature='SIGA', market=self.market)\n\n def test_market_exists(self):\n assert Market.objects.all().count() == 
1\n\n def test_orders_created(self):\n print('---BID----')\n for order in Market.objects.all()[0].order_set.filter(side=Order.\n SIDES_BUY).exclude(price=0).order_by('-price', 'created_at'):\n print(order.price, order.size)\n print('---ASK----')\n for order in Market.objects.all()[0].order_set.filter(side=Order.\n SIDES_SELL).exclude(price=0).order_by('price', 'created_at'):\n print(order.price, order.size)\n\n def test_get_level_1(self):\n bid = Market.objects.all()[0].order_set.filter(side=Order.SIDES_BUY,\n status__in=[Order.STATUS_WAITING_NEW]).exclude(price=0).order_by(\n '-price')\n ask = Market.objects.all()[0].order_set.filter(side=Order.\n SIDES_SELL, status__in=[Order.STATUS_WAITING_NEW]).exclude(price=0\n ).order_by('price')\n bid_price = None\n ask_price = None\n if len(bid) > 0:\n bid_price = bid[0].price\n if len(ask) > 0:\n ask_price = ask[0].price\n print(f'Bid {bid_price} Ask {ask_price}')\n",
"step-5": "from django.contrib.auth import get_user_model\nfrom django.test import TestCase\n\nfrom .models import Order\nfrom markets.models import Market\nfrom tickers.models import Ticker\nfrom trades.models import Trade\n\n\nUSER_MODEL = get_user_model()\n\n\nclass Matching:\n @staticmethod\n def get_bid_ask( market : Market):\n bid = market.order_set.filter(side=Order.SIDES_BUY, status__in=[Order.STATUS_NEW, Order.STATUS_UPDATED, Order.STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by(\"-price\")\n ask = market.order_set.filter(side=Order.SIDES_SELL, status__in=[Order.STATUS_NEW, Order.STATUS_UPDATED, Order.STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by(\"price\")\n bid_price = None\n ask_price = None\n if len(bid) > 0:\n bid_price = bid[0].price\n if len(ask) > 0:\n ask_price = ask[0].price\n\n return bid_price, ask_price\n\n @staticmethod\n def take(order: Order):\n depth = []\n if order.side == Order.SIDES_SELL:\n depth = order.market.order_set.filter(side=Order.SIDES_BUY, status__in=[Order.STATUS_NEW, Order.STATUS_UPDATED, Order.STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by(\"-price\")\n if order.side == Order.SIDES_BUY:\n depth = order.market.order_set.filter(side=Order.SIDES_SELL, status__in=[Order.STATUS_NEW, Order.STATUS_UPDATED, Order.STATUS_PARTIALLUY_FILLED]).exclude(price=0).order_by(\"price\")\n for o in depth:\n if (order.side == Order.SIDES_SELL and order.price != 0 and order.price > o.price) or (order.side == Order.SIDES_BUY and order.price != 0 and order.price < o.price):\n break\n if order.size - order.filled > o.size - o.filled:\n fill_size = o.size - o.filled\n else:\n fill_size = order.size - order.filled\n o.fill( fill_size )\n order.fill( fill_size )\n o.save()\n order.save()\n if order.side == Order.SIDES_SELL:\n order_buy = o\n order_sell = order\n else:\n order_buy = order\n order_sell = o\n\n # Creating trade object\n Trade.objects.create(order_buy=order_buy, order_sell=order_sell, price=o.price,side=order.side)\n\n if order.status == Order.STATUS_FILLED:\n break\n\n @staticmethod\n def process_order(self, order: Order ):\n if order.status == Order.STATUS_WAITING_NEW:\n order.status = Order.STATUS_NEW\n order.save()\n\n# best_bid_price, best_ask_price = self.get_bid_ask(order.market)\n if order.price == 0:\n Matching.take(order)\n order.status = Order.STATUS_FILLED\n order.save()\n if order.price != 0:\n Matching.take(order)\n\n\n\n\nclass TestOrder(TestCase):\n\n def setUp(self) -> None:\n self.ticker1 = Ticker.objects.create(name=\"USD\")\n self.ticker2 = Ticker.objects.create(name=\"EUR\")\n self.market = Market.objects.create(name=\"USD/EUR\",\n base_currency=self.ticker1,\n quote_currency=self.ticker2)\n\n self.Alice = USER_MODEL.objects.create_user(username=\"Alice\", email=\"[email protected]\", password=\"hhhh\")\n self.Bob = USER_MODEL.objects.create_user(username=\"Bob\", email=\"[email protected]\", password=\"hhhh\")\n\n # Creating Alice orders\n for i in range(20):\n Order.objects.create(sender=self.Alice,\n side=Order.SIDES_SELL,\n price=10000 + i*100,\n size=100000+i*10000,\n filled=0,\n status=Order.STATUS_WAITING_NEW,\n hash_signature=\"SIGA\",\n market=self.market)\n\n # Creating Alice orders\n for i in range(20):\n Order.objects.create(sender=self.Alice,\n side=Order.SIDES_BUY,\n price=10000 - i*100,\n size=100000+i*10000,\n filled=0,\n status=Order.STATUS_WAITING_NEW,\n hash_signature=\"SIGA\",\n market=self.market)\n\n # Creating Bob orders\n for i in range(2):\n Order.objects.create(sender=self.Bob,\n 
side=Order.SIDES_BUY,\n price=0,\n size=100000+i*10000,\n filled=0,\n status=Order.STATUS_WAITING_NEW,\n hash_signature=\"SIGA\",\n market=self.market)\n\n\n def test_market_exists(self):\n assert Market.objects.all().count() == 1\n\n def test_orders_created(self):\n #assert Market.objects.all()[0].order_set.count() == 40\n print(\"---BID----\")\n for order in Market.objects.all()[0].order_set.filter(side=Order.SIDES_BUY).exclude(price=0).order_by(\"-price\", \"created_at\"):\n print(order.price, order.size)\n\n print(\"---ASK----\")\n for order in Market.objects.all()[0].order_set.filter(side=Order.SIDES_SELL).exclude(price=0).order_by(\"price\", \"created_at\"):\n print(order.price, order.size)\n\n\n\n def test_get_level_1(self):\n bid = Market.objects.all()[0].order_set.filter(side=Order.SIDES_BUY, status__in=[Order.STATUS_WAITING_NEW]).exclude(price=0).order_by(\"-price\")\n ask = Market.objects.all()[0].order_set.filter(side=Order.SIDES_SELL, status__in=[Order.STATUS_WAITING_NEW]).exclude(price=0).order_by(\"price\")\n bid_price = None\n ask_price = None\n if len(bid) > 0:\n bid_price = bid[0].price\n if len(ask) > 0:\n ask_price = ask[0].price\n\n print(f'Bid {bid_price} Ask {ask_price}')\n\n",
"step-ids": [
7,
8,
9,
11,
12
]
}
|
[
7,
8,
9,
11,
12
] |
from rest_framework import serializers
from .models import Good, Favorite, Comment
class GoodSerializer(serializers.ModelSerializer):
class Meta:
model = Good
fields = ('user', 'article', 'created_at')
class FavoriteSerializer(serializers.ModelSerializer):
class Meta:
model = Favorite
fields = ('user', 'article', 'created_at')
class CommentSerializer(serializers.ModelSerializer):
class Meta:
model = Comment
fields = ('text', 'image', 'user', 'article', 'created_at')
|
normal
|
{
"blob_id": "fc8b9029955de6b11cbfe8e24107c687f49685c1",
"index": 9179,
"step-1": "<mask token>\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Comment\n fields = 'text', 'image', 'user', 'article', 'created_at'\n",
"step-2": "<mask token>\n\n\nclass FavoriteSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Favorite\n fields = 'user', 'article', 'created_at'\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Comment\n fields = 'text', 'image', 'user', 'article', 'created_at'\n",
"step-3": "<mask token>\n\n\nclass GoodSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Good\n fields = 'user', 'article', 'created_at'\n\n\nclass FavoriteSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Favorite\n fields = 'user', 'article', 'created_at'\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Comment\n fields = 'text', 'image', 'user', 'article', 'created_at'\n",
"step-4": "from rest_framework import serializers\nfrom .models import Good, Favorite, Comment\n\n\nclass GoodSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Good\n fields = 'user', 'article', 'created_at'\n\n\nclass FavoriteSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Favorite\n fields = 'user', 'article', 'created_at'\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Comment\n fields = 'text', 'image', 'user', 'article', 'created_at'\n",
"step-5": "from rest_framework import serializers\n\nfrom .models import Good, Favorite, Comment\n\n\nclass GoodSerializer(serializers.ModelSerializer):\n class Meta:\n model = Good\n fields = ('user', 'article', 'created_at')\n\n\nclass FavoriteSerializer(serializers.ModelSerializer):\n class Meta:\n model = Favorite\n fields = ('user', 'article', 'created_at')\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n class Meta:\n model = Comment\n fields = ('text', 'image', 'user', 'article', 'created_at')\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |