code (stringlengths 13-1.2M) | order_type (stringclasses 1 value) | original_example (dict) | step_ids (listlengths 1-5) |
---|---|---|---|
''' extract package names from the Meteor guide and write them to packages-guide
Uses the content folder of https://github.com/meteor/guide '''
from collections import defaultdict
import os
import sys
import markdown
from bs4 import BeautifulSoup
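# Read a markdown file, render it to HTML, and return every <a> tag found;
# on read errors, log the problem and return an empty list.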
def get_links_from_markdown(path, name):
try:
with open(path, 'r') as file:
md = file.read()
html = markdown.markdown(md)
soup = BeautifulSoup(html, 'html.parser')
return soup.find_all('a')
except PermissionError:
print('Could not open "%s"' % path)
except UnicodeDecodeError:
print('Could not proccess "%s"' % path)
return []
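# Scan the markdown files in src_dir (or sys.argv[1]) and group link texts
# of the form "owner:package" by source file name.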
def get_guide_packages(src_dir='content'):
if len(sys.argv) > 1:
src_dir = sys.argv[1]
subjects = defaultdict(list)
for entry in os.scandir(src_dir):
        name = entry.name[:-3]  # drop the ".md" extension
for link in get_links_from_markdown(entry.path, name):
if len(link.text.split(':')) == 2: # packages only
subjects[name].append(link.text)
return subjects
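# Write the collected package names to the output file, one "# subject" header per source file.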
def write_packages(packages, path='packages-guide'):
with open(path, 'w') as out:
out.write('\n# packages from http://guide.meteor.com\n')
for subject, links in packages.items():
out.write('\n# %s\n' % subject)
for link in links:
out.write('%s\n' % link)
if __name__ == '__main__':
GUIDE = get_guide_packages()
write_packages(GUIDE)
|
normal
|
{
"blob_id": "274185896ab5c11256d69699df69fc2c0dde4f2d",
"index": 987,
"step-1": "<mask token>\n\n\ndef get_links_from_markdown(path, name):\n try:\n with open(path, 'r') as file:\n md = file.read()\n html = markdown.markdown(md)\n soup = BeautifulSoup(html, 'html.parser')\n return soup.find_all('a')\n except PermissionError:\n print('Could not open \"%s\"' % path)\n except UnicodeDecodeError:\n print('Could not proccess \"%s\"' % path)\n return []\n\n\ndef get_guide_packages(src_dir='content'):\n if len(sys.argv) > 1:\n src_dir = sys.argv[1]\n subjects = defaultdict(list)\n for entry in os.scandir(src_dir):\n name = entry.name[:-3]\n for link in get_links_from_markdown(entry.path, name):\n if len(link.text.split(':')) == 2:\n subjects[name].append(link.text)\n return subjects\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_links_from_markdown(path, name):\n try:\n with open(path, 'r') as file:\n md = file.read()\n html = markdown.markdown(md)\n soup = BeautifulSoup(html, 'html.parser')\n return soup.find_all('a')\n except PermissionError:\n print('Could not open \"%s\"' % path)\n except UnicodeDecodeError:\n print('Could not proccess \"%s\"' % path)\n return []\n\n\ndef get_guide_packages(src_dir='content'):\n if len(sys.argv) > 1:\n src_dir = sys.argv[1]\n subjects = defaultdict(list)\n for entry in os.scandir(src_dir):\n name = entry.name[:-3]\n for link in get_links_from_markdown(entry.path, name):\n if len(link.text.split(':')) == 2:\n subjects[name].append(link.text)\n return subjects\n\n\ndef write_packages(packages, path='packages-guide'):\n with open(path, 'w') as out:\n out.write('\\n# packages from http://guide.meteor.com\\n')\n for subject, links in packages.items():\n out.write('\\n# %s\\n' % subject)\n for link in links:\n out.write('%s\\n' % link)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get_links_from_markdown(path, name):\n try:\n with open(path, 'r') as file:\n md = file.read()\n html = markdown.markdown(md)\n soup = BeautifulSoup(html, 'html.parser')\n return soup.find_all('a')\n except PermissionError:\n print('Could not open \"%s\"' % path)\n except UnicodeDecodeError:\n print('Could not proccess \"%s\"' % path)\n return []\n\n\ndef get_guide_packages(src_dir='content'):\n if len(sys.argv) > 1:\n src_dir = sys.argv[1]\n subjects = defaultdict(list)\n for entry in os.scandir(src_dir):\n name = entry.name[:-3]\n for link in get_links_from_markdown(entry.path, name):\n if len(link.text.split(':')) == 2:\n subjects[name].append(link.text)\n return subjects\n\n\ndef write_packages(packages, path='packages-guide'):\n with open(path, 'w') as out:\n out.write('\\n# packages from http://guide.meteor.com\\n')\n for subject, links in packages.items():\n out.write('\\n# %s\\n' % subject)\n for link in links:\n out.write('%s\\n' % link)\n\n\nif __name__ == '__main__':\n GUIDE = get_guide_packages()\n write_packages(GUIDE)\n",
"step-4": "<mask token>\nfrom collections import defaultdict\nimport os\nimport sys\nimport markdown\nfrom bs4 import BeautifulSoup\n\n\ndef get_links_from_markdown(path, name):\n try:\n with open(path, 'r') as file:\n md = file.read()\n html = markdown.markdown(md)\n soup = BeautifulSoup(html, 'html.parser')\n return soup.find_all('a')\n except PermissionError:\n print('Could not open \"%s\"' % path)\n except UnicodeDecodeError:\n print('Could not proccess \"%s\"' % path)\n return []\n\n\ndef get_guide_packages(src_dir='content'):\n if len(sys.argv) > 1:\n src_dir = sys.argv[1]\n subjects = defaultdict(list)\n for entry in os.scandir(src_dir):\n name = entry.name[:-3]\n for link in get_links_from_markdown(entry.path, name):\n if len(link.text.split(':')) == 2:\n subjects[name].append(link.text)\n return subjects\n\n\ndef write_packages(packages, path='packages-guide'):\n with open(path, 'w') as out:\n out.write('\\n# packages from http://guide.meteor.com\\n')\n for subject, links in packages.items():\n out.write('\\n# %s\\n' % subject)\n for link in links:\n out.write('%s\\n' % link)\n\n\nif __name__ == '__main__':\n GUIDE = get_guide_packages()\n write_packages(GUIDE)\n",
"step-5": "''' extract package names from the Meteor guide and write them to packages-guide\n Uses the content folder of https://github.com/meteor/guide '''\n\nfrom collections import defaultdict\nimport os\nimport sys\n\nimport markdown\nfrom bs4 import BeautifulSoup\n\n\ndef get_links_from_markdown(path, name):\n try:\n with open(path, 'r') as file:\n md = file.read()\n html = markdown.markdown(md)\n soup = BeautifulSoup(html, 'html.parser')\n return soup.find_all('a')\n except PermissionError:\n print('Could not open \"%s\"' % path)\n except UnicodeDecodeError:\n print('Could not proccess \"%s\"' % path)\n return []\n\n\ndef get_guide_packages(src_dir='content'):\n if len(sys.argv) > 1:\n src_dir = sys.argv[1]\n subjects = defaultdict(list)\n for entry in os.scandir(src_dir):\n name = entry.name[:-3]\n for link in get_links_from_markdown(entry.path, name):\n if len(link.text.split(':')) == 2: # packages only\n subjects[name].append(link.text)\n return subjects\n\n\ndef write_packages(packages, path='packages-guide'):\n with open(path, 'w') as out:\n out.write('\\n# packages from http://guide.meteor.com\\n')\n for subject, links in packages.items():\n out.write('\\n# %s\\n' % subject)\n for link in links:\n out.write('%s\\n' % link)\n\n\nif __name__ == '__main__':\n GUIDE = get_guide_packages()\n write_packages(GUIDE)\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
import pandas as pd
from sqlalchemy import create_engine
# file = 'testfile.csv'
# print(pd.read_csv(file, nrows=5))
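# Append the contents of testfile_short1.csv to testfile_short3.csv twice.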
with open('testfile_short1.csv', 'r') as original:
    data = original.read()
for i in range(2):
    with open('testfile_short3.csv', 'a') as modified:
        modified.write(data)
|
normal
|
{
"blob_id": "d7b45e76f150107cd62be160e8938f17dad90623",
"index": 58,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open('testfile_short1.csv', 'r') as original:\n data = original.read()\nfor i in range(2):\n with open('testfile_short3.csv', 'a') as modified:\n modified.write(data)\n",
"step-3": "import pandas as pd\nfrom sqlalchemy import create_engine\nwith open('testfile_short1.csv', 'r') as original:\n data = original.read()\nfor i in range(2):\n with open('testfile_short3.csv', 'a') as modified:\n modified.write(data)\n",
"step-4": "import pandas as pd\nfrom sqlalchemy import create_engine\n# file = 'testfile.csv'\n\n# print(pd.read_csv(file, nrows=5))\n\nwith open('testfile_short1.csv', 'r') as original: data = original.read()\nfor i in range(2):\n with open('testfile_short3.csv', 'a') as modified: modified.write(data)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
ID = '113'
TITLE = 'Path Sum II'
DIFFICULTY = 'Medium'
URL = 'https://oj.leetcode.com/problems/path-sum-ii/'
BOOK = False
PROBLEM = r"""Given a binary tree and a sum, find all root-to-leaf paths where each path's
sum equals the given sum.
For example:
Given the below binary tree and `sum = 22`,
5
/ \
4 8
/ / \
11 13 4
/ \ / \
7 2 5 1
return
[
[5,4,11,2],
[5,8,4,5]
]
"""
|
normal
|
{
"blob_id": "9a62a57f6d9af7ef09c8ed6e78a100df7978da6e",
"index": 8631,
"step-1": "<mask token>\n",
"step-2": "ID = '113'\nTITLE = 'Path Sum II'\nDIFFICULTY = 'Medium'\nURL = 'https://oj.leetcode.com/problems/path-sum-ii/'\nBOOK = False\nPROBLEM = \"\"\"Given a binary tree and a sum, find all root-to-leaf paths where each path's\nsum equals the given sum.\n\nFor example: \nGiven the below binary tree and `sum = 22`,\n\n \n \n \n 5\n / \\\\\n 4 8\n / / \\\\\n 11 13 4\n / \\\\ / \\\\\n 7 2 5 1\n \n\nreturn \n\n \n \n \n [\n [5,4,11,2],\n [5,8,4,5]\n ]\n \n\n\n\"\"\"\n",
"step-3": "ID = '113'\nTITLE = 'Path Sum II'\nDIFFICULTY = 'Medium'\nURL = 'https://oj.leetcode.com/problems/path-sum-ii/'\nBOOK = False\nPROBLEM = r\"\"\"Given a binary tree and a sum, find all root-to-leaf paths where each path's\nsum equals the given sum.\n\nFor example: \nGiven the below binary tree and `sum = 22`,\n\n \n \n \n 5\n / \\\n 4 8\n / / \\\n 11 13 4\n / \\ / \\\n 7 2 5 1\n \n\nreturn \n\n \n \n \n [\n [5,4,11,2],\n [5,8,4,5]\n ]\n \n\n\n\"\"\"\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# String (concatenation)
str1 = "py"
str2 = "thon"
print(str1+str2)
|
normal
|
{
"blob_id": "d95cbca8e892f18f099b370e139176770ce0c1b7",
"index": 8270,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(str1 + str2)\n",
"step-3": "str1 = 'py'\nstr2 = 'thon'\nprint(str1 + str2)\n",
"step-4": "# 文字列(結合)\n\nstr1 = \"py\"\nstr2 = \"thon\"\nprint(str1+str2)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
/home/salmane/anaconda3/lib/python3.7/_weakrefset.py
|
normal
|
{
"blob_id": "05d21a27097cf3295e9328aeafa466973a4d2611",
"index": 5696,
"step-1": "/home/salmane/anaconda3/lib/python3.7/_weakrefset.py",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import shutil
import tempfile
import salt.runners.net as net
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase, skipIf
@skipIf(not net.HAS_NAPALM, "napalm module required for this test")
class NetTest(TestCase, LoaderModuleMockMixin):
"""
Test the net runner
"""
def setup_loader_modules(self):
mock_get = MagicMock(return_value={})
self.extmods_dir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
self.addCleanup(shutil.rmtree, self.extmods_dir, ignore_errors=True)
return {
net: {
"__opts__": {
"optimization_order": [0, 1, 2],
"renderer": "yaml",
"renderer_blacklist": [],
"renderer_whitelist": [],
"extension_modules": self.extmods_dir,
},
"__salt__": {"mine.get": mock_get},
}
}
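    # With mine.get mocked to return {}, every runner is expected to come back empty.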
def test_interfaces(self):
ret = net.interfaces()
self.assertEqual(None, ret)
def test_findarp(self):
ret = net.findarp()
self.assertEqual(None, ret)
def test_findmac(self):
ret = net.findmac()
self.assertEqual(None, ret)
def test_lldp(self):
ret = net.lldp()
self.assertEqual(None, ret)
def test_find(self):
ret = net.find("")
self.assertEqual({}, ret)
def test_multi_find(self):
ret = net.multi_find()
self.assertEqual(None, ret)
|
normal
|
{
"blob_id": "0fb288e3ab074e021ec726d71cbd5c8546a8455b",
"index": 744,
"step-1": "<mask token>\n\n\n@skipIf(not net.HAS_NAPALM, 'napalm module required for this test')\nclass NetTest(TestCase, LoaderModuleMockMixin):\n <mask token>\n <mask token>\n\n def test_interfaces(self):\n ret = net.interfaces()\n self.assertEqual(None, ret)\n\n def test_findarp(self):\n ret = net.findarp()\n self.assertEqual(None, ret)\n <mask token>\n <mask token>\n\n def test_find(self):\n ret = net.find('')\n self.assertEqual({}, ret)\n\n def test_multi_find(self):\n ret = net.multi_find()\n self.assertEqual(None, ret)\n",
"step-2": "<mask token>\n\n\n@skipIf(not net.HAS_NAPALM, 'napalm module required for this test')\nclass NetTest(TestCase, LoaderModuleMockMixin):\n <mask token>\n <mask token>\n\n def test_interfaces(self):\n ret = net.interfaces()\n self.assertEqual(None, ret)\n\n def test_findarp(self):\n ret = net.findarp()\n self.assertEqual(None, ret)\n\n def test_findmac(self):\n ret = net.findmac()\n self.assertEqual(None, ret)\n <mask token>\n\n def test_find(self):\n ret = net.find('')\n self.assertEqual({}, ret)\n\n def test_multi_find(self):\n ret = net.multi_find()\n self.assertEqual(None, ret)\n",
"step-3": "<mask token>\n\n\n@skipIf(not net.HAS_NAPALM, 'napalm module required for this test')\nclass NetTest(TestCase, LoaderModuleMockMixin):\n <mask token>\n\n def setup_loader_modules(self):\n mock_get = MagicMock(return_value={})\n self.extmods_dir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)\n self.addCleanup(shutil.rmtree, self.extmods_dir, ignore_errors=True)\n return {net: {'__opts__': {'optimization_order': [0, 1, 2],\n 'renderer': 'yaml', 'renderer_blacklist': [],\n 'renderer_whitelist': [], 'extension_modules': self.extmods_dir\n }, '__salt__': {'mine.get': mock_get}}}\n\n def test_interfaces(self):\n ret = net.interfaces()\n self.assertEqual(None, ret)\n\n def test_findarp(self):\n ret = net.findarp()\n self.assertEqual(None, ret)\n\n def test_findmac(self):\n ret = net.findmac()\n self.assertEqual(None, ret)\n\n def test_lldp(self):\n ret = net.lldp()\n self.assertEqual(None, ret)\n\n def test_find(self):\n ret = net.find('')\n self.assertEqual({}, ret)\n\n def test_multi_find(self):\n ret = net.multi_find()\n self.assertEqual(None, ret)\n",
"step-4": "import shutil\nimport tempfile\nimport salt.runners.net as net\nfrom tests.support.mixins import LoaderModuleMockMixin\nfrom tests.support.mock import MagicMock\nfrom tests.support.runtests import RUNTIME_VARS\nfrom tests.support.unit import TestCase, skipIf\n\n\n@skipIf(not net.HAS_NAPALM, 'napalm module required for this test')\nclass NetTest(TestCase, LoaderModuleMockMixin):\n \"\"\"\n Test the net runner\n \"\"\"\n\n def setup_loader_modules(self):\n mock_get = MagicMock(return_value={})\n self.extmods_dir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)\n self.addCleanup(shutil.rmtree, self.extmods_dir, ignore_errors=True)\n return {net: {'__opts__': {'optimization_order': [0, 1, 2],\n 'renderer': 'yaml', 'renderer_blacklist': [],\n 'renderer_whitelist': [], 'extension_modules': self.extmods_dir\n }, '__salt__': {'mine.get': mock_get}}}\n\n def test_interfaces(self):\n ret = net.interfaces()\n self.assertEqual(None, ret)\n\n def test_findarp(self):\n ret = net.findarp()\n self.assertEqual(None, ret)\n\n def test_findmac(self):\n ret = net.findmac()\n self.assertEqual(None, ret)\n\n def test_lldp(self):\n ret = net.lldp()\n self.assertEqual(None, ret)\n\n def test_find(self):\n ret = net.find('')\n self.assertEqual({}, ret)\n\n def test_multi_find(self):\n ret = net.multi_find()\n self.assertEqual(None, ret)\n",
"step-5": "import shutil\nimport tempfile\n\nimport salt.runners.net as net\nfrom tests.support.mixins import LoaderModuleMockMixin\nfrom tests.support.mock import MagicMock\nfrom tests.support.runtests import RUNTIME_VARS\nfrom tests.support.unit import TestCase, skipIf\n\n\n@skipIf(not net.HAS_NAPALM, \"napalm module required for this test\")\nclass NetTest(TestCase, LoaderModuleMockMixin):\n \"\"\"\n Test the net runner\n \"\"\"\n\n def setup_loader_modules(self):\n mock_get = MagicMock(return_value={})\n self.extmods_dir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)\n self.addCleanup(shutil.rmtree, self.extmods_dir, ignore_errors=True)\n return {\n net: {\n \"__opts__\": {\n \"optimization_order\": [0, 1, 2],\n \"renderer\": \"yaml\",\n \"renderer_blacklist\": [],\n \"renderer_whitelist\": [],\n \"extension_modules\": self.extmods_dir,\n },\n \"__salt__\": {\"mine.get\": mock_get},\n }\n }\n\n def test_interfaces(self):\n ret = net.interfaces()\n self.assertEqual(None, ret)\n\n def test_findarp(self):\n ret = net.findarp()\n self.assertEqual(None, ret)\n\n def test_findmac(self):\n ret = net.findmac()\n self.assertEqual(None, ret)\n\n def test_lldp(self):\n ret = net.lldp()\n self.assertEqual(None, ret)\n\n def test_find(self):\n ret = net.find(\"\")\n self.assertEqual({}, ret)\n\n def test_multi_find(self):\n ret = net.multi_find()\n self.assertEqual(None, ret)\n",
"step-ids": [
5,
6,
8,
10,
11
]
}
|
[
5,
6,
8,
10,
11
] |
import sys
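# For each input line of the form "words | letters", print the first word that
# contains every letter, or False if no word qualifies.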
with open(sys.argv[1], 'r') as test_cases:
for test in test_cases:
stringe = test.strip()
list1 = stringe.split(" | ")
list2 = list1[0].split(" ")
        for item in list2:
            kha = 0  # reset the match count for each candidate word
            for c in list1[1]:
                if c in item:
                    kha += 1
            if kha == len(list1[1]):
                print(item)
                break
        else:
            print(False)
|
normal
|
{
"blob_id": "def2721cd89501b1004d5d3f4f58df300616c1be",
"index": 2747,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith open(sys.argv[1], 'r') as test_cases:\n for test in test_cases:\n stringe = test.strip()\n list1 = stringe.split(' | ')\n list2 = list1[0].split(' ')\n kha = 0\n for item in list2:\n for c in list1[1]:\n if c in item:\n kha += 1\n if kha == len(list1[1]):\n print(item)\n break\n else:\n print(False)\n break\n",
"step-3": "import sys\nwith open(sys.argv[1], 'r') as test_cases:\n for test in test_cases:\n stringe = test.strip()\n list1 = stringe.split(' | ')\n list2 = list1[0].split(' ')\n kha = 0\n for item in list2:\n for c in list1[1]:\n if c in item:\n kha += 1\n if kha == len(list1[1]):\n print(item)\n break\n else:\n print(False)\n break\n",
"step-4": "\r\nimport sys\r\n\r\nwith open(sys.argv[1], 'r') as test_cases:\r\n for test in test_cases:\r\n stringe = test.strip()\r\n list1 = stringe.split(\" | \")\r\n list2 = list1[0].split(\" \")\r\n kha = 0\r\n for item in list2:\r\n for c in list1[1]:\r\n if c in item:\r\n kha +=1\r\n if kha == len(list1[1]):\r\n print (item)\r\n break\r\n else:\r\n print (False)\r\n break",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import plotly.express as px
import pandas as pd
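# Timeline ("Gantt") of all operations with one row per work center (РЦ);
# bar widths come from the 'Вес' column.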
def fiig(plan):
df = pd.DataFrame(plan)
fig = px.timeline(df, x_start="Начало", x_end="Завершение", y="РЦ", color='РЦ', facet_row_spacing=0.6,
facet_col_spacing=0.6, opacity=0.9, hover_data=['Проект', 'МК', 'Наменование', 'Номер', 'Минут'],
title='график проектов')
for i, d in enumerate(fig.data):
d.width = df[df['РЦ'] == d.name]['Вес']
"""
fig.add_hrect( y0="Проект C", y1="Проект C",
annotation_text="аываыв", annotation_position = 'inside top left',
fillcolor="green", opacity=0.25, line_width=0, annotation_font_size=20,
annotation_font_color="blue")
fig.add_vline(x="2009-02-06", line_width=3, line_dash="dash", line_color="green", opacity=0.06)
"""
# fig.add_hline(y=" ")
# fig.add_hline(y=" ")
return fig
# fig.add_vrect(x0=0.9, x1=2)
# fig.show()
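# Timeline with one row per project, colored by work center.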
def fig_porc_projects(plan):
df = pd.DataFrame(plan)
fig = px.timeline(df, x_start="Начало", x_end="Завершение", y="Проект", color='РЦ', facet_row_spacing=0.2,
facet_col_spacing=0.1, opacity=0.5, hover_data=plan[0].keys(), title=f'Диаграмма проектов')
# for i, d in enumerate(fig.data):
# d.width = df[df['РЦ'] == d.name]['РЦ']
"""
fig.add_hrect( y0="Проект C", y1="Проект C",
annotation_text="аываыв", annotation_position = 'inside top left',
fillcolor="green", opacity=0.25, line_width=0, annotation_font_size=20,
annotation_font_color="blue")
fig.add_vline(x="2009-02-06", line_width=3, line_dash="dash", line_color="green", opacity=0.06)
"""
# fig.add_hline(y=" ")
# fig.add_hline(y=" ")
return fig
# fig.add_vrect(x0=0.9, x1=2)
# fig.show()
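# Detailed timeline (one row per part number) for a single project, colored by work center.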
def fig_podetalno_naproject_rc(plan, proj):
df = pd.DataFrame([_ for _ in plan if proj in _['Проект']])
fig = px.timeline(df, x_start="Начало", x_end="Завершение", y="Номер", color='РЦ', facet_row_spacing=0.2,
facet_col_spacing=0.1, opacity=0.5, hover_data=plan[0].keys(), title=f'Диаграмма по {proj}')
# for i, d in enumerate(fig.data):
# d.width = df[df['РЦ'] == d.name]['РЦ']
"""
fig.add_hrect( y0="Проект C", y1="Проект C",
annotation_text="аываыв", annotation_position = 'inside top left',
fillcolor="green", opacity=0.25, line_width=0, annotation_font_size=20,
annotation_font_color="blue")
fig.add_vline(x="2009-02-06", line_width=3, line_dash="dash", line_color="green", opacity=0.06)
"""
# fig.add_hline(y=" ")
# fig.add_hline(y=" ")
return fig
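# Detailed timeline for a single work center across projects; bar widths derive from the 'Пост' column.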
def fig_podetalno_narc_projects(plan, rc):
filtr = [_ for _ in plan if rc in _['РЦ']]
df = pd.DataFrame(filtr)
fig = px.timeline(df, x_start="Начало", x_end="Завершение", y="Номер", color='Проект', facet_row_spacing=0.2,
facet_col_spacing=0.1, opacity=0.5, hover_data=plan[0].keys(), title=f'Диаграмма по {rc}')
for i, d in enumerate(fig.data):
d.width = df[df['Проект'] == d.name]['Пост']/10 + 0.1
"""
fig.add_hrect( y0="Проект C", y1="Проект C",
annotation_text="аываыв", annotation_position = 'inside top left',
fillcolor="green", opacity=0.25, line_width=0, annotation_font_size=20,
annotation_font_color="blue")
fig.add_vline(x="2009-02-06", line_width=3, line_dash="dash", line_color="green", opacity=0.06)
"""
# fig.add_hline(y=" ")
# fig.add_hline(y=" ")
return fig
|
normal
|
{
"blob_id": "09850f0d3d295170545a6342337e97a0f190989a",
"index": 6578,
"step-1": "<mask token>\n\n\ndef fig_porc_projects(plan):\n df = pd.DataFrame(plan)\n fig = px.timeline(df, x_start='Начало', x_end='Завершение', y='Проект',\n color='РЦ', facet_row_spacing=0.2, facet_col_spacing=0.1, opacity=\n 0.5, hover_data=plan[0].keys(), title=f'Диаграмма проектов')\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n return fig\n\n\ndef fig_podetalno_naproject_rc(plan, proj):\n df = pd.DataFrame([_ for _ in plan if proj in _['Проект']])\n fig = px.timeline(df, x_start='Начало', x_end='Завершение', y='Номер',\n color='РЦ', facet_row_spacing=0.2, facet_col_spacing=0.1, opacity=\n 0.5, hover_data=plan[0].keys(), title=f'Диаграмма по {proj}')\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n return fig\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef fig_porc_projects(plan):\n df = pd.DataFrame(plan)\n fig = px.timeline(df, x_start='Начало', x_end='Завершение', y='Проект',\n color='РЦ', facet_row_spacing=0.2, facet_col_spacing=0.1, opacity=\n 0.5, hover_data=plan[0].keys(), title=f'Диаграмма проектов')\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n return fig\n\n\ndef fig_podetalno_naproject_rc(plan, proj):\n df = pd.DataFrame([_ for _ in plan if proj in _['Проект']])\n fig = px.timeline(df, x_start='Начало', x_end='Завершение', y='Номер',\n color='РЦ', facet_row_spacing=0.2, facet_col_spacing=0.1, opacity=\n 0.5, hover_data=plan[0].keys(), title=f'Диаграмма по {proj}')\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n return fig\n\n\ndef fig_podetalno_narc_projects(plan, rc):\n filtr = [_ for _ in plan if rc in _['РЦ']]\n df = pd.DataFrame(filtr)\n fig = px.timeline(df, x_start='Начало', x_end='Завершение', y='Номер',\n color='Проект', facet_row_spacing=0.2, facet_col_spacing=0.1,\n opacity=0.5, hover_data=plan[0].keys(), title=f'Диаграмма по {rc}')\n for i, d in enumerate(fig.data):\n d.width = df[df['Проект'] == d.name]['Пост'] / 10 + 0.1\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n return fig\n",
"step-3": "<mask token>\n\n\ndef fiig(plan):\n df = pd.DataFrame(plan)\n fig = px.timeline(df, x_start='Начало', x_end='Завершение', y='РЦ',\n color='РЦ', facet_row_spacing=0.6, facet_col_spacing=0.6, opacity=\n 0.9, hover_data=['Проект', 'МК', 'Наменование', 'Номер', 'Минут'],\n title='график проектов')\n for i, d in enumerate(fig.data):\n d.width = df[df['РЦ'] == d.name]['Вес']\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n return fig\n\n\ndef fig_porc_projects(plan):\n df = pd.DataFrame(plan)\n fig = px.timeline(df, x_start='Начало', x_end='Завершение', y='Проект',\n color='РЦ', facet_row_spacing=0.2, facet_col_spacing=0.1, opacity=\n 0.5, hover_data=plan[0].keys(), title=f'Диаграмма проектов')\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n return fig\n\n\ndef fig_podetalno_naproject_rc(plan, proj):\n df = pd.DataFrame([_ for _ in plan if proj in _['Проект']])\n fig = px.timeline(df, x_start='Начало', x_end='Завершение', y='Номер',\n color='РЦ', facet_row_spacing=0.2, facet_col_spacing=0.1, opacity=\n 0.5, hover_data=plan[0].keys(), title=f'Диаграмма по {proj}')\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n return fig\n\n\ndef fig_podetalno_narc_projects(plan, rc):\n filtr = [_ for _ in plan if rc in _['РЦ']]\n df = pd.DataFrame(filtr)\n fig = px.timeline(df, x_start='Начало', x_end='Завершение', y='Номер',\n color='Проект', facet_row_spacing=0.2, facet_col_spacing=0.1,\n opacity=0.5, hover_data=plan[0].keys(), title=f'Диаграмма по {rc}')\n for i, d in enumerate(fig.data):\n d.width = df[df['Проект'] == d.name]['Пост'] / 10 + 0.1\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n return fig\n",
"step-4": "import plotly.express as px\nimport pandas as pd\n\n\ndef fiig(plan):\n df = pd.DataFrame(plan)\n fig = px.timeline(df, x_start='Начало', x_end='Завершение', y='РЦ',\n color='РЦ', facet_row_spacing=0.6, facet_col_spacing=0.6, opacity=\n 0.9, hover_data=['Проект', 'МК', 'Наменование', 'Номер', 'Минут'],\n title='график проектов')\n for i, d in enumerate(fig.data):\n d.width = df[df['РЦ'] == d.name]['Вес']\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n return fig\n\n\ndef fig_porc_projects(plan):\n df = pd.DataFrame(plan)\n fig = px.timeline(df, x_start='Начало', x_end='Завершение', y='Проект',\n color='РЦ', facet_row_spacing=0.2, facet_col_spacing=0.1, opacity=\n 0.5, hover_data=plan[0].keys(), title=f'Диаграмма проектов')\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n return fig\n\n\ndef fig_podetalno_naproject_rc(plan, proj):\n df = pd.DataFrame([_ for _ in plan if proj in _['Проект']])\n fig = px.timeline(df, x_start='Начало', x_end='Завершение', y='Номер',\n color='РЦ', facet_row_spacing=0.2, facet_col_spacing=0.1, opacity=\n 0.5, hover_data=plan[0].keys(), title=f'Диаграмма по {proj}')\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n return fig\n\n\ndef fig_podetalno_narc_projects(plan, rc):\n filtr = [_ for _ in plan if rc in _['РЦ']]\n df = pd.DataFrame(filtr)\n fig = px.timeline(df, x_start='Начало', x_end='Завершение', y='Номер',\n color='Проект', facet_row_spacing=0.2, facet_col_spacing=0.1,\n opacity=0.5, hover_data=plan[0].keys(), title=f'Диаграмма по {rc}')\n for i, d in enumerate(fig.data):\n d.width = df[df['Проект'] == d.name]['Пост'] / 10 + 0.1\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n return fig\n",
"step-5": "import plotly.express as px\nimport pandas as pd\n\n\ndef fiig(plan):\n df = pd.DataFrame(plan)\n fig = px.timeline(df, x_start=\"Начало\", x_end=\"Завершение\", y=\"РЦ\", color='РЦ', facet_row_spacing=0.6,\n facet_col_spacing=0.6, opacity=0.9, hover_data=['Проект', 'МК', 'Наменование', 'Номер', 'Минут'],\n title='график проектов')\n for i, d in enumerate(fig.data):\n d.width = df[df['РЦ'] == d.name]['Вес']\n\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n\n # fig.add_hline(y=\" \")\n # fig.add_hline(y=\" \")\n return fig\n\n\n# fig.add_vrect(x0=0.9, x1=2)\n# fig.show()\n\ndef fig_porc_projects(plan):\n df = pd.DataFrame(plan)\n fig = px.timeline(df, x_start=\"Начало\", x_end=\"Завершение\", y=\"Проект\", color='РЦ', facet_row_spacing=0.2,\n facet_col_spacing=0.1, opacity=0.5, hover_data=plan[0].keys(), title=f'Диаграмма проектов')\n # for i, d in enumerate(fig.data):\n # d.width = df[df['РЦ'] == d.name]['РЦ']\n\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n\n # fig.add_hline(y=\" \")\n # fig.add_hline(y=\" \")\n return fig\n\n\n# fig.add_vrect(x0=0.9, x1=2)\n# fig.show()\n\ndef fig_podetalno_naproject_rc(plan, proj):\n df = pd.DataFrame([_ for _ in plan if proj in _['Проект']])\n\n fig = px.timeline(df, x_start=\"Начало\", x_end=\"Завершение\", y=\"Номер\", color='РЦ', facet_row_spacing=0.2,\n facet_col_spacing=0.1, opacity=0.5, hover_data=plan[0].keys(), title=f'Диаграмма по {proj}')\n # for i, d in enumerate(fig.data):\n # d.width = df[df['РЦ'] == d.name]['РЦ']\n\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n\n # fig.add_hline(y=\" \")\n # fig.add_hline(y=\" \")\n return fig\n\ndef fig_podetalno_narc_projects(plan, rc):\n filtr = [_ for _ in plan if rc in _['РЦ']]\n df = pd.DataFrame(filtr)\n\n fig = px.timeline(df, x_start=\"Начало\", x_end=\"Завершение\", y=\"Номер\", color='Проект', facet_row_spacing=0.2,\n facet_col_spacing=0.1, opacity=0.5, hover_data=plan[0].keys(), title=f'Диаграмма по {rc}')\n for i, d in enumerate(fig.data):\n d.width = df[df['Проект'] == d.name]['Пост']/10 + 0.1\n\n\n \"\"\" \n fig.add_hrect( y0=\"Проект C\", y1=\"Проект C\",\n annotation_text=\"аываыв\", annotation_position = 'inside top left',\n fillcolor=\"green\", opacity=0.25, line_width=0, annotation_font_size=20,\n annotation_font_color=\"blue\")\n fig.add_vline(x=\"2009-02-06\", line_width=3, line_dash=\"dash\", line_color=\"green\", opacity=0.06)\n\"\"\"\n\n # fig.add_hline(y=\" \")\n # fig.add_hline(y=\" \")\n return fig\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from HDPython import *
import HDPython.examples as ahe
from enum import Enum, auto
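# The counter's three finite-state-machine states.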
class counter_state(Enum):
idle = auto()
running = auto()
done = auto()
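# Counter exposed as a master-style class: start it with count_to_max(),
# poll isDone(), then reset(); _onPull() increments while running.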
class Counter_cl(v_class_master):
def __init__(self):
super().__init__()
self.counter = v_variable(v_slv(32))
self.counter_max = v_variable(v_slv(32))
self.state = v_variable(v_enum(counter_state.idle))
def _onPull(self):
if self.state == counter_state.running:
self.counter << self.counter + 1
def count_to_max(self, maxValue):
if self.state == counter_state.idle:
self.counter << 0
self.counter_max << maxValue
self.state << counter_state.running
def isDone(self):
return self.state == counter_state.done
def reset(self):
if self.state == counter_state.done:
self.state << counter_state.idle
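# Minimal testbench: a clock generator drives a process that increments a
# free-running counter and exercises the Counter_cl instance.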
class my_first_test_bench(v_entity):
def __init__(self):
super().__init__()
self.architecture()
@architecture
def architecture(self):
counter = v_variable(v_slv(32))
max_cnt = v_variable(v_slv(32,300))
clkgen = v_create(ahe.clk_generator())
cnt = Counter_cl()
@rising_edge(clkgen.clk)
def proc():
counter << counter + 1
cnt.count_to_max(max_cnt)
if cnt.isDone():
cnt.reset()
end_architecture()
my_first_instance = v_create(my_first_test_bench())
convert_to_hdl(my_first_instance, "myFirst")
|
normal
|
{
"blob_id": "046db03b146ce0182ba7889908f536a09de051d5",
"index": 5069,
"step-1": "<mask token>\n\n\nclass Counter_cl(v_class_master):\n\n def __init__(self):\n super().__init__()\n self.counter = v_variable(v_slv(32))\n self.counter_max = v_variable(v_slv(32))\n self.state = v_variable(v_enum(counter_state.idle))\n\n def _onPull(self):\n if self.state == counter_state.running:\n self.counter << self.counter + 1\n <mask token>\n\n def isDone(self):\n return self.state == counter_state.done\n <mask token>\n\n\nclass my_first_test_bench(v_entity):\n\n def __init__(self):\n super().__init__()\n self.architecture()\n\n @architecture\n def architecture(self):\n counter = v_variable(v_slv(32))\n max_cnt = v_variable(v_slv(32, 300))\n clkgen = v_create(ahe.clk_generator())\n cnt = Counter_cl()\n\n @rising_edge(clkgen.clk)\n def proc():\n counter << counter + 1\n cnt.count_to_max(max_cnt)\n if cnt.isDone():\n cnt.reset()\n end_architecture()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Counter_cl(v_class_master):\n\n def __init__(self):\n super().__init__()\n self.counter = v_variable(v_slv(32))\n self.counter_max = v_variable(v_slv(32))\n self.state = v_variable(v_enum(counter_state.idle))\n\n def _onPull(self):\n if self.state == counter_state.running:\n self.counter << self.counter + 1\n <mask token>\n\n def isDone(self):\n return self.state == counter_state.done\n\n def reset(self):\n if self.state == counter_state.done:\n self.state << counter_state.idle\n\n\nclass my_first_test_bench(v_entity):\n\n def __init__(self):\n super().__init__()\n self.architecture()\n\n @architecture\n def architecture(self):\n counter = v_variable(v_slv(32))\n max_cnt = v_variable(v_slv(32, 300))\n clkgen = v_create(ahe.clk_generator())\n cnt = Counter_cl()\n\n @rising_edge(clkgen.clk)\n def proc():\n counter << counter + 1\n cnt.count_to_max(max_cnt)\n if cnt.isDone():\n cnt.reset()\n end_architecture()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Counter_cl(v_class_master):\n\n def __init__(self):\n super().__init__()\n self.counter = v_variable(v_slv(32))\n self.counter_max = v_variable(v_slv(32))\n self.state = v_variable(v_enum(counter_state.idle))\n\n def _onPull(self):\n if self.state == counter_state.running:\n self.counter << self.counter + 1\n\n def count_to_max(self, maxValue):\n if self.state == counter_state.idle:\n self.counter << 0\n self.counter_max << maxValue\n self.state << counter_state.running\n\n def isDone(self):\n return self.state == counter_state.done\n\n def reset(self):\n if self.state == counter_state.done:\n self.state << counter_state.idle\n\n\nclass my_first_test_bench(v_entity):\n\n def __init__(self):\n super().__init__()\n self.architecture()\n\n @architecture\n def architecture(self):\n counter = v_variable(v_slv(32))\n max_cnt = v_variable(v_slv(32, 300))\n clkgen = v_create(ahe.clk_generator())\n cnt = Counter_cl()\n\n @rising_edge(clkgen.clk)\n def proc():\n counter << counter + 1\n cnt.count_to_max(max_cnt)\n if cnt.isDone():\n cnt.reset()\n end_architecture()\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass counter_state(Enum):\n idle = auto()\n running = auto()\n done = auto()\n\n\nclass Counter_cl(v_class_master):\n\n def __init__(self):\n super().__init__()\n self.counter = v_variable(v_slv(32))\n self.counter_max = v_variable(v_slv(32))\n self.state = v_variable(v_enum(counter_state.idle))\n\n def _onPull(self):\n if self.state == counter_state.running:\n self.counter << self.counter + 1\n\n def count_to_max(self, maxValue):\n if self.state == counter_state.idle:\n self.counter << 0\n self.counter_max << maxValue\n self.state << counter_state.running\n\n def isDone(self):\n return self.state == counter_state.done\n\n def reset(self):\n if self.state == counter_state.done:\n self.state << counter_state.idle\n\n\nclass my_first_test_bench(v_entity):\n\n def __init__(self):\n super().__init__()\n self.architecture()\n\n @architecture\n def architecture(self):\n counter = v_variable(v_slv(32))\n max_cnt = v_variable(v_slv(32, 300))\n clkgen = v_create(ahe.clk_generator())\n cnt = Counter_cl()\n\n @rising_edge(clkgen.clk)\n def proc():\n counter << counter + 1\n cnt.count_to_max(max_cnt)\n if cnt.isDone():\n cnt.reset()\n end_architecture()\n\n\n<mask token>\nconvert_to_hdl(my_first_instance, 'myFirst')\n",
"step-5": "from HDPython import *\nimport HDPython.examples as ahe\nfrom enum import Enum, auto\n\nclass counter_state(Enum):\n idle = auto()\n running = auto()\n done = auto()\n\nclass Counter_cl(v_class_master):\n def __init__(self):\n super().__init__()\n self.counter = v_variable(v_slv(32))\n self.counter_max = v_variable(v_slv(32))\n self.state = v_variable(v_enum(counter_state.idle))\n\n def _onPull(self):\n if self.state == counter_state.running:\n self.counter << self.counter + 1\n\n def count_to_max(self, maxValue):\n if self.state == counter_state.idle:\n self.counter << 0 \n self.counter_max << maxValue\n self.state << counter_state.running\n\n def isDone(self):\n return self.state == counter_state.done\n\n def reset(self):\n if self.state == counter_state.done:\n self.state << counter_state.idle\n\nclass my_first_test_bench(v_entity):\n def __init__(self):\n super().__init__()\n self.architecture()\n\n\n @architecture\n def architecture(self):\n counter = v_variable(v_slv(32))\n max_cnt = v_variable(v_slv(32,300))\n\n\n clkgen = v_create(ahe.clk_generator())\n\n cnt = Counter_cl()\n\n\n\n @rising_edge(clkgen.clk)\n def proc():\n counter << counter + 1\n cnt.count_to_max(max_cnt)\n if cnt.isDone():\n cnt.reset()\n\n \n\n end_architecture()\n\n\nmy_first_instance = v_create(my_first_test_bench())\n\nconvert_to_hdl(my_first_instance, \"myFirst\")",
"step-ids": [
7,
8,
9,
12,
15
]
}
|
[
7,
8,
9,
12,
15
] |
from django.shortcuts import render
from django.shortcuts import redirect
from django.http import HttpResponse
from .models import *
from django.contrib.auth import logout, authenticate, login
from django.contrib.auth.decorators import login_required
from django.template.loader import get_template
from django.template import Context
from django.views.decorators.csrf import csrf_exempt
from django.template.context_processors import csrf
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
import xml.etree.ElementTree as etree
from xml.dom.minidom import Document, parse
import xml.dom.minidom as dom
import datetime
import sys
from .parser import get_data
from django.http import QueryDict
import urllib
# Create your views here.
@csrf_exempt
def login_form(request):
formulario = '<form action="login" method="POST">'
formulario += 'Nombre<br><input type="text" name="Usuario"><br>'
formulario += 'Contraseña<br><input type="password" name="Password"><br>'
formulario += '<br><input type="submit" value="Entrar"></form>'
return formulario
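# Authenticate the posted credentials; on success redirect to /<username>,
# otherwise render the failure page.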
@csrf_exempt
def loginuser(request):
username = request.POST['Usuario']
password = request.POST['Password']
user = authenticate(username=username, password=password)
if user is not None:
login(request,user)
direcc = '/' + str(user)
return redirect(direcc)
else:
Error = "Por favor, introduzca un usuario y contraseña válidos"
template = get_template("fail.html")
c = Context ({'Error': Error})
renderizado = template.render(c)
return HttpResponse(renderizado)
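# HTML listing of the five aparcamientos with the most "me gusta" votes.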
def lista_megustas():
lista_todos = Aparcamiento.objects.all()
lista_ordenada = lista_todos.order_by("-contador_megusta")[:5]
Response = "LISTADO DE APARCAMIENTOS CON MÁS ME GUSTA<br><br>"
Existe = False
for i in lista_ordenada:
megustas = i.contador_megusta
#comentarios = Comentario.objects.filter(aparcamiento=i)
if megustas != 0:
Response += "<li><a href=" + i.content_url + ">" + i.nombre + "<br></a>"
Response += "Dirección: " + i.clase_vial + " " + i.localizacion + ", nº " + str(i.num)
Response += "<br><a href=http://localhost:1234/aparcamientos/" + i.entidad + ">" + "Más información<br></a><br>"
Existe = True
    if not Existe:
Response += "Aún no se han registrado comentarios para ningún aparcamiento"
    Response += "<br><br>"
return Response
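# HTML listing of every user's personal page, with a default title when none is set.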
def paginas_personales():
Lista = "PÁGINAS DE USUARIOS<br><br>"
usuarios = User.objects.all()
for i in usuarios:
try:
pagina = Usuario.objects.get(nombre=i.id).titulo_pagina
except ObjectDoesNotExist:
pagina = "Página de " + i.username
Lista += "<a href=http://localhost:1234/" + i.username + ">" + pagina + "</a> Usuario: " + i.username + "<br>"
return Lista
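# Plain HTML list of all aparcamientos linking to their detail pages.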
def lista_aparcamientos():
lista = ''
aparcamientos = Aparcamiento.objects.all()
for aparcamiento in aparcamientos:
nombre_aparcamiento = aparcamiento.nombre
url_aparcamiento = aparcamiento.entidad
lista += '<li><p>' + nombre_aparcamiento + '<a href="' + url_aparcamiento + '"> --> Más información</a></p></li>'
return lista
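# Paginated (5 per page) listing of the aparcamientos selected by the given user.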
def aparcamientos_seleccionados(user,request):
user_object = User.objects.get(username=user)
try:
usuario = Usuario.objects.get(nombre=user_object)
lista_seleccionados = Seleccionados.objects.filter(selector=usuario)
paginator = Paginator(lista_seleccionados,5)
page = request.GET.get('page')
try:
seleccionados = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
seleccionados = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999), deliver last page of results.
seleccionados = paginator.page(paginator.num_pages)
lista = "Listado de aparcamientos seleccionados por " + user + "<br>"
for i in seleccionados:
lista += "<br><li>Fecha de selección: " + str(i.fecha_seleccion)
lista += "<br><a href=" + i.aparcamiento.content_url + ">" + i.aparcamiento.nombre + "<br></a>"
lista += "Dirección: " + i.aparcamiento.clase_vial + " " + i.aparcamiento.localizacion + ", nº " + str(i.aparcamiento.num)
lista += "<br><a href=http://localhost:1234/aparcamientos/" + i.aparcamiento.entidad + ">" + "Más información</a><br>"
except ObjectDoesNotExist:
lista = "El usuario aún no ha seleccionado ningún aparcamiento"
seleccionados = ""
return lista,seleccionados
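# Render the "Accesibles" filter button carrying the current toggle value.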
def accesibles(value):
accesibles = '<form action="" method="POST">'
accesibles += '<button type="submit" name="Accesible" value="' + str(value) + '"> Accesibles</button></form>'
return accesibles
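# Front page: login form, top "me gusta" list, user pages, and a toggle that
# filters the listing down to accessible aparcamientos.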
@csrf_exempt
def pagina_principal(request):
formulario = login_form(request)
list_megustas = lista_megustas()
users = paginas_personales()
value = 1
accesible = accesibles(value)
template = get_template("index.html")
if request.user.is_authenticated():
username = str(request.user)
formulario = 'Bienvenido ' + username
formulario += '<br><br><a href="http://localhost:1234/logout" > Logout </a>'
if request.method == 'POST':
key = request.body.decode("utf-8").split('=')[0]
if key == 'Accesible':
value = request.POST['Accesible']
if value == '1':
lista_accesibles = Aparcamiento.objects.filter(accesibilidad=1)
lista = '<a href="http://localhost:1234/" > Volver </a>'
value = 0
for i in lista_accesibles:
nombre_aparcamiento = i.nombre
url_aparcamiento = i.content_url
lista += "<li><p>" + nombre_aparcamiento + "</p><a href=" + url_aparcamiento + ">" + url_aparcamiento + "</a></li>"
else:
lista = '<a href="http://localhost:1234/" > Volver </a>'
aparcamientos = Aparcamiento.objects.all()
for aparcamiento in aparcamientos:
nombre_aparcamiento = aparcamiento.nombre
url_aparcamiento = aparcamiento.entidad
lista += '<li><p>' + nombre_aparcamiento + '. URL del aparcamiento: ' + '<a href="aparcamientos/' + url_aparcamiento + '"> ⇾ Más información</a></br></p>'
value = 1
accesible = accesibles(value)
c = Context({'login': formulario, 'list_users':lista, 'accesible': accesible})
else:
init = Aparcamiento.objects.all()
if len(init) == 0:
get_data()
c = Context({'login': formulario, 'list':list_megustas, 'list_users':users, 'accesible': accesible})
renderizado = template.render(c)
return HttpResponse(renderizado)
def mylogout(request):
logout(request)
return redirect("/")
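# Personal page: the owner can retitle it, select aparcamientos, and tweak
# font size/color; other visitors only see the public view.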
@csrf_exempt
def usuarios(request, peticion):
formulario = '<form action="" method="POST">'
formulario += '<br>Introduzca un título nuevo a su página personal<br><input type="text" name="Titulo">'
formulario += '<input type="submit" value=" Actualizar"></form>'
css = '<form action="" method="POST">'
css += 'Modifique el tamaño de letra<br><input type="text" name="Letra">'
css += '<br><br>Modifique el color de letra <input type="color" name="Color"><br>'
css += '<br><input type="submit" value="Modificar"></form>'
aparcamientos = Aparcamiento.objects.all()
lista= "<br>LISTADO DE APARCAMIENTOS<br><br>"
for aparcamiento in aparcamientos:
nombre_aparcamiento = aparcamiento.nombre
lista += nombre_aparcamiento
lista += '<form action="" method="POST">'
lista += '<button type="submit" name="Seleccionar" value="' + nombre_aparcamiento + '">Seleccionar</button><br></form>'
user_object= User.objects.get(username=peticion)
if request.method == 'POST':
key = request.body.decode("utf-8").split('=')[0]
if key == "Titulo":
titulo = request.POST['Titulo']
try:
user = Usuario.objects.get(nombre=user_object)
user.titulo_pagina = titulo
user.save()
except ObjectDoesNotExist:
p = Usuario(nombre=user_object, titulo_pagina=titulo)
p.save()
elif key == "Seleccionar":
nombre_aparcamiento = request.POST['Seleccionar']
today = datetime.datetime.today()
try:
selector = Usuario.objects.get(nombre=user_object)
aparcamiento = Aparcamiento.objects.get(nombre=nombre_aparcamiento)
except:
p = Usuario(nombre=user_object)
p.save()
selector = Usuario.objects.get(nombre=user_object)
Check = False
lista_usuario = Seleccionados.objects.filter(selector=selector)
for i in lista_usuario:
if nombre_aparcamiento == i.aparcamiento.nombre:
Check=True
if Check == False:
p = Seleccionados(aparcamiento=aparcamiento, selector=selector, fecha_seleccion=today)
p.save()
elif key == "Letra":
letra = request.POST['Letra']
color = request.POST['Color']
try:
user = Usuario.objects.get(nombre=user_object)
except:
p = Usuario(nombre=user_object)
p.save()
user = Usuario.objects.get(nombre=user_object)
if letra == "":
letra = "15"
user.letra = letra
user.color = color
user.save()
lista_seleccionados, seleccionados= aparcamientos_seleccionados(peticion,request)
if request.user.is_authenticated():
username = str(request.user)
        if peticion != username:  # not this user's own page, so only the public part is accessible
template = get_template("publicuser.html")
titulo_pagina = "Página pública de " + peticion + "<br><br>"
form_user = 'Bienvenido ' + username
form_user += '<br><br><a href="http://localhost:1234/logout" > Logout </a>'
c = Context({'lista_selecc':lista_seleccionados, 'seleccionados':seleccionados, 'titulo': titulo_pagina, 'login':form_user})
        else:  # this is the user's own page, so the private part is accessible
template = get_template("privateuser.html")
try:
titulo_pagina = Usuario.objects.get(nombre=user_object).titulo_pagina
except ObjectDoesNotExist:
titulo_pagina = "Página personal de " + str(request.user) + "<br><br>"
c = Context({'lista_selecc':lista_seleccionados, 'seleccionados':seleccionados, 'lista': lista, 'form': formulario, 'css':css, 'titulo': titulo_pagina})
else:
template = get_template("publicuser.html")
titulo_pagina = "Página pública de " + peticion + "<br><br>"
form_user = 'Para loguearse vaya al botón de Inicio'
c = Context({'lista_selecc':lista_seleccionados, 'seleccionados':seleccionados, 'titulo': titulo_pagina, 'login':form_user})
renderizado = template.render(c)
return HttpResponse(renderizado)
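# Serve a per-user stylesheet built from the stored font size and color.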
def personalizar(request):
if request.user.is_authenticated():
user_object = User.objects.get(username=request.user)
user = Usuario.objects.get(nombre=user_object)
letra = user.letra
color = user.color
else:
letra = "14px"
color = "#FCFCFC"
css = get_template("change.css")
c = Context({'letra':letra, 'color':color})
renderizado = css.render(c)
return HttpResponse(renderizado, content_type="text/css")
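# Build an XML document (via minidom) describing the aparcamientos selected by the user.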
def usuarios_xml(request, peticion):
user_object = User.objects.get(username=peticion)
doc = Document()
cont = doc.createElement("Contenidos")
doc.appendChild(cont)
info = doc.createElement("infoDataset")
cont.appendChild(info)
nombre = doc.createElement("Nombre")
info.appendChild(nombre)
ptext = doc.createTextNode("XML de aparcamientos seleccionados por el usuario " + peticion)
nombre.appendChild(ptext)
url = doc.createElement("url")
info.appendChild(url)
ptext = doc.createTextNode("http://localhost:1234/" + peticion + "/xml/")
url.appendChild(ptext)
aparc = doc.createElement("Aparcamientos")
cont.appendChild(aparc)
try:
usuario = Usuario.objects.get(nombre=user_object)
lista_seleccionados = Seleccionados.objects.filter(selector=usuario)
for i in lista_seleccionados:
item = doc.createElement("Contenido")
aparc.appendChild(item)
atributo = doc.createElement("atributo")
item.appendChild(atributo)
atributo.setAttribute("nombre", "ID-ENTIDAD")
ptext = doc.createTextNode(i.aparcamiento.entidad)
atributo.appendChild(ptext)
atributo = doc.createElement("atributo")
item.appendChild(atributo)
atributo.setAttribute("nombre", "NOMBRE")
ptext = doc.createTextNode(i.aparcamiento.nombre)
atributo.appendChild(ptext)
atributo = doc.createElement("atributo")
item.appendChild(atributo)
atributo.setAttribute("nombre", "DESCRIPCION")
ptext = doc.createTextNode(i.aparcamiento.descripcion)
atributo.appendChild(ptext)
atributo = doc.createElement("atributo")
item.appendChild(atributo)
atributo.setAttribute("nombre", "ACCESIBILIDAD")
if i.aparcamiento.accesibilidad == True:
acces = 1
else:
acces = 0
ptext = doc.createTextNode(str(acces))
atributo.appendChild(ptext)
atributo = doc.createElement("atributo")
item.appendChild(atributo)
atributo.setAttribute("nombre", "CONTENT_URL")
ptext = doc.createTextNode(i.aparcamiento.content_url)
atributo.appendChild(ptext)
atributo = doc.createElement("atributo")
item.appendChild(atributo)
atributo.setAttribute("nombre", "LOCALIZACION")
ptext = doc.createTextNode(i.aparcamiento.localizacion)
atributo.appendChild(ptext)
atributo = doc.createElement("atributo")
item.appendChild(atributo)
atributo.setAttribute("nombre", "CLASE VIAL")
ptext = doc.createTextNode(i.aparcamiento.clase_vial)
atributo.appendChild(ptext)
atributo = doc.createElement("atributo")
item.appendChild(atributo)
atributo.setAttribute("nombre", "TIPO NUM")
ptext = doc.createTextNode(i.aparcamiento.tipo_num)
atributo.appendChild(ptext)
atributo = doc.createElement("atributo")
item.appendChild(atributo)
atributo.setAttribute("nombre", "NUM")
ptext = doc.createTextNode(str(i.aparcamiento.num))
atributo.appendChild(ptext)
atributo = doc.createElement("atributo")
item.appendChild(atributo)
atributo.setAttribute("nombre", "LOCALIDAD")
ptext = doc.createTextNode(i.aparcamiento.localidad)
atributo.appendChild(ptext)
atributo = doc.createElement("atributo")
item.appendChild(atributo)
atributo.setAttribute("nombre", "PROVINCIA")
ptext = doc.createTextNode(i.aparcamiento.provincia)
atributo.appendChild(ptext)
atributo = doc.createElement("atributo")
item.appendChild(atributo)
atributo.setAttribute("nombre", "CODIGO POSTAL")
ptext = doc.createTextNode(str(i.aparcamiento.codigo_postal))
atributo.appendChild(ptext)
atributo = doc.createElement("atributo")
item.appendChild(atributo)
atributo.setAttribute("nombre", "BARRIO")
ptext = doc.createTextNode(i.aparcamiento.barrio)
atributo.appendChild(ptext)
atributo = doc.createElement("atributo")
item.appendChild(atributo)
atributo.setAttribute("nombre", "DISTRITO")
ptext = doc.createTextNode(i.aparcamiento.distrito)
atributo.appendChild(ptext)
atributo = doc.createElement("atributo")
item.appendChild(atributo)
atributo.setAttribute("nombre", "COORDENADA X")
ptext = doc.createTextNode(str(i.aparcamiento.coordenada_x))
atributo.appendChild(ptext)
atributo = doc.createElement("atributo")
item.appendChild(atributo)
atributo.setAttribute("nombre", "COORDENADA Y")
ptext = doc.createTextNode(str(i.aparcamiento.coordenada_y))
atributo.appendChild(ptext)
atributo = doc.createElement("atributo")
item.appendChild(atributo)
datos = doc.createElement("DATOSDECONTACTO")
item.appendChild(datos)
atributo = doc.createElement("atributo")
datos.appendChild(atributo)
atributo.setAttribute("nombre", "TELEFONO")
ptext = doc.createTextNode(i.aparcamiento.telefono)
atributo.appendChild(ptext)
atributo = doc.createElement("atributo")
datos.appendChild(atributo)
atributo.setAttribute("nombre", "EMAIL")
ptext = doc.createTextNode(i.aparcamiento.email)
atributo.appendChild(ptext)
    except ObjectDoesNotExist:
        pass  # the user has not selected any aparcamientos yet
xml = doc.toprettyxml(indent=" ")
return HttpResponse(xml, content_type = "text/xml")
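# Aparcamientos index with an optional filter by distrito.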
@csrf_exempt
def aparcamientos(request):
lista = lista_aparcamientos()
filtrar = '<form action="" method="POST">'
filtrar += '<br><br><input type="text" name="distrito">'
filtrar += '<input type="submit" value="Filtrar por distrito">'
template = get_template("aparcamientos.html")
if request.user.is_authenticated():
username = str(request.user)
form_user = 'Bienvenido ' + username
form_user += '<br><br><a href="http://localhost:1234/logout" > Logout </a>'
else:
form_user = "Para loguearse vaya al botón de Inicio"
if request.method == "POST":
filtro_distrito = request.POST['distrito']
filtro_distrito = filtro_distrito.upper()
if filtro_distrito == '':
lista_filtrada = "No ha introducido ningún filtro, introduzca distrito para filtrar " + lista
else:
aparcamientos_filtrados = Aparcamiento.objects.all()
Encontrado = False
lista_filtrada = "Los aparcamientos en el " + filtro_distrito + " son: "
for i in aparcamientos_filtrados:
if filtro_distrito == i.distrito:
Encontrado = True
nombre_aparcamiento = i.nombre
url_aparcamiento = i.content_url
lista_filtrada += "<p>" + nombre_aparcamiento + "</p><li><a href=" + url_aparcamiento + ">" + url_aparcamiento + "</a></li>"
            if not Encontrado:  # no parking matched, so the submitted district is not valid
                lista_filtrada = "Introduzca un nuevo distrito. " + filtro_distrito + " no es válido"
c = Context({'distrito': filtrar, 'lista': lista_filtrada, 'login':form_user})
else:
c = Context({'distrito': filtrar, 'lista': lista, 'login':form_user})
renderizado = template.render(c)
return HttpResponse(renderizado)
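
# Detail page for one parking: the "like" button and the comment form both POST
# to the same URL, after which the stored information is rendered.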
@csrf_exempt
def aparcamientos_id(request, recurso):
template = get_template("aparcamientos.html")
num_megustas = 0
if request.method == 'POST':
        # The two forms on this page are told apart by the name of the first
        # posted field: the like button sends "Me Gusta" (URL-encoded as
        # "Me+Gusta"), the comment form sends "Comentario".
        key = request.body.decode("utf-8").split('=')[0]
if key == 'Me+Gusta':
aparcamiento = Aparcamiento.objects.get(entidad=recurso)
aparcamiento.contador_megusta = aparcamiento.contador_megusta + 1
aparcamiento.save()
num_megustas = aparcamiento.contador_megusta
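            # Note: this read-modify-write on contador_megusta is racy under
            # concurrent requests; an update with F('contador_megusta') + 1
            # would make the increment atomic.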
else:
coment = request.POST['Comentario']
aparcamiento = Aparcamiento.objects.get(entidad=recurso)
aparcamiento.contador_coments = aparcamiento.contador_coments + 1
aparcamiento.save()
            p = Comentario(aparcamiento=aparcamiento, coment=coment)
p.save()
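    # Whatever the method, build the detail page for this parking below.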
try:
aparcamiento = Aparcamiento.objects.get(entidad=recurso)
nombre = aparcamiento.nombre
descripcion = aparcamiento.descripcion
accesibilidad = aparcamiento.accesibilidad
localizacion = aparcamiento.localizacion
via = aparcamiento.clase_vial
num = aparcamiento.num
localidad = aparcamiento.localidad
provincia = aparcamiento.provincia
codigo_postal = aparcamiento.codigo_postal
barrio = aparcamiento.barrio
distrito = aparcamiento.distrito
coordenada_x = aparcamiento.coordenada_x
coordenada_y = aparcamiento.coordenada_y
telefono = aparcamiento.telefono
email = aparcamiento.email
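        # Readable placeholders for optional contact fields and the accessibility flag.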
if telefono == '':
telefono = "No disponible"
if email == '':
email = "No disponible"
if accesibilidad == 1:
acces = "Libre"
else:
acces = "Ocupado"
lista_aparcamientos = Aparcamiento.objects.all()
list_coments = ""
aparcamiento = Aparcamiento.objects.get(entidad=recurso)
num_megustas = aparcamiento.contador_megusta
for i in lista_aparcamientos:
if i.entidad == recurso:
comentarios = Comentario.objects.filter(aparcamiento=i)
if len(comentarios) != 0:
list_coments = "<li><p>COMENTARIOS</p><ol>"
for j in comentarios:
list_coments += "<li>" + j.coment + "<br>"
Response = "<p>INFORMACIÓN ACERCA DEL APARCAMIENTO CON ID: " + recurso + "</br></p>"
Response += "<a href=" + i.content_url + ">" + i.nombre + "</a><br>"
Response += "Descripción: " + descripcion + "</br>"
Response += "Accesibilidad: " + acces + "</br>"
Response += "Localización: " + via + " " + localizacion + ", nº " + str(num)
Response += " " + localidad + " (" + str(codigo_postal) + ")</br>"
Response += "Ubicación: " + barrio + " " + distrito + " Coordenadas: " + str(coordenada_x) + " , " + str(coordenada_y) + "<br><br>"
Response += "INFORMACIÓN DE CONTACTO </br>"
Response += "Teléfono: " + telefono + "</br>"
Response += "Email: " + email + "</br>" + list_coments + "</ol>"
                if num_megustas != 0:
                    Response += "</br><li>Número de me gustas es: " + str(num_megustas) + "<br>"
                else:
                    Response += "</br><li>Sé el primero en indicar que te gusta la página<br>"
if request.user.is_authenticated():
username = str(request.user)
form_user = 'Bienvenido ' + username
form_user += '<br><br><a href="http://localhost:1234/logout" > Logout </a>'
formulario = '<form action="" method="POST">'
formulario += '<br>Puede introducir un comentario si lo desea ' + str(request.user) + '<br><input type="text" name="Comentario">'
formulario += '<input type="submit" value="Comentar"></form>'
Response += formulario
else:
form_user = "Para loguearse vaya al botón de Inicio"
megusta = ''
megusta += '<br> Indica que te gusta este aparcamiento</br>'
megusta += '<form action="" method="POST">'
megusta += '<button type="submit" name="Me Gusta" value="Me Gusta"> +1 </button></form>'
Response += megusta
    except ObjectDoesNotExist:
        Response = "Este id no se corresponde con ningún aparcamiento"
        # form_user is also needed by the template below, so it must be
        # defined on this path as well (otherwise rendering raises NameError).
        if request.user.is_authenticated():
            form_user = 'Bienvenido ' + str(request.user)
            form_user += '<br><br><a href="http://localhost:1234/logout" > Logout </a>'
        else:
            form_user = "Para loguearse vaya al botón de Inicio"
c = Context({'lista': Response, 'login': form_user})
renderizado = template.render(c)
return HttpResponse(renderizado)
def about(request):
template = get_template("about.html")
Cuerpo = "DESCRIPCIÓN DE LA APLICACIÓN DE APARCAMIENTOS DE MADRID<br><br>"
Cuerpo += "------------------------------------ Página principal ---------------------------------------------------"
Cuerpo += "<li> Tiene un menú bajo el banner en el que nos permite dirigirnos a Inicio (página principal), a Todos (listado de todos los aparcamientos) o a About (página de ayuda y explicación de la web) </li>"
Cuerpo += "<li> Un botón Accesibles, que si se selecciona una vez mostrará un listado con sólo aquellos aparcamientos que estén disponibles en ese momento. Si se selecciona de nuevo, mostrará un listado con todos los aparcamientos registrados en la aplicación. Para volver a la página principal se selecciona 'Volver'.</li>"
Cuerpo += "<li> Bajo el botón Accesibles hay un listado de páginas personales de usuario: Muestra un listado con la interfaz pública de los usuarios registrados en la aplicación. Se puede acceder a ellas seleccionando el enlace del título de sus páginas de usuario.</li>"
Cuerpo += "<li> Listado de Aparcamientos con más me gusta: Mostrará los 5 aparcamientos más valorados por usuarios.</li></br></br>"
Cuerpo += "------------------------------------ Página con los aparcamientos ---------------------------------------------------"
Cuerpo += "<li> Se puede acceder a través del botón 'Todos' de la Página Principal.</li>"
Cuerpo += "<li> Muestra un listado con todos los aparcamientos registrados junto con un enlace a 'Más Información' para cada aparcamiento. Este enlace mostrará información más detallada acerca de este aparcamiento y también sus comentarios.</li>"
Cuerpo += "<li> Filtrar por distrito: permite el filtrado por un distrito seleccionado. Mostrará un listado de aquellos aparcamientos que se correspondan con el distrito introducido.</li></br></br>"
Cuerpo += "------------------------------------ Interfaz pública de usuario ---------------------------------------------------"
Cuerpo += "<li> Muestra un listado con los aparcamientos seleccionados por el usuario elegido. Sólo se visualizan de 5 en 5.</li>"
Cuerpo += "<li> Tiene un menú bajo el banner en el que nos permite dirigirnos a Inicio (página principal), a Todos (listado de todos los aparcamientos) o a About (página de ayuda y explicación de la web) </li></br></br>"
Cuerpo += "------------------------------------ Interfaz privada de usuario ---------------------------------------------------"
Cuerpo += "<li> Un usuario podrá loguearse únicamente desde la Página Principal. Para ello debe rellenar el formulario superior. Una vez logueado, accede a su página personal de usuario. Donde puede encontrar: </li>"
Cuerpo += "<li> El listado con los aparcamientos seleccionados por ese usuario, con un enlace a la página del aparcamiento y a su información. Si se accede a 'Más Información', se mostrará la página de ese aparcamiento junto con un formulario para que el usuario pueda poner comentarios si lo desea. </li>"
Cuerpo += "<li> Formulario para cambiar el título de su página personal.</li>"
Cuerpo += "<li> Formulario para cambiar el color y tamaño de letra de todas las páginas de la aplicación.</li>"
Cuerpo += "<li> Listado con todos los aparcamientos registrados para poder seleccionarlos pulsando 'Seleccionar'.</li></br></br>"
    Cuerpo += "------------------------------------ Pie de página ---------------------------------------------------"
    Cuerpo += "<li> Si se selecciona el enlace Datos munimadrid, se redirecciona a la página original de la aplicación de Aparcamientos de Madrid.</li>"
    Cuerpo += "<li> Si se selecciona el enlace correspondiente al fichero XML muestra el XML con la información de todos los aparcamientos registrados en la página.</li></br></br>"
Cuerpo += "------------------------------------ Página XML de un usuario ---------------------------------------------------"
Cuerpo += "<li> Si se realiza el recurso 'usuario'/XML, se muestra el XML con la información de los aparcamientos seleccionados por el usuario introducido.</li></br></br>"
c = Context({'lista': Cuerpo})
renderizado = template.render(c)
return HttpResponse(renderizado)
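
# A minimal sketch of the URLconf these views assume. The exact patterns and
# module layout are hypothetical; only the paths are implied by the links built
# above (e.g. /aparcamientos/<id> and /<usuario>/xml/). The /logout view is not
# defined in this module.
#
# from django.conf.urls import url
# from . import views
#
# urlpatterns = [
#     url(r'^$', views.pagina_principal),
#     url(r'^login$', views.loginuser),
#     url(r'^aparcamientos/$', views.aparcamientos),
#     url(r'^aparcamientos/(?P<recurso>[^/]+)$', views.aparcamientos_id),
#     url(r'^about/$', views.about),
#     url(r'^change.css$', views.personalizar),
#     url(r'^(?P<peticion>\w+)/xml/$', views.usuarios_xml),
#     url(r'^(?P<peticion>\w+)/$', views.usuarios),
# ]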
accesibles(value)\n template = get_template('index.html')\n if request.user.is_authenticated():\n username = str(request.user)\n formulario = 'Bienvenido ' + username\n formulario += (\n '<br><br><a href=\"http://localhost:1234/logout\" > Logout </a>')\n if request.method == 'POST':\n key = request.body.decode('utf-8').split('=')[0]\n if key == 'Accesible':\n value = request.POST['Accesible']\n if value == '1':\n lista_accesibles = Aparcamiento.objects.filter(accesibilidad=1)\n lista = '<a href=\"http://localhost:1234/\" > Volver </a>'\n value = 0\n for i in lista_accesibles:\n nombre_aparcamiento = i.nombre\n url_aparcamiento = i.content_url\n lista += ('<li><p>' + nombre_aparcamiento +\n '</p><a href=' + url_aparcamiento + '>' +\n url_aparcamiento + '</a></li>')\n else:\n lista = '<a href=\"http://localhost:1234/\" > Volver </a>'\n aparcamientos = Aparcamiento.objects.all()\n for aparcamiento in aparcamientos:\n nombre_aparcamiento = aparcamiento.nombre\n url_aparcamiento = aparcamiento.entidad\n lista += ('<li><p>' + nombre_aparcamiento +\n '. URL del aparcamiento: ' +\n '<a href=\"aparcamientos/' + url_aparcamiento +\n '\">\\t⇾ Más información</a></br></p>')\n value = 1\n accesible = accesibles(value)\n c = Context({'login': formulario, 'list_users': lista,\n 'accesible': accesible})\n else:\n init = Aparcamiento.objects.all()\n if len(init) == 0:\n get_data()\n c = Context({'login': formulario, 'list': list_megustas,\n 'list_users': users, 'accesible': accesible})\n renderizado = template.render(c)\n return HttpResponse(renderizado)\n\n\ndef mylogout(request):\n logout(request)\n return redirect('/')\n\n\n@csrf_exempt\ndef usuarios(request, peticion):\n formulario = '<form action=\"\" method=\"POST\">'\n formulario += (\n '<br>Introduzca un título nuevo a su página personal<br><input type=\"text\" name=\"Titulo\">'\n )\n formulario += '<input type=\"submit\" value=\" Actualizar\"></form>'\n css = '<form action=\"\" method=\"POST\">'\n css += 'Modifique el tamaño de letra<br><input type=\"text\" name=\"Letra\">'\n css += (\n '<br><br>Modifique el color de letra\\t<input type=\"color\" name=\"Color\"><br>'\n )\n css += '<br><input type=\"submit\" value=\"Modificar\"></form>'\n aparcamientos = Aparcamiento.objects.all()\n lista = '<br>LISTADO DE APARCAMIENTOS<br><br>'\n for aparcamiento in aparcamientos:\n nombre_aparcamiento = aparcamiento.nombre\n lista += nombre_aparcamiento\n lista += '<form action=\"\" method=\"POST\">'\n lista += ('<button type=\"submit\" name=\"Seleccionar\" value=\"' +\n nombre_aparcamiento + '\">Seleccionar</button><br></form>')\n user_object = User.objects.get(username=peticion)\n if request.method == 'POST':\n key = request.body.decode('utf-8').split('=')[0]\n if key == 'Titulo':\n titulo = request.POST['Titulo']\n try:\n user = Usuario.objects.get(nombre=user_object)\n user.titulo_pagina = titulo\n user.save()\n except ObjectDoesNotExist:\n p = Usuario(nombre=user_object, titulo_pagina=titulo)\n p.save()\n elif key == 'Seleccionar':\n nombre_aparcamiento = request.POST['Seleccionar']\n today = datetime.datetime.today()\n try:\n selector = Usuario.objects.get(nombre=user_object)\n aparcamiento = Aparcamiento.objects.get(nombre=\n nombre_aparcamiento)\n except:\n p = Usuario(nombre=user_object)\n p.save()\n selector = Usuario.objects.get(nombre=user_object)\n Check = False\n lista_usuario = Seleccionados.objects.filter(selector=selector)\n for i in lista_usuario:\n if nombre_aparcamiento == i.aparcamiento.nombre:\n Check = True\n if Check == False:\n p = 
Seleccionados(aparcamiento=aparcamiento, selector=\n selector, fecha_seleccion=today)\n p.save()\n elif key == 'Letra':\n letra = request.POST['Letra']\n color = request.POST['Color']\n try:\n user = Usuario.objects.get(nombre=user_object)\n except:\n p = Usuario(nombre=user_object)\n p.save()\n user = Usuario.objects.get(nombre=user_object)\n if letra == '':\n letra = '15'\n user.letra = letra\n user.color = color\n user.save()\n lista_seleccionados, seleccionados = aparcamientos_seleccionados(peticion,\n request)\n if request.user.is_authenticated():\n username = str(request.user)\n if peticion != username:\n template = get_template('publicuser.html')\n titulo_pagina = 'Página pública de ' + peticion + '<br><br>'\n form_user = 'Bienvenido ' + username\n form_user += (\n '<br><br><a href=\"http://localhost:1234/logout\" > Logout </a>')\n c = Context({'lista_selecc': lista_seleccionados,\n 'seleccionados': seleccionados, 'titulo': titulo_pagina,\n 'login': form_user})\n else:\n template = get_template('privateuser.html')\n try:\n titulo_pagina = Usuario.objects.get(nombre=user_object\n ).titulo_pagina\n except ObjectDoesNotExist:\n titulo_pagina = 'Página personal de ' + str(request.user\n ) + '<br><br>'\n c = Context({'lista_selecc': lista_seleccionados,\n 'seleccionados': seleccionados, 'lista': lista, 'form':\n formulario, 'css': css, 'titulo': titulo_pagina})\n else:\n template = get_template('publicuser.html')\n titulo_pagina = 'Página pública de ' + peticion + '<br><br>'\n form_user = 'Para loguearse vaya al botón de Inicio'\n c = Context({'lista_selecc': lista_seleccionados, 'seleccionados':\n seleccionados, 'titulo': titulo_pagina, 'login': form_user})\n renderizado = template.render(c)\n return HttpResponse(renderizado)\n\n\ndef personalizar(request):\n if request.user.is_authenticated():\n user_object = User.objects.get(username=request.user)\n user = Usuario.objects.get(nombre=user_object)\n letra = user.letra\n color = user.color\n else:\n letra = '14px'\n color = '#FCFCFC'\n css = get_template('change.css')\n c = Context({'letra': letra, 'color': color})\n renderizado = css.render(c)\n return HttpResponse(renderizado, content_type='text/css')\n\n\ndef usuarios_xml(request, peticion):\n user_object = User.objects.get(username=peticion)\n doc = Document()\n cont = doc.createElement('Contenidos')\n doc.appendChild(cont)\n info = doc.createElement('infoDataset')\n cont.appendChild(info)\n nombre = doc.createElement('Nombre')\n info.appendChild(nombre)\n ptext = doc.createTextNode(\n 'XML de aparcamientos seleccionados por el usuario ' + peticion)\n nombre.appendChild(ptext)\n url = doc.createElement('url')\n info.appendChild(url)\n ptext = doc.createTextNode('http://localhost:1234/' + peticion + '/xml/')\n url.appendChild(ptext)\n aparc = doc.createElement('Aparcamientos')\n cont.appendChild(aparc)\n try:\n usuario = Usuario.objects.get(nombre=user_object)\n lista_seleccionados = Seleccionados.objects.filter(selector=usuario)\n for i in lista_seleccionados:\n item = doc.createElement('Contenido')\n aparc.appendChild(item)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'ID-ENTIDAD')\n ptext = doc.createTextNode(i.aparcamiento.entidad)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'NOMBRE')\n ptext = doc.createTextNode(i.aparcamiento.nombre)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n 
item.appendChild(atributo)\n atributo.setAttribute('nombre', 'DESCRIPCION')\n ptext = doc.createTextNode(i.aparcamiento.descripcion)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'ACCESIBILIDAD')\n if i.aparcamiento.accesibilidad == True:\n acces = 1\n else:\n acces = 0\n ptext = doc.createTextNode(str(acces))\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'CONTENT_URL')\n ptext = doc.createTextNode(i.aparcamiento.content_url)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'LOCALIZACION')\n ptext = doc.createTextNode(i.aparcamiento.localizacion)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'CLASE VIAL')\n ptext = doc.createTextNode(i.aparcamiento.clase_vial)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'TIPO NUM')\n ptext = doc.createTextNode(i.aparcamiento.tipo_num)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'NUM')\n ptext = doc.createTextNode(str(i.aparcamiento.num))\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'LOCALIDAD')\n ptext = doc.createTextNode(i.aparcamiento.localidad)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'PROVINCIA')\n ptext = doc.createTextNode(i.aparcamiento.provincia)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'CODIGO POSTAL')\n ptext = doc.createTextNode(str(i.aparcamiento.codigo_postal))\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'BARRIO')\n ptext = doc.createTextNode(i.aparcamiento.barrio)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'DISTRITO')\n ptext = doc.createTextNode(i.aparcamiento.distrito)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'COORDENADA X')\n ptext = doc.createTextNode(str(i.aparcamiento.coordenada_x))\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n atributo.setAttribute('nombre', 'COORDENADA Y')\n ptext = doc.createTextNode(str(i.aparcamiento.coordenada_y))\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n item.appendChild(atributo)\n datos = doc.createElement('DATOSDECONTACTO')\n item.appendChild(datos)\n atributo = doc.createElement('atributo')\n datos.appendChild(atributo)\n atributo.setAttribute('nombre', 'TELEFONO')\n ptext = doc.createTextNode(i.aparcamiento.telefono)\n atributo.appendChild(ptext)\n atributo = doc.createElement('atributo')\n datos.appendChild(atributo)\n atributo.setAttribute('nombre', 'EMAIL')\n ptext = doc.createTextNode(i.aparcamiento.email)\n atributo.appendChild(ptext)\n except:\n print('')\n xml = doc.toprettyxml(indent=' ')\n return HttpResponse(xml, 
content_type='text/xml')\n\n\n@csrf_exempt\ndef aparcamientos(request):\n lista = lista_aparcamientos()\n filtrar = '<form action=\"\" method=\"POST\">'\n filtrar += '<br><br><input type=\"text\" name=\"distrito\">'\n filtrar += '<input type=\"submit\" value=\"Filtrar por distrito\">'\n template = get_template('aparcamientos.html')\n if request.user.is_authenticated():\n username = str(request.user)\n form_user = 'Bienvenido ' + username\n form_user += (\n '<br><br><a href=\"http://localhost:1234/logout\" > Logout </a>')\n else:\n form_user = 'Para loguearse vaya al botón de Inicio'\n if request.method == 'POST':\n filtro_distrito = request.POST['distrito']\n filtro_distrito = filtro_distrito.upper()\n if filtro_distrito == '':\n lista_filtrada = (\n 'No ha introducido ningún filtro, introduzca distrito para filtrar '\n + lista)\n else:\n aparcamientos_filtrados = Aparcamiento.objects.all()\n Encontrado = False\n lista_filtrada = ('Los aparcamientos en el ' + filtro_distrito +\n ' son: ')\n for i in aparcamientos_filtrados:\n if filtro_distrito == i.distrito:\n Encontrado = True\n nombre_aparcamiento = i.nombre\n url_aparcamiento = i.content_url\n lista_filtrada += ('<p>' + nombre_aparcamiento +\n '</p><li><a href=' + url_aparcamiento + '>' +\n url_aparcamiento + '</a></li>')\n if Encontrado == False:\n lista_filtrada = ('Introduzca un nuevo distrito. ' +\n filtro_distrito + ' no es válido')\n c = Context({'distrito': filtrar, 'lista': lista_filtrada, 'login':\n form_user})\n else:\n c = Context({'distrito': filtrar, 'lista': lista, 'login': form_user})\n renderizado = template.render(c)\n return HttpResponse(renderizado)\n\n\n@csrf_exempt\ndef aparcamientos_id(request, recurso):\n template = get_template('aparcamientos.html')\n num_megustas = 0\n if request.method == 'POST':\n key = request.body.decode('utf-8').split('=')[0]\n print(key)\n if key == 'Me+Gusta':\n aparcamiento = Aparcamiento.objects.get(entidad=recurso)\n aparcamiento.contador_megusta = aparcamiento.contador_megusta + 1\n aparcamiento.save()\n num_megustas = aparcamiento.contador_megusta\n else:\n coment = request.POST['Comentario']\n aparcamiento = Aparcamiento.objects.get(entidad=recurso)\n aparcamiento.contador_coments = aparcamiento.contador_coments + 1\n aparcamiento.save()\n p = Comentario(aparcamiento=aparcamiento, coment=coment)\n p.save()\n try:\n aparcamiento = Aparcamiento.objects.get(entidad=recurso)\n nombre = aparcamiento.nombre\n descripcion = aparcamiento.descripcion\n accesibilidad = aparcamiento.accesibilidad\n localizacion = aparcamiento.localizacion\n via = aparcamiento.clase_vial\n num = aparcamiento.num\n localidad = aparcamiento.localidad\n provincia = aparcamiento.provincia\n codigo_postal = aparcamiento.codigo_postal\n barrio = aparcamiento.barrio\n distrito = aparcamiento.distrito\n coordenada_x = aparcamiento.coordenada_x\n coordenada_y = aparcamiento.coordenada_y\n telefono = aparcamiento.telefono\n email = aparcamiento.email\n if telefono == '':\n telefono = 'No disponible'\n if email == '':\n email = 'No disponible'\n if accesibilidad == 1:\n acces = 'Libre'\n else:\n acces = 'Ocupado'\n lista_aparcamientos = Aparcamiento.objects.all()\n list_coments = ''\n aparcamiento = Aparcamiento.objects.get(entidad=recurso)\n num_megustas = aparcamiento.contador_megusta\n for i in lista_aparcamientos:\n if i.entidad == recurso:\n comentarios = Comentario.objects.filter(aparcamiento=i)\n if len(comentarios) != 0:\n list_coments = '<li><p>COMENTARIOS</p><ol>'\n for j in comentarios:\n list_coments += 
'<li>' + j.coment + '<br>'\n Response = (\n '<p>INFORMACIÓN ACERCA DEL APARCAMIENTO CON ID: ' +\n recurso + '</br></p>')\n Response += ('<a href=' + i.content_url + '>' + i.nombre +\n '</a><br>')\n Response += 'Descripción: ' + descripcion + '</br>'\n Response += 'Accesibilidad: ' + acces + '</br>'\n Response += ('Localización: ' + via + ' ' + localizacion +\n ', nº ' + str(num))\n Response += ' ' + localidad + ' (' + str(codigo_postal\n ) + ')</br>'\n Response += ('Ubicación: ' + barrio + ' ' + distrito +\n ' Coordenadas: ' + str(coordenada_x) + ' , ' + str(\n coordenada_y) + '<br><br>')\n Response += 'INFORMACIÓN DE CONTACTO </br>'\n Response += 'Teléfono: ' + telefono + '</br>'\n Response += ('Email: ' + email + '</br>' + list_coments +\n '</ol>')\n if num_megustas != 0:\n Response += '</br><li>Numero de me gustas es: ' + str(\n num_megustas) + '<br>'\n else:\n Response += (\n '</br><li>Se el primero en indicar que te gusta la página<br>'\n )\n if request.user.is_authenticated():\n username = str(request.user)\n form_user = 'Bienvenido ' + username\n form_user += (\n '<br><br><a href=\"http://localhost:1234/logout\" > Logout </a>')\n formulario = '<form action=\"\" method=\"POST\">'\n formulario += (\n '<br>Puede introducir un comentario si lo desea ' + str(\n request.user) + '<br><input type=\"text\" name=\"Comentario\">')\n formulario += '<input type=\"submit\" value=\"Comentar\"></form>'\n Response += formulario\n else:\n form_user = 'Para loguearse vaya al botón de Inicio'\n megusta = ''\n megusta += '<br> Indica que te gusta este aparcamiento</br>'\n megusta += '<form action=\"\" method=\"POST\">'\n megusta += (\n '<button type=\"submit\" name=\"Me Gusta\" value=\"Me Gusta\"> +1 </button></form>'\n )\n Response += megusta\n except ObjectDoesNotExist:\n Response = 'Este id no se corresponde con ningún aparcamiento'\n c = Context({'lista': Response, 'login': form_user})\n renderizado = template.render(c)\n return HttpResponse(renderizado)\n\n\ndef about(request):\n template = get_template('about.html')\n Cuerpo = 'DESCRIPCIÓN DE LA APLICACIÓN DE APARCAMIENTOS DE MADRID<br><br>'\n Cuerpo += (\n '------------------------------------ Página principal ---------------------------------------------------'\n )\n Cuerpo += (\n '<li> Tiene un menú bajo el banner en el que nos permite dirigirnos a Inicio (página principal), a Todos (listado de todos los aparcamientos) o a About (página de ayuda y explicación de la web) </li>'\n )\n Cuerpo += (\n \"<li> Un botón Accesibles, que si se selecciona una vez mostrará un listado con sólo aquellos aparcamientos que estén disponibles en ese momento. Si se selecciona de nuevo, mostrará un listado con todos los aparcamientos registrados en la aplicación. Para volver a la página principal se selecciona 'Volver'.</li>\"\n )\n Cuerpo += (\n '<li> Bajo el botón Accesibles hay un listado de páginas personales de usuario: Muestra un listado con la interfaz pública de los usuarios registrados en la aplicación. 
Se puede acceder a ellas seleccionando el enlace del título de sus páginas de usuario.</li>'\n )\n Cuerpo += (\n '<li> Listado de Aparcamientos con más me gusta: Mostrará los 5 aparcamientos más valorados por usuarios.</li></br></br>'\n )\n Cuerpo += (\n '------------------------------------ Página con los aparcamientos ---------------------------------------------------'\n )\n Cuerpo += (\n \"<li> Se puede acceder a través del botón 'Todos' de la Página Principal.</li>\"\n )\n Cuerpo += (\n \"<li> Muestra un listado con todos los aparcamientos registrados junto con un enlace a 'Más Información' para cada aparcamiento. Este enlace mostrará información más detallada acerca de este aparcamiento y también sus comentarios.</li>\"\n )\n Cuerpo += (\n '<li> Filtrar por distrito: permite el filtrado por un distrito seleccionado. Mostrará un listado de aquellos aparcamientos que se correspondan con el distrito introducido.</li></br></br>'\n )\n Cuerpo += (\n '------------------------------------ Interfaz pública de usuario ---------------------------------------------------'\n )\n Cuerpo += (\n '<li> Muestra un listado con los aparcamientos seleccionados por el usuario elegido. Sólo se visualizan de 5 en 5.</li>'\n )\n Cuerpo += (\n '<li> Tiene un menú bajo el banner en el que nos permite dirigirnos a Inicio (página principal), a Todos (listado de todos los aparcamientos) o a About (página de ayuda y explicación de la web) </li></br></br>'\n )\n Cuerpo += (\n '------------------------------------ Interfaz privada de usuario ---------------------------------------------------'\n )\n Cuerpo += (\n '<li> Un usuario podrá loguearse únicamente desde la Página Principal. Para ello debe rellenar el formulario superior. Una vez logueado, accede a su página personal de usuario. Donde puede encontrar: </li>'\n )\n Cuerpo += (\n \"<li> El listado con los aparcamientos seleccionados por ese usuario, con un enlace a la página del aparcamiento y a su información. Si se accede a 'Más Información', se mostrará la página de ese aparcamiento junto con un formulario para que el usuario pueda poner comentarios si lo desea. </li>\"\n )\n Cuerpo += (\n '<li> Formulario para cambiar el título de su página personal.</li>')\n Cuerpo += (\n '<li> Formulario para cambiar el color y tamaño de letra de todas las páginas de la aplicación.</li>'\n )\n Cuerpo += (\n \"<li> Listado con todos los aparcamientos registrados para poder seleccionarlos pulsando 'Seleccionar'.</li></br></br>\"\n )\n Cuerpo += (\n '------------------------------------ Pie de pagina ---------------------------------------------------'\n )\n Cuerpo += (\n '<li> Si se selecciona el enlace Datos munimadrid, se redirecciona a la página original de la aplicación de Aparcamientos de Madrid.</li>'\n )\n Cuerpo += (\n '<li> Si se selecciona el enlace correspodiente al fichero XML muestra el XML con la información de todos los aparcamientos registrados en la página.</li></br></br>'\n )\n Cuerpo += (\n '------------------------------------ Página XML de un usuario ---------------------------------------------------'\n )\n Cuerpo += (\n \"<li> Si se realiza el recurso 'usuario'/XML, se muestra el XML con la información de los aparcamientos seleccionados por el usuario introducido.</li></br></br>\"\n )\n c = Context({'lista': Cuerpo})\n renderizado = template.render(c)\n return HttpResponse(renderizado)\n",
"step-5": "from django.shortcuts import render\nfrom django.shortcuts import redirect\nfrom django.http import HttpResponse\nfrom .models import *\nfrom django.contrib.auth import logout, authenticate, login\nfrom django.contrib.auth.decorators import login_required\nfrom django.template.loader import get_template\nfrom django.template import Context\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.template.context_processors import csrf\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\nimport xml.etree.ElementTree as etree\nfrom xml.dom.minidom import Document, parse\nimport xml.dom.minidom as dom\n\nimport datetime\nimport sys\nfrom .parser import get_data\nfrom django.http import QueryDict\nimport urllib\n\n\n# Create your views here.\n@csrf_exempt\ndef login_form(request):\n\n formulario = '<form action=\"login\" method=\"POST\">'\n formulario += 'Nombre<br><input type=\"text\" name=\"Usuario\"><br>'\n formulario += 'Contraseña<br><input type=\"password\" name=\"Password\"><br>'\n formulario += '<br><input type=\"submit\" value=\"Entrar\"></form>'\n return formulario\n\n@csrf_exempt\ndef loginuser(request):\n\n\tusername = request.POST['Usuario']\n\tpassword = request.POST['Password']\n\tuser = authenticate(username=username, password=password)\n\tif user is not None:\n\t\tlogin(request,user)\n\t\tdirecc = '/' + str(user)\n\t\treturn redirect(direcc)\n\telse:\n\t\tError = \"Por favor, introduzca un usuario y contraseña válidos\"\n\t\ttemplate = get_template(\"fail.html\")\n\t\tc = Context ({'Error': Error})\n\t\trenderizado = template.render(c)\n\t\treturn HttpResponse(renderizado)\n\ndef lista_megustas():\n\n lista_todos = Aparcamiento.objects.all()\n lista_ordenada = lista_todos.order_by(\"-contador_megusta\")[:5]\n Response = \"LISTADO DE APARCAMIENTOS CON MÁS ME GUSTA<br><br>\"\n Existe = False\n for i in lista_ordenada:\n megustas = i.contador_megusta\n #comentarios = Comentario.objects.filter(aparcamiento=i)\n if megustas != 0:\n Response += \"<li><a href=\" + i.content_url + \">\" + i.nombre + \"<br></a>\"\n Response += \"Dirección: \" + i.clase_vial + \" \" + i.localizacion + \", nº \" + str(i.num)\n Response += \"<br><a href=http://localhost:1234/aparcamientos/\" + i.entidad + \">\" + \"Más información<br></a><br>\"\n Existe = True\n if Existe == False:\n Response += \"Aún no se han registrado comentarios para ningún aparcamiento\"\n\n Response += \"</br></br>\"\n return Response\n\ndef paginas_personales():\n\n\tLista = \"PÁGINAS DE USUARIOS<br><br>\"\n\tusuarios = User.objects.all()\n\tfor i in usuarios:\n\t\ttry:\n\t\t\tpagina = Usuario.objects.get(nombre=i.id).titulo_pagina\n\t\texcept ObjectDoesNotExist:\n\t\t\tpagina = \"Página de \" + i.username\n\t\tLista += \"<a href=http://localhost:1234/\" + i.username + \">\" + pagina + \"</a>\tUsuario: \" + i.username + \"<br>\"\n\n\treturn Lista\n\ndef lista_aparcamientos():\n\n\tlista = ''\n\taparcamientos = Aparcamiento.objects.all()\n\tfor aparcamiento in aparcamientos:\n\t\tnombre_aparcamiento = aparcamiento.nombre\n\t\turl_aparcamiento = aparcamiento.entidad\n\t\tlista += '<li><p>' + nombre_aparcamiento + '<a href=\"' + url_aparcamiento + '\">\t--> Más información</a></p></li>'\n\n\treturn lista\n\ndef aparcamientos_seleccionados(user,request):\n\n\tuser_object = User.objects.get(username=user)\n\n\ttry:\n\t\tusuario = Usuario.objects.get(nombre=user_object)\n\t\tlista_seleccionados = 
Seleccionados.objects.filter(selector=usuario)\n\n\t\tpaginator = Paginator(lista_seleccionados,5)\n\t\tpage = request.GET.get('page')\n\t\ttry:\n\t\t\tseleccionados = paginator.page(page)\n\t\texcept PageNotAnInteger:\n\t\t\t# If page is not an integer, deliver first page.\n\t\t\tseleccionados = paginator.page(1)\n\t\texcept EmptyPage:\n\t\t # If page is out of range (e.g. 9999), deliver last page of results.\n\t\t\tseleccionados = paginator.page(paginator.num_pages)\n\n\t\tlista = \"Listado de aparcamientos seleccionados por \" + user + \"<br>\"\n\n\t\tfor i in seleccionados:\n\t\t\tlista += \"<br><li>Fecha de selección: \" + str(i.fecha_seleccion)\n\t\t\tlista += \"<br><a href=\" + i.aparcamiento.content_url + \">\" + i.aparcamiento.nombre + \"<br></a>\"\n\t\t\tlista += \"Dirección: \" + i.aparcamiento.clase_vial + \" \" + i.aparcamiento.localizacion + \", nº \" + str(i.aparcamiento.num)\n\t\t\tlista += \"<br><a href=http://localhost:1234/aparcamientos/\" + i.aparcamiento.entidad + \">\" + \"Más información</a><br>\"\n\texcept ObjectDoesNotExist:\n\t\tlista = \"El usuario aún no ha seleccionado ningún aparcamiento\"\n\t\tseleccionados = \"\"\n\n\n\treturn lista,seleccionados\n\ndef accesibles(value):\n\taccesibles = '<form action=\"\" method=\"POST\">'\n\taccesibles += '<button type=\"submit\" name=\"Accesible\" value=\"' + str(value) + '\"> Accesibles</button></form>'\n\n\treturn accesibles\n\n@csrf_exempt\ndef pagina_principal(request):\n\n\tformulario = login_form(request)\n\tlist_megustas = lista_megustas()\n\tusers = paginas_personales()\n\n\tvalue = 1\n\taccesible = accesibles(value)\n\n\ttemplate = get_template(\"index.html\")\n\n\tif request.user.is_authenticated():\n\t\tusername = str(request.user)\n\t\tformulario = 'Bienvenido ' + username\n\t\tformulario += '<br><br><a href=\"http://localhost:1234/logout\" > Logout </a>'\n\n\tif request.method == 'POST':\n\n\t\tkey = request.body.decode(\"utf-8\").split('=')[0]\n\n\t\tif key == 'Accesible':\n\t\t\tvalue = request.POST['Accesible']\n\n\t\t\tif value == '1':\n\t\t\t\tlista_accesibles = Aparcamiento.objects.filter(accesibilidad=1)\n\t\t\t\tlista = '<a href=\"http://localhost:1234/\" > Volver </a>'\n\t\t\t\tvalue = 0\n\t\t\t\tfor i in lista_accesibles:\n\t\t\t\t\tnombre_aparcamiento = i.nombre\n\t\t\t\t\turl_aparcamiento = i.content_url\n\t\t\t\t\tlista += \"<li><p>\" + nombre_aparcamiento + \"</p><a href=\" + url_aparcamiento + \">\" + url_aparcamiento + \"</a></li>\"\n\t\t\telse:\n\t\t\t\tlista = '<a href=\"http://localhost:1234/\" > Volver </a>'\n\t\t\t\taparcamientos = Aparcamiento.objects.all()\n\t\t\t\tfor aparcamiento in aparcamientos:\n\t\t\t\t\tnombre_aparcamiento = aparcamiento.nombre\n\t\t\t\t\turl_aparcamiento = aparcamiento.entidad\n\t\t\t\t\tlista += '<li><p>' + nombre_aparcamiento + '. 
URL del aparcamiento: ' + '<a href=\"aparcamientos/' + url_aparcamiento + '\">\t⇾ Más información</a></br></p>'\n\t\t\t\tvalue = 1\n\n\t\t\taccesible = accesibles(value)\n\t\t\tc = Context({'login': formulario, 'list_users':lista, 'accesible': accesible})\n\n\telse:\n\n\t\tinit = Aparcamiento.objects.all()\n\n\t\tif len(init) == 0:\n\t\t\tget_data()\n\n\n\t\tc = Context({'login': formulario, 'list':list_megustas, 'list_users':users, 'accesible': accesible})\n\n\trenderizado = template.render(c)\n\treturn HttpResponse(renderizado)\n\ndef mylogout(request):\n\tlogout(request)\n\treturn redirect(\"/\")\n\n@csrf_exempt\ndef usuarios(request, peticion):\n\n\tformulario = '<form action=\"\" method=\"POST\">'\n\tformulario += '<br>Introduzca un título nuevo a su página personal<br><input type=\"text\" name=\"Titulo\">'\n\tformulario += '<input type=\"submit\" value=\" Actualizar\"></form>'\n\n\tcss = '<form action=\"\" method=\"POST\">'\n\tcss += 'Modifique el tamaño de letra<br><input type=\"text\" name=\"Letra\">'\n\tcss += '<br><br>Modifique el color de letra\t<input type=\"color\" name=\"Color\"><br>'\n\tcss += '<br><input type=\"submit\" value=\"Modificar\"></form>'\n\n\n\taparcamientos = Aparcamiento.objects.all()\n\n\tlista= \"<br>LISTADO DE APARCAMIENTOS<br><br>\"\n\tfor aparcamiento in aparcamientos:\n\t\tnombre_aparcamiento = aparcamiento.nombre\n\t\tlista += nombre_aparcamiento\n\t\tlista += '<form action=\"\" method=\"POST\">'\n\t\tlista += '<button type=\"submit\" name=\"Seleccionar\" value=\"' + nombre_aparcamiento + '\">Seleccionar</button><br></form>'\n\n\tuser_object= User.objects.get(username=peticion)\n\n\tif request.method == 'POST':\n\t\tkey = request.body.decode(\"utf-8\").split('=')[0]\n\t\tif key == \"Titulo\":\n\t\t\ttitulo = request.POST['Titulo']\n\t\t\ttry:\n\t\t\t\tuser = Usuario.objects.get(nombre=user_object)\n\t\t\t\tuser.titulo_pagina = titulo\n\t\t\t\tuser.save()\n\t\t\texcept ObjectDoesNotExist:\n\t\t\t\tp = Usuario(nombre=user_object, titulo_pagina=titulo)\n\t\t\t\tp.save()\n\n\t\telif key == \"Seleccionar\":\n\t\t\tnombre_aparcamiento = request.POST['Seleccionar']\n\t\t\ttoday = datetime.datetime.today()\n\n\n\t\t\ttry:\n\t\t\t\tselector = Usuario.objects.get(nombre=user_object)\n\t\t\t\taparcamiento = Aparcamiento.objects.get(nombre=nombre_aparcamiento)\n\t\t\texcept:\n\t\t\t\tp = Usuario(nombre=user_object)\n\t\t\t\tp.save()\n\t\t\t\tselector = Usuario.objects.get(nombre=user_object)\n\n\n\t\t\tCheck = False\n\t\t\tlista_usuario = Seleccionados.objects.filter(selector=selector)\n\t\t\tfor i in lista_usuario:\n\t\t\t\tif\tnombre_aparcamiento == i.aparcamiento.nombre:\n\t\t\t\t\tCheck=True\n\n\t\t\tif Check == False:\n\t\t\t\tp = Seleccionados(aparcamiento=aparcamiento, selector=selector, fecha_seleccion=today)\n\t\t\t\tp.save()\n\n\t\telif key == \"Letra\":\n\t\t\tletra = request.POST['Letra']\n\t\t\tcolor = request.POST['Color']\n\n\t\t\ttry:\n\t\t\t\tuser = Usuario.objects.get(nombre=user_object)\n\t\t\texcept:\n\t\t\t\tp = Usuario(nombre=user_object)\n\t\t\t\tp.save()\n\t\t\t\tuser = Usuario.objects.get(nombre=user_object)\n\t\t\tif letra == \"\":\n\t\t\t\tletra = \"15\"\n\n\t\t\tuser.letra = letra\n\t\t\tuser.color = color\n\t\t\tuser.save()\n\n\tlista_seleccionados, seleccionados= aparcamientos_seleccionados(peticion,request)\n\n\n\tif request.user.is_authenticated():\n\t\tusername = str(request.user)\n\t\tif peticion != username: #Si no es igual es que solo puedo acceder a la parte publica, ya qu eno es la mia\n\t\t\ttemplate = 
get_template(\"publicuser.html\")\n\t\t\ttitulo_pagina = \"Página pública de \" + peticion + \"<br><br>\"\n\t\t\tform_user = 'Bienvenido ' + username\n\t\t\tform_user += '<br><br><a href=\"http://localhost:1234/logout\" > Logout </a>'\n\t\t\tc = Context({'lista_selecc':lista_seleccionados, 'seleccionados':seleccionados, 'titulo': titulo_pagina, 'login':form_user})\n\t\telse:\t #Si es igual es que es la mia y puedo acceder a la parte privada, ya que es lamia\n\t\t\ttemplate = get_template(\"privateuser.html\")\n\t\t\ttry:\n\t\t\t\ttitulo_pagina = Usuario.objects.get(nombre=user_object).titulo_pagina\n\t\t\texcept ObjectDoesNotExist:\n\t\t\t\ttitulo_pagina = \"Página personal de \" + str(request.user) + \"<br><br>\"\n\t\t\tc = Context({'lista_selecc':lista_seleccionados, 'seleccionados':seleccionados, 'lista': lista, 'form': formulario, 'css':css, 'titulo': titulo_pagina})\n\telse:\n\t\ttemplate = get_template(\"publicuser.html\")\n\t\ttitulo_pagina = \"Página pública de \" + peticion + \"<br><br>\"\n\t\tform_user = 'Para loguearse vaya al botón de Inicio'\n\t\tc = Context({'lista_selecc':lista_seleccionados, 'seleccionados':seleccionados, 'titulo': titulo_pagina, 'login':form_user})\n\n\n\trenderizado = template.render(c)\n\treturn HttpResponse(renderizado)\n\ndef personalizar(request):\n\tif request.user.is_authenticated():\n\t\tuser_object = User.objects.get(username=request.user)\n\t\tuser = Usuario.objects.get(nombre=user_object)\n\t\tletra = user.letra\n\t\tcolor = user.color\n\telse:\n\t\tletra = \"14px\"\n\t\tcolor = \"#FCFCFC\"\n\n\tcss = get_template(\"change.css\")\n\tc = Context({'letra':letra, 'color':color})\n\trenderizado = css.render(c)\n\n\treturn HttpResponse(renderizado, content_type=\"text/css\")\n\ndef usuarios_xml(request, peticion):\n\n\tuser_object = User.objects.get(username=peticion)\n\n\tdoc = Document()\n\tcont = doc.createElement(\"Contenidos\")\n\tdoc.appendChild(cont)\n\tinfo = doc.createElement(\"infoDataset\")\n\tcont.appendChild(info)\n\tnombre = doc.createElement(\"Nombre\")\n\tinfo.appendChild(nombre)\n\tptext = doc.createTextNode(\"XML de aparcamientos seleccionados por el usuario \" + peticion)\n\tnombre.appendChild(ptext)\n\turl = doc.createElement(\"url\")\n\tinfo.appendChild(url)\n\tptext = doc.createTextNode(\"http://localhost:1234/\" + peticion + \"/xml/\")\n\turl.appendChild(ptext)\n\taparc = doc.createElement(\"Aparcamientos\")\n\tcont.appendChild(aparc)\n\n\ttry:\n\t\tusuario = Usuario.objects.get(nombre=user_object)\n\t\tlista_seleccionados = Seleccionados.objects.filter(selector=usuario)\n\n\n\t\tfor i in lista_seleccionados:\n\t\t\titem = doc.createElement(\"Contenido\")\n\t\t\taparc.appendChild(item)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"ID-ENTIDAD\")\n\t\t\tptext = doc.createTextNode(i.aparcamiento.entidad)\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"NOMBRE\")\n\t\t\tptext = doc.createTextNode(i.aparcamiento.nombre)\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"DESCRIPCION\")\n\t\t\tptext = doc.createTextNode(i.aparcamiento.descripcion)\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"ACCESIBILIDAD\")\n\t\t\tif 
i.aparcamiento.accesibilidad == True:\n\t\t\t\tacces = 1\n\t\t\telse:\n\t\t\t\tacces = 0\n\t\t\tptext = doc.createTextNode(str(acces))\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"CONTENT_URL\")\n\t\t\tptext = doc.createTextNode(i.aparcamiento.content_url)\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"LOCALIZACION\")\n\t\t\tptext = doc.createTextNode(i.aparcamiento.localizacion)\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"CLASE VIAL\")\n\t\t\tptext = doc.createTextNode(i.aparcamiento.clase_vial)\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"TIPO NUM\")\n\t\t\tptext = doc.createTextNode(i.aparcamiento.tipo_num)\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"NUM\")\n\t\t\tptext = doc.createTextNode(str(i.aparcamiento.num))\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"LOCALIDAD\")\n\t\t\tptext = doc.createTextNode(i.aparcamiento.localidad)\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"PROVINCIA\")\n\t\t\tptext = doc.createTextNode(i.aparcamiento.provincia)\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"CODIGO POSTAL\")\n\t\t\tptext = doc.createTextNode(str(i.aparcamiento.codigo_postal))\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"BARRIO\")\n\t\t\tptext = doc.createTextNode(i.aparcamiento.barrio)\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"DISTRITO\")\n\t\t\tptext = doc.createTextNode(i.aparcamiento.distrito)\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"COORDENADA X\")\n\t\t\tptext = doc.createTextNode(str(i.aparcamiento.coordenada_x))\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"COORDENADA Y\")\n\t\t\tptext = doc.createTextNode(str(i.aparcamiento.coordenada_y))\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\titem.appendChild(atributo)\n\t\t\tdatos = doc.createElement(\"DATOSDECONTACTO\")\n\t\t\titem.appendChild(datos)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\tdatos.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", \"TELEFONO\")\n\t\t\tptext = doc.createTextNode(i.aparcamiento.telefono)\n\t\t\tatributo.appendChild(ptext)\n\t\t\tatributo = doc.createElement(\"atributo\")\n\t\t\tdatos.appendChild(atributo)\n\t\t\tatributo.setAttribute(\"nombre\", 
\"EMAIL\")\n\t\t\tptext = doc.createTextNode(i.aparcamiento.email)\n\t\t\tatributo.appendChild(ptext)\n\texcept:\n\t\tprint(\"\")\n\n\n\txml = doc.toprettyxml(indent=\" \")\n\treturn HttpResponse(xml, content_type = \"text/xml\")\n\n@csrf_exempt\ndef aparcamientos(request):\n\n\tlista = lista_aparcamientos()\n\n\tfiltrar = '<form action=\"\" method=\"POST\">'\n\tfiltrar += '<br><br><input type=\"text\" name=\"distrito\">'\n\tfiltrar += '<input type=\"submit\" value=\"Filtrar por distrito\">'\n\n\ttemplate = get_template(\"aparcamientos.html\")\n\n\tif request.user.is_authenticated():\n\t\tusername = str(request.user)\n\t\tform_user = 'Bienvenido ' + username\n\t\tform_user += '<br><br><a href=\"http://localhost:1234/logout\" > Logout </a>'\n\telse:\n\t\tform_user = \"Para loguearse vaya al botón de Inicio\"\n\n\tif request.method == \"POST\":\n\t\tfiltro_distrito = request.POST['distrito']\n\t\tfiltro_distrito = filtro_distrito.upper()\n\n\t\tif filtro_distrito == '':\n\t\t\tlista_filtrada = \"No ha introducido ningún filtro, introduzca distrito para filtrar \" + lista\n\t\telse:\n\t\t\taparcamientos_filtrados = Aparcamiento.objects.all()\n\t\t\tEncontrado = False\n\t\t\tlista_filtrada = \"Los aparcamientos en el \" + filtro_distrito + \" son: \"\n\t\t\tfor i in aparcamientos_filtrados:\n\t\t\t\tif filtro_distrito == i.distrito:\n\t\t\t\t\tEncontrado = True\n\t\t\t\t\tnombre_aparcamiento = i.nombre\n\t\t\t\t\turl_aparcamiento = i.content_url\n\t\t\t\t\tlista_filtrada += \"<p>\" + nombre_aparcamiento + \"</p><li><a href=\" + url_aparcamiento + \">\" + url_aparcamiento + \"</a></li>\"\n\n\n\t\t\tif Encontrado == False:\t\t#No es un distrito válido el que se ha introducido y no ha entrado por el bucle anterior\n\t\t\t\tlista_filtrada = \"Introduzca un nuevo distrito. 
\" + filtro_distrito + \" no es válido\"\n\n\n\t\tc = Context({'distrito': filtrar, 'lista': lista_filtrada, 'login':form_user})\n\n\telse:\n\n\t\tc = Context({'distrito': filtrar, 'lista': lista, 'login':form_user})\n\n\n\trenderizado = template.render(c)\n\treturn HttpResponse(renderizado)\n\n@csrf_exempt\ndef aparcamientos_id(request, recurso):\n\n template = get_template(\"aparcamientos.html\")\n num_megustas = 0\n\n if request.method == 'POST':\n key = request.body.decode(\"utf-8\").split('=')[0]\n print(key)\n\n #tipo = request.POST\n #print(tipo)\n #qd = urllib.unquote(tipo).decode(\"utf-8\")\n #qd = QueryDict(tipo).decode(\"utf-8\")\n #qd.getlist('Me Gusta')\n #print(qd)\n if key == 'Me+Gusta':\n aparcamiento = Aparcamiento.objects.get(entidad=recurso)\n aparcamiento.contador_megusta = aparcamiento.contador_megusta + 1\n aparcamiento.save()\n num_megustas = aparcamiento.contador_megusta\n else:\n coment = request.POST['Comentario']\n aparcamiento = Aparcamiento.objects.get(entidad=recurso)\n aparcamiento.contador_coments = aparcamiento.contador_coments + 1\n aparcamiento.save()\n\n p = Comentario (aparcamiento= aparcamiento, coment=coment)\n p.save()\n\n\n\n try:\n aparcamiento = Aparcamiento.objects.get(entidad=recurso)\n\n nombre = aparcamiento.nombre\n descripcion = aparcamiento.descripcion\n accesibilidad = aparcamiento.accesibilidad\n localizacion = aparcamiento.localizacion\n via = aparcamiento.clase_vial\n num = aparcamiento.num\n localidad = aparcamiento.localidad\n provincia = aparcamiento.provincia\n codigo_postal = aparcamiento.codigo_postal\n barrio = aparcamiento.barrio\n distrito = aparcamiento.distrito\n coordenada_x = aparcamiento.coordenada_x\n coordenada_y = aparcamiento.coordenada_y\n telefono = aparcamiento.telefono\n email = aparcamiento.email\n\n if telefono == '':\n telefono = \"No disponible\"\n\n if email == '':\n email = \"No disponible\"\n\n if accesibilidad == 1:\n acces = \"Libre\"\n else:\n acces = \"Ocupado\"\n\n lista_aparcamientos = Aparcamiento.objects.all()\n list_coments = \"\"\n aparcamiento = Aparcamiento.objects.get(entidad=recurso)\n num_megustas = aparcamiento.contador_megusta\n for i in lista_aparcamientos:\n if i.entidad == recurso:\n comentarios = Comentario.objects.filter(aparcamiento=i)\n if len(comentarios) != 0:\n list_coments = \"<li><p>COMENTARIOS</p><ol>\"\n for j in comentarios:\n list_coments += \"<li>\" + j.coment + \"<br>\"\n\n Response = \"<p>INFORMACIÓN ACERCA DEL APARCAMIENTO CON ID: \" + recurso + \"</br></p>\"\n Response += \"<a href=\" + i.content_url + \">\" + i.nombre + \"</a><br>\"\n Response += \"Descripción: \" + descripcion + \"</br>\"\n Response += \"Accesibilidad: \" + acces + \"</br>\"\n Response += \"Localización: \" + via + \" \" + localizacion + \", nº \" + str(num)\n Response += \" \" + localidad + \" (\" + str(codigo_postal) + \")</br>\"\n Response += \"Ubicación: \" + barrio + \" \" + distrito + \" Coordenadas: \" + str(coordenada_x) + \" , \" + str(coordenada_y) + \"<br><br>\"\n Response += \"INFORMACIÓN DE CONTACTO </br>\"\n Response += \"Teléfono: \" + telefono + \"</br>\"\n Response += \"Email: \" + email + \"</br>\" + list_coments + \"</ol>\"\n if num_megustas != 0:\n Response += \"</br><li>Numero de me gustas es: \" + str(num_megustas) + \"<br>\"\n else:\n Response += \"</br><li>Se el primero en indicar que te gusta la página<br>\"\n\n if request.user.is_authenticated():\n username = str(request.user)\n form_user = 'Bienvenido ' + username\n form_user += '<br><br><a 
href=\"http://localhost:1234/logout\" > Logout </a>'\n\n formulario = '<form action=\"\" method=\"POST\">'\n formulario += '<br>Puede introducir un comentario si lo desea ' + str(request.user) + '<br><input type=\"text\" name=\"Comentario\">'\n formulario += '<input type=\"submit\" value=\"Comentar\"></form>'\n Response += formulario\n\n else:\n form_user = \"Para loguearse vaya al botón de Inicio\"\n\n megusta = ''\n megusta += '<br> Indica que te gusta este aparcamiento</br>'\n megusta += '<form action=\"\" method=\"POST\">'\n megusta += '<button type=\"submit\" name=\"Me Gusta\" value=\"Me Gusta\"> +1 </button></form>'\n Response += megusta\n\n except ObjectDoesNotExist:\n Response = \"Este id no se corresponde con ningún aparcamiento\"\n\n c = Context({'lista': Response, 'login': form_user})\n renderizado = template.render(c)\n return HttpResponse(renderizado)\n\ndef about(request):\n\n template = get_template(\"about.html\")\n\n Cuerpo = \"DESCRIPCIÓN DE LA APLICACIÓN DE APARCAMIENTOS DE MADRID<br><br>\"\n Cuerpo += \"------------------------------------ Página principal ---------------------------------------------------\"\n Cuerpo += \"<li> Tiene un menú bajo el banner en el que nos permite dirigirnos a Inicio (página principal), a Todos (listado de todos los aparcamientos) o a About (página de ayuda y explicación de la web) </li>\"\n Cuerpo += \"<li> Un botón Accesibles, que si se selecciona una vez mostrará un listado con sólo aquellos aparcamientos que estén disponibles en ese momento. Si se selecciona de nuevo, mostrará un listado con todos los aparcamientos registrados en la aplicación. Para volver a la página principal se selecciona 'Volver'.</li>\"\n Cuerpo += \"<li> Bajo el botón Accesibles hay un listado de páginas personales de usuario: Muestra un listado con la interfaz pública de los usuarios registrados en la aplicación. Se puede acceder a ellas seleccionando el enlace del título de sus páginas de usuario.</li>\"\n Cuerpo += \"<li> Listado de Aparcamientos con más me gusta: Mostrará los 5 aparcamientos más valorados por usuarios.</li></br></br>\"\n Cuerpo += \"------------------------------------ Página con los aparcamientos ---------------------------------------------------\"\n Cuerpo += \"<li> Se puede acceder a través del botón 'Todos' de la Página Principal.</li>\"\n Cuerpo += \"<li> Muestra un listado con todos los aparcamientos registrados junto con un enlace a 'Más Información' para cada aparcamiento. Este enlace mostrará información más detallada acerca de este aparcamiento y también sus comentarios.</li>\"\n Cuerpo += \"<li> Filtrar por distrito: permite el filtrado por un distrito seleccionado. Mostrará un listado de aquellos aparcamientos que se correspondan con el distrito introducido.</li></br></br>\"\n Cuerpo += \"------------------------------------ Interfaz pública de usuario ---------------------------------------------------\"\n Cuerpo += \"<li> Muestra un listado con los aparcamientos seleccionados por el usuario elegido. Sólo se visualizan de 5 en 5.</li>\"\n Cuerpo += \"<li> Tiene un menú bajo el banner en el que nos permite dirigirnos a Inicio (página principal), a Todos (listado de todos los aparcamientos) o a About (página de ayuda y explicación de la web) </li></br></br>\"\n Cuerpo += \"------------------------------------ Interfaz privada de usuario ---------------------------------------------------\"\n Cuerpo += \"<li> Un usuario podrá loguearse únicamente desde la Página Principal. Para ello debe rellenar el formulario superior. 
Una vez logueado, accede a su página personal de usuario. Donde puede encontrar: </li>\"\n Cuerpo += \"<li> El listado con los aparcamientos seleccionados por ese usuario, con un enlace a la página del aparcamiento y a su información. Si se accede a 'Más Información', se mostrará la página de ese aparcamiento junto con un formulario para que el usuario pueda poner comentarios si lo desea. </li>\"\n Cuerpo += \"<li> Formulario para cambiar el título de su página personal.</li>\"\n Cuerpo += \"<li> Formulario para cambiar el color y tamaño de letra de todas las páginas de la aplicación.</li>\"\n Cuerpo += \"<li> Listado con todos los aparcamientos registrados para poder seleccionarlos pulsando 'Seleccionar'.</li></br></br>\"\n Cuerpo += \"------------------------------------ Pie de pagina ---------------------------------------------------\"\n Cuerpo += \"<li> Si se selecciona el enlace Datos munimadrid, se redirecciona a la página original de la aplicación de Aparcamientos de Madrid.</li>\"\n Cuerpo += \"<li> Si se selecciona el enlace correspodiente al fichero XML muestra el XML con la información de todos los aparcamientos registrados en la página.</li></br></br>\"\n Cuerpo += \"------------------------------------ Página XML de un usuario ---------------------------------------------------\"\n Cuerpo += \"<li> Si se realiza el recurso 'usuario'/XML, se muestra el XML con la información de los aparcamientos seleccionados por el usuario introducido.</li></br></br>\"\n\n c = Context({'lista': Cuerpo})\n renderizado = template.render(c)\n\n return HttpResponse(renderizado)\n",
"step-ids": [
8,
12,
13,
14,
17
]
}
|
[
8,
12,
13,
14,
17
] |
numero_uno = int(input("ingresa el primer numero "))
numero_dos = int(input("ingresa el segundo numero "))
print(numero_uno)
print(numero_dos)
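# compute the sum of the two inputs and report it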
total = numero_uno + numero_dos
print("el total de la suma de : " + str(numero_uno) + " + " + str(numero_dos) + " es = a " + str(total))
|
normal
|
{
"blob_id": "5685befae923fc336a2a5e0eb5e382c2e7d82d04",
"index": 9613,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(numero_uno)\nprint(numero_dos)\n<mask token>\nprint('el total de la suma de : ' + str(numero_uno) + ' + ' + str(\n numero_dos) + ' es = a ' + str(total))\n",
"step-3": "numero_uno = int(input('ingresa el primer numero '))\nnumero_dos = int(input('ingresa el segundo numero '))\nprint(numero_uno)\nprint(numero_dos)\ntotal = numero_uno + numero_dos\nprint('el total de la suma de : ' + str(numero_uno) + ' + ' + str(\n numero_dos) + ' es = a ' + str(total))\n",
"step-4": "numero_uno=int(input(\"ingresa el primer numero \"))\nnumero_dos=int(input(\"ingresa el segundo numero \"))\nprint(numero_uno)\nprint(numero_dos)\ntotal=numero_uno\t+numero_dos\nprint(\"el total de la suma de : \"+str(numero_uno)+\" + \"+str(numero_dos)+\" es = a \"+str(total))",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from django.contrib import admin
from .models import Client, Adress
# Register your models here.
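# ClientInline lets a client's Adress records be edited inline on the Client admin page.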
class ClientInline(admin.StackedInline):
    model = Adress
    can_delete = False
    extra = 1


class ClientAdmin(admin.ModelAdmin):
    inlines = [ClientInline]


admin.site.register(Client, ClientAdmin)
|
normal
|
{
"blob_id": "ffd7aef2e72e64ac5b9f85b9d12845479187d89b",
"index": 2010,
"step-1": "<mask token>\n\n\nclass ClientInline(admin.StackedInline):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass ClientAdmin(admin.ModelAdmin):\n inlines = [ClientInline]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ClientInline(admin.StackedInline):\n model = Adress\n can_delete = False\n extra = 1\n\n\nclass ClientAdmin(admin.ModelAdmin):\n inlines = [ClientInline]\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ClientInline(admin.StackedInline):\n model = Adress\n can_delete = False\n extra = 1\n\n\nclass ClientAdmin(admin.ModelAdmin):\n inlines = [ClientInline]\n\n\nadmin.site.register(Client, ClientAdmin)\n",
"step-4": "from django.contrib import admin\nfrom .models import Client, Adress\n\n\nclass ClientInline(admin.StackedInline):\n model = Adress\n can_delete = False\n extra = 1\n\n\nclass ClientAdmin(admin.ModelAdmin):\n inlines = [ClientInline]\n\n\nadmin.site.register(Client, ClientAdmin)\n",
"step-5": "from django.contrib import admin\nfrom .models import Client, Adress\n\n# Register your models here.\n\nclass ClientInline(admin.StackedInline):\n model = Adress\n can_delete = False\n extra = 1\n\nclass ClientAdmin(admin.ModelAdmin):\n inlines = [ClientInline]\n\n\nadmin.site.register(Client, ClientAdmin)",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import numpy as np
import matplotlib.pyplot as plt
conf_arr = [[2987, 58, 955, 832, 1991, 181, 986], [142, 218, 195, 44, 235,
11, 27], [524, 8, 3482, 478, 2406, 708, 588], [140, 0, 386, 12491, 793,
182, 438], [368, 15, 883, 635, 6331, 71, 1357], [77, 0, 942, 394, 223,
4530, 176], [224, 7, 601, 929, 2309, 99, 5761]]
conf_arr = np.transpose(np.array(conf_arr))
norm_conf = []
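# normalize each row of the transposed matrix so cell colours show
# per-class fractions rather than raw counts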
for i in conf_arr:
    a = 0
    tmp_arr = []
    a = sum(i, 0)
    for j in i:
        tmp_arr.append(float(j) / float(a))
    norm_conf.append(tmp_arr)
fig = plt.figure()
plt.clf()
ax = fig.add_subplot(111)
ax.set_aspect(1)
res = ax.imshow(np.array(norm_conf), cmap=plt.cm.jet, interpolation='nearest')
width, height = conf_arr.shape
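# overlay the raw count on every colour-mapped cell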
for x in range(width):
    for y in range(height):
        ax.annotate(str(conf_arr[x][y]), xy=(y, x), horizontalalignment=
            'center', verticalalignment='center')
cb = fig.colorbar(res)
alphabet = '0123456789'
plt.xticks(range(width), alphabet[:width])
plt.yticks(range(height), alphabet[:height])
plt.xlabel('Predicted Label')
plt.ylabel('True Label')
plt.savefig('confusion_matrix.png', format='png')
|
normal
|
{
"blob_id": "923a2979df3c37583eec712880ad821541bd898b",
"index": 8735,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in conf_arr:\n a = 0\n tmp_arr = []\n a = sum(i, 0)\n for j in i:\n tmp_arr.append(float(j) / float(a))\n norm_conf.append(tmp_arr)\n<mask token>\nplt.clf()\n<mask token>\nax.set_aspect(1)\n<mask token>\nfor x in range(width):\n for y in range(height):\n ax.annotate(str(conf_arr[x][y]), xy=(y, x), horizontalalignment=\n 'center', verticalalignment='center')\n<mask token>\nplt.xticks(range(width), alphabet[:width])\nplt.yticks(range(height), alphabet[:height])\nplt.xlabel('Predicted Label')\nplt.ylabel('True Label')\nplt.savefig('confusion_matrix.png', format='png')\n",
"step-3": "<mask token>\nconf_arr = [[2987, 58, 955, 832, 1991, 181, 986], [142, 218, 195, 44, 235, \n 11, 27], [524, 8, 3482, 478, 2406, 708, 588], [140, 0, 386, 12491, 793,\n 182, 438], [368, 15, 883, 635, 6331, 71, 1357], [77, 0, 942, 394, 223, \n 4530, 176], [224, 7, 601, 929, 2309, 99, 5761]]\nconf_arr = np.transpose(np.array(conf_arr))\nnorm_conf = []\nfor i in conf_arr:\n a = 0\n tmp_arr = []\n a = sum(i, 0)\n for j in i:\n tmp_arr.append(float(j) / float(a))\n norm_conf.append(tmp_arr)\nfig = plt.figure()\nplt.clf()\nax = fig.add_subplot(111)\nax.set_aspect(1)\nres = ax.imshow(np.array(norm_conf), cmap=plt.cm.jet, interpolation='nearest')\nwidth, height = conf_arr.shape\nfor x in range(width):\n for y in range(height):\n ax.annotate(str(conf_arr[x][y]), xy=(y, x), horizontalalignment=\n 'center', verticalalignment='center')\ncb = fig.colorbar(res)\nalphabet = '0123456789'\nplt.xticks(range(width), alphabet[:width])\nplt.yticks(range(height), alphabet[:height])\nplt.xlabel('Predicted Label')\nplt.ylabel('True Label')\nplt.savefig('confusion_matrix.png', format='png')\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\nconf_arr = [[2987, 58, 955, 832, 1991, 181, 986], [142, 218, 195, 44, 235, \n 11, 27], [524, 8, 3482, 478, 2406, 708, 588], [140, 0, 386, 12491, 793,\n 182, 438], [368, 15, 883, 635, 6331, 71, 1357], [77, 0, 942, 394, 223, \n 4530, 176], [224, 7, 601, 929, 2309, 99, 5761]]\nconf_arr = np.transpose(np.array(conf_arr))\nnorm_conf = []\nfor i in conf_arr:\n a = 0\n tmp_arr = []\n a = sum(i, 0)\n for j in i:\n tmp_arr.append(float(j) / float(a))\n norm_conf.append(tmp_arr)\nfig = plt.figure()\nplt.clf()\nax = fig.add_subplot(111)\nax.set_aspect(1)\nres = ax.imshow(np.array(norm_conf), cmap=plt.cm.jet, interpolation='nearest')\nwidth, height = conf_arr.shape\nfor x in range(width):\n for y in range(height):\n ax.annotate(str(conf_arr[x][y]), xy=(y, x), horizontalalignment=\n 'center', verticalalignment='center')\ncb = fig.colorbar(res)\nalphabet = '0123456789'\nplt.xticks(range(width), alphabet[:width])\nplt.yticks(range(height), alphabet[:height])\nplt.xlabel('Predicted Label')\nplt.ylabel('True Label')\nplt.savefig('confusion_matrix.png', format='png')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import json
import logging
logger = logging.getLogger(__name__)
from django.db.models import Q
from channels_api.bindings import ResourceBinding
from .models import LetterTransaction, UserLetter, TeamWord, Dictionary
from .serializers import LetterTransactionSerializer, UserLetterSerializer, TeamWordSerializer
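# Three bindings, one per stream: team words (validated server-side on create),
# per-user letters (read-only), and letter transactions (restricted to the
# borrower and the lender).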
class TeamWordBinding(ResourceBinding):

    model = TeamWord
    stream = "teamwords"
    serializer_class = TeamWordSerializer

    def get_queryset(self):
        return TeamWord.objects.filter(user__group__team=self.user.group.team)

    @classmethod
    def group_names(self, instance, action):
        return [str(instance.user.group.team)]

    def has_permission(self, user, action, pk):
        logger.debug("TW has_permission {} {} {}".format(user, action, pk))

        if action in ['update', 'delete']:
            return False

        if action == 'create':
            payload = json.loads(self.message.content['text'])
            if 'data' not in payload or 'word' not in payload['data']:
                logger.debug("Possibly malicious malformed TeamWord from {}".format(self.user.username))
                return False

            word = payload['data']['word']
            word_letters = set(word.lower())
            if len(word_letters) == 0:
                return False

            user = self.user
            user_letters = set()
            for letter in UserLetter.objects.filter(user=user):
                user_letters.add(letter.letter.lower())
            for letter in LetterTransaction.objects.filter(borrower=user, approved=True):
                user_letters.add(letter.letter.lower())

            if not word_letters.issubset(user_letters):
                return False

            team_words = set()
            for tword in self.get_queryset():
                team_words.add(tword.word)

            if word in team_words:
                return False

            try:
                wordObj = Dictionary.objects.get(word=word)
            except Exception as e:
                return False

            return True

        # allow list, retrieve, subscribe
        return True


class UserLetterBinding(ResourceBinding):

    model = UserLetter
    stream = "userletters"
    serializer_class = UserLetterSerializer

    def get_queryset(self):
        queries = Q(user=self.user)
        for profile in self.message.user.group.profile_set.all():
            queries |= Q(user=profile.user)

        return UserLetter.objects.filter(queries)

    @classmethod
    def group_names(self, instance, action):
        logger.debug(str(instance))
        return [instance.user.username + "solo"]

    def has_permission(self, user, action, pk):
        logger.debug("UL has_permission {} {} {}".format(user, action, pk))

        if action in ['create', 'update', 'delete']:
            return False

        # allow list, retrieve, subscribe
        return True


class LetterTransactionBinding(ResourceBinding):

    model = LetterTransaction
    stream = "lettertransactions"
    serializer_class = LetterTransactionSerializer

    def get_queryset(self):
        return LetterTransaction.objects.filter(Q(borrower=self.user) | Q(letter__user=self.user))

    @classmethod
    def group_names(self, instance, action):
        # Send this to only the borrower and lender
        return [instance.borrower.username + "solo", instance.letter.user.username + "solo"]

    def has_permission(self, user, action, pk):
        logger.debug("TR has_permission {} {} {}".format(user, action, self.message.content['text']))

        if action == "delete":
            return False

        if action == "create" or action == "update":
            payload = json.loads(self.message.content['text'])
            if 'data' not in payload or 'letter' not in payload['data']:
                logger.debug("Possibly malicious malformed LetterTransaction from {}".format(self.user.username))
                return False

            ul = UserLetter.objects.get(pk=payload['data']['letter'])

            # If this UserLetter is not owned by a friend, permission denied
            if ul.user.profile not in self.user.group.profile_set.all():
                logger.debug("Malicious LetterTransaction creation suspected by {}".format(self.user.username))
                return False

        # allow list, retrieve, subscribe, and legitimate create
        return True
|
normal
|
{
"blob_id": "c2e0f2eda6ef44a52ee4e192b8eb71bde0a69bff",
"index": 8954,
"step-1": "<mask token>\n\n\nclass TeamWordBinding(ResourceBinding):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n @classmethod\n def group_names(self, instance, action):\n return [str(instance.user.group.team)]\n\n def has_permission(self, user, action, pk):\n logger.debug('TW has_permission {} {} {}'.format(user, action, pk))\n if action in ['update', 'delete']:\n return False\n if action == 'create':\n payload = json.loads(self.message.content['text'])\n if 'data' not in payload or 'word' not in payload['data']:\n logger.debug('Possibly malicious malformed TeamWord from {}'\n .format(self.user.username))\n return False\n word = payload['data']['word']\n word_letters = set(word.lower())\n if len(word_letters) == 0:\n return False\n user = self.user\n user_letters = set()\n for letter in UserLetter.objects.filter(user=user):\n user_letters.add(letter.letter.lower())\n for letter in LetterTransaction.objects.filter(borrower=user,\n approved=True):\n user_letters.add(letter.letter.lower())\n if not word_letters.issubset(user_letters):\n return False\n team_words = set()\n for tword in self.get_queryset():\n team_words.add(tword.word)\n if word in team_words:\n return False\n try:\n wordObj = Dictionary.objects.get(word=word)\n except Exception as e:\n return False\n return True\n return True\n\n\nclass UserLetterBinding(ResourceBinding):\n model = UserLetter\n stream = 'userletters'\n serializer_class = UserLetterSerializer\n\n def get_queryset(self):\n queries = Q(user=self.user)\n for profile in self.message.user.group.profile_set.all():\n queries |= Q(user=profile.user)\n return UserLetter.objects.filter(queries)\n\n @classmethod\n def group_names(self, instance, action):\n logger.debug(str(instance))\n return [instance.user.username + 'solo']\n\n def has_permission(self, user, action, pk):\n logger.debug('UL has_permission {} {} {}'.format(user, action, pk))\n if action in ['create', 'update', 'delete']:\n return False\n return True\n\n\nclass LetterTransactionBinding(ResourceBinding):\n model = LetterTransaction\n stream = 'lettertransactions'\n serializer_class = LetterTransactionSerializer\n\n def get_queryset(self):\n return LetterTransaction.objects.filter(Q(borrower=self.user) | Q(\n letter__user=self.user))\n\n @classmethod\n def group_names(self, instance, action):\n return [instance.borrower.username + 'solo', instance.letter.user.\n username + 'solo']\n\n def has_permission(self, user, action, pk):\n logger.debug('TR has_permission {} {} {}'.format(user, action, self\n .message.content['text']))\n if action == 'delete':\n return False\n if action == 'create' or action == 'update':\n payload = json.loads(self.message.content['text'])\n if 'data' not in payload or 'letter' not in payload['data']:\n logger.debug(\n 'Possibly malicious malformed LetterTransaction from {}'\n .format(self.user.username))\n return False\n ul = UserLetter.objects.get(pk=payload['data']['letter'])\n if ul.user.profile not in self.user.group.profile_set.all():\n logger.debug(\n 'Malicious LetterTransaction creation suspected by {}'.\n format(self.user.username))\n return False\n return True\n",
"step-2": "<mask token>\n\n\nclass TeamWordBinding(ResourceBinding):\n <mask token>\n <mask token>\n <mask token>\n\n def get_queryset(self):\n return TeamWord.objects.filter(user__group__team=self.user.group.team)\n\n @classmethod\n def group_names(self, instance, action):\n return [str(instance.user.group.team)]\n\n def has_permission(self, user, action, pk):\n logger.debug('TW has_permission {} {} {}'.format(user, action, pk))\n if action in ['update', 'delete']:\n return False\n if action == 'create':\n payload = json.loads(self.message.content['text'])\n if 'data' not in payload or 'word' not in payload['data']:\n logger.debug('Possibly malicious malformed TeamWord from {}'\n .format(self.user.username))\n return False\n word = payload['data']['word']\n word_letters = set(word.lower())\n if len(word_letters) == 0:\n return False\n user = self.user\n user_letters = set()\n for letter in UserLetter.objects.filter(user=user):\n user_letters.add(letter.letter.lower())\n for letter in LetterTransaction.objects.filter(borrower=user,\n approved=True):\n user_letters.add(letter.letter.lower())\n if not word_letters.issubset(user_letters):\n return False\n team_words = set()\n for tword in self.get_queryset():\n team_words.add(tword.word)\n if word in team_words:\n return False\n try:\n wordObj = Dictionary.objects.get(word=word)\n except Exception as e:\n return False\n return True\n return True\n\n\nclass UserLetterBinding(ResourceBinding):\n model = UserLetter\n stream = 'userletters'\n serializer_class = UserLetterSerializer\n\n def get_queryset(self):\n queries = Q(user=self.user)\n for profile in self.message.user.group.profile_set.all():\n queries |= Q(user=profile.user)\n return UserLetter.objects.filter(queries)\n\n @classmethod\n def group_names(self, instance, action):\n logger.debug(str(instance))\n return [instance.user.username + 'solo']\n\n def has_permission(self, user, action, pk):\n logger.debug('UL has_permission {} {} {}'.format(user, action, pk))\n if action in ['create', 'update', 'delete']:\n return False\n return True\n\n\nclass LetterTransactionBinding(ResourceBinding):\n model = LetterTransaction\n stream = 'lettertransactions'\n serializer_class = LetterTransactionSerializer\n\n def get_queryset(self):\n return LetterTransaction.objects.filter(Q(borrower=self.user) | Q(\n letter__user=self.user))\n\n @classmethod\n def group_names(self, instance, action):\n return [instance.borrower.username + 'solo', instance.letter.user.\n username + 'solo']\n\n def has_permission(self, user, action, pk):\n logger.debug('TR has_permission {} {} {}'.format(user, action, self\n .message.content['text']))\n if action == 'delete':\n return False\n if action == 'create' or action == 'update':\n payload = json.loads(self.message.content['text'])\n if 'data' not in payload or 'letter' not in payload['data']:\n logger.debug(\n 'Possibly malicious malformed LetterTransaction from {}'\n .format(self.user.username))\n return False\n ul = UserLetter.objects.get(pk=payload['data']['letter'])\n if ul.user.profile not in self.user.group.profile_set.all():\n logger.debug(\n 'Malicious LetterTransaction creation suspected by {}'.\n format(self.user.username))\n return False\n return True\n",
"step-3": "<mask token>\n\n\nclass TeamWordBinding(ResourceBinding):\n model = TeamWord\n stream = 'teamwords'\n serializer_class = TeamWordSerializer\n\n def get_queryset(self):\n return TeamWord.objects.filter(user__group__team=self.user.group.team)\n\n @classmethod\n def group_names(self, instance, action):\n return [str(instance.user.group.team)]\n\n def has_permission(self, user, action, pk):\n logger.debug('TW has_permission {} {} {}'.format(user, action, pk))\n if action in ['update', 'delete']:\n return False\n if action == 'create':\n payload = json.loads(self.message.content['text'])\n if 'data' not in payload or 'word' not in payload['data']:\n logger.debug('Possibly malicious malformed TeamWord from {}'\n .format(self.user.username))\n return False\n word = payload['data']['word']\n word_letters = set(word.lower())\n if len(word_letters) == 0:\n return False\n user = self.user\n user_letters = set()\n for letter in UserLetter.objects.filter(user=user):\n user_letters.add(letter.letter.lower())\n for letter in LetterTransaction.objects.filter(borrower=user,\n approved=True):\n user_letters.add(letter.letter.lower())\n if not word_letters.issubset(user_letters):\n return False\n team_words = set()\n for tword in self.get_queryset():\n team_words.add(tword.word)\n if word in team_words:\n return False\n try:\n wordObj = Dictionary.objects.get(word=word)\n except Exception as e:\n return False\n return True\n return True\n\n\nclass UserLetterBinding(ResourceBinding):\n model = UserLetter\n stream = 'userletters'\n serializer_class = UserLetterSerializer\n\n def get_queryset(self):\n queries = Q(user=self.user)\n for profile in self.message.user.group.profile_set.all():\n queries |= Q(user=profile.user)\n return UserLetter.objects.filter(queries)\n\n @classmethod\n def group_names(self, instance, action):\n logger.debug(str(instance))\n return [instance.user.username + 'solo']\n\n def has_permission(self, user, action, pk):\n logger.debug('UL has_permission {} {} {}'.format(user, action, pk))\n if action in ['create', 'update', 'delete']:\n return False\n return True\n\n\nclass LetterTransactionBinding(ResourceBinding):\n model = LetterTransaction\n stream = 'lettertransactions'\n serializer_class = LetterTransactionSerializer\n\n def get_queryset(self):\n return LetterTransaction.objects.filter(Q(borrower=self.user) | Q(\n letter__user=self.user))\n\n @classmethod\n def group_names(self, instance, action):\n return [instance.borrower.username + 'solo', instance.letter.user.\n username + 'solo']\n\n def has_permission(self, user, action, pk):\n logger.debug('TR has_permission {} {} {}'.format(user, action, self\n .message.content['text']))\n if action == 'delete':\n return False\n if action == 'create' or action == 'update':\n payload = json.loads(self.message.content['text'])\n if 'data' not in payload or 'letter' not in payload['data']:\n logger.debug(\n 'Possibly malicious malformed LetterTransaction from {}'\n .format(self.user.username))\n return False\n ul = UserLetter.objects.get(pk=payload['data']['letter'])\n if ul.user.profile not in self.user.group.profile_set.all():\n logger.debug(\n 'Malicious LetterTransaction creation suspected by {}'.\n format(self.user.username))\n return False\n return True\n",
"step-4": "import json\nimport logging\nlogger = logging.getLogger(__name__)\nfrom django.db.models import Q\nfrom channels_api.bindings import ResourceBinding\nfrom .models import LetterTransaction, UserLetter, TeamWord, Dictionary\nfrom .serializers import LetterTransactionSerializer, UserLetterSerializer, TeamWordSerializer\n\n\nclass TeamWordBinding(ResourceBinding):\n model = TeamWord\n stream = 'teamwords'\n serializer_class = TeamWordSerializer\n\n def get_queryset(self):\n return TeamWord.objects.filter(user__group__team=self.user.group.team)\n\n @classmethod\n def group_names(self, instance, action):\n return [str(instance.user.group.team)]\n\n def has_permission(self, user, action, pk):\n logger.debug('TW has_permission {} {} {}'.format(user, action, pk))\n if action in ['update', 'delete']:\n return False\n if action == 'create':\n payload = json.loads(self.message.content['text'])\n if 'data' not in payload or 'word' not in payload['data']:\n logger.debug('Possibly malicious malformed TeamWord from {}'\n .format(self.user.username))\n return False\n word = payload['data']['word']\n word_letters = set(word.lower())\n if len(word_letters) == 0:\n return False\n user = self.user\n user_letters = set()\n for letter in UserLetter.objects.filter(user=user):\n user_letters.add(letter.letter.lower())\n for letter in LetterTransaction.objects.filter(borrower=user,\n approved=True):\n user_letters.add(letter.letter.lower())\n if not word_letters.issubset(user_letters):\n return False\n team_words = set()\n for tword in self.get_queryset():\n team_words.add(tword.word)\n if word in team_words:\n return False\n try:\n wordObj = Dictionary.objects.get(word=word)\n except Exception as e:\n return False\n return True\n return True\n\n\nclass UserLetterBinding(ResourceBinding):\n model = UserLetter\n stream = 'userletters'\n serializer_class = UserLetterSerializer\n\n def get_queryset(self):\n queries = Q(user=self.user)\n for profile in self.message.user.group.profile_set.all():\n queries |= Q(user=profile.user)\n return UserLetter.objects.filter(queries)\n\n @classmethod\n def group_names(self, instance, action):\n logger.debug(str(instance))\n return [instance.user.username + 'solo']\n\n def has_permission(self, user, action, pk):\n logger.debug('UL has_permission {} {} {}'.format(user, action, pk))\n if action in ['create', 'update', 'delete']:\n return False\n return True\n\n\nclass LetterTransactionBinding(ResourceBinding):\n model = LetterTransaction\n stream = 'lettertransactions'\n serializer_class = LetterTransactionSerializer\n\n def get_queryset(self):\n return LetterTransaction.objects.filter(Q(borrower=self.user) | Q(\n letter__user=self.user))\n\n @classmethod\n def group_names(self, instance, action):\n return [instance.borrower.username + 'solo', instance.letter.user.\n username + 'solo']\n\n def has_permission(self, user, action, pk):\n logger.debug('TR has_permission {} {} {}'.format(user, action, self\n .message.content['text']))\n if action == 'delete':\n return False\n if action == 'create' or action == 'update':\n payload = json.loads(self.message.content['text'])\n if 'data' not in payload or 'letter' not in payload['data']:\n logger.debug(\n 'Possibly malicious malformed LetterTransaction from {}'\n .format(self.user.username))\n return False\n ul = UserLetter.objects.get(pk=payload['data']['letter'])\n if ul.user.profile not in self.user.group.profile_set.all():\n logger.debug(\n 'Malicious LetterTransaction creation suspected by {}'.\n format(self.user.username))\n 
return False\n return True\n",
"step-5": "import json\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom django.db.models import Q\n\nfrom channels_api.bindings import ResourceBinding\n\nfrom .models import LetterTransaction, UserLetter, TeamWord, Dictionary\nfrom .serializers import LetterTransactionSerializer, UserLetterSerializer, TeamWordSerializer\n\n\nclass TeamWordBinding(ResourceBinding):\n\n model = TeamWord\n stream = \"teamwords\"\n serializer_class = TeamWordSerializer\n\n def get_queryset(self):\n return TeamWord.objects.filter(user__group__team=self.user.group.team)\n\n @classmethod\n def group_names(self, instance, action):\n return [str(instance.user.group.team)]\n\n def has_permission(self, user, action, pk):\n logger.debug(\"TW has_permission {} {} {}\".format(user, action, pk))\n\n if action in ['update', 'delete']:\n return False\n\n if action == 'create':\n payload = json.loads(self.message.content['text'])\n if 'data' not in payload or 'word' not in payload['data']:\n logger.debug(\"Possibly malicious malformed TeamWord from {}\".format(self.user.username))\n return False\n\n word = payload['data']['word']\n word_letters = set(word.lower())\n if len(word_letters) == 0:\n return False\n\n user = self.user\n user_letters = set()\n for letter in UserLetter.objects.filter(user=user):\n user_letters.add(letter.letter.lower())\n for letter in LetterTransaction.objects.filter(borrower=user, approved=True):\n user_letters.add(letter.letter.lower())\n\n if not word_letters.issubset(user_letters):\n return False\n\n team_words = set()\n for tword in self.get_queryset():\n team_words.add(tword.word)\n\n if word in team_words:\n return False\n\n try:\n wordObj = Dictionary.objects.get(word=word)\n except Exception as e:\n return False\n\n return True\n\n # allow list, retrieve, subscribe\n return True\n\n \nclass UserLetterBinding(ResourceBinding):\n\n model = UserLetter\n stream = \"userletters\"\n serializer_class = UserLetterSerializer\n\n def get_queryset(self):\n queries = Q(user=self.user)\n for profile in self.message.user.group.profile_set.all():\n queries |= Q(user=profile.user)\n\n return UserLetter.objects.filter(queries)\n\n @classmethod\n def group_names(self, instance, action):\n logger.debug(str(instance))\n return [instance.user.username + \"solo\"]\n\n def has_permission(self, user, action, pk):\n logger.debug(\"UL has_permission {} {} {}\".format(user, action, pk))\n\n if action in ['create', 'update', 'delete']:\n return False\n\n # allow list, retrieve, subscribe\n return True\n\n\nclass LetterTransactionBinding(ResourceBinding):\n\n model = LetterTransaction\n stream = \"lettertransactions\"\n serializer_class = LetterTransactionSerializer\n\n def get_queryset(self):\n return LetterTransaction.objects.filter(Q(borrower=self.user) | Q(letter__user=self.user))\n\n @classmethod\n def group_names(self, instance, action):\n # Send this to only the borrower and lender\n return [instance.borrower.username + \"solo\", instance.letter.user.username + \"solo\"]\n\n def has_permission(self, user, action, pk):\n logger.debug(\"TR has_permission {} {} {}\".format(user, action, self.message.content['text']))\n\n if action == \"delete\":\n return False\n\n if action == \"create\" or action == \"update\":\n payload = json.loads(self.message.content['text'])\n if 'data' not in payload or 'letter' not in payload['data']:\n logger.debug(\"Possibly malicious malformed LetterTransaction from {}\".format(self.user.username))\n return False\n\n ul = 
UserLetter.objects.get(pk=payload['data']['letter'])\n\n # If this UserLetter is not owned by a friend, permission denied\n if ul.user.profile not in self.user.group.profile_set.all():\n logger.debug(\"Malicious LetterTransaction creation suspected by {}\".format(self.user.username))\n return False\n\n # allow list, retrieve, subscribe, and legitimate create\n return True\n",
"step-ids": [
13,
14,
15,
17,
18
]
}
|
[
13,
14,
15,
17,
18
] |
"""
Client component of the Quartjes connector. Use the ClientConnector to create
a connection to the Quartjes server.
Usage
-----
Create an instance of this object with the host and port to connect to.
Call the start() method to establish the connection.
Now the database and the stock_exchange variable can be used to communicate
with the server.
If you do not wish to connect to a server, but run a local server instead,
create the object without any arguments.
Example
-------
>>> conn = ClientConnector("192.168.1.1")
>>> conn.start()
>>> conn.database.get_drinks()
Available server methods
------------------------
Currently two server objects are made available upon connection. Please see the
documentation for the server object for available methods and events:
* database: :class:`quartjes.controllers.database.Database`
* stock_exchange: :class:`quartjes.controllers.stock_exchange.StockExchange`
Advanced
--------
Use the method get_service_interface to retrieve additional interfaces to a server side
service.
As long as the connector is running, it will keep trying to reconnect any
lost connections using an exponential back-off.
ClientConnector class
---------------------
"""
__author__ = "Rob van der Most"
__docformat__ = "restructuredtext en"
from quartjes.connector.protocol import QuartjesClientFactory
from twisted.internet import reactor, threads
from threading import Thread
from quartjes.connector.services import ServiceInterface
import quartjes.controllers.database
import quartjes.controllers.stock_exchange2
class ClientConnector(object):
    """
    Client side endpoint of the Quartjes connector.

    Parameters
    ----------
    host : string
        Host to connect to. If no host is specified, a local server is started.
    port : int
        Port to connect to.

    Attributes
    ----------
    host
    port
    factory
    database
    stock_exchange

    """

    def __init__(self, host=None, port=None):
        self._host = host
        if port:
            self._port = port
        else:
            from quartjes.connector.server import default_port
            self._port = default_port
        self._factory = QuartjesClientFactory()
        self._database = None
        self._stock_exchange = None
        self._connection = None

    @property
    def host(self):
        """
        Hostname to connect to.
        Can only be changed when there is no active connection.
        """
        return self._host

    @host.setter
    def host(self, value):
        assert not self.is_connected(), "Host should not be changed will connected."
        self._host = value

    @property
    def port(self):
        """
        Port to connect to.
        Can only be changed when there is no active connection.
        """
        return self._port

    @port.setter
    def port(self, value):
        assert not self.is_connected(), "Port should not be changed will connected."
        self._port = value

    @property
    def factory(self):
        """
        The protocol factory used by the client to connect to the server.
        You normally should not need to access this. It is for advanced options.
        """
        return self._factory

    @property
    def database(self):
        """
        Reference to the currently running
        :class:`Database <quartjes.controllers.database.Database>`.
        This can be a proxy to the database on the server or a local database.
        """
        return self._database

    @property
    def stock_exchange(self):
        """
        Reference to the currently running
        :class:`StockExchange <quartjes.controllers.stock_exchange.StockExchange>`.
        This can be a proxy to the stock exchange on the server or a local stock exchange.
        """
        return self._stock_exchange

    def start(self):
        """
        Start the connector and create a connection to the server. Starts a
        reactor loop in a separate thread.
        """
        if not self._host:
            print("No host selected, starting local instance.")
            self._database = quartjes.controllers.database.default_database()
            self._stock_exchange = quartjes.controllers.stock_exchange2.StockExchange2()
        else:
            reactor.callLater(0, self._connect) #@UndefinedVariable
            if not reactor.running: #@UndefinedVariable
                self._reactor_thread = ClientConnector._ReactorThread()
                self._reactor_thread.start()
            self._factory.wait_for_connection()

            self._database = self.get_service_interface("database")
            self._stock_exchange = self.get_service_interface("stock_exchange")

    def stop(self):
        """
        Stop the connector, closing the connection.
        The Reactor loop remains active as the reactor cannot be restarted.
        """
        if self._host:
            #threads.blockingCallFromThread(reactor, self._factory.stopTrying)
            threads.blockingCallFromThread(reactor, self._disconnect)
        else:
            self._database = None
            self._stock_exchange.stop()
            self._stock_exchange = None

    def get_service_interface(self, service_name):
        """
        Construct a service interface for the service with the given name. Use
        the service interface to send requests to the corresponding service
        on the Quartjes server.

        Parameters
        ----------
        service_name : string
            Name of the service on the server to which you want a remote
            interface.

        Returns
        -------
        service_interface : :class:`quartjes.connector.services.ServiceInterface`
            An interface to the service.
            Please note that the existence of the service on the server is not
            verified until an actual method call has been done.
        """
        return ServiceInterface(self._factory, service_name)

    def is_connected(self):
        """
        Determine whether the connection to the server is active.
        A local service is also considered connected.

        Returns
        -------
        connected : boolean
            True if connected, False if not.
        """
        if not self._host:
            if self._database:
                return True
            else:
                return False
        else:
            return self._factory.is_connected()

    def _connect(self):
        """
        Internal method called from the reactor to start a new connection.
        """
        #print("Connecting...")
        self._connection = reactor.connectTCP(self.host, self.port, self.factory) #@UndefinedVariable

    def _disconnect(self):
        """
        Internal method called from the reactor to shut down a connection.
        """
        self._factory.stopTrying()
        self._connection.disconnect()

    class _ReactorThread(Thread):
        """
        Thread for running the reactor loop. This thread runs as a daemon, so
        if the main thread and any non daemon threads end, the reactor also
        stops running allowing the application to exit.
        """
        def __init__(self):
            Thread.__init__(self, name="ReactorThread")
            self.daemon = True

        def run(self):
            reactor.run(installSignalHandlers=0) #@UndefinedVariable


def tk_event_listener(F):
    """
    Make a method able to receive events from the connector while running in
    the TK mainloop.
    """
    def listener(self, *pargs, **kwargs):
        self._event_queue.put((F, self, pargs, kwargs))

    return listener


def tk_prepare_instance_for_events(instance):
    """
    Prepare a class to receive events from outside the tk mainloop.
    Call this from the TK mainloop before any events are going to be received.
    Decorate methods to call using tk_event_listener
    """
    def listener():
        try:
            while 1:
                (method, self, pargs, kwargs) = instance._event_queue.get_nowait()
                method(self, *pargs, **kwargs)
        except Queue.Empty:
            pass
        instance.after(100, listener)

    import Queue
    instance._event_queue = Queue.Queue()
    instance.after(100, listener)
|
normal
|
{
"blob_id": "a8f200e0ae1252df4ad6560e5756347cd0e4c8ba",
"index": 5034,
"step-1": "<mask token>\n\n\nclass ClientConnector(object):\n <mask token>\n\n def __init__(self, host=None, port=None):\n self._host = host\n if port:\n self._port = port\n else:\n from quartjes.connector.server import default_port\n self._port = default_port\n self._factory = QuartjesClientFactory()\n self._database = None\n self._stock_exchange = None\n self._connection = None\n\n @property\n def host(self):\n \"\"\"\n Hostname to connect to.\n Can only be changed when there is no active connection.\n \"\"\"\n return self._host\n <mask token>\n <mask token>\n\n @port.setter\n def port(self, value):\n assert not self.is_connected(\n ), 'Port should not be changed will connected.'\n self._port = value\n\n @property\n def factory(self):\n \"\"\"\n The protocol factory used by the client to connect to the server.\n You normally should not need to access this. It is for advanced options.\n \"\"\"\n return self._factory\n\n @property\n def database(self):\n \"\"\"\n Reference to the currently running \n :class:`Database <quartjes.controllers.database.Database>`. \n This can be a proxy to the database on the server or a local database.\n \"\"\"\n return self._database\n\n @property\n def stock_exchange(self):\n \"\"\"\n Reference to the currently running \n :class:`StockExchange <quartjes.controllers.stock_exchange.StockExchange>`. \n This can be a proxy to the stock exchange on the server or a local stock exchange.\n \"\"\"\n return self._stock_exchange\n\n def start(self):\n \"\"\"\n Start the connector and create a connection to the server. Starts a\n reactor loop in a separate thread.\n \"\"\"\n if not self._host:\n print('No host selected, starting local instance.')\n self._database = quartjes.controllers.database.default_database()\n self._stock_exchange = (quartjes.controllers.stock_exchange2.\n StockExchange2())\n else:\n reactor.callLater(0, self._connect)\n if not reactor.running:\n self._reactor_thread = ClientConnector._ReactorThread()\n self._reactor_thread.start()\n self._factory.wait_for_connection()\n self._database = self.get_service_interface('database')\n self._stock_exchange = self.get_service_interface('stock_exchange')\n <mask token>\n\n def get_service_interface(self, service_name):\n \"\"\"\n Construct a service interface for the service with the given name. 
Use\n the service interface to send requests to the corresponding service\n on the Quartjes server.\n \n Parameters\n ----------\n service_name : string\n Name of the service on the server to which you want a remote\n interface.\n \n Returns\n -------\n service_interface : :class:`quartjes.connector.services.ServiceInterface`\n An interface to the service.\n Please note that the existence of the service on the server is not\n verified until an actual method call has been done.\n \"\"\"\n return ServiceInterface(self._factory, service_name)\n\n def is_connected(self):\n \"\"\"\n Determine whether the connection to the server is active.\n A local service is also considered connected.\n \n Returns\n -------\n connected : boolean\n True if connected, False if not.\n \"\"\"\n if not self._host:\n if self._database:\n return True\n else:\n return False\n else:\n return self._factory.is_connected()\n\n def _connect(self):\n \"\"\"\n Internal method called from the reactor to start a new connection.\n \"\"\"\n self._connection = reactor.connectTCP(self.host, self.port, self.\n factory)\n\n def _disconnect(self):\n \"\"\"\n Internal method called from the reactor to shut down a connection.\n \"\"\"\n self._factory.stopTrying()\n self._connection.disconnect()\n\n\n class _ReactorThread(Thread):\n \"\"\"\n Thread for running the reactor loop. This thread runs as a daemon, so\n if the main thread and any non daemon threads end, the reactor also\n stops running allowing the application to exit.\n \"\"\"\n\n def __init__(self):\n Thread.__init__(self, name='ReactorThread')\n self.daemon = True\n\n def run(self):\n reactor.run(installSignalHandlers=0)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ClientConnector(object):\n \"\"\"\n Client side endpoint of the Quartjes connector.\n \n Parameters\n ----------\n host : string\n Host to connect to. If no host is specified, a local server is started.\n port : int\n Port to connect to.\n \n Attributes\n ----------\n host\n port\n factory\n database\n stock_exchange\n \n \n \"\"\"\n\n def __init__(self, host=None, port=None):\n self._host = host\n if port:\n self._port = port\n else:\n from quartjes.connector.server import default_port\n self._port = default_port\n self._factory = QuartjesClientFactory()\n self._database = None\n self._stock_exchange = None\n self._connection = None\n\n @property\n def host(self):\n \"\"\"\n Hostname to connect to.\n Can only be changed when there is no active connection.\n \"\"\"\n return self._host\n\n @host.setter\n def host(self, value):\n assert not self.is_connected(\n ), 'Host should not be changed will connected.'\n self._host = value\n\n @property\n def port(self):\n \"\"\"\n Port to connect to.\n Can only be changed when there is no active connection.\n \"\"\"\n return self._port\n\n @port.setter\n def port(self, value):\n assert not self.is_connected(\n ), 'Port should not be changed will connected.'\n self._port = value\n\n @property\n def factory(self):\n \"\"\"\n The protocol factory used by the client to connect to the server.\n You normally should not need to access this. It is for advanced options.\n \"\"\"\n return self._factory\n\n @property\n def database(self):\n \"\"\"\n Reference to the currently running \n :class:`Database <quartjes.controllers.database.Database>`. \n This can be a proxy to the database on the server or a local database.\n \"\"\"\n return self._database\n\n @property\n def stock_exchange(self):\n \"\"\"\n Reference to the currently running \n :class:`StockExchange <quartjes.controllers.stock_exchange.StockExchange>`. \n This can be a proxy to the stock exchange on the server or a local stock exchange.\n \"\"\"\n return self._stock_exchange\n\n def start(self):\n \"\"\"\n Start the connector and create a connection to the server. Starts a\n reactor loop in a separate thread.\n \"\"\"\n if not self._host:\n print('No host selected, starting local instance.')\n self._database = quartjes.controllers.database.default_database()\n self._stock_exchange = (quartjes.controllers.stock_exchange2.\n StockExchange2())\n else:\n reactor.callLater(0, self._connect)\n if not reactor.running:\n self._reactor_thread = ClientConnector._ReactorThread()\n self._reactor_thread.start()\n self._factory.wait_for_connection()\n self._database = self.get_service_interface('database')\n self._stock_exchange = self.get_service_interface('stock_exchange')\n\n def stop(self):\n \"\"\"\n Stop the connector, closing the connection.\n The Reactor loop remains active as the reactor cannot be restarted.\n \"\"\"\n if self._host:\n threads.blockingCallFromThread(reactor, self._disconnect)\n else:\n self._database = None\n self._stock_exchange.stop()\n self._stock_exchange = None\n\n def get_service_interface(self, service_name):\n \"\"\"\n Construct a service interface for the service with the given name. 
Use\n the service interface to send requests to the corresponding service\n on the Quartjes server.\n \n Parameters\n ----------\n service_name : string\n Name of the service on the server to which you want a remote\n interface.\n \n Returns\n -------\n service_interface : :class:`quartjes.connector.services.ServiceInterface`\n An interface to the service.\n Please note that the existence of the service on the server is not\n verified until an actual method call has been done.\n \"\"\"\n return ServiceInterface(self._factory, service_name)\n\n def is_connected(self):\n \"\"\"\n Determine whether the connection to the server is active.\n A local service is also considered connected.\n \n Returns\n -------\n connected : boolean\n True if connected, False if not.\n \"\"\"\n if not self._host:\n if self._database:\n return True\n else:\n return False\n else:\n return self._factory.is_connected()\n\n def _connect(self):\n \"\"\"\n Internal method called from the reactor to start a new connection.\n \"\"\"\n self._connection = reactor.connectTCP(self.host, self.port, self.\n factory)\n\n def _disconnect(self):\n \"\"\"\n Internal method called from the reactor to shut down a connection.\n \"\"\"\n self._factory.stopTrying()\n self._connection.disconnect()\n\n\n class _ReactorThread(Thread):\n \"\"\"\n Thread for running the reactor loop. This thread runs as a daemon, so\n if the main thread and any non daemon threads end, the reactor also\n stops running allowing the application to exit.\n \"\"\"\n\n def __init__(self):\n Thread.__init__(self, name='ReactorThread')\n self.daemon = True\n\n def run(self):\n reactor.run(installSignalHandlers=0)\n\n\n<mask token>\n",
"step-3": "<mask token>\n__author__ = 'Rob van der Most'\n__docformat__ = 'restructuredtext en'\n<mask token>\n\n\nclass ClientConnector(object):\n \"\"\"\n Client side endpoint of the Quartjes connector.\n \n Parameters\n ----------\n host : string\n Host to connect to. If no host is specified, a local server is started.\n port : int\n Port to connect to.\n \n Attributes\n ----------\n host\n port\n factory\n database\n stock_exchange\n \n \n \"\"\"\n\n def __init__(self, host=None, port=None):\n self._host = host\n if port:\n self._port = port\n else:\n from quartjes.connector.server import default_port\n self._port = default_port\n self._factory = QuartjesClientFactory()\n self._database = None\n self._stock_exchange = None\n self._connection = None\n\n @property\n def host(self):\n \"\"\"\n Hostname to connect to.\n Can only be changed when there is no active connection.\n \"\"\"\n return self._host\n\n @host.setter\n def host(self, value):\n assert not self.is_connected(\n ), 'Host should not be changed will connected.'\n self._host = value\n\n @property\n def port(self):\n \"\"\"\n Port to connect to.\n Can only be changed when there is no active connection.\n \"\"\"\n return self._port\n\n @port.setter\n def port(self, value):\n assert not self.is_connected(\n ), 'Port should not be changed will connected.'\n self._port = value\n\n @property\n def factory(self):\n \"\"\"\n The protocol factory used by the client to connect to the server.\n You normally should not need to access this. It is for advanced options.\n \"\"\"\n return self._factory\n\n @property\n def database(self):\n \"\"\"\n Reference to the currently running \n :class:`Database <quartjes.controllers.database.Database>`. \n This can be a proxy to the database on the server or a local database.\n \"\"\"\n return self._database\n\n @property\n def stock_exchange(self):\n \"\"\"\n Reference to the currently running \n :class:`StockExchange <quartjes.controllers.stock_exchange.StockExchange>`. \n This can be a proxy to the stock exchange on the server or a local stock exchange.\n \"\"\"\n return self._stock_exchange\n\n def start(self):\n \"\"\"\n Start the connector and create a connection to the server. Starts a\n reactor loop in a separate thread.\n \"\"\"\n if not self._host:\n print('No host selected, starting local instance.')\n self._database = quartjes.controllers.database.default_database()\n self._stock_exchange = (quartjes.controllers.stock_exchange2.\n StockExchange2())\n else:\n reactor.callLater(0, self._connect)\n if not reactor.running:\n self._reactor_thread = ClientConnector._ReactorThread()\n self._reactor_thread.start()\n self._factory.wait_for_connection()\n self._database = self.get_service_interface('database')\n self._stock_exchange = self.get_service_interface('stock_exchange')\n\n def stop(self):\n \"\"\"\n Stop the connector, closing the connection.\n The Reactor loop remains active as the reactor cannot be restarted.\n \"\"\"\n if self._host:\n threads.blockingCallFromThread(reactor, self._disconnect)\n else:\n self._database = None\n self._stock_exchange.stop()\n self._stock_exchange = None\n\n def get_service_interface(self, service_name):\n \"\"\"\n Construct a service interface for the service with the given name. 
Use\n the service interface to send requests to the corresponding service\n on the Quartjes server.\n \n Parameters\n ----------\n service_name : string\n Name of the service on the server to which you want a remote\n interface.\n \n Returns\n -------\n service_interface : :class:`quartjes.connector.services.ServiceInterface`\n An interface to the service.\n Please note that the existence of the service on the server is not\n verified until an actual method call has been done.\n \"\"\"\n return ServiceInterface(self._factory, service_name)\n\n def is_connected(self):\n \"\"\"\n Determine whether the connection to the server is active.\n A local service is also considered connected.\n \n Returns\n -------\n connected : boolean\n True if connected, False if not.\n \"\"\"\n if not self._host:\n if self._database:\n return True\n else:\n return False\n else:\n return self._factory.is_connected()\n\n def _connect(self):\n \"\"\"\n Internal method called from the reactor to start a new connection.\n \"\"\"\n self._connection = reactor.connectTCP(self.host, self.port, self.\n factory)\n\n def _disconnect(self):\n \"\"\"\n Internal method called from the reactor to shut down a connection.\n \"\"\"\n self._factory.stopTrying()\n self._connection.disconnect()\n\n\n class _ReactorThread(Thread):\n \"\"\"\n Thread for running the reactor loop. This thread runs as a daemon, so\n if the main thread and any non daemon threads end, the reactor also\n stops running allowing the application to exit.\n \"\"\"\n\n def __init__(self):\n Thread.__init__(self, name='ReactorThread')\n self.daemon = True\n\n def run(self):\n reactor.run(installSignalHandlers=0)\n\n\ndef tk_event_listener(F):\n \"\"\"\n Make a method able to receive events from the connector while running in\n the TK mainloop.\n \"\"\"\n\n def listener(self, *pargs, **kwargs):\n self._event_queue.put((F, self, pargs, kwargs))\n return listener\n\n\ndef tk_prepare_instance_for_events(instance):\n \"\"\"\n Prepare a class to receive events from outside the tk mainloop.\n Call this from the TK mainloop before any events are going to be received.\n Decorate methods to call using tk_event_listener\n \"\"\"\n\n def listener():\n try:\n while 1:\n method, self, pargs, kwargs = instance._event_queue.get_nowait(\n )\n method(self, *pargs, **kwargs)\n except Queue.Empty:\n pass\n instance.after(100, listener)\n import Queue\n instance._event_queue = Queue.Queue()\n instance.after(100, listener)\n",
"step-4": "<mask token>\n__author__ = 'Rob van der Most'\n__docformat__ = 'restructuredtext en'\nfrom quartjes.connector.protocol import QuartjesClientFactory\nfrom twisted.internet import reactor, threads\nfrom threading import Thread\nfrom quartjes.connector.services import ServiceInterface\nimport quartjes.controllers.database\nimport quartjes.controllers.stock_exchange2\n\n\nclass ClientConnector(object):\n \"\"\"\n Client side endpoint of the Quartjes connector.\n \n Parameters\n ----------\n host : string\n Host to connect to. If no host is specified, a local server is started.\n port : int\n Port to connect to.\n \n Attributes\n ----------\n host\n port\n factory\n database\n stock_exchange\n \n \n \"\"\"\n\n def __init__(self, host=None, port=None):\n self._host = host\n if port:\n self._port = port\n else:\n from quartjes.connector.server import default_port\n self._port = default_port\n self._factory = QuartjesClientFactory()\n self._database = None\n self._stock_exchange = None\n self._connection = None\n\n @property\n def host(self):\n \"\"\"\n Hostname to connect to.\n Can only be changed when there is no active connection.\n \"\"\"\n return self._host\n\n @host.setter\n def host(self, value):\n assert not self.is_connected(\n ), 'Host should not be changed will connected.'\n self._host = value\n\n @property\n def port(self):\n \"\"\"\n Port to connect to.\n Can only be changed when there is no active connection.\n \"\"\"\n return self._port\n\n @port.setter\n def port(self, value):\n assert not self.is_connected(\n ), 'Port should not be changed will connected.'\n self._port = value\n\n @property\n def factory(self):\n \"\"\"\n The protocol factory used by the client to connect to the server.\n You normally should not need to access this. It is for advanced options.\n \"\"\"\n return self._factory\n\n @property\n def database(self):\n \"\"\"\n Reference to the currently running \n :class:`Database <quartjes.controllers.database.Database>`. \n This can be a proxy to the database on the server or a local database.\n \"\"\"\n return self._database\n\n @property\n def stock_exchange(self):\n \"\"\"\n Reference to the currently running \n :class:`StockExchange <quartjes.controllers.stock_exchange.StockExchange>`. \n This can be a proxy to the stock exchange on the server or a local stock exchange.\n \"\"\"\n return self._stock_exchange\n\n def start(self):\n \"\"\"\n Start the connector and create a connection to the server. Starts a\n reactor loop in a separate thread.\n \"\"\"\n if not self._host:\n print('No host selected, starting local instance.')\n self._database = quartjes.controllers.database.default_database()\n self._stock_exchange = (quartjes.controllers.stock_exchange2.\n StockExchange2())\n else:\n reactor.callLater(0, self._connect)\n if not reactor.running:\n self._reactor_thread = ClientConnector._ReactorThread()\n self._reactor_thread.start()\n self._factory.wait_for_connection()\n self._database = self.get_service_interface('database')\n self._stock_exchange = self.get_service_interface('stock_exchange')\n\n def stop(self):\n \"\"\"\n Stop the connector, closing the connection.\n The Reactor loop remains active as the reactor cannot be restarted.\n \"\"\"\n if self._host:\n threads.blockingCallFromThread(reactor, self._disconnect)\n else:\n self._database = None\n self._stock_exchange.stop()\n self._stock_exchange = None\n\n def get_service_interface(self, service_name):\n \"\"\"\n Construct a service interface for the service with the given name. 
Use\n the service interface to send requests to the corresponding service\n on the Quartjes server.\n \n Parameters\n ----------\n service_name : string\n Name of the service on the server to which you want a remote\n interface.\n \n Returns\n -------\n service_interface : :class:`quartjes.connector.services.ServiceInterface`\n An interface to the service.\n Please note that the existence of the service on the server is not\n verified until an actual method call has been done.\n \"\"\"\n return ServiceInterface(self._factory, service_name)\n\n def is_connected(self):\n \"\"\"\n Determine whether the connection to the server is active.\n A local service is also considered connected.\n \n Returns\n -------\n connected : boolean\n True if connected, False if not.\n \"\"\"\n if not self._host:\n if self._database:\n return True\n else:\n return False\n else:\n return self._factory.is_connected()\n\n def _connect(self):\n \"\"\"\n Internal method called from the reactor to start a new connection.\n \"\"\"\n self._connection = reactor.connectTCP(self.host, self.port, self.\n factory)\n\n def _disconnect(self):\n \"\"\"\n Internal method called from the reactor to shut down a connection.\n \"\"\"\n self._factory.stopTrying()\n self._connection.disconnect()\n\n\n class _ReactorThread(Thread):\n \"\"\"\n Thread for running the reactor loop. This thread runs as a daemon, so\n if the main thread and any non daemon threads end, the reactor also\n stops running allowing the application to exit.\n \"\"\"\n\n def __init__(self):\n Thread.__init__(self, name='ReactorThread')\n self.daemon = True\n\n def run(self):\n reactor.run(installSignalHandlers=0)\n\n\ndef tk_event_listener(F):\n \"\"\"\n Make a method able to receive events from the connector while running in\n the TK mainloop.\n \"\"\"\n\n def listener(self, *pargs, **kwargs):\n self._event_queue.put((F, self, pargs, kwargs))\n return listener\n\n\ndef tk_prepare_instance_for_events(instance):\n \"\"\"\n Prepare a class to receive events from outside the tk mainloop.\n Call this from the TK mainloop before any events are going to be received.\n Decorate methods to call using tk_event_listener\n \"\"\"\n\n def listener():\n try:\n while 1:\n method, self, pargs, kwargs = instance._event_queue.get_nowait(\n )\n method(self, *pargs, **kwargs)\n except Queue.Empty:\n pass\n instance.after(100, listener)\n import Queue\n instance._event_queue = Queue.Queue()\n instance.after(100, listener)\n",
"step-5": "\"\"\"\nClient component of the Quartjes connector. Use the ClientConnector to create\na connection to the Quartjes server.\n\nUsage\n-----\nCreate an instance of this object with the host and port to connect to.\nCall the start() method to establish the connection.\nNow the database and the stock_exchange variable can be used to communicate\nwith the server.\n\nIf you do not wish to connect to a server, but run a local server instead,\ncreate the object without any arguments.\n\nExample\n-------\n>>> conn = ClientConnector(\"192.168.1.1\")\n>>> conn.start()\n>>> conn.database.get_drinks()\n\nAvailable server methods\n------------------------\n\nCurrently two server objects are made available upon connection. Please see the\ndocumentation for the server object for available methods and events:\n\n* database: :class:`quartjes.controllers.database.Database`\n* stock_exchange: :class:`quartjes.controllers.stock_exchange.StockExchange`\n\nAdvanced\n--------\n\nUse the method get_service_interface to retrieve additional interfaces to a server side\nservice.\n\nAs long as the connector is running, it will keep trying to reconnect any\nlost connections using an exponential back-off.\n\nClientConnector class\n---------------------\n\n\"\"\"\n__author__ = \"Rob van der Most\"\n__docformat__ = \"restructuredtext en\"\n\nfrom quartjes.connector.protocol import QuartjesClientFactory\nfrom twisted.internet import reactor, threads\nfrom threading import Thread\nfrom quartjes.connector.services import ServiceInterface\nimport quartjes.controllers.database\nimport quartjes.controllers.stock_exchange2\n\nclass ClientConnector(object):\n \"\"\"\n Client side endpoint of the Quartjes connector.\n \n Parameters\n ----------\n host : string\n Host to connect to. If no host is specified, a local server is started.\n port : int\n Port to connect to.\n \n Attributes\n ----------\n host\n port\n factory\n database\n stock_exchange\n \n \n \"\"\"\n\n def __init__(self, host=None, port=None):\n self._host = host\n if port:\n self._port = port\n else:\n from quartjes.connector.server import default_port\n self._port = default_port\n self._factory = QuartjesClientFactory()\n self._database = None\n self._stock_exchange = None\n self._connection = None\n\n @property\n def host(self):\n \"\"\"\n Hostname to connect to.\n Can only be changed when there is no active connection.\n \"\"\"\n return self._host\n \n @host.setter\n def host(self, value):\n assert not self.is_connected(), \"Host should not be changed will connected.\"\n self._host = value\n\n @property\n def port(self):\n \"\"\"\n Port to connect to.\n Can only be changed when there is no active connection.\n \"\"\"\n return self._port\n \n @port.setter\n def port(self, value):\n assert not self.is_connected(), \"Port should not be changed will connected.\"\n self._port = value\n \n @property\n def factory(self):\n \"\"\"\n The protocol factory used by the client to connect to the server.\n You normally should not need to access this. It is for advanced options.\n \"\"\"\n return self._factory\n \n @property\n def database(self):\n \"\"\"\n Reference to the currently running \n :class:`Database <quartjes.controllers.database.Database>`. \n This can be a proxy to the database on the server or a local database.\n \"\"\"\n return self._database\n \n @property\n def stock_exchange(self):\n \"\"\"\n Reference to the currently running \n :class:`StockExchange <quartjes.controllers.stock_exchange.StockExchange>`. 
\n This can be a proxy to the stock exchange on the server or a local stock exchange.\n \"\"\"\n return self._stock_exchange\n \n def start(self):\n \"\"\"\n Start the connector and create a connection to the server. Starts a\n reactor loop in a separate thread.\n \"\"\"\n if not self._host:\n print(\"No host selected, starting local instance.\")\n self._database = quartjes.controllers.database.default_database()\n self._stock_exchange = quartjes.controllers.stock_exchange2.StockExchange2()\n else:\n reactor.callLater(0, self._connect) #@UndefinedVariable\n if not reactor.running: #@UndefinedVariable\n self._reactor_thread = ClientConnector._ReactorThread()\n self._reactor_thread.start()\n self._factory.wait_for_connection()\n\n self._database = self.get_service_interface(\"database\")\n self._stock_exchange = self.get_service_interface(\"stock_exchange\")\n\n def stop(self):\n \"\"\"\n Stop the connector, closing the connection.\n The Reactor loop remains active as the reactor cannot be restarted.\n \"\"\"\n if self._host:\n #threads.blockingCallFromThread(reactor, self._factory.stopTrying)\n threads.blockingCallFromThread(reactor, self._disconnect)\n else:\n self._database = None\n self._stock_exchange.stop()\n self._stock_exchange = None\n\n def get_service_interface(self, service_name):\n \"\"\"\n Construct a service interface for the service with the given name. Use\n the service interface to send requests to the corresponding service\n on the Quartjes server.\n \n Parameters\n ----------\n service_name : string\n Name of the service on the server to which you want a remote\n interface.\n \n Returns\n -------\n service_interface : :class:`quartjes.connector.services.ServiceInterface`\n An interface to the service.\n Please note that the existence of the service on the server is not\n verified until an actual method call has been done.\n \"\"\"\n return ServiceInterface(self._factory, service_name)\n\n def is_connected(self):\n \"\"\"\n Determine whether the connection to the server is active.\n A local service is also considered connected.\n \n Returns\n -------\n connected : boolean\n True if connected, False if not.\n \"\"\"\n if not self._host:\n if self._database:\n return True\n else:\n return False\n else:\n return self._factory.is_connected()\n\n def _connect(self):\n \"\"\"\n Internal method called from the reactor to start a new connection.\n \"\"\"\n #print(\"Connecting...\")\n self._connection = reactor.connectTCP(self.host, self.port, self.factory) #@UndefinedVariable\n\n def _disconnect(self):\n \"\"\"\n Internal method called from the reactor to shut down a connection.\n \"\"\"\n self._factory.stopTrying()\n self._connection.disconnect()\n\n class _ReactorThread(Thread):\n \"\"\"\n Thread for running the reactor loop. 
This thread runs as a daemon, so\n if the main thread and any non daemon threads end, the reactor also\n stops running allowing the application to exit.\n \"\"\"\n def __init__(self):\n Thread.__init__(self, name=\"ReactorThread\")\n self.daemon = True\n\n def run(self):\n reactor.run(installSignalHandlers=0) #@UndefinedVariable\n\ndef tk_event_listener(F):\n \"\"\"\n Make a method able to receive events from the connector while running in\n the TK mainloop.\n \"\"\"\n def listener(self, *pargs, **kwargs):\n self._event_queue.put((F, self, pargs, kwargs))\n \n return listener\n\ndef tk_prepare_instance_for_events(instance):\n \"\"\"\n Prepare a class to receive events from outside the tk mainloop.\n Call this from the TK mainloop before any events are going to be received.\n Decorate methods to call using tk_event_listener\n \"\"\"\n def listener():\n try:\n while 1:\n (method, self, pargs, kwargs) = instance._event_queue.get_nowait()\n method(self, *pargs, **kwargs)\n except Queue.Empty:\n pass\n instance.after(100, listener)\n \n import Queue\n instance._event_queue = Queue.Queue()\n instance.after(100, listener)\n \n",
"step-ids": [
12,
16,
19,
20,
21
]
}
|
[
12,
16,
19,
20,
21
] |
import sys
import re
import math
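# Read customer counts, grumpy flags and the window length x from stdin, then
# brute-force every placement of the x-minute window (this looks like
# "Grumpy Bookstore Owner", LeetCode 1052).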
s=sys.stdin.read()
digits=re.findall(r"-?\d+",s)
listline= [int(e) for e in digits ]
x=listline[-1]
del(listline[-1])
n=len(listline)//2
customers=listline[:n]
grumpy=listline[n:]
maxcus=0
if x==n:
print(sum(customers))
else:
    for i in range(n-x+1): # every possible start of the x-minute window
        total=0
        for j in range(i,i+x):
            total+=customers[j] # everyone inside the window counts as satisfied
for j in range(i):
if grumpy[j]!=1:
total+=customers[j]
for j in range(i+x,n):
if grumpy[j]!=1:
total+=customers[j]
maxcus=max(total,maxcus)
print(maxcus)
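
# A linear-time sketch of the same computation (assuming the task is the
# classic "grumpy bookstore owner" problem): count the customers who are
# satisfied anyway, then slide the x-minute window once to find the best
# extra gain. max_satisfied is an illustrative name, not part of the script.
def max_satisfied(customers, grumpy, x):
    base = sum(c for c, g in zip(customers, grumpy) if g == 0)          # satisfied regardless
    gain = sum(c for c, g in zip(customers[:x], grumpy[:x]) if g == 1)  # recovered by first window
    best = gain
    for i in range(x, len(customers)):
        gain += customers[i] * grumpy[i]          # window gains minute i
        gain -= customers[i - x] * grumpy[i - x]  # and drops minute i-x
        best = max(best, gain)
    return base + best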
|
normal
|
{
"blob_id": "24bc43c1fe035430afde05fec1330e27fb5f1d86",
"index": 8809,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndel listline[-1]\n<mask token>\nif x == n:\n print(sum(customers))\nelse:\n for i in range(n - x):\n total = 0\n for j in range(i, i + x):\n total += customers[i]\n for j in range(i):\n if grumpy[j] != 1:\n total += customers[j]\n for j in range(i + x, n):\n if grumpy[j] != 1:\n total += customers[j]\n maxcus = max(total, maxcus)\n print(maxcus)\n",
"step-3": "<mask token>\ns = sys.stdin.read()\ndigits = re.findall('-?\\\\d+', s)\nlistline = [int(e) for e in digits]\nx = listline[-1]\ndel listline[-1]\nn = len(listline) // 2\ncustomers = listline[:n]\ngrumpy = listline[n:]\nmaxcus = 0\nif x == n:\n print(sum(customers))\nelse:\n for i in range(n - x):\n total = 0\n for j in range(i, i + x):\n total += customers[i]\n for j in range(i):\n if grumpy[j] != 1:\n total += customers[j]\n for j in range(i + x, n):\n if grumpy[j] != 1:\n total += customers[j]\n maxcus = max(total, maxcus)\n print(maxcus)\n",
"step-4": "import sys\nimport re\nimport math\ns = sys.stdin.read()\ndigits = re.findall('-?\\\\d+', s)\nlistline = [int(e) for e in digits]\nx = listline[-1]\ndel listline[-1]\nn = len(listline) // 2\ncustomers = listline[:n]\ngrumpy = listline[n:]\nmaxcus = 0\nif x == n:\n print(sum(customers))\nelse:\n for i in range(n - x):\n total = 0\n for j in range(i, i + x):\n total += customers[i]\n for j in range(i):\n if grumpy[j] != 1:\n total += customers[j]\n for j in range(i + x, n):\n if grumpy[j] != 1:\n total += customers[j]\n maxcus = max(total, maxcus)\n print(maxcus)\n",
"step-5": "import sys\nimport re\nimport math\ns=sys.stdin.read()\ndigits=re.findall(r\"-?\\d+\",s)\nlistline= [int(e) for e in digits ]\nx=listline[-1]\ndel(listline[-1])\nn=len(listline)//2\ncustomers=listline[:n]\ngrumpy=listline[n:]\nmaxcus=0\nif x==n:\n print(sum(customers))\nelse:\n for i in range(n-x):\n total=0\n for j in range(i,i+x):\n total+=customers[i]\n for j in range(i):\n if grumpy[j]!=1:\n total+=customers[j]\n for j in range(i+x,n):\n if grumpy[j]!=1:\n total+=customers[j]\n maxcus=max(total,maxcus)\n print(maxcus)\n ",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
def main():
    # input
    N = int(input())

    # processing / output: print i, i squared and i cubed for 1..N
    for i in range(1, N + 1):
        print("%d %d %d" % (i, i**2, i**3))


if __name__ == '__main__':
    main()
|
normal
|
{
"blob_id": "b55984da73d3cfb3109a52990a0d4d05a27d51a5",
"index": 1794,
"step-1": "<mask token>\n",
"step-2": "def main():\n N = int(input())\n num = 1\n for i in range(N + 1):\n if i > 0:\n print('%d %d %d' % (i, i ** 2, i ** 3))\n num += 1\n\n\n<mask token>\n",
"step-3": "def main():\n N = int(input())\n num = 1\n for i in range(N + 1):\n if i > 0:\n print('%d %d %d' % (i, i ** 2, i ** 3))\n num += 1\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "def main():\r\n #entrada\r\n N = int(input())\r\n num = 1\r\n\r\n #processamento\r\n for i in range (N+1):\r\n if i > 0:\r\n #saida\r\n print(\"%d %d %d\" %(i, i**2, i**3))\r\n num +=1\r\n\r\n\r\nif __name__ == '__main__':\r\n main()",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import pickle
from pathlib import Path
from rich.console import Console
from fourierdb import FourierDocument, FourierCollection, FourierDB
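
# Build a small FourierDB (two collections of documents) and pickle it to disk.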
console = Console()
doc = FourierDocument({"bar": "eggs", "xyz": "spam"})
doc2 = FourierDocument({"a": "foo", "b": "bar"})
doc3 = FourierDocument({"abc": "xyz"})
doc4 = FourierDocument({1: 2, 3: 4, 5: 6})
doc5 = FourierDocument({"hello": [1, 2, 3, 4, 5, 6, 7, 8, 9]})
FOURIER_DIR = Path.home() / ".fourier"
FOURIER_LOGS = FOURIER_DIR / "logs"
FOURIER_DBS = FOURIER_DIR / "databases"
coll = FourierCollection("coll", doc, doc2)
coll2 = FourierCollection("coll2", doc3, doc4, doc5)
db = FourierDB("db")
db.add_collection(coll)
db.add_collection(coll2)
FOURIER_DBS.mkdir(parents=True, exist_ok=True)
with open(FOURIER_DBS / "db.fourier", "wb") as f:  # hypothetical target path; pickle needs binary mode
    pickle.dump(db, f)
|
normal
|
{
"blob_id": "f15f96658130ac9bba748a518371ad80d9772fbc",
"index": 4121,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndb.add_collection(coll)\ndb.add_collection(coll2)\npickle.dump(db, open(''))\n",
"step-3": "<mask token>\nconsole = Console()\ndoc = FourierDocument({'bar': 'eggs', 'xyz': 'spam'})\ndoc2 = FourierDocument({'a': 'foo', 'b': 'bar'})\ndoc3 = FourierDocument({'abc': 'xyz'})\ndoc4 = FourierDocument({(1): 2, (3): 4, (5): 6})\ndoc5 = FourierDocument({'hello': [1, 2, 3, 4, 5, 6, 7, 8, 9]})\nFOURIER_DIR = Path.home() / '.fourier'\nFOURIER_LOGS = FOURIER_DIR / 'logs'\nFOURIER_DBS = FOURIER_DIR / 'databases'\ncoll = FourierCollection('coll', doc, doc2)\ncoll2 = FourierCollection('coll2', doc3, doc4, doc5)\ndb = FourierDB('db')\ndb.add_collection(coll)\ndb.add_collection(coll2)\npickle.dump(db, open(''))\n",
"step-4": "import pickle\nfrom pathlib import Path\nfrom rich.console import Console\nfrom fourierdb import FourierDocument, FourierCollection, FourierDB\nconsole = Console()\ndoc = FourierDocument({'bar': 'eggs', 'xyz': 'spam'})\ndoc2 = FourierDocument({'a': 'foo', 'b': 'bar'})\ndoc3 = FourierDocument({'abc': 'xyz'})\ndoc4 = FourierDocument({(1): 2, (3): 4, (5): 6})\ndoc5 = FourierDocument({'hello': [1, 2, 3, 4, 5, 6, 7, 8, 9]})\nFOURIER_DIR = Path.home() / '.fourier'\nFOURIER_LOGS = FOURIER_DIR / 'logs'\nFOURIER_DBS = FOURIER_DIR / 'databases'\ncoll = FourierCollection('coll', doc, doc2)\ncoll2 = FourierCollection('coll2', doc3, doc4, doc5)\ndb = FourierDB('db')\ndb.add_collection(coll)\ndb.add_collection(coll2)\npickle.dump(db, open(''))\n",
"step-5": "import pickle\nfrom pathlib import Path\nfrom rich.console import Console\nfrom fourierdb import FourierDocument, FourierCollection, FourierDB\n\nconsole = Console()\n\ndoc = FourierDocument({\"bar\": \"eggs\", \"xyz\": \"spam\"})\ndoc2 = FourierDocument({\"a\": \"foo\", \"b\": \"bar\"})\ndoc3 = FourierDocument({\"abc\": \"xyz\"})\ndoc4 = FourierDocument({1: 2, 3: 4, 5: 6})\ndoc5 = FourierDocument({\"hello\": [1, 2, 3, 4, 5, 6, 7, 8, 9]})\nFOURIER_DIR = Path.home() / \".fourier\"\nFOURIER_LOGS = FOURIER_DIR / \"logs\"\nFOURIER_DBS = FOURIER_DIR / \"databases\"\ncoll = FourierCollection(\"coll\", doc, doc2)\ncoll2 = FourierCollection(\"coll2\", doc3, doc4, doc5)\n\ndb = FourierDB(\"db\")\n\ndb.add_collection(coll)\ndb.add_collection(coll2)\n\npickle.dump(db, open(\"\"))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.contrib import admin
from django.urls import path
from . import views
from .views import index
from .views import Login , logout
from .views import CheckOut
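
# URL routes mixing function views (index, cart, order, ...) with the
# class-based Login and CheckOut views imported above.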
urlpatterns = [
path("",views.index, name="index"),
path('login', Login.as_view(), name='login'),
path('logout', logout , name='logout'),
path("cart/",views.cart , name="cart"),
path("order/",views.order , name="order"),
    path('check-out', CheckOut.as_view(), name='checkout'),  # class-based view, like Login
path("track/",views.tracker, name="tracker"),
path("search/",views.search, name="search"),
path("checkout/",views.check, name="checkout"),
path("productview/",views.proview, name="see"),
]
|
normal
|
{
"blob_id": "c8aa93a33a6513129b4980180c4eb8d5d5eb3b5b",
"index": 2592,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('', views.index, name='index'), path('login', Login.\n as_view(), name='login'), path('logout', logout, name='logout'), path(\n 'cart/', views.cart, name='cart'), path('order/', views.order, name=\n 'order'), path('check-out', views.CheckOut, name='checkout'), path(\n 'track/', views.tracker, name='tracker'), path('search/', views.search,\n name='search'), path('checkout/', views.check, name='checkout'), path(\n 'productview/', views.proview, name='see')]\n",
"step-3": "from django.contrib import admin\nfrom django.urls import path\nfrom . import views\nfrom .views import index\nfrom .views import Login, logout\nfrom .views import CheckOut\nurlpatterns = [path('', views.index, name='index'), path('login', Login.\n as_view(), name='login'), path('logout', logout, name='logout'), path(\n 'cart/', views.cart, name='cart'), path('order/', views.order, name=\n 'order'), path('check-out', views.CheckOut, name='checkout'), path(\n 'track/', views.tracker, name='tracker'), path('search/', views.search,\n name='search'), path('checkout/', views.check, name='checkout'), path(\n 'productview/', views.proview, name='see')]\n",
"step-4": "\r\n\r\nfrom django.contrib import admin\r\nfrom django.urls import path\r\nfrom . import views\r\nfrom .views import index\r\nfrom .views import Login , logout\r\n\r\nfrom .views import CheckOut\r\n\r\n\r\n\r\nurlpatterns = [\r\n path(\"\",views.index, name=\"index\"),\r\n \r\n path('login', Login.as_view(), name='login'),\r\n path('logout', logout , name='logout'),\r\n path(\"cart/\",views.cart , name=\"cart\"),\r\n path(\"order/\",views.order , name=\"order\"),\r\n \r\n path('check-out', views.CheckOut , name='checkout'),\r\n path(\"track/\",views.tracker, name=\"tracker\"),\r\n path(\"search/\",views.search, name=\"search\"),\r\n path(\"checkout/\",views.check, name=\"checkout\"),\r\n path(\"productview/\",views.proview, name=\"see\"),\r\n]\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Digit product from the Champernowne constant 0.123456789101112... :
# d(1) * d(10) * d(100) * ... * d(1000000) (presumably Project Euler 40).
i = 0
num = ''
while len(num) < 1e6:
    i += 1
    num += str(i)

prod = 1
for k in range(7):
    prod *= int(num[10 ** k - 1])

print(prod)
|
normal
|
{
"blob_id": "f19056222be713c1556817d852af14d04483c9a3",
"index": 5931,
"step-1": "i = 0\nnum = ''\n\nwhile len(num) < 1e6:\n i += 1\n num += str(i)\n\nprod = 1\nfor i in xrange(0, 7):\n prod *= int(num[10 ** i - 1])\n\nprint prod\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import json
from typing import TYPE_CHECKING
import pytest
from eth_utils import is_checksum_address
from rotkehlchen.globaldb.handler import GlobalDBHandler
from rotkehlchen.types import ChainID
if TYPE_CHECKING:
from rotkehlchen.chain.ethereum.node_inquirer import EthereumInquirer
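
# Sanity checks for the contract/abi tables shipped in the packaged global DB,
# and for the runtime fallback that copies missing rows out of it.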
def test_evm_contracts_data(globaldb):
"""Test that all evm contract entries in the packaged global DB have legal data"""
serialized_chain_ids = [x.serialize_for_db() for x in ChainID]
with globaldb.conn.read_ctx() as cursor:
cursor.execute('SELECT address, chain_id, abi, deployed_block FROM contract_data')
for entry in cursor:
assert is_checksum_address(entry[0])
assert isinstance(entry[1], int) and entry[1] in serialized_chain_ids
assert isinstance(entry[2], int)
assert isinstance(entry[3], int) and entry[3] > 0
def test_evm_abi_data(globaldb):
"""Test that the evm abi entries in the packaged globalDB have legal data"""
abis_set = {0}
with globaldb.conn.read_ctx() as cursor:
cursor.execute('SELECT id, value FROM contract_abi')
for entry in cursor:
assert isinstance(entry[0], int)
# read the abi, and make sure it's the most compressed version it can be
# and that it's unique
assert isinstance(entry[1], str)
json_abi = json.loads(entry[1])
serialized_abi = json.dumps(json_abi, separators=(',', ':'))
assert serialized_abi == entry[1]
assert entry[1] not in abis_set
abis_set.add(entry[1])
@pytest.mark.parametrize('sql_vm_instructions_cb', [2])
def test_fallback_to_packaged_db(ethereum_inquirer: 'EthereumInquirer'):
"""
Test that if a contract / abi is missing in the globaldb, it is searched in the packaged db.
"""
with GlobalDBHandler().conn.read_ctx() as cursor:
# Delete one contract and its abi
cursor.execute(
'SELECT contract_data.address, contract_abi.value FROM contract_data INNER JOIN '
'contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 LIMIT 1',
)
(address, abi) = cursor.fetchone() # There has to be at least one entry
cursor.execute('DELETE FROM contract_data WHERE address=? AND chain_id=1', (address,))
cursor.execute('DELETE FROM contract_abi WHERE value=?', (abi,))
# Now query the contract, let it get to packaged global DB and also see that
# database packaged_db is locked is also not raised
ethereum_inquirer.contracts.contract(address)
with GlobalDBHandler().conn.read_ctx() as cursor:
# Check that the contract and the abi were copied to the global db
cursor.execute(
'SELECT COUNT(*) FROM contract_data INNER JOIN '
'contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 AND '
'contract_data.address=? AND contract_abi.value=?',
(address, abi),
)
assert cursor.fetchone()[0] == 1
|
normal
|
{
"blob_id": "52dc8a4f9165a88dddc1da16e0adb045c4d851ed",
"index": 5017,
"step-1": "<mask token>\n\n\ndef test_evm_contracts_data(globaldb):\n \"\"\"Test that all evm contract entries in the packaged global DB have legal data\"\"\"\n serialized_chain_ids = [x.serialize_for_db() for x in ChainID]\n with globaldb.conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT address, chain_id, abi, deployed_block FROM contract_data')\n for entry in cursor:\n assert is_checksum_address(entry[0])\n assert isinstance(entry[1], int) and entry[1\n ] in serialized_chain_ids\n assert isinstance(entry[2], int)\n assert isinstance(entry[3], int) and entry[3] > 0\n\n\n<mask token>\n\n\[email protected]('sql_vm_instructions_cb', [2])\ndef test_fallback_to_packaged_db(ethereum_inquirer: 'EthereumInquirer'):\n \"\"\"\n Test that if a contract / abi is missing in the globaldb, it is searched in the packaged db.\n \"\"\"\n with GlobalDBHandler().conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT contract_data.address, contract_abi.value FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 LIMIT 1'\n )\n address, abi = cursor.fetchone()\n cursor.execute(\n 'DELETE FROM contract_data WHERE address=? AND chain_id=1', (\n address,))\n cursor.execute('DELETE FROM contract_abi WHERE value=?', (abi,))\n ethereum_inquirer.contracts.contract(address)\n with GlobalDBHandler().conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT COUNT(*) FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 AND contract_data.address=? AND contract_abi.value=?'\n , (address, abi))\n assert cursor.fetchone()[0] == 1\n",
"step-2": "<mask token>\n\n\ndef test_evm_contracts_data(globaldb):\n \"\"\"Test that all evm contract entries in the packaged global DB have legal data\"\"\"\n serialized_chain_ids = [x.serialize_for_db() for x in ChainID]\n with globaldb.conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT address, chain_id, abi, deployed_block FROM contract_data')\n for entry in cursor:\n assert is_checksum_address(entry[0])\n assert isinstance(entry[1], int) and entry[1\n ] in serialized_chain_ids\n assert isinstance(entry[2], int)\n assert isinstance(entry[3], int) and entry[3] > 0\n\n\ndef test_evm_abi_data(globaldb):\n \"\"\"Test that the evm abi entries in the packaged globalDB have legal data\"\"\"\n abis_set = {0}\n with globaldb.conn.read_ctx() as cursor:\n cursor.execute('SELECT id, value FROM contract_abi')\n for entry in cursor:\n assert isinstance(entry[0], int)\n assert isinstance(entry[1], str)\n json_abi = json.loads(entry[1])\n serialized_abi = json.dumps(json_abi, separators=(',', ':'))\n assert serialized_abi == entry[1]\n assert entry[1] not in abis_set\n abis_set.add(entry[1])\n\n\[email protected]('sql_vm_instructions_cb', [2])\ndef test_fallback_to_packaged_db(ethereum_inquirer: 'EthereumInquirer'):\n \"\"\"\n Test that if a contract / abi is missing in the globaldb, it is searched in the packaged db.\n \"\"\"\n with GlobalDBHandler().conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT contract_data.address, contract_abi.value FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 LIMIT 1'\n )\n address, abi = cursor.fetchone()\n cursor.execute(\n 'DELETE FROM contract_data WHERE address=? AND chain_id=1', (\n address,))\n cursor.execute('DELETE FROM contract_abi WHERE value=?', (abi,))\n ethereum_inquirer.contracts.contract(address)\n with GlobalDBHandler().conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT COUNT(*) FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 AND contract_data.address=? AND contract_abi.value=?'\n , (address, abi))\n assert cursor.fetchone()[0] == 1\n",
"step-3": "<mask token>\nif TYPE_CHECKING:\n from rotkehlchen.chain.ethereum.node_inquirer import EthereumInquirer\n\n\ndef test_evm_contracts_data(globaldb):\n \"\"\"Test that all evm contract entries in the packaged global DB have legal data\"\"\"\n serialized_chain_ids = [x.serialize_for_db() for x in ChainID]\n with globaldb.conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT address, chain_id, abi, deployed_block FROM contract_data')\n for entry in cursor:\n assert is_checksum_address(entry[0])\n assert isinstance(entry[1], int) and entry[1\n ] in serialized_chain_ids\n assert isinstance(entry[2], int)\n assert isinstance(entry[3], int) and entry[3] > 0\n\n\ndef test_evm_abi_data(globaldb):\n \"\"\"Test that the evm abi entries in the packaged globalDB have legal data\"\"\"\n abis_set = {0}\n with globaldb.conn.read_ctx() as cursor:\n cursor.execute('SELECT id, value FROM contract_abi')\n for entry in cursor:\n assert isinstance(entry[0], int)\n assert isinstance(entry[1], str)\n json_abi = json.loads(entry[1])\n serialized_abi = json.dumps(json_abi, separators=(',', ':'))\n assert serialized_abi == entry[1]\n assert entry[1] not in abis_set\n abis_set.add(entry[1])\n\n\[email protected]('sql_vm_instructions_cb', [2])\ndef test_fallback_to_packaged_db(ethereum_inquirer: 'EthereumInquirer'):\n \"\"\"\n Test that if a contract / abi is missing in the globaldb, it is searched in the packaged db.\n \"\"\"\n with GlobalDBHandler().conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT contract_data.address, contract_abi.value FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 LIMIT 1'\n )\n address, abi = cursor.fetchone()\n cursor.execute(\n 'DELETE FROM contract_data WHERE address=? AND chain_id=1', (\n address,))\n cursor.execute('DELETE FROM contract_abi WHERE value=?', (abi,))\n ethereum_inquirer.contracts.contract(address)\n with GlobalDBHandler().conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT COUNT(*) FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 AND contract_data.address=? AND contract_abi.value=?'\n , (address, abi))\n assert cursor.fetchone()[0] == 1\n",
"step-4": "import json\nfrom typing import TYPE_CHECKING\nimport pytest\nfrom eth_utils import is_checksum_address\nfrom rotkehlchen.globaldb.handler import GlobalDBHandler\nfrom rotkehlchen.types import ChainID\nif TYPE_CHECKING:\n from rotkehlchen.chain.ethereum.node_inquirer import EthereumInquirer\n\n\ndef test_evm_contracts_data(globaldb):\n \"\"\"Test that all evm contract entries in the packaged global DB have legal data\"\"\"\n serialized_chain_ids = [x.serialize_for_db() for x in ChainID]\n with globaldb.conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT address, chain_id, abi, deployed_block FROM contract_data')\n for entry in cursor:\n assert is_checksum_address(entry[0])\n assert isinstance(entry[1], int) and entry[1\n ] in serialized_chain_ids\n assert isinstance(entry[2], int)\n assert isinstance(entry[3], int) and entry[3] > 0\n\n\ndef test_evm_abi_data(globaldb):\n \"\"\"Test that the evm abi entries in the packaged globalDB have legal data\"\"\"\n abis_set = {0}\n with globaldb.conn.read_ctx() as cursor:\n cursor.execute('SELECT id, value FROM contract_abi')\n for entry in cursor:\n assert isinstance(entry[0], int)\n assert isinstance(entry[1], str)\n json_abi = json.loads(entry[1])\n serialized_abi = json.dumps(json_abi, separators=(',', ':'))\n assert serialized_abi == entry[1]\n assert entry[1] not in abis_set\n abis_set.add(entry[1])\n\n\[email protected]('sql_vm_instructions_cb', [2])\ndef test_fallback_to_packaged_db(ethereum_inquirer: 'EthereumInquirer'):\n \"\"\"\n Test that if a contract / abi is missing in the globaldb, it is searched in the packaged db.\n \"\"\"\n with GlobalDBHandler().conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT contract_data.address, contract_abi.value FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 LIMIT 1'\n )\n address, abi = cursor.fetchone()\n cursor.execute(\n 'DELETE FROM contract_data WHERE address=? AND chain_id=1', (\n address,))\n cursor.execute('DELETE FROM contract_abi WHERE value=?', (abi,))\n ethereum_inquirer.contracts.contract(address)\n with GlobalDBHandler().conn.read_ctx() as cursor:\n cursor.execute(\n 'SELECT COUNT(*) FROM contract_data INNER JOIN contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 AND contract_data.address=? AND contract_abi.value=?'\n , (address, abi))\n assert cursor.fetchone()[0] == 1\n",
"step-5": "import json\nfrom typing import TYPE_CHECKING\n\nimport pytest\nfrom eth_utils import is_checksum_address\n\nfrom rotkehlchen.globaldb.handler import GlobalDBHandler\nfrom rotkehlchen.types import ChainID\n\nif TYPE_CHECKING:\n from rotkehlchen.chain.ethereum.node_inquirer import EthereumInquirer\n\n\ndef test_evm_contracts_data(globaldb):\n \"\"\"Test that all evm contract entries in the packaged global DB have legal data\"\"\"\n serialized_chain_ids = [x.serialize_for_db() for x in ChainID]\n with globaldb.conn.read_ctx() as cursor:\n cursor.execute('SELECT address, chain_id, abi, deployed_block FROM contract_data')\n for entry in cursor:\n assert is_checksum_address(entry[0])\n assert isinstance(entry[1], int) and entry[1] in serialized_chain_ids\n assert isinstance(entry[2], int)\n assert isinstance(entry[3], int) and entry[3] > 0\n\n\ndef test_evm_abi_data(globaldb):\n \"\"\"Test that the evm abi entries in the packaged globalDB have legal data\"\"\"\n abis_set = {0}\n with globaldb.conn.read_ctx() as cursor:\n cursor.execute('SELECT id, value FROM contract_abi')\n for entry in cursor:\n assert isinstance(entry[0], int)\n # read the abi, and make sure it's the most compressed version it can be\n # and that it's unique\n assert isinstance(entry[1], str)\n json_abi = json.loads(entry[1])\n serialized_abi = json.dumps(json_abi, separators=(',', ':'))\n assert serialized_abi == entry[1]\n assert entry[1] not in abis_set\n abis_set.add(entry[1])\n\n\[email protected]('sql_vm_instructions_cb', [2])\ndef test_fallback_to_packaged_db(ethereum_inquirer: 'EthereumInquirer'):\n \"\"\"\n Test that if a contract / abi is missing in the globaldb, it is searched in the packaged db.\n \"\"\"\n with GlobalDBHandler().conn.read_ctx() as cursor:\n # Delete one contract and its abi\n cursor.execute(\n 'SELECT contract_data.address, contract_abi.value FROM contract_data INNER JOIN '\n 'contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 LIMIT 1',\n )\n (address, abi) = cursor.fetchone() # There has to be at least one entry\n cursor.execute('DELETE FROM contract_data WHERE address=? AND chain_id=1', (address,))\n cursor.execute('DELETE FROM contract_abi WHERE value=?', (abi,))\n\n # Now query the contract, let it get to packaged global DB and also see that\n # database packaged_db is locked is also not raised\n ethereum_inquirer.contracts.contract(address)\n\n with GlobalDBHandler().conn.read_ctx() as cursor:\n # Check that the contract and the abi were copied to the global db\n cursor.execute(\n 'SELECT COUNT(*) FROM contract_data INNER JOIN '\n 'contract_abi ON contract_data.abi=contract_abi.id WHERE chain_id=1 AND '\n 'contract_data.address=? AND contract_abi.value=?',\n (address, abi),\n )\n assert cursor.fetchone()[0] == 1\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
normal
|
{
"blob_id": "193d48237b4b1e406eb565943cf01f0423449fca",
"index": 3682,
"step-1": "# [email protected]\n#[email protected]",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
1
]
}
|
[
1
] |
|
print('Hello World!')
print('2nd Test')
d = dict()
d['a'] = dict()
d['a']['b'] = 5
d['a']['c'] = 6
d['x'] = dict()
d['x']['y'] = 10
print(d)
print(d['a'])
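
# seeded PRNG: a fixed seed makes the draws below reproducible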
import random
random.seed(30)
r = random.randrange(0,5)
print(r)
import numpy as np
np.random.seed(30)  # seed value assumed, mirroring random.seed(30) above
for i in range(20):
    # up to three distinct values from 0..10 (randint's high bound is exclusive)
    newArray = list(set(np.random.randint(0, 11, size=6)))[:3]
    print(newArray)
|
normal
|
{
"blob_id": "e4a60008ca7d61d825b59e6202b40c6be02841cd",
"index": 2024,
"step-1": "<mask token>\n",
"step-2": "print('Hello World!')\nprint('2nd Test')\n<mask token>\nprint(d)\nprint(d['a'])\n<mask token>\nrandom.seed(30)\n<mask token>\nprint(r)\n<mask token>\nnp.random.seed\nfor i in range(20):\n newArray = list(set(np.random.random_integers(0, 10, size=6)))[:3]\n print(newArray)\n",
"step-3": "print('Hello World!')\nprint('2nd Test')\nd = dict()\nd['a'] = dict()\nd['a']['b'] = 5\nd['a']['c'] = 6\nd['x'] = dict()\nd['x']['y'] = 10\nprint(d)\nprint(d['a'])\n<mask token>\nrandom.seed(30)\nr = random.randrange(0, 5)\nprint(r)\n<mask token>\nnp.random.seed\nfor i in range(20):\n newArray = list(set(np.random.random_integers(0, 10, size=6)))[:3]\n print(newArray)\n",
"step-4": "print('Hello World!')\nprint('2nd Test')\nd = dict()\nd['a'] = dict()\nd['a']['b'] = 5\nd['a']['c'] = 6\nd['x'] = dict()\nd['x']['y'] = 10\nprint(d)\nprint(d['a'])\nimport random\nrandom.seed(30)\nr = random.randrange(0, 5)\nprint(r)\nimport numpy as np\nnp.random.seed\nfor i in range(20):\n newArray = list(set(np.random.random_integers(0, 10, size=6)))[:3]\n print(newArray)\n",
"step-5": "print('Hello World!')\nprint('2nd Test')\n\n\n\nd = dict()\nd['a'] = dict()\nd['a']['b'] = 5\nd['a']['c'] = 6\nd['x'] = dict()\nd['x']['y'] = 10\nprint(d)\n\nprint(d['a'])\n\n\nimport random\nrandom.seed(30)\n\nr = random.randrange(0,5)\nprint(r)\n\n\nimport numpy as np\nnp.random.seed\nfor i in range(20):\n newArray = list(set(np.random.random_integers(0, 10, size=(6))))[:3]\n print(newArray)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.db import models
# Create your models here.
from user.models import User
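
# Blog models: a Post row (bl_post) plus its body in a separate one-to-one
# Content row (bl_content).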
class Post(models.Model):
class Meta:
db_table = 'bl_post'
id = models.AutoField(primary_key=True)
title = models.CharField(max_length=200, null=False)
pubdate = models.DateTimeField(null=False)
    # author
    # author_id = models.IntegerField(null=False)
    author = models.ForeignKey(User, on_delete=models.CASCADE)  # on_delete policy assumed; required from Django 2.0

    # content (see the related Content model below)
def __repr__(self):
return "<Post {} {} {} {} [{}] >".format(self.id, self.title,self.author,self.content,self.author.id)
__str__ = __repr__
class Content(models.Model):
class Meta:
db_table = 'bl_content'
    # id can be omitted; Django creates the primary key (pk) for you
    post = models.OneToOneField(Post, to_field='id', on_delete=models.CASCADE)  # post_id column; on_delete policy assumed
content = models.TextField(null=False)
def __repr__(self):
return "<Content {} {} {} >".format(self.id,self.post.id, self.content[:40])
__str__ = __repr__
|
normal
|
{
"blob_id": "34a523b31e5567d2a8aec95c5820792d1ae80892",
"index": 5335,
"step-1": "<mask token>\n\n\nclass Post(models.Model):\n\n\n class Meta:\n db_table = 'bl_post'\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Content(models.Model):\n\n\n class Meta:\n db_table = 'bl_content'\n post = models.OneToOneField(Post, to_field='id')\n content = models.TextField(null=False)\n\n def __repr__(self):\n return '<Content {} {} {} >'.format(self.id, self.post.id, self.\n content[:40])\n __str__ = __repr__\n",
"step-2": "<mask token>\n\n\nclass Post(models.Model):\n\n\n class Meta:\n db_table = 'bl_post'\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __repr__(self):\n return '<Post {} {} {} {} [{}] >'.format(self.id, self.title, self.\n author, self.content, self.author.id)\n <mask token>\n\n\nclass Content(models.Model):\n\n\n class Meta:\n db_table = 'bl_content'\n post = models.OneToOneField(Post, to_field='id')\n content = models.TextField(null=False)\n\n def __repr__(self):\n return '<Content {} {} {} >'.format(self.id, self.post.id, self.\n content[:40])\n __str__ = __repr__\n",
"step-3": "<mask token>\n\n\nclass Post(models.Model):\n\n\n class Meta:\n db_table = 'bl_post'\n id = models.AutoField(primary_key=True)\n title = models.CharField(max_length=200, null=False)\n pubdate = models.DateTimeField(null=False)\n author = models.ForeignKey(User)\n\n def __repr__(self):\n return '<Post {} {} {} {} [{}] >'.format(self.id, self.title, self.\n author, self.content, self.author.id)\n __str__ = __repr__\n\n\nclass Content(models.Model):\n\n\n class Meta:\n db_table = 'bl_content'\n post = models.OneToOneField(Post, to_field='id')\n content = models.TextField(null=False)\n\n def __repr__(self):\n return '<Content {} {} {} >'.format(self.id, self.post.id, self.\n content[:40])\n __str__ = __repr__\n",
"step-4": "from django.db import models\nfrom user.models import User\n\n\nclass Post(models.Model):\n\n\n class Meta:\n db_table = 'bl_post'\n id = models.AutoField(primary_key=True)\n title = models.CharField(max_length=200, null=False)\n pubdate = models.DateTimeField(null=False)\n author = models.ForeignKey(User)\n\n def __repr__(self):\n return '<Post {} {} {} {} [{}] >'.format(self.id, self.title, self.\n author, self.content, self.author.id)\n __str__ = __repr__\n\n\nclass Content(models.Model):\n\n\n class Meta:\n db_table = 'bl_content'\n post = models.OneToOneField(Post, to_field='id')\n content = models.TextField(null=False)\n\n def __repr__(self):\n return '<Content {} {} {} >'.format(self.id, self.post.id, self.\n content[:40])\n __str__ = __repr__\n",
"step-5": "from django.db import models\n\n# Create your models here.\nfrom user.models import User\n\n\nclass Post(models.Model):\n class Meta:\n db_table = 'bl_post'\n\n id = models.AutoField(primary_key=True)\n title = models.CharField(max_length=200, null=False)\n pubdate = models.DateTimeField(null=False)\n # 作者\n # author_id = models.IntegerField(null=False)\n author = models.ForeignKey(User)\n\n # 内容\n\n def __repr__(self):\n return \"<Post {} {} {} {} [{}] >\".format(self.id, self.title,self.author,self.content,self.author.id)\n\n __str__ = __repr__\n\n\nclass Content(models.Model):\n class Meta:\n db_table = 'bl_content'\n\n # id 可以不写,主键django帮你创建一个pk\n post = models.OneToOneField(Post, to_field='id') # post_id\n content = models.TextField(null=False)\n\n def __repr__(self):\n return \"<Content {} {} {} >\".format(self.id,self.post.id, self.content[:40])\n\n __str__ = __repr__\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import os, sys, shutil
import fnmatch, logging, zipfile
logging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s', datefmt='%Y-%m-%d,%H:%M:%S', level=logging.DEBUG)
def scan_files(dir, pattern):
fileList = []
for root, subFolders, files in os.walk(dir):
for file in files:
if fnmatch.fnmatch(file, pattern):
fileList.append(os.path.join(root,file))
return fileList
if (not os.path.exists('dist')):
os.makedirs('dist')
currentDir = os.getcwd() # save current dir
os.chdir('..\\..') # go to root of simulation
distPath = os.path.join(currentDir, 'bundle') # where to put files
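# scanData entries: [source dir, filename pattern, destination subdir,
# keep the relative path under the destination?]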
scanData = [
['WSN\\simulations', '*.ned', '', True],
['WSN\\simulations', '*.xml', '', True],
['WSN\\simulations', '*.exe', '', True],
['WSN\\simulations', '*.ini', '', True],
['WSN\\src', '*.ned', '', True],
['WSN\\src', '*.dll', '', True],
['MiXiM\\src', '*.ned', '', True],
['MiXiM\\src', '*.dll', '', True],
['MiXiM\\src\\base', '*.dll', 'lib', False],
['MiXiM\\src\\modules', '*.dll', 'lib', False],
[os.path.join(currentDir, 'lib'), '*.dll', 'lib', False],
]
# remove old bundle
if (os.path.exists(distPath)):
shutil.rmtree(distPath)
# copy necessary files
for data in scanData:
for file in scan_files(data[0], data[1]):
if (data[3]):
newSubPath = file
else:
newSubPath = os.path.basename(file)
newPath = os.path.relpath(os.path.join(distPath, data[2], newSubPath))
newDir = os.path.dirname(newPath)
if (not os.path.exists(newDir)):
os.makedirs(newDir)
logging.info('Copying %s to %s' % (file, newPath))
shutil.copyfile(file, newPath)
logging.info("Creating archive")
bundleZip = zipfile.ZipFile(os.path.join(currentDir, 'dist', "bundle.zip"), 'w', zipfile.ZIP_DEFLATED)
for root, subFolders, files in os.walk(distPath):
for file in files:
# make path relative to distPath
newPath = os.path.join(root, file).replace(distPath, '')
# add files to zip
bundleZip.write(os.path.join(root, file), newPath)
bundleZip.close()
logging.info("Done")
os.chdir(currentDir) # go back
|
normal
|
{
"blob_id": "187c2a56ba9360b89c8ded09861091e2deedf32e",
"index": 7783,
"step-1": "<mask token>\n\n\ndef scan_files(dir, pattern):\n fileList = []\n for root, subFolders, files in os.walk(dir):\n for file in files:\n if fnmatch.fnmatch(file, pattern):\n fileList.append(os.path.join(root, file))\n return fileList\n\n\n<mask token>\n",
"step-2": "<mask token>\nlogging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s',\n datefmt='%Y-%m-%d,%H:%M:%S', level=logging.DEBUG)\n\n\ndef scan_files(dir, pattern):\n fileList = []\n for root, subFolders, files in os.walk(dir):\n for file in files:\n if fnmatch.fnmatch(file, pattern):\n fileList.append(os.path.join(root, file))\n return fileList\n\n\nif not os.path.exists('dist'):\n os.makedirs('dist')\n<mask token>\nos.chdir('..\\\\..')\n<mask token>\nif os.path.exists(distPath):\n shutil.rmtree(distPath)\nfor data in scanData:\n for file in scan_files(data[0], data[1]):\n if data[3]:\n newSubPath = file\n else:\n newSubPath = os.path.basename(file)\n newPath = os.path.relpath(os.path.join(distPath, data[2], newSubPath))\n newDir = os.path.dirname(newPath)\n if not os.path.exists(newDir):\n os.makedirs(newDir)\n logging.info('Copying %s to %s' % (file, newPath))\n shutil.copyfile(file, newPath)\nlogging.info('Creating archive')\n<mask token>\nfor root, subFolders, files in os.walk(distPath):\n for file in files:\n newPath = os.path.join(root, file).replace(distPath, '')\n bundleZip.write(os.path.join(root, file), newPath)\nbundleZip.close()\nlogging.info('Done')\nos.chdir(currentDir)\n",
"step-3": "<mask token>\nlogging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s',\n datefmt='%Y-%m-%d,%H:%M:%S', level=logging.DEBUG)\n\n\ndef scan_files(dir, pattern):\n fileList = []\n for root, subFolders, files in os.walk(dir):\n for file in files:\n if fnmatch.fnmatch(file, pattern):\n fileList.append(os.path.join(root, file))\n return fileList\n\n\nif not os.path.exists('dist'):\n os.makedirs('dist')\ncurrentDir = os.getcwd()\nos.chdir('..\\\\..')\ndistPath = os.path.join(currentDir, 'bundle')\nscanData = [['WSN\\\\simulations', '*.ned', '', True], ['WSN\\\\simulations',\n '*.xml', '', True], ['WSN\\\\simulations', '*.exe', '', True], [\n 'WSN\\\\simulations', '*.ini', '', True], ['WSN\\\\src', '*.ned', '', True],\n ['WSN\\\\src', '*.dll', '', True], ['MiXiM\\\\src', '*.ned', '', True], [\n 'MiXiM\\\\src', '*.dll', '', True], ['MiXiM\\\\src\\\\base', '*.dll', 'lib', \n False], ['MiXiM\\\\src\\\\modules', '*.dll', 'lib', False], [os.path.join(\n currentDir, 'lib'), '*.dll', 'lib', False]]\nif os.path.exists(distPath):\n shutil.rmtree(distPath)\nfor data in scanData:\n for file in scan_files(data[0], data[1]):\n if data[3]:\n newSubPath = file\n else:\n newSubPath = os.path.basename(file)\n newPath = os.path.relpath(os.path.join(distPath, data[2], newSubPath))\n newDir = os.path.dirname(newPath)\n if not os.path.exists(newDir):\n os.makedirs(newDir)\n logging.info('Copying %s to %s' % (file, newPath))\n shutil.copyfile(file, newPath)\nlogging.info('Creating archive')\nbundleZip = zipfile.ZipFile(os.path.join(currentDir, 'dist', 'bundle.zip'),\n 'w', zipfile.ZIP_DEFLATED)\nfor root, subFolders, files in os.walk(distPath):\n for file in files:\n newPath = os.path.join(root, file).replace(distPath, '')\n bundleZip.write(os.path.join(root, file), newPath)\nbundleZip.close()\nlogging.info('Done')\nos.chdir(currentDir)\n",
"step-4": "import os, sys, shutil\nimport fnmatch, logging, zipfile\nlogging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s',\n datefmt='%Y-%m-%d,%H:%M:%S', level=logging.DEBUG)\n\n\ndef scan_files(dir, pattern):\n fileList = []\n for root, subFolders, files in os.walk(dir):\n for file in files:\n if fnmatch.fnmatch(file, pattern):\n fileList.append(os.path.join(root, file))\n return fileList\n\n\nif not os.path.exists('dist'):\n os.makedirs('dist')\ncurrentDir = os.getcwd()\nos.chdir('..\\\\..')\ndistPath = os.path.join(currentDir, 'bundle')\nscanData = [['WSN\\\\simulations', '*.ned', '', True], ['WSN\\\\simulations',\n '*.xml', '', True], ['WSN\\\\simulations', '*.exe', '', True], [\n 'WSN\\\\simulations', '*.ini', '', True], ['WSN\\\\src', '*.ned', '', True],\n ['WSN\\\\src', '*.dll', '', True], ['MiXiM\\\\src', '*.ned', '', True], [\n 'MiXiM\\\\src', '*.dll', '', True], ['MiXiM\\\\src\\\\base', '*.dll', 'lib', \n False], ['MiXiM\\\\src\\\\modules', '*.dll', 'lib', False], [os.path.join(\n currentDir, 'lib'), '*.dll', 'lib', False]]\nif os.path.exists(distPath):\n shutil.rmtree(distPath)\nfor data in scanData:\n for file in scan_files(data[0], data[1]):\n if data[3]:\n newSubPath = file\n else:\n newSubPath = os.path.basename(file)\n newPath = os.path.relpath(os.path.join(distPath, data[2], newSubPath))\n newDir = os.path.dirname(newPath)\n if not os.path.exists(newDir):\n os.makedirs(newDir)\n logging.info('Copying %s to %s' % (file, newPath))\n shutil.copyfile(file, newPath)\nlogging.info('Creating archive')\nbundleZip = zipfile.ZipFile(os.path.join(currentDir, 'dist', 'bundle.zip'),\n 'w', zipfile.ZIP_DEFLATED)\nfor root, subFolders, files in os.walk(distPath):\n for file in files:\n newPath = os.path.join(root, file).replace(distPath, '')\n bundleZip.write(os.path.join(root, file), newPath)\nbundleZip.close()\nlogging.info('Done')\nos.chdir(currentDir)\n",
"step-5": "import os, sys, shutil \r\nimport fnmatch, logging, zipfile\r\n\r\nlogging.basicConfig(format='%(asctime)s [%(levelname)s] %(message)s', datefmt='%Y-%m-%d,%H:%M:%S', level=logging.DEBUG)\r\n\r\ndef scan_files(dir, pattern):\r\n fileList = []\r\n for root, subFolders, files in os.walk(dir):\r\n for file in files:\r\n if fnmatch.fnmatch(file, pattern):\r\n fileList.append(os.path.join(root,file))\r\n return fileList\r\n\r\nif (not os.path.exists('dist')):\r\n os.makedirs('dist')\r\n \r\ncurrentDir = os.getcwd() # save current dir\r\nos.chdir('..\\\\..') # go to root of simulation\r\n\r\ndistPath = os.path.join(currentDir, 'bundle') # where to put files\r\nscanData = [\r\n ['WSN\\\\simulations', '*.ned', '', True],\r\n ['WSN\\\\simulations', '*.xml', '', True],\r\n ['WSN\\\\simulations', '*.exe', '', True],\r\n ['WSN\\\\simulations', '*.ini', '', True],\r\n ['WSN\\\\src', '*.ned', '', True],\r\n ['WSN\\\\src', '*.dll', '', True],\r\n ['MiXiM\\\\src', '*.ned', '', True],\r\n ['MiXiM\\\\src', '*.dll', '', True],\r\n ['MiXiM\\\\src\\\\base', '*.dll', 'lib', False],\r\n ['MiXiM\\\\src\\\\modules', '*.dll', 'lib', False],\r\n [os.path.join(currentDir, 'lib'), '*.dll', 'lib', False],\r\n]\r\n\r\n# remove old bundle\r\nif (os.path.exists(distPath)):\r\n shutil.rmtree(distPath)\r\n\r\n# copy neccessary files\r\nfor data in scanData:\r\n \r\n for file in scan_files(data[0], data[1]):\r\n \r\n if (data[3]):\r\n newSubPath = file \r\n else:\r\n newSubPath = os.path.basename(file)\r\n \r\n newPath = os.path.relpath(os.path.join(distPath, data[2], newSubPath))\r\n newDir = os.path.dirname(newPath)\r\n \r\n if (not os.path.exists(newDir)):\r\n os.makedirs(newDir)\r\n \r\n logging.info('Copying %s to %s' % (file, newPath))\r\n shutil.copyfile(file, newPath)\r\n\r\nlogging.info(\"Creating archive\")\r\nbundleZip = zipfile.ZipFile(os.path.join(currentDir, 'dist', \"bundle.zip\"), 'w', zipfile.ZIP_DEFLATED)\r\nfor root, subFolders, files in os.walk(distPath):\r\n for file in files:\r\n # make path relative to distPath\r\n newPath = os.path.join(root, file).replace(distPath, '')\r\n # add files to zip\r\n bundleZip.write(os.path.join(root, file), newPath)\r\nbundleZip.close()\r\nlogging.info(\"Done\")\r\n\r\nos.chdir(currentDir) # go back",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
#Copyright 2008, Meka Robotics
#All rights reserved.
#http://mekabot.com
#Redistribution and use in source and binary forms, with or without
#modification, are permitted.
#THIS SOFTWARE IS PROVIDED BY THE Copyright HOLDERS AND CONTRIBUTORS
#"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
#FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
#Copyright OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
#INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
#LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
#ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
#POSSIBILITY OF SUCH DAMAGE.
import time
#import Numeric as nu
import math
import os
import sys
import yaml
import m3.unit_conversion as m3u
from m3qa.calibrate import *
from m3qa.calibrate_sensors import *
from m3qa.calibrate_actuator_ec_r1 import *
import m3.actuator_ec_pb2 as aec
# ####################################################################################################
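# Default calibration/parameter blocks for the two gripper digits (joints
# J0 and J1); both currently carry identical Maxon RE13 motor values.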
config_default_g1_j0={
'calib':{
'motor':{
'name': 'Maxon RE13 2.5W 24V',
'winding_resistance': 53.2,#Ohm
'winding_inductance':1.79,#mH
'torque_constant':19.7, #mNm/A
'thermal_resistance_housing_ambient': 33.0,#K/W
'thermal_resistance_rotor_housing': 7.0,#K/W
'max_winding_temp': 85, #C
'gear_ratio': 275.0,
'thermal_time_constant_winding': 4.85, #S
'thermal_time_constant_motor':346, #S
'temp_sensor_type':'housing'
},
'theta':{
'type': 'ma3_12bit',
'name': 'US Digital MA3',
'cb_scale': 1.0,
'cb_bias': 0.0},
'amp_temp':{
'type': 'adc_linear_3V3', #3V3 supply, no divider
'name': 'Microchip TC1047',
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'cb_bias': 0.0,
},
'motor_temp':{
'type': 'adc_linear_3V3', #5V supply, no divider
'name': 'Analog TMP36',
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'cb_bias': 0.0},
'torque':{
'type': 'adc_poly',
'name': 'Allegro A1321',
'cb_inv_torque': [1,0],
'cb_torque': [1,0],
'cb_scale': 1.0,
'cb_bias': 0.0},
'current':{
'type': 'none',
'cb_scale': 0.0,
'cb_bias': 0.0},
},
'param':{
'max_amp_temp': 100.0,
'max_current': 800,
'max_motor_temp': 75.0,
'max_tq': 150.0,
'min_tq': -30.0,
'thetadot_deadband': 1.0
},
'param_internal':
{
'calib_tq_degree':1,
'pwm_theta':[-800,800],
		'pwm_torque':[-1000,1000],
'joint_limits':[0,315.0]
}
}
config_default_g1_j1={
'calib':{
'motor':{
'name': 'Maxon RE13 2.5W 24V',
'winding_resistance': 53.2,#Ohm
'winding_inductance':1.79,#mH
'torque_constant':19.7, #mNm/A
'thermal_resistance_housing_ambient': 33.0,#K/W
'thermal_resistance_rotor_housing': 7.0,#K/W
'max_winding_temp': 85, #C
'gear_ratio': 275.0,
'thermal_time_constant_winding': 4.85, #S
'thermal_time_constant_motor':346, #S
'temp_sensor_type':'housing'
},
'theta':{
'type': 'ma3_12bit',
'name': 'US Digital MA3',
'cb_scale': 1.0,
'cb_bias': 0.0},
'amp_temp':{
'type': 'adc_linear_3V3', #3V3 supply, no divider
'name': 'Microchip TC1047',
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'cb_bias': 0.0,
},
'motor_temp':{
'type': 'adc_linear_3V3', #5V supply, no divider=3V3
'name': 'Analog TMP36',
'cb_mV_at_25C': 750.0,
'cb_mV_per_C': 10.0,
'cb_scale': 1.0,
'cb_bias': 0.0},
'torque':{
'type': 'adc_poly',
'name': 'Allegro A1321',
'cb_inv_torque': [1,0],
'cb_torque': [1,0],
'cb_scale': 1.0,
'cb_bias': 0.0},
'current':{
'type': 'none',
'cb_scale': 0.0,
'cb_bias': 0.0},
},
'param':{
'max_amp_temp': 100.0,
'max_current': 800,
'max_motor_temp': 75.0,
'max_tq': 150.0,
'min_tq': -30.0,
'thetadot_deadband': 1.0
},
'param_internal':
{
'calib_tq_degree':1,
'pwm_theta':[-800,800],
		'pwm_torque':[-1000,1000],
'joint_limits':[0,315.0]
}
}
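
# Calibration flow: torque is fit from a hanging 1 kg weight (plus a zero-load
# sample) at the drive radius, theta by driving the digit to both joint limits
# and mapping the raw encoder range onto the expected 0..315 deg span.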
class M3Calibrate_Gripper_G1R1(M3CalibrateActuatorEcR1):
def __init__(self):
M3CalibrateActuatorEcR1.__init__(self)
self.joint_names=['Left Digit J0',
'Right Digit J1']
self.config_default=[
config_default_g1_j0,
config_default_g1_j1]
def start(self,ctype):
if not M3CalibrateActuatorEcR1.start(self,ctype):
return False
self.jid=int(self.comp_ec.name[self.comp_ec.name.find('_j')+2:])
self.calib_default=self.config_default[self.jid]['calib']
self.param_default=self.config_default[self.jid]['param']
self.param_internal=self.config_default[self.jid]['param_internal']
print 'Calibrating joint',self.joint_names[self.jid]
return True
def do_task(self,ct):
if ct=='ch':
self.reset_sensor('torque')
self.calibrate_torque()
self.write_config()
return True
if ct=='tt':
self.reset_sensor('theta')
self.calibrate_theta()
self.write_config()
return True
		if M3CalibrateActuatorEcR1.do_task(self,ct): # delegate remaining tasks to the R1 base class
return True
return False
def print_tasks(self):
M3CalibrateActuatorEcR1.print_tasks(self)
print 'ch: calibrate torque'
print 'tt: calibrate theta'
def display_sensors(self):
M3CalibrateActuatorEcR1.display_sensors(self)
q_on=self.comp_ec.status.qei_on
q_p=self.comp_ec.status.qei_period
q_r=self.comp_ec.status.qei_rollover
c=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on,q_p,q_r)
pos=1000.0*math.pi*2*self.comp_j.config['calib']['cb_drive_radius_m']*c/360.0
print 'Pos: (mm) : '+'%3.3f'%pos+' Qei On '+'%d'%q_on+' Qei Period '+'%d'%q_p+' Qei Rollover '+'%d'%q_r
raw=self.comp_ec.status.adc_torque
c=self.torque.raw_2_mNm(self.comp_rt.config['calib']['torque'],raw)
mN=c/self.comp_j.config['calib']['cb_drive_radius_m']
print 'Force: (g) : '+'%3.2f'%m3u.mN2g(mN)+' (mN): '+'%3.2f'%mN+' (ADC) '+'%d'%raw
def calibrate_torque(self):
self.proxy.publish_command(self.comp_rt)
self.proxy.publish_param(self.comp_rt)
self.proxy.make_operational(self.name_rt)
self.step()
print 'Make sure other digit is all the way open'
print 'Place digit in zero load condition'
print 'Hit enter when ready'
raw_input()
self.step()
raw_a=int(self.get_sensor_list_avg(['adc_torque'],1.0)['adc_torque'])
load_a=0
print 'Hang 1Kg weight from gripper near slider'
print 'Hit enter to move joint in first direction.'
raw_input()
self.comp_rt.set_mode_pwm()
print 'Desired pwm? [',self.param_internal['pwm_torque'][0],']?'
		p=int(m3t.get_float(self.param_internal['pwm_torque'][0]))
self.comp_rt.set_pwm(p)
self.step()
print 'Hit any key when ready to sample'
raw_input()
raw_b=int(self.get_sensor_list_avg(['adc_torque'],1.0)['adc_torque'])
print 'Was load in the opening direction [y]?'
if m3t.get_yes_no('y'):
load_b=m3u.g2mN(1000.0)*self.comp_j.config['calib']['cb_drive_radius_m']
else:
load_b=m3u.g2mN(-1000.0)*self.comp_j.config['calib']['cb_drive_radius_m']
print 'Hit enter to move joint in second direction.'
raw_input()
self.comp_rt.set_mode_pwm()
print 'Desired pwm? [',self.param_internal['pwm_torque'][1],']?'
		p=int(m3t.get_float(self.param_internal['pwm_torque'][1]))
self.comp_rt.set_pwm(p)
self.step()
print 'Hit any key when ready to sample'
raw_input()
raw_c=int(self.get_sensor_list_avg(['adc_torque'],1.0)['adc_torque'])
load_c=-1*load_b
log_adc_torque=[raw_a,raw_b,raw_c]
log_load_mNm=[load_a,load_b,load_c]
poly,inv_poly=self.get_polyfit_to_data(x=log_adc_torque,y=log_load_mNm,n=1)
self.write_raw_calibration({'log_adc_torque':log_adc_torque,'log_load_mNm':log_load_mNm,
'cb_torque':poly,'cb_inv_torque':inv_poly})
self.comp_rt.config['calib']['torque']['cb_torque']=poly
self.comp_rt.config['calib']['torque']['cb_inv_torque']=inv_poly
print 'Poly',poly
s=m3tc.PolyEval(poly,[raw_a,raw_b,raw_c])
m3t.mplot2(range(len(log_adc_torque)),log_load_mNm,s,xlabel='Samples',ylabel='Torque (mNm)',
y1name='load',y2name='raw')
def calibrate_theta(self):
pconfig=self.comp_ec.param.config #disable qei limits
self.comp_ec.param.config=0
self.proxy.publish_command(self.comp_rt)
self.proxy.publish_param(self.comp_rt)
self.proxy.make_operational(self.name_rt)
self.step()
print 'Make sure other digit is all the way open'
print 'Moving joint to first limit. Hit any key when ready'
raw_input()
self.comp_rt.set_mode_pwm()
print 'Desired pwm? [',self.param_internal['pwm_theta'][0],']?'
p=int(m3t.get_float(self.param_internal['pwm_theta'][0]))
self.comp_rt.set_pwm(p)
self.step()
print 'Hit any key when motion done'
raw_input()
self.step()
q_on_a=self.comp_ec.status.qei_on
q_p_a=self.comp_ec.status.qei_period
q_r_a=self.comp_ec.status.qei_rollover
print 'RawA',q_on_a
print 'Moving joint to second limit. Hit any key when ready'
raw_input()
self.comp_rt.set_mode_pwm()
print 'Desired pwm? [',self.param_internal['pwm_theta'][1],']?'
p=int(m3t.get_float(self.param_internal['pwm_theta'][1]))
self.comp_rt.set_pwm(p)
self.step()
print 'Hit any key when motion done'
raw_input()
self.step()
q_on_b=self.comp_ec.status.qei_on
q_p_b=self.comp_ec.status.qei_period
q_r_b=self.comp_ec.status.qei_rollover
print 'Rawb',q_on_b
theta_as=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on_a,q_p_a,q_r_a)
theta_bs=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on_b,q_p_b,q_r_b)
print 'Did this last motion open the gripper [y]?' #At zero position
if m3t.get_yes_no('y'):
theta_b=0
theta_a=abs(theta_bs-theta_as)
else:
theta_a=0
theta_b=abs(theta_bs-theta_as)
self.comp_rt.set_mode_off()
self.comp_ec.param.config=pconfig #enable qei limits
self.step()
self.proxy.make_safe_operational(self.name_rt)
self.step()
print 'Raw',[theta_as,theta_bs]
print 'True',[theta_a,theta_b]
poly,inv_poly=self.get_polyfit_to_data([theta_as,theta_bs],[theta_a,theta_b],n=1)
self.comp_rt.config['calib']['theta']['cb_scale']=poly[0]
self.comp_rt.config['calib']['theta']['cb_bias']=poly[1]
theta_as=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on_a,q_p_a,q_r_a)
theta_bs=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on_b,q_p_b,q_r_b)
print 'New calibrated range',theta_as,theta_bs
max_q=max(theta_as,theta_bs)
min_q=min(theta_as,theta_bs)
if self.comp_j is not None:
print 'Setting joint limits to',min_q,max_q
print 'Expected joint limits of',self.param_internal['joint_limits']
self.comp_j.param.max_q=float(max_q)
self.comp_j.param.min_q=float(min_q)
else:
print 'Joint component missing. Unable to set joint limits to',min_q,max_q
#Assume 0-Ndeg, where N is defined by the encoder soft limits
self.comp_ec.config['param']['qei_min']=min(q_on_a,q_on_b)+100
self.comp_ec.config['param']['qei_max']=max(q_on_a,q_on_b)-100
self.comp_ec.param.qei_min=min(q_on_a,q_on_b)+100
self.comp_ec.param.qei_max=max(q_on_a,q_on_b)-100
print 'Setting DSP qei min/max to',self.comp_ec.config['param']['qei_min'],self.comp_ec.config['param']['qei_max']
|
normal
|
{
"blob_id": "b227f222569761493f50f9dfee32f21e0e0a5cd6",
"index": 4400,
"step-1": "#Copyright 2008, Meka Robotics\n#All rights reserved.\n#http://mekabot.com\n\n#Redistribution and use in source and binary forms, with or without\n#modification, are permitted. \n\n\n#THIS SOFTWARE IS PROVIDED BY THE Copyright HOLDERS AND CONTRIBUTORS\n#\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS\n#FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE\n#Copyright OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,\n#INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES INCLUDING,\n#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\n#LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN\n#ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n#POSSIBILITY OF SUCH DAMAGE.\n\nimport time\n \n#import Numeric as nu\nimport math\nimport os \nimport sys\nimport yaml\nimport m3.unit_conversion as m3u\nfrom m3qa.calibrate import *\nfrom m3qa.calibrate_sensors import *\nfrom m3qa.calibrate_actuator_ec_r1 import *\nimport m3.actuator_ec_pb2 as aec\n\n# ####################################################################################################\n\n\nconfig_default_g1_j0={\n\t'calib':{\n\t\t'motor':{\n\t\t\t'name': 'Maxon RE13 2.5W 24V',\n\t\t\t'winding_resistance': 53.2,#Ohm\n\t\t\t'winding_inductance':1.79,#mH\n\t\t\t'torque_constant':19.7, #mNm/A\n\t\t\t'thermal_resistance_housing_ambient': 33.0,#K/W\n\t\t\t'thermal_resistance_rotor_housing': 7.0,#K/W\n\t\t\t'max_winding_temp': 85, #C\n\t\t\t'gear_ratio': 275.0,\n\t\t\t'thermal_time_constant_winding': 4.85, #S\n\t\t\t'thermal_time_constant_motor':346, #S\n\t\t\t'temp_sensor_type':'housing'\n\t\t\t},\n\t 'theta':{\n\t\t 'type': 'ma3_12bit',\n\t\t 'name': 'US Digital MA3',\n\t\t 'cb_scale': 1.0,\n\t\t 'cb_bias': 0.0},\n\t 'amp_temp':{\n\t\t 'type': 'adc_linear_3V3', #3V3 supply, no divider\n\t\t 'name': 'Microchip TC1047',\n\t\t 'cb_mV_at_25C': 750.0,\n\t\t 'cb_mV_per_C': 10.0,\n\t\t 'cb_scale': 1.0,\n\t\t 'cb_bias': 0.0,\n\t\t },\n\t 'motor_temp':{\n\t\t 'type': 'adc_linear_3V3', #5V supply, no divider\n\t\t 'name': 'Analog TMP36',\n\t\t 'cb_mV_at_25C': 750.0,\n\t\t 'cb_mV_per_C': 10.0,\n\t\t 'cb_scale': 1.0,\n\t\t 'cb_bias': 0.0},\n\t 'torque':{\n\t\t 'type': 'adc_poly',\n\t\t 'name': 'Allegro A1321',\n\t\t 'cb_inv_torque': [1,0],\n\t\t 'cb_torque': [1,0],\n\t\t 'cb_scale': 1.0,\n\t\t 'cb_bias': 0.0},\n\t 'current':{\n\t\t 'type': 'none',\n\t\t 'cb_scale': 0.0,\n\t\t 'cb_bias': 0.0},\n\t },\n\t'param':{\n\t\t'max_amp_temp': 100.0,\n\t\t'max_current': 800,\n\t\t'max_motor_temp': 75.0,\n\t\t'max_tq': 150.0,\n\t\t'min_tq': -30.0,\n\t\t'thetadot_deadband': 1.0\n\t\t},\n\t'param_internal':\n\t{\n\t\t'calib_tq_degree':1,\n\t\t'pwm_theta':[-800,800],\n\t\t'pwm_torque':[-1000,-1000],\n\t\t'joint_limits':[0,315.0]\n\t}\n}\n\nconfig_default_g1_j1={\n\t'calib':{\n\t\t'motor':{\n\t\t\t'name': 'Maxon RE13 2.5W 24V',\n\t\t\t'winding_resistance': 53.2,#Ohm\n\t\t\t'winding_inductance':1.79,#mH\n\t\t\t'torque_constant':19.7, #mNm/A\n\t\t\t'thermal_resistance_housing_ambient': 33.0,#K/W\n\t\t\t'thermal_resistance_rotor_housing': 7.0,#K/W\n\t\t\t'max_winding_temp': 85, #C\n\t\t\t'gear_ratio': 275.0,\n\t\t\t'thermal_time_constant_winding': 4.85, #S\n\t\t\t'thermal_time_constant_motor':346, #S\n\t\t\t'temp_sensor_type':'housing'\n\t\t\t},\n\t 'theta':{\n\t\t 'type': 
'ma3_12bit',\n\t\t 'name': 'US Digital MA3',\n\t\t 'cb_scale': 1.0,\n\t\t 'cb_bias': 0.0},\n\t 'amp_temp':{\n\t\t 'type': 'adc_linear_3V3', #3V3 supply, no divider\n\t\t 'name': 'Microchip TC1047',\n\t\t 'cb_mV_at_25C': 750.0,\n\t\t 'cb_mV_per_C': 10.0,\n\t\t 'cb_scale': 1.0,\n\t\t 'cb_bias': 0.0,\n\t\t },\n\t 'motor_temp':{\n\t\t 'type': 'adc_linear_3V3', #5V supply, no divider=3V3 \n\t\t 'name': 'Analog TMP36',\n\t\t 'cb_mV_at_25C': 750.0,\n\t\t 'cb_mV_per_C': 10.0,\n\t\t 'cb_scale': 1.0,\n\t\t 'cb_bias': 0.0},\n\t 'torque':{\n\t\t 'type': 'adc_poly',\n\t\t 'name': 'Allegro A1321',\n\t\t 'cb_inv_torque': [1,0],\n\t\t 'cb_torque': [1,0],\n\t\t 'cb_scale': 1.0,\n\t\t 'cb_bias': 0.0},\n\t 'current':{\n\t\t 'type': 'none',\n\t\t 'cb_scale': 0.0,\n\t\t 'cb_bias': 0.0},\n\t },\n\t'param':{\n\t\t'max_amp_temp': 100.0,\n\t\t'max_current': 800,\n\t\t'max_motor_temp': 75.0,\n\t\t'max_tq': 150.0,\n\t\t'min_tq': -30.0,\n\t\t'thetadot_deadband': 1.0\n\t\t},\n\t'param_internal':\n\t{\n\t\t'calib_tq_degree':1,\n\t\t'pwm_theta':[-800,800],\n\t\t'pwm_torque':[-1000,-1000],\n\t\t'joint_limits':[0,315.0]\n\t}\n}\n\n\n\t\t\nclass M3Calibrate_Gripper_G1R1(M3CalibrateActuatorEcR1):\n\tdef __init__(self):\n\t\tM3CalibrateActuatorEcR1.__init__(self)\n\t\tself.joint_names=['Left Digit J0',\n\t\t\t\t 'Right Digit J1']\n\t\tself.config_default=[\n\t\t\tconfig_default_g1_j0,\n\t\t\tconfig_default_g1_j1]\n\tdef start(self,ctype):\n\t\tif not M3CalibrateActuatorEcR1.start(self,ctype):\n\t\t\treturn False\n\t\tself.jid=int(self.comp_ec.name[self.comp_ec.name.find('_j')+2:])\n\t\tself.calib_default=self.config_default[self.jid]['calib']\n\t\tself.param_default=self.config_default[self.jid]['param']\n\t\tself.param_internal=self.config_default[self.jid]['param_internal']\n\t\tprint 'Calibrating joint',self.joint_names[self.jid]\n\t\treturn True\n\t\n\tdef do_task(self,ct):\n\t\tif ct=='ch':\n\t\t\tself.reset_sensor('torque')\n\t\t\tself.calibrate_torque()\n\t\t\tself.write_config()\n\t\t\treturn True\n\t\tif ct=='tt':\n\t\t\tself.reset_sensor('theta')\n\t\t\tself.calibrate_theta()\n\t\t\tself.write_config()\n\t\t\treturn True\n\t\tif M3CalibrateActuatorEc.do_task(self,ct):\n\t\t\treturn True\n\t\treturn False\n\t\n\tdef print_tasks(self):\n\t\tM3CalibrateActuatorEcR1.print_tasks(self)\n\t\tprint 'ch: calibrate torque' \n\t\tprint 'tt: calibrate theta' \n\t\t\n\tdef display_sensors(self):\n\t\tM3CalibrateActuatorEcR1.display_sensors(self)\n\t\tq_on=self.comp_ec.status.qei_on\n\t\tq_p=self.comp_ec.status.qei_period\n\t\tq_r=self.comp_ec.status.qei_rollover\n\t\tc=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on,q_p,q_r)\n\t\tpos=1000.0*math.pi*2*self.comp_j.config['calib']['cb_drive_radius_m']*c/360.0\n\t\tprint 'Pos: (mm) : '+'%3.3f'%pos+' Qei On '+'%d'%q_on+' Qei Period '+'%d'%q_p+' Qei Rollover '+'%d'%q_r\n\t\traw=self.comp_ec.status.adc_torque\n\t\tc=self.torque.raw_2_mNm(self.comp_rt.config['calib']['torque'],raw)\n\t\tmN=c/self.comp_j.config['calib']['cb_drive_radius_m']\n\t\tprint 'Force: (g) : '+'%3.2f'%m3u.mN2g(mN)+' (mN): '+'%3.2f'%mN+' (ADC) '+'%d'%raw\n\t\t\n\tdef calibrate_torque(self):\n\t\tself.proxy.publish_command(self.comp_rt)\n\t\tself.proxy.publish_param(self.comp_rt)\n\t\tself.proxy.make_operational(self.name_rt)\n\t\tself.step()\n\t\tprint 'Make sure other digit is all the way open'\n\t\tprint 'Place digit in zero load condition'\n\t\tprint 'Hit enter when 
ready'\n\t\traw_input()\n\t\tself.step()\n\t\traw_a=int(self.get_sensor_list_avg(['adc_torque'],1.0)['adc_torque'])\n\t\tload_a=0\n\t\t\n\t\tprint 'Hang 1Kg weight from gripper near slider'\n\t\tprint 'Hit enter to move joint in first direction.'\n\t\traw_input()\n\t\tself.comp_rt.set_mode_pwm()\n\t\tprint 'Desired pwm? [',self.param_internal['pwm_torque'][0],']?'\n\t\tp=int(m3t.get_float(self.param_internal['pwm_theta'][0]))\n\t\tself.comp_rt.set_pwm(p)\n\t\tself.step()\n\t\tprint 'Hit any key when ready to sample'\n\t\traw_input()\n\t\traw_b=int(self.get_sensor_list_avg(['adc_torque'],1.0)['adc_torque'])\n\t\tprint 'Was load in the opening direction [y]?'\n\t\tif m3t.get_yes_no('y'):\n\t\t\tload_b=m3u.g2mN(1000.0)*self.comp_j.config['calib']['cb_drive_radius_m']\n\t\telse:\n\t\t\tload_b=m3u.g2mN(-1000.0)*self.comp_j.config['calib']['cb_drive_radius_m']\n\t\t\t\n\t\tprint 'Hit enter to move joint in second direction.'\n\t\traw_input()\n\t\tself.comp_rt.set_mode_pwm()\n\t\tprint 'Desired pwm? [',self.param_internal['pwm_torque'][1],']?'\n\t\tp=int(m3t.get_float(self.param_internal['pwm_theta'][1]))\n\t\tself.comp_rt.set_pwm(p)\n\t\tself.step()\n\t\tprint 'Hit any key when ready to sample'\n\t\traw_input()\n\t\traw_c=int(self.get_sensor_list_avg(['adc_torque'],1.0)['adc_torque'])\n\t\tload_c=-1*load_b\n\n\t\tlog_adc_torque=[raw_a,raw_b,raw_c]\n\t\tlog_load_mNm=[load_a,load_b,load_c]\n\t\tpoly,inv_poly=self.get_polyfit_to_data(x=log_adc_torque,y=log_load_mNm,n=1)\n\t\tself.write_raw_calibration({'log_adc_torque':log_adc_torque,'log_load_mNm':log_load_mNm,\n\t\t\t\t\t 'cb_torque':poly,'cb_inv_torque':inv_poly})\n\t\tself.comp_rt.config['calib']['torque']['cb_torque']=poly\n\t\tself.comp_rt.config['calib']['torque']['cb_inv_torque']=inv_poly\n\t\tprint 'Poly',poly\n\t\ts=m3tc.PolyEval(poly,[raw_a,raw_b,raw_c])\n\t\tm3t.mplot2(range(len(log_adc_torque)),log_load_mNm,s,xlabel='Samples',ylabel='Torque (mNm)',\n\t\t\t y1name='load',y2name='raw')\n\t\t\n\t\t\t\n\tdef calibrate_theta(self):\n\t\tpconfig=self.comp_ec.param.config #disable qei limits\n\t\tself.comp_ec.param.config=0 \n\t\tself.proxy.publish_command(self.comp_rt)\n\t\tself.proxy.publish_param(self.comp_rt)\n\t\tself.proxy.make_operational(self.name_rt)\n\t\tself.step()\n\t\tprint 'Make sure other digit is all the way open'\n\t\tprint 'Moving joint to first limit. Hit any key when ready'\n\t\traw_input()\n\t\tself.comp_rt.set_mode_pwm()\n\t\tprint 'Desired pwm? [',self.param_internal['pwm_theta'][0],']?'\n\t\tp=int(m3t.get_float(self.param_internal['pwm_theta'][0]))\n\t\tself.comp_rt.set_pwm(p)\n\t\tself.step()\n\t\tprint 'Hit any key when motion done'\n\t\traw_input()\n\t\tself.step()\n\t\tq_on_a=self.comp_ec.status.qei_on\n\t\tq_p_a=self.comp_ec.status.qei_period\n\t\tq_r_a=self.comp_ec.status.qei_rollover\n\t\tprint 'RawA',q_on_a\n\t\t\t\n\t\tprint 'Moving joint to second limit. Hit any key when ready'\n\t\traw_input()\n\t\tself.comp_rt.set_mode_pwm()\n\t\tprint 'Desired pwm? 
[',self.param_internal['pwm_theta'][1],']?'\n\t\tp=int(m3t.get_float(self.param_internal['pwm_theta'][1]))\n\t\tself.comp_rt.set_pwm(p)\n\t\tself.step()\n\t\tprint 'Hit any key when motion done'\n\t\traw_input()\n\t\tself.step()\n\t\tq_on_b=self.comp_ec.status.qei_on\n\t\tq_p_b=self.comp_ec.status.qei_period\n\t\tq_r_b=self.comp_ec.status.qei_rollover\n\t\tprint 'Rawb',q_on_b\n\t\t\n\t\ttheta_as=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on_a,q_p_a,q_r_a)\n\t\ttheta_bs=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on_b,q_p_b,q_r_b)\n\t\t\n\t\tprint 'Did this last motion open the gripper [y]?' #At zero position\n\t\tif m3t.get_yes_no('y'):\n\t\t\ttheta_b=0\n\t\t\ttheta_a=abs(theta_bs-theta_as)\n\t\telse:\n\t\t\ttheta_a=0\n\t\t\ttheta_b=abs(theta_bs-theta_as)\n\t\t\t\n\t\tself.comp_rt.set_mode_off()\n\t\tself.comp_ec.param.config=pconfig #enable qei limits\n\t\tself.step()\n\t\tself.proxy.make_safe_operational(self.name_rt)\n\t\tself.step()\n\t\t\t\n\t\tprint 'Raw',[theta_as,theta_bs]\n\t\tprint 'True',[theta_a,theta_b]\n\t\tpoly,inv_poly=self.get_polyfit_to_data([theta_as,theta_bs],[theta_a,theta_b],n=1)\n\t\t\n\t\tself.comp_rt.config['calib']['theta']['cb_scale']=poly[0]\n\t\tself.comp_rt.config['calib']['theta']['cb_bias']=poly[1]\n\t\t\n\t\ttheta_as=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on_a,q_p_a,q_r_a)\n\t\ttheta_bs=self.theta.raw_2_deg(self.comp_rt.config['calib']['theta'],q_on_b,q_p_b,q_r_b)\n\t\tprint 'New calibrated range',theta_as,theta_bs\n\t\t\n\t\tmax_q=max(theta_as,theta_bs)\n\t\tmin_q=min(theta_as,theta_bs)\n\t\tif self.comp_j is not None:\n\t\t\tprint 'Setting joint limits to',min_q,max_q\n\t\t\tprint 'Expected joint limits of',self.param_internal['joint_limits']\n\t\t\tself.comp_j.param.max_q=float(max_q) \n\t\t\tself.comp_j.param.min_q=float(min_q)\n\t\telse:\n\t\t\tprint 'Joint component missing. Unable to set joint limits to',min_q,max_q\n\t\t\t\n\t\t#Assume 0-Ndeg, where N is defined by the encoder soft limits\n\t\tself.comp_ec.config['param']['qei_min']=min(q_on_a,q_on_b)+100\n\t\tself.comp_ec.config['param']['qei_max']=max(q_on_a,q_on_b)-100\n\t\tself.comp_ec.param.qei_min=min(q_on_a,q_on_b)+100\n\t\tself.comp_ec.param.qei_max=max(q_on_a,q_on_b)-100\n\t\tprint 'Setting DSP qei min/max to',self.comp_ec.config['param']['qei_min'],self.comp_ec.config['param']['qei_max']\n\t\t\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import torch
import numpy as np
import torch.utils.data as data
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import time
class CNN(nn.Module):
def __init__(self, fragment_length, conv_layers_num, conv_kernel_size,
pool_kernel_size, fc_size, conv_dilation=1, pool_dilation=1,
conv_stride=1, pool_stride=2):
super(CNN, self).__init__()
self.input_channels = 4
self.fragment_length = fragment_length
self.conv_layers_num = conv_layers_num
self.conv_kernel_size = conv_kernel_size
self.pool_kernel_size = pool_kernel_size
self.conv1 = nn.Conv1d(in_channels=self.input_channels,
out_channels=self.conv_layers_num, kernel_size=self.
conv_kernel_size, stride=conv_stride, dilation=conv_dilation)
self.pool = nn.MaxPool1d(kernel_size=self.pool_kernel_size, stride=
pool_stride, dilation=pool_dilation)
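        # output length after conv/pool with padding = 0 follows the standard
        # PyTorch formula: L_out = (L_in + 2*pad - dilation*(k - 1) - 1) / stride + 1
        # e.g. fragment_length=100, conv k=5 -> 96 after conv; pool k=2, stride=2 -> 48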
size_after_conv = (self.fragment_length + 2 * 0 - conv_dilation * (
self.conv_kernel_size - 1) - 1) / conv_stride + 1
size_after_pool = (size_after_conv + 2 * 0 - pool_dilation * (self.
pool_kernel_size - 1) - 1) / pool_stride + 1
self.dropout = nn.Dropout()
self.input_fc = int(size_after_pool) * self.conv_layers_num
self.output_fc = fc_size
self.fc1 = nn.Linear(self.input_fc, self.output_fc)
self.fc2 = nn.Linear(self.output_fc, 2)
self.softmax = torch.nn.Softmax(dim=1)
def forward(self, x):
conv_result = self.conv1(x)
relu_result = F.relu(conv_result)
pooling_result = self.pool(relu_result)
fc_input = pooling_result.view(-1, self.input_fc)
dropout_result1 = self.dropout(fc_input)
fc_result1 = self.fc1(dropout_result1)
relu_result1 = F.relu(fc_result1)
dropout_result2 = self.dropout(relu_result1)
fc_result2 = self.fc2(dropout_result2)
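        # ReLU on the logits clamps negatives to zero before the softmax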
relu_result2 = F.relu(fc_result2)
result = self.softmax(relu_result2)
return result
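

# Minimal usage sketch (values are illustrative assumptions, not from the
# original): a batch of 8 sequences, 4 channels (e.g. one-hot A/C/G/T) by
# 100 positions, classified into 2 classes.
if __name__ == '__main__':
    model = CNN(fragment_length=100, conv_layers_num=16, conv_kernel_size=5,
                pool_kernel_size=2, fc_size=64)
    model.eval()  # disable dropout for a deterministic forward pass
    probs = model(torch.randn(8, 4, 100))
    print(probs.shape)  # torch.Size([8, 2]) -- per-sample class probabilities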
|
normal
|
{
"blob_id": "415a6cf1c3f633a863851a4a407d416355398b39",
"index": 7732,
"step-1": "<mask token>\n\n\nclass CNN(nn.Module):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass CNN(nn.Module):\n\n def __init__(self, fragment_length, conv_layers_num, conv_kernel_size,\n pool_kernel_size, fc_size, conv_dilation=1, pool_dilation=1,\n conv_stride=1, pool_stride=2):\n super(CNN, self).__init__()\n self.input_channels = 4\n self.fragment_length = fragment_length\n self.conv_layers_num = conv_layers_num\n self.conv_kernel_size = conv_kernel_size\n self.pool_kernel_size = pool_kernel_size\n self.conv1 = nn.Conv1d(in_channels=self.input_channels,\n out_channels=self.conv_layers_num, kernel_size=self.\n conv_kernel_size, stride=conv_stride, dilation=conv_dilation)\n self.pool = nn.MaxPool1d(kernel_size=self.pool_kernel_size, stride=\n pool_stride, dilation=pool_dilation)\n size_after_conv = (self.fragment_length + 2 * 0 - conv_dilation * (\n self.conv_kernel_size - 1) - 1) / conv_stride + 1\n size_after_pool = (size_after_conv + 2 * 0 - pool_dilation * (self.\n pool_kernel_size - 1) - 1) / pool_stride + 1\n self.dropout = nn.Dropout()\n self.input_fc = int(size_after_pool) * self.conv_layers_num\n self.output_fc = fc_size\n self.fc1 = nn.Linear(self.input_fc, self.output_fc)\n self.fc2 = nn.Linear(self.output_fc, 2)\n self.softmax = torch.nn.Softmax(dim=1)\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass CNN(nn.Module):\n\n def __init__(self, fragment_length, conv_layers_num, conv_kernel_size,\n pool_kernel_size, fc_size, conv_dilation=1, pool_dilation=1,\n conv_stride=1, pool_stride=2):\n super(CNN, self).__init__()\n self.input_channels = 4\n self.fragment_length = fragment_length\n self.conv_layers_num = conv_layers_num\n self.conv_kernel_size = conv_kernel_size\n self.pool_kernel_size = pool_kernel_size\n self.conv1 = nn.Conv1d(in_channels=self.input_channels,\n out_channels=self.conv_layers_num, kernel_size=self.\n conv_kernel_size, stride=conv_stride, dilation=conv_dilation)\n self.pool = nn.MaxPool1d(kernel_size=self.pool_kernel_size, stride=\n pool_stride, dilation=pool_dilation)\n size_after_conv = (self.fragment_length + 2 * 0 - conv_dilation * (\n self.conv_kernel_size - 1) - 1) / conv_stride + 1\n size_after_pool = (size_after_conv + 2 * 0 - pool_dilation * (self.\n pool_kernel_size - 1) - 1) / pool_stride + 1\n self.dropout = nn.Dropout()\n self.input_fc = int(size_after_pool) * self.conv_layers_num\n self.output_fc = fc_size\n self.fc1 = nn.Linear(self.input_fc, self.output_fc)\n self.fc2 = nn.Linear(self.output_fc, 2)\n self.softmax = torch.nn.Softmax(dim=1)\n\n def forward(self, x):\n conv_result = self.conv1(x)\n relu_result = F.relu(conv_result)\n pooling_result = self.pool(relu_result)\n fc_input = pooling_result.view(-1, self.input_fc)\n dropout_result1 = self.dropout(fc_input)\n fc_result1 = self.fc1(dropout_result1)\n relu_result1 = F.relu(fc_result1)\n dropout_result2 = self.dropout(relu_result1)\n fc_result2 = self.fc2(dropout_result2)\n relu_result2 = F.relu(fc_result2)\n result = self.softmax(relu_result2)\n return result\n",
"step-4": "import torch\nimport numpy as np\nimport torch.utils.data as data\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport time\n\n\nclass CNN(nn.Module):\n\n def __init__(self, fragment_length, conv_layers_num, conv_kernel_size,\n pool_kernel_size, fc_size, conv_dilation=1, pool_dilation=1,\n conv_stride=1, pool_stride=2):\n super(CNN, self).__init__()\n self.input_channels = 4\n self.fragment_length = fragment_length\n self.conv_layers_num = conv_layers_num\n self.conv_kernel_size = conv_kernel_size\n self.pool_kernel_size = pool_kernel_size\n self.conv1 = nn.Conv1d(in_channels=self.input_channels,\n out_channels=self.conv_layers_num, kernel_size=self.\n conv_kernel_size, stride=conv_stride, dilation=conv_dilation)\n self.pool = nn.MaxPool1d(kernel_size=self.pool_kernel_size, stride=\n pool_stride, dilation=pool_dilation)\n size_after_conv = (self.fragment_length + 2 * 0 - conv_dilation * (\n self.conv_kernel_size - 1) - 1) / conv_stride + 1\n size_after_pool = (size_after_conv + 2 * 0 - pool_dilation * (self.\n pool_kernel_size - 1) - 1) / pool_stride + 1\n self.dropout = nn.Dropout()\n self.input_fc = int(size_after_pool) * self.conv_layers_num\n self.output_fc = fc_size\n self.fc1 = nn.Linear(self.input_fc, self.output_fc)\n self.fc2 = nn.Linear(self.output_fc, 2)\n self.softmax = torch.nn.Softmax(dim=1)\n\n def forward(self, x):\n conv_result = self.conv1(x)\n relu_result = F.relu(conv_result)\n pooling_result = self.pool(relu_result)\n fc_input = pooling_result.view(-1, self.input_fc)\n dropout_result1 = self.dropout(fc_input)\n fc_result1 = self.fc1(dropout_result1)\n relu_result1 = F.relu(fc_result1)\n dropout_result2 = self.dropout(relu_result1)\n fc_result2 = self.fc2(dropout_result2)\n relu_result2 = F.relu(fc_result2)\n result = self.softmax(relu_result2)\n return result\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
# -*- coding: utf-8 -*-
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _
class StaffApp(CMSApp):
name = _('Staff')
urls = ['blog.urls', ]
app_name = 'staff'
apphook_pool.register(StaffApp)
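
# Once registered, the apphook can be attached to a CMS page (Advanced
# Settings -> Application); django CMS then mounts the URLconf listed in
# `urls` (here blog.urls) at that page's path, reversible through the
# 'staff' application namespace.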
|
normal
|
{
"blob_id": "40ee790f4272c05c1619eb7b2cc66a8b57bbe8a8",
"index": 5988,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass StaffApp(CMSApp):\n name = _('Staff')\n urls = ['blog.urls']\n app_name = 'staff'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass StaffApp(CMSApp):\n name = _('Staff')\n urls = ['blog.urls']\n app_name = 'staff'\n\n\napphook_pool.register(StaffApp)\n",
"step-4": "from cms.app_base import CMSApp\nfrom cms.apphook_pool import apphook_pool\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass StaffApp(CMSApp):\n name = _('Staff')\n urls = ['blog.urls']\n app_name = 'staff'\n\n\napphook_pool.register(StaffApp)\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\r\nfrom cms.app_base import CMSApp\r\nfrom cms.apphook_pool import apphook_pool\r\nfrom django.utils.translation import ugettext_lazy as _\r\n\r\n\r\nclass StaffApp(CMSApp):\r\n name = _('Staff')\r\n urls = ['blog.urls', ]\r\n app_name = 'staff'\r\n\r\n\r\napphook_pool.register(StaffApp)\r\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
name = ['zhangsan']
def func(n):
name = n
print(name)
def func1():
nonlocal name
name = 'xiaohong'
print(name)
func1()
print(name)
func('lisi')
|
normal
|
{
"blob_id": "b04aef64dc0485d9112a40e00d178042833a9ddd",
"index": 4294,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef func(n):\n name = n\n print(name)\n\n def func1():\n nonlocal name\n name = 'xiaohong'\n print(name)\n func1()\n print(name)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef func(n):\n name = n\n print(name)\n\n def func1():\n nonlocal name\n name = 'xiaohong'\n print(name)\n func1()\n print(name)\n\n\nfunc('lisi')\n",
"step-4": "name = ['zhangsan']\n\n\ndef func(n):\n name = n\n print(name)\n\n def func1():\n nonlocal name\n name = 'xiaohong'\n print(name)\n func1()\n print(name)\n\n\nfunc('lisi')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#! /usr/bin/env python3
# -*- coding:utf-8 -*-
"""
企查查 (Qichacha, qcc.com) - administrative licence records: [工商局] (AIC) and [信用中国] (Credit China)
"""
import json
import time
import random
import requests
from lxml import etree
from support.use_mysql import QccMysql as db
from support.others import DealKey as dk
from support.others import TimeInfo as tm
from support.headers import GeneralHeaders as gh
class AdmLicense():
    def get_com_id(self):  # randomly pick one company whose licence status has not been checked yet
sel = """
SELECT `com_id`,`com_name`
FROM `com_info`
WHERE `origin`
IS NOT NULL AND LENGTH(`com_id`) > 5 AND `status_credit_adm_license` IS NULL
ORDER BY RAND() LIMIT 1;
"""
        # test SQL #
        # sel = """
        #     SELECT `com_id`, `com_name`
        #     FROM `com_info`
        #     WHERE com_id = '299eee201318f0283f086b4847d69fc7';
        #     """
        # test SQL #
result = db().selsts(sel)
if result == ():
result = [None, None]
else:
result = result[0]
return result
    # update the status/count columns on com_info:
    # -1 = lookup failed, 0 = no records, 9 = records found (set to 1 once the records are scraped)
    def upd_status(self, com_id, status_column, count_column, count):
if count == -1:
status = -1
elif count == 0:
status = 0
else:
status = 9
upd = f"""
UPDATE
`com_info`
SET
`{status_column}` = "{status}",`{count_column}` = "{count}"
WHERE
`com_id` = "{com_id}" ;
"""
db().updsts(upd)
    def adm_license_judge(self):  # keep sampling companies until one has administrative-licence records, then return (com_id, com_name, count)
global com_id, com_name
al = AdmLicense()
count_adm_license = 0
count = 0
while count_adm_license == 0 or count_adm_license == -1:
result = al.get_com_id()
com_id = result[0]
com_name = result[1]
if com_id == None:
pass
else:
count += 1
com_url = f'https://www.qcc.com/firm_{com_id}.html'
hds = gh().header()
time.sleep(random.randint(3, 5))
res = requests.get(com_url, headers=hds).text
if '<script>window.location.href' in res:
print('访问频繁,需验证!{adm_license_judge}')
input('暂停')
elif '<script>location.href="/user_login"</script>' in res:
print('Cookie失效,需更换!{adm_license_judge}')
input('程序暂停运行!')
elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
print('账号访问超频,请更换账号!{adm_license_judge}')
input('程序暂停运行!')
else:
tree = etree.HTML(res)
try:
count_adm_license = tree.xpath('//div[@class="company-nav-items"]/span[contains(text(),"行政许可")]/span/text()|//div[@class="company-nav-items"]/a[@data-pos="licenslist"]/span/text()')[0]
count_adm_license = int(count_adm_license)
except:
count_adm_license = -1
                localtime = tm().get_localtime()  # current time
print(localtime)
if count_adm_license == 0 or count_adm_license == -1:
print(f'计数器:{count}\n公司ID:{com_id}\n行政许可信息条数:无')
else:
print(f'计数器:{count}\n公司ID:{com_id}\n行政许可信息条数:{count_adm_license}')
                    status_column = 'status_credit_adm_license'  # com_info column name
                    count_column = 'count_credit_adm_license'  # com_info column name
al.upd_status(com_id,status_column,count_column,count_adm_license)
return com_id, com_name, count_adm_license
class AdmLicenseBc(AdmLicense):
def bc_judge(self):
global com_id,com_name
alb = AdmLicenseBc()
count_bc = 0
count = 0
while count_bc == 0:
result = alb.adm_license_judge()
com_id = result[0]
com_name = result[1]
key = dk().search_key(com_name)
if com_id == None:
pass
else:
count += 1
com_url = f'https://www.qcc.com/firm_{com_id}.html'
hds = gh().header()
time.sleep(random.randint(3, 5))
res = requests.get(com_url, headers=hds).text
if '<script>window.location.href' in res:
print('访问频繁,需验证!{bc_judge}')
input('暂停')
elif '<script>location.href="/user_login"</script>' in res:
print('Cookie失效,需更换!{bc_judge}')
input('程序暂停运行!')
elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
print('账号访问超频,请更换账号!{bc_judge}')
input('程序暂停运行!')
else:
tree = etree.HTML(res)
try:
count_bc = tree.xpath('//div[@class="tcaption"]/h3[contains(text(),"[工商局]")]/following-sibling::span[1]/text()')[0]
count_bc = int(count_bc)
except:
count_bc = 0
                localtime = tm().get_localtime()  # current time
print(localtime)
if count_bc == 0:
print(f'计数器:{count}\n公司ID:{com_id}\n行政许可信息[工商局]条数:无')
else:
print(f'计数器:{count}\n公司ID:{com_id}\n行政许可信息[工商局]条数:{count_bc}')
                    status_column = 'status_credit_adm_license_bc'  # com_info column name
                    count_column = 'count_credit_adm_license_bc'  # com_info column name
alb.upd_status(com_id, status_column, count_column, count_bc)
return com_id, com_name, count_bc
    def get_page_count(self):  # number of result pages (10 records per page)
alb = AdmLicenseBc()
result = alb.bc_judge()
com_id = result[0]
com_name = result[1]
count_record = result[2]
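        # 10 records per page: e.g. 30 records -> 3 pages, 23 records -> 3 pages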
if count_record % 10 == 0:
count_page = count_record // 10
else:
count_page = count_record // 10 + 1
value = [com_id, com_name, count_page, count_record]
return value
    def get_page_info(self):  # fetch every result page, parse the rows and insert them into MySQL
alb = AdmLicenseBc()
value = alb.get_page_count()
com_id = value[0]
com_name = value[1]
count_page = value[2]
count_record = value[3]
key = dk().search_key(com_name)
count = 0
for page in range(1, count_page + 1):
index_url = 'https://www.qcc.com'
page_url = f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run&box=licens'
hds = gh().header()
hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})
time.sleep(random.randint(1, 2))
res = requests.get(page_url, headers=hds).text
if '<script>window.location.href' in res:
print('访问频繁,需验证!{get_page_info[2]}')
input('暂停')
elif '<script>location.href="/user_login"</script>' in res:
print('Cookie失效,需更换!{get_page_info[2]}')
input('程序暂停运行!')
elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
print('账号访问超频,请更换账号!{get_page_info[2]}')
input('程序暂停运行!')
else:
tree = etree.HTML(res)
content_li = tree.xpath('//table[@class="ntable ntable-odd"]/tr[position()>2]')
for nbr, content in enumerate(content_li, 1):
count += 1
try:
license_num = content.xpath('td[1]/text()')[0]
license_doc_num = content.xpath('td[2]/text()')[0]
license_doc_name = content.xpath('td[3]/text()')[0]
valid_period_from = content.xpath('td[4]/text()')[0]
valid_period_to = content.xpath('td[5]/text()')[0]
license_office = content.xpath('td[6]/text()')[0]
license_content = content.xpath('td[7]/text()')[0]
except:
license_num = None
license_doc_num = None
license_doc_name = None
valid_period_from = None
valid_period_to = None
license_office = None
license_content = None
print('\n{0}--总第{1}条----{2}/{3}页--{0}\n'.format('-' * 9, count, page, count_page))
                    localtime = tm().get_localtime()  # current time
create_time = localtime
print(f'当前时间:{create_time}')
print(f'公司ID:{com_id}\n序号:{license_num}\n许可文件编号:{license_doc_num}\n许可文件名称:{license_doc_name}\n有效期自:{valid_period_from}\n'
f'有效期至:{valid_period_to}\n许可机关:{license_office}\n许可内容:{license_content}')
if license_num == None:
ins = """
INSERT INTO
`com_credit_adm_license_bc`
(`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,
`valid_period_to`,`license_office`,`license_content`,`create_time`)
VALUES
(NULL,NULL,NULL,NULL,NULL,
NULL,NULL,NULL,NULL);
"""
else:
ins = f"""
INSERT INTO
`com_credit_adm_license_bc`
(`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,
`valid_period_to`,`license_office`,`license_content`,`create_time`)
VALUES
("{com_id}","{license_num}","{license_doc_num}","{license_doc_name}","{valid_period_from}",
"{valid_period_to}","{license_office}","{license_content}","{create_time}");
"""
db().inssts(ins)
upd = f"""
UPDATE
`com_info`
SET
`status_credit_adm_license_bc` = 1
WHERE
`com_id` = "{com_id}" ;
"""
db().updsts(upd)
        localtime = tm().get_localtime()  # current time
print('\n{1}\n{0}数据采集完成!{0}\n{1}'.format('+' * 7, '+' * 25))
print(f'当前时间:{localtime}\n')
time.sleep(3)
class AdmLicenseCc(AdmLicense):  # administrative licences from [信用中国] (Credit China)
def cc_judge(self):
global com_id,com_name
alb = AdmLicenseCc()
count_cc = 0
count = 0
while count_cc == 0:
result = alb.adm_license_judge()
com_id = result[0]
com_name = result[1]
key = dk().search_key(com_name)
if com_id == None:
pass
else:
count += 1
com_url = f'https://www.qcc.com/firm_{com_id}.html'
hds = gh().header()
time.sleep(random.randint(3, 5))
res = requests.get(com_url, headers=hds).text
if '<script>window.location.href' in res:
print('访问频繁,需验证!{cc_judge}')
input('暂停')
elif '<script>location.href="/user_login"</script>' in res:
print('Cookie失效,需更换!{cc_judge}')
input('程序暂停运行!')
elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
print('账号访问超频,请更换账号!{cc_judge}')
input('程序暂停运行!')
else:
tree = etree.HTML(res)
try:
count_cc = tree.xpath('//div[@class="tcaption"]/h3[contains(text(),"[信用中国]")]/following-sibling::span[1]/text()')[0]
count_cc = int(count_cc)
except:
count_cc = 0
                localtime = tm().get_localtime()  # current time
                print(localtime)
                if count_cc == 0:
                    print(f'计数器:{count}\n公司ID:{com_id}\n行政许可信息[信用中国]条数:无')
                else:
                    print(f'计数器:{count}\n公司ID:{com_id}\n行政许可信息[信用中国]条数:{count_cc}')
                    status_column = 'status_credit_adm_license_cc'  # com_info column name
                    count_column = 'count_credit_adm_license_cc'  # com_info column name
                    alb.upd_status(com_id, status_column, count_column, count_cc)
return com_id, com_name, count_cc
    def get_page_info(self):  # fetch the [信用中国] records for one company, parse them and print the details
global project_name,license_status,license_content,expire_time,approval_category,area
alb = AdmLicenseCc()
value = alb.cc_judge()
com_id = value[0]
com_name = value[1]
count_cc = value[2]
key = dk().search_key(com_name)
        count = 0
        # 10 records per page; fetch the first page here -- extend to a loop
        # over range(1, count_page + 1), as in AdmLicenseBc, to cover them all
        count_page = count_cc // 10 + (0 if count_cc % 10 == 0 else 1)
        page = 1
        index_url = 'https://www.qcc.com'
        page_url = f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run'
hds = gh().header()
hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})
time.sleep(random.randint(3, 5))
res = requests.get(page_url, headers=hds).text
        if '<script>window.location.href' in res:
            print('访问频繁,需验证!{get_page_info}')
            input('暂停')
        elif '<script>location.href="/user_login"</script>' in res:
            print('Cookie失效,需更换!{get_page_info}')
            input('程序暂停运行!')
        elif '您的账号访问超频,请稍后访问或联系客服人员' in res:
            print('账号访问超频,请更换账号!{get_page_info}')
            input('程序暂停运行!')
else:
tree = etree.HTML(res)
content_li = tree.xpath('//div[@class="tcaption"]/span[contains(text(),"[信用中国]")]/parent::div/following-sibling::table[@class="ntable ntable-odd"]/tr[position()>2]')
for nbr, content in enumerate(content_li, 1):
count += 1
try:
license_num = content.xpath('td[1]/text()')[0]
dec_book_num = content.xpath('td[2]/text()')[0]
license_office = content.xpath('td[3]/text()')[0]
dec_date = content.xpath('td[4]/text()')[0]
time.sleep(random.randint(1, 2))
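                    # the detail id is embedded in the onclick attribute, e.g. onclick='xzxukeView("...")'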
dt_id = content.xpath('td[5]/a[@class="xzxukeView"]/@onclick')[0].split('xzxukeView("')[1].split('")')[0]
dt_url = 'https://www.qcc.com/company_xzxukeView'
para = {'id':f'{dt_id}'}
res_info = requests.post(dt_url, headers=hds,data=para).text
status = json.loads(res_info)['status']
if status == 200:
data = json.loads(res_info)['data']
project_name = data['name']
license_status = data['status']
license_content = data['content']
expire_time = data['expire_time']
approval_category = data['type']
area = data['province']
else:
print(f'响应失败!\n状态码:{status}')
input('程序暂停运行!')
except:
license_num = None
dec_book_num = None
license_office = None
dec_date = None
dt_id = None
project_name = None
license_status = None
license_content = None
expire_time = None
approval_category = None
print('\n{0}--总第{1}条----{2}/{3}页--{0}\n'.format('-' * 9, count, page, count_page))
                localtime = tm().get_localtime()  # current time
create_time = localtime
print(f'当前时间:{create_time}')
print(f'公司ID:{com_id}\n序号:{license_num}\n决定文书号:{dec_book_num}\n许可机关:{license_office}\n详情ID:{dt_id}\n'
f'决定日期:{dec_date}\n项目名称:{project_name}\n许可状态:{license_status}\n许可内容:{license_content}\n截止时间:{expire_time}\n'
f'审批类别:{approval_category}\n地域:{area}\n创建/入库时间:{create_time}')
input('Pause')
if __name__ == '__main__':
cc = AdmLicenseCc()
cc.get_page_info()
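    # to harvest the [工商局] (AIC) records instead, run the sibling pipeline:
    # AdmLicenseBc().get_page_info()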
|
normal
|
{
"blob_id": "63822d60ef9dcc1e123a3d20874e9f492b439c6d",
"index": 3313,
"step-1": "<mask token>\n\n\nclass AdmLicenseBc(AdmLicense):\n\n def bc_judge(self):\n global com_id, com_name\n alb = AdmLicenseBc()\n count_bc = 0\n count = 0\n while count_bc == 0:\n result = alb.adm_license_judge()\n com_id = result[0]\n com_name = result[1]\n key = dk().search_key(com_name)\n if com_id == None:\n pass\n else:\n count += 1\n com_url = f'https://www.qcc.com/firm_{com_id}.html'\n hds = gh().header()\n time.sleep(random.randint(3, 5))\n res = requests.get(com_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{bc_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{bc_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{bc_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n try:\n count_bc = tree.xpath(\n '//div[@class=\"tcaption\"]/h3[contains(text(),\"[工商局]\")]/following-sibling::span[1]/text()'\n )[0]\n count_bc = int(count_bc)\n except:\n count_bc = 0\n localtime = tm().get_localtime()\n print(localtime)\n if count_bc == 0:\n print(f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:无')\n else:\n print(\n f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:{count_bc}'\n )\n status_column = 'status_credit_adm_license_bc'\n count_column = 'count_credit_adm_license_bc'\n alb.upd_status(com_id, status_column, count_column,\n count_bc)\n return com_id, com_name, count_bc\n\n def get_page_count(self):\n alb = AdmLicenseBc()\n result = alb.bc_judge()\n com_id = result[0]\n com_name = result[1]\n count_record = result[2]\n if count_record % 10 == 0:\n count_page = count_record // 10\n else:\n count_page = count_record // 10 + 1\n value = [com_id, com_name, count_page, count_record]\n return value\n\n def get_page_info(self):\n alb = AdmLicenseBc()\n value = alb.get_page_count()\n com_id = value[0]\n com_name = value[1]\n count_page = value[2]\n count_record = value[3]\n key = dk().search_key(com_name)\n count = 0\n for page in range(1, count_page + 1):\n index_url = 'https://www.qcc.com'\n page_url = (\n f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run&box=licens'\n )\n hds = gh().header()\n hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})\n time.sleep(random.randint(1, 2))\n res = requests.get(page_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{get_page_info[2]}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{get_page_info[2]}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{get_page_info[2]}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n content_li = tree.xpath(\n '//table[@class=\"ntable ntable-odd\"]/tr[position()>2]')\n for nbr, content in enumerate(content_li, 1):\n count += 1\n try:\n license_num = content.xpath('td[1]/text()')[0]\n license_doc_num = content.xpath('td[2]/text()')[0]\n license_doc_name = content.xpath('td[3]/text()')[0]\n valid_period_from = content.xpath('td[4]/text()')[0]\n valid_period_to = content.xpath('td[5]/text()')[0]\n license_office = content.xpath('td[6]/text()')[0]\n license_content = content.xpath('td[7]/text()')[0]\n except:\n license_num = None\n license_doc_num = None\n license_doc_name = None\n valid_period_from = None\n valid_period_to = None\n license_office = None\n license_content = None\n print('\\n{0}--总第{1}条----{2}/{3}页--{0}\\n'.format('-' * 9,\n count, page, count_page))\n localtime = tm().get_localtime()\n create_time = 
localtime\n print(f'当前时间:{create_time}')\n print(\n f\"\"\"公司ID:{com_id}\n序号:{license_num}\n许可文件编号:{license_doc_num}\n许可文件名称:{license_doc_name}\n有效期自:{valid_period_from}\n有效期至:{valid_period_to}\n许可机关:{license_office}\n许可内容:{license_content}\"\"\"\n )\n if license_num == None:\n ins = \"\"\"\n INSERT INTO\n `com_credit_adm_license_bc`\n (`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,\n `valid_period_to`,`license_office`,`license_content`,`create_time`)\n VALUES\n (NULL,NULL,NULL,NULL,NULL,\n NULL,NULL,NULL,NULL);\n \"\"\"\n else:\n ins = f\"\"\"\n INSERT INTO\n `com_credit_adm_license_bc`\n (`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,\n `valid_period_to`,`license_office`,`license_content`,`create_time`)\n VALUES\n (\"{com_id}\",\"{license_num}\",\"{license_doc_num}\",\"{license_doc_name}\",\"{valid_period_from}\",\n \"{valid_period_to}\",\"{license_office}\",\"{license_content}\",\"{create_time}\");\n \"\"\"\n db().inssts(ins)\n upd = f\"\"\"\n UPDATE \n `com_info` \n SET\n `status_credit_adm_license_bc` = 1\n WHERE \n `com_id` = \"{com_id}\" ;\n \"\"\"\n db().updsts(upd)\n localtime = tm().get_localtime()\n print('\\n{1}\\n{0}数据采集完成!{0}\\n{1}'.format('+' * 7, '+' * 25))\n print(f'当前时间:{localtime}\\n')\n time.sleep(3)\n\n\nclass AdmLicenseCc(AdmLicense):\n\n def cc_judge(self):\n global com_id, com_name\n alb = AdmLicenseCc()\n count_cc = 0\n count = 0\n while count_cc == 0:\n result = alb.adm_license_judge()\n com_id = result[0]\n com_name = result[1]\n key = dk().search_key(com_name)\n if com_id == None:\n pass\n else:\n count += 1\n com_url = f'https://www.qcc.com/firm_{com_id}.html'\n hds = gh().header()\n time.sleep(random.randint(3, 5))\n res = requests.get(com_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{cc_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{cc_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{cc_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n try:\n count_cc = tree.xpath(\n '//div[@class=\"tcaption\"]/h3[contains(text(),\"[信用中国]\")]/following-sibling::span[1]/text()'\n )[0]\n count_cc = int(count_cc)\n except:\n count_cc = 0\n localtime = tm().get_localtime()\n print(localtime)\n if count_cc == 0:\n print(f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:无')\n else:\n print(\n f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:{count_cc}'\n )\n status_column = 'status_credit_adm_license_cc'\n count_column = 'count_credit_adm_license_cc'\n cd.upd_status(com_id, status_column, count_column, count_cc\n )\n return com_id, com_name, count_cc\n\n def get_page_info(self):\n global project_name, license_status, license_content, expire_time, approval_category, area\n alb = AdmLicenseCc()\n value = alb.cc_judge()\n com_id = value[0]\n com_name = value[1]\n count_cc = value[2]\n key = dk().search_key(com_name)\n count = 0\n index_url = 'https://www.qcc.com'\n page_url = (\n f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run'\n )\n hds = gh().header()\n hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})\n time.sleep(random.randint(3, 5))\n res = requests.get(page_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{cc_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{cc_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in 
res:\n print('账号访问超频,请更换账号!{cc_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n content_li = tree.xpath(\n '//div[@class=\"tcaption\"]/span[contains(text(),\"[信用中国]\")]/parent::div/following-sibling::table[@class=\"ntable ntable-odd\"]/tr[position()>2]'\n )\n for nbr, content in enumerate(content_li, 1):\n count += 1\n try:\n license_num = content.xpath('td[1]/text()')[0]\n dec_book_num = content.xpath('td[2]/text()')[0]\n license_office = content.xpath('td[3]/text()')[0]\n dec_date = content.xpath('td[4]/text()')[0]\n time.sleep(random.randint(1, 2))\n dt_id = content.xpath(\n 'td[5]/a[@class=\"xzxukeView\"]/@onclick')[0].split(\n 'xzxukeView(\"')[1].split('\")')[0]\n dt_url = 'https://www.qcc.com/company_xzxukeView'\n para = {'id': f'{dt_id}'}\n res_info = requests.post(dt_url, headers=hds, data=para\n ).text\n status = json.loads(res_info)['status']\n if status == 200:\n data = json.loads(res_info)['data']\n project_name = data['name']\n license_status = data['status']\n license_content = data['content']\n expire_time = data['expire_time']\n approval_category = data['type']\n area = data['province']\n else:\n print(f'响应失败!\\n状态码:{status}')\n input('程序暂停运行!')\n except:\n license_num = None\n dec_book_num = None\n license_office = None\n dec_date = None\n dt_id = None\n project_name = None\n license_status = None\n license_content = None\n expire_time = None\n approval_category = None\n print('\\n{0}--总第{1}条----{2}/{3}页--{0}\\n'.format('-' * 9,\n count, page, count_page))\n localtime = tm().get_localtime()\n create_time = localtime\n print(f'当前时间:{create_time}')\n print(\n f\"\"\"公司ID:{com_id}\n序号:{license_num}\n决定文书号:{dec_book_num}\n许可机关:{license_office}\n详情ID:{dt_id}\n决定日期:{dec_date}\n项目名称:{project_name}\n许可状态:{license_status}\n许可内容:{license_content}\n截止时间:{expire_time}\n审批类别:{approval_category}\n地域:{area}\n创建/入库时间:{create_time}\"\"\"\n )\n input('Pause')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass AdmLicense:\n <mask token>\n\n def upd_status(self, com_id, status_column, count_column, count):\n if count == -1:\n status = -1\n elif count == 0:\n status = 0\n else:\n status = 9\n upd = f\"\"\"\n UPDATE \n `com_info` \n SET\n `{status_column}` = \"{status}\",`{count_column}` = \"{count}\"\n WHERE \n `com_id` = \"{com_id}\" ;\n \"\"\"\n db().updsts(upd)\n <mask token>\n\n\nclass AdmLicenseBc(AdmLicense):\n\n def bc_judge(self):\n global com_id, com_name\n alb = AdmLicenseBc()\n count_bc = 0\n count = 0\n while count_bc == 0:\n result = alb.adm_license_judge()\n com_id = result[0]\n com_name = result[1]\n key = dk().search_key(com_name)\n if com_id == None:\n pass\n else:\n count += 1\n com_url = f'https://www.qcc.com/firm_{com_id}.html'\n hds = gh().header()\n time.sleep(random.randint(3, 5))\n res = requests.get(com_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{bc_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{bc_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{bc_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n try:\n count_bc = tree.xpath(\n '//div[@class=\"tcaption\"]/h3[contains(text(),\"[工商局]\")]/following-sibling::span[1]/text()'\n )[0]\n count_bc = int(count_bc)\n except:\n count_bc = 0\n localtime = tm().get_localtime()\n print(localtime)\n if count_bc == 0:\n print(f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:无')\n else:\n print(\n f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:{count_bc}'\n )\n status_column = 'status_credit_adm_license_bc'\n count_column = 'count_credit_adm_license_bc'\n alb.upd_status(com_id, status_column, count_column,\n count_bc)\n return com_id, com_name, count_bc\n\n def get_page_count(self):\n alb = AdmLicenseBc()\n result = alb.bc_judge()\n com_id = result[0]\n com_name = result[1]\n count_record = result[2]\n if count_record % 10 == 0:\n count_page = count_record // 10\n else:\n count_page = count_record // 10 + 1\n value = [com_id, com_name, count_page, count_record]\n return value\n\n def get_page_info(self):\n alb = AdmLicenseBc()\n value = alb.get_page_count()\n com_id = value[0]\n com_name = value[1]\n count_page = value[2]\n count_record = value[3]\n key = dk().search_key(com_name)\n count = 0\n for page in range(1, count_page + 1):\n index_url = 'https://www.qcc.com'\n page_url = (\n f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run&box=licens'\n )\n hds = gh().header()\n hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})\n time.sleep(random.randint(1, 2))\n res = requests.get(page_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{get_page_info[2]}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{get_page_info[2]}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{get_page_info[2]}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n content_li = tree.xpath(\n '//table[@class=\"ntable ntable-odd\"]/tr[position()>2]')\n for nbr, content in enumerate(content_li, 1):\n count += 1\n try:\n license_num = content.xpath('td[1]/text()')[0]\n license_doc_num = content.xpath('td[2]/text()')[0]\n license_doc_name = content.xpath('td[3]/text()')[0]\n valid_period_from = content.xpath('td[4]/text()')[0]\n valid_period_to = content.xpath('td[5]/text()')[0]\n license_office = 
content.xpath('td[6]/text()')[0]\n license_content = content.xpath('td[7]/text()')[0]\n except:\n license_num = None\n license_doc_num = None\n license_doc_name = None\n valid_period_from = None\n valid_period_to = None\n license_office = None\n license_content = None\n print('\\n{0}--总第{1}条----{2}/{3}页--{0}\\n'.format('-' * 9,\n count, page, count_page))\n localtime = tm().get_localtime()\n create_time = localtime\n print(f'当前时间:{create_time}')\n print(\n f\"\"\"公司ID:{com_id}\n序号:{license_num}\n许可文件编号:{license_doc_num}\n许可文件名称:{license_doc_name}\n有效期自:{valid_period_from}\n有效期至:{valid_period_to}\n许可机关:{license_office}\n许可内容:{license_content}\"\"\"\n )\n if license_num == None:\n ins = \"\"\"\n INSERT INTO\n `com_credit_adm_license_bc`\n (`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,\n `valid_period_to`,`license_office`,`license_content`,`create_time`)\n VALUES\n (NULL,NULL,NULL,NULL,NULL,\n NULL,NULL,NULL,NULL);\n \"\"\"\n else:\n ins = f\"\"\"\n INSERT INTO\n `com_credit_adm_license_bc`\n (`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,\n `valid_period_to`,`license_office`,`license_content`,`create_time`)\n VALUES\n (\"{com_id}\",\"{license_num}\",\"{license_doc_num}\",\"{license_doc_name}\",\"{valid_period_from}\",\n \"{valid_period_to}\",\"{license_office}\",\"{license_content}\",\"{create_time}\");\n \"\"\"\n db().inssts(ins)\n upd = f\"\"\"\n UPDATE \n `com_info` \n SET\n `status_credit_adm_license_bc` = 1\n WHERE \n `com_id` = \"{com_id}\" ;\n \"\"\"\n db().updsts(upd)\n localtime = tm().get_localtime()\n print('\\n{1}\\n{0}数据采集完成!{0}\\n{1}'.format('+' * 7, '+' * 25))\n print(f'当前时间:{localtime}\\n')\n time.sleep(3)\n\n\nclass AdmLicenseCc(AdmLicense):\n\n def cc_judge(self):\n global com_id, com_name\n alb = AdmLicenseCc()\n count_cc = 0\n count = 0\n while count_cc == 0:\n result = alb.adm_license_judge()\n com_id = result[0]\n com_name = result[1]\n key = dk().search_key(com_name)\n if com_id == None:\n pass\n else:\n count += 1\n com_url = f'https://www.qcc.com/firm_{com_id}.html'\n hds = gh().header()\n time.sleep(random.randint(3, 5))\n res = requests.get(com_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{cc_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{cc_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{cc_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n try:\n count_cc = tree.xpath(\n '//div[@class=\"tcaption\"]/h3[contains(text(),\"[信用中国]\")]/following-sibling::span[1]/text()'\n )[0]\n count_cc = int(count_cc)\n except:\n count_cc = 0\n localtime = tm().get_localtime()\n print(localtime)\n if count_cc == 0:\n print(f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:无')\n else:\n print(\n f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:{count_cc}'\n )\n status_column = 'status_credit_adm_license_cc'\n count_column = 'count_credit_adm_license_cc'\n cd.upd_status(com_id, status_column, count_column, count_cc\n )\n return com_id, com_name, count_cc\n\n def get_page_info(self):\n global project_name, license_status, license_content, expire_time, approval_category, area\n alb = AdmLicenseCc()\n value = alb.cc_judge()\n com_id = value[0]\n com_name = value[1]\n count_cc = value[2]\n key = dk().search_key(com_name)\n count = 0\n index_url = 'https://www.qcc.com'\n page_url = (\n f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run'\n )\n 
hds = gh().header()\n hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})\n time.sleep(random.randint(3, 5))\n res = requests.get(page_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{cc_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{cc_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{cc_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n content_li = tree.xpath(\n '//div[@class=\"tcaption\"]/span[contains(text(),\"[信用中国]\")]/parent::div/following-sibling::table[@class=\"ntable ntable-odd\"]/tr[position()>2]'\n )\n for nbr, content in enumerate(content_li, 1):\n count += 1\n try:\n license_num = content.xpath('td[1]/text()')[0]\n dec_book_num = content.xpath('td[2]/text()')[0]\n license_office = content.xpath('td[3]/text()')[0]\n dec_date = content.xpath('td[4]/text()')[0]\n time.sleep(random.randint(1, 2))\n dt_id = content.xpath(\n 'td[5]/a[@class=\"xzxukeView\"]/@onclick')[0].split(\n 'xzxukeView(\"')[1].split('\")')[0]\n dt_url = 'https://www.qcc.com/company_xzxukeView'\n para = {'id': f'{dt_id}'}\n res_info = requests.post(dt_url, headers=hds, data=para\n ).text\n status = json.loads(res_info)['status']\n if status == 200:\n data = json.loads(res_info)['data']\n project_name = data['name']\n license_status = data['status']\n license_content = data['content']\n expire_time = data['expire_time']\n approval_category = data['type']\n area = data['province']\n else:\n print(f'响应失败!\\n状态码:{status}')\n input('程序暂停运行!')\n except:\n license_num = None\n dec_book_num = None\n license_office = None\n dec_date = None\n dt_id = None\n project_name = None\n license_status = None\n license_content = None\n expire_time = None\n approval_category = None\n print('\\n{0}--总第{1}条----{2}/{3}页--{0}\\n'.format('-' * 9,\n count, page, count_page))\n localtime = tm().get_localtime()\n create_time = localtime\n print(f'当前时间:{create_time}')\n print(\n f\"\"\"公司ID:{com_id}\n序号:{license_num}\n决定文书号:{dec_book_num}\n许可机关:{license_office}\n详情ID:{dt_id}\n决定日期:{dec_date}\n项目名称:{project_name}\n许可状态:{license_status}\n许可内容:{license_content}\n截止时间:{expire_time}\n审批类别:{approval_category}\n地域:{area}\n创建/入库时间:{create_time}\"\"\"\n )\n input('Pause')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass AdmLicense:\n\n def get_com_id(self):\n sel = \"\"\"\n SELECT `com_id`,`com_name`\n FROM `com_info`\n WHERE `origin`\n IS NOT NULL AND LENGTH(`com_id`) > 5 AND `status_credit_adm_license` IS NULL\n ORDER BY RAND() LIMIT 1;\n \"\"\"\n result = db().selsts(sel)\n if result == ():\n result = [None, None]\n else:\n result = result[0]\n return result\n\n def upd_status(self, com_id, status_column, count_column, count):\n if count == -1:\n status = -1\n elif count == 0:\n status = 0\n else:\n status = 9\n upd = f\"\"\"\n UPDATE \n `com_info` \n SET\n `{status_column}` = \"{status}\",`{count_column}` = \"{count}\"\n WHERE \n `com_id` = \"{com_id}\" ;\n \"\"\"\n db().updsts(upd)\n <mask token>\n\n\nclass AdmLicenseBc(AdmLicense):\n\n def bc_judge(self):\n global com_id, com_name\n alb = AdmLicenseBc()\n count_bc = 0\n count = 0\n while count_bc == 0:\n result = alb.adm_license_judge()\n com_id = result[0]\n com_name = result[1]\n key = dk().search_key(com_name)\n if com_id == None:\n pass\n else:\n count += 1\n com_url = f'https://www.qcc.com/firm_{com_id}.html'\n hds = gh().header()\n time.sleep(random.randint(3, 5))\n res = requests.get(com_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{bc_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{bc_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{bc_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n try:\n count_bc = tree.xpath(\n '//div[@class=\"tcaption\"]/h3[contains(text(),\"[工商局]\")]/following-sibling::span[1]/text()'\n )[0]\n count_bc = int(count_bc)\n except:\n count_bc = 0\n localtime = tm().get_localtime()\n print(localtime)\n if count_bc == 0:\n print(f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:无')\n else:\n print(\n f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:{count_bc}'\n )\n status_column = 'status_credit_adm_license_bc'\n count_column = 'count_credit_adm_license_bc'\n alb.upd_status(com_id, status_column, count_column,\n count_bc)\n return com_id, com_name, count_bc\n\n def get_page_count(self):\n alb = AdmLicenseBc()\n result = alb.bc_judge()\n com_id = result[0]\n com_name = result[1]\n count_record = result[2]\n if count_record % 10 == 0:\n count_page = count_record // 10\n else:\n count_page = count_record // 10 + 1\n value = [com_id, com_name, count_page, count_record]\n return value\n\n def get_page_info(self):\n alb = AdmLicenseBc()\n value = alb.get_page_count()\n com_id = value[0]\n com_name = value[1]\n count_page = value[2]\n count_record = value[3]\n key = dk().search_key(com_name)\n count = 0\n for page in range(1, count_page + 1):\n index_url = 'https://www.qcc.com'\n page_url = (\n f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run&box=licens'\n )\n hds = gh().header()\n hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})\n time.sleep(random.randint(1, 2))\n res = requests.get(page_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{get_page_info[2]}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{get_page_info[2]}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{get_page_info[2]}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n content_li = tree.xpath(\n '//table[@class=\"ntable ntable-odd\"]/tr[position()>2]')\n for nbr, content in enumerate(content_li, 1):\n count += 
1\n try:\n license_num = content.xpath('td[1]/text()')[0]\n license_doc_num = content.xpath('td[2]/text()')[0]\n license_doc_name = content.xpath('td[3]/text()')[0]\n valid_period_from = content.xpath('td[4]/text()')[0]\n valid_period_to = content.xpath('td[5]/text()')[0]\n license_office = content.xpath('td[6]/text()')[0]\n license_content = content.xpath('td[7]/text()')[0]\n except:\n license_num = None\n license_doc_num = None\n license_doc_name = None\n valid_period_from = None\n valid_period_to = None\n license_office = None\n license_content = None\n print('\\n{0}--总第{1}条----{2}/{3}页--{0}\\n'.format('-' * 9,\n count, page, count_page))\n localtime = tm().get_localtime()\n create_time = localtime\n print(f'当前时间:{create_time}')\n print(\n f\"\"\"公司ID:{com_id}\n序号:{license_num}\n许可文件编号:{license_doc_num}\n许可文件名称:{license_doc_name}\n有效期自:{valid_period_from}\n有效期至:{valid_period_to}\n许可机关:{license_office}\n许可内容:{license_content}\"\"\"\n )\n if license_num == None:\n ins = \"\"\"\n INSERT INTO\n `com_credit_adm_license_bc`\n (`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,\n `valid_period_to`,`license_office`,`license_content`,`create_time`)\n VALUES\n (NULL,NULL,NULL,NULL,NULL,\n NULL,NULL,NULL,NULL);\n \"\"\"\n else:\n ins = f\"\"\"\n INSERT INTO\n `com_credit_adm_license_bc`\n (`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,\n `valid_period_to`,`license_office`,`license_content`,`create_time`)\n VALUES\n (\"{com_id}\",\"{license_num}\",\"{license_doc_num}\",\"{license_doc_name}\",\"{valid_period_from}\",\n \"{valid_period_to}\",\"{license_office}\",\"{license_content}\",\"{create_time}\");\n \"\"\"\n db().inssts(ins)\n upd = f\"\"\"\n UPDATE \n `com_info` \n SET\n `status_credit_adm_license_bc` = 1\n WHERE \n `com_id` = \"{com_id}\" ;\n \"\"\"\n db().updsts(upd)\n localtime = tm().get_localtime()\n print('\\n{1}\\n{0}数据采集完成!{0}\\n{1}'.format('+' * 7, '+' * 25))\n print(f'当前时间:{localtime}\\n')\n time.sleep(3)\n\n\nclass AdmLicenseCc(AdmLicense):\n\n def cc_judge(self):\n global com_id, com_name\n alb = AdmLicenseCc()\n count_cc = 0\n count = 0\n while count_cc == 0:\n result = alb.adm_license_judge()\n com_id = result[0]\n com_name = result[1]\n key = dk().search_key(com_name)\n if com_id == None:\n pass\n else:\n count += 1\n com_url = f'https://www.qcc.com/firm_{com_id}.html'\n hds = gh().header()\n time.sleep(random.randint(3, 5))\n res = requests.get(com_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{cc_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{cc_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{cc_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n try:\n count_cc = tree.xpath(\n '//div[@class=\"tcaption\"]/h3[contains(text(),\"[信用中国]\")]/following-sibling::span[1]/text()'\n )[0]\n count_cc = int(count_cc)\n except:\n count_cc = 0\n localtime = tm().get_localtime()\n print(localtime)\n if count_cc == 0:\n print(f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:无')\n else:\n print(\n f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:{count_cc}'\n )\n status_column = 'status_credit_adm_license_cc'\n count_column = 'count_credit_adm_license_cc'\n cd.upd_status(com_id, status_column, count_column, count_cc\n )\n return com_id, com_name, count_cc\n\n def get_page_info(self):\n global project_name, license_status, license_content, expire_time, approval_category, area\n alb = 
AdmLicenseCc()\n value = alb.cc_judge()\n com_id = value[0]\n com_name = value[1]\n count_cc = value[2]\n key = dk().search_key(com_name)\n count = 0\n index_url = 'https://www.qcc.com'\n page_url = (\n f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run'\n )\n hds = gh().header()\n hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})\n time.sleep(random.randint(3, 5))\n res = requests.get(page_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{cc_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{cc_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{cc_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n content_li = tree.xpath(\n '//div[@class=\"tcaption\"]/span[contains(text(),\"[信用中国]\")]/parent::div/following-sibling::table[@class=\"ntable ntable-odd\"]/tr[position()>2]'\n )\n for nbr, content in enumerate(content_li, 1):\n count += 1\n try:\n license_num = content.xpath('td[1]/text()')[0]\n dec_book_num = content.xpath('td[2]/text()')[0]\n license_office = content.xpath('td[3]/text()')[0]\n dec_date = content.xpath('td[4]/text()')[0]\n time.sleep(random.randint(1, 2))\n dt_id = content.xpath(\n 'td[5]/a[@class=\"xzxukeView\"]/@onclick')[0].split(\n 'xzxukeView(\"')[1].split('\")')[0]\n dt_url = 'https://www.qcc.com/company_xzxukeView'\n para = {'id': f'{dt_id}'}\n res_info = requests.post(dt_url, headers=hds, data=para\n ).text\n status = json.loads(res_info)['status']\n if status == 200:\n data = json.loads(res_info)['data']\n project_name = data['name']\n license_status = data['status']\n license_content = data['content']\n expire_time = data['expire_time']\n approval_category = data['type']\n area = data['province']\n else:\n print(f'响应失败!\\n状态码:{status}')\n input('程序暂停运行!')\n except:\n license_num = None\n dec_book_num = None\n license_office = None\n dec_date = None\n dt_id = None\n project_name = None\n license_status = None\n license_content = None\n expire_time = None\n approval_category = None\n print('\\n{0}--总第{1}条----{2}/{3}页--{0}\\n'.format('-' * 9,\n count, page, count_page))\n localtime = tm().get_localtime()\n create_time = localtime\n print(f'当前时间:{create_time}')\n print(\n f\"\"\"公司ID:{com_id}\n序号:{license_num}\n决定文书号:{dec_book_num}\n许可机关:{license_office}\n详情ID:{dt_id}\n决定日期:{dec_date}\n项目名称:{project_name}\n许可状态:{license_status}\n许可内容:{license_content}\n截止时间:{expire_time}\n审批类别:{approval_category}\n地域:{area}\n创建/入库时间:{create_time}\"\"\"\n )\n input('Pause')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass AdmLicense:\n\n def get_com_id(self):\n sel = \"\"\"\n SELECT `com_id`,`com_name`\n FROM `com_info`\n WHERE `origin`\n IS NOT NULL AND LENGTH(`com_id`) > 5 AND `status_credit_adm_license` IS NULL\n ORDER BY RAND() LIMIT 1;\n \"\"\"\n result = db().selsts(sel)\n if result == ():\n result = [None, None]\n else:\n result = result[0]\n return result\n\n def upd_status(self, com_id, status_column, count_column, count):\n if count == -1:\n status = -1\n elif count == 0:\n status = 0\n else:\n status = 9\n upd = f\"\"\"\n UPDATE \n `com_info` \n SET\n `{status_column}` = \"{status}\",`{count_column}` = \"{count}\"\n WHERE \n `com_id` = \"{com_id}\" ;\n \"\"\"\n db().updsts(upd)\n\n def adm_license_judge(self):\n global com_id, com_name\n al = AdmLicense()\n count_adm_license = 0\n count = 0\n while count_adm_license == 0 or count_adm_license == -1:\n result = al.get_com_id()\n com_id = result[0]\n com_name = result[1]\n if com_id == None:\n pass\n else:\n count += 1\n com_url = f'https://www.qcc.com/firm_{com_id}.html'\n hds = gh().header()\n time.sleep(random.randint(3, 5))\n res = requests.get(com_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{adm_license_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{adm_license_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{adm_license_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n try:\n count_adm_license = tree.xpath(\n '//div[@class=\"company-nav-items\"]/span[contains(text(),\"行政许可\")]/span/text()|//div[@class=\"company-nav-items\"]/a[@data-pos=\"licenslist\"]/span/text()'\n )[0]\n count_adm_license = int(count_adm_license)\n except:\n count_adm_license = -1\n localtime = tm().get_localtime()\n print(localtime)\n if count_adm_license == 0 or count_adm_license == -1:\n print(f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息条数:无')\n else:\n print(\n f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息条数:{count_adm_license}'\n )\n status_column = 'status_credit_adm_license'\n count_column = 'count_credit_adm_license'\n al.upd_status(com_id, status_column, count_column,\n count_adm_license)\n return com_id, com_name, count_adm_license\n\n\nclass AdmLicenseBc(AdmLicense):\n\n def bc_judge(self):\n global com_id, com_name\n alb = AdmLicenseBc()\n count_bc = 0\n count = 0\n while count_bc == 0:\n result = alb.adm_license_judge()\n com_id = result[0]\n com_name = result[1]\n key = dk().search_key(com_name)\n if com_id == None:\n pass\n else:\n count += 1\n com_url = f'https://www.qcc.com/firm_{com_id}.html'\n hds = gh().header()\n time.sleep(random.randint(3, 5))\n res = requests.get(com_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{bc_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{bc_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{bc_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n try:\n count_bc = tree.xpath(\n '//div[@class=\"tcaption\"]/h3[contains(text(),\"[工商局]\")]/following-sibling::span[1]/text()'\n )[0]\n count_bc = int(count_bc)\n except:\n count_bc = 0\n localtime = tm().get_localtime()\n print(localtime)\n if count_bc == 0:\n print(f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:无')\n else:\n print(\n f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:{count_bc}'\n )\n status_column = 'status_credit_adm_license_bc'\n count_column = 
'count_credit_adm_license_bc'\n alb.upd_status(com_id, status_column, count_column,\n count_bc)\n return com_id, com_name, count_bc\n\n def get_page_count(self):\n alb = AdmLicenseBc()\n result = alb.bc_judge()\n com_id = result[0]\n com_name = result[1]\n count_record = result[2]\n if count_record % 10 == 0:\n count_page = count_record // 10\n else:\n count_page = count_record // 10 + 1\n value = [com_id, com_name, count_page, count_record]\n return value\n\n def get_page_info(self):\n alb = AdmLicenseBc()\n value = alb.get_page_count()\n com_id = value[0]\n com_name = value[1]\n count_page = value[2]\n count_record = value[3]\n key = dk().search_key(com_name)\n count = 0\n for page in range(1, count_page + 1):\n index_url = 'https://www.qcc.com'\n page_url = (\n f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run&box=licens'\n )\n hds = gh().header()\n hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})\n time.sleep(random.randint(1, 2))\n res = requests.get(page_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{get_page_info[2]}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{get_page_info[2]}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{get_page_info[2]}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n content_li = tree.xpath(\n '//table[@class=\"ntable ntable-odd\"]/tr[position()>2]')\n for nbr, content in enumerate(content_li, 1):\n count += 1\n try:\n license_num = content.xpath('td[1]/text()')[0]\n license_doc_num = content.xpath('td[2]/text()')[0]\n license_doc_name = content.xpath('td[3]/text()')[0]\n valid_period_from = content.xpath('td[4]/text()')[0]\n valid_period_to = content.xpath('td[5]/text()')[0]\n license_office = content.xpath('td[6]/text()')[0]\n license_content = content.xpath('td[7]/text()')[0]\n except:\n license_num = None\n license_doc_num = None\n license_doc_name = None\n valid_period_from = None\n valid_period_to = None\n license_office = None\n license_content = None\n print('\\n{0}--总第{1}条----{2}/{3}页--{0}\\n'.format('-' * 9,\n count, page, count_page))\n localtime = tm().get_localtime()\n create_time = localtime\n print(f'当前时间:{create_time}')\n print(\n f\"\"\"公司ID:{com_id}\n序号:{license_num}\n许可文件编号:{license_doc_num}\n许可文件名称:{license_doc_name}\n有效期自:{valid_period_from}\n有效期至:{valid_period_to}\n许可机关:{license_office}\n许可内容:{license_content}\"\"\"\n )\n if license_num == None:\n ins = \"\"\"\n INSERT INTO\n `com_credit_adm_license_bc`\n (`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,\n `valid_period_to`,`license_office`,`license_content`,`create_time`)\n VALUES\n (NULL,NULL,NULL,NULL,NULL,\n NULL,NULL,NULL,NULL);\n \"\"\"\n else:\n ins = f\"\"\"\n INSERT INTO\n `com_credit_adm_license_bc`\n (`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,\n `valid_period_to`,`license_office`,`license_content`,`create_time`)\n VALUES\n (\"{com_id}\",\"{license_num}\",\"{license_doc_num}\",\"{license_doc_name}\",\"{valid_period_from}\",\n \"{valid_period_to}\",\"{license_office}\",\"{license_content}\",\"{create_time}\");\n \"\"\"\n db().inssts(ins)\n upd = f\"\"\"\n UPDATE \n `com_info` \n SET\n `status_credit_adm_license_bc` = 1\n WHERE \n `com_id` = \"{com_id}\" ;\n \"\"\"\n db().updsts(upd)\n localtime = tm().get_localtime()\n print('\\n{1}\\n{0}数据采集完成!{0}\\n{1}'.format('+' * 7, '+' * 25))\n print(f'当前时间:{localtime}\\n')\n 
time.sleep(3)\n\n\nclass AdmLicenseCc(AdmLicense):\n\n def cc_judge(self):\n global com_id, com_name\n alb = AdmLicenseCc()\n count_cc = 0\n count = 0\n while count_cc == 0:\n result = alb.adm_license_judge()\n com_id = result[0]\n com_name = result[1]\n key = dk().search_key(com_name)\n if com_id == None:\n pass\n else:\n count += 1\n com_url = f'https://www.qcc.com/firm_{com_id}.html'\n hds = gh().header()\n time.sleep(random.randint(3, 5))\n res = requests.get(com_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{cc_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{cc_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{cc_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n try:\n count_cc = tree.xpath(\n '//div[@class=\"tcaption\"]/h3[contains(text(),\"[信用中国]\")]/following-sibling::span[1]/text()'\n )[0]\n count_cc = int(count_cc)\n except:\n count_cc = 0\n localtime = tm().get_localtime()\n print(localtime)\n if count_cc == 0:\n print(f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:无')\n else:\n print(\n f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:{count_cc}'\n )\n status_column = 'status_credit_adm_license_cc'\n count_column = 'count_credit_adm_license_cc'\n cd.upd_status(com_id, status_column, count_column, count_cc\n )\n return com_id, com_name, count_cc\n\n def get_page_info(self):\n global project_name, license_status, license_content, expire_time, approval_category, area\n alb = AdmLicenseCc()\n value = alb.cc_judge()\n com_id = value[0]\n com_name = value[1]\n count_cc = value[2]\n key = dk().search_key(com_name)\n count = 0\n index_url = 'https://www.qcc.com'\n page_url = (\n f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run'\n )\n hds = gh().header()\n hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})\n time.sleep(random.randint(3, 5))\n res = requests.get(page_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{cc_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{cc_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{cc_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n content_li = tree.xpath(\n '//div[@class=\"tcaption\"]/span[contains(text(),\"[信用中国]\")]/parent::div/following-sibling::table[@class=\"ntable ntable-odd\"]/tr[position()>2]'\n )\n for nbr, content in enumerate(content_li, 1):\n count += 1\n try:\n license_num = content.xpath('td[1]/text()')[0]\n dec_book_num = content.xpath('td[2]/text()')[0]\n license_office = content.xpath('td[3]/text()')[0]\n dec_date = content.xpath('td[4]/text()')[0]\n time.sleep(random.randint(1, 2))\n dt_id = content.xpath(\n 'td[5]/a[@class=\"xzxukeView\"]/@onclick')[0].split(\n 'xzxukeView(\"')[1].split('\")')[0]\n dt_url = 'https://www.qcc.com/company_xzxukeView'\n para = {'id': f'{dt_id}'}\n res_info = requests.post(dt_url, headers=hds, data=para\n ).text\n status = json.loads(res_info)['status']\n if status == 200:\n data = json.loads(res_info)['data']\n project_name = data['name']\n license_status = data['status']\n license_content = data['content']\n expire_time = data['expire_time']\n approval_category = data['type']\n area = data['province']\n else:\n print(f'响应失败!\\n状态码:{status}')\n input('程序暂停运行!')\n except:\n license_num = None\n dec_book_num = None\n license_office = None\n dec_date = None\n dt_id = 
None\n project_name = None\n license_status = None\n license_content = None\n expire_time = None\n approval_category = None\n print('\\n{0}--总第{1}条----{2}/{3}页--{0}\\n'.format('-' * 9,\n count, page, count_page))\n localtime = tm().get_localtime()\n create_time = localtime\n print(f'当前时间:{create_time}')\n print(\n f\"\"\"公司ID:{com_id}\n序号:{license_num}\n决定文书号:{dec_book_num}\n许可机关:{license_office}\n详情ID:{dt_id}\n决定日期:{dec_date}\n项目名称:{project_name}\n许可状态:{license_status}\n许可内容:{license_content}\n截止时间:{expire_time}\n审批类别:{approval_category}\n地域:{area}\n创建/入库时间:{create_time}\"\"\"\n )\n input('Pause')\n\n\n<mask token>\n",
"step-5": "#! /usr/bin/env python3\n# -*- coding:utf-8 -*-\n\"\"\"\n企查查-行政许可[工商局]\n\"\"\"\nimport json\nimport time\nimport random\nimport requests\n\nfrom lxml import etree\n\nfrom support.use_mysql import QccMysql as db\nfrom support.others import DealKey as dk\nfrom support.others import TimeInfo as tm\nfrom support.headers import GeneralHeaders as gh\n\nclass AdmLicense():\n def get_com_id(self): # 随机获取一条符合条件的公司信息\n sel = \"\"\"\n SELECT `com_id`,`com_name`\n FROM `com_info`\n WHERE `origin`\n IS NOT NULL AND LENGTH(`com_id`) > 5 AND `status_credit_adm_license` IS NULL\n ORDER BY RAND() LIMIT 1;\n \"\"\"\n\n # 测试sql#\n # sel = \"\"\"\n # SELECT `com_id`, `com_name`\n # FROM `com_info`\n # WHERE com_id = '299eee201318f0283f086b4847d69fc7';\n # \"\"\"\n # 测试sql#\n\n result = db().selsts(sel)\n if result == ():\n result = [None, None]\n else:\n result = result[0]\n return result\n\n def upd_status(self, com_id,status_column,count_column, count): # 更新com_info表相关字段状态码\n if count == -1:\n status = -1\n elif count == 0:\n status = 0\n else:\n status = 9\n upd = f\"\"\"\n UPDATE \n `com_info` \n SET\n `{status_column}` = \"{status}\",`{count_column}` = \"{count}\"\n WHERE \n `com_id` = \"{com_id}\" ;\n \"\"\"\n db().updsts(upd)\n\n def adm_license_judge(self): # 判断行政许可信息,如果有记录则执行解析,返回该公司相关信息\n global com_id, com_name\n al = AdmLicense()\n count_adm_license = 0\n count = 0\n while count_adm_license == 0 or count_adm_license == -1:\n result = al.get_com_id()\n com_id = result[0]\n com_name = result[1]\n if com_id == None:\n pass\n else:\n count += 1\n com_url = f'https://www.qcc.com/firm_{com_id}.html'\n hds = gh().header()\n time.sleep(random.randint(3, 5))\n res = requests.get(com_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{adm_license_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{adm_license_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{adm_license_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n try:\n count_adm_license = tree.xpath('//div[@class=\"company-nav-items\"]/span[contains(text(),\"行政许可\")]/span/text()|//div[@class=\"company-nav-items\"]/a[@data-pos=\"licenslist\"]/span/text()')[0]\n count_adm_license = int(count_adm_license)\n except:\n count_adm_license = -1\n localtime = tm().get_localtime() # 当前时间\n print(localtime)\n if count_adm_license == 0 or count_adm_license == -1:\n print(f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息条数:无')\n else:\n print(f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息条数:{count_adm_license}')\n status_column = 'status_credit_adm_license' #表字段名\n count_column = 'count_credit_adm_license' #表字段名\n al.upd_status(com_id,status_column,count_column,count_adm_license)\n return com_id, com_name, count_adm_license\n\nclass AdmLicenseBc(AdmLicense):\n def bc_judge(self):\n global com_id,com_name\n alb = AdmLicenseBc()\n count_bc = 0\n count = 0\n while count_bc == 0:\n result = alb.adm_license_judge()\n com_id = result[0]\n com_name = result[1]\n key = dk().search_key(com_name)\n if com_id == None:\n pass\n else:\n count += 1\n com_url = f'https://www.qcc.com/firm_{com_id}.html'\n hds = gh().header()\n time.sleep(random.randint(3, 5))\n res = requests.get(com_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{bc_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{bc_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in 
res:\n print('账号访问超频,请更换账号!{bc_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n try:\n count_bc = tree.xpath('//div[@class=\"tcaption\"]/h3[contains(text(),\"[工商局]\")]/following-sibling::span[1]/text()')[0]\n count_bc = int(count_bc)\n except:\n count_bc = 0\n localtime = tm().get_localtime() # 当前时间\n print(localtime)\n if count_bc == 0:\n print(f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:无')\n else:\n print(f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:{count_bc}')\n status_column = 'status_credit_adm_license_bc' # 表字段名\n count_column = 'count_credit_adm_license_bc' # 表字段名\n alb.upd_status(com_id, status_column, count_column, count_bc)\n return com_id, com_name, count_bc\n\n def get_page_count(self): # 获取页码长度\n alb = AdmLicenseBc()\n result = alb.bc_judge()\n com_id = result[0]\n com_name = result[1]\n count_record = result[2]\n if count_record % 10 == 0:\n count_page = count_record // 10\n else:\n count_page = count_record // 10 + 1\n value = [com_id, com_name, count_page, count_record]\n return value\n\n def get_page_info(self): # 解析页面内容\n alb = AdmLicenseBc()\n value = alb.get_page_count()\n com_id = value[0]\n com_name = value[1]\n count_page = value[2]\n count_record = value[3]\n key = dk().search_key(com_name)\n count = 0\n for page in range(1, count_page + 1):\n index_url = 'https://www.qcc.com'\n page_url = f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run&box=licens'\n hds = gh().header()\n hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})\n time.sleep(random.randint(1, 2))\n res = requests.get(page_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{get_page_info[2]}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{get_page_info[2]}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{get_page_info[2]}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n content_li = tree.xpath('//table[@class=\"ntable ntable-odd\"]/tr[position()>2]')\n for nbr, content in enumerate(content_li, 1):\n count += 1\n try:\n license_num = content.xpath('td[1]/text()')[0]\n license_doc_num = content.xpath('td[2]/text()')[0]\n license_doc_name = content.xpath('td[3]/text()')[0]\n valid_period_from = content.xpath('td[4]/text()')[0]\n valid_period_to = content.xpath('td[5]/text()')[0]\n license_office = content.xpath('td[6]/text()')[0]\n license_content = content.xpath('td[7]/text()')[0]\n except:\n license_num = None\n license_doc_num = None\n license_doc_name = None\n valid_period_from = None\n valid_period_to = None\n license_office = None\n license_content = None\n\n print('\\n{0}--总第{1}条----{2}/{3}页--{0}\\n'.format('-' * 9, count, page, count_page))\n localtime = tm().get_localtime() # 当前时间\n create_time = localtime\n print(f'当前时间:{create_time}')\n print(f'公司ID:{com_id}\\n序号:{license_num}\\n许可文件编号:{license_doc_num}\\n许可文件名称:{license_doc_name}\\n有效期自:{valid_period_from}\\n'\n f'有效期至:{valid_period_to}\\n许可机关:{license_office}\\n许可内容:{license_content}')\n if license_num == None:\n ins = \"\"\"\n INSERT INTO\n `com_credit_adm_license_bc`\n (`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,\n `valid_period_to`,`license_office`,`license_content`,`create_time`)\n VALUES\n (NULL,NULL,NULL,NULL,NULL,\n NULL,NULL,NULL,NULL);\n \"\"\"\n else:\n ins = f\"\"\"\n INSERT INTO\n `com_credit_adm_license_bc`\n (`com_id`,`license_num`,`license_doc_num`,`license_doc_name`,`valid_period_from`,\n 
`valid_period_to`,`license_office`,`license_content`,`create_time`)\n VALUES\n (\"{com_id}\",\"{license_num}\",\"{license_doc_num}\",\"{license_doc_name}\",\"{valid_period_from}\",\n \"{valid_period_to}\",\"{license_office}\",\"{license_content}\",\"{create_time}\");\n \"\"\"\n db().inssts(ins)\n\n upd = f\"\"\"\n UPDATE \n `com_info` \n SET\n `status_credit_adm_license_bc` = 1\n WHERE \n `com_id` = \"{com_id}\" ;\n \"\"\"\n db().updsts(upd)\n\n localtime = tm().get_localtime() # 当前时间\n print('\\n{1}\\n{0}数据采集完成!{0}\\n{1}'.format('+' * 7, '+' * 25))\n print(f'当前时间:{localtime}\\n')\n time.sleep(3)\n\nclass AdmLicenseCc(AdmLicense): #行政许可[信用中国]\n def cc_judge(self):\n global com_id,com_name\n alb = AdmLicenseCc()\n count_cc = 0\n count = 0\n while count_cc == 0:\n result = alb.adm_license_judge()\n com_id = result[0]\n com_name = result[1]\n key = dk().search_key(com_name)\n if com_id == None:\n pass\n else:\n count += 1\n com_url = f'https://www.qcc.com/firm_{com_id}.html'\n hds = gh().header()\n time.sleep(random.randint(3, 5))\n res = requests.get(com_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{cc_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{cc_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{cc_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n try:\n count_cc = tree.xpath('//div[@class=\"tcaption\"]/h3[contains(text(),\"[信用中国]\")]/following-sibling::span[1]/text()')[0]\n count_cc = int(count_cc)\n except:\n count_cc = 0\n localtime = tm().get_localtime() # 当前时间\n print(localtime)\n if count_cc == 0:\n print(f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:无')\n else:\n print(f'计数器:{count}\\n公司ID:{com_id}\\n行政许可信息[工商局]条数:{count_cc}')\n status_column = 'status_credit_adm_license_cc' # 表字段名\n count_column = 'count_credit_adm_license_cc' # 表字段名\n cd.upd_status(com_id, status_column, count_column, count_cc)\n return com_id, com_name, count_cc\n\n def get_page_info(self): # 解析页面内容\n global project_name,license_status,license_content,expire_time,approval_category,area\n alb = AdmLicenseCc()\n value = alb.cc_judge()\n com_id = value[0]\n com_name = value[1]\n count_cc = value[2]\n key = dk().search_key(com_name)\n count = 0\n index_url = 'https://www.qcc.com'\n page_url = f'{index_url}/company_getinfos?unique={com_id}&companyname={key}&p={page}&tab=run'\n hds = gh().header()\n hds.update({'Referer': f'{index_url}/firm_{com_id}.html'})\n time.sleep(random.randint(3, 5))\n res = requests.get(page_url, headers=hds).text\n if '<script>window.location.href' in res:\n print('访问频繁,需验证!{cc_judge}')\n input('暂停')\n elif '<script>location.href=\"/user_login\"</script>' in res:\n print('Cookie失效,需更换!{cc_judge}')\n input('程序暂停运行!')\n elif '您的账号访问超频,请稍后访问或联系客服人员' in res:\n print('账号访问超频,请更换账号!{cc_judge}')\n input('程序暂停运行!')\n else:\n tree = etree.HTML(res)\n content_li = tree.xpath('//div[@class=\"tcaption\"]/span[contains(text(),\"[信用中国]\")]/parent::div/following-sibling::table[@class=\"ntable ntable-odd\"]/tr[position()>2]')\n for nbr, content in enumerate(content_li, 1):\n count += 1\n try:\n license_num = content.xpath('td[1]/text()')[0]\n dec_book_num = content.xpath('td[2]/text()')[0]\n license_office = content.xpath('td[3]/text()')[0]\n dec_date = content.xpath('td[4]/text()')[0]\n time.sleep(random.randint(1, 2))\n dt_id = content.xpath('td[5]/a[@class=\"xzxukeView\"]/@onclick')[0].split('xzxukeView(\"')[1].split('\")')[0]\n dt_url = 
'https://www.qcc.com/company_xzxukeView'\n para = {'id':f'{dt_id}'}\n res_info = requests.post(dt_url, headers=hds,data=para).text\n status = json.loads(res_info)['status']\n if status == 200:\n data = json.loads(res_info)['data']\n project_name = data['name']\n license_status = data['status']\n license_content = data['content']\n expire_time = data['expire_time']\n approval_category = data['type']\n area = data['province']\n else:\n print(f'响应失败!\\n状态码:{status}')\n input('程序暂停运行!')\n except:\n license_num = None\n dec_book_num = None\n license_office = None\n dec_date = None\n dt_id = None\n project_name = None\n license_status = None\n license_content = None\n expire_time = None\n approval_category = None\n print('\\n{0}--总第{1}条----{2}/{3}页--{0}\\n'.format('-' * 9, count, page, count_page))\n localtime = tm().get_localtime() # 当前时间\n create_time = localtime\n print(f'当前时间:{create_time}')\n print(f'公司ID:{com_id}\\n序号:{license_num}\\n决定文书号:{dec_book_num}\\n许可机关:{license_office}\\n详情ID:{dt_id}\\n'\n f'决定日期:{dec_date}\\n项目名称:{project_name}\\n许可状态:{license_status}\\n许可内容:{license_content}\\n截止时间:{expire_time}\\n'\n f'审批类别:{approval_category}\\n地域:{area}\\n创建/入库时间:{create_time}')\n input('Pause')\n\n\n\n\n\nif __name__ == '__main__':\n cc = AdmLicenseCc()\n cc.get_page_info()",
"step-ids": [
7,
9,
10,
11,
14
]
}
|
[
7,
9,
10,
11,
14
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
# Add gumpy path
sys.path.append('../shared')
from gumpy import signal
import numpy as np
def preprocess_data(data, sample_rate=160, ac_freq=60, hp_freq=0.5, bp_low=2, bp_high=60, notch=False,
hp_filter=False, bp_filter=False, artifact_removal=False, normalize=False):
if notch:
data = notch_filter(data, ac_freq, sample_rate)
if hp_filter:
data = highpass_filter(data, hp_freq)
if bp_filter:
data = bandpass_filter(data, bp_low, bp_high, sample_rate)
if normalize:
data = normalize_data(data, 'mean_std')
if artifact_removal:
data = remove_artifacts(data)
return data
def notch_filter(data, ac_freq, sample_rate):
w0 = ac_freq / (sample_rate / 2)
return signal.notch(data, w0)
def highpass_filter(data, hp_freq):
return signal.butter_highpass(data, hp_freq)
def bandpass_filter(data, bp_low, bp_high, sample_rate):
return signal.butter_bandpass(data, bp_low, bp_high, order=5, fs=sample_rate)
def normalize_data(data, strategy):
return signal.normalize(data, strategy)
def remove_artifacts(data):
cleaned = signal.artifact_removal(data.reshape((-1, 1)))[0]
return np.squeeze(cleaned)
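# Hedged usage sketch (not part of the original module): assuming gumpy is
# importable, `raw` below is a stand-in 1-D EEG channel sampled at 160 Hz and
# the call chains the 60 Hz notch, band-pass, and normalization steps:
#   raw = np.random.randn(160 * 10)  # 10 s of synthetic data, illustration only
#   clean = preprocess_data(raw, notch=True, bp_filter=True, normalize=True)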
|
normal
|
{
"blob_id": "5f1cbe1019f218d2aad616ea8bbe760ea760534c",
"index": 9359,
"step-1": "<mask token>\n\n\ndef preprocess_data(data, sample_rate=160, ac_freq=60, hp_freq=0.5, bp_low=\n 2, bp_high=60, notch=False, hp_filter=False, bp_filter=False,\n artifact_removal=False, normalize=False):\n if notch:\n data = notch_filter(data, ac_freq, sample_rate)\n if hp_filter:\n data = highpass_filter(data, hp_freq)\n if bp_filter:\n data = bandpass_filter(data, bp_low, bp_high, sample_rate)\n if normalize:\n data = normalize_data(data, 'mean_std')\n if artifact_removal:\n data = remove_artifacts(data)\n return data\n\n\ndef notch_filter(data, ac_freq, sample_rate):\n w0 = ac_freq / (sample_rate / 2)\n return signal.notch(data, w0)\n\n\n<mask token>\n\n\ndef bandpass_filter(data, bp_low, bp_high, sample_rate):\n return signal.butter_bandpass(data, bp_low, bp_high, order=5, fs=\n sample_rate)\n\n\n<mask token>\n\n\ndef remove_artifacts(data):\n cleaned = signal.artifact_removal(data.reshape((-1, 1)))[0]\n return np.squeeze(cleaned)\n",
"step-2": "<mask token>\n\n\ndef preprocess_data(data, sample_rate=160, ac_freq=60, hp_freq=0.5, bp_low=\n 2, bp_high=60, notch=False, hp_filter=False, bp_filter=False,\n artifact_removal=False, normalize=False):\n if notch:\n data = notch_filter(data, ac_freq, sample_rate)\n if hp_filter:\n data = highpass_filter(data, hp_freq)\n if bp_filter:\n data = bandpass_filter(data, bp_low, bp_high, sample_rate)\n if normalize:\n data = normalize_data(data, 'mean_std')\n if artifact_removal:\n data = remove_artifacts(data)\n return data\n\n\ndef notch_filter(data, ac_freq, sample_rate):\n w0 = ac_freq / (sample_rate / 2)\n return signal.notch(data, w0)\n\n\ndef highpass_filter(data, hp_freq):\n return signal.butter_highpass(data, hp_freq)\n\n\ndef bandpass_filter(data, bp_low, bp_high, sample_rate):\n return signal.butter_bandpass(data, bp_low, bp_high, order=5, fs=\n sample_rate)\n\n\ndef normalize_data(data, strategy):\n return signal.normalize(data, strategy)\n\n\ndef remove_artifacts(data):\n cleaned = signal.artifact_removal(data.reshape((-1, 1)))[0]\n return np.squeeze(cleaned)\n",
"step-3": "<mask token>\nsys.path.append('../shared')\n<mask token>\n\n\ndef preprocess_data(data, sample_rate=160, ac_freq=60, hp_freq=0.5, bp_low=\n 2, bp_high=60, notch=False, hp_filter=False, bp_filter=False,\n artifact_removal=False, normalize=False):\n if notch:\n data = notch_filter(data, ac_freq, sample_rate)\n if hp_filter:\n data = highpass_filter(data, hp_freq)\n if bp_filter:\n data = bandpass_filter(data, bp_low, bp_high, sample_rate)\n if normalize:\n data = normalize_data(data, 'mean_std')\n if artifact_removal:\n data = remove_artifacts(data)\n return data\n\n\ndef notch_filter(data, ac_freq, sample_rate):\n w0 = ac_freq / (sample_rate / 2)\n return signal.notch(data, w0)\n\n\ndef highpass_filter(data, hp_freq):\n return signal.butter_highpass(data, hp_freq)\n\n\ndef bandpass_filter(data, bp_low, bp_high, sample_rate):\n return signal.butter_bandpass(data, bp_low, bp_high, order=5, fs=\n sample_rate)\n\n\ndef normalize_data(data, strategy):\n return signal.normalize(data, strategy)\n\n\ndef remove_artifacts(data):\n cleaned = signal.artifact_removal(data.reshape((-1, 1)))[0]\n return np.squeeze(cleaned)\n",
"step-4": "import sys\nsys.path.append('../shared')\nfrom gumpy import signal\nimport numpy as np\n\n\ndef preprocess_data(data, sample_rate=160, ac_freq=60, hp_freq=0.5, bp_low=\n 2, bp_high=60, notch=False, hp_filter=False, bp_filter=False,\n artifact_removal=False, normalize=False):\n if notch:\n data = notch_filter(data, ac_freq, sample_rate)\n if hp_filter:\n data = highpass_filter(data, hp_freq)\n if bp_filter:\n data = bandpass_filter(data, bp_low, bp_high, sample_rate)\n if normalize:\n data = normalize_data(data, 'mean_std')\n if artifact_removal:\n data = remove_artifacts(data)\n return data\n\n\ndef notch_filter(data, ac_freq, sample_rate):\n w0 = ac_freq / (sample_rate / 2)\n return signal.notch(data, w0)\n\n\ndef highpass_filter(data, hp_freq):\n return signal.butter_highpass(data, hp_freq)\n\n\ndef bandpass_filter(data, bp_low, bp_high, sample_rate):\n return signal.butter_bandpass(data, bp_low, bp_high, order=5, fs=\n sample_rate)\n\n\ndef normalize_data(data, strategy):\n return signal.normalize(data, strategy)\n\n\ndef remove_artifacts(data):\n cleaned = signal.artifact_removal(data.reshape((-1, 1)))[0]\n return np.squeeze(cleaned)\n",
"step-5": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\n\n# Add gumpy path\nsys.path.append('../shared')\nfrom gumpy import signal\nimport numpy as np\n\n\ndef preprocess_data(data, sample_rate=160, ac_freq=60, hp_freq=0.5, bp_low=2, bp_high=60, notch=False,\n hp_filter=False, bp_filter=False, artifact_removal=False, normalize=False):\n if notch:\n data = notch_filter(data, ac_freq, sample_rate)\n if hp_filter:\n data = highpass_filter(data, hp_freq)\n if bp_filter:\n data = bandpass_filter(data, bp_low, bp_high, sample_rate)\n if normalize:\n data = normalize_data(data, 'mean_std')\n if artifact_removal:\n data = remove_artifacts(data)\n\n return data\n\n\ndef notch_filter(data, ac_freq, sample_rate):\n w0 = ac_freq / (sample_rate / 2)\n return signal.notch(data, w0)\n\n\ndef highpass_filter(data, hp_freq):\n return signal.butter_highpass(data, hp_freq)\n\n\ndef bandpass_filter(data, bp_low, bp_high, sample_rate):\n return signal.butter_bandpass(data, bp_low, bp_high, order=5, fs=sample_rate)\n\n\ndef normalize_data(data, strategy):\n return signal.normalize(data, strategy)\n\n\ndef remove_artifacts(data):\n cleaned = signal.artifact_removal(data.reshape((-1, 1)))[0]\n return np.squeeze(cleaned)\n",
"step-ids": [
4,
6,
7,
8,
9
]
}
|
[
4,
6,
7,
8,
9
] |
import sys
import pytest
from presidio_evaluator.evaluation import Evaluator
from tests.conftest import assert_model_results_gt
from presidio_evaluator.models.flair_model import FlairModel
@pytest.mark.slow
@pytest.mark.skipif("flair" not in sys.modules, reason="requires the Flair library")
def test_flair_simple(small_dataset):
flair_model = FlairModel(model_path="ner", entities_to_keep=["PERSON"])
evaluator = Evaluator(model=flair_model)
evaluation_results = evaluator.evaluate_all(small_dataset)
scores = evaluator.calculate_score(evaluation_results)
assert_model_results_gt(scores, "PERSON", 0)
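# Hedged note: `small_dataset` is assumed to be a pytest fixture supplied by
# tests/conftest.py; the slow marker lets the test be selected with `pytest -m slow`.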
|
normal
|
{
"blob_id": "813d27e8f9c1a416dab2f891dd71e4791bb92dbb",
"index": 1040,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]\[email protected]('flair' not in sys.modules, reason=\n 'requires the Flair library')\ndef test_flair_simple(small_dataset):\n flair_model = FlairModel(model_path='ner', entities_to_keep=['PERSON'])\n evaluator = Evaluator(model=flair_model)\n evaluation_results = evaluator.evaluate_all(small_dataset)\n scores = evaluator.calculate_score(evaluation_results)\n assert_model_results_gt(scores, 'PERSON', 0)\n",
"step-3": "import sys\nimport pytest\nfrom presidio_evaluator.evaluation import Evaluator\nfrom tests.conftest import assert_model_results_gt\nfrom presidio_evaluator.models.flair_model import FlairModel\n\n\[email protected]\[email protected]('flair' not in sys.modules, reason=\n 'requires the Flair library')\ndef test_flair_simple(small_dataset):\n flair_model = FlairModel(model_path='ner', entities_to_keep=['PERSON'])\n evaluator = Evaluator(model=flair_model)\n evaluation_results = evaluator.evaluate_all(small_dataset)\n scores = evaluator.calculate_score(evaluation_results)\n assert_model_results_gt(scores, 'PERSON', 0)\n",
"step-4": "import sys\n\nimport pytest\n\nfrom presidio_evaluator.evaluation import Evaluator\nfrom tests.conftest import assert_model_results_gt\nfrom presidio_evaluator.models.flair_model import FlairModel\n\n\[email protected]\[email protected](\"flair\" not in sys.modules, reason=\"requires the Flair library\")\ndef test_flair_simple(small_dataset):\n\n flair_model = FlairModel(model_path=\"ner\", entities_to_keep=[\"PERSON\"])\n evaluator = Evaluator(model=flair_model)\n evaluation_results = evaluator.evaluate_all(small_dataset)\n scores = evaluator.calculate_score(evaluation_results)\n\n assert_model_results_gt(scores, \"PERSON\", 0)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import os
import yaml
import sys
import random
import shutil
import openpyxl
import audioanalysis as aa
import numpy as np
import argparse
import logging
"""
manualtest.py
Script to create a listening test. The output, test
case directory and answer_key.yml file, can be
found in the root directory.
manual test creation
responsibilities:
1) directory of directories that each contain two files to compare(a,b) and a duplicated one (x)
example scenarios to test:
JITTER_BUFFER_INIT_X VS. JITTER_BUFFER_INIT_Y
dev version vs dev version
need to come up with more
2) an output yaml file labeled answer_key.yml that says which (a,b) is x
"""
# command line parse
help_string = ("\nPlease note that manual_test.py makes 3 assumptions about "
"these file paths. "
"\n1.Both scenarios contain the same amount of wav files."
"\n2.The wav files in both scenarios have a one to one "
"correspondence between each other. Each test case contains a "
"pair of files, one from each scenario. This pair is made by "
"matching files between scenarios with the same names 3."
"There are no more than 25 audio file pairs")
parser = argparse.ArgumentParser(description="Script to create a listening test. The output, test case directory and answer_key.yml file, can be found in the root directory."+help_string)
parser.add_argument("-o", dest="output_base_path", default= os.getcwd(),help="(optional)Absolute file path to locatin to save test directory and answer key (default: root directory)")
parser.add_argument("scenario_one", help="Absolute file path to location of first scenario. Required")
parser.add_argument("scenario_two", help="Absolute file path to location of second scenario. Required")
args=parser.parse_args()
# globals
output_base_path=args.output_base_path
root_directory = os.getcwd()
# first scenario
scenario_one = args.scenario_one
scenario_one_latency=0
scenario_one_correlation_coefficient=0
# second scenario
scenario_two = args.scenario_two
scenario_two_latency=0
scenario_two_correlation_coefficient=0
output_path=""
answer_key=[]
USER_ANSWER_KEY="user_answer"
USER_PREFERENCE_KEY="user_preference_weight"
USER_X_VALUE_KEY="user_X_value"
USER_CONFIDENCE_KEY="user_answer_confidence"
X_ANSWER_KEY="x_answer_alpha"
A_VALUE_KEY="A_value"
B_VALUE_KEY="B_value"
TESTCASES_SUBDIR="testcases"
A_CASE_NAME="A_"
B_CASE_NAME="B_"
X_CASE_NAME="X_"
WNDWS_COPY_CMD="copy"
AUDIO_TYPE=".wav"
SCNEARIO_ONE_DATA_FILE="output_data.yml"
SCENARIO_ONE_DATA_FILE_KEY="Scenario One"
SCENARIO_TWO_DATA_FILE="output_data.yml"
SCENARIO_TWO_DATA_FILE_KEY="Scenario Two"
ANSWER_KEY_NAME="answer_key.yml"
USER_ANSWER_CASE_A="A"
USER_ANSWER_CASE_B="B"
ANSWER_KEY_SCENARIO_ONE="scenario one"
ANSWER_KEY_SCENARIO_TWO="scenario two"
ANSWER_KEY_QUESTION_KEY="Q_"
MAX_CASE_NUM=24
ADJUSTED_AUDIO_SUBDIR="adjusted_audio"
SCENARIO_ONE_SUBDIR="scenario_one"
SCENARIO_TWO_SUBDIR="scenario_two"
class Answer():
"""
Wrapper for A_B_X directory containing all associated attributes.
Populate all fields of the class and call grade to determine if the
question was correct
**user_answers
user_answer either "A" or "B" indicating which file sounded better
user_preference_weight numeric value between 1-5 indicating how much better the
preferred value was. 5 being significant and 1 minimal
user_X_value either "A" or "B" denoting which file the user believes
X was a duplicate of
user_answer_confidence numeric value between 1-5 indicating how easy it was to
distinguish between A and B and pick X
x_answer_alpha the answer to which file X was a duplicate of. Either
"A" or "B"
A_value String field denoting which scenario A belonged to. Either
scenario_one or SCENARIO_TWO_SUBDIR
B_value String field denoting which scenario B belonged to. Either
scenario_one or SCENARIO_TWO_SUBDIR
correct Call self.grade to populate this field. Compares user_X_value
and x_answer_alpha to determine if question was correct.
Populates with boolean
"""
    def __init__(self, question_num, **user_answers):
        self.question_num=question_num
        self.correct = None
        # dict.get returns None for a missing key, matching the behavior of
        # the original try/except KeyError blocks
        self.user_answer=user_answers.get(USER_ANSWER_KEY)
        self.user_preference_weight=user_answers.get(USER_PREFERENCE_KEY)
        self.user_X_value=user_answers.get(USER_X_VALUE_KEY)
        self.user_answer_confidence=user_answers.get(USER_CONFIDENCE_KEY)
        self.x_answer_alpha=user_answers.get(X_ANSWER_KEY)
        self.A_value=user_answers.get(A_VALUE_KEY)
        self.B_value=user_answers.get(B_VALUE_KEY)
def grade(self):
if self.x_answer_alpha==self.user_X_value:
self.correct=True
else:
self.correct=False
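# Hedged example (not part of the original script): grading a single ABX
# response, using the *_KEY keyword names defined above.
#   ans = Answer(0, x_answer_alpha="A", user_X_value="A")
#   ans.grade()
#   assert ans.correct is True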
def _collect_locations():
    # Validate the two scenario paths the user has elected to compare and return them
logging.info("Enter: _collect_locations")
global scenario_one
global scenario_two
global output_base_path
if not os.path.exists(scenario_one):
print("Scenario One file path does not exist. Exiting")
sys.exit()
if not os.path.exists(scenario_two):
print("Scenario Two file path does not exist. Exiting")
sys.exit()
print("Creating listening test...")
logging.info("Exit: _collect_locations")
return scenario_one, scenario_two, output_base_path
def _cleanup_scenarios(adjusted_file_path):
# Delete the adjusted audio created for this module
try:
shutil.rmtree(adjusted_file_path)
    except OSError:
print("The system could not delete the temporary audio files that "
"were created for this test. This directory can be removed "
"at {}".format(adjusted_file_path))
def _create_output_directory(output_base_path):
# From the base path create a testcases subdirectory
# Return the subdirectory full path
logging.info("Enter: _create_output_directory")
global output_path
output_path = os.path.join(output_base_path, TESTCASES_SUBDIR)
if os.path.exists(output_path):
try:
input("Please note there is already a Testcases directory at - {} .\nPress enter to continue and remove it. Press CNTRL-C to exit.".format(output_path))
shutil.rmtree(output_path)
except PermissionError:
print("There is a test directory located in the same location as the test directory location you specified")
print("It cannot be removed becase another process is still using it. Please close the process or delete yourself.")
sys.exit()
except KeyboardInterrupt:
print("Exiting...")
sys.exit()
os.mkdir(output_path)
logging.info("Exit: _create_output_directory")
return output_path
def _create_answer_key(output_path):
# Parse the data file from scenario one and two if it exists and add too answer key
# Dump data from processes to ANSWER_KEY_NAME in output_path
logging.info("Enter: _create_answer_key")
global answer_key
global scenario_one
global scenario_two
scenario_one_latency_data={}
if os.path.exists(os.path.join(scenario_one, SCNEARIO_ONE_DATA_FILE)):
with open(os.path.join(scenario_one, SCNEARIO_ONE_DATA_FILE)) as output_data:
            scenario_one_latency_data[SCENARIO_ONE_DATA_FILE_KEY]=yaml.safe_load(output_data)
scenario_two_latency_data={}
if os.path.exists(os.path.join(scenario_two, SCENARIO_TWO_DATA_FILE)):
with open(os.path.join(scenario_two, SCENARIO_TWO_DATA_FILE)) as output_data:
            scenario_two_latency_data[SCENARIO_TWO_DATA_FILE_KEY]=yaml.safe_load(output_data)
with open(os.path.join(output_path, ANSWER_KEY_NAME), "w") as answer_key_yml:
yaml.dump(scenario_one_latency_data, answer_key_yml, default_flow_style=False)
yaml.dump(scenario_two_latency_data, answer_key_yml, default_flow_style=False)
for question in answer_key:
yaml_dict={}
Key = str(ANSWER_KEY_QUESTION_KEY+str(question.question_num))
yaml_dict[Key] = {X_ANSWER_KEY: question.x_answer_alpha,A_VALUE_KEY: question.A_value,B_VALUE_KEY: question.B_value}
yaml.dump(yaml_dict, answer_key_yml, default_flow_style=False)
logging.info("Exit: _create_answer_key")
def _create_temp_dir(root_directory, scenario_one, scenario_two):
logging.info("Enter: _create_temp_dir")
# Will create exact copies of both directories specified so files may be altered later
adjusted_file_path = os.path.join(root_directory, ADJUSTED_AUDIO_SUBDIR)
scenario_one_temp = os.path.join(adjusted_file_path, SCENARIO_ONE_SUBDIR)
scenario_two_temp = os.path.join(adjusted_file_path, SCENARIO_TWO_SUBDIR)
try:
os.mkdir(adjusted_file_path)
except FileExistsError:
print("To properly create ABX tests, the audio files are modified so audio begins play at the same time")
print("In order to do this, a new directory called 'adjusted_audio' is temproarily created to hold the adjusted audio.")
input("This directory already exists. Press enter to remove and continue or CTRL-C to quit")
shutil.rmtree(adjusted_file_path)
os.mkdir(adjusted_file_path)
shutil.copytree(scenario_one, scenario_one_temp)
shutil.copytree(scenario_two, scenario_two_temp)
logging.info("Exit: _create_temp_dir")
    return adjusted_file_path, scenario_one_temp, scenario_two_temp
def create_A_B_X_cases(A_B_cases_zip_list, output_path):
"""
    Method to create A_B_X testing directories and record the answers in the global answer_key
    An A file is chosen from either scenario one or scenario two with a 50/50 probability.
    The B file is then from the scenario not chosen for A. An X file is then created with a 50/50
    probability of being either a duplicate of A or B
Parameters:
A_B_cases_zip_list: A list containing absolute file pairs
[[scenario_one, scenario_two]...]
output_path: absolute file path to store testcase directory
Returns:
None
"""
logging.info("Enter: create_A_B_X_cases ")
global scenario_one
global scenario_two
global answer_key
# create listening directories and record answer to each in answer_log
for case_num, case in enumerate(A_B_cases_zip_list):
#MRR I really don't like silently dropping audio pairs. Please just create multiple ABX tests, each with up to 25. Up to you whether you have 3 of 25 and one of 21 or 4 of 24.
if case_num > MAX_CASE_NUM:
logging.info("The amount of cases has exceeded 25. Please note that "
"the accompanying excel sheet only has 25 answer slots and that it will need to "
"be restructured")
print("The amount of cases has exceeded 25. Please note that "
"the accompanying excel sheet only has 25 answer slots and that it will need to "
"be restructured")
test_case_path = os.path.join(output_path, str(case_num))
try:
os.mkdir(test_case_path)
except FileExistsError:
logging.debug("Could not create test case directory at {} - encountered FileExistsError".format(test_case_path))
print("Could not create test case directory at {} - encountered FileExistsError".format(test_case_path))
sys.exit()
        switch_A_B = random.randint(0,1) #If one then A and B are switched. This is so scenario one and two alternate their A and B positions roughly 50% of the time
# add the wav files
# pick one to duplicate
x_answer=random.randint(0,1)
if switch_A_B:
# add A
cmd_command_copy_a = WNDWS_COPY_CMD+" " + case[1] + " "+ os.path.join(test_case_path, A_CASE_NAME+str(case_num)+AUDIO_TYPE)
os.system(cmd_command_copy_a)
# add B
cmd_command_copy_b = WNDWS_COPY_CMD+" " + case[0] + " "+ os.path.join(test_case_path, B_CASE_NAME+str(case_num)+AUDIO_TYPE)
os.system(cmd_command_copy_b)
# add X
if x_answer==1:
x_answer_alpha=USER_ANSWER_CASE_A
cmd_command_copy_a = WNDWS_COPY_CMD+" " + case[1] + " "+ os.path.join(test_case_path, X_CASE_NAME+str(case_num)+AUDIO_TYPE)
os.system(cmd_command_copy_a)
if x_answer==0:
x_answer_alpha=USER_ANSWER_CASE_B
cmd_command_copy_b = WNDWS_COPY_CMD+" " + case[0] + " "+ os.path.join(test_case_path, X_CASE_NAME+str(case_num)+AUDIO_TYPE)
os.system(cmd_command_copy_b)
A_value=ANSWER_KEY_SCENARIO_TWO
B_value=ANSWER_KEY_SCENARIO_ONE
else:
# add A
cmd_command_copy_a = WNDWS_COPY_CMD+" " + case[0] + " "+ os.path.join(test_case_path, A_CASE_NAME+str(case_num)+AUDIO_TYPE)
os.system(cmd_command_copy_a)
# add B
cmd_command_copy_b = WNDWS_COPY_CMD+" " + case[1] + " "+ os.path.join(test_case_path, B_CASE_NAME+str(case_num)+AUDIO_TYPE)
os.system(cmd_command_copy_b)
# add X
if x_answer==0:
x_answer_alpha=USER_ANSWER_CASE_A
cmd_command_copy_a = WNDWS_COPY_CMD+" " + case[0] + " "+ os.path.join(test_case_path, X_CASE_NAME+str(case_num)+AUDIO_TYPE)
os.system(cmd_command_copy_a)
if x_answer==1:
x_answer_alpha=USER_ANSWER_CASE_B
cmd_command_copy_b = WNDWS_COPY_CMD+" " + case[1] + " "+ os.path.join(test_case_path, X_CASE_NAME+str(case_num)+AUDIO_TYPE)
os.system(cmd_command_copy_b)
A_value=ANSWER_KEY_SCENARIO_ONE
B_value=ANSWER_KEY_SCENARIO_TWO
question_info = Answer(case_num, x_answer_alpha=x_answer_alpha,A_value=A_value, B_value=B_value)
answer_key.append(question_info)
logging.info("Exit: create_A_B_X_cases")
def create_manual_tests():
logging.info("Enter: create_manual_tests")
global root_directory
scenario_one, scenario_two, output_base_path=_collect_locations()
output_path = _create_output_directory(output_base_path)
# Confirm another answer key does not already exist
    if os.path.exists(os.path.join(output_base_path, ANSWER_KEY_NAME)):
        input("An answer_key.yml file already exists at - "+output_base_path+" - this file will be deleted. Press enter if this is okay or CTRL-C to exit")
        os.remove(os.path.join(output_base_path, ANSWER_KEY_NAME))
adjusted_file_path, scenario_one_temp, scenario_two_temp= _create_temp_dir(root_directory, scenario_one, scenario_two)
print("Please note that to create the manual tests, the latency of each file must be calculated. This takes roughly 30 minutes per 25 recordings. Press Enter to continue.")
rate_log, correlation_sample_log, correlation_coefficient_log = aa.find_latency_values(scenario_one_temp, scenario_two_temp)
# Negative value indicates that scenario one signal was delayed. Positive value indicates that scenario two signal was delayed
file_zip = aa.pair_directories(scenario_one_temp, scenario_two_temp)
aa.adjust_files(correlation_sample_log, rate_log, file_zip)
create_A_B_X_cases(file_zip, output_path)
_cleanup_scenarios(adjusted_file_path)
_create_answer_key(output_base_path)
print("done")
logging.info("Exit: create_manual_tests")
if __name__ =="__main__":
logging.basicConfig(filename="manualtest.log", level=logging.INFO, format="%(asctime)s %(levelname)s %(module)s line: %(lineno)d, %(message)s")
logging.info("Enter: main")
create_manual_tests()
logging.info("Exit: main")
|
normal
|
{
"blob_id": "c6ef9154285dee3b21980801a101ad5e34a50cab",
"index": 4656,
"step-1": "<mask token>\n\n\nclass Answer:\n \"\"\"\n Wrapper for A_B_X directory containing all associated attributes. \n Populate all fields of the class and call grade to determine if the \n question was correct\n **user_answers\n user_answer either \"A\" or \"B\" indicating which file sounded better\n user_preference_weight numeric value between 1-5 indicating how much better the \n preferred value was. 5 being significant and 1 minimal\n user_X_value either \"A\" or \"B\" denoting which file the user believes\n X was a duplicate of \n user_answer_confidence numeric value between 1-5 indicating how easy it was to \n distinguish between A and B and pick X\n x_answer_alpha the answer to which file X was a duplicate of. Either \n \"A\" or \"B\"\n A_value String field denoting which scenario A belonged to. Either\n scenario_one or SCENARIO_TWO_SUBDIR\n B_value String field denoting which scenario B belonged to. Either\n scenario_one or SCENARIO_TWO_SUBDIR\n correct Call self.grade to populate this field. Compares user_X_value\n and x_answer_alpha to determine if question was correct. \n Populates with boolean\n \"\"\"\n\n def __init__(self, question_num, **user_answers):\n self.question_num = question_num\n self.correct = None\n try:\n self.user_answer = user_answers[USER_ANSWER_KEY]\n except KeyError:\n self.user_answer = None\n try:\n self.user_preference_weight = user_answers[USER_PREFERENCE_KEY]\n except KeyError:\n self.user_preference_weight = None\n try:\n self.user_X_value = user_answers[USER_X_VALUE_KEY]\n except KeyError:\n self.user_X_value = None\n try:\n self.user_answer_confidence = user_answers[USER_CONFIDENCE_KEY]\n except KeyError:\n self.user_answer_confidence = None\n try:\n self.x_answer_alpha = user_answers[X_ANSWER_KEY]\n except KeyError:\n self.x_answer_alpha = None\n try:\n self.A_value = user_answers[A_VALUE_KEY]\n except KeyError:\n self.A_value = None\n try:\n self.B_value = user_answers[B_VALUE_KEY]\n except KeyError:\n self.B_value = None\n\n def grade(self):\n if self.x_answer_alpha == self.user_X_value:\n self.correct = True\n else:\n self.correct = False\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Answer:\n \"\"\"\n Wrapper for A_B_X directory containing all associated attributes. \n Populate all fields of the class and call grade to determine if the \n question was correct\n **user_answers\n user_answer either \"A\" or \"B\" indicating which file sounded better\n user_preference_weight numeric value between 1-5 indicating how much better the \n preferred value was. 5 being significant and 1 minimal\n user_X_value either \"A\" or \"B\" denoting which file the user believes\n X was a duplicate of \n user_answer_confidence numeric value between 1-5 indicating how easy it was to \n distinguish between A and B and pick X\n x_answer_alpha the answer to which file X was a duplicate of. Either \n \"A\" or \"B\"\n A_value String field denoting which scenario A belonged to. Either\n scenario_one or SCENARIO_TWO_SUBDIR\n B_value String field denoting which scenario B belonged to. Either\n scenario_one or SCENARIO_TWO_SUBDIR\n correct Call self.grade to populate this field. Compares user_X_value\n and x_answer_alpha to determine if question was correct. \n Populates with boolean\n \"\"\"\n\n def __init__(self, question_num, **user_answers):\n self.question_num = question_num\n self.correct = None\n try:\n self.user_answer = user_answers[USER_ANSWER_KEY]\n except KeyError:\n self.user_answer = None\n try:\n self.user_preference_weight = user_answers[USER_PREFERENCE_KEY]\n except KeyError:\n self.user_preference_weight = None\n try:\n self.user_X_value = user_answers[USER_X_VALUE_KEY]\n except KeyError:\n self.user_X_value = None\n try:\n self.user_answer_confidence = user_answers[USER_CONFIDENCE_KEY]\n except KeyError:\n self.user_answer_confidence = None\n try:\n self.x_answer_alpha = user_answers[X_ANSWER_KEY]\n except KeyError:\n self.x_answer_alpha = None\n try:\n self.A_value = user_answers[A_VALUE_KEY]\n except KeyError:\n self.A_value = None\n try:\n self.B_value = user_answers[B_VALUE_KEY]\n except KeyError:\n self.B_value = None\n\n def grade(self):\n if self.x_answer_alpha == self.user_X_value:\n self.correct = True\n else:\n self.correct = False\n\n\n<mask token>\n\n\ndef _cleanup_scenarios(adjusted_file_path):\n try:\n shutil.rmtree(adjusted_file_path)\n except:\n print(\n 'The system could not delete the temporary audio files that were created for this test. This directory can be removed at {}'\n .format(adjusted_file_path))\n\n\ndef _create_output_directory(output_base_path):\n logging.info('Enter: _create_output_directory')\n global output_path\n output_path = os.path.join(output_base_path, TESTCASES_SUBDIR)\n if os.path.exists(output_path):\n try:\n input(\n \"\"\"Please note there is already a Testcases directory at - {} .\nPress enter to continue and remove it. Press CNTRL-C to exit.\"\"\"\n .format(output_path))\n shutil.rmtree(output_path)\n except PermissionError:\n print(\n 'There is a test directory located in the same location as the test directory location you specified'\n )\n print(\n 'It cannot be removed becase another process is still using it. 
Please close the process or delete yourself.'\n )\n sys.exit()\n except KeyboardInterrupt:\n print('Exiting...')\n sys.exit()\n os.mkdir(output_path)\n logging.info('Exit: _create_output_directory')\n return output_path\n\n\ndef _create_answer_key(output_path):\n logging.info('Enter: _create_answer_key')\n global answer_key\n global scenario_one\n global scenario_two\n scenario_one_latency_data = {}\n if os.path.exists(os.path.join(scenario_one, SCNEARIO_ONE_DATA_FILE)):\n with open(os.path.join(scenario_one, SCNEARIO_ONE_DATA_FILE)\n ) as output_data:\n scenario_one_latency_data[SCENARIO_ONE_DATA_FILE_KEY] = yaml.load(\n output_data)\n scenario_two_latency_data = {}\n if os.path.exists(os.path.join(scenario_two, SCENARIO_TWO_DATA_FILE)):\n with open(os.path.join(scenario_two, SCENARIO_TWO_DATA_FILE)\n ) as output_data:\n scenario_two_latency_data[SCENARIO_TWO_DATA_FILE_KEY] = yaml.load(\n output_data)\n with open(os.path.join(output_path, ANSWER_KEY_NAME), 'w'\n ) as answer_key_yml:\n yaml.dump(scenario_one_latency_data, answer_key_yml,\n default_flow_style=False)\n yaml.dump(scenario_two_latency_data, answer_key_yml,\n default_flow_style=False)\n for question in answer_key:\n yaml_dict = {}\n Key = str(ANSWER_KEY_QUESTION_KEY + str(question.question_num))\n yaml_dict[Key] = {X_ANSWER_KEY: question.x_answer_alpha,\n A_VALUE_KEY: question.A_value, B_VALUE_KEY: question.B_value}\n yaml.dump(yaml_dict, answer_key_yml, default_flow_style=False)\n logging.info('Exit: _create_answer_key')\n\n\ndef _create_temp_dir(root_directory, scenario_one, scenario_two):\n logging.info('Enter: _create_temp_dir')\n adjusted_file_path = os.path.join(root_directory, ADJUSTED_AUDIO_SUBDIR)\n scenario_one_temp = os.path.join(adjusted_file_path, SCENARIO_ONE_SUBDIR)\n scenario_two_temp = os.path.join(adjusted_file_path, SCENARIO_TWO_SUBDIR)\n try:\n os.mkdir(adjusted_file_path)\n except FileExistsError:\n print(\n 'To properly create ABX tests, the audio files are modified so audio begins play at the same time'\n )\n print(\n \"In order to do this, a new directory called 'adjusted_audio' is temproarily created to hold the adjusted audio.\"\n )\n input(\n 'This directory already exists. Press enter to remove and continue or CTRL-C to quit'\n )\n shutil.rmtree(adjusted_file_path)\n os.mkdir(adjusted_file_path)\n shutil.copytree(scenario_one, scenario_one_temp)\n shutil.copytree(scenario_two, scenario_two_temp)\n logging.info('Exit: _create_temp_dir')\n return adjusted_file_path, scenario_one_temp, scenario_one_temp\n\n\ndef create_A_B_X_cases(A_B_cases_zip_list, output_path):\n \"\"\"\n Method to create A_B_X testing directories and return the corresponding answer key\n An A file is chosen from either the scenario one or two with a 50/50 probability. \n The B file is then from the scenario not chosen for A. An X file is then created with a 50/50\n probability of being either a duplicate of A or B\n Parameters:\n A_B_cases_zip_list: A list containing absolute file pairs\n [[scenario_one, scenario_two]...]\n output_path: absolute file path to store testcase directory \n\n Returns:\n None\n \"\"\"\n logging.info('Enter: create_A_B_X_cases ')\n global scenario_one\n global scenario_two\n global answer_key\n for case_num, case in enumerate(A_B_cases_zip_list):\n if case_num > MAX_CASE_NUM:\n logging.info(\n 'The amount of cases has exceeded 25. Please note that the accompanying excel sheet only has 25 answer slots and that it will need to be restructured'\n )\n print(\n 'The amount of cases has exceeded 25. 
Please note that the accompanying excel sheet only has 25 answer slots and that it will need to be restructured'\n )\n test_case_path = os.path.join(output_path, str(case_num))\n try:\n os.mkdir(test_case_path)\n except FileExistsError:\n logging.debug(\n 'Could not create test case directory at {} - encountered FileExistsError'\n .format(test_case_path))\n print(\n 'Could not create test case directory at {} - encountered FileExistsError'\n .format(test_case_path))\n sys.exit()\n switch_A_B = random.randint(0, 1)\n x_answer = random.randint(0, 1)\n if switch_A_B:\n cmd_command_copy_a = WNDWS_COPY_CMD + ' ' + case[1\n ] + ' ' + os.path.join(test_case_path, A_CASE_NAME + str(\n case_num) + AUDIO_TYPE)\n os.system(cmd_command_copy_a)\n cmd_command_copy_b = WNDWS_COPY_CMD + ' ' + case[0\n ] + ' ' + os.path.join(test_case_path, B_CASE_NAME + str(\n case_num) + AUDIO_TYPE)\n os.system(cmd_command_copy_b)\n if x_answer == 1:\n x_answer_alpha = USER_ANSWER_CASE_A\n cmd_command_copy_a = WNDWS_COPY_CMD + ' ' + case[1\n ] + ' ' + os.path.join(test_case_path, X_CASE_NAME +\n str(case_num) + AUDIO_TYPE)\n os.system(cmd_command_copy_a)\n if x_answer == 0:\n x_answer_alpha = USER_ANSWER_CASE_B\n cmd_command_copy_b = WNDWS_COPY_CMD + ' ' + case[0\n ] + ' ' + os.path.join(test_case_path, X_CASE_NAME +\n str(case_num) + AUDIO_TYPE)\n os.system(cmd_command_copy_b)\n A_value = ANSWER_KEY_SCENARIO_TWO\n B_value = ANSWER_KEY_SCENARIO_ONE\n else:\n cmd_command_copy_a = WNDWS_COPY_CMD + ' ' + case[0\n ] + ' ' + os.path.join(test_case_path, A_CASE_NAME + str(\n case_num) + AUDIO_TYPE)\n os.system(cmd_command_copy_a)\n cmd_command_copy_b = WNDWS_COPY_CMD + ' ' + case[1\n ] + ' ' + os.path.join(test_case_path, B_CASE_NAME + str(\n case_num) + AUDIO_TYPE)\n os.system(cmd_command_copy_b)\n if x_answer == 0:\n x_answer_alpha = USER_ANSWER_CASE_A\n cmd_command_copy_a = WNDWS_COPY_CMD + ' ' + case[0\n ] + ' ' + os.path.join(test_case_path, X_CASE_NAME +\n str(case_num) + AUDIO_TYPE)\n os.system(cmd_command_copy_a)\n if x_answer == 1:\n x_answer_alpha = USER_ANSWER_CASE_B\n cmd_command_copy_b = WNDWS_COPY_CMD + ' ' + case[1\n ] + ' ' + os.path.join(test_case_path, X_CASE_NAME +\n str(case_num) + AUDIO_TYPE)\n os.system(cmd_command_copy_b)\n A_value = ANSWER_KEY_SCENARIO_ONE\n B_value = ANSWER_KEY_SCENARIO_TWO\n question_info = Answer(case_num, x_answer_alpha=x_answer_alpha,\n A_value=A_value, B_value=B_value)\n answer_key.append(question_info)\n logging.info('Exit: create_A_B_X_cases')\n\n\ndef create_manual_tests():\n logging.info('Enter: create_manual_tests')\n global root_directory\n scenario_one, scenario_two, output_base_path = _collect_locations()\n output_path = _create_output_directory(output_base_path)\n if os.path.exists(os.path.join(output_path, ANSWER_KEY_NAME)):\n input('An answer_key.yml file already exists at - ' + output_path +\n ' - this file will be deleted. Press enter if this is okay of CNTRL-C to exit'\n )\n os.remove(os.path.join(output_path, ANSWER_KEY_NAME))\n adjusted_file_path, scenario_one_temp, scenario_two_temp = (\n _create_temp_dir(root_directory, scenario_one, scenario_two))\n print(\n 'Please note that to create the manual tests, the latency of each file must be calculated. This takes roughly 30 minutes per 25 recordings. 
Press Enter to continue.'\n )\n rate_log, correlation_sample_log, correlation_coefficient_log = (aa.\n find_latency_values(scenario_one_temp, scenario_two_temp))\n file_zip = aa.pair_directories(scenario_one_temp, scenario_two_temp)\n aa.adjust_files(correlation_sample_log, rate_log, file_zip)\n create_A_B_X_cases(file_zip, output_path)\n _cleanup_scenarios(adjusted_file_path)\n _create_answer_key(output_base_path)\n print('done')\n logging.info('Exit: create_manual_tests')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Answer:\n \"\"\"\n Wrapper for A_B_X directory containing all associated attributes. \n Populate all fields of the class and call grade to determine if the \n question was correct\n **user_answers\n user_answer either \"A\" or \"B\" indicating which file sounded better\n user_preference_weight numeric value between 1-5 indicating how much better the \n preferred value was. 5 being significant and 1 minimal\n user_X_value either \"A\" or \"B\" denoting which file the user believes\n X was a duplicate of \n user_answer_confidence numeric value between 1-5 indicating how easy it was to \n distinguish between A and B and pick X\n x_answer_alpha the answer to which file X was a duplicate of. Either \n \"A\" or \"B\"\n A_value String field denoting which scenario A belonged to. Either\n scenario_one or SCENARIO_TWO_SUBDIR\n B_value String field denoting which scenario B belonged to. Either\n scenario_one or SCENARIO_TWO_SUBDIR\n correct Call self.grade to populate this field. Compares user_X_value\n and x_answer_alpha to determine if question was correct. \n Populates with boolean\n \"\"\"\n\n def __init__(self, question_num, **user_answers):\n self.question_num = question_num\n self.correct = None\n try:\n self.user_answer = user_answers[USER_ANSWER_KEY]\n except KeyError:\n self.user_answer = None\n try:\n self.user_preference_weight = user_answers[USER_PREFERENCE_KEY]\n except KeyError:\n self.user_preference_weight = None\n try:\n self.user_X_value = user_answers[USER_X_VALUE_KEY]\n except KeyError:\n self.user_X_value = None\n try:\n self.user_answer_confidence = user_answers[USER_CONFIDENCE_KEY]\n except KeyError:\n self.user_answer_confidence = None\n try:\n self.x_answer_alpha = user_answers[X_ANSWER_KEY]\n except KeyError:\n self.x_answer_alpha = None\n try:\n self.A_value = user_answers[A_VALUE_KEY]\n except KeyError:\n self.A_value = None\n try:\n self.B_value = user_answers[B_VALUE_KEY]\n except KeyError:\n self.B_value = None\n\n def grade(self):\n if self.x_answer_alpha == self.user_X_value:\n self.correct = True\n else:\n self.correct = False\n\n\ndef _collect_locations():\n logging.info('Enter: _collect_locations')\n global scenario_one\n global scenario_two\n global output_base_path\n if not os.path.exists(scenario_one):\n print('Scenario One file path does not exist. Exiting')\n sys.exit()\n if not os.path.exists(scenario_two):\n print('Scenario Two file path does not exist. Exiting')\n sys.exit()\n print('Creating listening test...')\n logging.info('Exit: _collect_locations')\n return scenario_one, scenario_two, output_base_path\n\n\ndef _cleanup_scenarios(adjusted_file_path):\n try:\n shutil.rmtree(adjusted_file_path)\n except:\n print(\n 'The system could not delete the temporary audio files that were created for this test. This directory can be removed at {}'\n .format(adjusted_file_path))\n\n\ndef _create_output_directory(output_base_path):\n logging.info('Enter: _create_output_directory')\n global output_path\n output_path = os.path.join(output_base_path, TESTCASES_SUBDIR)\n if os.path.exists(output_path):\n try:\n input(\n \"\"\"Please note there is already a Testcases directory at - {} .\nPress enter to continue and remove it. Press CNTRL-C to exit.\"\"\"\n .format(output_path))\n shutil.rmtree(output_path)\n except PermissionError:\n print(\n 'There is a test directory located in the same location as the test directory location you specified'\n )\n print(\n 'It cannot be removed becase another process is still using it. 
Please close the process or delete yourself.'\n )\n sys.exit()\n except KeyboardInterrupt:\n print('Exiting...')\n sys.exit()\n os.mkdir(output_path)\n logging.info('Exit: _create_output_directory')\n return output_path\n\n\ndef _create_answer_key(output_path):\n logging.info('Enter: _create_answer_key')\n global answer_key\n global scenario_one\n global scenario_two\n scenario_one_latency_data = {}\n if os.path.exists(os.path.join(scenario_one, SCNEARIO_ONE_DATA_FILE)):\n with open(os.path.join(scenario_one, SCNEARIO_ONE_DATA_FILE)\n ) as output_data:\n scenario_one_latency_data[SCENARIO_ONE_DATA_FILE_KEY] = yaml.load(\n output_data)\n scenario_two_latency_data = {}\n if os.path.exists(os.path.join(scenario_two, SCENARIO_TWO_DATA_FILE)):\n with open(os.path.join(scenario_two, SCENARIO_TWO_DATA_FILE)\n ) as output_data:\n scenario_two_latency_data[SCENARIO_TWO_DATA_FILE_KEY] = yaml.load(\n output_data)\n with open(os.path.join(output_path, ANSWER_KEY_NAME), 'w'\n ) as answer_key_yml:\n yaml.dump(scenario_one_latency_data, answer_key_yml,\n default_flow_style=False)\n yaml.dump(scenario_two_latency_data, answer_key_yml,\n default_flow_style=False)\n for question in answer_key:\n yaml_dict = {}\n Key = str(ANSWER_KEY_QUESTION_KEY + str(question.question_num))\n yaml_dict[Key] = {X_ANSWER_KEY: question.x_answer_alpha,\n A_VALUE_KEY: question.A_value, B_VALUE_KEY: question.B_value}\n yaml.dump(yaml_dict, answer_key_yml, default_flow_style=False)\n logging.info('Exit: _create_answer_key')\n\n\ndef _create_temp_dir(root_directory, scenario_one, scenario_two):\n logging.info('Enter: _create_temp_dir')\n adjusted_file_path = os.path.join(root_directory, ADJUSTED_AUDIO_SUBDIR)\n scenario_one_temp = os.path.join(adjusted_file_path, SCENARIO_ONE_SUBDIR)\n scenario_two_temp = os.path.join(adjusted_file_path, SCENARIO_TWO_SUBDIR)\n try:\n os.mkdir(adjusted_file_path)\n except FileExistsError:\n print(\n 'To properly create ABX tests, the audio files are modified so audio begins play at the same time'\n )\n print(\n \"In order to do this, a new directory called 'adjusted_audio' is temproarily created to hold the adjusted audio.\"\n )\n input(\n 'This directory already exists. Press enter to remove and continue or CTRL-C to quit'\n )\n shutil.rmtree(adjusted_file_path)\n os.mkdir(adjusted_file_path)\n shutil.copytree(scenario_one, scenario_one_temp)\n shutil.copytree(scenario_two, scenario_two_temp)\n logging.info('Exit: _create_temp_dir')\n return adjusted_file_path, scenario_one_temp, scenario_one_temp\n\n\ndef create_A_B_X_cases(A_B_cases_zip_list, output_path):\n \"\"\"\n Method to create A_B_X testing directories and return the corresponding answer key\n An A file is chosen from either the scenario one or two with a 50/50 probability. \n The B file is then from the scenario not chosen for A. An X file is then created with a 50/50\n probability of being either a duplicate of A or B\n Parameters:\n A_B_cases_zip_list: A list containing absolute file pairs\n [[scenario_one, scenario_two]...]\n output_path: absolute file path to store testcase directory \n\n Returns:\n None\n \"\"\"\n logging.info('Enter: create_A_B_X_cases ')\n global scenario_one\n global scenario_two\n global answer_key\n for case_num, case in enumerate(A_B_cases_zip_list):\n if case_num > MAX_CASE_NUM:\n logging.info(\n 'The amount of cases has exceeded 25. Please note that the accompanying excel sheet only has 25 answer slots and that it will need to be restructured'\n )\n print(\n 'The amount of cases has exceeded 25. 
Please note that the accompanying excel sheet only has 25 answer slots and that it will need to be restructured'\n )\n test_case_path = os.path.join(output_path, str(case_num))\n try:\n os.mkdir(test_case_path)\n except FileExistsError:\n logging.debug(\n 'Could not create test case directory at {} - encountered FileExistsError'\n .format(test_case_path))\n print(\n 'Could not create test case directory at {} - encountered FileExistsError'\n .format(test_case_path))\n sys.exit()\n switch_A_B = random.randint(0, 1)\n x_answer = random.randint(0, 1)\n if switch_A_B:\n cmd_command_copy_a = WNDWS_COPY_CMD + ' ' + case[1\n ] + ' ' + os.path.join(test_case_path, A_CASE_NAME + str(\n case_num) + AUDIO_TYPE)\n os.system(cmd_command_copy_a)\n cmd_command_copy_b = WNDWS_COPY_CMD + ' ' + case[0\n ] + ' ' + os.path.join(test_case_path, B_CASE_NAME + str(\n case_num) + AUDIO_TYPE)\n os.system(cmd_command_copy_b)\n if x_answer == 1:\n x_answer_alpha = USER_ANSWER_CASE_A\n cmd_command_copy_a = WNDWS_COPY_CMD + ' ' + case[1\n ] + ' ' + os.path.join(test_case_path, X_CASE_NAME +\n str(case_num) + AUDIO_TYPE)\n os.system(cmd_command_copy_a)\n if x_answer == 0:\n x_answer_alpha = USER_ANSWER_CASE_B\n cmd_command_copy_b = WNDWS_COPY_CMD + ' ' + case[0\n ] + ' ' + os.path.join(test_case_path, X_CASE_NAME +\n str(case_num) + AUDIO_TYPE)\n os.system(cmd_command_copy_b)\n A_value = ANSWER_KEY_SCENARIO_TWO\n B_value = ANSWER_KEY_SCENARIO_ONE\n else:\n cmd_command_copy_a = WNDWS_COPY_CMD + ' ' + case[0\n ] + ' ' + os.path.join(test_case_path, A_CASE_NAME + str(\n case_num) + AUDIO_TYPE)\n os.system(cmd_command_copy_a)\n cmd_command_copy_b = WNDWS_COPY_CMD + ' ' + case[1\n ] + ' ' + os.path.join(test_case_path, B_CASE_NAME + str(\n case_num) + AUDIO_TYPE)\n os.system(cmd_command_copy_b)\n if x_answer == 0:\n x_answer_alpha = USER_ANSWER_CASE_A\n cmd_command_copy_a = WNDWS_COPY_CMD + ' ' + case[0\n ] + ' ' + os.path.join(test_case_path, X_CASE_NAME +\n str(case_num) + AUDIO_TYPE)\n os.system(cmd_command_copy_a)\n if x_answer == 1:\n x_answer_alpha = USER_ANSWER_CASE_B\n cmd_command_copy_b = WNDWS_COPY_CMD + ' ' + case[1\n ] + ' ' + os.path.join(test_case_path, X_CASE_NAME +\n str(case_num) + AUDIO_TYPE)\n os.system(cmd_command_copy_b)\n A_value = ANSWER_KEY_SCENARIO_ONE\n B_value = ANSWER_KEY_SCENARIO_TWO\n question_info = Answer(case_num, x_answer_alpha=x_answer_alpha,\n A_value=A_value, B_value=B_value)\n answer_key.append(question_info)\n logging.info('Exit: create_A_B_X_cases')\n\n\ndef create_manual_tests():\n logging.info('Enter: create_manual_tests')\n global root_directory\n scenario_one, scenario_two, output_base_path = _collect_locations()\n output_path = _create_output_directory(output_base_path)\n if os.path.exists(os.path.join(output_path, ANSWER_KEY_NAME)):\n input('An answer_key.yml file already exists at - ' + output_path +\n ' - this file will be deleted. Press enter if this is okay of CNTRL-C to exit'\n )\n os.remove(os.path.join(output_path, ANSWER_KEY_NAME))\n adjusted_file_path, scenario_one_temp, scenario_two_temp = (\n _create_temp_dir(root_directory, scenario_one, scenario_two))\n print(\n 'Please note that to create the manual tests, the latency of each file must be calculated. This takes roughly 30 minutes per 25 recordings. 
Press Enter to continue.'\n )\n rate_log, correlation_sample_log, correlation_coefficient_log = (aa.\n find_latency_values(scenario_one_temp, scenario_two_temp))\n file_zip = aa.pair_directories(scenario_one_temp, scenario_two_temp)\n aa.adjust_files(correlation_sample_log, rate_log, file_zip)\n create_A_B_X_cases(file_zip, output_path)\n _cleanup_scenarios(adjusted_file_path)\n _create_answer_key(output_base_path)\n print('done')\n logging.info('Exit: create_manual_tests')\n\n\n<mask token>\n",
"step-4": "<mask token>\nhelp_string = \"\"\"\nPlease note that manual_test.py makes 3 assumptions about these file paths. \n1.Both scenarios contain the same amount of wav files.\n2.The wav files in both scenarios have a one to one correspondence between each other. Each test case contains a pair of files, one from each scenario. This pair is made by matching files between scenarios with the same names 3.There are no more than 25 audio file pairs\"\"\"\nparser = argparse.ArgumentParser(description=\n 'Script to create a listening test. The output, test case directory and answer_key.yml file, can be found in the root directory.'\n + help_string)\nparser.add_argument('-o', dest='output_base_path', default=os.getcwd(),\n help=\n '(optional)Absolute file path to locatin to save test directory and answer key (default: root directory)'\n )\nparser.add_argument('scenario_one', help=\n 'Absolute file path to location of first scenario. Required')\nparser.add_argument('scenario_two', help=\n 'Absolute file path to location of second scenario. Required')\nargs = parser.parse_args()\noutput_base_path = args.output_base_path\nroot_directory = os.getcwd()\nscenario_one = args.scenario_one\nscenario_one_latency = 0\nscenario_one_correlation_coefficient = 0\nscenario_two = args.scenario_two\nscenario_two_latency = 0\nscenario_two_correlation_coefficient = 0\noutput_path = ''\nanswer_key = []\nUSER_ANSWER_KEY = 'user_answer'\nUSER_PREFERENCE_KEY = 'user_preference_weight'\nUSER_X_VALUE_KEY = 'user_X_value'\nUSER_CONFIDENCE_KEY = 'user_answer_confidence'\nX_ANSWER_KEY = 'x_answer_alpha'\nA_VALUE_KEY = 'A_value'\nB_VALUE_KEY = 'B_value'\nTESTCASES_SUBDIR = 'testcases'\nA_CASE_NAME = 'A_'\nB_CASE_NAME = 'B_'\nX_CASE_NAME = 'X_'\nWNDWS_COPY_CMD = 'copy'\nAUDIO_TYPE = '.wav'\nSCNEARIO_ONE_DATA_FILE = 'output_data.yml'\nSCENARIO_ONE_DATA_FILE_KEY = 'Scenario One'\nSCENARIO_TWO_DATA_FILE = 'output_data.yml'\nSCENARIO_TWO_DATA_FILE_KEY = 'Scenario Two'\nANSWER_KEY_NAME = 'answer_key.yml'\nUSER_ANSWER_CASE_A = 'A'\nUSER_ANSWER_CASE_B = 'B'\nANSWER_KEY_SCENARIO_ONE = 'scenario one'\nANSWER_KEY_SCENARIO_TWO = 'scenario two'\nANSWER_KEY_QUESTION_KEY = 'Q_'\nMAX_CASE_NUM = 24\nADJUSTED_AUDIO_SUBDIR = 'adjusted_audio'\nSCENARIO_ONE_SUBDIR = 'scenario_one'\nSCENARIO_TWO_SUBDIR = 'scenario_two'\n\n\nclass Answer:\n \"\"\"\n Wrapper for A_B_X directory containing all associated attributes. \n Populate all fields of the class and call grade to determine if the \n question was correct\n **user_answers\n user_answer either \"A\" or \"B\" indicating which file sounded better\n user_preference_weight numeric value between 1-5 indicating how much better the \n preferred value was. 5 being significant and 1 minimal\n user_X_value either \"A\" or \"B\" denoting which file the user believes\n X was a duplicate of \n user_answer_confidence numeric value between 1-5 indicating how easy it was to \n distinguish between A and B and pick X\n x_answer_alpha the answer to which file X was a duplicate of. Either \n \"A\" or \"B\"\n A_value String field denoting which scenario A belonged to. Either\n scenario_one or SCENARIO_TWO_SUBDIR\n B_value String field denoting which scenario B belonged to. Either\n scenario_one or SCENARIO_TWO_SUBDIR\n correct Call self.grade to populate this field. Compares user_X_value\n and x_answer_alpha to determine if question was correct. 
\n Populates with boolean\n \"\"\"\n\n def __init__(self, question_num, **user_answers):\n self.question_num = question_num\n self.correct = None\n try:\n self.user_answer = user_answers[USER_ANSWER_KEY]\n except KeyError:\n self.user_answer = None\n try:\n self.user_preference_weight = user_answers[USER_PREFERENCE_KEY]\n except KeyError:\n self.user_preference_weight = None\n try:\n self.user_X_value = user_answers[USER_X_VALUE_KEY]\n except KeyError:\n self.user_X_value = None\n try:\n self.user_answer_confidence = user_answers[USER_CONFIDENCE_KEY]\n except KeyError:\n self.user_answer_confidence = None\n try:\n self.x_answer_alpha = user_answers[X_ANSWER_KEY]\n except KeyError:\n self.x_answer_alpha = None\n try:\n self.A_value = user_answers[A_VALUE_KEY]\n except KeyError:\n self.A_value = None\n try:\n self.B_value = user_answers[B_VALUE_KEY]\n except KeyError:\n self.B_value = None\n\n def grade(self):\n if self.x_answer_alpha == self.user_X_value:\n self.correct = True\n else:\n self.correct = False\n\n\ndef _collect_locations():\n logging.info('Enter: _collect_locations')\n global scenario_one\n global scenario_two\n global output_base_path\n if not os.path.exists(scenario_one):\n print('Scenario One file path does not exist. Exiting')\n sys.exit()\n if not os.path.exists(scenario_two):\n print('Scenario Two file path does not exist. Exiting')\n sys.exit()\n print('Creating listening test...')\n logging.info('Exit: _collect_locations')\n return scenario_one, scenario_two, output_base_path\n\n\ndef _cleanup_scenarios(adjusted_file_path):\n try:\n shutil.rmtree(adjusted_file_path)\n except:\n print(\n 'The system could not delete the temporary audio files that were created for this test. This directory can be removed at {}'\n .format(adjusted_file_path))\n\n\ndef _create_output_directory(output_base_path):\n logging.info('Enter: _create_output_directory')\n global output_path\n output_path = os.path.join(output_base_path, TESTCASES_SUBDIR)\n if os.path.exists(output_path):\n try:\n input(\n \"\"\"Please note there is already a Testcases directory at - {} .\nPress enter to continue and remove it. Press CNTRL-C to exit.\"\"\"\n .format(output_path))\n shutil.rmtree(output_path)\n except PermissionError:\n print(\n 'There is a test directory located in the same location as the test directory location you specified'\n )\n print(\n 'It cannot be removed becase another process is still using it. 
Please close the process or delete yourself.'\n )\n sys.exit()\n except KeyboardInterrupt:\n print('Exiting...')\n sys.exit()\n os.mkdir(output_path)\n logging.info('Exit: _create_output_directory')\n return output_path\n\n\ndef _create_answer_key(output_path):\n logging.info('Enter: _create_answer_key')\n global answer_key\n global scenario_one\n global scenario_two\n scenario_one_latency_data = {}\n if os.path.exists(os.path.join(scenario_one, SCNEARIO_ONE_DATA_FILE)):\n with open(os.path.join(scenario_one, SCNEARIO_ONE_DATA_FILE)\n ) as output_data:\n scenario_one_latency_data[SCENARIO_ONE_DATA_FILE_KEY] = yaml.load(\n output_data)\n scenario_two_latency_data = {}\n if os.path.exists(os.path.join(scenario_two, SCENARIO_TWO_DATA_FILE)):\n with open(os.path.join(scenario_two, SCENARIO_TWO_DATA_FILE)\n ) as output_data:\n scenario_two_latency_data[SCENARIO_TWO_DATA_FILE_KEY] = yaml.load(\n output_data)\n with open(os.path.join(output_path, ANSWER_KEY_NAME), 'w'\n ) as answer_key_yml:\n yaml.dump(scenario_one_latency_data, answer_key_yml,\n default_flow_style=False)\n yaml.dump(scenario_two_latency_data, answer_key_yml,\n default_flow_style=False)\n for question in answer_key:\n yaml_dict = {}\n Key = str(ANSWER_KEY_QUESTION_KEY + str(question.question_num))\n yaml_dict[Key] = {X_ANSWER_KEY: question.x_answer_alpha,\n A_VALUE_KEY: question.A_value, B_VALUE_KEY: question.B_value}\n yaml.dump(yaml_dict, answer_key_yml, default_flow_style=False)\n logging.info('Exit: _create_answer_key')\n\n\ndef _create_temp_dir(root_directory, scenario_one, scenario_two):\n logging.info('Enter: _create_temp_dir')\n adjusted_file_path = os.path.join(root_directory, ADJUSTED_AUDIO_SUBDIR)\n scenario_one_temp = os.path.join(adjusted_file_path, SCENARIO_ONE_SUBDIR)\n scenario_two_temp = os.path.join(adjusted_file_path, SCENARIO_TWO_SUBDIR)\n try:\n os.mkdir(adjusted_file_path)\n except FileExistsError:\n print(\n 'To properly create ABX tests, the audio files are modified so audio begins play at the same time'\n )\n print(\n \"In order to do this, a new directory called 'adjusted_audio' is temproarily created to hold the adjusted audio.\"\n )\n input(\n 'This directory already exists. Press enter to remove and continue or CTRL-C to quit'\n )\n shutil.rmtree(adjusted_file_path)\n os.mkdir(adjusted_file_path)\n shutil.copytree(scenario_one, scenario_one_temp)\n shutil.copytree(scenario_two, scenario_two_temp)\n logging.info('Exit: _create_temp_dir')\n return adjusted_file_path, scenario_one_temp, scenario_one_temp\n\n\ndef create_A_B_X_cases(A_B_cases_zip_list, output_path):\n \"\"\"\n Method to create A_B_X testing directories and return the corresponding answer key\n An A file is chosen from either the scenario one or two with a 50/50 probability. \n The B file is then from the scenario not chosen for A. An X file is then created with a 50/50\n probability of being either a duplicate of A or B\n Parameters:\n A_B_cases_zip_list: A list containing absolute file pairs\n [[scenario_one, scenario_two]...]\n output_path: absolute file path to store testcase directory \n\n Returns:\n None\n \"\"\"\n logging.info('Enter: create_A_B_X_cases ')\n global scenario_one\n global scenario_two\n global answer_key\n for case_num, case in enumerate(A_B_cases_zip_list):\n if case_num > MAX_CASE_NUM:\n logging.info(\n 'The amount of cases has exceeded 25. Please note that the accompanying excel sheet only has 25 answer slots and that it will need to be restructured'\n )\n print(\n 'The amount of cases has exceeded 25. 
Please note that the accompanying excel sheet only has 25 answer slots and that it will need to be restructured'\n )\n test_case_path = os.path.join(output_path, str(case_num))\n try:\n os.mkdir(test_case_path)\n except FileExistsError:\n logging.debug(\n 'Could not create test case directory at {} - encountered FileExistsError'\n .format(test_case_path))\n print(\n 'Could not create test case directory at {} - encountered FileExistsError'\n .format(test_case_path))\n sys.exit()\n switch_A_B = random.randint(0, 1)\n x_answer = random.randint(0, 1)\n if switch_A_B:\n cmd_command_copy_a = WNDWS_COPY_CMD + ' ' + case[1\n ] + ' ' + os.path.join(test_case_path, A_CASE_NAME + str(\n case_num) + AUDIO_TYPE)\n os.system(cmd_command_copy_a)\n cmd_command_copy_b = WNDWS_COPY_CMD + ' ' + case[0\n ] + ' ' + os.path.join(test_case_path, B_CASE_NAME + str(\n case_num) + AUDIO_TYPE)\n os.system(cmd_command_copy_b)\n if x_answer == 1:\n x_answer_alpha = USER_ANSWER_CASE_A\n cmd_command_copy_a = WNDWS_COPY_CMD + ' ' + case[1\n ] + ' ' + os.path.join(test_case_path, X_CASE_NAME +\n str(case_num) + AUDIO_TYPE)\n os.system(cmd_command_copy_a)\n if x_answer == 0:\n x_answer_alpha = USER_ANSWER_CASE_B\n cmd_command_copy_b = WNDWS_COPY_CMD + ' ' + case[0\n ] + ' ' + os.path.join(test_case_path, X_CASE_NAME +\n str(case_num) + AUDIO_TYPE)\n os.system(cmd_command_copy_b)\n A_value = ANSWER_KEY_SCENARIO_TWO\n B_value = ANSWER_KEY_SCENARIO_ONE\n else:\n cmd_command_copy_a = WNDWS_COPY_CMD + ' ' + case[0\n ] + ' ' + os.path.join(test_case_path, A_CASE_NAME + str(\n case_num) + AUDIO_TYPE)\n os.system(cmd_command_copy_a)\n cmd_command_copy_b = WNDWS_COPY_CMD + ' ' + case[1\n ] + ' ' + os.path.join(test_case_path, B_CASE_NAME + str(\n case_num) + AUDIO_TYPE)\n os.system(cmd_command_copy_b)\n if x_answer == 0:\n x_answer_alpha = USER_ANSWER_CASE_A\n cmd_command_copy_a = WNDWS_COPY_CMD + ' ' + case[0\n ] + ' ' + os.path.join(test_case_path, X_CASE_NAME +\n str(case_num) + AUDIO_TYPE)\n os.system(cmd_command_copy_a)\n if x_answer == 1:\n x_answer_alpha = USER_ANSWER_CASE_B\n cmd_command_copy_b = WNDWS_COPY_CMD + ' ' + case[1\n ] + ' ' + os.path.join(test_case_path, X_CASE_NAME +\n str(case_num) + AUDIO_TYPE)\n os.system(cmd_command_copy_b)\n A_value = ANSWER_KEY_SCENARIO_ONE\n B_value = ANSWER_KEY_SCENARIO_TWO\n question_info = Answer(case_num, x_answer_alpha=x_answer_alpha,\n A_value=A_value, B_value=B_value)\n answer_key.append(question_info)\n logging.info('Exit: create_A_B_X_cases')\n\n\ndef create_manual_tests():\n logging.info('Enter: create_manual_tests')\n global root_directory\n scenario_one, scenario_two, output_base_path = _collect_locations()\n output_path = _create_output_directory(output_base_path)\n if os.path.exists(os.path.join(output_path, ANSWER_KEY_NAME)):\n input('An answer_key.yml file already exists at - ' + output_path +\n ' - this file will be deleted. Press enter if this is okay of CNTRL-C to exit'\n )\n os.remove(os.path.join(output_path, ANSWER_KEY_NAME))\n adjusted_file_path, scenario_one_temp, scenario_two_temp = (\n _create_temp_dir(root_directory, scenario_one, scenario_two))\n print(\n 'Please note that to create the manual tests, the latency of each file must be calculated. This takes roughly 30 minutes per 25 recordings. 
Press Enter to continue.'\n )\n rate_log, correlation_sample_log, correlation_coefficient_log = (aa.\n find_latency_values(scenario_one_temp, scenario_two_temp))\n file_zip = aa.pair_directories(scenario_one_temp, scenario_two_temp)\n aa.adjust_files(correlation_sample_log, rate_log, file_zip)\n create_A_B_X_cases(file_zip, output_path)\n _cleanup_scenarios(adjusted_file_path)\n _create_answer_key(output_base_path)\n print('done')\n logging.info('Exit: create_manual_tests')\n\n\nif __name__ == '__main__':\n logging.basicConfig(filename='manualtest.log', level=logging.INFO,\n format=\n '%(asctime)s %(levelname)s %(module)s line: %(lineno)d, %(message)s')\n logging.info('Enter: main')\n create_manual_tests()\n logging.info('Exit: main')\n",
"step-5": "\nimport os \nimport yaml\nimport sys\nimport random\nimport shutil\nimport openpyxl\nimport yaml\nimport audioanalysis as aa\nimport numpy as np\nimport argparse\nimport logging\n\"\"\"\nmanualtest.py\n\nScript to create a listeneing test. The output, test \ncase directory and answer_key.yml file, can be \nfound in the root directory.\n\nmanual test creation\nresponsibilities:\n1) directory of directories that each contain two files to compare(a,b) and a duplicated one (x)\n example scenarios to test:\n JITTER_BUFFER_INIT_X VS. JITTER_BUFFER_INIT_Y\n dev version vs dev version\n need to come up with more\n2) an output yaml file labeled answer_key.yml that says which (a,b) is x \n\n\"\"\"\n# command line parse\nhelp_string = (\"\\nPlease note that manual_test.py makes 3 assumptions about \"\n \"these file paths. \" \n \"\\n1.Both scenarios contain the same amount of wav files.\"\n \"\\n2.The wav files in both scenarios have a one to one \"\n \"correspondence between each other. Each test case contains a \"\n \"pair of files, one from each scenario. This pair is made by \"\n \"matching files between scenarios with the same names 3.\"\n \"There are no more than 25 audio file pairs\")\n\nparser = argparse.ArgumentParser(description=\"Script to create a listening test. The output, test case directory and answer_key.yml file, can be found in the root directory.\"+help_string)\nparser.add_argument(\"-o\", dest=\"output_base_path\", default= os.getcwd(),help=\"(optional)Absolute file path to locatin to save test directory and answer key (default: root directory)\")\nparser.add_argument(\"scenario_one\", help=\"Absolute file path to location of first scenario. Required\")\nparser.add_argument(\"scenario_two\", help=\"Absolute file path to location of second scenario. Required\")\nargs=parser.parse_args()\n\n# globals\noutput_base_path=args.output_base_path\nroot_directory = os.getcwd()\n# first scenario\nscenario_one = args.scenario_one\nscenario_one_latency=0\nscenario_one_correlation_coefficient=0\n# second scenario\nscenario_two = args.scenario_two\nscenario_two_latency=0\nscenario_two_correlation_coefficient=0\noutput_path=\"\"\nanswer_key=[]\n\nUSER_ANSWER_KEY=\"user_answer\"\nUSER_PREFERENCE_KEY=\"user_preference_weight\"\nUSER_X_VALUE_KEY=\"user_X_value\"\nUSER_CONFIDENCE_KEY=\"user_answer_confidence\"\nX_ANSWER_KEY=\"x_answer_alpha\"\nA_VALUE_KEY=\"A_value\"\nB_VALUE_KEY=\"B_value\"\nTESTCASES_SUBDIR=\"testcases\"\nA_CASE_NAME=\"A_\"\nB_CASE_NAME=\"B_\"\nX_CASE_NAME=\"X_\"\nWNDWS_COPY_CMD=\"copy\"\nAUDIO_TYPE=\".wav\"\nSCNEARIO_ONE_DATA_FILE=\"output_data.yml\"\nSCENARIO_ONE_DATA_FILE_KEY=\"Scenario One\"\nSCENARIO_TWO_DATA_FILE=\"output_data.yml\"\nSCENARIO_TWO_DATA_FILE_KEY=\"Scenario Two\"\nANSWER_KEY_NAME=\"answer_key.yml\"\nUSER_ANSWER_CASE_A=\"A\"\nUSER_ANSWER_CASE_B=\"B\"\nANSWER_KEY_SCENARIO_ONE=\"scenario one\"\nANSWER_KEY_SCENARIO_TWO=\"scenario two\"\nANSWER_KEY_QUESTION_KEY=\"Q_\"\nMAX_CASE_NUM=24\nADJUSTED_AUDIO_SUBDIR=\"adjusted_audio\"\nSCENARIO_ONE_SUBDIR=\"scenario_one\"\nSCENARIO_TWO_SUBDIR=\"scenario_two\"\n\nclass Answer():\n \"\"\"\n Wrapper for A_B_X directory containing all associated attributes. \n Populate all fields of the class and call grade to determine if the \n question was correct\n **user_answers\n user_answer either \"A\" or \"B\" indicating which file sounded better\n user_preference_weight numeric value between 1-5 indicating how much better the \n preferred value was. 
5 being significant and 1 minimal\n user_X_value either \"A\" or \"B\" denoting which file the user believes\n X was a duplicate of \n user_answer_confidence numeric value between 1-5 indicating how easy it was to \n distinguish between A and B and pick X\n x_answer_alpha the answer to which file X was a duplicate of. Either \n \"A\" or \"B\"\n A_value String field denoting which scenario A belonged to. Either\n scenario_one or SCENARIO_TWO_SUBDIR\n B_value String field denoting which scenario B belonged to. Either\n scenario_one or SCENARIO_TWO_SUBDIR\n correct Call self.grade to populate this field. Compares user_X_value\n and x_answer_alpha to determine if question was correct. \n Populates with boolean\n \"\"\"\n def __init__(self, question_num, **user_answers):\n self.question_num=question_num\n self.correct = None\n try:\n self.user_answer=user_answers[USER_ANSWER_KEY]\n except KeyError:\n self.user_answer=None\n try:\n self.user_preference_weight=user_answers[USER_PREFERENCE_KEY]\n except KeyError: \n self.user_preference_weight=None\n try:\n self.user_X_value=user_answers[USER_X_VALUE_KEY]\n except KeyError:\n self.user_X_value=None\n try:\n self.user_answer_confidence=user_answers[USER_CONFIDENCE_KEY]\n except KeyError:\n self.user_answer_confidence=None\n try:\n self.x_answer_alpha=user_answers[X_ANSWER_KEY]\n except KeyError:\n self.x_answer_alpha=None\n try: \n self.A_value=user_answers[A_VALUE_KEY]\n except KeyError:\n self.A_value=None \n try:\n self.B_value=user_answers[B_VALUE_KEY]\n except KeyError:\n self.B_value=None\n\n def grade(self):\n if self.x_answer_alpha==self.user_X_value:\n self.correct=True\n else:\n self.correct=False\n\n\ndef _collect_locations():\n # Method to pair all the files for comparison in the two scenarios the user has elected to compare \n logging.info(\"Enter: _collect_locations\")\n global scenario_one\n global scenario_two\n global output_base_path\n if not os.path.exists(scenario_one):\n print(\"Scenario One file path does not exist. Exiting\")\n sys.exit()\n if not os.path.exists(scenario_two):\n print(\"Scenario Two file path does not exist. Exiting\")\n sys.exit()\n print(\"Creating listening test...\")\n logging.info(\"Exit: _collect_locations\")\n return scenario_one, scenario_two, output_base_path\n \n\ndef _cleanup_scenarios(adjusted_file_path):\n # Delete the adjusted audio created for this module\n try:\n shutil.rmtree(adjusted_file_path)\n except:\n print(\"The system could not delete the temporary audio files that \"\n \"were created for this test. This directory can be removed \"\n \"at {}\".format(adjusted_file_path))\n\n\ndef _create_output_directory(output_base_path):\n # From the base path create a testcases subdirectory\n # Return the subdirectory full path\n logging.info(\"Enter: _create_output_directory\")\n global output_path \n output_path = os.path.join(output_base_path, TESTCASES_SUBDIR)\n if os.path.exists(output_path):\n try:\n input(\"Please note there is already a Testcases directory at - {} .\\nPress enter to continue and remove it. Press CNTRL-C to exit.\".format(output_path))\n shutil.rmtree(output_path)\n except PermissionError:\n print(\"There is a test directory located in the same location as the test directory location you specified\")\n print(\"It cannot be removed becase another process is still using it. 
Please close the process or delete yourself.\")\n sys.exit()\n except KeyboardInterrupt:\n print(\"Exiting...\")\n sys.exit()\n os.mkdir(output_path)\n logging.info(\"Exit: _create_output_directory\")\n return output_path\n\n\ndef _create_answer_key(output_path):\n # Parse the data file from scenario one and two if it exists and add too answer key\n # Dump data from processes to ANSWER_KEY_NAME in output_path\n logging.info(\"Enter: _create_answer_key\")\n global answer_key\n global scenario_one\n global scenario_two\n scenario_one_latency_data={}\n if os.path.exists(os.path.join(scenario_one, SCNEARIO_ONE_DATA_FILE)):\n with open(os.path.join(scenario_one, SCNEARIO_ONE_DATA_FILE)) as output_data:\n scenario_one_latency_data[SCENARIO_ONE_DATA_FILE_KEY]=yaml.load(output_data)\n scenario_two_latency_data={}\n if os.path.exists(os.path.join(scenario_two, SCENARIO_TWO_DATA_FILE)):\n with open(os.path.join(scenario_two, SCENARIO_TWO_DATA_FILE)) as output_data:\n scenario_two_latency_data[SCENARIO_TWO_DATA_FILE_KEY]=yaml.load(output_data)\n\n with open(os.path.join(output_path, ANSWER_KEY_NAME), \"w\") as answer_key_yml:\n yaml.dump(scenario_one_latency_data, answer_key_yml, default_flow_style=False)\n yaml.dump(scenario_two_latency_data, answer_key_yml, default_flow_style=False)\n for question in answer_key:\n yaml_dict={}\n Key = str(ANSWER_KEY_QUESTION_KEY+str(question.question_num))\n yaml_dict[Key] = {X_ANSWER_KEY: question.x_answer_alpha,A_VALUE_KEY: question.A_value,B_VALUE_KEY: question.B_value}\n yaml.dump(yaml_dict, answer_key_yml, default_flow_style=False)\n logging.info(\"Exit: _create_answer_key\")\n\n\ndef _create_temp_dir(root_directory, scenario_one, scenario_two):\n logging.info(\"Enter: _create_temp_dir\")\n # Will create exact copies of both directories specified so files may be altered later\n adjusted_file_path = os.path.join(root_directory, ADJUSTED_AUDIO_SUBDIR)\n scenario_one_temp = os.path.join(adjusted_file_path, SCENARIO_ONE_SUBDIR)\n scenario_two_temp = os.path.join(adjusted_file_path, SCENARIO_TWO_SUBDIR)\n try:\n os.mkdir(adjusted_file_path)\n except FileExistsError:\n print(\"To properly create ABX tests, the audio files are modified so audio begins play at the same time\")\n print(\"In order to do this, a new directory called 'adjusted_audio' is temproarily created to hold the adjusted audio.\")\n input(\"This directory already exists. Press enter to remove and continue or CTRL-C to quit\")\n shutil.rmtree(adjusted_file_path)\n os.mkdir(adjusted_file_path)\n shutil.copytree(scenario_one, scenario_one_temp)\n shutil.copytree(scenario_two, scenario_two_temp)\n logging.info(\"Exit: _create_temp_dir\")\n return adjusted_file_path, scenario_one_temp, scenario_one_temp\n\n\ndef create_A_B_X_cases(A_B_cases_zip_list, output_path):\n \"\"\"\n Method to create A_B_X testing directories and return the corresponding answer key\n An A file is chosen from either the scenario one or two with a 50/50 probability. \n The B file is then from the scenario not chosen for A. 
An X file is then created with a 50/50\n probability of being either a duplicate of A or B\n Parameters:\n A_B_cases_zip_list: A list containing absolute file pairs\n [[scenario_one, scenario_two]...]\n output_path: absolute file path to store testcase directory \n\n Returns:\n None\n \"\"\"\n logging.info(\"Enter: create_A_B_X_cases \")\n global scenario_one\n global scenario_two\n global answer_key\n # create listening directories and record answer to each in answer_log\n for case_num, case in enumerate(A_B_cases_zip_list):\n #MRR I really don't like silently dropping audio pairs. Please just create multiple ABX tests, each with up to 25. Up to you whether you have 3 of 25 and one of 21 or 4 of 24.\n if case_num > MAX_CASE_NUM:\n logging.info(\"The amount of cases has exceeded 25. Please note that \"\n \"the accompanying excel sheet only has 25 answer slots and that it will need to \"\n \"be restructured\") \n print(\"The amount of cases has exceeded 25. Please note that \"\n \"the accompanying excel sheet only has 25 answer slots and that it will need to \"\n \"be restructured\")\n test_case_path = os.path.join(output_path, str(case_num))\n try:\n os.mkdir(test_case_path)\n except FileExistsError:\n logging.debug(\"Could not create test case directory at {} - encountered FileExistsError\".format(test_case_path))\n print(\"Could not create test case directory at {} - encountered FileExistsError\".format(test_case_path))\n sys.exit()\n switch_A_B = random.randint(0,1) #If one then A and B are switched. This is so scenario one and two alternate thier A and B positions roughly 50% of the time\n # add the wav files\n # pick one to duplicate\n x_answer=random.randint(0,1)\n if switch_A_B:\n # add A\n cmd_command_copy_a = WNDWS_COPY_CMD+\" \" + case[1] + \" \"+ os.path.join(test_case_path, A_CASE_NAME+str(case_num)+AUDIO_TYPE)\n os.system(cmd_command_copy_a)\n # add B \n cmd_command_copy_b = WNDWS_COPY_CMD+\" \" + case[0] + \" \"+ os.path.join(test_case_path, B_CASE_NAME+str(case_num)+AUDIO_TYPE)\n os.system(cmd_command_copy_b)\n # add X\n if x_answer==1:\n x_answer_alpha=USER_ANSWER_CASE_A\n cmd_command_copy_a = WNDWS_COPY_CMD+\" \" + case[1] + \" \"+ os.path.join(test_case_path, X_CASE_NAME+str(case_num)+AUDIO_TYPE)\n os.system(cmd_command_copy_a)\n if x_answer==0:\n x_answer_alpha=USER_ANSWER_CASE_B\n cmd_command_copy_b = WNDWS_COPY_CMD+\" \" + case[0] + \" \"+ os.path.join(test_case_path, X_CASE_NAME+str(case_num)+AUDIO_TYPE)\n os.system(cmd_command_copy_b)\n A_value=ANSWER_KEY_SCENARIO_TWO\n B_value=ANSWER_KEY_SCENARIO_ONE\n else:\n # add A\n cmd_command_copy_a = WNDWS_COPY_CMD+\" \" + case[0] + \" \"+ os.path.join(test_case_path, A_CASE_NAME+str(case_num)+AUDIO_TYPE)\n os.system(cmd_command_copy_a)\n # add B \n cmd_command_copy_b = WNDWS_COPY_CMD+\" \" + case[1] + \" \"+ os.path.join(test_case_path, B_CASE_NAME+str(case_num)+AUDIO_TYPE)\n os.system(cmd_command_copy_b)\n # add X\n if x_answer==0:\n x_answer_alpha=USER_ANSWER_CASE_A\n cmd_command_copy_a = WNDWS_COPY_CMD+\" \" + case[0] + \" \"+ os.path.join(test_case_path, X_CASE_NAME+str(case_num)+AUDIO_TYPE)\n os.system(cmd_command_copy_a)\n if x_answer==1:\n x_answer_alpha=USER_ANSWER_CASE_B\n cmd_command_copy_b = WNDWS_COPY_CMD+\" \" + case[1] + \" \"+ os.path.join(test_case_path, X_CASE_NAME+str(case_num)+AUDIO_TYPE)\n os.system(cmd_command_copy_b)\n A_value=ANSWER_KEY_SCENARIO_ONE\n B_value=ANSWER_KEY_SCENARIO_TWO\n question_info = Answer(case_num, x_answer_alpha=x_answer_alpha,A_value=A_value, B_value=B_value)\n 
answer_key.append(question_info)\n logging.info(\"Exit: create_A_B_X_cases\")\n \n\ndef create_manual_tests():\n logging.info(\"Enter: create_manual_tests\")\n global root_directory\n scenario_one, scenario_two, output_base_path=_collect_locations()\n output_path = _create_output_directory(output_base_path)\n # Confirm another answer key does not already exist\n if os.path.exists(os.path.join(output_path, ANSWER_KEY_NAME)):\n input(\"An answer_key.yml file already exists at - \"+output_path+\" - this file will be deleted. Press enter if this is okay of CNTRL-C to exit\")\n os.remove(os.path.join(output_path, ANSWER_KEY_NAME))\n adjusted_file_path, scenario_one_temp, scenario_two_temp= _create_temp_dir(root_directory, scenario_one, scenario_two)\n print(\"Please note that to create the manual tests, the latency of each file must be calculated. This takes roughly 30 minutes per 25 recordings. Press Enter to continue.\")\n rate_log, correlation_sample_log, correlation_coefficient_log = aa.find_latency_values(scenario_one_temp, scenario_two_temp)\n # Negative value indicates that scenario one signal was delayed. Positive value indicates that scenario two signal was delayed\n file_zip = aa.pair_directories(scenario_one_temp, scenario_two_temp)\n aa.adjust_files(correlation_sample_log, rate_log, file_zip)\n create_A_B_X_cases(file_zip, output_path)\n _cleanup_scenarios(adjusted_file_path)\n _create_answer_key(output_base_path)\n print(\"done\")\n logging.info(\"Exit: create_manual_tests\")\n\n\nif __name__ ==\"__main__\":\n logging.basicConfig(filename=\"manualtest.log\", level=logging.INFO, format=\"%(asctime)s %(levelname)s %(module)s line: %(lineno)d, %(message)s\")\n logging.info(\"Enter: main\")\n create_manual_tests()\n logging.info(\"Exit: main\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n",
"step-ids": [
4,
10,
11,
13,
15
]
}
|
[
4,
10,
11,
13,
15
] |
"""
Unpacks and preprocesses all of the data from the tarball of partial data,
which includes the flats and dark frames.
"""
import tools.unpack
import util.files
import util.dark
import util.flat
def main():
tools.unpack.main()
util.files.main()
util.dark.main()
util.flat.main()
if __name__ == '__main__':
main()
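
# The pipeline assumes each imported module exposes a zero-argument main().
# A conforming module might look like this (a sketch; the real util modules
# are not included in this file):
#
#     # util/flat.py
#     def main():
#         ...  # e.g. combine the unpacked flat frames into a master flat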
|
normal
|
{
"blob_id": "3667651697ac1c093d48fe2c4baa4b4dbdf20f8a",
"index": 6832,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n tools.unpack.main()\n util.files.main()\n util.dark.main()\n util.flat.main()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n tools.unpack.main()\n util.files.main()\n util.dark.main()\n util.flat.main()\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nimport tools.unpack\nimport util.files\nimport util.dark\nimport util.flat\n\n\ndef main():\n tools.unpack.main()\n util.files.main()\n util.dark.main()\n util.flat.main()\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\"\nUnpacks and preprocesses all of the data from the tarball of partial data,\nwhich includes the flats and dark frames.\n\"\"\"\n\nimport tools.unpack\nimport util.files\nimport util.dark\nimport util.flat\n\ndef main():\n tools.unpack.main()\n util.files.main()\n util.dark.main()\n util.flat.main()\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy as np
import matplotlib.pyplot as plt
# image data
a = np.array([0.1,0.2,0.3,
0.4,0.5,0.6,
0.7,0.8,0.9]).reshape(3,3)
plt.imshow(a,interpolation='nearest',cmap='bone',origin='upper')
plt.colorbar()
plt.xticks(())
plt.yticks(())
plt.show()
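
# Possible extension (not in the original script): persist the figure to disk
# rather than only displaying it interactively.
# plt.savefig('image_data.png', dpi=150, bbox_inches='tight')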
|
normal
|
{
"blob_id": "f01f97f8998134f5e4b11232d1c5d341349c3c79",
"index": 4074,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nplt.imshow(a, interpolation='nearest', cmap='bone', origin='upper')\nplt.colorbar()\nplt.xticks(())\nplt.yticks(())\nplt.show()\n",
"step-3": "<mask token>\na = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]).reshape(3, 3)\nplt.imshow(a, interpolation='nearest', cmap='bone', origin='upper')\nplt.colorbar()\nplt.xticks(())\nplt.yticks(())\nplt.show()\n",
"step-4": "import numpy as np\nimport matplotlib.pyplot as plt\na = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]).reshape(3, 3)\nplt.imshow(a, interpolation='nearest', cmap='bone', origin='upper')\nplt.colorbar()\nplt.xticks(())\nplt.yticks(())\nplt.show()\n",
"step-5": "import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n# image data\r\na = np.array([0.1,0.2,0.3,\r\n 0.4,0.5,0.6,\r\n 0.7,0.8,0.9]).reshape(3,3)\r\n\r\nplt.imshow(a,interpolation='nearest',cmap='bone',origin='upper')\r\nplt.colorbar()\r\n\r\n\r\nplt.xticks(())\r\nplt.yticks(())\r\nplt.show()\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python
import sys
import requests
import numpy as np
import astropy.table as at
if __name__=='__main__':
targets = at.Table.read('targets_LCO2018A_002.txt', format='ascii')
headers={'Authorization': 'Token {}'.format(sys.argv[1])}
for x in targets['targetname']:
obs = requests.get('https://observe.lco.global/api/userrequests/?proposal=LCO2018A-002&title={}'.format(x.split('.')[0]),headers=headers).json()
for y in obs['results']:
print(y['group_id'])
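
# Sketch of a possible follow-up (this assumes the endpoint paginates with a
# DRF-style 'next' URL, which the script above does not verify):
#
#     while obs.get('next'):
#         obs = requests.get(obs['next'], headers=headers).json()
#         for y in obs['results']:
#             print(y['group_id'])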
|
normal
|
{
"blob_id": "705bc651e7d12769bcf5994168fe6685a6bae05d",
"index": 5983,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n targets = at.Table.read('targets_LCO2018A_002.txt', format='ascii')\n headers = {'Authorization': 'Token {}'.format(sys.argv[1])}\n for x in targets['targetname']:\n obs = requests.get(\n 'https://observe.lco.global/api/userrequests/?proposal=LCO2018A-002&title={}'\n .format(x.split('.')[0]), headers=headers).json()\n for y in obs['results']:\n print(y['group_id'])\n",
"step-3": "import sys\nimport requests\nimport numpy as np\nimport astropy.table as at\nif __name__ == '__main__':\n targets = at.Table.read('targets_LCO2018A_002.txt', format='ascii')\n headers = {'Authorization': 'Token {}'.format(sys.argv[1])}\n for x in targets['targetname']:\n obs = requests.get(\n 'https://observe.lco.global/api/userrequests/?proposal=LCO2018A-002&title={}'\n .format(x.split('.')[0]), headers=headers).json()\n for y in obs['results']:\n print(y['group_id'])\n",
"step-4": "#!/usr/bin/env python\nimport sys\nimport requests\nimport numpy as np\nimport astropy.table as at\n\nif __name__=='__main__':\n targets = at.Table.read('targets_LCO2018A_002.txt', format='ascii')\n headers={'Authorization': 'Token {}'.format(sys.argv[1])}\n for x in targets['targetname']:\n obs = requests.get('https://observe.lco.global/api/userrequests/?proposal=LCO2018A-002&title={}'.format(x.split('.')[0]),headers=headers).json()\n for y in obs['results']:\n print(y['group_id'])\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/python3
"""
This module adds a better setattr function
"""
def add_attribute(obj, name, value):
""" add an attribute to a class if possible"""
if hasattr(obj, "__dict__"):
setattr(obj, name, value)
else:
raise TypeError("can't add new attribute")
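

# Illustrative self-test (a sketch, not part of the original module): plain
# instances carry a writable __dict__, while built-ins such as ints do not
# and therefore hit the TypeError branch.
if __name__ == "__main__":
    class Demo:
        """Empty class whose instances have a __dict__."""

    d = Demo()
    add_attribute(d, "color", "red")
    print(d.color)  # red
    try:
        add_attribute(42, "color", "red")
    except TypeError as err:
        print(err)  # can't add new attribute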
|
normal
|
{
"blob_id": "bee7f3acdb103f3c20b6149407854c83ad367a6b",
"index": 2621,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef add_attribute(obj, name, value):\n \"\"\" add an attribute to a class if possible\"\"\"\n if hasattr(obj, '__dict__'):\n setattr(obj, name, value)\n else:\n raise TypeError(\"can't add new attribute\")\n",
"step-3": "#!/usr/bin/python3\n\"\"\"\nThis module add a better setattr function\n\"\"\"\n\n\ndef add_attribute(obj, name, value):\n \"\"\" add an attribute to a class if possible\"\"\"\n if hasattr(obj, \"__dict__\"):\n setattr(obj, name, value)\n else:\n raise TypeError(\"can't add new attribute\")\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from vmgCommanderBase import CommanderBase
from vmgInstallerApt import InstallerApt
from vmgInstallerYum import InstallerYum
from vmgConfigLinux import ConfigLinux
from runCommands import *
import shutil
import os
import time
from vmgLogging import *
from writeFormat import *
from vmgControlVmware import *
from vmgUtils import *
""" Functions to write lines in a .vmx file. """
log = logging.getLogger("vmgen.vmgCommanderLxc")
""" The distribution used for container creation parameters. """
distro = {
"debian":{
"vm":"/home/vmgen/vmware/Debian (lxc)/Debian (lxc).vmx",
"hostname":"root@debian-lxc",
"script":"my-lxc-debian.sh",
"scripts-folder":"../scripts-lxc/debian/"},
"fedora":{
"vm":"/home/vmgen/vmware/Fedora 64-bit/Fedora 64-bit.vmx",
"hostname":"root@fedora-lxc",
"script":"my-lxc-fedora.sh",
"scripts-folder":"../scripts-lxc/fedora/"}
}
installer = {
'debian' : InstallerApt,
'ubuntu' : InstallerApt,
'fedora' : InstallerYum
}
""" Container operating system parameters. """
os_params = {
"fedora-64":{
"os":"fedora",
"version":"14",
"arch":"amd64"},
"fedora":{
"os":"fedora",
"version":"14",
"arch":"x86"},
"debian5-64":{
"os":"debian",
"version":"lenny",
"arch":"amd64"},
"debian5":{
"os":"debian",
"version":"lenny",
"arch":"x86"},
}
""" The path in the VMware machine where the container is created. """
path = "/lxc"
class CommanderLxc(CommanderBase):
def setupHardware(self):
log.info("Creating the hardware configuration...")
self.os = self.data.getSection("hardware").get("os")
self.id = self.data.getSection("hardware").get("vm_id")
# extract the os parameters from the config file
os_type = os_params[self.os]["os"]
ver = os_params[self.os]["version"]
arch = os_params[self.os]["arch"]
self.vm = distro[os_type]["vm"]
self.host = distro[os_type]["hostname"]
folder = distro[os_type]["scripts-folder"]
script = distro[os_type]["script"]
self.config = path + "/" + self.id + "/" + "config." + self.id
		self.rootfs = path + "/" + self.id + "/" + "rootfs." + self.id
self.fstab = path + "/" + self.id + "/" + "fstab." + self.id
# set the user and host used for the SSH connection
setUserHost(self.host)
# power on the auxiliary VMware machine
log.info("\tStarting the virtual machine...")
try_power_on_vm(self.vm)
# set default root password
passwd = "pass"
#self.data.getSection("config").get("root_passwd")
# copy the needed scripts to the virtual machine
log.info("\tCopying the scripts to the virtual machine...")
files = os.listdir(folder)
paths = [os.path.join(folder, f) for f in files]
copyFilesToVM(paths, self.host)
for f in files:
executeCommandSSH("chmod a+x " + f)
# create a temp file containing lines to be appended to the container
# config file
log.info("\tFilling up the network section in the config file...")
temp_file = "eth.tmp"
with open(temp_file, "w") as f:
log.info("\Setting memory and CPUs...")
section = self.data.getSection("hardware")
ram = section.get("ram") + "M"
num_cpu = int(section.get("num_cpu"))
if num_cpu == 1:
cpus = "0"
else:
cpus = "0" + "-" + str(num_cpu - 1)
# TODO: the kernel needs support for the memory controller
writeOption(f, "#lxc.cgroup.memory.limit_in_bytes", ram, False)
writeOption(f, "lxc.cgroup.cpuset.cpus", cpus, False)
# create network interfaces
log.info("\tCreating the network interfaces...")
self.eth_list = getSortedValues(section.get("eths").data)
eth_config = getSortedValues(
self.data.getSection("network").get("eths").data)
for i, eth_pair in enumerate(zip(self.eth_list, eth_config)):
i = str(i)
eth, eth_c = eth_pair
eth_name = eth.get("name")
writeOption(f, "lxc.network.type", "veth", False)
writeOption(f, "lxc.network.link", "br0", False)
writeOption(f, "lxc.network.name", eth_name, False)
writeOption(f, "lxc.network.mtu", "1500", False)
# set IP address
ip_type = eth_c.get("type")
if ip_type == "static":
ip = eth_c.get("address")
mask = getNetmaskCIDR(eth_c.get("network"))
else:
ip = "0.0.0.0"
mask = ""
writeOption(f, "lxc.network.ipv4", ip+mask, False)
if eth.contains("connected"):
writeOption(f, "lxc.network.flags", "up", False)
# set MAC address, if present
mac = eth.get("hw_address")
if mac:
writeOption(f, "lxc.network.hwaddr", mac)
# copy the temp file to the virtual machine
copyFileToVM(temp_file, self.host)
os.remove(temp_file)
# run the script on the virtual machine, to create the container
log.info("\tRun the container creation script...")
executeCommandSSH("./" + script + " " + path + " " + self.id + " " +
ver + " " + arch + " " + passwd)
def setupOperatingSystem(self):
pass
def startVM(self):
""" Start the container. """
log.info("\tStarting the container...")
executeCommandSSH("pushd " + path)
executeCommandSSH("lxc-create" + " -n " + self.id + " -f " + self.config)
# executeCommandSSH("lxc-start" + " -n " + self.id + " -f " + self.config)
def shutdownVM(self):
""" Shutdown the container and the virtual machine. """
log.info("\tStopping the container...")
# executeCommandSSH("lxc-stop" + " -n " + self.id)
executeCommandSSH("lxc-destroy" + " -n " + self.id)
executeCommandSSH("shutdown -h now")
def connectToVM(self):
print "\nEstablishing connection to the VM..."
def disconnectFromVM(self):
print "\nTerminating connection to the VM..."
def setupServices(self):
print "\nInstalling services..."
section = self.data.getSection("services")
self.installPrograms(section)
def setupDeveloperTools(self):
print "\nInstalling developer tools..."
section = self.data.getSection("devel")
self.installPrograms(section)
def setupGuiTools(self):
print "\nInstalling GUI tools..."
section = self.data.getSection("gui")
self.installPrograms(section)
def createArchive(self):
executeCommandSSH("cd " + path)
files = self.config + " " + self.fstab + " " + self.rootfs
arch_name = self.id + ".zip"
executeCommandSSH("zip -r " + arch_name + " " + files)
copyFileFromVM(path + "/" + arch_name, "./", self.host)
return [arch_name, ""]
def getModuleName(self):
return "lxc"
def getConfigInstance(self):
return ConfigLinux(self.data, self.communicator)
def getInstallerInstance(self):
vm_os = self.data.getSection("hardware").get("os")
for k in installer.keys():
if str(k) in vm_os:
return installer[k](self.communicator)
return None
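
# Hypothetical driver sketch (CommanderBase's constructor is not shown in this
# file, so the (data, communicator) arguments below are assumptions):
#
#     commander = CommanderLxc(data, communicator)
#     commander.setupHardware()
#     commander.startVM()
#     commander.setupServices()
#     arch_name, _ = commander.createArchive()
#     commander.shutdownVM()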
|
normal
|
{
"blob_id": "22fe07a237f2c5f531d189c07596a22df191d038",
"index": 1140,
"step-1": "from vmgCommanderBase import CommanderBase\nfrom vmgInstallerApt import InstallerApt\nfrom vmgInstallerYum import InstallerYum\nfrom vmgConfigLinux import ConfigLinux\nfrom runCommands import *\nimport shutil\nimport os\nimport time\nfrom vmgLogging import *\nfrom writeFormat import *\nfrom vmgControlVmware import *\nfrom vmgUtils import *\n\n\"\"\" Functions to write lines in a .vmx file. \"\"\"\nlog = logging.getLogger(\"vmgen.vmgCommanderLxc\")\n\n\"\"\"\tThe distribution used for container creation parameters. \"\"\"\ndistro = {\n\t\"debian\":{\n\t\t\"vm\":\"/home/vmgen/vmware/Debian (lxc)/Debian (lxc).vmx\",\n\t\t\"hostname\":\"root@debian-lxc\",\n\t\t\"script\":\"my-lxc-debian.sh\",\n\t\t\"scripts-folder\":\"../scripts-lxc/debian/\"},\n\t\"fedora\":{\n\t\t\"vm\":\"/home/vmgen/vmware/Fedora 64-bit/Fedora 64-bit.vmx\",\n\t\t\"hostname\":\"root@fedora-lxc\",\n\t\t\"script\":\"my-lxc-fedora.sh\",\n\t\t\"scripts-folder\":\"../scripts-lxc/fedora/\"}\n}\n\ninstaller = {\n\t'debian' : InstallerApt,\n\t'ubuntu' : InstallerApt,\n\t'fedora' : InstallerYum\n}\n\n\"\"\" Container operating system parameters. \"\"\"\nos_params = {\n\t\t\"fedora-64\":{\n\t\t\t\"os\":\"fedora\",\n\t\t\t\"version\":\"14\", \n\t\t\t\"arch\":\"amd64\"},\n\t\t\"fedora\":{\n\t\t\t\"os\":\"fedora\",\n\t\t\t\"version\":\"14\", \n\t\t\t\"arch\":\"x86\"},\n\t\t\"debian5-64\":{\n\t\t\t\"os\":\"debian\",\n\t\t\t\"version\":\"lenny\", \n\t\t\t\"arch\":\"amd64\"},\n\t\t\"debian5\":{\n\t\t\t\"os\":\"debian\",\n\t\t\t\"version\":\"lenny\", \n\t\t\t\"arch\":\"x86\"},\n}\n\n\"\"\"\tThe path in the VMware machine where the container is created. \"\"\"\npath = \"/lxc\"\n\nclass CommanderLxc(CommanderBase):\n\n\tdef setupHardware(self):\n\t\tlog.info(\"Creating the hardware configuration...\")\n\n\t\tself.os = self.data.getSection(\"hardware\").get(\"os\")\n\t\tself.id = self.data.getSection(\"hardware\").get(\"vm_id\")\n\n\t\t# extract the os parameters from the config file\n\t\tos_type = os_params[self.os][\"os\"]\n\t\tver = os_params[self.os][\"version\"]\n\t\tarch = os_params[self.os][\"arch\"]\n\n\t\tself.vm = distro[os_type][\"vm\"]\n\t\tself.host = distro[os_type][\"hostname\"]\n\t\tfolder = distro[os_type][\"scripts-folder\"]\n\t\tscript = distro[os_type][\"script\"]\n\n\t\tself.config = path + \"/\" + self.id + \"/\" + \"config.\" + self.id\n\t\tself.roots = path + \"/\" + self.id + \"/\" + \"rootfs.\" + self.id\n\t\tself.fstab = path + \"/\" + self.id + \"/\" + \"fstab.\" + self.id\n\n\t\t# set the user and host used for the SSH connection\n\t\tsetUserHost(self.host)\n\n\t\t# power on the auxiliary VMware machine\n\t\tlog.info(\"\\tStarting the virtual machine...\")\n\t\ttry_power_on_vm(self.vm)\n\n\t\t# set default root password\n\t\tpasswd = \"pass\" \n\t\t#self.data.getSection(\"config\").get(\"root_passwd\")\n\n\t\t# copy the needed scripts to the virtual machine\n\t\tlog.info(\"\\tCopying the scripts to the virtual machine...\")\n\t\tfiles = os.listdir(folder)\n\t\tpaths = [os.path.join(folder, f) for f in files]\n\t\tcopyFilesToVM(paths, self.host)\n\t\tfor f in files:\n\t\t\texecuteCommandSSH(\"chmod a+x \" + f)\n\n\t\t# create a temp file containing lines to be appended to the container\n\t\t# config file\n\t\tlog.info(\"\\tFilling up the network section in the config file...\")\n\t\ttemp_file = \"eth.tmp\"\n\t\twith open(temp_file, \"w\") as f:\n\t\t\tlog.info(\"\\Setting memory and CPUs...\")\n\t\t\tsection = self.data.getSection(\"hardware\")\n\t\t\tram = section.get(\"ram\") + \"M\"\n\t\t\tnum_cpu = 
int(section.get(\"num_cpu\"))\n\n\t\t\tif num_cpu == 1:\n\t\t\t\tcpus = \"0\"\n\t\t\telse:\n\t\t\t\tcpus = \"0\" + \"-\" + str(num_cpu - 1)\n\n\t\t\t# TODO: the kernel needs support for the memory controller\n\t\t\twriteOption(f, \"#lxc.cgroup.memory.limit_in_bytes\", ram, False)\n\t\t\twriteOption(f, \"lxc.cgroup.cpuset.cpus\", cpus, False)\n\n\t\t\t# create network interfaces\n\t\t\tlog.info(\"\\tCreating the network interfaces...\")\n\t\t\tself.eth_list = getSortedValues(section.get(\"eths\").data)\n\t\t\teth_config = getSortedValues(\n\t\t\t\t\tself.data.getSection(\"network\").get(\"eths\").data)\n\t\t\tfor i, eth_pair in enumerate(zip(self.eth_list, eth_config)):\n\t\t\t\ti = str(i)\n\t\t\t\teth, eth_c = eth_pair\n\n\t\t\t\teth_name = eth.get(\"name\")\n\t\t\t\twriteOption(f, \"lxc.network.type\", \"veth\", False)\n\n\t\t\t\twriteOption(f, \"lxc.network.link\", \"br0\", False)\n\n\t\t\t\twriteOption(f, \"lxc.network.name\", eth_name, False)\n\t\t\t\twriteOption(f, \"lxc.network.mtu\", \"1500\", False)\n\n\t\t\t\t# set IP address\n\t\t\t\tip_type = eth_c.get(\"type\")\n\t\t\t\tif ip_type == \"static\":\n\t\t\t\t\tip = eth_c.get(\"address\")\n\t\t\t\t\tmask = getNetmaskCIDR(eth_c.get(\"network\"))\n\t\t\t\telse:\n\t\t\t\t\tip = \"0.0.0.0\"\n\t\t\t\t\tmask = \"\"\n\n\t\t\t\twriteOption(f, \"lxc.network.ipv4\", ip+mask, False)\n\n\t\t\t\tif eth.contains(\"connected\"):\n\t\t\t\t\twriteOption(f, \"lxc.network.flags\", \"up\", False)\n\n\t\t\t\t# set MAC address, if present\n\t\t\t\tmac = eth.get(\"hw_address\")\n\t\t\t\tif mac:\n\t\t\t\t\twriteOption(f, \"lxc.network.hwaddr\", mac)\n\n\t\t# copy the temp file to the virtual machine\n\t\tcopyFileToVM(temp_file, self.host)\n\t\tos.remove(temp_file)\n\n\t\t# run the script on the virtual machine, to create the container\n\t\tlog.info(\"\\tRun the container creation script...\")\n\t\texecuteCommandSSH(\"./\" + script + \" \" + path + \" \" + self.id + \" \" + \n\t\t\tver + \" \" + arch + \" \" + passwd)\n\n\n\tdef setupOperatingSystem(self):\n\t\tpass\n\t\t\n\tdef startVM(self):\n\t\t\"\"\" Start the container. \"\"\"\n\t\tlog.info(\"\\tStarting the container...\")\n\t\texecuteCommandSSH(\"pushd \" + path)\n\t\texecuteCommandSSH(\"lxc-create\" + \" -n \" + self.id + \" -f \" + self.config)\n#\t\texecuteCommandSSH(\"lxc-start\" + \" -n \" + self.id + \" -f \" + self.config)\n\n\tdef shutdownVM(self):\n\t\t\"\"\" Shutdown the container and the virtual machine. 
\"\"\"\n\t\tlog.info(\"\\tStopping the container...\")\n#\t\texecuteCommandSSH(\"lxc-stop\" + \" -n \" + self.id)\n\t\texecuteCommandSSH(\"lxc-destroy\" + \" -n \" + self.id)\n\t\texecuteCommandSSH(\"shutdown -h now\")\n\n\tdef connectToVM(self):\n\t\tprint \"\\nEstablishing connection to the VM...\"\n\n\tdef disconnectFromVM(self):\n\t\tprint \"\\nTerminating connection to the VM...\"\n\n\tdef setupServices(self):\n\t\tprint \"\\nInstalling services...\"\n\t\tsection = self.data.getSection(\"services\")\n\t\tself.installPrograms(section)\n\n\tdef setupDeveloperTools(self):\n\t\tprint \"\\nInstalling developer tools...\"\n\t\tsection = self.data.getSection(\"devel\")\n\t\tself.installPrograms(section)\n\n\tdef setupGuiTools(self):\n\t\tprint \"\\nInstalling GUI tools...\"\n\t\tsection = self.data.getSection(\"gui\")\n\t\tself.installPrograms(section)\n\n\tdef createArchive(self):\n\t\texecuteCommandSSH(\"cd \" + path)\n\t\tfiles = self.config + \" \" + self.fstab + \" \" + self.rootfs\n\n\t\tarch_name = self.id + \".zip\"\n\n\t\texecuteCommandSSH(\"zip -r \" + arch_name + \" \" + files)\n\t\tcopyFileFromVM(path + \"/\" + arch_name, \"./\", self.host)\n\n\t\treturn [arch_name, \"\"]\n\n\tdef getModuleName(self):\n\t\treturn \"lxc\"\n\n\tdef getConfigInstance(self):\n\t\treturn ConfigLinux(self.data, self.communicator)\n\n\tdef getInstallerInstance(self):\n\t\tvm_os = self.data.getSection(\"hardware\").get(\"os\")\n\t\tfor k in installer.keys():\n\t\t\tif str(k) in vm_os:\n\t\t\t\treturn installer[k](self.communicator)\n\t\treturn None",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
"""
The epitome package is a set of command-line tools for analyzing MRI data, and a
set of scriptuit modules for stitching them (and others) together.
"""
from . import utilities
from . import stats
from . import signal
from . import plot
from . import docopt
|
normal
|
{
"blob_id": "4d58926e812789768fdf5be59bd54f9b66850e57",
"index": 2554,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfrom . import utilities\nfrom . import stats\nfrom . import signal\nfrom . import plot\nfrom . import docopt\n",
"step-3": "\"\"\"\nThe epitome package is a set of command-line tools for analyzing MRI data, and a \nset of scriptuit modules for stitching them (and others) together.\n\"\"\"\n\nfrom . import utilities\nfrom . import stats\nfrom . import signal\nfrom . import plot\nfrom . import docopt\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
#!/usr/bin/env python3
import base64
from apiclient import errors
import os
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
import mimetypes
def Get_Attachments(service, userId, msg_id, store_dir):
"""Get and store attachment from Message with given id.
Args:
service: Authorized Gmail API service instance.
userId: User's email address. The special value "me"
can be used to indicate the authenticated user.
msg_id: ID of Message containing attachment.
store_dir: The directory used to store attachments.
"""
try:
message = service.users().messages().get(userId=userId, id=msg_id).execute()
parts = [message['payload']]
while parts:
part = parts.pop()
if part.get('parts'):
parts.extend(part['parts'])
if part.get('filename'):
if 'data' in part['body']:
file_data = base64.urlsafe_b64decode(part['body']['data'].encode('UTF-8'))
#self.stdout.write('FileData for %s, %s found! size: %s' % (message['id'], part['filename'], part['size']))
elif 'attachmentId' in part['body']:
attachment = service.users().messages().attachments().get(
userId=userId, messageId=message['id'], id=part['body']['attachmentId']
).execute()
file_data = base64.urlsafe_b64decode(attachment['data'].encode('UTF-8'))
#self.stdout.write('FileData for %s, %s found! size: %s' % (message['id'], part['filename'], attachment['size']))
else:
file_data = None
if file_data:
                    # do some stuff, e.g. write the decoded attachment to disk
path = ''.join([store_dir, part['filename']])
with open(path, 'wb') as f:
f.write(file_data)
except errors.HttpError as error:
print('An error occurred: %s' % error)
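
# Hedged usage sketch (editor's addition): `service` is assumed to be an
# authorized Gmail API client and `msg_id` a known message id, as described in
# the docstring above.
#
#   Get_Attachments(service, 'me', msg_id, './attachments/')
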
def Reply_With_Attchment(service, userId, receiver, subject, message, attachments, threadId, message_id):
"""Reply to message with the new pdf attached.
Args:
service: Authorized Gmail API service instance.
        userId: User's email address. The special value "me"
can be used to indicate the authenticated user.
receiver: Email address of who to send to.
subject: Email subject.
message: Email message, plain text
        attachments: e.g. 'new_pdf.pdf'; the name can be changed in pdf.combine_pdfs
threadId: Used to match reply with message thread
message_id: Identifies specific message to interact with.
"""
# Create email message
emailMsg = message
mimeMessage = MIMEMultipart()
mimeMessage['to'] = receiver
mimeMessage['subject'] = subject
mimeMessage['threadId'] = threadId
mimeMessage['In-Reply-To'] = message_id
mimeMessage['References'] = message_id
mimeMessage.attach(MIMEText(emailMsg, 'plain'))
# Attach files
    if attachments is not None:
attachment = attachments
content_type = mimetypes.guess_type(attachment)
main_type, sub_type = content_type[0].split('/', 1)
file_name = os.path.basename(attachment)
f = open(attachment, 'rb')
myFile = MIMEBase(main_type, sub_type)
myFile.set_payload(f.read())
myFile.add_header('Content-Disposition', 'attachment', filename=file_name)
encoders.encode_base64(myFile)
f.close()
mimeMessage.attach(myFile)
raw_string = {'raw':base64.urlsafe_b64encode(mimeMessage.as_bytes()).decode()}
raw_string['threadId']=threadId
message = service.users().messages().send(userId=userId, body=raw_string).execute()
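
# Hedged usage sketch (editor's addition): thread_id and message_id would come
# from Get_Message_Info below; 'new_pdf.pdf' matches the attachments note in
# the docstring above, and the receiver address is hypothetical.
#
#   Reply_With_Attchment(service, 'me', '[email protected]', 'Re: your PDFs',
#                        'Merged file attached.', 'new_pdf.pdf',
#                        thread_id, message_id)
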
def Get_Unread_Messages(service, userId):
"""Retrieves all unread messages with attachments, returns list of message ids.
Args:
service: Authorized Gmail API service instance.
        userId: User's email address. The special value "me"
can be used to indicate the authenticated user.
"""
message_list = []
message_ids = service.users().messages().list(userId=userId, labelIds='INBOX', alt="json", q='is:unread has:attachment').execute()
if message_ids['resultSizeEstimate'] > 0:
for message in message_ids['messages']:
message_list.append(message['id'])
return message_list
def Get_Message_Info(service, userId, message_id):
"""Retrieves received message info, returns tuple.
Args:
service: Authorized Gmail API service instance.
        userId: User's email address. The special value "me"
can be used to indicate the authenticated user.
message_id: Identifies specific message to interact with.
"""
message_info = service.users().messages().get(userId=userId, id=message_id).execute()
ID = message_info['id']
thread_id = message_info['threadId']
    header_info = message_info['payload']['headers']
    sender = subject = None  # fall back to None if a header is absent
    for header in header_info:
        if header['name'] == 'Message-ID':
            message_id = header['value']
        if header['name'] == 'From':
            sender = header['value']
        if header['name'] == 'Subject':
            subject = header['value']
attachment_info = message_info['payload']['parts']
attachment_list = []
for attachment in attachment_info:
if attachment['mimeType'] == 'application/pdf':
attachment_list.append(attachment['filename'])
info = (sender, subject, thread_id, message_id, attachment_list, ID)
return info
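
# Hedged usage sketch (editor's addition): the returned tuple unpacks in the
# order built above.
#
#   sender, subject, thread_id, message_id, attachment_list, ID = \
#       Get_Message_Info(service, 'me', msg_id)
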
def Delete_Message(service, userId, message_id):
"""Permanently delete message.
Args:
service: Authorized Gmail API service instance.
        userId: User's email address. The special value "me"
can be used to indicate the authenticated user.
message_id: Identifies specific message to interact with.
"""
service.users().messages().delete(userId=userId, id=message_id).execute()
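

def process_unread_messages(service, store_dir='./attachments/'):
    """Editor's addition: a minimal end-to-end sketch, not part of the original
    module. It assumes `service` is an authorized Gmail API client and simply
    chains the helpers above: list unread messages, inspect each one, and
    download its attachments. The reply/delete steps are left commented out.
    """
    for msg_id in Get_Unread_Messages(service, 'me'):
        sender, subject, thread_id, message_id, attachment_list, ID = \
            Get_Message_Info(service, 'me', msg_id)
        print('Unread from %s: %s (%d attachment(s))'
              % (sender, subject, len(attachment_list)))
        Get_Attachments(service, 'me', ID, store_dir)
        # Reply_With_Attchment(service, 'me', sender, 'Re: %s' % subject,
        #                      'Merged file attached.', 'new_pdf.pdf',
        #                      thread_id, message_id)
        # Delete_Message(service, 'me', ID)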
|
normal
|
{
"blob_id": "dee1ab3adb7f627680410c774be44ae196f63f6c",
"index": 587,
"step-1": "<mask token>\n\n\ndef Get_Attachments(service, userId, msg_id, store_dir):\n \"\"\"Get and store attachment from Message with given id.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\"\n can be used to indicate the authenticated user.\n msg_id: ID of Message containing attachment.\n store_dir: The directory used to store attachments.\n \"\"\"\n try:\n message = service.users().messages().get(userId=userId, id=msg_id\n ).execute()\n parts = [message['payload']]\n while parts:\n part = parts.pop()\n if part.get('parts'):\n parts.extend(part['parts'])\n if part.get('filename'):\n if 'data' in part['body']:\n file_data = base64.urlsafe_b64decode(part['body'][\n 'data'].encode('UTF-8'))\n elif 'attachmentId' in part['body']:\n attachment = service.users().messages().attachments().get(\n userId=userId, messageId=message['id'], id=part[\n 'body']['attachmentId']).execute()\n file_data = base64.urlsafe_b64decode(attachment['data']\n .encode('UTF-8'))\n else:\n file_data = None\n if file_data:\n path = ''.join([store_dir, part['filename']])\n with open(path, 'wb') as f:\n f.write(file_data)\n except errors.HttpError as error:\n print('An error occurred: %s' % error)\n\n\n<mask token>\n\n\ndef Delete_Message(service, userId, message_id):\n \"\"\"Permanently delete message.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n message_id: Identifies specific message to interact with.\n \"\"\"\n service.users().messages().delete(userId=userId, id=message_id).execute()\n",
"step-2": "<mask token>\n\n\ndef Get_Attachments(service, userId, msg_id, store_dir):\n \"\"\"Get and store attachment from Message with given id.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\"\n can be used to indicate the authenticated user.\n msg_id: ID of Message containing attachment.\n store_dir: The directory used to store attachments.\n \"\"\"\n try:\n message = service.users().messages().get(userId=userId, id=msg_id\n ).execute()\n parts = [message['payload']]\n while parts:\n part = parts.pop()\n if part.get('parts'):\n parts.extend(part['parts'])\n if part.get('filename'):\n if 'data' in part['body']:\n file_data = base64.urlsafe_b64decode(part['body'][\n 'data'].encode('UTF-8'))\n elif 'attachmentId' in part['body']:\n attachment = service.users().messages().attachments().get(\n userId=userId, messageId=message['id'], id=part[\n 'body']['attachmentId']).execute()\n file_data = base64.urlsafe_b64decode(attachment['data']\n .encode('UTF-8'))\n else:\n file_data = None\n if file_data:\n path = ''.join([store_dir, part['filename']])\n with open(path, 'wb') as f:\n f.write(file_data)\n except errors.HttpError as error:\n print('An error occurred: %s' % error)\n\n\ndef Reply_With_Attchment(service, userId, receiver, subject, message,\n attachments, threadId, message_id):\n \"\"\"Reply to message with the new pdf attached.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n receiver: Email address of who to send to.\n subject: Email subject.\n message: Email message, plain text\n attachments: 'new_pdf.pdf' Name can be changed in pdf.combine_pdfs\n threadId: Used to match reply with message thread\n message_id: Identifies specific message to interact with.\n \"\"\"\n emailMsg = message\n mimeMessage = MIMEMultipart()\n mimeMessage['to'] = receiver\n mimeMessage['subject'] = subject\n mimeMessage['threadId'] = threadId\n mimeMessage['In-Reply-To'] = message_id\n mimeMessage['References'] = message_id\n mimeMessage.attach(MIMEText(emailMsg, 'plain'))\n if attachments != None:\n attachment = attachments\n content_type = mimetypes.guess_type(attachment)\n main_type, sub_type = content_type[0].split('/', 1)\n file_name = os.path.basename(attachment)\n f = open(attachment, 'rb')\n myFile = MIMEBase(main_type, sub_type)\n myFile.set_payload(f.read())\n myFile.add_header('Content-Disposition', 'attachment', filename=\n file_name)\n encoders.encode_base64(myFile)\n f.close()\n mimeMessage.attach(myFile)\n raw_string = {'raw': base64.urlsafe_b64encode(mimeMessage.as_bytes()).\n decode()}\n raw_string['threadId'] = threadId\n message = service.users().messages().send(userId=userId, body=raw_string\n ).execute()\n\n\n<mask token>\n\n\ndef Get_Message_Info(service, userId, message_id):\n \"\"\"Retrieves received message info, returns tuple.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. 
The special value \"me\".\n can be used to indicate the authenticated user.\n message_id: Identifies specific message to interact with.\n \"\"\"\n message_info = service.users().messages().get(userId=userId, id=message_id\n ).execute()\n ID = message_info['id']\n thread_id = message_info['threadId']\n header_info = message_info['payload']['headers']\n for header in header_info:\n if header['name'] == 'Message-ID':\n message_id = header['value']\n if header['name'] == 'From':\n sender = header['value']\n if header['name'] == 'Subject':\n subject = header['value']\n attachment_info = message_info['payload']['parts']\n attachment_list = []\n for attachment in attachment_info:\n if attachment['mimeType'] == 'application/pdf':\n attachment_list.append(attachment['filename'])\n info = sender, subject, thread_id, message_id, attachment_list, ID\n return info\n\n\ndef Delete_Message(service, userId, message_id):\n \"\"\"Permanently delete message.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n message_id: Identifies specific message to interact with.\n \"\"\"\n service.users().messages().delete(userId=userId, id=message_id).execute()\n",
"step-3": "<mask token>\n\n\ndef Get_Attachments(service, userId, msg_id, store_dir):\n \"\"\"Get and store attachment from Message with given id.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\"\n can be used to indicate the authenticated user.\n msg_id: ID of Message containing attachment.\n store_dir: The directory used to store attachments.\n \"\"\"\n try:\n message = service.users().messages().get(userId=userId, id=msg_id\n ).execute()\n parts = [message['payload']]\n while parts:\n part = parts.pop()\n if part.get('parts'):\n parts.extend(part['parts'])\n if part.get('filename'):\n if 'data' in part['body']:\n file_data = base64.urlsafe_b64decode(part['body'][\n 'data'].encode('UTF-8'))\n elif 'attachmentId' in part['body']:\n attachment = service.users().messages().attachments().get(\n userId=userId, messageId=message['id'], id=part[\n 'body']['attachmentId']).execute()\n file_data = base64.urlsafe_b64decode(attachment['data']\n .encode('UTF-8'))\n else:\n file_data = None\n if file_data:\n path = ''.join([store_dir, part['filename']])\n with open(path, 'wb') as f:\n f.write(file_data)\n except errors.HttpError as error:\n print('An error occurred: %s' % error)\n\n\ndef Reply_With_Attchment(service, userId, receiver, subject, message,\n attachments, threadId, message_id):\n \"\"\"Reply to message with the new pdf attached.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n receiver: Email address of who to send to.\n subject: Email subject.\n message: Email message, plain text\n attachments: 'new_pdf.pdf' Name can be changed in pdf.combine_pdfs\n threadId: Used to match reply with message thread\n message_id: Identifies specific message to interact with.\n \"\"\"\n emailMsg = message\n mimeMessage = MIMEMultipart()\n mimeMessage['to'] = receiver\n mimeMessage['subject'] = subject\n mimeMessage['threadId'] = threadId\n mimeMessage['In-Reply-To'] = message_id\n mimeMessage['References'] = message_id\n mimeMessage.attach(MIMEText(emailMsg, 'plain'))\n if attachments != None:\n attachment = attachments\n content_type = mimetypes.guess_type(attachment)\n main_type, sub_type = content_type[0].split('/', 1)\n file_name = os.path.basename(attachment)\n f = open(attachment, 'rb')\n myFile = MIMEBase(main_type, sub_type)\n myFile.set_payload(f.read())\n myFile.add_header('Content-Disposition', 'attachment', filename=\n file_name)\n encoders.encode_base64(myFile)\n f.close()\n mimeMessage.attach(myFile)\n raw_string = {'raw': base64.urlsafe_b64encode(mimeMessage.as_bytes()).\n decode()}\n raw_string['threadId'] = threadId\n message = service.users().messages().send(userId=userId, body=raw_string\n ).execute()\n\n\ndef Get_Unread_Messages(service, userId):\n \"\"\"Retrieves all unread messages with attachments, returns list of message ids.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. 
The special value \"me\".\n can be used to indicate the authenticated user.\n \"\"\"\n message_list = []\n message_ids = service.users().messages().list(userId=userId, labelIds=\n 'INBOX', alt='json', q='is:unread has:attachment').execute()\n if message_ids['resultSizeEstimate'] > 0:\n for message in message_ids['messages']:\n message_list.append(message['id'])\n return message_list\n\n\ndef Get_Message_Info(service, userId, message_id):\n \"\"\"Retrieves received message info, returns tuple.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n message_id: Identifies specific message to interact with.\n \"\"\"\n message_info = service.users().messages().get(userId=userId, id=message_id\n ).execute()\n ID = message_info['id']\n thread_id = message_info['threadId']\n header_info = message_info['payload']['headers']\n for header in header_info:\n if header['name'] == 'Message-ID':\n message_id = header['value']\n if header['name'] == 'From':\n sender = header['value']\n if header['name'] == 'Subject':\n subject = header['value']\n attachment_info = message_info['payload']['parts']\n attachment_list = []\n for attachment in attachment_info:\n if attachment['mimeType'] == 'application/pdf':\n attachment_list.append(attachment['filename'])\n info = sender, subject, thread_id, message_id, attachment_list, ID\n return info\n\n\ndef Delete_Message(service, userId, message_id):\n \"\"\"Permanently delete message.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n message_id: Identifies specific message to interact with.\n \"\"\"\n service.users().messages().delete(userId=userId, id=message_id).execute()\n",
"step-4": "import base64\nfrom apiclient import errors\nimport os\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom email import encoders\nimport mimetypes\n\n\ndef Get_Attachments(service, userId, msg_id, store_dir):\n \"\"\"Get and store attachment from Message with given id.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\"\n can be used to indicate the authenticated user.\n msg_id: ID of Message containing attachment.\n store_dir: The directory used to store attachments.\n \"\"\"\n try:\n message = service.users().messages().get(userId=userId, id=msg_id\n ).execute()\n parts = [message['payload']]\n while parts:\n part = parts.pop()\n if part.get('parts'):\n parts.extend(part['parts'])\n if part.get('filename'):\n if 'data' in part['body']:\n file_data = base64.urlsafe_b64decode(part['body'][\n 'data'].encode('UTF-8'))\n elif 'attachmentId' in part['body']:\n attachment = service.users().messages().attachments().get(\n userId=userId, messageId=message['id'], id=part[\n 'body']['attachmentId']).execute()\n file_data = base64.urlsafe_b64decode(attachment['data']\n .encode('UTF-8'))\n else:\n file_data = None\n if file_data:\n path = ''.join([store_dir, part['filename']])\n with open(path, 'wb') as f:\n f.write(file_data)\n except errors.HttpError as error:\n print('An error occurred: %s' % error)\n\n\ndef Reply_With_Attchment(service, userId, receiver, subject, message,\n attachments, threadId, message_id):\n \"\"\"Reply to message with the new pdf attached.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n receiver: Email address of who to send to.\n subject: Email subject.\n message: Email message, plain text\n attachments: 'new_pdf.pdf' Name can be changed in pdf.combine_pdfs\n threadId: Used to match reply with message thread\n message_id: Identifies specific message to interact with.\n \"\"\"\n emailMsg = message\n mimeMessage = MIMEMultipart()\n mimeMessage['to'] = receiver\n mimeMessage['subject'] = subject\n mimeMessage['threadId'] = threadId\n mimeMessage['In-Reply-To'] = message_id\n mimeMessage['References'] = message_id\n mimeMessage.attach(MIMEText(emailMsg, 'plain'))\n if attachments != None:\n attachment = attachments\n content_type = mimetypes.guess_type(attachment)\n main_type, sub_type = content_type[0].split('/', 1)\n file_name = os.path.basename(attachment)\n f = open(attachment, 'rb')\n myFile = MIMEBase(main_type, sub_type)\n myFile.set_payload(f.read())\n myFile.add_header('Content-Disposition', 'attachment', filename=\n file_name)\n encoders.encode_base64(myFile)\n f.close()\n mimeMessage.attach(myFile)\n raw_string = {'raw': base64.urlsafe_b64encode(mimeMessage.as_bytes()).\n decode()}\n raw_string['threadId'] = threadId\n message = service.users().messages().send(userId=userId, body=raw_string\n ).execute()\n\n\ndef Get_Unread_Messages(service, userId):\n \"\"\"Retrieves all unread messages with attachments, returns list of message ids.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. 
The special value \"me\".\n can be used to indicate the authenticated user.\n \"\"\"\n message_list = []\n message_ids = service.users().messages().list(userId=userId, labelIds=\n 'INBOX', alt='json', q='is:unread has:attachment').execute()\n if message_ids['resultSizeEstimate'] > 0:\n for message in message_ids['messages']:\n message_list.append(message['id'])\n return message_list\n\n\ndef Get_Message_Info(service, userId, message_id):\n \"\"\"Retrieves received message info, returns tuple.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n message_id: Identifies specific message to interact with.\n \"\"\"\n message_info = service.users().messages().get(userId=userId, id=message_id\n ).execute()\n ID = message_info['id']\n thread_id = message_info['threadId']\n header_info = message_info['payload']['headers']\n for header in header_info:\n if header['name'] == 'Message-ID':\n message_id = header['value']\n if header['name'] == 'From':\n sender = header['value']\n if header['name'] == 'Subject':\n subject = header['value']\n attachment_info = message_info['payload']['parts']\n attachment_list = []\n for attachment in attachment_info:\n if attachment['mimeType'] == 'application/pdf':\n attachment_list.append(attachment['filename'])\n info = sender, subject, thread_id, message_id, attachment_list, ID\n return info\n\n\ndef Delete_Message(service, userId, message_id):\n \"\"\"Permanently delete message.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n message_id: Identifies specific message to interact with.\n \"\"\"\n service.users().messages().delete(userId=userId, id=message_id).execute()\n",
"step-5": "#!/usr/bin/env python3\n\nimport base64\nfrom apiclient import errors\nimport os\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.mime.base import MIMEBase\nfrom email import encoders\nimport mimetypes\n\ndef Get_Attachments(service, userId, msg_id, store_dir):\n \"\"\"Get and store attachment from Message with given id.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\"\n can be used to indicate the authenticated user.\n msg_id: ID of Message containing attachment.\n store_dir: The directory used to store attachments.\n \"\"\"\n try:\n message = service.users().messages().get(userId=userId, id=msg_id).execute()\n parts = [message['payload']]\n while parts:\n part = parts.pop()\n if part.get('parts'):\n parts.extend(part['parts'])\n if part.get('filename'):\n if 'data' in part['body']:\n file_data = base64.urlsafe_b64decode(part['body']['data'].encode('UTF-8'))\n #self.stdout.write('FileData for %s, %s found! size: %s' % (message['id'], part['filename'], part['size']))\n elif 'attachmentId' in part['body']:\n attachment = service.users().messages().attachments().get(\n userId=userId, messageId=message['id'], id=part['body']['attachmentId']\n ).execute()\n file_data = base64.urlsafe_b64decode(attachment['data'].encode('UTF-8'))\n #self.stdout.write('FileData for %s, %s found! size: %s' % (message['id'], part['filename'], attachment['size']))\n else:\n file_data = None\n if file_data:\n #do some staff, e.g.\n path = ''.join([store_dir, part['filename']])\n with open(path, 'wb') as f:\n f.write(file_data)\n except errors.HttpError as error:\n print('An error occurred: %s' % error)\n\ndef Reply_With_Attchment(service, userId, receiver, subject, message, attachments, threadId, message_id):\n \"\"\"Reply to message with the new pdf attached.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. 
The special value \"me\".\n can be used to indicate the authenticated user.\n receiver: Email address of who to send to.\n subject: Email subject.\n message: Email message, plain text\n attachments: 'new_pdf.pdf' Name can be changed in pdf.combine_pdfs\n threadId: Used to match reply with message thread\n message_id: Identifies specific message to interact with.\n \"\"\"\n # Create email message\n emailMsg = message\n mimeMessage = MIMEMultipart()\n mimeMessage['to'] = receiver\n mimeMessage['subject'] = subject\n mimeMessage['threadId'] = threadId\n mimeMessage['In-Reply-To'] = message_id\n mimeMessage['References'] = message_id\n mimeMessage.attach(MIMEText(emailMsg, 'plain'))\n \n # Attach files\n if attachments != None:\n attachment = attachments\n content_type = mimetypes.guess_type(attachment)\n main_type, sub_type = content_type[0].split('/', 1)\n file_name = os.path.basename(attachment)\n\n f = open(attachment, 'rb')\n\n myFile = MIMEBase(main_type, sub_type)\n myFile.set_payload(f.read())\n myFile.add_header('Content-Disposition', 'attachment', filename=file_name)\n encoders.encode_base64(myFile)\n\n f.close()\n\n mimeMessage.attach(myFile)\n \n raw_string = {'raw':base64.urlsafe_b64encode(mimeMessage.as_bytes()).decode()}\n raw_string['threadId']=threadId\n \n message = service.users().messages().send(userId=userId, body=raw_string).execute()\n\ndef Get_Unread_Messages(service, userId):\n \"\"\"Retrieves all unread messages with attachments, returns list of message ids.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n \"\"\"\n message_list = []\n message_ids = service.users().messages().list(userId=userId, labelIds='INBOX', alt=\"json\", q='is:unread has:attachment').execute()\n \n if message_ids['resultSizeEstimate'] > 0:\n for message in message_ids['messages']:\n message_list.append(message['id'])\n\n return message_list\n\ndef Get_Message_Info(service, userId, message_id):\n \"\"\"Retrieves received message info, returns tuple.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n message_id: Identifies specific message to interact with.\n \"\"\"\n message_info = service.users().messages().get(userId=userId, id=message_id).execute()\n\n ID = message_info['id']\n thread_id = message_info['threadId']\n header_info = message_info['payload']['headers']\n for header in header_info:\n if header['name']=='Message-ID':\n message_id=header['value']\n if header['name']=='From':\n sender=header['value']\n if header['name']=='Subject':\n subject=header['value']\n attachment_info = message_info['payload']['parts']\n attachment_list = []\n for attachment in attachment_info:\n if attachment['mimeType'] == 'application/pdf':\n attachment_list.append(attachment['filename'])\n\n info = (sender, subject, thread_id, message_id, attachment_list, ID)\n return info\n\ndef Delete_Message(service, userId, message_id):\n \"\"\"Permanently delete message.\n Args:\n service: Authorized Gmail API service instance.\n userId: User's email address. The special value \"me\".\n can be used to indicate the authenticated user.\n message_id: Identifies specific message to interact with.\n \"\"\"\n service.users().messages().delete(userId=userId, id=message_id).execute()",
"step-ids": [
2,
4,
5,
6,
7
]
}
|
[
2,
4,
5,
6,
7
] |
import os
import lasagne
import theano
import theano.tensor as T
import numpy as np
from lasagne.layers import Conv2DLayer,\
MaxPool2DLayer,\
InputLayer
from lasagne.nonlinearities import elu, sigmoid, rectify
from lasagne.regularization import l2, regularize_layer_params
from utils.maxpool_multiply import MaxPoolMultiplyLayer
from models.cascade_base import CascadeBase
class FaceTrigger(CascadeBase):
def build_network(self):
net = lasagne.layers.batch_norm(InputLayer((None, 1) + tuple(self.img_shape),
self.input_X,
name='network input'))
convs = []
# Build network
for i in range(self.num_cascades):
net = lasagne.layers.batch_norm(Conv2DLayer(net,
nonlinearity=elu,
num_filters=self.num_filters[i],
filter_size=self.filter_sizes[i],
pad='same',
name='conv {}'.format(i + 1)))
convs.append(net)
net = MaxPool2DLayer(net,
pool_size=self.pool_sizes[i],
name='Max Pool {} {}'.format(i + 1, i + 2))
out = Conv2DLayer(net,
nonlinearity=sigmoid,
num_filters=1,
filter_size=1,
pad='same',
name='prediction layer')
branches = [None] * self.num_cascades
# Build branches
for i in range(self.num_cascades):
branches[i] = Conv2DLayer(convs[i],
num_filters=1,
filter_size=1,
nonlinearity=sigmoid,
name='decide network {} output'.format(i + 1))
downsampled_activation_layers = [branches[0]]
for i in range(self.num_cascades - 1):
downsampled_activation_layers.append(MaxPoolMultiplyLayer(branches[i + 1],
downsampled_activation_layers[-1],
self.pool_sizes[i]))
masked_out = MaxPoolMultiplyLayer(out,
downsampled_activation_layers[-1],
self.pool_sizes[-1])
return out, downsampled_activation_layers, masked_out
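

# Editor's note: a hedged usage sketch under stated assumptions -- CascadeBase
# is expected to provide `input_X` (used in build_network above) and the
# shape/cascade attributes before build_network is called; none of this is
# confirmed by the snippet itself.
def compile_masked_predictor(model):
    """Compile a deterministic forward pass over the cascade-masked output."""
    out, branch_outs, masked_out = model.build_network()
    return theano.function(
        [model.input_X],
        lasagne.layers.get_output(masked_out, deterministic=True),
        allow_input_downcast=True)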
|
normal
|
{
"blob_id": "1dd5c25cd3b7bc933ba0b63d9a42fdddc92b8531",
"index": 8737,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass FaceTrigger(CascadeBase):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass FaceTrigger(CascadeBase):\n\n def build_network(self):\n net = lasagne.layers.batch_norm(InputLayer((None, 1) + tuple(self.\n img_shape), self.input_X, name='network input'))\n convs = []\n for i in range(self.num_cascades):\n net = lasagne.layers.batch_norm(Conv2DLayer(net, nonlinearity=\n elu, num_filters=self.num_filters[i], filter_size=self.\n filter_sizes[i], pad='same', name='conv {}'.format(i + 1)))\n convs.append(net)\n net = MaxPool2DLayer(net, pool_size=self.pool_sizes[i], name=\n 'Max Pool {} {}'.format(i + 1, i + 2))\n out = Conv2DLayer(net, nonlinearity=sigmoid, num_filters=1,\n filter_size=1, pad='same', name='prediction layer')\n branches = [None] * self.num_cascades\n for i in range(self.num_cascades):\n branches[i] = Conv2DLayer(convs[i], num_filters=1, filter_size=\n 1, nonlinearity=sigmoid, name='decide network {} output'.\n format(i + 1))\n downsampled_activation_layers = [branches[0]]\n for i in range(self.num_cascades - 1):\n downsampled_activation_layers.append(MaxPoolMultiplyLayer(\n branches[i + 1], downsampled_activation_layers[-1], self.\n pool_sizes[i]))\n masked_out = MaxPoolMultiplyLayer(out,\n downsampled_activation_layers[-1], self.pool_sizes[-1])\n return out, downsampled_activation_layers, masked_out\n",
"step-4": "import os\nimport lasagne\nimport theano\nimport theano.tensor as T\nimport numpy as np\nfrom lasagne.layers import Conv2DLayer, MaxPool2DLayer, InputLayer\nfrom lasagne.nonlinearities import elu, sigmoid, rectify\nfrom lasagne.regularization import l2, regularize_layer_params\nfrom utils.maxpool_multiply import MaxPoolMultiplyLayer\nfrom models.cascade_base import CascadeBase\n\n\nclass FaceTrigger(CascadeBase):\n\n def build_network(self):\n net = lasagne.layers.batch_norm(InputLayer((None, 1) + tuple(self.\n img_shape), self.input_X, name='network input'))\n convs = []\n for i in range(self.num_cascades):\n net = lasagne.layers.batch_norm(Conv2DLayer(net, nonlinearity=\n elu, num_filters=self.num_filters[i], filter_size=self.\n filter_sizes[i], pad='same', name='conv {}'.format(i + 1)))\n convs.append(net)\n net = MaxPool2DLayer(net, pool_size=self.pool_sizes[i], name=\n 'Max Pool {} {}'.format(i + 1, i + 2))\n out = Conv2DLayer(net, nonlinearity=sigmoid, num_filters=1,\n filter_size=1, pad='same', name='prediction layer')\n branches = [None] * self.num_cascades\n for i in range(self.num_cascades):\n branches[i] = Conv2DLayer(convs[i], num_filters=1, filter_size=\n 1, nonlinearity=sigmoid, name='decide network {} output'.\n format(i + 1))\n downsampled_activation_layers = [branches[0]]\n for i in range(self.num_cascades - 1):\n downsampled_activation_layers.append(MaxPoolMultiplyLayer(\n branches[i + 1], downsampled_activation_layers[-1], self.\n pool_sizes[i]))\n masked_out = MaxPoolMultiplyLayer(out,\n downsampled_activation_layers[-1], self.pool_sizes[-1])\n return out, downsampled_activation_layers, masked_out\n",
"step-5": "import os\nimport lasagne\nimport theano\nimport theano.tensor as T\nimport numpy as np\nfrom lasagne.layers import Conv2DLayer,\\\n MaxPool2DLayer,\\\n InputLayer\nfrom lasagne.nonlinearities import elu, sigmoid, rectify\nfrom lasagne.regularization import l2, regularize_layer_params\nfrom utils.maxpool_multiply import MaxPoolMultiplyLayer\n\nfrom models.cascade_base import CascadeBase\n\nclass FaceTrigger(CascadeBase): \n def build_network(self):\n net = lasagne.layers.batch_norm(InputLayer((None, 1) + tuple(self.img_shape),\n self.input_X,\n name='network input'))\n \n convs = []\n\n # Build network\n for i in range(self.num_cascades):\n net = lasagne.layers.batch_norm(Conv2DLayer(net,\n nonlinearity=elu,\n num_filters=self.num_filters[i],\n filter_size=self.filter_sizes[i],\n pad='same',\n name='conv {}'.format(i + 1)))\n convs.append(net)\n net = MaxPool2DLayer(net,\n pool_size=self.pool_sizes[i],\n name='Max Pool {} {}'.format(i + 1, i + 2))\n\n \n out = Conv2DLayer(net,\n nonlinearity=sigmoid,\n num_filters=1,\n filter_size=1,\n pad='same',\n name='prediction layer')\n \n branches = [None] * self.num_cascades\n\n # Build branches\n for i in range(self.num_cascades):\n branches[i] = Conv2DLayer(convs[i],\n num_filters=1,\n filter_size=1,\n nonlinearity=sigmoid,\n name='decide network {} output'.format(i + 1))\n\n downsampled_activation_layers = [branches[0]]\n\n for i in range(self.num_cascades - 1):\n downsampled_activation_layers.append(MaxPoolMultiplyLayer(branches[i + 1],\n downsampled_activation_layers[-1],\n self.pool_sizes[i]))\n masked_out = MaxPoolMultiplyLayer(out,\n downsampled_activation_layers[-1],\n self.pool_sizes[-1])\n \n return out, downsampled_activation_layers, masked_out",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# coding: utf-8
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp.addons.ud.ud import _TIPOS_BOLSA
TIPOS_BOLSA = dict(_TIPOS_BOLSA)
def get_banco(cls, cr, browse_record, usuario_id, context=None):
dados_bancarios_model = cls.pool.get("ud.dados.bancarios")
args = [("banco_id", "=", browse_record.banco_id.id)]
if browse_record.agencia_v:
args.append(("agencia", "=", browse_record.agencia))
if browse_record.dv_agencia_v:
args.append(("dv_agencia", "=", browse_record.dv_agencia))
if browse_record.conta_v:
args.append(("conta", "=", browse_record.conta))
if browse_record.dv_conta_v:
args.append(("dv_conta", "=", browse_record.dv_conta))
if browse_record.operacao_v:
args.append(("operacao", "=", browse_record.operacao))
dados_bancarios = dados_bancarios_model.search(cr, SUPERUSER_ID, args, context=context)
if dados_bancarios:
dados_bancarios = dados_bancarios_model.browse(cr, SUPERUSER_ID, dados_bancarios[0])
if not dados_bancarios.ud_conta_id:
return dados_bancarios.id
elif dados_bancarios.ud_conta_id.id == usuario_id:
return dados_bancarios.id
raise osv.except_osv(u"Dados Bancários duplicados", u"Outra pessoa já possui esses dados bancários!")
dados = {"banco_id": browse_record.banco_id.id, "agencia": browse_record.agencia, "dv_agencia": browse_record.dv_agencia,
"conta": browse_record.conta, "dv_conta": browse_record.dv_conta, "operacao": browse_record.operacao,
"ud_conta_id": usuario_id}
return dados_bancarios_model.create(cr, SUPERUSER_ID, dados, context=context)
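
# Editor's note: hedged usage sketch, mirroring the call made in
# AdicionarBolsaWizard.botao_adicionar below -- `add` is a wizard record that
# carries the banco_id/agencia/conta fields get_banco inspects:
#
#   dados_bancarios = get_banco(self, cr, add, add.doc_discente_id.discente_id.id, context)
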
class AdicionarBolsaWizard(osv.TransientModel):
_name = "ud.monitoria.bolsa.adicionar.wizard"
_description = u"Inclusão de bolsa de monitoria para discente (UD)"
_STATES = [
("n_bolsista", u"Não Bolsista"),
("reserva", u"Cadastro de Reserva"),
]
def _bolsas(self, cr, uid, ids, campos, args, context=None):
res = {}
for add in self.browse(cr, uid, ids, context):
res[add.id] = add.disciplina_id.bolsas
return res
_columns = {
"semestre_id": fields.many2one("ud.monitoria.registro", u"Semestre", required=True, readonly=True),
"curso_id": fields.many2one("ud.curso", u"Curso", required=True, domain="[('is_active', '=', True)]"),
"disciplina_id": fields.many2one("ud.monitoria.disciplina", u"Disciplinas", required=True,
domain="[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id), "
"('is_active', '=', True)]"),
"bolsas": fields.function(_bolsas, type="integer", string=u"Bolsas disponíveis",
help=u"Número de bolsas disponíveis para a disciplina"),
"valor_bolsa": fields.float(u"Bolsa (R$)"),
"tutor": fields.boolean(u"Tutor?"),
"status": fields.selection(_STATES, u"Status", required=True),
"doc_discente_id": fields.many2one("ud.monitoria.documentos.discente", u"Discente", required=True,
domain="[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), "
"('is_active', '=', True), ('state', '=', status)]"),
        # BANK DETAILS
"dados_bancarios_id": fields.many2one("ud.dados.bancarios", u"Dados Bancários", domain=[('id', '=', False)]),
"banco_id": fields.many2one("ud.banco", u"Banco", ondelete="restrict"),
"agencia": fields.char(u"Agência", size=4, help=u"Número da Agência"),
"dv_agencia": fields.char(u"DV Agência", size=2, help=u"Dígito verificador da Agência"),
"conta": fields.char(u"Conta", size=10, help=u"Número da Conta"),
"dv_conta": fields.char(u"DV Conta", size=1, help=u"Dígito verificador da Conta"),
"operacao": fields.char(u"Operação", size=3, help=u"Tipo de conta"),
"agencia_v": fields.related("banco_id", "agencia", type="boolean", invisible=True, readonly=True),
"dv_agencia_v": fields.related("banco_id", "dv_agencia", type="boolean", invisible=True, readonly=True),
"conta_v": fields.related("banco_id", "conta", type="boolean", invisible=True, readonly=True),
"dv_conta_v": fields.related("banco_id", "dv_conta", type="boolean", invisible=True, readonly=True),
"operacao_v": fields.related("banco_id", "operacao", type="boolean", invisible=True, readonly=True),
}
def default_get(self, cr, uid, fields_list, context=None):
res = super(AdicionarBolsaWizard, self).default_get(cr, uid, fields_list, context)
res["status"] = "n_bolsista"
res["valor_bolsa"] = 400.
context = context or {}
if context.get("active_id", False):
if context.get("active_model", False) == "ud.monitoria.registro":
res["semestre_id"] = context.get("active_id")
elif context.get("active_model", False) == "ud.monitoria.documentos.discente":
doc = self.pool.get("ud.monitoria.documentos.discente").browse(cr, uid, context.get("active_id"), context)
if doc.state == "bolsista":
raise osv.except_osv(u"Discente bolsista", u"O discente já é bolsista")
elif not doc.is_active:
raise osv.except_osv(u"Documento do discente inativo", u"Não é possível alterar o status de discentes inativos")
res["semestre_id"] = doc.disciplina_id.semestre_id.id
res["curso_id"] = doc.disciplina_id.curso_id.id
res["disciplina_id"] = doc.disciplina_id.id
res["tutor"] = doc.tutor
res["status"] = doc.state
res["doc_discente_id"] = doc.id
return res
def onchange_curso(self, cr, uid, ids, semestre_id, curso_id, disciplina_id, context=None):
if not (semestre_id and curso_id):
return {"value": {"disciplina_id": False}}
reg = self.pool.get("ud.monitoria.registro").read(cr, uid, semestre_id, ["processos_seletivos_ids"], context=context, load="_classic_write")
args = [("curso_id", "=", curso_id), ("processo_seletivo_id", "=", reg["processos_seletivos_ids"]), ("is_active", "=", True)]
disc = self.pool.get("ud.monitoria.disciplina").search(cr, uid, args, context=context)
res = {"domain": {"disciplina_id": [("id", "in", disc)]}}
if not disc:
res["value"]= {"disciplina_id": False}
return res
def onchange_disciplina(self, cr, uid, ids, disciplina_id, doc_discente_id, context=None):
if disciplina_id:
if doc_discente_id:
doc_discente = self.pool.get("ud.monitoria.documentos.discente").browse(cr, uid, doc_discente_id, context)
doc_discente_id = doc_discente_id if doc_discente.disciplina_id.id == disciplina_id else False
disciplina_id = self.pool.get("ud.monitoria.disciplina").browse(cr, uid, disciplina_id, context)
return {
"value": {"doc_discente_id": doc_discente_id,
"bolsas": disciplina_id.bolsas}
}
return {"value": {"doc_discente_id": False, "bolsas": 0}}
def onchange_doc_discente(self, cr, uid, ids, doc_discente_id, dados_bancarios_id, context=None):
if doc_discente_id:
doc = self.pool.get("ud.monitoria.documentos.discente").browse(cr, uid, doc_discente_id, context)
if not dados_bancarios_id:
dados_bancarios_id = getattr(doc.dados_bancarios_id, "id", False)
return {"value": {"dados_bancarios_id": dados_bancarios_id},
"domain": {"dados_bancarios_id": [("ud_conta_id", "=", doc.discente_id.id)]}}
return {"value": {"dados_bancarios_id": False},
"domain": {"dados_bancarios_id": [("id", "=", False)]}}
def onchange_banco(self, cr, uid, ids, banco_id, context=None):
if banco_id:
banco = self.pool.get("ud.banco").read(cr, uid, banco_id, [
"agencia", "dv_agencia", "conta", "dv_conta", "operacao"
], context=context, load="_classic_write")
vals = {"agencia": False, "dv_agencia": False, "conta": False, "dv_conta": False, "operacao": False}
vals.update({"%s_v" % dado: banco.get(dado) for dado in banco.keys()})
return {"value": vals}
return {"value": {"agencia_v": False, "dv_agencia_v": False, "conta_v": False, "dv_conta_v": False,"operacao_v": False,
"agencia": False, "dv_agencia": False, "conta": False, "dv_conta": False, "operacao": False}}
def botao_adicionar(self, cr, uid, ids, context=None):
perfil_model = self.pool.get("ud.perfil")
for add in self.browse(cr, uid, ids, context):
if add.bolsas == 0:
raise osv.except_osv(u"Bolsas Insuficientes", u"Não há bolsas disponíveis para essa disciplina")
elif not add.doc_discente_id.is_active:
raise osv.except_osv(u"Documento do discente inativo",
u"O discente não pode ser classificado como bolsista")
if add.doc_discente_id.inscricao_id.perfil_id.is_bolsista:
raise osv.except_osv(
u"Discente bolsista",
u"O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"".format(
add.doc_discente_id.discente_id.name, add.doc_discente_id.inscricao_id.perfil_id.matricula,
TIPOS_BOLSA[add.doc_discente_id.inscricao_id.perfil_id.tipo_bolsa]
)
)
responsavel = self.pool.get("ud.employee").search(cr, SUPERUSER_ID, [("user_id", "=", uid)], limit=2)
if not responsavel:
raise osv.except_osv(
u"Registro Inexistente",
u"Não é possível realizar essa alteração enquanto seu login não estiver vinculado a uma pessoa no núcleo"
)
if len(responsavel) > 1:
raise osv.except_osv(
u"Multiplos vínculos",
u"Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo"
)
perfil_model.write(cr, SUPERUSER_ID, add.doc_discente_id.inscricao_id.perfil_id.id, {
"is_bolsista": True, "tipo_bolsa": "m", "valor_bolsa": ("%.2f" % add.valor_bolsa).replace(".", ",")
})
if not add.dados_bancarios_id:
dados_bancarios = get_banco(self, cr, add, add.doc_discente_id.discente_id.id, context)
else:
dados_bancarios = add.dados_bancarios_id.id
add.doc_discente_id.write({"state": "bolsista", "dados_bancarios_id": dados_bancarios})
evento = {
"responsavel_id": responsavel[0],
"name": u"Adição de bolsa: \"%s\"" % add.doc_discente_id.discente_id.name,
"envolvidos_ids": [(4, add.doc_discente_id.discente_id.id)],
"descricao": u"Uma bolsa de R$ %s foi vinculada para o(a) discente \"%s\" sob matrícula \"%s\"." % (
("%.2f" % add.valor_bolsa).replace(".", ","),
add.doc_discente_id.discente_id.name.upper(), add.doc_discente_id.inscricao_id.perfil_id.matricula
)
}
add.semestre_id.write({"eventos_ids": [(0, 0, evento)]})
return True
class TransferirBolsaWizard(osv.TransientModel):
_name = "ud.monitoria.bolsa.transferir.wizard"
_description = u"Transferência de bolsa de monitoria (UD)"
_STATES = [
("n_bolsista", u"Não Bolsista"),
("reserva", u"Cadastro de Reserva"),
]
_columns = {
"semestre_id": fields.many2one("ud.monitoria.registro", u"Semestre", required=True, readonly=True),
"curso_id_de": fields.many2one("ud.curso", u"Curso", required=True, domain="[('is_active', '=', True)]"),
"disciplina_id_de": fields.many2one("ud.monitoria.disciplina", u"Disciplinas", required=True,
domain="[('id', '=', False)]"),
"tutor_de": fields.boolean(u"Tutor?"),
"doc_discente_id_de": fields.many2one("ud.monitoria.documentos.discente", u"Discente", required=True,
domain="[('is_active', '=', True), ('state', '=', 'bolsista'), "
"('disciplina_id', '=', disciplina_id_de), ('tutor', '=', tutor_de)]"),
"curso_id_para": fields.many2one("ud.curso", u"Curso", required=True, domain="[('is_active', '=', True)]"),
"disciplina_id_para": fields.many2one("ud.monitoria.disciplina", u"Disciplinas", required=True,
domain="[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id_para), "
"('is_active', '=', True)]"),
"tutor_para": fields.boolean(u"Tutor?"),
"status_para": fields.selection(_STATES, u"Status", required=True),
"doc_discente_id_para": fields.many2one("ud.monitoria.documentos.discente", u"Discente", required=True,
domain="[('is_active', '=', True), ('state', '=', status_para), "
"('disciplina_id', '=', disciplina_id_para), "
"('tutor', '=', tutor_para)]"),
        # BANK DETAILS
"banco_id": fields.many2one("ud.banco", u"Banco", ondelete="restrict"),
"agencia": fields.char(u"Agência", size=4, help=u"Número da Agência"),
"dv_agencia": fields.char(u"DV Agência", size=2, help=u"Dígito verificador da Agência"),
"conta": fields.char(u"Conta", size=10, help=u"Número da Conta"),
"dv_conta": fields.char(u"DV Conta", size=1, help=u"Dígito verificador da Conta"),
"operacao": fields.char(u"Operação", size=3, help=u"Tipo de conta"),
"agencia_v": fields.related("banco_id", "agencia", type="boolean", invisible=True, readonly=True),
"dv_agencia_v": fields.related("banco_id", "dv_agencia", type="boolean", invisible=True, readonly=True),
"conta_v": fields.related("banco_id", "conta", type="boolean", invisible=True, readonly=True),
"dv_conta_v": fields.related("banco_id", "dv_conta", type="boolean", invisible=True, readonly=True),
"operacao_v": fields.related("banco_id", "operacao", type="boolean", invisible=True, readonly=True),
}
def default_get(self, cr, uid, fields_list, context=None):
res = super(TransferirBolsaWizard, self).default_get(cr, uid, fields_list, context)
context = context or {}
if context.get("active_id", False):
if context.get("active_model", False) == "ud.monitoria.registro":
res["semestre_id"] = context.get("active_id")
elif context.get("active_model", False) == "ud.monitoria.documentos.discente":
doc = self.pool.get("ud.monitoria.documentos.discente").browse(cr, uid, context.get("active_id"),
context)
if doc.state != "bolsista":
raise osv.except_osv(u"Discente bolsista", u"O discente já é bolsista")
elif not doc.is_active:
raise osv.except_osv(u"Documento do discente inativo",
u"O discente não pode ser classificado como bolsista")
res["semestre_id"] = doc.disciplina_id.semestre_id.id
res["curso_id_de"] = doc.disciplina_id.curso_id.id
res["disciplina_id_de"] = doc.disciplina_id.id
res["tutor_de"] = doc.tutor
res["status_de"] = doc.state
res["doc_discente_id_de"] = doc.id
return res
def onchange_curso(self, cr, uid, ids, comp, semestre_id, curso_id, disciplina_id, context=None):
if not (semestre_id and curso_id):
return {"value": {"disciplina_id_" + comp: False}}
reg = self.pool.get("ud.monitoria.registro").read(cr, uid, semestre_id, ["processos_seletivos_ids"], context=context, load="_classic_write")
args = [("curso_id", "=", curso_id), ("processo_seletivo_id", "=", reg["processos_seletivos_ids"]), ("is_active", "=", True)]
disc = self.pool.get("ud.monitoria.disciplina").search(cr, uid, args, context=context)
res = {"domain": {"disciplina_id_" + comp: [("id", "in", disc)]}}
if not disc:
res["value"] = {"disciplina_id_" + comp: False}
return res
def onchange_disciplina(self, cr, uid, ids, comp, disciplina_id, doc_discente_id, context=None):
if disciplina_id and doc_discente_id:
doc_discente = self.pool.get("ud.monitoria.documentos.discente").browse(cr, uid, doc_discente_id, context)
doc_discente_id = doc_discente_id if doc_discente.disciplina_id.id == disciplina_id else False
return {
"value": {"doc_discente_id_" + comp: doc_discente_id}
}
return {"value": {"doc_discente_id_" + comp: False}}
def onchange_banco(self, cr, uid, ids, banco_id, context=None):
if banco_id:
banco = self.pool.get("ud.banco").read(cr, uid, banco_id, [
"agencia", "dv_agencia", "conta", "dv_conta", "operacao"
], context=context, load="_classic_write")
vals = {"agencia": False, "dv_agencia": False, "conta": False, "dv_conta": False, "operacao": False}
vals.update({"%s_v" % dado: banco.get(dado) for dado in banco.keys()})
return {"value": vals}
return {"value": {"agencia_v": False, "dv_agencia_v": False, "conta_v": False, "dv_conta_v": False,"operacao_v": False,
"agencia": False, "dv_agencia": False, "conta": False, "dv_conta": False, "operacao": False}}
def botao_transferir(self, cr, uid, ids, context=None):
perfil_model = self.pool.get("ud.perfil")
for transf in self.browse(cr, uid, ids, context):
matricula = transf.doc_discente_id_para.discente_id.matricula
            perfil = False  # guard: avoids a NameError below when papel_ids is empty
            for perfil in transf.doc_discente_id_para.discente_id.pessoa_id.papel_ids:
if perfil.matricula == matricula and perfil.tipo == "a":
if perfil.is_bolsista:
raise osv.except_osv(
u"Discente bolsista",
u"O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"".format(
transf.doc_discente_id_para.discente_id.pessoa_id.name, matricula,
TIPOS_BOLSA[perfil.tipo_bolsa]
)
)
break
if not perfil:
raise osv.except_osv(
u"Perfil excluído",
u"O perfil do discente para a matrícula \"%s\" não existe ou foi excluído" % matricula or ""
)
matricula = transf.doc_discente_id_de.discente_id.matricula
for perfil_de in transf.doc_discente_id_de.discente_id.pessoa_id.papel_ids:
                if perfil_de.matricula == matricula and perfil_de.tipo == "a":
break
responsavel = self.pool.get("ud.employee").search(cr, SUPERUSER_ID, [("user_id", "=", uid)], limit=2)
if not responsavel:
raise osv.except_osv(
u"Registro Inexistente",
u"Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo"
)
if len(responsavel) > 1:
raise osv.except_osv(
u"Multiplos vínculos",
u"Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo"
)
valor = perfil_de.valor_bolsa
perfil_model.write(cr, SUPERUSER_ID, perfil.id, {
"is_bolsista": True, "tipo_bolsa": "m", "valor_bolsa": valor
})
perfil_model.write(cr, SUPERUSER_ID, perfil_de.id, {
"is_bolsista": False, "tipo_bolsa": False, "valor_bolsa": False
})
transf.doc_discente_id_de.write({"state": "n_bolsista"})
transf.doc_discente_id_para.write({"state": "bolsista", "is_active": True})
get_banco(self, cr, transf, transf.doc_discente_id_para.discente_id.pessoa_id.id, context)
evento = {
"responsavel_id": responsavel[0],
"name": u"Transferência de bolsa",
"envolvidos_ids": [(4, transf.doc_discente_id_de.discente_id.pessoa_id.id),
(4, transf.doc_discente_id_para.discente_id.pessoa_id.id)],
"descricao": u"Transferência de bolsa no valor de R$ %(valor)s do discente %(discente_de)s sob matrícula "
u"%(matricula_de)s para o(a) discente \"%(discente_para)s\" sob matrícula"
u"\"%(matricula_para)s\"." % {
"valor": valor, "discente_de": transf.doc_discente_id_de.discente_id.pessoa_id.name.upper(),
"matricula_de": perfil_de.matricula,
"discente_para": transf.doc_discente_id_de.discente_id.pessoa_id.name.upper(),
"matricula_para": perfil_de.matricula
}
}
transf.semestre_id.write({"eventos_ids": [(0, 0, evento)]})
return True
class RemoverBolsaWizard(osv.TransientModel):
_name = "ud.monitoria.bolsa.remover.wizard"
_description = u"Remoção de bolsa de discente"
_columns = {
"semestre_id": fields.many2one("ud.monitoria.registro", u"Semestre", required=True, readonly=True),
"curso_id": fields.many2one("ud.curso", u"Curso", required=True, domain="[('is_active', '=', True)]"),
"disciplina_id": fields.many2one("ud.monitoria.disciplina", u"Disciplinas", required=True,
domain="[('id', '=', False)]"),
"tutor": fields.boolean(u"Tutor?"),
"doc_discente_id": fields.many2one("ud.monitoria.documentos.discente", u"Discente", required=True,
domain="[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), "
"('is_active', '=', True), ('state', '=', 'bolsista')]"),
}
def default_get(self, cr, uid, fields_list, context=None):
res = super(RemoverBolsaWizard, self).default_get(cr, uid, fields_list, context)
context = context or {}
if context.get("active_id", False):
if context.get("active_model", False) == "ud.monitoria.registro":
res["semestre_id"] = context.get("active_id")
elif context.get("active_model", False) == "ud.monitoria.documentos.discente":
doc = self.pool.get("ud.monitoria.documentos.discente").browse(cr, uid, context.get("active_id"), context)
if doc.state != "bolsista":
raise osv.except_osv(u"Discente não bolsista", u"O discente não é bolsista")
elif not doc.is_active:
raise osv.except_osv(u"Documento do discente inativo",
u"Não é possível alterar o status de discentes inativos")
res["semestre_id"] = doc.disciplina_id.semestre_id.id
res["curso_id"] = doc.disciplina_id.curso_id.id
res["disciplina_id"] = doc.disciplina_id.id
res["tutor"] = doc.tutor
res["doc_discente_id"] = doc.id
return res
def onchange_curso(self, cr, uid, ids, semestre_id, curso_id, disciplina_id, context=None):
if not (semestre_id and curso_id):
return {"value": {"disciplina_id": False}}
reg = self.pool.get("ud.monitoria.registro").read(cr, uid, semestre_id, ["processos_seletivos_ids"], context=context, load="_classic_write")
args = [("curso_id", "=", curso_id), ("processo_seletivo_id", "=", reg["processos_seletivos_ids"]), ("is_active", "=", True)]
disc = self.pool.get("ud.monitoria.disciplina").search(cr, uid, args, context=context)
res = {"domain": {"disciplina_id": [("id", "in", disc)]}}
if not disc:
res["value"] = {"disciplina_id": False}
return res
def onchange_disciplina(self, cr, uid, ids, disciplina_id, doc_discente_id, context=None):
if disciplina_id and doc_discente_id:
doc_discente = self.pool.get("ud.monitoria.documentos.discente").browse(cr, uid, doc_discente_id, context)
doc_discente_id = doc_discente_id if doc_discente.disciplina_id.id == disciplina_id else False
return {
"value": {"doc_discente_id": doc_discente_id}
}
return {"value": {"doc_discente_id": False}}
def botao_remover(self, cr, uid, ids, context=None):
perfil_model = self.pool.get("ud.perfil")
pessoa_model = self.pool.get("ud.employee")
for rem in self.browse(cr, uid, ids, context):
responsavel = pessoa_model.search(cr, SUPERUSER_ID, [("user_id", "=", uid)], limit=2)
if not responsavel:
raise osv.except_osv(
u"Registro Inexistente",
u"Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo"
)
if len(responsavel) > 1:
raise osv.except_osv(
u"Multiplos vínculos",
u"Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo"
)
perfil = rem.doc_discente_id.inscricao_id.perfil_id
perfil_model.write(cr, SUPERUSER_ID, perfil.id, {
"is_bolsista": False, "tipo_bolsa": False, "valor_bolsa": False
})
rem.doc_discente_id.write({"state": "n_bolsista"})
evento = {
"responsavel_id": responsavel[0],
"name": u"Remoção de bolsa: \"%s\"" % rem.doc_discente_id.discente_id.name,
"envolvidos_ids": [(4, rem.doc_discente_id.discente_id.id)],
"descricao": u"A bolsa do discente \"%s\" sob matrícula \"%s\" foi removida." % (
rem.doc_discente_id.discente_id.name.upper(), perfil.matricula
)
}
rem.semestre_id.write({"eventos_ids": [(0, 0, evento)]})
return True
|
normal
|
{
"blob_id": "fd877f5952c1fc0b2115d0950a066501ee7545f8",
"index": 4150,
"step-1": "<mask token>\n\n\nclass AdicionarBolsaWizard(osv.TransientModel):\n <mask token>\n <mask token>\n <mask token>\n\n def _bolsas(self, cr, uid, ids, campos, args, context=None):\n oferta_model = self.pool.get('ud.monitoria.oferta.disciplina')\n res = {}\n for add in self.browse(cr, uid, ids, context):\n res[add.id] = add.disciplina_id.bolsas\n return res\n <mask token>\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(AdicionarBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n res['status'] = 'n_bolsista'\n res['valor_bolsa'] = 400.0\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state == 'bolsista':\n raise osv.except_osv(u'Discente bolsista',\n u'O discente já é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'Não é possível alterar o status de discentes inativos'\n )\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id'] = doc.disciplina_id.curso_id.id\n res['disciplina_id'] = doc.disciplina_id.id\n res['tutor'] = doc.tutor\n res['status'] = doc.state\n res['doc_discente_id'] = doc.id\n return res\n <mask token>\n\n def onchange_disciplina(self, cr, uid, ids, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id:\n if doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n disciplina_id = self.pool.get('ud.monitoria.disciplina').browse(cr,\n uid, disciplina_id, context)\n return {'value': {'doc_discente_id': doc_discente_id, 'bolsas':\n disciplina_id.bolsas}}\n return {'value': {'doc_discente_id': False, 'bolsas': 0}}\n <mask token>\n <mask token>\n\n def botao_adicionar(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n for add in self.browse(cr, uid, ids, context):\n if add.bolsas == 0:\n raise osv.except_osv(u'Bolsas Insuficientes',\n u'Não há bolsas disponíveis para essa disciplina')\n elif not add.doc_discente_id.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'O discente não pode ser classificado como bolsista')\n if add.doc_discente_id.inscricao_id.perfil_id.is_bolsista:\n raise osv.except_osv(u'Discente bolsista',\n u'O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"'\n .format(add.doc_discente_id.discente_id.name, add.\n doc_discente_id.inscricao_id.perfil_id.matricula,\n TIPOS_BOLSA[add.doc_discente_id.inscricao_id.perfil_id.\n tipo_bolsa]))\n responsavel = self.pool.get('ud.employee').search(cr,\n SUPERUSER_ID, [('user_id', '=', uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado a uma pessoa no núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n perfil_model.write(cr, SUPERUSER_ID, add.doc_discente_id.\n inscricao_id.perfil_id.id, {'is_bolsista': True,\n 'tipo_bolsa': 'm', 'valor_bolsa': ('%.2f' % add.valor_bolsa\n 
).replace('.', ',')})\n if not add.dados_bancarios_id:\n dados_bancarios = get_banco(self, cr, add, add.\n doc_discente_id.discente_id.id, context)\n else:\n dados_bancarios = add.dados_bancarios_id.id\n add.doc_discente_id.write({'state': 'bolsista',\n 'dados_bancarios_id': dados_bancarios})\n evento = {'responsavel_id': responsavel[0], 'name': \n u'Adição de bolsa: \"%s\"' % add.doc_discente_id.discente_id.\n name, 'envolvidos_ids': [(4, add.doc_discente_id.\n discente_id.id)], 'descricao': \n u'Uma bolsa de R$ %s foi vinculada para o(a) discente \"%s\" sob matrícula \"%s\".'\n % (('%.2f' % add.valor_bolsa).replace('.', ','), add.\n doc_discente_id.discente_id.name.upper(), add.\n doc_discente_id.inscricao_id.perfil_id.matricula)}\n add.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n\n\nclass TransferirBolsaWizard(osv.TransientModel):\n _name = 'ud.monitoria.bolsa.transferir.wizard'\n _description = u'Transferência de bolsa de monitoria (UD)'\n _STATES = [('n_bolsista', u'Não Bolsista'), ('reserva',\n u'Cadastro de Reserva')]\n _columns = {'semestre_id': fields.many2one('ud.monitoria.registro',\n u'Semestre', required=True, readonly=True), 'curso_id_de': fields.\n many2one('ud.curso', u'Curso', required=True, domain=\n \"[('is_active', '=', True)]\"), 'disciplina_id_de': fields.many2one(\n 'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=\n \"[('id', '=', False)]\"), 'tutor_de': fields.boolean(u'Tutor?'),\n 'doc_discente_id_de': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('is_active', '=', True), ('state', '=', 'bolsista'), ('disciplina_id', '=', disciplina_id_de), ('tutor', '=', tutor_de)]\"\n ), 'curso_id_para': fields.many2one('ud.curso', u'Curso', required=\n True, domain=\"[('is_active', '=', True)]\"), 'disciplina_id_para':\n fields.many2one('ud.monitoria.disciplina', u'Disciplinas', required\n =True, domain=\n \"[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id_para), ('is_active', '=', True)]\"\n ), 'tutor_para': fields.boolean(u'Tutor?'), 'status_para': fields.\n selection(_STATES, u'Status', required=True),\n 'doc_discente_id_para': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('is_active', '=', True), ('state', '=', status_para), ('disciplina_id', '=', disciplina_id_para), ('tutor', '=', tutor_para)]\"\n ), 'banco_id': fields.many2one('ud.banco', u'Banco', ondelete=\n 'restrict'), 'agencia': fields.char(u'Agência', size=4, help=\n u'Número da Agência'), 'dv_agencia': fields.char(u'DV Agência',\n size=2, help=u'Dígito verificador da Agência'), 'conta': fields.\n char(u'Conta', size=10, help=u'Número da Conta'), 'dv_conta':\n fields.char(u'DV Conta', size=1, help=\n u'Dígito verificador da Conta'), 'operacao': fields.char(\n u'Operação', size=3, help=u'Tipo de conta'), 'agencia_v': fields.\n related('banco_id', 'agencia', type='boolean', invisible=True,\n readonly=True), 'dv_agencia_v': fields.related('banco_id',\n 'dv_agencia', type='boolean', invisible=True, readonly=True),\n 'conta_v': fields.related('banco_id', 'conta', type='boolean',\n invisible=True, readonly=True), 'dv_conta_v': fields.related(\n 'banco_id', 'dv_conta', type='boolean', invisible=True, readonly=\n True), 'operacao_v': fields.related('banco_id', 'operacao', type=\n 'boolean', invisible=True, readonly=True)}\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(TransferirBolsaWizard, self).default_get(cr, uid,\n fields_list, 
context)\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state != 'bolsista':\n raise osv.except_osv(u'Discente bolsista',\n u'O discente já é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'O discente não pode ser classificado como bolsista')\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id_de'] = doc.disciplina_id.curso_id.id\n res['disciplina_id_de'] = doc.disciplina_id.id\n res['tutor_de'] = doc.tutor\n res['status_de'] = doc.state\n res['doc_discente_id_de'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, comp, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {('disciplina_id_' + comp): False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {('disciplina_id_' + comp): [('id', 'in', disc)]}}\n if not disc:\n res['value'] = {('disciplina_id_' + comp): False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, comp, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id and doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n return {'value': {('doc_discente_id_' + comp): doc_discente_id}}\n return {'value': {('doc_discente_id_' + comp): False}}\n\n def onchange_banco(self, cr, uid, ids, banco_id, context=None):\n if banco_id:\n banco = self.pool.get('ud.banco').read(cr, uid, banco_id, [\n 'agencia', 'dv_agencia', 'conta', 'dv_conta', 'operacao'],\n context=context, load='_classic_write')\n vals = {'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}\n vals.update({('%s_v' % dado): banco.get(dado) for dado in banco\n .keys()})\n return {'value': vals}\n return {'value': {'agencia_v': False, 'dv_agencia_v': False,\n 'conta_v': False, 'dv_conta_v': False, 'operacao_v': False,\n 'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}}\n\n def botao_transferir(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n for transf in self.browse(cr, uid, ids, context):\n matricula = transf.doc_discente_id_para.discente_id.matricula\n for perfil in transf.doc_discente_id_para.discente_id.pessoa_id.papel_ids:\n if perfil.matricula == matricula and perfil.tipo == 'a':\n if perfil.is_bolsista:\n raise osv.except_osv(u'Discente bolsista',\n u'O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"'\n .format(transf.doc_discente_id_para.discente_id\n .pessoa_id.name, matricula, TIPOS_BOLSA[perfil.\n tipo_bolsa]))\n break\n if not perfil:\n raise osv.except_osv(u'Perfil excluído', \n u'O perfil do discente para a matrícula \"%s\" não existe ou foi excluído'\n % matricula or 
'')\n matricula = transf.doc_discente_id_de.discente_id.matricula\n for perfil_de in transf.doc_discente_id_de.discente_id.pessoa_id.papel_ids:\n if perfil.matricula == matricula and perfil.tipo == 'a':\n break\n responsavel = self.pool.get('ud.employee').search(cr,\n SUPERUSER_ID, [('user_id', '=', uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n valor = perfil_de.valor_bolsa\n perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':\n True, 'tipo_bolsa': 'm', 'valor_bolsa': valor})\n perfil_model.write(cr, SUPERUSER_ID, perfil_de.id, {\n 'is_bolsista': False, 'tipo_bolsa': False, 'valor_bolsa': \n False})\n transf.doc_discente_id_de.write({'state': 'n_bolsista'})\n transf.doc_discente_id_para.write({'state': 'bolsista',\n 'is_active': True})\n get_banco(self, cr, transf, transf.doc_discente_id_para.\n discente_id.pessoa_id.id, context)\n evento = {'responsavel_id': responsavel[0], 'name':\n u'Transferência de bolsa', 'envolvidos_ids': [(4, transf.\n doc_discente_id_de.discente_id.pessoa_id.id), (4, transf.\n doc_discente_id_para.discente_id.pessoa_id.id)],\n 'descricao': \n u'Transferência de bolsa no valor de R$ %(valor)s do discente %(discente_de)s sob matrícula %(matricula_de)s para o(a) discente \"%(discente_para)s\" sob matrícula\"%(matricula_para)s\".'\n % {'valor': valor, 'discente_de': transf.\n doc_discente_id_de.discente_id.pessoa_id.name.upper(),\n 'matricula_de': perfil_de.matricula, 'discente_para':\n transf.doc_discente_id_de.discente_id.pessoa_id.name.upper(\n ), 'matricula_para': perfil_de.matricula}}\n transf.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n\n\nclass RemoverBolsaWizard(osv.TransientModel):\n _name = 'ud.monitoria.bolsa.remover.wizard'\n _description = u'Remoção de bolsa de discente'\n _columns = {'semestre_id': fields.many2one('ud.monitoria.registro',\n u'Semestre', required=True, readonly=True), 'curso_id': fields.\n many2one('ud.curso', u'Curso', required=True, domain=\n \"[('is_active', '=', True)]\"), 'disciplina_id': fields.many2one(\n 'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=\n \"[('id', '=', False)]\"), 'tutor': fields.boolean(u'Tutor?'),\n 'doc_discente_id': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), ('is_active', '=', True), ('state', '=', 'bolsista')]\"\n )}\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(RemoverBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state != 'bolsista':\n raise osv.except_osv(u'Discente não bolsista',\n u'O discente não é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'Não é possível alterar o status de discentes inativos'\n )\n res['semestre_id'] = 
doc.disciplina_id.semestre_id.id\n res['curso_id'] = doc.disciplina_id.curso_id.id\n res['disciplina_id'] = doc.disciplina_id.id\n res['tutor'] = doc.tutor\n res['doc_discente_id'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {'disciplina_id': False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {'disciplina_id': [('id', 'in', disc)]}}\n if not disc:\n res['value'] = {'disciplina_id': False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id and doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n return {'value': {'doc_discente_id': doc_discente_id}}\n return {'value': {'doc_discente_id': False}}\n\n def botao_remover(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n pessoa_model = self.pool.get('ud.employee')\n for rem in self.browse(cr, uid, ids, context):\n responsavel = pessoa_model.search(cr, SUPERUSER_ID, [('user_id',\n '=', uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n perfil = rem.doc_discente_id.inscricao_id.perfil_id\n perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':\n False, 'tipo_bolsa': False, 'valor_bolsa': False})\n rem.doc_discente_id.write({'state': 'n_bolsista'})\n evento = {'responsavel_id': responsavel[0], 'name': \n u'Remoção de bolsa: \"%s\"' % rem.doc_discente_id.discente_id\n .name, 'envolvidos_ids': [(4, rem.doc_discente_id.\n discente_id.id)], 'descricao': \n u'A bolsa do discente \"%s\" sob matrícula \"%s\" foi removida.' %\n (rem.doc_discente_id.discente_id.name.upper(), perfil.\n matricula)}\n rem.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n",
"step-2": "<mask token>\n\n\nclass AdicionarBolsaWizard(osv.TransientModel):\n <mask token>\n <mask token>\n <mask token>\n\n def _bolsas(self, cr, uid, ids, campos, args, context=None):\n oferta_model = self.pool.get('ud.monitoria.oferta.disciplina')\n res = {}\n for add in self.browse(cr, uid, ids, context):\n res[add.id] = add.disciplina_id.bolsas\n return res\n <mask token>\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(AdicionarBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n res['status'] = 'n_bolsista'\n res['valor_bolsa'] = 400.0\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state == 'bolsista':\n raise osv.except_osv(u'Discente bolsista',\n u'O discente já é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'Não é possível alterar o status de discentes inativos'\n )\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id'] = doc.disciplina_id.curso_id.id\n res['disciplina_id'] = doc.disciplina_id.id\n res['tutor'] = doc.tutor\n res['status'] = doc.state\n res['doc_discente_id'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {'disciplina_id': False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {'disciplina_id': [('id', 'in', disc)]}}\n if not disc:\n res['value'] = {'disciplina_id': False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id:\n if doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n disciplina_id = self.pool.get('ud.monitoria.disciplina').browse(cr,\n uid, disciplina_id, context)\n return {'value': {'doc_discente_id': doc_discente_id, 'bolsas':\n disciplina_id.bolsas}}\n return {'value': {'doc_discente_id': False, 'bolsas': 0}}\n\n def onchange_doc_discente(self, cr, uid, ids, doc_discente_id,\n dados_bancarios_id, context=None):\n if doc_discente_id:\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(cr,\n uid, doc_discente_id, context)\n if not dados_bancarios_id:\n dados_bancarios_id = getattr(doc.dados_bancarios_id, 'id', \n False)\n return {'value': {'dados_bancarios_id': dados_bancarios_id},\n 'domain': {'dados_bancarios_id': [('ud_conta_id', '=', doc.\n discente_id.id)]}}\n return {'value': {'dados_bancarios_id': False}, 'domain': {\n 'dados_bancarios_id': [('id', '=', False)]}}\n <mask token>\n\n def botao_adicionar(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n for add in self.browse(cr, uid, ids, context):\n if add.bolsas == 0:\n raise 
osv.except_osv(u'Bolsas Insuficientes',\n u'Não há bolsas disponíveis para essa disciplina')\n elif not add.doc_discente_id.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'O discente não pode ser classificado como bolsista')\n if add.doc_discente_id.inscricao_id.perfil_id.is_bolsista:\n raise osv.except_osv(u'Discente bolsista',\n u'O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"'\n .format(add.doc_discente_id.discente_id.name, add.\n doc_discente_id.inscricao_id.perfil_id.matricula,\n TIPOS_BOLSA[add.doc_discente_id.inscricao_id.perfil_id.\n tipo_bolsa]))\n responsavel = self.pool.get('ud.employee').search(cr,\n SUPERUSER_ID, [('user_id', '=', uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado a uma pessoa no núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n perfil_model.write(cr, SUPERUSER_ID, add.doc_discente_id.\n inscricao_id.perfil_id.id, {'is_bolsista': True,\n 'tipo_bolsa': 'm', 'valor_bolsa': ('%.2f' % add.valor_bolsa\n ).replace('.', ',')})\n if not add.dados_bancarios_id:\n dados_bancarios = get_banco(self, cr, add, add.\n doc_discente_id.discente_id.id, context)\n else:\n dados_bancarios = add.dados_bancarios_id.id\n add.doc_discente_id.write({'state': 'bolsista',\n 'dados_bancarios_id': dados_bancarios})\n evento = {'responsavel_id': responsavel[0], 'name': \n u'Adição de bolsa: \"%s\"' % add.doc_discente_id.discente_id.\n name, 'envolvidos_ids': [(4, add.doc_discente_id.\n discente_id.id)], 'descricao': \n u'Uma bolsa de R$ %s foi vinculada para o(a) discente \"%s\" sob matrícula \"%s\".'\n % (('%.2f' % add.valor_bolsa).replace('.', ','), add.\n doc_discente_id.discente_id.name.upper(), add.\n doc_discente_id.inscricao_id.perfil_id.matricula)}\n add.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n\n\nclass TransferirBolsaWizard(osv.TransientModel):\n _name = 'ud.monitoria.bolsa.transferir.wizard'\n _description = u'Transferência de bolsa de monitoria (UD)'\n _STATES = [('n_bolsista', u'Não Bolsista'), ('reserva',\n u'Cadastro de Reserva')]\n _columns = {'semestre_id': fields.many2one('ud.monitoria.registro',\n u'Semestre', required=True, readonly=True), 'curso_id_de': fields.\n many2one('ud.curso', u'Curso', required=True, domain=\n \"[('is_active', '=', True)]\"), 'disciplina_id_de': fields.many2one(\n 'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=\n \"[('id', '=', False)]\"), 'tutor_de': fields.boolean(u'Tutor?'),\n 'doc_discente_id_de': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('is_active', '=', True), ('state', '=', 'bolsista'), ('disciplina_id', '=', disciplina_id_de), ('tutor', '=', tutor_de)]\"\n ), 'curso_id_para': fields.many2one('ud.curso', u'Curso', required=\n True, domain=\"[('is_active', '=', True)]\"), 'disciplina_id_para':\n fields.many2one('ud.monitoria.disciplina', u'Disciplinas', required\n =True, domain=\n \"[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id_para), ('is_active', '=', True)]\"\n ), 'tutor_para': fields.boolean(u'Tutor?'), 'status_para': fields.\n selection(_STATES, u'Status', required=True),\n 'doc_discente_id_para': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('is_active', '=', 
True), ('state', '=', status_para), ('disciplina_id', '=', disciplina_id_para), ('tutor', '=', tutor_para)]\"\n ), 'banco_id': fields.many2one('ud.banco', u'Banco', ondelete=\n 'restrict'), 'agencia': fields.char(u'Agência', size=4, help=\n u'Número da Agência'), 'dv_agencia': fields.char(u'DV Agência',\n size=2, help=u'Dígito verificador da Agência'), 'conta': fields.\n char(u'Conta', size=10, help=u'Número da Conta'), 'dv_conta':\n fields.char(u'DV Conta', size=1, help=\n u'Dígito verificador da Conta'), 'operacao': fields.char(\n u'Operação', size=3, help=u'Tipo de conta'), 'agencia_v': fields.\n related('banco_id', 'agencia', type='boolean', invisible=True,\n readonly=True), 'dv_agencia_v': fields.related('banco_id',\n 'dv_agencia', type='boolean', invisible=True, readonly=True),\n 'conta_v': fields.related('banco_id', 'conta', type='boolean',\n invisible=True, readonly=True), 'dv_conta_v': fields.related(\n 'banco_id', 'dv_conta', type='boolean', invisible=True, readonly=\n True), 'operacao_v': fields.related('banco_id', 'operacao', type=\n 'boolean', invisible=True, readonly=True)}\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(TransferirBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state != 'bolsista':\n raise osv.except_osv(u'Discente bolsista',\n u'O discente já é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'O discente não pode ser classificado como bolsista')\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id_de'] = doc.disciplina_id.curso_id.id\n res['disciplina_id_de'] = doc.disciplina_id.id\n res['tutor_de'] = doc.tutor\n res['status_de'] = doc.state\n res['doc_discente_id_de'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, comp, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {('disciplina_id_' + comp): False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {('disciplina_id_' + comp): [('id', 'in', disc)]}}\n if not disc:\n res['value'] = {('disciplina_id_' + comp): False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, comp, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id and doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n return {'value': {('doc_discente_id_' + comp): doc_discente_id}}\n return {'value': {('doc_discente_id_' + comp): False}}\n\n def onchange_banco(self, cr, uid, ids, banco_id, context=None):\n if banco_id:\n banco = self.pool.get('ud.banco').read(cr, uid, banco_id, [\n 'agencia', 'dv_agencia', 'conta', 'dv_conta', 'operacao'],\n 
context=context, load='_classic_write')\n vals = {'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}\n vals.update({('%s_v' % dado): banco.get(dado) for dado in banco\n .keys()})\n return {'value': vals}\n return {'value': {'agencia_v': False, 'dv_agencia_v': False,\n 'conta_v': False, 'dv_conta_v': False, 'operacao_v': False,\n 'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}}\n\n def botao_transferir(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n for transf in self.browse(cr, uid, ids, context):\n matricula = transf.doc_discente_id_para.discente_id.matricula\n for perfil in transf.doc_discente_id_para.discente_id.pessoa_id.papel_ids:\n if perfil.matricula == matricula and perfil.tipo == 'a':\n if perfil.is_bolsista:\n raise osv.except_osv(u'Discente bolsista',\n u'O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"'\n .format(transf.doc_discente_id_para.discente_id\n .pessoa_id.name, matricula, TIPOS_BOLSA[perfil.\n tipo_bolsa]))\n break\n if not perfil:\n raise osv.except_osv(u'Perfil excluído', \n u'O perfil do discente para a matrícula \"%s\" não existe ou foi excluído'\n % matricula or '')\n matricula = transf.doc_discente_id_de.discente_id.matricula\n for perfil_de in transf.doc_discente_id_de.discente_id.pessoa_id.papel_ids:\n if perfil.matricula == matricula and perfil.tipo == 'a':\n break\n responsavel = self.pool.get('ud.employee').search(cr,\n SUPERUSER_ID, [('user_id', '=', uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n valor = perfil_de.valor_bolsa\n perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':\n True, 'tipo_bolsa': 'm', 'valor_bolsa': valor})\n perfil_model.write(cr, SUPERUSER_ID, perfil_de.id, {\n 'is_bolsista': False, 'tipo_bolsa': False, 'valor_bolsa': \n False})\n transf.doc_discente_id_de.write({'state': 'n_bolsista'})\n transf.doc_discente_id_para.write({'state': 'bolsista',\n 'is_active': True})\n get_banco(self, cr, transf, transf.doc_discente_id_para.\n discente_id.pessoa_id.id, context)\n evento = {'responsavel_id': responsavel[0], 'name':\n u'Transferência de bolsa', 'envolvidos_ids': [(4, transf.\n doc_discente_id_de.discente_id.pessoa_id.id), (4, transf.\n doc_discente_id_para.discente_id.pessoa_id.id)],\n 'descricao': \n u'Transferência de bolsa no valor de R$ %(valor)s do discente %(discente_de)s sob matrícula %(matricula_de)s para o(a) discente \"%(discente_para)s\" sob matrícula\"%(matricula_para)s\".'\n % {'valor': valor, 'discente_de': transf.\n doc_discente_id_de.discente_id.pessoa_id.name.upper(),\n 'matricula_de': perfil_de.matricula, 'discente_para':\n transf.doc_discente_id_de.discente_id.pessoa_id.name.upper(\n ), 'matricula_para': perfil_de.matricula}}\n transf.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n\n\nclass RemoverBolsaWizard(osv.TransientModel):\n _name = 'ud.monitoria.bolsa.remover.wizard'\n _description = u'Remoção de bolsa de discente'\n _columns = {'semestre_id': fields.many2one('ud.monitoria.registro',\n u'Semestre', required=True, readonly=True), 'curso_id': fields.\n many2one('ud.curso', u'Curso', required=True, domain=\n \"[('is_active', '=', 
True)]\"), 'disciplina_id': fields.many2one(\n 'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=\n \"[('id', '=', False)]\"), 'tutor': fields.boolean(u'Tutor?'),\n 'doc_discente_id': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), ('is_active', '=', True), ('state', '=', 'bolsista')]\"\n )}\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(RemoverBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state != 'bolsista':\n raise osv.except_osv(u'Discente não bolsista',\n u'O discente não é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'Não é possível alterar o status de discentes inativos'\n )\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id'] = doc.disciplina_id.curso_id.id\n res['disciplina_id'] = doc.disciplina_id.id\n res['tutor'] = doc.tutor\n res['doc_discente_id'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {'disciplina_id': False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {'disciplina_id': [('id', 'in', disc)]}}\n if not disc:\n res['value'] = {'disciplina_id': False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id and doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n return {'value': {'doc_discente_id': doc_discente_id}}\n return {'value': {'doc_discente_id': False}}\n\n def botao_remover(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n pessoa_model = self.pool.get('ud.employee')\n for rem in self.browse(cr, uid, ids, context):\n responsavel = pessoa_model.search(cr, SUPERUSER_ID, [('user_id',\n '=', uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n perfil = rem.doc_discente_id.inscricao_id.perfil_id\n perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':\n False, 'tipo_bolsa': False, 'valor_bolsa': False})\n rem.doc_discente_id.write({'state': 'n_bolsista'})\n evento = {'responsavel_id': responsavel[0], 'name': \n u'Remoção de bolsa: \"%s\"' % rem.doc_discente_id.discente_id\n .name, 
'envolvidos_ids': [(4, rem.doc_discente_id.\n discente_id.id)], 'descricao': \n u'A bolsa do discente \"%s\" sob matrícula \"%s\" foi removida.' %\n (rem.doc_discente_id.discente_id.name.upper(), perfil.\n matricula)}\n rem.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n",
"step-3": "<mask token>\n\n\nclass AdicionarBolsaWizard(osv.TransientModel):\n _name = 'ud.monitoria.bolsa.adicionar.wizard'\n _description = u'Inclusão de bolsa de monitoria para discente (UD)'\n _STATES = [('n_bolsista', u'Não Bolsista'), ('reserva',\n u'Cadastro de Reserva')]\n\n def _bolsas(self, cr, uid, ids, campos, args, context=None):\n oferta_model = self.pool.get('ud.monitoria.oferta.disciplina')\n res = {}\n for add in self.browse(cr, uid, ids, context):\n res[add.id] = add.disciplina_id.bolsas\n return res\n _columns = {'semestre_id': fields.many2one('ud.monitoria.registro',\n u'Semestre', required=True, readonly=True), 'curso_id': fields.\n many2one('ud.curso', u'Curso', required=True, domain=\n \"[('is_active', '=', True)]\"), 'disciplina_id': fields.many2one(\n 'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=\n \"[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id), ('is_active', '=', True)]\"\n ), 'bolsas': fields.function(_bolsas, type='integer', string=\n u'Bolsas disponíveis', help=\n u'Número de bolsas disponíveis para a disciplina'), 'valor_bolsa':\n fields.float(u'Bolsa (R$)'), 'tutor': fields.boolean(u'Tutor?'),\n 'status': fields.selection(_STATES, u'Status', required=True),\n 'doc_discente_id': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), ('is_active', '=', True), ('state', '=', status)]\"\n ), 'dados_bancarios_id': fields.many2one('ud.dados.bancarios',\n u'Dados Bancários', domain=[('id', '=', False)]), 'banco_id':\n fields.many2one('ud.banco', u'Banco', ondelete='restrict'),\n 'agencia': fields.char(u'Agência', size=4, help=\n u'Número da Agência'), 'dv_agencia': fields.char(u'DV Agência',\n size=2, help=u'Dígito verificador da Agência'), 'conta': fields.\n char(u'Conta', size=10, help=u'Número da Conta'), 'dv_conta':\n fields.char(u'DV Conta', size=1, help=\n u'Dígito verificador da Conta'), 'operacao': fields.char(\n u'Operação', size=3, help=u'Tipo de conta'), 'agencia_v': fields.\n related('banco_id', 'agencia', type='boolean', invisible=True,\n readonly=True), 'dv_agencia_v': fields.related('banco_id',\n 'dv_agencia', type='boolean', invisible=True, readonly=True),\n 'conta_v': fields.related('banco_id', 'conta', type='boolean',\n invisible=True, readonly=True), 'dv_conta_v': fields.related(\n 'banco_id', 'dv_conta', type='boolean', invisible=True, readonly=\n True), 'operacao_v': fields.related('banco_id', 'operacao', type=\n 'boolean', invisible=True, readonly=True)}\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(AdicionarBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n res['status'] = 'n_bolsista'\n res['valor_bolsa'] = 400.0\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state == 'bolsista':\n raise osv.except_osv(u'Discente bolsista',\n u'O discente já é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'Não é possível alterar o status de discentes inativos'\n )\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id'] = doc.disciplina_id.curso_id.id\n res['disciplina_id'] 
= doc.disciplina_id.id\n res['tutor'] = doc.tutor\n res['status'] = doc.state\n res['doc_discente_id'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {'disciplina_id': False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {'disciplina_id': [('id', 'in', disc)]}}\n if not disc:\n res['value'] = {'disciplina_id': False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id:\n if doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n disciplina_id = self.pool.get('ud.monitoria.disciplina').browse(cr,\n uid, disciplina_id, context)\n return {'value': {'doc_discente_id': doc_discente_id, 'bolsas':\n disciplina_id.bolsas}}\n return {'value': {'doc_discente_id': False, 'bolsas': 0}}\n\n def onchange_doc_discente(self, cr, uid, ids, doc_discente_id,\n dados_bancarios_id, context=None):\n if doc_discente_id:\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(cr,\n uid, doc_discente_id, context)\n if not dados_bancarios_id:\n dados_bancarios_id = getattr(doc.dados_bancarios_id, 'id', \n False)\n return {'value': {'dados_bancarios_id': dados_bancarios_id},\n 'domain': {'dados_bancarios_id': [('ud_conta_id', '=', doc.\n discente_id.id)]}}\n return {'value': {'dados_bancarios_id': False}, 'domain': {\n 'dados_bancarios_id': [('id', '=', False)]}}\n\n def onchange_banco(self, cr, uid, ids, banco_id, context=None):\n if banco_id:\n banco = self.pool.get('ud.banco').read(cr, uid, banco_id, [\n 'agencia', 'dv_agencia', 'conta', 'dv_conta', 'operacao'],\n context=context, load='_classic_write')\n vals = {'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}\n vals.update({('%s_v' % dado): banco.get(dado) for dado in banco\n .keys()})\n return {'value': vals}\n return {'value': {'agencia_v': False, 'dv_agencia_v': False,\n 'conta_v': False, 'dv_conta_v': False, 'operacao_v': False,\n 'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}}\n\n def botao_adicionar(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n for add in self.browse(cr, uid, ids, context):\n if add.bolsas == 0:\n raise osv.except_osv(u'Bolsas Insuficientes',\n u'Não há bolsas disponíveis para essa disciplina')\n elif not add.doc_discente_id.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'O discente não pode ser classificado como bolsista')\n if add.doc_discente_id.inscricao_id.perfil_id.is_bolsista:\n raise osv.except_osv(u'Discente bolsista',\n u'O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"'\n .format(add.doc_discente_id.discente_id.name, add.\n doc_discente_id.inscricao_id.perfil_id.matricula,\n TIPOS_BOLSA[add.doc_discente_id.inscricao_id.perfil_id.\n tipo_bolsa]))\n responsavel = self.pool.get('ud.employee').search(cr,\n SUPERUSER_ID, [('user_id', '=', 
uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado a uma pessoa no núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n perfil_model.write(cr, SUPERUSER_ID, add.doc_discente_id.\n inscricao_id.perfil_id.id, {'is_bolsista': True,\n 'tipo_bolsa': 'm', 'valor_bolsa': ('%.2f' % add.valor_bolsa\n ).replace('.', ',')})\n if not add.dados_bancarios_id:\n dados_bancarios = get_banco(self, cr, add, add.\n doc_discente_id.discente_id.id, context)\n else:\n dados_bancarios = add.dados_bancarios_id.id\n add.doc_discente_id.write({'state': 'bolsista',\n 'dados_bancarios_id': dados_bancarios})\n evento = {'responsavel_id': responsavel[0], 'name': \n u'Adição de bolsa: \"%s\"' % add.doc_discente_id.discente_id.\n name, 'envolvidos_ids': [(4, add.doc_discente_id.\n discente_id.id)], 'descricao': \n u'Uma bolsa de R$ %s foi vinculada para o(a) discente \"%s\" sob matrícula \"%s\".'\n % (('%.2f' % add.valor_bolsa).replace('.', ','), add.\n doc_discente_id.discente_id.name.upper(), add.\n doc_discente_id.inscricao_id.perfil_id.matricula)}\n add.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n\n\nclass TransferirBolsaWizard(osv.TransientModel):\n _name = 'ud.monitoria.bolsa.transferir.wizard'\n _description = u'Transferência de bolsa de monitoria (UD)'\n _STATES = [('n_bolsista', u'Não Bolsista'), ('reserva',\n u'Cadastro de Reserva')]\n _columns = {'semestre_id': fields.many2one('ud.monitoria.registro',\n u'Semestre', required=True, readonly=True), 'curso_id_de': fields.\n many2one('ud.curso', u'Curso', required=True, domain=\n \"[('is_active', '=', True)]\"), 'disciplina_id_de': fields.many2one(\n 'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=\n \"[('id', '=', False)]\"), 'tutor_de': fields.boolean(u'Tutor?'),\n 'doc_discente_id_de': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('is_active', '=', True), ('state', '=', 'bolsista'), ('disciplina_id', '=', disciplina_id_de), ('tutor', '=', tutor_de)]\"\n ), 'curso_id_para': fields.many2one('ud.curso', u'Curso', required=\n True, domain=\"[('is_active', '=', True)]\"), 'disciplina_id_para':\n fields.many2one('ud.monitoria.disciplina', u'Disciplinas', required\n =True, domain=\n \"[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id_para), ('is_active', '=', True)]\"\n ), 'tutor_para': fields.boolean(u'Tutor?'), 'status_para': fields.\n selection(_STATES, u'Status', required=True),\n 'doc_discente_id_para': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('is_active', '=', True), ('state', '=', status_para), ('disciplina_id', '=', disciplina_id_para), ('tutor', '=', tutor_para)]\"\n ), 'banco_id': fields.many2one('ud.banco', u'Banco', ondelete=\n 'restrict'), 'agencia': fields.char(u'Agência', size=4, help=\n u'Número da Agência'), 'dv_agencia': fields.char(u'DV Agência',\n size=2, help=u'Dígito verificador da Agência'), 'conta': fields.\n char(u'Conta', size=10, help=u'Número da Conta'), 'dv_conta':\n fields.char(u'DV Conta', size=1, help=\n u'Dígito verificador da Conta'), 'operacao': fields.char(\n u'Operação', size=3, help=u'Tipo de conta'), 'agencia_v': fields.\n related('banco_id', 'agencia', type='boolean', invisible=True,\n readonly=True), 
'dv_agencia_v': fields.related('banco_id',\n 'dv_agencia', type='boolean', invisible=True, readonly=True),\n 'conta_v': fields.related('banco_id', 'conta', type='boolean',\n invisible=True, readonly=True), 'dv_conta_v': fields.related(\n 'banco_id', 'dv_conta', type='boolean', invisible=True, readonly=\n True), 'operacao_v': fields.related('banco_id', 'operacao', type=\n 'boolean', invisible=True, readonly=True)}\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(TransferirBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state != 'bolsista':\n raise osv.except_osv(u'Discente bolsista',\n u'O discente já é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'O discente não pode ser classificado como bolsista')\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id_de'] = doc.disciplina_id.curso_id.id\n res['disciplina_id_de'] = doc.disciplina_id.id\n res['tutor_de'] = doc.tutor\n res['status_de'] = doc.state\n res['doc_discente_id_de'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, comp, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {('disciplina_id_' + comp): False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {('disciplina_id_' + comp): [('id', 'in', disc)]}}\n if not disc:\n res['value'] = {('disciplina_id_' + comp): False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, comp, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id and doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n return {'value': {('doc_discente_id_' + comp): doc_discente_id}}\n return {'value': {('doc_discente_id_' + comp): False}}\n\n def onchange_banco(self, cr, uid, ids, banco_id, context=None):\n if banco_id:\n banco = self.pool.get('ud.banco').read(cr, uid, banco_id, [\n 'agencia', 'dv_agencia', 'conta', 'dv_conta', 'operacao'],\n context=context, load='_classic_write')\n vals = {'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}\n vals.update({('%s_v' % dado): banco.get(dado) for dado in banco\n .keys()})\n return {'value': vals}\n return {'value': {'agencia_v': False, 'dv_agencia_v': False,\n 'conta_v': False, 'dv_conta_v': False, 'operacao_v': False,\n 'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}}\n\n def botao_transferir(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n for transf in self.browse(cr, uid, ids, context):\n matricula = 
transf.doc_discente_id_para.discente_id.matricula\n for perfil in transf.doc_discente_id_para.discente_id.pessoa_id.papel_ids:\n if perfil.matricula == matricula and perfil.tipo == 'a':\n if perfil.is_bolsista:\n raise osv.except_osv(u'Discente bolsista',\n u'O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"'\n .format(transf.doc_discente_id_para.discente_id\n .pessoa_id.name, matricula, TIPOS_BOLSA[perfil.\n tipo_bolsa]))\n break\n if not perfil:\n raise osv.except_osv(u'Perfil excluído', \n u'O perfil do discente para a matrícula \"%s\" não existe ou foi excluído'\n % matricula or '')\n matricula = transf.doc_discente_id_de.discente_id.matricula\n for perfil_de in transf.doc_discente_id_de.discente_id.pessoa_id.papel_ids:\n if perfil.matricula == matricula and perfil.tipo == 'a':\n break\n responsavel = self.pool.get('ud.employee').search(cr,\n SUPERUSER_ID, [('user_id', '=', uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n valor = perfil_de.valor_bolsa\n perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':\n True, 'tipo_bolsa': 'm', 'valor_bolsa': valor})\n perfil_model.write(cr, SUPERUSER_ID, perfil_de.id, {\n 'is_bolsista': False, 'tipo_bolsa': False, 'valor_bolsa': \n False})\n transf.doc_discente_id_de.write({'state': 'n_bolsista'})\n transf.doc_discente_id_para.write({'state': 'bolsista',\n 'is_active': True})\n get_banco(self, cr, transf, transf.doc_discente_id_para.\n discente_id.pessoa_id.id, context)\n evento = {'responsavel_id': responsavel[0], 'name':\n u'Transferência de bolsa', 'envolvidos_ids': [(4, transf.\n doc_discente_id_de.discente_id.pessoa_id.id), (4, transf.\n doc_discente_id_para.discente_id.pessoa_id.id)],\n 'descricao': \n u'Transferência de bolsa no valor de R$ %(valor)s do discente %(discente_de)s sob matrícula %(matricula_de)s para o(a) discente \"%(discente_para)s\" sob matrícula\"%(matricula_para)s\".'\n % {'valor': valor, 'discente_de': transf.\n doc_discente_id_de.discente_id.pessoa_id.name.upper(),\n 'matricula_de': perfil_de.matricula, 'discente_para':\n transf.doc_discente_id_de.discente_id.pessoa_id.name.upper(\n ), 'matricula_para': perfil_de.matricula}}\n transf.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n\n\nclass RemoverBolsaWizard(osv.TransientModel):\n _name = 'ud.monitoria.bolsa.remover.wizard'\n _description = u'Remoção de bolsa de discente'\n _columns = {'semestre_id': fields.many2one('ud.monitoria.registro',\n u'Semestre', required=True, readonly=True), 'curso_id': fields.\n many2one('ud.curso', u'Curso', required=True, domain=\n \"[('is_active', '=', True)]\"), 'disciplina_id': fields.many2one(\n 'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=\n \"[('id', '=', False)]\"), 'tutor': fields.boolean(u'Tutor?'),\n 'doc_discente_id': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), ('is_active', '=', True), ('state', '=', 'bolsista')]\"\n )}\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(RemoverBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n context = context or {}\n if context.get('active_id', False):\n if 
context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state != 'bolsista':\n raise osv.except_osv(u'Discente não bolsista',\n u'O discente não é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'Não é possível alterar o status de discentes inativos'\n )\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id'] = doc.disciplina_id.curso_id.id\n res['disciplina_id'] = doc.disciplina_id.id\n res['tutor'] = doc.tutor\n res['doc_discente_id'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {'disciplina_id': False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {'disciplina_id': [('id', 'in', disc)]}}\n if not disc:\n res['value'] = {'disciplina_id': False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id and doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n return {'value': {'doc_discente_id': doc_discente_id}}\n return {'value': {'doc_discente_id': False}}\n\n def botao_remover(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n pessoa_model = self.pool.get('ud.employee')\n for rem in self.browse(cr, uid, ids, context):\n responsavel = pessoa_model.search(cr, SUPERUSER_ID, [('user_id',\n '=', uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n perfil = rem.doc_discente_id.inscricao_id.perfil_id\n perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':\n False, 'tipo_bolsa': False, 'valor_bolsa': False})\n rem.doc_discente_id.write({'state': 'n_bolsista'})\n evento = {'responsavel_id': responsavel[0], 'name': \n u'Remoção de bolsa: \"%s\"' % rem.doc_discente_id.discente_id\n .name, 'envolvidos_ids': [(4, rem.doc_discente_id.\n discente_id.id)], 'descricao': \n u'A bolsa do discente \"%s\" sob matrícula \"%s\" foi removida.' %\n (rem.doc_discente_id.discente_id.name.upper(), perfil.\n matricula)}\n rem.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n",
"step-4": "<mask token>\nTIPOS_BOLSA = dict(_TIPOS_BOLSA)\n\n\ndef get_banco(cls, cr, browse_record, usuario_id, context=None):\n dados_bancarios_model = cls.pool.get('ud.dados.bancarios')\n args = [('banco_id', '=', browse_record.banco_id.id)]\n if browse_record.agencia_v:\n args.append(('agencia', '=', browse_record.agencia))\n if browse_record.dv_agencia_v:\n args.append(('dv_agencia', '=', browse_record.dv_agencia))\n if browse_record.conta_v:\n args.append(('conta', '=', browse_record.conta))\n if browse_record.dv_conta_v:\n args.append(('dv_conta', '=', browse_record.dv_conta))\n if browse_record.operacao_v:\n args.append(('operacao', '=', browse_record.operacao))\n dados_bancarios = dados_bancarios_model.search(cr, SUPERUSER_ID, args,\n context=context)\n if dados_bancarios:\n dados_bancarios = dados_bancarios_model.browse(cr, SUPERUSER_ID,\n dados_bancarios[0])\n if not dados_bancarios.ud_conta_id:\n return dados_bancarios.id\n elif dados_bancarios.ud_conta_id.id == usuario_id:\n return dados_bancarios.id\n raise osv.except_osv(u'Dados Bancários duplicados',\n u'Outra pessoa já possui esses dados bancários!')\n dados = {'banco_id': browse_record.banco_id.id, 'agencia':\n browse_record.agencia, 'dv_agencia': browse_record.dv_agencia,\n 'conta': browse_record.conta, 'dv_conta': browse_record.dv_conta,\n 'operacao': browse_record.operacao, 'ud_conta_id': usuario_id}\n return dados_bancarios_model.create(cr, SUPERUSER_ID, dados, context=\n context)\n\n\nclass AdicionarBolsaWizard(osv.TransientModel):\n _name = 'ud.monitoria.bolsa.adicionar.wizard'\n _description = u'Inclusão de bolsa de monitoria para discente (UD)'\n _STATES = [('n_bolsista', u'Não Bolsista'), ('reserva',\n u'Cadastro de Reserva')]\n\n def _bolsas(self, cr, uid, ids, campos, args, context=None):\n oferta_model = self.pool.get('ud.monitoria.oferta.disciplina')\n res = {}\n for add in self.browse(cr, uid, ids, context):\n res[add.id] = add.disciplina_id.bolsas\n return res\n _columns = {'semestre_id': fields.many2one('ud.monitoria.registro',\n u'Semestre', required=True, readonly=True), 'curso_id': fields.\n many2one('ud.curso', u'Curso', required=True, domain=\n \"[('is_active', '=', True)]\"), 'disciplina_id': fields.many2one(\n 'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=\n \"[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id), ('is_active', '=', True)]\"\n ), 'bolsas': fields.function(_bolsas, type='integer', string=\n u'Bolsas disponíveis', help=\n u'Número de bolsas disponíveis para a disciplina'), 'valor_bolsa':\n fields.float(u'Bolsa (R$)'), 'tutor': fields.boolean(u'Tutor?'),\n 'status': fields.selection(_STATES, u'Status', required=True),\n 'doc_discente_id': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), ('is_active', '=', True), ('state', '=', status)]\"\n ), 'dados_bancarios_id': fields.many2one('ud.dados.bancarios',\n u'Dados Bancários', domain=[('id', '=', False)]), 'banco_id':\n fields.many2one('ud.banco', u'Banco', ondelete='restrict'),\n 'agencia': fields.char(u'Agência', size=4, help=\n u'Número da Agência'), 'dv_agencia': fields.char(u'DV Agência',\n size=2, help=u'Dígito verificador da Agência'), 'conta': fields.\n char(u'Conta', size=10, help=u'Número da Conta'), 'dv_conta':\n fields.char(u'DV Conta', size=1, help=\n u'Dígito verificador da Conta'), 'operacao': fields.char(\n u'Operação', size=3, help=u'Tipo de conta'), 'agencia_v': fields.\n 
related('banco_id', 'agencia', type='boolean', invisible=True,\n readonly=True), 'dv_agencia_v': fields.related('banco_id',\n 'dv_agencia', type='boolean', invisible=True, readonly=True),\n 'conta_v': fields.related('banco_id', 'conta', type='boolean',\n invisible=True, readonly=True), 'dv_conta_v': fields.related(\n 'banco_id', 'dv_conta', type='boolean', invisible=True, readonly=\n True), 'operacao_v': fields.related('banco_id', 'operacao', type=\n 'boolean', invisible=True, readonly=True)}\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(AdicionarBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n res['status'] = 'n_bolsista'\n res['valor_bolsa'] = 400.0\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state == 'bolsista':\n raise osv.except_osv(u'Discente bolsista',\n u'O discente já é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'Não é possível alterar o status de discentes inativos'\n )\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id'] = doc.disciplina_id.curso_id.id\n res['disciplina_id'] = doc.disciplina_id.id\n res['tutor'] = doc.tutor\n res['status'] = doc.state\n res['doc_discente_id'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {'disciplina_id': False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {'disciplina_id': [('id', 'in', disc)]}}\n if not disc:\n res['value'] = {'disciplina_id': False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id:\n if doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n disciplina_id = self.pool.get('ud.monitoria.disciplina').browse(cr,\n uid, disciplina_id, context)\n return {'value': {'doc_discente_id': doc_discente_id, 'bolsas':\n disciplina_id.bolsas}}\n return {'value': {'doc_discente_id': False, 'bolsas': 0}}\n\n def onchange_doc_discente(self, cr, uid, ids, doc_discente_id,\n dados_bancarios_id, context=None):\n if doc_discente_id:\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(cr,\n uid, doc_discente_id, context)\n if not dados_bancarios_id:\n dados_bancarios_id = getattr(doc.dados_bancarios_id, 'id', \n False)\n return {'value': {'dados_bancarios_id': dados_bancarios_id},\n 'domain': {'dados_bancarios_id': [('ud_conta_id', '=', doc.\n discente_id.id)]}}\n return {'value': {'dados_bancarios_id': False}, 'domain': {\n 'dados_bancarios_id': [('id', '=', False)]}}\n\n def onchange_banco(self, cr, uid, ids, banco_id, context=None):\n if banco_id:\n banco = 
self.pool.get('ud.banco').read(cr, uid, banco_id, [\n 'agencia', 'dv_agencia', 'conta', 'dv_conta', 'operacao'],\n context=context, load='_classic_write')\n vals = {'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}\n vals.update({('%s_v' % dado): banco.get(dado) for dado in banco\n .keys()})\n return {'value': vals}\n return {'value': {'agencia_v': False, 'dv_agencia_v': False,\n 'conta_v': False, 'dv_conta_v': False, 'operacao_v': False,\n 'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}}\n\n def botao_adicionar(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n for add in self.browse(cr, uid, ids, context):\n if add.bolsas == 0:\n raise osv.except_osv(u'Bolsas Insuficientes',\n u'Não há bolsas disponíveis para essa disciplina')\n elif not add.doc_discente_id.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'O discente não pode ser classificado como bolsista')\n if add.doc_discente_id.inscricao_id.perfil_id.is_bolsista:\n raise osv.except_osv(u'Discente bolsista',\n u'O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"'\n .format(add.doc_discente_id.discente_id.name, add.\n doc_discente_id.inscricao_id.perfil_id.matricula,\n TIPOS_BOLSA[add.doc_discente_id.inscricao_id.perfil_id.\n tipo_bolsa]))\n responsavel = self.pool.get('ud.employee').search(cr,\n SUPERUSER_ID, [('user_id', '=', uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado a uma pessoa no núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n perfil_model.write(cr, SUPERUSER_ID, add.doc_discente_id.\n inscricao_id.perfil_id.id, {'is_bolsista': True,\n 'tipo_bolsa': 'm', 'valor_bolsa': ('%.2f' % add.valor_bolsa\n ).replace('.', ',')})\n if not add.dados_bancarios_id:\n dados_bancarios = get_banco(self, cr, add, add.\n doc_discente_id.discente_id.id, context)\n else:\n dados_bancarios = add.dados_bancarios_id.id\n add.doc_discente_id.write({'state': 'bolsista',\n 'dados_bancarios_id': dados_bancarios})\n evento = {'responsavel_id': responsavel[0], 'name': \n u'Adição de bolsa: \"%s\"' % add.doc_discente_id.discente_id.\n name, 'envolvidos_ids': [(4, add.doc_discente_id.\n discente_id.id)], 'descricao': \n u'Uma bolsa de R$ %s foi vinculada para o(a) discente \"%s\" sob matrícula \"%s\".'\n % (('%.2f' % add.valor_bolsa).replace('.', ','), add.\n doc_discente_id.discente_id.name.upper(), add.\n doc_discente_id.inscricao_id.perfil_id.matricula)}\n add.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n\n\nclass TransferirBolsaWizard(osv.TransientModel):\n _name = 'ud.monitoria.bolsa.transferir.wizard'\n _description = u'Transferência de bolsa de monitoria (UD)'\n _STATES = [('n_bolsista', u'Não Bolsista'), ('reserva',\n u'Cadastro de Reserva')]\n _columns = {'semestre_id': fields.many2one('ud.monitoria.registro',\n u'Semestre', required=True, readonly=True), 'curso_id_de': fields.\n many2one('ud.curso', u'Curso', required=True, domain=\n \"[('is_active', '=', True)]\"), 'disciplina_id_de': fields.many2one(\n 'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=\n \"[('id', '=', False)]\"), 'tutor_de': fields.boolean(u'Tutor?'),\n 'doc_discente_id_de': fields.many2one(\n 
'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('is_active', '=', True), ('state', '=', 'bolsista'), ('disciplina_id', '=', disciplina_id_de), ('tutor', '=', tutor_de)]\"\n ), 'curso_id_para': fields.many2one('ud.curso', u'Curso', required=\n True, domain=\"[('is_active', '=', True)]\"), 'disciplina_id_para':\n fields.many2one('ud.monitoria.disciplina', u'Disciplinas', required\n =True, domain=\n \"[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id_para), ('is_active', '=', True)]\"\n ), 'tutor_para': fields.boolean(u'Tutor?'), 'status_para': fields.\n selection(_STATES, u'Status', required=True),\n 'doc_discente_id_para': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('is_active', '=', True), ('state', '=', status_para), ('disciplina_id', '=', disciplina_id_para), ('tutor', '=', tutor_para)]\"\n ), 'banco_id': fields.many2one('ud.banco', u'Banco', ondelete=\n 'restrict'), 'agencia': fields.char(u'Agência', size=4, help=\n u'Número da Agência'), 'dv_agencia': fields.char(u'DV Agência',\n size=2, help=u'Dígito verificador da Agência'), 'conta': fields.\n char(u'Conta', size=10, help=u'Número da Conta'), 'dv_conta':\n fields.char(u'DV Conta', size=1, help=\n u'Dígito verificador da Conta'), 'operacao': fields.char(\n u'Operação', size=3, help=u'Tipo de conta'), 'agencia_v': fields.\n related('banco_id', 'agencia', type='boolean', invisible=True,\n readonly=True), 'dv_agencia_v': fields.related('banco_id',\n 'dv_agencia', type='boolean', invisible=True, readonly=True),\n 'conta_v': fields.related('banco_id', 'conta', type='boolean',\n invisible=True, readonly=True), 'dv_conta_v': fields.related(\n 'banco_id', 'dv_conta', type='boolean', invisible=True, readonly=\n True), 'operacao_v': fields.related('banco_id', 'operacao', type=\n 'boolean', invisible=True, readonly=True)}\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(TransferirBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state != 'bolsista':\n raise osv.except_osv(u'Discente bolsista',\n u'O discente já é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'O discente não pode ser classificado como bolsista')\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id_de'] = doc.disciplina_id.curso_id.id\n res['disciplina_id_de'] = doc.disciplina_id.id\n res['tutor_de'] = doc.tutor\n res['status_de'] = doc.state\n res['doc_discente_id_de'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, comp, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {('disciplina_id_' + comp): False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {('disciplina_id_' + comp): 
[('id', 'in', disc)]}}\n if not disc:\n res['value'] = {('disciplina_id_' + comp): False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, comp, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id and doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n return {'value': {('doc_discente_id_' + comp): doc_discente_id}}\n return {'value': {('doc_discente_id_' + comp): False}}\n\n def onchange_banco(self, cr, uid, ids, banco_id, context=None):\n if banco_id:\n banco = self.pool.get('ud.banco').read(cr, uid, banco_id, [\n 'agencia', 'dv_agencia', 'conta', 'dv_conta', 'operacao'],\n context=context, load='_classic_write')\n vals = {'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}\n vals.update({('%s_v' % dado): banco.get(dado) for dado in banco\n .keys()})\n return {'value': vals}\n return {'value': {'agencia_v': False, 'dv_agencia_v': False,\n 'conta_v': False, 'dv_conta_v': False, 'operacao_v': False,\n 'agencia': False, 'dv_agencia': False, 'conta': False,\n 'dv_conta': False, 'operacao': False}}\n\n def botao_transferir(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n for transf in self.browse(cr, uid, ids, context):\n matricula = transf.doc_discente_id_para.discente_id.matricula\n for perfil in transf.doc_discente_id_para.discente_id.pessoa_id.papel_ids:\n if perfil.matricula == matricula and perfil.tipo == 'a':\n if perfil.is_bolsista:\n raise osv.except_osv(u'Discente bolsista',\n u'O discente \"{}\" sob matrícula \"{}\" possui bolsa do tipo: \"{}\"'\n .format(transf.doc_discente_id_para.discente_id\n .pessoa_id.name, matricula, TIPOS_BOLSA[perfil.\n tipo_bolsa]))\n break\n if not perfil:\n raise osv.except_osv(u'Perfil excluído', \n u'O perfil do discente para a matrícula \"%s\" não existe ou foi excluído'\n % matricula or '')\n matricula = transf.doc_discente_id_de.discente_id.matricula\n for perfil_de in transf.doc_discente_id_de.discente_id.pessoa_id.papel_ids:\n if perfil.matricula == matricula and perfil.tipo == 'a':\n break\n responsavel = self.pool.get('ud.employee').search(cr,\n SUPERUSER_ID, [('user_id', '=', uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n valor = perfil_de.valor_bolsa\n perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':\n True, 'tipo_bolsa': 'm', 'valor_bolsa': valor})\n perfil_model.write(cr, SUPERUSER_ID, perfil_de.id, {\n 'is_bolsista': False, 'tipo_bolsa': False, 'valor_bolsa': \n False})\n transf.doc_discente_id_de.write({'state': 'n_bolsista'})\n transf.doc_discente_id_para.write({'state': 'bolsista',\n 'is_active': True})\n get_banco(self, cr, transf, transf.doc_discente_id_para.\n discente_id.pessoa_id.id, context)\n evento = {'responsavel_id': responsavel[0], 'name':\n u'Transferência de bolsa', 'envolvidos_ids': [(4, transf.\n doc_discente_id_de.discente_id.pessoa_id.id), (4, transf.\n doc_discente_id_para.discente_id.pessoa_id.id)],\n 'descricao': \n u'Transferência de bolsa no valor de R$ %(valor)s do discente %(discente_de)s sob matrícula 
%(matricula_de)s para o(a) discente \"%(discente_para)s\" sob matrícula\"%(matricula_para)s\".'\n % {'valor': valor, 'discente_de': transf.\n doc_discente_id_de.discente_id.pessoa_id.name.upper(),\n 'matricula_de': perfil_de.matricula, 'discente_para':\n transf.doc_discente_id_de.discente_id.pessoa_id.name.upper(\n ), 'matricula_para': perfil_de.matricula}}\n transf.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n\n\nclass RemoverBolsaWizard(osv.TransientModel):\n _name = 'ud.monitoria.bolsa.remover.wizard'\n _description = u'Remoção de bolsa de discente'\n _columns = {'semestre_id': fields.many2one('ud.monitoria.registro',\n u'Semestre', required=True, readonly=True), 'curso_id': fields.\n many2one('ud.curso', u'Curso', required=True, domain=\n \"[('is_active', '=', True)]\"), 'disciplina_id': fields.many2one(\n 'ud.monitoria.disciplina', u'Disciplinas', required=True, domain=\n \"[('id', '=', False)]\"), 'tutor': fields.boolean(u'Tutor?'),\n 'doc_discente_id': fields.many2one(\n 'ud.monitoria.documentos.discente', u'Discente', required=True,\n domain=\n \"[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), ('is_active', '=', True), ('state', '=', 'bolsista')]\"\n )}\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(RemoverBolsaWizard, self).default_get(cr, uid,\n fields_list, context)\n context = context or {}\n if context.get('active_id', False):\n if context.get('active_model', False) == 'ud.monitoria.registro':\n res['semestre_id'] = context.get('active_id')\n elif context.get('active_model', False\n ) == 'ud.monitoria.documentos.discente':\n doc = self.pool.get('ud.monitoria.documentos.discente').browse(\n cr, uid, context.get('active_id'), context)\n if doc.state != 'bolsista':\n raise osv.except_osv(u'Discente não bolsista',\n u'O discente não é bolsista')\n elif not doc.is_active:\n raise osv.except_osv(u'Documento do discente inativo',\n u'Não é possível alterar o status de discentes inativos'\n )\n res['semestre_id'] = doc.disciplina_id.semestre_id.id\n res['curso_id'] = doc.disciplina_id.curso_id.id\n res['disciplina_id'] = doc.disciplina_id.id\n res['tutor'] = doc.tutor\n res['doc_discente_id'] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, semestre_id, curso_id,\n disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {'value': {'disciplina_id': False}}\n reg = self.pool.get('ud.monitoria.registro').read(cr, uid,\n semestre_id, ['processos_seletivos_ids'], context=context, load\n ='_classic_write')\n args = [('curso_id', '=', curso_id), ('processo_seletivo_id', '=',\n reg['processos_seletivos_ids']), ('is_active', '=', True)]\n disc = self.pool.get('ud.monitoria.disciplina').search(cr, uid,\n args, context=context)\n res = {'domain': {'disciplina_id': [('id', 'in', disc)]}}\n if not disc:\n res['value'] = {'disciplina_id': False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, disciplina_id,\n doc_discente_id, context=None):\n if disciplina_id and doc_discente_id:\n doc_discente = self.pool.get('ud.monitoria.documentos.discente'\n ).browse(cr, uid, doc_discente_id, context)\n doc_discente_id = (doc_discente_id if doc_discente.\n disciplina_id.id == disciplina_id else False)\n return {'value': {'doc_discente_id': doc_discente_id}}\n return {'value': {'doc_discente_id': False}}\n\n def botao_remover(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get('ud.perfil')\n pessoa_model = self.pool.get('ud.employee')\n for rem in self.browse(cr, uid, ids, 
context):\n responsavel = pessoa_model.search(cr, SUPERUSER_ID, [('user_id',\n '=', uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(u'Registro Inexistente',\n u'Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo'\n )\n if len(responsavel) > 1:\n raise osv.except_osv(u'Multiplos vínculos',\n u'Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo'\n )\n perfil = rem.doc_discente_id.inscricao_id.perfil_id\n perfil_model.write(cr, SUPERUSER_ID, perfil.id, {'is_bolsista':\n False, 'tipo_bolsa': False, 'valor_bolsa': False})\n rem.doc_discente_id.write({'state': 'n_bolsista'})\n evento = {'responsavel_id': responsavel[0], 'name': \n u'Remoção de bolsa: \"%s\"' % rem.doc_discente_id.discente_id\n .name, 'envolvidos_ids': [(4, rem.doc_discente_id.\n discente_id.id)], 'descricao': \n u'A bolsa do discente \"%s\" sob matrícula \"%s\" foi removida.' %\n (rem.doc_discente_id.discente_id.name.upper(), perfil.\n matricula)}\n rem.semestre_id.write({'eventos_ids': [(0, 0, evento)]})\n return True\n",
"step-5": "# coding: utf-8\nfrom openerp import SUPERUSER_ID\nfrom openerp.osv import osv, fields\nfrom openerp.addons.ud.ud import _TIPOS_BOLSA\n\nTIPOS_BOLSA = dict(_TIPOS_BOLSA)\n\n\ndef get_banco(cls, cr, browse_record, usuario_id, context=None):\n dados_bancarios_model = cls.pool.get(\"ud.dados.bancarios\")\n args = [(\"banco_id\", \"=\", browse_record.banco_id.id)]\n if browse_record.agencia_v:\n args.append((\"agencia\", \"=\", browse_record.agencia))\n if browse_record.dv_agencia_v:\n args.append((\"dv_agencia\", \"=\", browse_record.dv_agencia))\n if browse_record.conta_v:\n args.append((\"conta\", \"=\", browse_record.conta))\n if browse_record.dv_conta_v:\n args.append((\"dv_conta\", \"=\", browse_record.dv_conta))\n if browse_record.operacao_v:\n args.append((\"operacao\", \"=\", browse_record.operacao))\n dados_bancarios = dados_bancarios_model.search(cr, SUPERUSER_ID, args, context=context)\n if dados_bancarios:\n dados_bancarios = dados_bancarios_model.browse(cr, SUPERUSER_ID, dados_bancarios[0])\n if not dados_bancarios.ud_conta_id:\n return dados_bancarios.id\n elif dados_bancarios.ud_conta_id.id == usuario_id:\n return dados_bancarios.id\n raise osv.except_osv(u\"Dados Bancários duplicados\", u\"Outra pessoa já possui esses dados bancários!\")\n dados = {\"banco_id\": browse_record.banco_id.id, \"agencia\": browse_record.agencia, \"dv_agencia\": browse_record.dv_agencia,\n \"conta\": browse_record.conta, \"dv_conta\": browse_record.dv_conta, \"operacao\": browse_record.operacao,\n \"ud_conta_id\": usuario_id}\n return dados_bancarios_model.create(cr, SUPERUSER_ID, dados, context=context)\n\n\nclass AdicionarBolsaWizard(osv.TransientModel):\n _name = \"ud.monitoria.bolsa.adicionar.wizard\"\n _description = u\"Inclusão de bolsa de monitoria para discente (UD)\"\n\n _STATES = [\n (\"n_bolsista\", u\"Não Bolsista\"),\n (\"reserva\", u\"Cadastro de Reserva\"),\n ]\n\n def _bolsas(self, cr, uid, ids, campos, args, context=None):\n oferta_model = self.pool.get(\"ud.monitoria.oferta.disciplina\")\n res = {}\n for add in self.browse(cr, uid, ids, context):\n res[add.id] = add.disciplina_id.bolsas\n return res\n\n _columns = {\n \"semestre_id\": fields.many2one(\"ud.monitoria.registro\", u\"Semestre\", required=True, readonly=True),\n \"curso_id\": fields.many2one(\"ud.curso\", u\"Curso\", required=True, domain=\"[('is_active', '=', True)]\"),\n \"disciplina_id\": fields.many2one(\"ud.monitoria.disciplina\", u\"Disciplinas\", required=True,\n domain=\"[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id), \"\n \"('is_active', '=', True)]\"),\n \"bolsas\": fields.function(_bolsas, type=\"integer\", string=u\"Bolsas disponíveis\",\n help=u\"Número de bolsas disponíveis para a disciplina\"),\n \"valor_bolsa\": fields.float(u\"Bolsa (R$)\"),\n \"tutor\": fields.boolean(u\"Tutor?\"),\n \"status\": fields.selection(_STATES, u\"Status\", required=True),\n \"doc_discente_id\": fields.many2one(\"ud.monitoria.documentos.discente\", u\"Discente\", required=True,\n domain=\"[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), \"\n \"('is_active', '=', True), ('state', '=', status)]\"),\n # DADOS BANCÁRIOS\n \"dados_bancarios_id\": fields.many2one(\"ud.dados.bancarios\", u\"Dados Bancários\", domain=[('id', '=', False)]),\n \"banco_id\": fields.many2one(\"ud.banco\", u\"Banco\", ondelete=\"restrict\"),\n \"agencia\": fields.char(u\"Agência\", size=4, help=u\"Número da Agência\"),\n \"dv_agencia\": fields.char(u\"DV Agência\", size=2, help=u\"Dígito verificador da 
Agência\"),\n \"conta\": fields.char(u\"Conta\", size=10, help=u\"Número da Conta\"),\n \"dv_conta\": fields.char(u\"DV Conta\", size=1, help=u\"Dígito verificador da Conta\"),\n \"operacao\": fields.char(u\"Operação\", size=3, help=u\"Tipo de conta\"),\n\n \"agencia_v\": fields.related(\"banco_id\", \"agencia\", type=\"boolean\", invisible=True, readonly=True),\n \"dv_agencia_v\": fields.related(\"banco_id\", \"dv_agencia\", type=\"boolean\", invisible=True, readonly=True),\n \"conta_v\": fields.related(\"banco_id\", \"conta\", type=\"boolean\", invisible=True, readonly=True),\n \"dv_conta_v\": fields.related(\"banco_id\", \"dv_conta\", type=\"boolean\", invisible=True, readonly=True),\n \"operacao_v\": fields.related(\"banco_id\", \"operacao\", type=\"boolean\", invisible=True, readonly=True),\n }\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(AdicionarBolsaWizard, self).default_get(cr, uid, fields_list, context)\n res[\"status\"] = \"n_bolsista\"\n res[\"valor_bolsa\"] = 400.\n context = context or {}\n if context.get(\"active_id\", False):\n if context.get(\"active_model\", False) == \"ud.monitoria.registro\":\n res[\"semestre_id\"] = context.get(\"active_id\")\n elif context.get(\"active_model\", False) == \"ud.monitoria.documentos.discente\":\n doc = self.pool.get(\"ud.monitoria.documentos.discente\").browse(cr, uid, context.get(\"active_id\"), context)\n if doc.state == \"bolsista\":\n raise osv.except_osv(u\"Discente bolsista\", u\"O discente já é bolsista\")\n elif not doc.is_active:\n raise osv.except_osv(u\"Documento do discente inativo\", u\"Não é possível alterar o status de discentes inativos\")\n res[\"semestre_id\"] = doc.disciplina_id.semestre_id.id\n res[\"curso_id\"] = doc.disciplina_id.curso_id.id\n res[\"disciplina_id\"] = doc.disciplina_id.id\n res[\"tutor\"] = doc.tutor\n res[\"status\"] = doc.state\n res[\"doc_discente_id\"] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, semestre_id, curso_id, disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {\"value\": {\"disciplina_id\": False}}\n reg = self.pool.get(\"ud.monitoria.registro\").read(cr, uid, semestre_id, [\"processos_seletivos_ids\"], context=context, load=\"_classic_write\")\n args = [(\"curso_id\", \"=\", curso_id), (\"processo_seletivo_id\", \"=\", reg[\"processos_seletivos_ids\"]), (\"is_active\", \"=\", True)]\n disc = self.pool.get(\"ud.monitoria.disciplina\").search(cr, uid, args, context=context)\n res = {\"domain\": {\"disciplina_id\": [(\"id\", \"in\", disc)]}}\n if not disc:\n res[\"value\"]= {\"disciplina_id\": False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, disciplina_id, doc_discente_id, context=None):\n if disciplina_id:\n if doc_discente_id:\n doc_discente = self.pool.get(\"ud.monitoria.documentos.discente\").browse(cr, uid, doc_discente_id, context)\n doc_discente_id = doc_discente_id if doc_discente.disciplina_id.id == disciplina_id else False\n disciplina_id = self.pool.get(\"ud.monitoria.disciplina\").browse(cr, uid, disciplina_id, context)\n return {\n \"value\": {\"doc_discente_id\": doc_discente_id,\n \"bolsas\": disciplina_id.bolsas}\n }\n return {\"value\": {\"doc_discente_id\": False, \"bolsas\": 0}}\n\n def onchange_doc_discente(self, cr, uid, ids, doc_discente_id, dados_bancarios_id, context=None):\n if doc_discente_id:\n doc = self.pool.get(\"ud.monitoria.documentos.discente\").browse(cr, uid, doc_discente_id, context)\n if not dados_bancarios_id:\n dados_bancarios_id = 
getattr(doc.dados_bancarios_id, \"id\", False)\n return {\"value\": {\"dados_bancarios_id\": dados_bancarios_id},\n \"domain\": {\"dados_bancarios_id\": [(\"ud_conta_id\", \"=\", doc.discente_id.id)]}}\n return {\"value\": {\"dados_bancarios_id\": False},\n \"domain\": {\"dados_bancarios_id\": [(\"id\", \"=\", False)]}}\n\n def onchange_banco(self, cr, uid, ids, banco_id, context=None):\n if banco_id:\n banco = self.pool.get(\"ud.banco\").read(cr, uid, banco_id, [\n \"agencia\", \"dv_agencia\", \"conta\", \"dv_conta\", \"operacao\"\n ], context=context, load=\"_classic_write\")\n vals = {\"agencia\": False, \"dv_agencia\": False, \"conta\": False, \"dv_conta\": False, \"operacao\": False}\n vals.update({\"%s_v\" % dado: banco.get(dado) for dado in banco.keys()})\n return {\"value\": vals}\n return {\"value\": {\"agencia_v\": False, \"dv_agencia_v\": False, \"conta_v\": False, \"dv_conta_v\": False,\"operacao_v\": False,\n \"agencia\": False, \"dv_agencia\": False, \"conta\": False, \"dv_conta\": False, \"operacao\": False}}\n\n def botao_adicionar(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get(\"ud.perfil\")\n for add in self.browse(cr, uid, ids, context):\n if add.bolsas == 0:\n raise osv.except_osv(u\"Bolsas Insuficientes\", u\"Não há bolsas disponíveis para essa disciplina\")\n elif not add.doc_discente_id.is_active:\n raise osv.except_osv(u\"Documento do discente inativo\",\n u\"O discente não pode ser classificado como bolsista\")\n if add.doc_discente_id.inscricao_id.perfil_id.is_bolsista:\n raise osv.except_osv(\n u\"Discente bolsista\",\n u\"O discente \\\"{}\\\" sob matrícula \\\"{}\\\" possui bolsa do tipo: \\\"{}\\\"\".format(\n add.doc_discente_id.discente_id.name, add.doc_discente_id.inscricao_id.perfil_id.matricula,\n TIPOS_BOLSA[add.doc_discente_id.inscricao_id.perfil_id.tipo_bolsa]\n )\n )\n responsavel = self.pool.get(\"ud.employee\").search(cr, SUPERUSER_ID, [(\"user_id\", \"=\", uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(\n u\"Registro Inexistente\",\n u\"Não é possível realizar essa alteração enquanto seu login não estiver vinculado a uma pessoa no núcleo\"\n )\n if len(responsavel) > 1:\n raise osv.except_osv(\n u\"Multiplos vínculos\",\n u\"Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo\"\n )\n perfil_model.write(cr, SUPERUSER_ID, add.doc_discente_id.inscricao_id.perfil_id.id, {\n \"is_bolsista\": True, \"tipo_bolsa\": \"m\", \"valor_bolsa\": (\"%.2f\" % add.valor_bolsa).replace(\".\", \",\")\n })\n if not add.dados_bancarios_id:\n dados_bancarios = get_banco(self, cr, add, add.doc_discente_id.discente_id.id, context)\n else:\n dados_bancarios = add.dados_bancarios_id.id\n add.doc_discente_id.write({\"state\": \"bolsista\", \"dados_bancarios_id\": dados_bancarios})\n evento = {\n \"responsavel_id\": responsavel[0],\n \"name\": u\"Adição de bolsa: \\\"%s\\\"\" % add.doc_discente_id.discente_id.name,\n \"envolvidos_ids\": [(4, add.doc_discente_id.discente_id.id)],\n \"descricao\": u\"Uma bolsa de R$ %s foi vinculada para o(a) discente \\\"%s\\\" sob matrícula \\\"%s\\\".\" % (\n (\"%.2f\" % add.valor_bolsa).replace(\".\", \",\"),\n add.doc_discente_id.discente_id.name.upper(), add.doc_discente_id.inscricao_id.perfil_id.matricula\n )\n }\n add.semestre_id.write({\"eventos_ids\": [(0, 0, evento)]})\n return True\n\n\nclass TransferirBolsaWizard(osv.TransientModel):\n _name = \"ud.monitoria.bolsa.transferir.wizard\"\n _description = u\"Transferência de bolsa de monitoria (UD)\"\n\n _STATES 
= [\n (\"n_bolsista\", u\"Não Bolsista\"),\n (\"reserva\", u\"Cadastro de Reserva\"),\n ]\n\n _columns = {\n \"semestre_id\": fields.many2one(\"ud.monitoria.registro\", u\"Semestre\", required=True, readonly=True),\n\n \"curso_id_de\": fields.many2one(\"ud.curso\", u\"Curso\", required=True, domain=\"[('is_active', '=', True)]\"),\n \"disciplina_id_de\": fields.many2one(\"ud.monitoria.disciplina\", u\"Disciplinas\", required=True,\n domain=\"[('id', '=', False)]\"),\n \"tutor_de\": fields.boolean(u\"Tutor?\"),\n \"doc_discente_id_de\": fields.many2one(\"ud.monitoria.documentos.discente\", u\"Discente\", required=True,\n domain=\"[('is_active', '=', True), ('state', '=', 'bolsista'), \"\n \"('disciplina_id', '=', disciplina_id_de), ('tutor', '=', tutor_de)]\"),\n\n \"curso_id_para\": fields.many2one(\"ud.curso\", u\"Curso\", required=True, domain=\"[('is_active', '=', True)]\"),\n \"disciplina_id_para\": fields.many2one(\"ud.monitoria.disciplina\", u\"Disciplinas\", required=True,\n domain=\"[('semestre_id', '=', semestre_id), ('curso_id', '=', curso_id_para), \"\n \"('is_active', '=', True)]\"),\n \"tutor_para\": fields.boolean(u\"Tutor?\"),\n \"status_para\": fields.selection(_STATES, u\"Status\", required=True),\n \"doc_discente_id_para\": fields.many2one(\"ud.monitoria.documentos.discente\", u\"Discente\", required=True,\n domain=\"[('is_active', '=', True), ('state', '=', status_para), \"\n \"('disciplina_id', '=', disciplina_id_para), \"\n \"('tutor', '=', tutor_para)]\"),\n # DADOS BANCÁRIOS\n \"banco_id\": fields.many2one(\"ud.banco\", u\"Banco\", ondelete=\"restrict\"),\n \"agencia\": fields.char(u\"Agência\", size=4, help=u\"Número da Agência\"),\n \"dv_agencia\": fields.char(u\"DV Agência\", size=2, help=u\"Dígito verificador da Agência\"),\n \"conta\": fields.char(u\"Conta\", size=10, help=u\"Número da Conta\"),\n \"dv_conta\": fields.char(u\"DV Conta\", size=1, help=u\"Dígito verificador da Conta\"),\n \"operacao\": fields.char(u\"Operação\", size=3, help=u\"Tipo de conta\"),\n\n \"agencia_v\": fields.related(\"banco_id\", \"agencia\", type=\"boolean\", invisible=True, readonly=True),\n \"dv_agencia_v\": fields.related(\"banco_id\", \"dv_agencia\", type=\"boolean\", invisible=True, readonly=True),\n \"conta_v\": fields.related(\"banco_id\", \"conta\", type=\"boolean\", invisible=True, readonly=True),\n \"dv_conta_v\": fields.related(\"banco_id\", \"dv_conta\", type=\"boolean\", invisible=True, readonly=True),\n \"operacao_v\": fields.related(\"banco_id\", \"operacao\", type=\"boolean\", invisible=True, readonly=True),\n }\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(TransferirBolsaWizard, self).default_get(cr, uid, fields_list, context)\n context = context or {}\n if context.get(\"active_id\", False):\n if context.get(\"active_model\", False) == \"ud.monitoria.registro\":\n res[\"semestre_id\"] = context.get(\"active_id\")\n elif context.get(\"active_model\", False) == \"ud.monitoria.documentos.discente\":\n doc = self.pool.get(\"ud.monitoria.documentos.discente\").browse(cr, uid, context.get(\"active_id\"),\n context)\n if doc.state != \"bolsista\":\n raise osv.except_osv(u\"Discente bolsista\", u\"O discente já é bolsista\")\n elif not doc.is_active:\n raise osv.except_osv(u\"Documento do discente inativo\",\n u\"O discente não pode ser classificado como bolsista\")\n res[\"semestre_id\"] = doc.disciplina_id.semestre_id.id\n res[\"curso_id_de\"] = doc.disciplina_id.curso_id.id\n res[\"disciplina_id_de\"] = doc.disciplina_id.id\n 
res[\"tutor_de\"] = doc.tutor\n res[\"status_de\"] = doc.state\n res[\"doc_discente_id_de\"] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, comp, semestre_id, curso_id, disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {\"value\": {\"disciplina_id_\" + comp: False}}\n reg = self.pool.get(\"ud.monitoria.registro\").read(cr, uid, semestre_id, [\"processos_seletivos_ids\"], context=context, load=\"_classic_write\")\n args = [(\"curso_id\", \"=\", curso_id), (\"processo_seletivo_id\", \"=\", reg[\"processos_seletivos_ids\"]), (\"is_active\", \"=\", True)]\n disc = self.pool.get(\"ud.monitoria.disciplina\").search(cr, uid, args, context=context)\n res = {\"domain\": {\"disciplina_id_\" + comp: [(\"id\", \"in\", disc)]}}\n if not disc:\n res[\"value\"] = {\"disciplina_id_\" + comp: False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, comp, disciplina_id, doc_discente_id, context=None):\n if disciplina_id and doc_discente_id:\n doc_discente = self.pool.get(\"ud.monitoria.documentos.discente\").browse(cr, uid, doc_discente_id, context)\n doc_discente_id = doc_discente_id if doc_discente.disciplina_id.id == disciplina_id else False\n return {\n \"value\": {\"doc_discente_id_\" + comp: doc_discente_id}\n }\n return {\"value\": {\"doc_discente_id_\" + comp: False}}\n\n def onchange_banco(self, cr, uid, ids, banco_id, context=None):\n if banco_id:\n banco = self.pool.get(\"ud.banco\").read(cr, uid, banco_id, [\n \"agencia\", \"dv_agencia\", \"conta\", \"dv_conta\", \"operacao\"\n ], context=context, load=\"_classic_write\")\n vals = {\"agencia\": False, \"dv_agencia\": False, \"conta\": False, \"dv_conta\": False, \"operacao\": False}\n vals.update({\"%s_v\" % dado: banco.get(dado) for dado in banco.keys()})\n return {\"value\": vals}\n return {\"value\": {\"agencia_v\": False, \"dv_agencia_v\": False, \"conta_v\": False, \"dv_conta_v\": False,\"operacao_v\": False,\n \"agencia\": False, \"dv_agencia\": False, \"conta\": False, \"dv_conta\": False, \"operacao\": False}}\n\n def botao_transferir(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get(\"ud.perfil\")\n for transf in self.browse(cr, uid, ids, context):\n matricula = transf.doc_discente_id_para.discente_id.matricula\n for perfil in transf.doc_discente_id_para.discente_id.pessoa_id.papel_ids:\n if perfil.matricula == matricula and perfil.tipo == \"a\":\n if perfil.is_bolsista:\n raise osv.except_osv(\n u\"Discente bolsista\",\n u\"O discente \\\"{}\\\" sob matrícula \\\"{}\\\" possui bolsa do tipo: \\\"{}\\\"\".format(\n transf.doc_discente_id_para.discente_id.pessoa_id.name, matricula,\n TIPOS_BOLSA[perfil.tipo_bolsa]\n )\n )\n break\n if not perfil:\n raise osv.except_osv(\n u\"Perfil excluído\",\n u\"O perfil do discente para a matrícula \\\"%s\\\" não existe ou foi excluído\" % matricula or \"\"\n )\n matricula = transf.doc_discente_id_de.discente_id.matricula\n for perfil_de in transf.doc_discente_id_de.discente_id.pessoa_id.papel_ids:\n if perfil.matricula == matricula and perfil.tipo == \"a\":\n break\n responsavel = self.pool.get(\"ud.employee\").search(cr, SUPERUSER_ID, [(\"user_id\", \"=\", uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(\n u\"Registro Inexistente\",\n u\"Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo\"\n )\n if len(responsavel) > 1:\n raise osv.except_osv(\n u\"Multiplos vínculos\",\n u\"Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo\"\n )\n 
valor = perfil_de.valor_bolsa\n perfil_model.write(cr, SUPERUSER_ID, perfil.id, {\n \"is_bolsista\": True, \"tipo_bolsa\": \"m\", \"valor_bolsa\": valor\n })\n perfil_model.write(cr, SUPERUSER_ID, perfil_de.id, {\n \"is_bolsista\": False, \"tipo_bolsa\": False, \"valor_bolsa\": False\n })\n transf.doc_discente_id_de.write({\"state\": \"n_bolsista\"})\n transf.doc_discente_id_para.write({\"state\": \"bolsista\", \"is_active\": True})\n get_banco(self, cr, transf, transf.doc_discente_id_para.discente_id.pessoa_id.id, context)\n evento = {\n \"responsavel_id\": responsavel[0],\n \"name\": u\"Transferência de bolsa\",\n \"envolvidos_ids\": [(4, transf.doc_discente_id_de.discente_id.pessoa_id.id),\n (4, transf.doc_discente_id_para.discente_id.pessoa_id.id)],\n \"descricao\": u\"Transferência de bolsa no valor de R$ %(valor)s do discente %(discente_de)s sob matrícula \"\n u\"%(matricula_de)s para o(a) discente \\\"%(discente_para)s\\\" sob matrícula\"\n u\"\\\"%(matricula_para)s\\\".\" % {\n \"valor\": valor, \"discente_de\": transf.doc_discente_id_de.discente_id.pessoa_id.name.upper(),\n \"matricula_de\": perfil_de.matricula,\n \"discente_para\": transf.doc_discente_id_de.discente_id.pessoa_id.name.upper(),\n \"matricula_para\": perfil_de.matricula\n }\n }\n transf.semestre_id.write({\"eventos_ids\": [(0, 0, evento)]})\n return True\n\n\nclass RemoverBolsaWizard(osv.TransientModel):\n _name = \"ud.monitoria.bolsa.remover.wizard\"\n _description = u\"Remoção de bolsa de discente\"\n\n _columns = {\n \"semestre_id\": fields.many2one(\"ud.monitoria.registro\", u\"Semestre\", required=True, readonly=True),\n \"curso_id\": fields.many2one(\"ud.curso\", u\"Curso\", required=True, domain=\"[('is_active', '=', True)]\"),\n \"disciplina_id\": fields.many2one(\"ud.monitoria.disciplina\", u\"Disciplinas\", required=True,\n domain=\"[('id', '=', False)]\"),\n \"tutor\": fields.boolean(u\"Tutor?\"),\n \"doc_discente_id\": fields.many2one(\"ud.monitoria.documentos.discente\", u\"Discente\", required=True,\n domain=\"[('disciplina_id', '=', disciplina_id), ('tutor', '=', tutor), \"\n \"('is_active', '=', True), ('state', '=', 'bolsista')]\"),\n }\n\n def default_get(self, cr, uid, fields_list, context=None):\n res = super(RemoverBolsaWizard, self).default_get(cr, uid, fields_list, context)\n context = context or {}\n if context.get(\"active_id\", False):\n if context.get(\"active_model\", False) == \"ud.monitoria.registro\":\n res[\"semestre_id\"] = context.get(\"active_id\")\n elif context.get(\"active_model\", False) == \"ud.monitoria.documentos.discente\":\n doc = self.pool.get(\"ud.monitoria.documentos.discente\").browse(cr, uid, context.get(\"active_id\"), context)\n if doc.state != \"bolsista\":\n raise osv.except_osv(u\"Discente não bolsista\", u\"O discente não é bolsista\")\n elif not doc.is_active:\n raise osv.except_osv(u\"Documento do discente inativo\",\n u\"Não é possível alterar o status de discentes inativos\")\n res[\"semestre_id\"] = doc.disciplina_id.semestre_id.id\n res[\"curso_id\"] = doc.disciplina_id.curso_id.id\n res[\"disciplina_id\"] = doc.disciplina_id.id\n res[\"tutor\"] = doc.tutor\n res[\"doc_discente_id\"] = doc.id\n return res\n\n def onchange_curso(self, cr, uid, ids, semestre_id, curso_id, disciplina_id, context=None):\n if not (semestre_id and curso_id):\n return {\"value\": {\"disciplina_id\": False}}\n reg = self.pool.get(\"ud.monitoria.registro\").read(cr, uid, semestre_id, [\"processos_seletivos_ids\"], context=context, load=\"_classic_write\")\n args = [(\"curso_id\", 
\"=\", curso_id), (\"processo_seletivo_id\", \"=\", reg[\"processos_seletivos_ids\"]), (\"is_active\", \"=\", True)]\n disc = self.pool.get(\"ud.monitoria.disciplina\").search(cr, uid, args, context=context)\n res = {\"domain\": {\"disciplina_id\": [(\"id\", \"in\", disc)]}}\n if not disc:\n res[\"value\"] = {\"disciplina_id\": False}\n return res\n\n def onchange_disciplina(self, cr, uid, ids, disciplina_id, doc_discente_id, context=None):\n if disciplina_id and doc_discente_id:\n doc_discente = self.pool.get(\"ud.monitoria.documentos.discente\").browse(cr, uid, doc_discente_id, context)\n doc_discente_id = doc_discente_id if doc_discente.disciplina_id.id == disciplina_id else False\n return {\n \"value\": {\"doc_discente_id\": doc_discente_id}\n }\n return {\"value\": {\"doc_discente_id\": False}}\n\n def botao_remover(self, cr, uid, ids, context=None):\n perfil_model = self.pool.get(\"ud.perfil\")\n pessoa_model = self.pool.get(\"ud.employee\")\n for rem in self.browse(cr, uid, ids, context):\n responsavel = pessoa_model.search(cr, SUPERUSER_ID, [(\"user_id\", \"=\", uid)], limit=2)\n if not responsavel:\n raise osv.except_osv(\n u\"Registro Inexistente\",\n u\"Não é possível realizar essa alteração enquanto seu login não estiver vinculado ao núcleo\"\n )\n if len(responsavel) > 1:\n raise osv.except_osv(\n u\"Multiplos vínculos\",\n u\"Não é possível realizar essa alteração enquanto seu login possuir multiplos vínculos no núcleo\"\n )\n perfil = rem.doc_discente_id.inscricao_id.perfil_id\n perfil_model.write(cr, SUPERUSER_ID, perfil.id, {\n \"is_bolsista\": False, \"tipo_bolsa\": False, \"valor_bolsa\": False\n })\n rem.doc_discente_id.write({\"state\": \"n_bolsista\"})\n evento = {\n \"responsavel_id\": responsavel[0],\n \"name\": u\"Remoção de bolsa: \\\"%s\\\"\" % rem.doc_discente_id.discente_id.name,\n \"envolvidos_ids\": [(4, rem.doc_discente_id.discente_id.id)],\n \"descricao\": u\"A bolsa do discente \\\"%s\\\" sob matrícula \\\"%s\\\" foi removida.\" % (\n rem.doc_discente_id.discente_id.name.upper(), perfil.matricula\n )\n }\n rem.semestre_id.write({\"eventos_ids\": [(0, 0, evento)]})\n return True\n",
"step-ids": [
18,
20,
22,
24,
26
]
}
|
[
18,
20,
22,
24,
26
] |
import sys
from PySide2.QtWidgets import QApplication, QDialog, QLineEdit, QPushButton, QVBoxLayout, QLabel, QWidget
from docx import Document
from docx.shared import Inches
class Form(QDialog):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
        # Create widgets
self.setWindowTitle("Cover Letter Developer")
self.label1 = QLabel('Input Company Name')
self.edit1 = QLineEdit("")
self.label2 = QLabel('Input Position Title')
self.edit2 = QLineEdit("")
self.label3 = QLabel('How did you get introduced to the company?')
self.edit3 = QLineEdit("")
self.label4 = QLabel('What skills do you have that would help the COOP/Internship')
self.edit4 = QLineEdit("")
self.button = QPushButton("Develop")
        # Create layout and add widgets
layout = QVBoxLayout()
layout.addWidget(self.label1)
layout.addWidget(self.edit1)
layout.addWidget(self.label2)
layout.addWidget(self.edit2)
layout.addWidget(self.label3)
layout.addWidget(self.edit3)
layout.addWidget(self.label4)
layout.addWidget(self.edit4)
layout.addWidget(self.button)
        # Set dialog layout
self.setLayout(layout)
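        # Wire the Develop button to the letter generator below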
self.button.clicked.connect(self.coverlet)
def coverlet(self):
name = self.edit1.text()
pos = self.edit2.text()
intro = self.edit3.text()
skills = self.edit4.text()
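        # Stitch the answers into the cover-letter template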
mytext = """
Dear """ + name + """’s Hiring Team,
\n
""" + """ """ + """ I am writing to apply to the """ + pos + """ Intern/COOP position at """ + name + """. I am a 4th year at Wentworth Institute of Technology, pursuing a Bachelor of Science degree in Electro-mechanical Engineering. The Electro-mechanical Engineering program combines the technical disciplines of Electrical and Mechanical Engineering. """ + intro + """
"""+ """As an intern at """ + name + """ , I will bring my toolset of """ + skills + """. Additionally I have experience in quality and reliability of electronic circuit systems through the tests that I have done when I was Analog Devices like shock, high voltage, HALT testing. Along with developing reliability testers that I programmed using LabView(a graphical programming language). My C programming and Python experience is from a project that I have done for a Junior Design Project and you can see the pictures through my personal website list below.
""" + """ """ + """ As an engineering student, the most valuable thing that I have currently learned about myself is that when faced with a difficult problem I may initially fail, but I don’t quit until I eventually solve the problem. I am a quick learner and will be a good asset to """ + name + """. Wentworth Institute of Technology incorporates COOPS/internships as part of its curriculum, and, therefore, I would be available to work full time throughout the summer for a minimum of 14 weeks. I would be honored to intern for """ + name + """ and gain experience in engineering and further """+ name +""" initiative. has a reputation for excellence, and I value your commitment to making the world a better and safer place.
""" + """ """ + """ You may contact me by phone, email or my personal website, which I have supplied below. Thank you for your time and consideration.
"""
anothertext = """
Respectfully yours,
Martynas Baranauskas
[email protected]
781-572-9775
Personal Website: https://baranauskasm.wixsite.com/mysite
or scan QR code with smartphone camera
"""
document = Document()
p = document.add_paragraph(mytext)
g = document.add_paragraph(anothertext)
k = document.add_picture('qr_code.png', width=Inches(0.7))
# document.add_page_break()
        # Save the generated document; the filename encodes company and position
filename = name + '_' + pos + '_baranauskas_.docx'
# filepath = r'C:\Users\baranauskasm\Desktop\COOP Stuff\Summer 2020 COOP (future)\cover letters\automated cover letters'
document.save(filename)
print("-----------------------------------------------------")
print(name + "_" + pos + "_baranauskas.doxc document was developed")
print("------------------------------------------------------")
        # Clear the form for another submission
self.edit1.clear()
self.edit2.clear()
self.edit3.clear()
self.edit4.clear()
if __name__ == '__main__':
    # Create the Qt Application
app = QApplication(sys.argv)
# Create and show the form
form = Form()
    # The initial size of the GUI
form.resize(1300,250)
form.show()
# Run the main Qt loop
sys.exit(app.exec_())
|
normal
|
{
"blob_id": "bad13218a7a9e687fbd29099ca80771296789d36",
"index": 1321,
"step-1": "<mask token>\n\n\nclass Form(QDialog):\n\n def __init__(self, parent=None):\n super(Form, self).__init__(parent)\n self.setWindowTitle('Cover Letter Developer')\n self.label1 = QLabel('Input Company Name')\n self.edit1 = QLineEdit('')\n self.label2 = QLabel('Input Position Title')\n self.edit2 = QLineEdit('')\n self.label3 = QLabel('How did you get introduced to the company?')\n self.edit3 = QLineEdit('')\n self.label4 = QLabel(\n 'What skills do you have that would help the COOP/Internship')\n self.edit4 = QLineEdit('')\n self.button = QPushButton('Develop')\n layout = QVBoxLayout()\n layout.addWidget(self.label1)\n layout.addWidget(self.edit1)\n layout.addWidget(self.label2)\n layout.addWidget(self.edit2)\n layout.addWidget(self.label3)\n layout.addWidget(self.edit3)\n layout.addWidget(self.label4)\n layout.addWidget(self.edit4)\n layout.addWidget(self.button)\n self.setLayout(layout)\n self.button.clicked.connect(self.coverlet)\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Form(QDialog):\n\n def __init__(self, parent=None):\n super(Form, self).__init__(parent)\n self.setWindowTitle('Cover Letter Developer')\n self.label1 = QLabel('Input Company Name')\n self.edit1 = QLineEdit('')\n self.label2 = QLabel('Input Position Title')\n self.edit2 = QLineEdit('')\n self.label3 = QLabel('How did you get introduced to the company?')\n self.edit3 = QLineEdit('')\n self.label4 = QLabel(\n 'What skills do you have that would help the COOP/Internship')\n self.edit4 = QLineEdit('')\n self.button = QPushButton('Develop')\n layout = QVBoxLayout()\n layout.addWidget(self.label1)\n layout.addWidget(self.edit1)\n layout.addWidget(self.label2)\n layout.addWidget(self.edit2)\n layout.addWidget(self.label3)\n layout.addWidget(self.edit3)\n layout.addWidget(self.label4)\n layout.addWidget(self.edit4)\n layout.addWidget(self.button)\n self.setLayout(layout)\n self.button.clicked.connect(self.coverlet)\n\n def coverlet(self):\n name = self.edit1.text()\n pos = self.edit2.text()\n intro = self.edit3.text()\n skills = self.edit4.text()\n mytext = '\\n Dear ' + name + \"\"\"’s Hiring Team,\n \n\n \"\"\" + ' ' + ' I am writing to apply to the ' + pos + ' Intern/COOP position at ' + name + '. I am a 4th year at Wentworth Institute of Technology, pursuing a Bachelor of Science degree in Electro-mechanical Engineering. The Electro-mechanical Engineering program combines the technical disciplines of Electrical and Mechanical Engineering. ' + intro + \"\"\" \n \n \"\"\" + 'As an intern at ' + name + ' , I will bring my toolset of ' + skills + \"\"\". Additionally I have experience in quality and reliability of electronic circuit systems through the tests that I have done when I was Analog Devices like shock, high voltage, HALT testing. Along with developing reliability testers that I programmed using LabView(a graphical programming language). My C programming and Python experience is from a project that I have done for a Junior Design Project and you can see the pictures through my personal website list below.\n\n \"\"\" + ' ' + ' As an engineering student, the most valuable thing that I have currently learned about myself is that when faced with a difficult problem I may initially fail, but I don’t quit until I eventually solve the problem. I am a quick learner and will be a good asset to ' + name + '. Wentworth Institute of Technology incorporates COOPS/internships as part of its curriculum, and, therefore, I would be available to work full time throughout the summer for a minimum of 14 weeks. I would be honored to intern for ' + name + ' and gain experience in engineering and further ' + name + \"\"\" initiative. has a reputation for excellence, and I value your commitment to making the world a better and safer place.\n\n \"\"\" + ' ' + \"\"\" You may contact me by phone, email or my personal website, which I have supplied below. 
Thank you for your time and consideration.\n\n \"\"\"\n anothertext = \"\"\" \nRespectfully yours,\nMartynas Baranauskas\[email protected]\n781-572-9775\nPersonal Website: https://baranauskasm.wixsite.com/mysite\nor scan QR code with smartphone camera\n \"\"\"\n document = Document()\n p = document.add_paragraph(mytext)\n g = document.add_paragraph(anothertext)\n k = document.add_picture('qr_code.png', width=Inches(0.7))\n filename = name + '_' + pos + '_baranauskas_.docx'\n document.save(filename)\n print('-----------------------------------------------------')\n print(name + '_' + pos + '_baranauskas.doxc document was developed')\n print('------------------------------------------------------')\n self.edit1.clear()\n self.edit2.clear()\n self.edit3.clear()\n self.edit4.clear()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Form(QDialog):\n\n def __init__(self, parent=None):\n super(Form, self).__init__(parent)\n self.setWindowTitle('Cover Letter Developer')\n self.label1 = QLabel('Input Company Name')\n self.edit1 = QLineEdit('')\n self.label2 = QLabel('Input Position Title')\n self.edit2 = QLineEdit('')\n self.label3 = QLabel('How did you get introduced to the company?')\n self.edit3 = QLineEdit('')\n self.label4 = QLabel(\n 'What skills do you have that would help the COOP/Internship')\n self.edit4 = QLineEdit('')\n self.button = QPushButton('Develop')\n layout = QVBoxLayout()\n layout.addWidget(self.label1)\n layout.addWidget(self.edit1)\n layout.addWidget(self.label2)\n layout.addWidget(self.edit2)\n layout.addWidget(self.label3)\n layout.addWidget(self.edit3)\n layout.addWidget(self.label4)\n layout.addWidget(self.edit4)\n layout.addWidget(self.button)\n self.setLayout(layout)\n self.button.clicked.connect(self.coverlet)\n\n def coverlet(self):\n name = self.edit1.text()\n pos = self.edit2.text()\n intro = self.edit3.text()\n skills = self.edit4.text()\n mytext = '\\n Dear ' + name + \"\"\"’s Hiring Team,\n \n\n \"\"\" + ' ' + ' I am writing to apply to the ' + pos + ' Intern/COOP position at ' + name + '. I am a 4th year at Wentworth Institute of Technology, pursuing a Bachelor of Science degree in Electro-mechanical Engineering. The Electro-mechanical Engineering program combines the technical disciplines of Electrical and Mechanical Engineering. ' + intro + \"\"\" \n \n \"\"\" + 'As an intern at ' + name + ' , I will bring my toolset of ' + skills + \"\"\". Additionally I have experience in quality and reliability of electronic circuit systems through the tests that I have done when I was Analog Devices like shock, high voltage, HALT testing. Along with developing reliability testers that I programmed using LabView(a graphical programming language). My C programming and Python experience is from a project that I have done for a Junior Design Project and you can see the pictures through my personal website list below.\n\n \"\"\" + ' ' + ' As an engineering student, the most valuable thing that I have currently learned about myself is that when faced with a difficult problem I may initially fail, but I don’t quit until I eventually solve the problem. I am a quick learner and will be a good asset to ' + name + '. Wentworth Institute of Technology incorporates COOPS/internships as part of its curriculum, and, therefore, I would be available to work full time throughout the summer for a minimum of 14 weeks. I would be honored to intern for ' + name + ' and gain experience in engineering and further ' + name + \"\"\" initiative. has a reputation for excellence, and I value your commitment to making the world a better and safer place.\n\n \"\"\" + ' ' + \"\"\" You may contact me by phone, email or my personal website, which I have supplied below. 
Thank you for your time and consideration.\n\n \"\"\"\n anothertext = \"\"\" \nRespectfully yours,\nMartynas Baranauskas\[email protected]\n781-572-9775\nPersonal Website: https://baranauskasm.wixsite.com/mysite\nor scan QR code with smartphone camera\n \"\"\"\n document = Document()\n p = document.add_paragraph(mytext)\n g = document.add_paragraph(anothertext)\n k = document.add_picture('qr_code.png', width=Inches(0.7))\n filename = name + '_' + pos + '_baranauskas_.docx'\n document.save(filename)\n print('-----------------------------------------------------')\n print(name + '_' + pos + '_baranauskas.doxc document was developed')\n print('------------------------------------------------------')\n self.edit1.clear()\n self.edit2.clear()\n self.edit3.clear()\n self.edit4.clear()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n form = Form()\n form.resize(1300, 250)\n form.show()\n sys.exit(app.exec_())\n",
"step-4": "import sys\nfrom PySide2.QtWidgets import QApplication, QDialog, QLineEdit, QPushButton, QVBoxLayout, QLabel, QWidget\nfrom docx import Document\nfrom docx.shared import Inches\n\n\nclass Form(QDialog):\n\n def __init__(self, parent=None):\n super(Form, self).__init__(parent)\n self.setWindowTitle('Cover Letter Developer')\n self.label1 = QLabel('Input Company Name')\n self.edit1 = QLineEdit('')\n self.label2 = QLabel('Input Position Title')\n self.edit2 = QLineEdit('')\n self.label3 = QLabel('How did you get introduced to the company?')\n self.edit3 = QLineEdit('')\n self.label4 = QLabel(\n 'What skills do you have that would help the COOP/Internship')\n self.edit4 = QLineEdit('')\n self.button = QPushButton('Develop')\n layout = QVBoxLayout()\n layout.addWidget(self.label1)\n layout.addWidget(self.edit1)\n layout.addWidget(self.label2)\n layout.addWidget(self.edit2)\n layout.addWidget(self.label3)\n layout.addWidget(self.edit3)\n layout.addWidget(self.label4)\n layout.addWidget(self.edit4)\n layout.addWidget(self.button)\n self.setLayout(layout)\n self.button.clicked.connect(self.coverlet)\n\n def coverlet(self):\n name = self.edit1.text()\n pos = self.edit2.text()\n intro = self.edit3.text()\n skills = self.edit4.text()\n mytext = '\\n Dear ' + name + \"\"\"’s Hiring Team,\n \n\n \"\"\" + ' ' + ' I am writing to apply to the ' + pos + ' Intern/COOP position at ' + name + '. I am a 4th year at Wentworth Institute of Technology, pursuing a Bachelor of Science degree in Electro-mechanical Engineering. The Electro-mechanical Engineering program combines the technical disciplines of Electrical and Mechanical Engineering. ' + intro + \"\"\" \n \n \"\"\" + 'As an intern at ' + name + ' , I will bring my toolset of ' + skills + \"\"\". Additionally I have experience in quality and reliability of electronic circuit systems through the tests that I have done when I was Analog Devices like shock, high voltage, HALT testing. Along with developing reliability testers that I programmed using LabView(a graphical programming language). My C programming and Python experience is from a project that I have done for a Junior Design Project and you can see the pictures through my personal website list below.\n\n \"\"\" + ' ' + ' As an engineering student, the most valuable thing that I have currently learned about myself is that when faced with a difficult problem I may initially fail, but I don’t quit until I eventually solve the problem. I am a quick learner and will be a good asset to ' + name + '. Wentworth Institute of Technology incorporates COOPS/internships as part of its curriculum, and, therefore, I would be available to work full time throughout the summer for a minimum of 14 weeks. I would be honored to intern for ' + name + ' and gain experience in engineering and further ' + name + \"\"\" initiative. has a reputation for excellence, and I value your commitment to making the world a better and safer place.\n\n \"\"\" + ' ' + \"\"\" You may contact me by phone, email or my personal website, which I have supplied below. 
Thank you for your time and consideration.\n\n \"\"\"\n anothertext = \"\"\" \nRespectfully yours,\nMartynas Baranauskas\[email protected]\n781-572-9775\nPersonal Website: https://baranauskasm.wixsite.com/mysite\nor scan QR code with smartphone camera\n \"\"\"\n document = Document()\n p = document.add_paragraph(mytext)\n g = document.add_paragraph(anothertext)\n k = document.add_picture('qr_code.png', width=Inches(0.7))\n filename = name + '_' + pos + '_baranauskas_.docx'\n document.save(filename)\n print('-----------------------------------------------------')\n print(name + '_' + pos + '_baranauskas.doxc document was developed')\n print('------------------------------------------------------')\n self.edit1.clear()\n self.edit2.clear()\n self.edit3.clear()\n self.edit4.clear()\n\n\nif __name__ == '__main__':\n app = QApplication(sys.argv)\n form = Form()\n form.resize(1300, 250)\n form.show()\n sys.exit(app.exec_())\n",
"step-5": "import sys\nfrom PySide2.QtWidgets import QApplication, QDialog, QLineEdit, QPushButton,QVBoxLayout, QLabel, QWidget\nfrom docx import Document\nfrom docx.shared import Inches\n\nclass Form(QDialog):\n\n def __init__(self, parent=None):\n super(Form, self).__init__(parent)\n #set the size\n #Creat widgets\n self.setWindowTitle(\"Cover Letter Developer\")\n self.label1 = QLabel('Input Company Name')\n self.edit1 = QLineEdit(\"\")\n self.label2 = QLabel('Input Position Title')\n self.edit2 = QLineEdit(\"\")\n self.label3 = QLabel('How did you get introduced to the company?')\n self.edit3 = QLineEdit(\"\")\n self.label4 = QLabel('What skills do you have that would help the COOP/Internship')\n self.edit4 = QLineEdit(\"\")\n self.button = QPushButton(\"Develop\")\n # Creat layout and add widgets\n layout = QVBoxLayout()\n layout.addWidget(self.label1)\n layout.addWidget(self.edit1)\n layout.addWidget(self.label2)\n layout.addWidget(self.edit2)\n layout.addWidget(self.label3)\n layout.addWidget(self.edit3)\n layout.addWidget(self.label4)\n layout.addWidget(self.edit4)\n layout.addWidget(self.button)\n #set dialog layout\n self.setLayout(layout)\n self.button.clicked.connect(self.coverlet)\n\n\n def coverlet(self):\n name = self.edit1.text()\n pos = self.edit2.text()\n intro = self.edit3.text()\n skills = self.edit4.text()\n mytext = \"\"\"\n Dear \"\"\" + name + \"\"\"’s Hiring Team,\n \\n\n \"\"\" + \"\"\" \"\"\" + \"\"\" I am writing to apply to the \"\"\" + pos + \"\"\" Intern/COOP position at \"\"\" + name + \"\"\". I am a 4th year at Wentworth Institute of Technology, pursuing a Bachelor of Science degree in Electro-mechanical Engineering. The Electro-mechanical Engineering program combines the technical disciplines of Electrical and Mechanical Engineering. \"\"\" + intro + \"\"\" \n \n \"\"\"+ \"\"\"As an intern at \"\"\" + name + \"\"\" , I will bring my toolset of \"\"\" + skills + \"\"\". Additionally I have experience in quality and reliability of electronic circuit systems through the tests that I have done when I was Analog Devices like shock, high voltage, HALT testing. Along with developing reliability testers that I programmed using LabView(a graphical programming language). My C programming and Python experience is from a project that I have done for a Junior Design Project and you can see the pictures through my personal website list below.\n\n \"\"\" + \"\"\" \"\"\" + \"\"\" As an engineering student, the most valuable thing that I have currently learned about myself is that when faced with a difficult problem I may initially fail, but I don’t quit until I eventually solve the problem. I am a quick learner and will be a good asset to \"\"\" + name + \"\"\". Wentworth Institute of Technology incorporates COOPS/internships as part of its curriculum, and, therefore, I would be available to work full time throughout the summer for a minimum of 14 weeks. I would be honored to intern for \"\"\" + name + \"\"\" and gain experience in engineering and further \"\"\"+ name +\"\"\" initiative. has a reputation for excellence, and I value your commitment to making the world a better and safer place.\n\n \"\"\" + \"\"\" \"\"\" + \"\"\" You may contact me by phone, email or my personal website, which I have supplied below. 
Thank you for your time and consideration.\n\n \"\"\"\n\n anothertext = \"\"\" \nRespectfully yours,\nMartynas Baranauskas\[email protected]\n781-572-9775\nPersonal Website: https://baranauskasm.wixsite.com/mysite\nor scan QR code with smartphone camera\n \"\"\"\n\n document = Document()\n p = document.add_paragraph(mytext)\n g = document.add_paragraph(anothertext)\n k = document.add_picture('qr_code.png', width=Inches(0.7))\n # document.add_page_break()\n\n # the saving of the document and the path to the\n filename = name + '_' + pos + '_baranauskas_.docx'\n # filepath = r'C:\\Users\\baranauskasm\\Desktop\\COOP Stuff\\Summer 2020 COOP (future)\\cover letters\\automated cover letters'\n document.save(filename)\n print(\"-----------------------------------------------------\")\n print(name + \"_\" + pos + \"_baranauskas.doxc document was developed\")\n print(\"------------------------------------------------------\")\n\n #clear the form for another submition\n self.edit1.clear()\n self.edit2.clear()\n self.edit3.clear()\n self.edit4.clear()\n\nif __name__ == '__main__':\n #or you can do a automatic one with something like\n # Create the Qt Application\n app = QApplication(sys.argv)\n # Create and show the form\n form = Form()\n #the size of the gui\n form.resize(1300,250)\n form.show()\n # Run the main Qt loop\n sys.exit(app.exec_())\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
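The record above drives python-docx from a PySide2 form. A minimal sketch of the docx-building pattern it relies on, with placeholder text and file names rather than values taken from the record:

from docx import Document

def build_letter(body: str, closing: str, out_name: str) -> None:
    # Each add_paragraph call appends one block of text to the document.
    document = Document()
    document.add_paragraph(body)
    document.add_paragraph(closing)
    # An image such as a QR code could be appended before saving with
    # document.add_picture(path, width=docx.shared.Inches(0.7)).
    document.save(out_name)

build_letter('Dear Hiring Team, ...', 'Respectfully yours, ...', 'letter.docx')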
from django.contrib import admin
from students.models import Child_detail
class ChildAdmin(admin.ModelAdmin):
def queryset(self, request):
"""
Filter the Child objects to only
display those for the currently signed in user.
"""
qs = super(ChildAdmin, self).queryset(request)
if request.user.is_superuser:
return qs
if request.user.user_category == 'block':
return qs.filter(block=request.user.account.associated_with)
if request.user.user_category == 'school':
return qs.filter(school=request.user.account.associated_with)
if request.user.user_category == 'district':
return qs.filter(district=request.user.account.associated_with)
# Register your models here.
admin.site.register(Child_detail, ChildAdmin)
|
normal
|
{
"blob_id": "582f2e6972bad85c2aaedd248f050f708c61973b",
"index": 2332,
"step-1": "<mask token>\n\n\nclass ChildAdmin(admin.ModelAdmin):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ChildAdmin(admin.ModelAdmin):\n\n def queryset(self, request):\n \"\"\"\n Filter the Child objects to only\n display those for the currently signed in user.\n \"\"\"\n qs = super(ChildAdmin, self).queryset(request)\n if request.user.is_superuser:\n return qs\n if request.user.user_category == 'block':\n return qs.filter(block=request.user.account.associated_with)\n if request.user.user_category == 'school':\n return qs.filter(school=request.user.account.associated_with)\n if request.user.user_category == 'district':\n return qs.filter(district=request.user.account.associated_with)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ChildAdmin(admin.ModelAdmin):\n\n def queryset(self, request):\n \"\"\"\n Filter the Child objects to only\n display those for the currently signed in user.\n \"\"\"\n qs = super(ChildAdmin, self).queryset(request)\n if request.user.is_superuser:\n return qs\n if request.user.user_category == 'block':\n return qs.filter(block=request.user.account.associated_with)\n if request.user.user_category == 'school':\n return qs.filter(school=request.user.account.associated_with)\n if request.user.user_category == 'district':\n return qs.filter(district=request.user.account.associated_with)\n\n\nadmin.site.register(Child_detail, ChildAdmin)\n",
"step-4": "from django.contrib import admin\nfrom students.models import Child_detail\n\n\nclass ChildAdmin(admin.ModelAdmin):\n\n def queryset(self, request):\n \"\"\"\n Filter the Child objects to only\n display those for the currently signed in user.\n \"\"\"\n qs = super(ChildAdmin, self).queryset(request)\n if request.user.is_superuser:\n return qs\n if request.user.user_category == 'block':\n return qs.filter(block=request.user.account.associated_with)\n if request.user.user_category == 'school':\n return qs.filter(school=request.user.account.associated_with)\n if request.user.user_category == 'district':\n return qs.filter(district=request.user.account.associated_with)\n\n\nadmin.site.register(Child_detail, ChildAdmin)\n",
"step-5": "from django.contrib import admin\nfrom students.models import Child_detail\nclass ChildAdmin(admin.ModelAdmin):\n\t\n\n\n def queryset(self, request):\n \"\"\"\n Filter the Child objects to only\n display those for the currently signed in user.\n \"\"\"\n qs = super(ChildAdmin, self).queryset(request)\n if request.user.is_superuser:\n \treturn qs\n if request.user.user_category == 'block':\n \treturn qs.filter(block=request.user.account.associated_with)\n if request.user.user_category == 'school':\n \treturn qs.filter(school=request.user.account.associated_with)\n if request.user.user_category == 'district':\n \treturn qs.filter(district=request.user.account.associated_with)\n # Register your models here.\n\nadmin.site.register(Child_detail,ChildAdmin)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
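The admin record above overrides queryset, the hook name used by Django releases before 1.6; on modern Django the equivalent override is get_queryset. A sketch of the same per-user filtering under the newer name, reusing the user_category and associated_with attributes from the record:

from django.contrib import admin

class ChildAdmin(admin.ModelAdmin):
    def get_queryset(self, request):
        qs = super().get_queryset(request)
        if request.user.is_superuser:
            return qs
        # user_category is 'block', 'school', or 'district', and the model has
        # a field of the same name, so the filter can be built dynamically.
        field = request.user.user_category
        return qs.filter(**{field: request.user.account.associated_with})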
#Import dependencies
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import string
import operator
from sklearn.feature_extraction.text import CountVectorizer
import pickle
import nltk
from nltk.corpus import stopwords
#nltk.download('stopwords')
from nltk.tokenize import word_tokenize
def text_process(text):
nopunc = [char for char in text if char not in string.punctuation]
nopunc = ''.join(nopunc)
return [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]
#Looping through the web-scraped reviews to make predictions
def ml_predictor(web_scrapedf):
def text_process(text):
nopunc = [char for char in text if char not in string.punctuation]
nopunc = ''.join(nopunc)
return [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]
#Loading the model
loaded_model = pickle.load(open("ml_model/model.pickle", 'rb'))
    #Loading the vectorizer
loaded_vectorizor = pickle.load(open("ml_model/vectorizer.pickle", 'rb'))
#Creating predictions for each review
for label, row in web_scrapedf.iterrows():
text = row['Reviews']
text_transform = loaded_vectorizor.transform([text])
ml_prediction = loaded_model.predict(text_transform)[0]
web_scrapedf.at[label, 'ml_predictions'] = ml_prediction
#Filtering on columns we need
scrape_results_df = web_scrapedf[['Reviews', 'ml_predictions']]
return scrape_results_df
#Function to create positive words for word cloud
def positive_words(scrape_results_df):
def text_process(text):
nopunc = [char for char in text if char not in string.punctuation]
nopunc = ''.join(nopunc)
return [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]
#Creating list of positive words
positive_wordcloud = scrape_results_df[scrape_results_df['ml_predictions'] == 'Positive']
positivecv = CountVectorizer(analyzer=text_process)
    positive_fit = positivecv.fit_transform(positive_wordcloud['Reviews'])
    #creating key value dictionary pair of counts
    positive_word_list = positivecv.get_feature_names()
positive_count_list = positive_fit.toarray().sum(axis=0)
positive_words = dict(zip(positive_word_list, positive_count_list))
positive_sorted = sorted(positive_words.items(), key=operator.itemgetter(1), reverse=True)
positive_sorted = [(p[0], int(p[1])) for p in positive_sorted]
positive_sorted = positive_sorted[:49]
return positive_sorted
#Function to create negative words for word cloud
def negative_words(scrape_results_df):
def text_process(text):
nopunc = [char for char in text if char not in string.punctuation]
nopunc = ''.join(nopunc)
return [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]
#Creating the list of negative words
negative_wordcloud = scrape_results_df[scrape_results_df['ml_predictions'] == 'Negative']
negativecv = CountVectorizer(analyzer=text_process)
    negative_fit = negativecv.fit_transform(negative_wordcloud['Reviews'])
    #creating key value dictionary pair of counts
    negative_word_list = negativecv.get_feature_names()
negative_count_list = negative_fit.toarray().sum(axis=0)
negative_words = dict(zip(negative_word_list, negative_count_list))
negative_sorted = sorted(negative_words.items(), key=operator.itemgetter(1), reverse=True)
negative_sorted = [(n[0], int(n[1])) for n in negative_sorted]
negative_sorted = negative_sorted[:49]
return negative_sorted
|
normal
|
{
"blob_id": "82f86284dddf48bf2c65ddf55eb6d7a372306373",
"index": 7182,
"step-1": "<mask token>\n\n\ndef positive_words(scrape_results_df):\n\n def text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in\n stopwords.words('english')]\n positive_wordcloud = scrape_results_df[scrape_results_df[\n 'ml_predictions'] == 'Positive']\n positivecv = CountVectorizer(analyzer=text_process)\n positive_fit = positivecv.fit_transform(positive_wordcloud['Reviews'])\n positive_word_list = positivecv.get_feature_names()\n positive_count_list = positive_fit.toarray().sum(axis=0)\n positive_words = dict(zip(positive_word_list, positive_count_list))\n positive_sorted = sorted(positive_words.items(), key=operator.\n itemgetter(1), reverse=True)\n positive_sorted = [(p[0], int(p[1])) for p in positive_sorted]\n positive_sorted = positive_sorted[:49]\n return positive_sorted\n\n\ndef negative_words(scrape_results_df):\n\n def text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in\n stopwords.words('english')]\n negative_wordcloud = scrape_results_df[scrape_results_df[\n 'ml_predictions'] == 'Negative']\n negativecv = CountVectorizer(analyzer=text_process)\n negative_fit = negativecv.fit_transform(negative_wordcloud['Reviews'])\n negative_word_list = negativecv.get_feature_names()\n negative_count_list = negative_fit.toarray().sum(axis=0)\n negative_words = dict(zip(negative_word_list, negative_count_list))\n negative_sorted = sorted(negative_words.items(), key=operator.\n itemgetter(1), reverse=True)\n negative_sorted = [(n[0], int(n[1])) for n in negative_sorted]\n negative_sorted = negative_sorted[:49]\n return negative_sorted\n",
"step-2": "<mask token>\n\n\ndef text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in\n stopwords.words('english')]\n\n\n<mask token>\n\n\ndef positive_words(scrape_results_df):\n\n def text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in\n stopwords.words('english')]\n positive_wordcloud = scrape_results_df[scrape_results_df[\n 'ml_predictions'] == 'Positive']\n positivecv = CountVectorizer(analyzer=text_process)\n positive_fit = positivecv.fit_transform(positive_wordcloud['Reviews'])\n positive_word_list = positivecv.get_feature_names()\n positive_count_list = positive_fit.toarray().sum(axis=0)\n positive_words = dict(zip(positive_word_list, positive_count_list))\n positive_sorted = sorted(positive_words.items(), key=operator.\n itemgetter(1), reverse=True)\n positive_sorted = [(p[0], int(p[1])) for p in positive_sorted]\n positive_sorted = positive_sorted[:49]\n return positive_sorted\n\n\ndef negative_words(scrape_results_df):\n\n def text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in\n stopwords.words('english')]\n negative_wordcloud = scrape_results_df[scrape_results_df[\n 'ml_predictions'] == 'Negative']\n negativecv = CountVectorizer(analyzer=text_process)\n negative_fit = negativecv.fit_transform(negative_wordcloud['Reviews'])\n negative_word_list = negativecv.get_feature_names()\n negative_count_list = negative_fit.toarray().sum(axis=0)\n negative_words = dict(zip(negative_word_list, negative_count_list))\n negative_sorted = sorted(negative_words.items(), key=operator.\n itemgetter(1), reverse=True)\n negative_sorted = [(n[0], int(n[1])) for n in negative_sorted]\n negative_sorted = negative_sorted[:49]\n return negative_sorted\n",
"step-3": "<mask token>\n\n\ndef text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in\n stopwords.words('english')]\n\n\ndef ml_predictor(web_scrapedf):\n\n def text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in\n stopwords.words('english')]\n loaded_model = pickle.load(open('ml_model/model.pickle', 'rb'))\n loaded_vectorizor = pickle.load(open('ml_model/vectorizer.pickle', 'rb'))\n for label, row in web_scrapedf.iterrows():\n text = row['Reviews']\n text_transform = loaded_vectorizor.transform([text])\n ml_prediction = loaded_model.predict(text_transform)[0]\n web_scrapedf.at[label, 'ml_predictions'] = ml_prediction\n scrape_results_df = web_scrapedf[['Reviews', 'ml_predictions']]\n return scrape_results_df\n\n\ndef positive_words(scrape_results_df):\n\n def text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in\n stopwords.words('english')]\n positive_wordcloud = scrape_results_df[scrape_results_df[\n 'ml_predictions'] == 'Positive']\n positivecv = CountVectorizer(analyzer=text_process)\n positive_fit = positivecv.fit_transform(positive_wordcloud['Reviews'])\n positive_word_list = positivecv.get_feature_names()\n positive_count_list = positive_fit.toarray().sum(axis=0)\n positive_words = dict(zip(positive_word_list, positive_count_list))\n positive_sorted = sorted(positive_words.items(), key=operator.\n itemgetter(1), reverse=True)\n positive_sorted = [(p[0], int(p[1])) for p in positive_sorted]\n positive_sorted = positive_sorted[:49]\n return positive_sorted\n\n\ndef negative_words(scrape_results_df):\n\n def text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in\n stopwords.words('english')]\n negative_wordcloud = scrape_results_df[scrape_results_df[\n 'ml_predictions'] == 'Negative']\n negativecv = CountVectorizer(analyzer=text_process)\n negative_fit = negativecv.fit_transform(negative_wordcloud['Reviews'])\n negative_word_list = negativecv.get_feature_names()\n negative_count_list = negative_fit.toarray().sum(axis=0)\n negative_words = dict(zip(negative_word_list, negative_count_list))\n negative_sorted = sorted(negative_words.items(), key=operator.\n itemgetter(1), reverse=True)\n negative_sorted = [(n[0], int(n[1])) for n in negative_sorted]\n negative_sorted = negative_sorted[:49]\n return negative_sorted\n",
"step-4": "import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport string\nimport operator\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport pickle\nimport nltk\nfrom nltk.corpus import stopwords\nfrom nltk.tokenize import word_tokenize\n\n\ndef text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in\n stopwords.words('english')]\n\n\ndef ml_predictor(web_scrapedf):\n\n def text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in\n stopwords.words('english')]\n loaded_model = pickle.load(open('ml_model/model.pickle', 'rb'))\n loaded_vectorizor = pickle.load(open('ml_model/vectorizer.pickle', 'rb'))\n for label, row in web_scrapedf.iterrows():\n text = row['Reviews']\n text_transform = loaded_vectorizor.transform([text])\n ml_prediction = loaded_model.predict(text_transform)[0]\n web_scrapedf.at[label, 'ml_predictions'] = ml_prediction\n scrape_results_df = web_scrapedf[['Reviews', 'ml_predictions']]\n return scrape_results_df\n\n\ndef positive_words(scrape_results_df):\n\n def text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in\n stopwords.words('english')]\n positive_wordcloud = scrape_results_df[scrape_results_df[\n 'ml_predictions'] == 'Positive']\n positivecv = CountVectorizer(analyzer=text_process)\n positive_fit = positivecv.fit_transform(positive_wordcloud['Reviews'])\n positive_word_list = positivecv.get_feature_names()\n positive_count_list = positive_fit.toarray().sum(axis=0)\n positive_words = dict(zip(positive_word_list, positive_count_list))\n positive_sorted = sorted(positive_words.items(), key=operator.\n itemgetter(1), reverse=True)\n positive_sorted = [(p[0], int(p[1])) for p in positive_sorted]\n positive_sorted = positive_sorted[:49]\n return positive_sorted\n\n\ndef negative_words(scrape_results_df):\n\n def text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in\n stopwords.words('english')]\n negative_wordcloud = scrape_results_df[scrape_results_df[\n 'ml_predictions'] == 'Negative']\n negativecv = CountVectorizer(analyzer=text_process)\n negative_fit = negativecv.fit_transform(negative_wordcloud['Reviews'])\n negative_word_list = negativecv.get_feature_names()\n negative_count_list = negative_fit.toarray().sum(axis=0)\n negative_words = dict(zip(negative_word_list, negative_count_list))\n negative_sorted = sorted(negative_words.items(), key=operator.\n itemgetter(1), reverse=True)\n negative_sorted = [(n[0], int(n[1])) for n in negative_sorted]\n negative_sorted = negative_sorted[:49]\n return negative_sorted\n",
"step-5": "#Import dependencies\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport string\nimport operator\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport pickle\nimport nltk\nfrom nltk.corpus import stopwords\n#nltk.download('stopwords')\nfrom nltk.tokenize import word_tokenize\n\n\ndef text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]\n\n\n#Looping through the web-scraped reviews to make predictions\ndef ml_predictor(web_scrapedf):\n\n def text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]\n\n #Loading the model\n loaded_model = pickle.load(open(\"ml_model/model.pickle\", 'rb'))\n \n #Loading the vectorizor\n loaded_vectorizor = pickle.load(open(\"ml_model/vectorizer.pickle\", 'rb'))\n \n\n #Creating predictions for each review\n for label, row in web_scrapedf.iterrows():\n text = row['Reviews']\n \n text_transform = loaded_vectorizor.transform([text])\n \n ml_prediction = loaded_model.predict(text_transform)[0]\n web_scrapedf.at[label, 'ml_predictions'] = ml_prediction\n\n #Filtering on columns we need \n scrape_results_df = web_scrapedf[['Reviews', 'ml_predictions']]\n\n return scrape_results_df\n\n#Function to create positive words for word cloud\ndef positive_words(scrape_results_df):\n\n def text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]\n\n #Creating list of positive words\n positive_wordcloud = scrape_results_df[scrape_results_df['ml_predictions'] == 'Positive']\n\n positivecv = CountVectorizer(analyzer=text_process) \n positive_fit=positivecv.fit_transform(positive_wordcloud['Reviews'])\n\n #creating key value dicitionary pair of counts\n positive_word_list = positivecv.get_feature_names(); \n positive_count_list = positive_fit.toarray().sum(axis=0) \n\n\n positive_words = dict(zip(positive_word_list, positive_count_list))\n positive_sorted = sorted(positive_words.items(), key=operator.itemgetter(1), reverse=True)\n positive_sorted = [(p[0], int(p[1])) for p in positive_sorted]\n positive_sorted = positive_sorted[:49]\n\n return positive_sorted\n\n#Function to create negative words for word cloud\ndef negative_words(scrape_results_df):\n\n def text_process(text):\n nopunc = [char for char in text if char not in string.punctuation]\n nopunc = ''.join(nopunc)\n return [word for word in nopunc.split() if word.lower() not in stopwords.words('english')]\n \n #Creating the list of negative words\n negative_wordcloud = scrape_results_df[scrape_results_df['ml_predictions'] == 'Negative']\n\n negativecv = CountVectorizer(analyzer=text_process) \n negative_fit=negativecv.fit_transform(negative_wordcloud['Reviews'])\n\n #creating key value dicitionary pair of counts\n negative_word_list = negativecv.get_feature_names(); \n negative_count_list = negative_fit.toarray().sum(axis=0) \n\n\n negative_words = dict(zip(negative_word_list, negative_count_list))\n negative_sorted = sorted(negative_words.items(), key=operator.itemgetter(1), reverse=True)\n negative_sorted = [(n[0], int(n[1])) for n in negative_sorted]\n negative_sorted = 
negative_sorted[:49]\n\n return negative_sorted\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
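All three helpers in the record above follow the same fit/sum/sort recipe. A self-contained sketch of that recipe on toy data; note that scikit-learn 1.0+ renames get_feature_names() to get_feature_names_out():

from sklearn.feature_extraction.text import CountVectorizer

docs = ['great product great price', 'terrible battery']
cv = CountVectorizer()
counts = cv.fit_transform(docs).toarray().sum(axis=0)
# Cast to plain int, as the record does, so the counts serialize cleanly.
pairs = [(word, int(n)) for word, n in zip(cv.get_feature_names_out(), counts)]
ranked = sorted(pairs, key=lambda kv: kv[1], reverse=True)
print(ranked[:3])  # [('great', 2), ('battery', 1), ('price', 1)]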
# coding: utf-8
from flask import Blueprint, make_response, render_template, request
from flask_restful import Resource
from flask_security import login_required
from ..clients.service import list_clients
from ..roles.service import list_roles
from ...models import Client, Role
admin = Blueprint('admin', __name__, url_prefix='/passport/admin')
@admin.route('/', methods=['GET'])
@login_required
def index():
headers = {'Content-Type': 'text/html'}
return make_response(render_template(
'index.html'), headers)
@admin.route('/clients/<client_id>', methods=['GET'])
@admin.route('/clients/new', methods=['GET'])
@admin.route('/clients', methods=['GET'])
@login_required
def clients(client_id=None):
headers = {'Content-Type': 'text/html'}
if request.path[-4:] == '/new':
clients = [Client()]
operation_type = 'new'
else:
clients = list_clients(client_id)
operation_type = 'list' if not client_id else 'edit'
    return make_response(render_template(
        'clients.html', clients=clients, operation_type=operation_type),
        headers)
@admin.route('/roles/<role_id>', methods=['GET'])
@admin.route('/roles/new', methods=['GET'])
@admin.route('/roles', methods=['GET'])
@login_required
def roles(role_id=None, operation_type=None):
headers = {'Content-Type': 'text/html'}
if request.path[-4:] == '/new':
roles = [Role()]
operation_type = 'new'
if not operation_type:
roles = list_roles(role_id)
operation_type = 'list' if not role_id else 'edit'
    return make_response(render_template(
        'roles.html', roles=roles, operation_type=operation_type),
        headers)
|
normal
|
{
"blob_id": "f5f1a4db33cea8421cb4236606dfb288efee7621",
"index": 2142,
"step-1": "<mask token>\n\n\[email protected]('/', methods=['GET'])\n@login_required\ndef index():\n headers = {'Content-Type': 'text/html'}\n return make_response(render_template('index.html'), headers)\n\n\n<mask token>\n\n\[email protected]('/roles/<role_id>', methods=['GET'])\[email protected]('/roles/new', methods=['GET'])\[email protected]('/roles', methods=['GET'])\n@login_required\ndef roles(role_id=None, operation_type=None):\n headers = {'Content-Type': 'text/html'}\n if request.path[-4:] == '/new':\n roles = [Role()]\n operation_type = 'new'\n if not operation_type:\n roles = list_roles(role_id)\n operation_type = 'list' if not role_id else 'edit'\n return make_response(render_template('roles.html', roles=roles,\n operation_type=operation_type))\n",
"step-2": "<mask token>\n\n\[email protected]('/', methods=['GET'])\n@login_required\ndef index():\n headers = {'Content-Type': 'text/html'}\n return make_response(render_template('index.html'), headers)\n\n\[email protected]('/clients/<client_id>', methods=['GET'])\[email protected]('/clients/new', methods=['GET'])\[email protected]('/clients', methods=['GET'])\n@login_required\ndef clients(client_id=None):\n headers = {'Content-Type': 'text/html'}\n if request.path[-4:] == '/new':\n clients = [Client()]\n operation_type = 'new'\n else:\n clients = list_clients(client_id)\n operation_type = 'list' if not client_id else 'edit'\n return make_response(render_template('clients.html', clients=clients,\n operation_type=operation_type))\n\n\[email protected]('/roles/<role_id>', methods=['GET'])\[email protected]('/roles/new', methods=['GET'])\[email protected]('/roles', methods=['GET'])\n@login_required\ndef roles(role_id=None, operation_type=None):\n headers = {'Content-Type': 'text/html'}\n if request.path[-4:] == '/new':\n roles = [Role()]\n operation_type = 'new'\n if not operation_type:\n roles = list_roles(role_id)\n operation_type = 'list' if not role_id else 'edit'\n return make_response(render_template('roles.html', roles=roles,\n operation_type=operation_type))\n",
"step-3": "<mask token>\nadmin = Blueprint('admin', __name__, url_prefix='/passport/admin')\n\n\[email protected]('/', methods=['GET'])\n@login_required\ndef index():\n headers = {'Content-Type': 'text/html'}\n return make_response(render_template('index.html'), headers)\n\n\[email protected]('/clients/<client_id>', methods=['GET'])\[email protected]('/clients/new', methods=['GET'])\[email protected]('/clients', methods=['GET'])\n@login_required\ndef clients(client_id=None):\n headers = {'Content-Type': 'text/html'}\n if request.path[-4:] == '/new':\n clients = [Client()]\n operation_type = 'new'\n else:\n clients = list_clients(client_id)\n operation_type = 'list' if not client_id else 'edit'\n return make_response(render_template('clients.html', clients=clients,\n operation_type=operation_type))\n\n\[email protected]('/roles/<role_id>', methods=['GET'])\[email protected]('/roles/new', methods=['GET'])\[email protected]('/roles', methods=['GET'])\n@login_required\ndef roles(role_id=None, operation_type=None):\n headers = {'Content-Type': 'text/html'}\n if request.path[-4:] == '/new':\n roles = [Role()]\n operation_type = 'new'\n if not operation_type:\n roles = list_roles(role_id)\n operation_type = 'list' if not role_id else 'edit'\n return make_response(render_template('roles.html', roles=roles,\n operation_type=operation_type))\n",
"step-4": "from flask import Blueprint, make_response, render_template, request\nfrom flask_restful import Resource\nfrom flask_security import login_required\nfrom ..clients.service import list_clients\nfrom ..roles.service import list_roles\nfrom ...models import Client, Role\nadmin = Blueprint('admin', __name__, url_prefix='/passport/admin')\n\n\[email protected]('/', methods=['GET'])\n@login_required\ndef index():\n headers = {'Content-Type': 'text/html'}\n return make_response(render_template('index.html'), headers)\n\n\[email protected]('/clients/<client_id>', methods=['GET'])\[email protected]('/clients/new', methods=['GET'])\[email protected]('/clients', methods=['GET'])\n@login_required\ndef clients(client_id=None):\n headers = {'Content-Type': 'text/html'}\n if request.path[-4:] == '/new':\n clients = [Client()]\n operation_type = 'new'\n else:\n clients = list_clients(client_id)\n operation_type = 'list' if not client_id else 'edit'\n return make_response(render_template('clients.html', clients=clients,\n operation_type=operation_type))\n\n\[email protected]('/roles/<role_id>', methods=['GET'])\[email protected]('/roles/new', methods=['GET'])\[email protected]('/roles', methods=['GET'])\n@login_required\ndef roles(role_id=None, operation_type=None):\n headers = {'Content-Type': 'text/html'}\n if request.path[-4:] == '/new':\n roles = [Role()]\n operation_type = 'new'\n if not operation_type:\n roles = list_roles(role_id)\n operation_type = 'list' if not role_id else 'edit'\n return make_response(render_template('roles.html', roles=roles,\n operation_type=operation_type))\n",
"step-5": "# coding: utf-8\nfrom flask import Blueprint, make_response, render_template, request\nfrom flask_restful import Resource\nfrom flask_security import login_required\n\nfrom ..clients.service import list_clients\nfrom ..roles.service import list_roles\nfrom ...models import Client, Role\n\n\nadmin = Blueprint('admin', __name__, url_prefix='/passport/admin')\n\n\[email protected]('/', methods=['GET'])\n@login_required\ndef index():\n headers = {'Content-Type': 'text/html'}\n return make_response(render_template(\n 'index.html'), headers)\n\n\[email protected]('/clients/<client_id>', methods=['GET'])\[email protected]('/clients/new', methods=['GET'])\[email protected]('/clients', methods=['GET'])\n@login_required\ndef clients(client_id=None):\n headers = {'Content-Type': 'text/html'}\n if request.path[-4:] == '/new':\n clients = [Client()]\n operation_type = 'new'\n else:\n clients = list_clients(client_id)\n operation_type = 'list' if not client_id else 'edit'\n\n return make_response(render_template(\n 'clients.html', clients=clients, operation_type=operation_type))\n\n\[email protected]('/roles/<role_id>', methods=['GET'])\[email protected]('/roles/new', methods=['GET'])\[email protected]('/roles', methods=['GET'])\n@login_required\ndef roles(role_id=None, operation_type=None):\n headers = {'Content-Type': 'text/html'}\n if request.path[-4:] == '/new':\n roles = [Role()]\n operation_type = 'new'\n if not operation_type:\n roles = list_roles(role_id)\n operation_type = 'list' if not role_id else 'edit'\n\n return make_response(render_template(\n 'roles.html', roles=roles, operation_type=operation_type))\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
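The blueprint above only becomes routable once an application registers it. A minimal sketch of that wiring (the app construction here is illustrative, not part of the record):

from flask import Flask

app = Flask(__name__)
app.register_blueprint(admin)  # 'admin' is the Blueprint defined above
# Its views are now served under the blueprint's url_prefix, /passport/admin.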
import pandas as pd
from fbprophet import Prophet
import os
from utils.json_utils import read_json, write_json
from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.metrics import mean_absolute_error
root_dir = "/home/charan/Documents/workspaces/python_workspaces/Data/ADL_Project/"
final_df_path = os.path.join(root_dir, "final_data/311_Cases_master_with_desc_with_prediction.csv")
test_train_df = os.path.join(root_dir, "final_data/Data_with_no_desc.csv")
dept_category = read_json(os.path.join(root_dir, "dept/dept_category.json"))
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = np.array(y_true), np.array(y_pred)
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
value_dict = {}
# Python
final_df = pd.read_csv(final_df_path)
test_train_df = pd.read_csv(test_train_df)
test_train_df = test_train_df[test_train_df['CREATION YEAR'] > 2015]
train_split = 80
final_df['DAYS TO CLOSE'].fillna(0, inplace=True)
print(final_df['CREATION DATE'].isna().sum())
print(final_df['DAYS TO CLOSE'].isna().sum())
test_train_df['DAYS TO CLOSE'] = test_train_df['DAYS TO CLOSE'].apply(lambda x: str(x).replace(",", ""))
list_of_dataframes = []
for each_dept in sorted(list(dept_category.values())):
print(f' processing - {each_dept}')
each_test_train = test_train_df[test_train_df.DEPARTMENT == each_dept].reset_index()
each_dept_df = final_df[final_df.DEPARTMENT == each_dept].reset_index()
    # .copy() detaches these slices so the renames and column assignments
    # below do not trigger pandas' SettingWithCopyWarning.
    test_time_train = each_test_train[['CREATION DATE', 'DAYS TO CLOSE']].copy()
    each_df = each_dept_df[['CREATION DATE', 'DAYS TO CLOSE']].copy()
    each_df.rename(columns={'CREATION DATE': 'ds', 'DAYS TO CLOSE': 'y'}, inplace=True)
    test_time_train.rename(columns={'CREATION DATE': 'ds', 'DAYS TO CLOSE': 'y'}, inplace=True)
# test_time_train.y.apply(lambda x: str(x).replace(",", ""))
test_time_train.y = test_time_train.y.astype('float64')
test_time_train.y.fillna(0, inplace=True)
train, test = train_test_split(test_time_train, test_size=0.2)
m = Prophet()
m.fit(train)
forecast = m.predict(test)
mae_value = mean_absolute_error(test['y'].values, forecast['yhat'].values)
mape_error = mean_absolute_percentage_error(test['y'].values, forecast['yhat'].values)
print(f'mean absolute error : {mae_value},MAPE {mape_error} , department {each_dept}')
metric_dict = {'MAE': mae_value, 'MAPE': mape_error}
value_dict[each_dept] = metric_dict
fig1 = m.plot(forecast)
fig1.savefig(each_dept + ".png")
whole_result = m.predict(each_df)
each_df['TIME_PRED'] = whole_result['yhat']
each_df['CASE ID'] = each_dept_df['CASE ID']
list_of_dataframes.append(each_df)
write_json(value_dict, "time_series_metrics.json")
final_pred = pd.concat(list_of_dataframes)
final_pred.to_csv("final_val.csv", header=True, index=False)
|
normal
|
{
"blob_id": "25dd7ea4a154e5693c65f8c42107224efee42516",
"index": 4533,
"step-1": "<mask token>\n\n\ndef mean_absolute_percentage_error(y_true, y_pred):\n y_true, y_pred = np.array(y_true), np.array(y_pred)\n return np.mean(np.abs((y_true - y_pred) / y_true)) * 100\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef mean_absolute_percentage_error(y_true, y_pred):\n y_true, y_pred = np.array(y_true), np.array(y_pred)\n return np.mean(np.abs((y_true - y_pred) / y_true)) * 100\n\n\n<mask token>\nfinal_df['DAYS TO CLOSE'].fillna(0, inplace=True)\nprint(final_df['CREATION DATE'].isna().sum())\nprint(final_df['DAYS TO CLOSE'].isna().sum())\n<mask token>\nfor each_dept in sorted(list(dept_category.values())):\n print(f' processing - {each_dept}')\n each_test_train = test_train_df[test_train_df.DEPARTMENT == each_dept\n ].reset_index()\n each_dept_df = final_df[final_df.DEPARTMENT == each_dept].reset_index()\n test_time_train = each_test_train[['CREATION DATE', 'DAYS TO CLOSE']]\n each_df = each_dept_df[['CREATION DATE', 'DAYS TO CLOSE']]\n each_df.rename(columns={'CREATION DATE': 'ds', 'DAYS TO CLOSE': 'y'},\n inplace=True)\n test_time_train.rename(columns={'CREATION DATE': 'ds', 'DAYS TO CLOSE':\n 'y'}, inplace=True)\n test_time_train.y = test_time_train.y.astype('float64')\n test_time_train.y.fillna(0, inplace=True)\n train, test = train_test_split(test_time_train, test_size=0.2)\n m = Prophet()\n m.fit(train)\n forecast = m.predict(test)\n mae_value = mean_absolute_error(test['y'].values, forecast['yhat'].values)\n mape_error = mean_absolute_percentage_error(test['y'].values, forecast[\n 'yhat'].values)\n print(\n f'mean absolute error : {mae_value},MAPE {mape_error} , department {each_dept}'\n )\n metric_dict = {'MAE': mae_value, 'MAPE': mape_error}\n value_dict[each_dept] = metric_dict\n fig1 = m.plot(forecast)\n fig1.savefig(each_dept + '.png')\n whole_result = m.predict(each_df)\n each_df['TIME_PRED'] = whole_result['yhat']\n each_df['CASE ID'] = each_dept_df['CASE ID']\n list_of_dataframes.append(each_df)\nwrite_json(value_dict, 'time_series_metrics.json')\n<mask token>\nfinal_pred.to_csv('final_val.csv', header=True, index=False)\n",
"step-3": "<mask token>\nroot_dir = (\n '/home/charan/Documents/workspaces/python_workspaces/Data/ADL_Project/')\nfinal_df_path = os.path.join(root_dir,\n 'final_data/311_Cases_master_with_desc_with_prediction.csv')\ntest_train_df = os.path.join(root_dir, 'final_data/Data_with_no_desc.csv')\ndept_category = read_json(os.path.join(root_dir, 'dept/dept_category.json'))\n\n\ndef mean_absolute_percentage_error(y_true, y_pred):\n y_true, y_pred = np.array(y_true), np.array(y_pred)\n return np.mean(np.abs((y_true - y_pred) / y_true)) * 100\n\n\nvalue_dict = {}\nfinal_df = pd.read_csv(final_df_path)\ntest_train_df = pd.read_csv(test_train_df)\ntest_train_df = test_train_df[test_train_df['CREATION YEAR'] > 2015]\ntrain_split = 80\nfinal_df['DAYS TO CLOSE'].fillna(0, inplace=True)\nprint(final_df['CREATION DATE'].isna().sum())\nprint(final_df['DAYS TO CLOSE'].isna().sum())\ntest_train_df['DAYS TO CLOSE'] = test_train_df['DAYS TO CLOSE'].apply(lambda\n x: str(x).replace(',', ''))\nlist_of_dataframes = []\nfor each_dept in sorted(list(dept_category.values())):\n print(f' processing - {each_dept}')\n each_test_train = test_train_df[test_train_df.DEPARTMENT == each_dept\n ].reset_index()\n each_dept_df = final_df[final_df.DEPARTMENT == each_dept].reset_index()\n test_time_train = each_test_train[['CREATION DATE', 'DAYS TO CLOSE']]\n each_df = each_dept_df[['CREATION DATE', 'DAYS TO CLOSE']]\n each_df.rename(columns={'CREATION DATE': 'ds', 'DAYS TO CLOSE': 'y'},\n inplace=True)\n test_time_train.rename(columns={'CREATION DATE': 'ds', 'DAYS TO CLOSE':\n 'y'}, inplace=True)\n test_time_train.y = test_time_train.y.astype('float64')\n test_time_train.y.fillna(0, inplace=True)\n train, test = train_test_split(test_time_train, test_size=0.2)\n m = Prophet()\n m.fit(train)\n forecast = m.predict(test)\n mae_value = mean_absolute_error(test['y'].values, forecast['yhat'].values)\n mape_error = mean_absolute_percentage_error(test['y'].values, forecast[\n 'yhat'].values)\n print(\n f'mean absolute error : {mae_value},MAPE {mape_error} , department {each_dept}'\n )\n metric_dict = {'MAE': mae_value, 'MAPE': mape_error}\n value_dict[each_dept] = metric_dict\n fig1 = m.plot(forecast)\n fig1.savefig(each_dept + '.png')\n whole_result = m.predict(each_df)\n each_df['TIME_PRED'] = whole_result['yhat']\n each_df['CASE ID'] = each_dept_df['CASE ID']\n list_of_dataframes.append(each_df)\nwrite_json(value_dict, 'time_series_metrics.json')\nfinal_pred = pd.concat(list_of_dataframes)\nfinal_pred.to_csv('final_val.csv', header=True, index=False)\n",
"step-4": "import pandas as pd\nfrom fbprophet import Prophet\nimport os\nfrom utils.json_utils import read_json, write_json\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nfrom sklearn.metrics import mean_absolute_error\nroot_dir = (\n '/home/charan/Documents/workspaces/python_workspaces/Data/ADL_Project/')\nfinal_df_path = os.path.join(root_dir,\n 'final_data/311_Cases_master_with_desc_with_prediction.csv')\ntest_train_df = os.path.join(root_dir, 'final_data/Data_with_no_desc.csv')\ndept_category = read_json(os.path.join(root_dir, 'dept/dept_category.json'))\n\n\ndef mean_absolute_percentage_error(y_true, y_pred):\n y_true, y_pred = np.array(y_true), np.array(y_pred)\n return np.mean(np.abs((y_true - y_pred) / y_true)) * 100\n\n\nvalue_dict = {}\nfinal_df = pd.read_csv(final_df_path)\ntest_train_df = pd.read_csv(test_train_df)\ntest_train_df = test_train_df[test_train_df['CREATION YEAR'] > 2015]\ntrain_split = 80\nfinal_df['DAYS TO CLOSE'].fillna(0, inplace=True)\nprint(final_df['CREATION DATE'].isna().sum())\nprint(final_df['DAYS TO CLOSE'].isna().sum())\ntest_train_df['DAYS TO CLOSE'] = test_train_df['DAYS TO CLOSE'].apply(lambda\n x: str(x).replace(',', ''))\nlist_of_dataframes = []\nfor each_dept in sorted(list(dept_category.values())):\n print(f' processing - {each_dept}')\n each_test_train = test_train_df[test_train_df.DEPARTMENT == each_dept\n ].reset_index()\n each_dept_df = final_df[final_df.DEPARTMENT == each_dept].reset_index()\n test_time_train = each_test_train[['CREATION DATE', 'DAYS TO CLOSE']]\n each_df = each_dept_df[['CREATION DATE', 'DAYS TO CLOSE']]\n each_df.rename(columns={'CREATION DATE': 'ds', 'DAYS TO CLOSE': 'y'},\n inplace=True)\n test_time_train.rename(columns={'CREATION DATE': 'ds', 'DAYS TO CLOSE':\n 'y'}, inplace=True)\n test_time_train.y = test_time_train.y.astype('float64')\n test_time_train.y.fillna(0, inplace=True)\n train, test = train_test_split(test_time_train, test_size=0.2)\n m = Prophet()\n m.fit(train)\n forecast = m.predict(test)\n mae_value = mean_absolute_error(test['y'].values, forecast['yhat'].values)\n mape_error = mean_absolute_percentage_error(test['y'].values, forecast[\n 'yhat'].values)\n print(\n f'mean absolute error : {mae_value},MAPE {mape_error} , department {each_dept}'\n )\n metric_dict = {'MAE': mae_value, 'MAPE': mape_error}\n value_dict[each_dept] = metric_dict\n fig1 = m.plot(forecast)\n fig1.savefig(each_dept + '.png')\n whole_result = m.predict(each_df)\n each_df['TIME_PRED'] = whole_result['yhat']\n each_df['CASE ID'] = each_dept_df['CASE ID']\n list_of_dataframes.append(each_df)\nwrite_json(value_dict, 'time_series_metrics.json')\nfinal_pred = pd.concat(list_of_dataframes)\nfinal_pred.to_csv('final_val.csv', header=True, index=False)\n",
"step-5": "import pandas as pd\nfrom fbprophet import Prophet\nimport os\nfrom utils.json_utils import read_json, write_json\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\nfrom sklearn.metrics import mean_absolute_error\n\nroot_dir = \"/home/charan/Documents/workspaces/python_workspaces/Data/ADL_Project/\"\nfinal_df_path = os.path.join(root_dir, \"final_data/311_Cases_master_with_desc_with_prediction.csv\")\ntest_train_df = os.path.join(root_dir, \"final_data/Data_with_no_desc.csv\")\ndept_category = read_json(os.path.join(root_dir, \"dept/dept_category.json\"))\n\n\ndef mean_absolute_percentage_error(y_true, y_pred):\n y_true, y_pred = np.array(y_true), np.array(y_pred)\n return np.mean(np.abs((y_true - y_pred) / y_true)) * 100\n\n\nvalue_dict = {}\n# Python\nfinal_df = pd.read_csv(final_df_path)\ntest_train_df = pd.read_csv(test_train_df)\ntest_train_df = test_train_df[test_train_df['CREATION YEAR'] > 2015]\ntrain_split = 80\n\nfinal_df['DAYS TO CLOSE'].fillna(0, inplace=True)\nprint(final_df['CREATION DATE'].isna().sum())\nprint(final_df['DAYS TO CLOSE'].isna().sum())\n\ntest_train_df['DAYS TO CLOSE'] = test_train_df['DAYS TO CLOSE'].apply(lambda x: str(x).replace(\",\", \"\"))\n\nlist_of_dataframes = []\n\nfor each_dept in sorted(list(dept_category.values())):\n print(f' processing - {each_dept}')\n each_test_train = test_train_df[test_train_df.DEPARTMENT == each_dept].reset_index()\n each_dept_df = final_df[final_df.DEPARTMENT == each_dept].reset_index()\n\n test_time_train = each_test_train[['CREATION DATE', 'DAYS TO CLOSE']]\n each_df = each_dept_df[['CREATION DATE', 'DAYS TO CLOSE']]\n\n each_df.rename(columns={'CREATION DATE': 'ds', 'DAYS TO CLOSE': 'y'}, inplace=True)\n test_time_train.rename(columns={'CREATION DATE': 'ds', 'DAYS TO CLOSE': 'y'}, inplace=True)\n # test_time_train.y.apply(lambda x: str(x).replace(\",\", \"\"))\n test_time_train.y = test_time_train.y.astype('float64')\n test_time_train.y.fillna(0, inplace=True)\n train, test = train_test_split(test_time_train, test_size=0.2)\n m = Prophet()\n m.fit(train)\n forecast = m.predict(test)\n mae_value = mean_absolute_error(test['y'].values, forecast['yhat'].values)\n mape_error = mean_absolute_percentage_error(test['y'].values, forecast['yhat'].values)\n print(f'mean absolute error : {mae_value},MAPE {mape_error} , department {each_dept}')\n metric_dict = {'MAE': mae_value, 'MAPE': mape_error}\n value_dict[each_dept] = metric_dict\n fig1 = m.plot(forecast)\n fig1.savefig(each_dept + \".png\")\n whole_result = m.predict(each_df)\n each_df['TIME_PRED'] = whole_result['yhat']\n each_df['CASE ID'] = each_dept_df['CASE ID']\n list_of_dataframes.append(each_df)\n\nwrite_json(value_dict, \"time_series_metrics.json\")\nfinal_pred = pd.concat(list_of_dataframes)\nfinal_pred.to_csv(\"final_val.csv\", header=True, index=False)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
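The loop above works because Prophet requires a fixed input schema: a dataframe with a date column named 'ds' and a value column named 'y', hence the renames before fitting. A minimal fit/predict sketch on synthetic data:

import pandas as pd
from fbprophet import Prophet

# 100 days of toy data in the required ds/y layout.
df = pd.DataFrame({'ds': pd.date_range('2016-01-01', periods=100), 'y': range(100)})
m = Prophet()
m.fit(df)
forecast = m.predict(df[['ds']])
print(forecast[['ds', 'yhat']].tail(3))  # yhat holds the model's predictions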
from typing import (Any, Callable, Dict, List, Optional, Set, Tuple, Type,
Union, overload)
from pccm.stubs import EnumClassValue, EnumValue
from cumm.tensorview import Tensor
class ConvMainUnitTest:
@staticmethod
def implicit_gemm(input: Tensor, weight: Tensor, output: Tensor, padding: List[int], stride: List[int], dilation: List[int], ndim: int, iter_algo_: int, op_type_: int, i_ltype_: int, w_ltype_: int, o_ltype_: int, ts: Tuple[int, int, int], wts: Tuple[int, int, int], num_stage: int, dacc: int, dcomp: int, algo: str, tensorop: List[int], i_interleave: int = 1, w_interleave: int = 1, o_interleave: int = 1, alpha: float = 1, beta: float = 0, split_k_slices: int = 1, workspace: Tensor = Tensor(), mask_sparse: bool = False, increment_k_first: bool = False, mask: Tensor = Tensor(), mask_argsort: Tensor = Tensor(), indices: Tensor = Tensor(), mask_output: Tensor = Tensor()) -> None:
"""
Args:
input:
weight:
output:
padding:
stride:
dilation:
ndim:
iter_algo_:
op_type_:
i_ltype_:
w_ltype_:
o_ltype_:
ts:
wts:
num_stage:
dacc:
dcomp:
algo:
tensorop:
i_interleave:
w_interleave:
o_interleave:
alpha:
beta:
split_k_slices:
workspace:
mask_sparse:
increment_k_first:
mask:
mask_argsort:
indices:
mask_output:
"""
...
|
normal
|
{
"blob_id": "a6f3c51d4115a6e0d6f01aa75bf5e6e367840d43",
"index": 914,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ConvMainUnitTest:\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ConvMainUnitTest:\n\n @staticmethod\n def implicit_gemm(input: Tensor, weight: Tensor, output: Tensor,\n padding: List[int], stride: List[int], dilation: List[int], ndim:\n int, iter_algo_: int, op_type_: int, i_ltype_: int, w_ltype_: int,\n o_ltype_: int, ts: Tuple[int, int, int], wts: Tuple[int, int, int],\n num_stage: int, dacc: int, dcomp: int, algo: str, tensorop: List[\n int], i_interleave: int=1, w_interleave: int=1, o_interleave: int=1,\n alpha: float=1, beta: float=0, split_k_slices: int=1, workspace:\n Tensor=Tensor(), mask_sparse: bool=False, increment_k_first: bool=\n False, mask: Tensor=Tensor(), mask_argsort: Tensor=Tensor(),\n indices: Tensor=Tensor(), mask_output: Tensor=Tensor()) ->None:\n \"\"\"\n Args:\n input: \n weight: \n output: \n padding: \n stride: \n dilation: \n ndim: \n iter_algo_: \n op_type_: \n i_ltype_: \n w_ltype_: \n o_ltype_: \n ts: \n wts: \n num_stage: \n dacc: \n dcomp: \n algo: \n tensorop: \n i_interleave: \n w_interleave: \n o_interleave: \n alpha: \n beta: \n split_k_slices: \n workspace: \n mask_sparse: \n increment_k_first: \n mask: \n mask_argsort: \n indices: \n mask_output: \n \"\"\"\n ...\n",
"step-4": "from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union, overload\nfrom pccm.stubs import EnumClassValue, EnumValue\nfrom cumm.tensorview import Tensor\n\n\nclass ConvMainUnitTest:\n\n @staticmethod\n def implicit_gemm(input: Tensor, weight: Tensor, output: Tensor,\n padding: List[int], stride: List[int], dilation: List[int], ndim:\n int, iter_algo_: int, op_type_: int, i_ltype_: int, w_ltype_: int,\n o_ltype_: int, ts: Tuple[int, int, int], wts: Tuple[int, int, int],\n num_stage: int, dacc: int, dcomp: int, algo: str, tensorop: List[\n int], i_interleave: int=1, w_interleave: int=1, o_interleave: int=1,\n alpha: float=1, beta: float=0, split_k_slices: int=1, workspace:\n Tensor=Tensor(), mask_sparse: bool=False, increment_k_first: bool=\n False, mask: Tensor=Tensor(), mask_argsort: Tensor=Tensor(),\n indices: Tensor=Tensor(), mask_output: Tensor=Tensor()) ->None:\n \"\"\"\n Args:\n input: \n weight: \n output: \n padding: \n stride: \n dilation: \n ndim: \n iter_algo_: \n op_type_: \n i_ltype_: \n w_ltype_: \n o_ltype_: \n ts: \n wts: \n num_stage: \n dacc: \n dcomp: \n algo: \n tensorop: \n i_interleave: \n w_interleave: \n o_interleave: \n alpha: \n beta: \n split_k_slices: \n workspace: \n mask_sparse: \n increment_k_first: \n mask: \n mask_argsort: \n indices: \n mask_output: \n \"\"\"\n ...\n",
"step-5": "from typing import (Any, Callable, Dict, List, Optional, Set, Tuple, Type,\n Union, overload)\n\nfrom pccm.stubs import EnumClassValue, EnumValue\n\nfrom cumm.tensorview import Tensor\n\nclass ConvMainUnitTest:\n @staticmethod\n def implicit_gemm(input: Tensor, weight: Tensor, output: Tensor, padding: List[int], stride: List[int], dilation: List[int], ndim: int, iter_algo_: int, op_type_: int, i_ltype_: int, w_ltype_: int, o_ltype_: int, ts: Tuple[int, int, int], wts: Tuple[int, int, int], num_stage: int, dacc: int, dcomp: int, algo: str, tensorop: List[int], i_interleave: int = 1, w_interleave: int = 1, o_interleave: int = 1, alpha: float = 1, beta: float = 0, split_k_slices: int = 1, workspace: Tensor = Tensor(), mask_sparse: bool = False, increment_k_first: bool = False, mask: Tensor = Tensor(), mask_argsort: Tensor = Tensor(), indices: Tensor = Tensor(), mask_output: Tensor = Tensor()) -> None: \n \"\"\"\n Args:\n input: \n weight: \n output: \n padding: \n stride: \n dilation: \n ndim: \n iter_algo_: \n op_type_: \n i_ltype_: \n w_ltype_: \n o_ltype_: \n ts: \n wts: \n num_stage: \n dacc: \n dcomp: \n algo: \n tensorop: \n i_interleave: \n w_interleave: \n o_interleave: \n alpha: \n beta: \n split_k_slices: \n workspace: \n mask_sparse: \n increment_k_first: \n mask: \n mask_argsort: \n indices: \n mask_output: \n \"\"\"\n ...",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
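The record above is a .pyi-style stub: the Ellipsis body and the bare Args docstring document the signature of a compiled extension without implementing it. The same pattern in miniature, with hypothetical names:

from cumm.tensorview import Tensor

class ExampleUnitTest:
    @staticmethod
    def run(input: Tensor, alpha: float = 1.0) -> None:
        """
        Args:
            input:
            alpha:
        """
        ...  # stub only: the real body lives in the compiled extension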
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack.container_infrastructure_management.v1 import (
cluster_certificate,
)
from openstack.tests.unit import base
coe_cluster_ca_obj = dict(
cluster_uuid="43e305ce-3a5f-412a-8a14-087834c34c8c",
pem="-----BEGIN CERTIFICATE-----\nMIIDAO\n-----END CERTIFICATE-----\n",
bay_uuid="43e305ce-3a5f-412a-8a14-087834c34c8c",
links=[],
)
coe_cluster_signed_cert_obj = dict(
cluster_uuid='43e305ce-3a5f-412a-8a14-087834c34c8c',
pem='-----BEGIN CERTIFICATE-----\nMIIDAO\n-----END CERTIFICATE-----',
bay_uuid='43e305ce-3a5f-412a-8a14-087834c34c8c',
links=[],
csr=(
'-----BEGIN CERTIFICATE REQUEST-----\nMIICfz=='
'\n-----END CERTIFICATE REQUEST-----\n'
),
)
class TestCOEClusters(base.TestCase):
def _compare_cluster_certs(self, exp, real):
self.assertDictEqual(
cluster_certificate.ClusterCertificate(**exp).to_dict(
computed=False
),
real.to_dict(computed=False),
)
def get_mock_url(
self,
service_type='container-infrastructure-management',
base_url_append=None,
append=None,
resource=None,
):
return super(TestCOEClusters, self).get_mock_url(
service_type=service_type,
resource=resource,
append=append,
base_url_append=base_url_append,
)
def test_get_coe_cluster_certificate(self):
self.register_uris(
[
dict(
method='GET',
uri=self.get_mock_url(
resource='certificates',
append=[coe_cluster_ca_obj['cluster_uuid']],
),
json=coe_cluster_ca_obj,
)
]
)
ca_cert = self.cloud.get_coe_cluster_certificate(
coe_cluster_ca_obj['cluster_uuid']
)
self._compare_cluster_certs(coe_cluster_ca_obj, ca_cert)
self.assert_calls()
def test_sign_coe_cluster_certificate(self):
self.register_uris(
[
dict(
method='POST',
uri=self.get_mock_url(resource='certificates'),
json={
"cluster_uuid": coe_cluster_signed_cert_obj[
'cluster_uuid'
],
"csr": coe_cluster_signed_cert_obj['csr'],
},
)
]
)
self.cloud.sign_coe_cluster_certificate(
coe_cluster_signed_cert_obj['cluster_uuid'],
coe_cluster_signed_cert_obj['csr'],
)
self.assert_calls()
|
normal
|
{
"blob_id": "2bf057621df3b860c8f677baf54673d2da8c2bd1",
"index": 5804,
"step-1": "<mask token>\n\n\nclass TestCOEClusters(base.TestCase):\n <mask token>\n\n def get_mock_url(self, service_type=\n 'container-infrastructure-management', base_url_append=None, append\n =None, resource=None):\n return super(TestCOEClusters, self).get_mock_url(service_type=\n service_type, resource=resource, append=append, base_url_append\n =base_url_append)\n <mask token>\n\n def test_sign_coe_cluster_certificate(self):\n self.register_uris([dict(method='POST', uri=self.get_mock_url(\n resource='certificates'), json={'cluster_uuid':\n coe_cluster_signed_cert_obj['cluster_uuid'], 'csr':\n coe_cluster_signed_cert_obj['csr']})])\n self.cloud.sign_coe_cluster_certificate(coe_cluster_signed_cert_obj\n ['cluster_uuid'], coe_cluster_signed_cert_obj['csr'])\n self.assert_calls()\n",
"step-2": "<mask token>\n\n\nclass TestCOEClusters(base.TestCase):\n\n def _compare_cluster_certs(self, exp, real):\n self.assertDictEqual(cluster_certificate.ClusterCertificate(**exp).\n to_dict(computed=False), real.to_dict(computed=False))\n\n def get_mock_url(self, service_type=\n 'container-infrastructure-management', base_url_append=None, append\n =None, resource=None):\n return super(TestCOEClusters, self).get_mock_url(service_type=\n service_type, resource=resource, append=append, base_url_append\n =base_url_append)\n\n def test_get_coe_cluster_certificate(self):\n self.register_uris([dict(method='GET', uri=self.get_mock_url(\n resource='certificates', append=[coe_cluster_ca_obj[\n 'cluster_uuid']]), json=coe_cluster_ca_obj)])\n ca_cert = self.cloud.get_coe_cluster_certificate(coe_cluster_ca_obj\n ['cluster_uuid'])\n self._compare_cluster_certs(coe_cluster_ca_obj, ca_cert)\n self.assert_calls()\n\n def test_sign_coe_cluster_certificate(self):\n self.register_uris([dict(method='POST', uri=self.get_mock_url(\n resource='certificates'), json={'cluster_uuid':\n coe_cluster_signed_cert_obj['cluster_uuid'], 'csr':\n coe_cluster_signed_cert_obj['csr']})])\n self.cloud.sign_coe_cluster_certificate(coe_cluster_signed_cert_obj\n ['cluster_uuid'], coe_cluster_signed_cert_obj['csr'])\n self.assert_calls()\n",
"step-3": "<mask token>\ncoe_cluster_ca_obj = dict(cluster_uuid=\n '43e305ce-3a5f-412a-8a14-087834c34c8c', pem=\n \"\"\"-----BEGIN CERTIFICATE-----\nMIIDAO\n-----END CERTIFICATE-----\n\"\"\",\n bay_uuid='43e305ce-3a5f-412a-8a14-087834c34c8c', links=[])\ncoe_cluster_signed_cert_obj = dict(cluster_uuid=\n '43e305ce-3a5f-412a-8a14-087834c34c8c', pem=\n \"\"\"-----BEGIN CERTIFICATE-----\nMIIDAO\n-----END CERTIFICATE-----\"\"\",\n bay_uuid='43e305ce-3a5f-412a-8a14-087834c34c8c', links=[], csr=\n \"\"\"-----BEGIN CERTIFICATE REQUEST-----\nMIICfz==\n-----END CERTIFICATE REQUEST-----\n\"\"\"\n )\n\n\nclass TestCOEClusters(base.TestCase):\n\n def _compare_cluster_certs(self, exp, real):\n self.assertDictEqual(cluster_certificate.ClusterCertificate(**exp).\n to_dict(computed=False), real.to_dict(computed=False))\n\n def get_mock_url(self, service_type=\n 'container-infrastructure-management', base_url_append=None, append\n =None, resource=None):\n return super(TestCOEClusters, self).get_mock_url(service_type=\n service_type, resource=resource, append=append, base_url_append\n =base_url_append)\n\n def test_get_coe_cluster_certificate(self):\n self.register_uris([dict(method='GET', uri=self.get_mock_url(\n resource='certificates', append=[coe_cluster_ca_obj[\n 'cluster_uuid']]), json=coe_cluster_ca_obj)])\n ca_cert = self.cloud.get_coe_cluster_certificate(coe_cluster_ca_obj\n ['cluster_uuid'])\n self._compare_cluster_certs(coe_cluster_ca_obj, ca_cert)\n self.assert_calls()\n\n def test_sign_coe_cluster_certificate(self):\n self.register_uris([dict(method='POST', uri=self.get_mock_url(\n resource='certificates'), json={'cluster_uuid':\n coe_cluster_signed_cert_obj['cluster_uuid'], 'csr':\n coe_cluster_signed_cert_obj['csr']})])\n self.cloud.sign_coe_cluster_certificate(coe_cluster_signed_cert_obj\n ['cluster_uuid'], coe_cluster_signed_cert_obj['csr'])\n self.assert_calls()\n",
"step-4": "from openstack.container_infrastructure_management.v1 import cluster_certificate\nfrom openstack.tests.unit import base\ncoe_cluster_ca_obj = dict(cluster_uuid=\n '43e305ce-3a5f-412a-8a14-087834c34c8c', pem=\n \"\"\"-----BEGIN CERTIFICATE-----\nMIIDAO\n-----END CERTIFICATE-----\n\"\"\",\n bay_uuid='43e305ce-3a5f-412a-8a14-087834c34c8c', links=[])\ncoe_cluster_signed_cert_obj = dict(cluster_uuid=\n '43e305ce-3a5f-412a-8a14-087834c34c8c', pem=\n \"\"\"-----BEGIN CERTIFICATE-----\nMIIDAO\n-----END CERTIFICATE-----\"\"\",\n bay_uuid='43e305ce-3a5f-412a-8a14-087834c34c8c', links=[], csr=\n \"\"\"-----BEGIN CERTIFICATE REQUEST-----\nMIICfz==\n-----END CERTIFICATE REQUEST-----\n\"\"\"\n )\n\n\nclass TestCOEClusters(base.TestCase):\n\n def _compare_cluster_certs(self, exp, real):\n self.assertDictEqual(cluster_certificate.ClusterCertificate(**exp).\n to_dict(computed=False), real.to_dict(computed=False))\n\n def get_mock_url(self, service_type=\n 'container-infrastructure-management', base_url_append=None, append\n =None, resource=None):\n return super(TestCOEClusters, self).get_mock_url(service_type=\n service_type, resource=resource, append=append, base_url_append\n =base_url_append)\n\n def test_get_coe_cluster_certificate(self):\n self.register_uris([dict(method='GET', uri=self.get_mock_url(\n resource='certificates', append=[coe_cluster_ca_obj[\n 'cluster_uuid']]), json=coe_cluster_ca_obj)])\n ca_cert = self.cloud.get_coe_cluster_certificate(coe_cluster_ca_obj\n ['cluster_uuid'])\n self._compare_cluster_certs(coe_cluster_ca_obj, ca_cert)\n self.assert_calls()\n\n def test_sign_coe_cluster_certificate(self):\n self.register_uris([dict(method='POST', uri=self.get_mock_url(\n resource='certificates'), json={'cluster_uuid':\n coe_cluster_signed_cert_obj['cluster_uuid'], 'csr':\n coe_cluster_signed_cert_obj['csr']})])\n self.cloud.sign_coe_cluster_certificate(coe_cluster_signed_cert_obj\n ['cluster_uuid'], coe_cluster_signed_cert_obj['csr'])\n self.assert_calls()\n",
"step-5": "# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\nfrom openstack.container_infrastructure_management.v1 import (\n cluster_certificate,\n)\nfrom openstack.tests.unit import base\n\ncoe_cluster_ca_obj = dict(\n cluster_uuid=\"43e305ce-3a5f-412a-8a14-087834c34c8c\",\n pem=\"-----BEGIN CERTIFICATE-----\\nMIIDAO\\n-----END CERTIFICATE-----\\n\",\n bay_uuid=\"43e305ce-3a5f-412a-8a14-087834c34c8c\",\n links=[],\n)\n\ncoe_cluster_signed_cert_obj = dict(\n cluster_uuid='43e305ce-3a5f-412a-8a14-087834c34c8c',\n pem='-----BEGIN CERTIFICATE-----\\nMIIDAO\\n-----END CERTIFICATE-----',\n bay_uuid='43e305ce-3a5f-412a-8a14-087834c34c8c',\n links=[],\n csr=(\n '-----BEGIN CERTIFICATE REQUEST-----\\nMIICfz=='\n '\\n-----END CERTIFICATE REQUEST-----\\n'\n ),\n)\n\n\nclass TestCOEClusters(base.TestCase):\n def _compare_cluster_certs(self, exp, real):\n self.assertDictEqual(\n cluster_certificate.ClusterCertificate(**exp).to_dict(\n computed=False\n ),\n real.to_dict(computed=False),\n )\n\n def get_mock_url(\n self,\n service_type='container-infrastructure-management',\n base_url_append=None,\n append=None,\n resource=None,\n ):\n return super(TestCOEClusters, self).get_mock_url(\n service_type=service_type,\n resource=resource,\n append=append,\n base_url_append=base_url_append,\n )\n\n def test_get_coe_cluster_certificate(self):\n self.register_uris(\n [\n dict(\n method='GET',\n uri=self.get_mock_url(\n resource='certificates',\n append=[coe_cluster_ca_obj['cluster_uuid']],\n ),\n json=coe_cluster_ca_obj,\n )\n ]\n )\n ca_cert = self.cloud.get_coe_cluster_certificate(\n coe_cluster_ca_obj['cluster_uuid']\n )\n self._compare_cluster_certs(coe_cluster_ca_obj, ca_cert)\n self.assert_calls()\n\n def test_sign_coe_cluster_certificate(self):\n self.register_uris(\n [\n dict(\n method='POST',\n uri=self.get_mock_url(resource='certificates'),\n json={\n \"cluster_uuid\": coe_cluster_signed_cert_obj[\n 'cluster_uuid'\n ],\n \"csr\": coe_cluster_signed_cert_obj['csr'],\n },\n )\n ]\n )\n self.cloud.sign_coe_cluster_certificate(\n coe_cluster_signed_cert_obj['cluster_uuid'],\n coe_cluster_signed_cert_obj['csr'],\n )\n self.assert_calls()\n",
"step-ids": [
3,
5,
6,
7,
8
]
}
|
[
3,
5,
6,
7,
8
] |
import cv2
import os
import re
class TestData:
def __init__(self, image_path= '../../data/test_images/'):
test_names = os.listdir(image_path)
self.images = []
self.numbers = []
        self.threshold = .25
for name in test_names:
self.images.append(cv2.imread(image_path + name))
self.numbers.append(int(re.sub("[^0-9]", "", name)))
def get_test_data(self):
return self.images
|
normal
|
{
"blob_id": "122c4f3a2949ee675b7dd64b9f9828e80cbe5610",
"index": 1246,
"step-1": "<mask token>\n\n\nclass TestData:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TestData:\n <mask token>\n\n def get_test_data(self):\n return self.images\n",
"step-3": "<mask token>\n\n\nclass TestData:\n\n def __init__(self, image_path='../../data/test_images/'):\n test_names = os.listdir(image_path)\n self.images = []\n self.numbers = []\n self.treshold = 0.25\n for name in test_names:\n self.images.append(cv2.imread(image_path + name))\n self.numbers.append(int(re.sub('[^0-9]', '', name)))\n\n def get_test_data(self):\n return self.images\n",
"step-4": "import cv2\nimport os\nimport re\n\n\nclass TestData:\n\n def __init__(self, image_path='../../data/test_images/'):\n test_names = os.listdir(image_path)\n self.images = []\n self.numbers = []\n self.treshold = 0.25\n for name in test_names:\n self.images.append(cv2.imread(image_path + name))\n self.numbers.append(int(re.sub('[^0-9]', '', name)))\n\n def get_test_data(self):\n return self.images\n",
"step-5": "import cv2\nimport os\nimport re\n\n\nclass TestData:\n def __init__(self, image_path= '../../data/test_images/'):\n test_names = os.listdir(image_path)\n\n self.images = []\n self.numbers = []\n \n self.treshold = .25\n\n for name in test_names:\n self.images.append(cv2.imread(image_path + name))\n self.numbers.append(int(re.sub(\"[^0-9]\", \"\", name)))\n \n def get_test_data(self):\n return self.images\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import json
import os
import pickle
import random
import urllib.request
from pathlib import Path
import tensorflow as tf
from matplotlib import pyplot as plt
class CNN(object):
def __init__(self):
self.model = tf.keras.Sequential([
tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 1)),
tf.keras.layers.MaxPool2D((2, 2)),
tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
tf.keras.layers.MaxPool2D(2, 2),
tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
tf.keras.layers.MaxPool2D(2, 2),
tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
tf.keras.layers.MaxPool2D(2, 2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
self.last_training_history = {}
def print_model_info(self):
print(self.model.summary())
def get_model(self):
return self.model
def load_weights(self, filepath='model.h5'):
self.model.load_weights(filepath)
self.model.compile(
optimizer='adam',
loss='binary_crossentropy',
metrics=['acc']
)
def load_last_training_history(self, filepath='result.pk'):
with open(filepath, 'rb') as f:
self.last_training_history = pickle.load(f)
def get_last_training_history(self):
return self.last_training_history
def plot_last_training_history(self, save_plot=False):
for key in self.last_training_history:
y = self.last_training_history[key]
plt.plot([i + 1 for i in range(len(y))], y, label=key)
plt.legend()
plt.grid()
plt.xlabel('epoch')
if save_plot:
plt.savefig('training_history.png', dpi=300)
else:
plt.show()
def train(self, directory, epochs=100, save_model=False, save_history=False):
train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1. / 255,
rotation_range=20,
width_shift_range=0.15,
height_shift_range=0.15,
shear_range=0.15,
zoom_range=0.15,
fill_mode='nearest',
horizontal_flip=True,
vertical_flip=False,
brightness_range=None,
channel_shift_range=0
)
test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
directory,
target_size=(150, 150),
batch_size=32,
color_mode='grayscale',
class_mode='binary'
)
test_generator = test_datagen.flow_from_directory(
directory,
target_size=(150, 150),
batch_size=32,
color_mode='grayscale',
class_mode='binary'
)
self.model.compile(
optimizer='adam',
loss='binary_crossentropy',
metrics=['acc']
)
history = self.model.fit(
train_generator,
epochs=epochs,
validation_data=test_generator
)
if save_model:
self.model.save('model.h5')
if save_history:
with open('result.pk', 'wb') as f:
pickle.dump(history.history, f)
self.last_training_history = history.history
return history.history
def predict_directory(self, directory, probabilities=True):
if directory[-1] != '\\' and directory[-1] != '/':
directory += '/'
predictions = {}
onlyfiles = [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]
for image_file in onlyfiles:
img = tf.keras.preprocessing.image.load_img(directory + image_file, target_size=(150, 150),
color_mode='grayscale')
x = tf.keras.preprocessing.image.img_to_array(img, )
x = x.reshape((1,) + x.shape)
x = x / 255
y = self.model.predict(x)[0][0]
if probabilities:
predictions[image_file] = y
else:
predictions[image_file] = y > 0.5
return predictions
def predict_single_image(self, file_url):
self.load_weights()
self.load_last_training_history()
file_name = "image.jpg"
urllib.request.urlretrieve(file_url, file_name)
img = tf.keras.preprocessing.image.load_img(file_name, target_size=(150, 150),
color_mode='grayscale')
x = tf.keras.preprocessing.image.img_to_array(img, )
x = x.reshape((1,) + x.shape)
x = x / 255
prediction = self.model.predict(x)[0][0]
is_default_image = prediction < 0.5
print(prediction)
os.remove(file_name)
return json.dumps(True) if is_default_image else json.dumps(False)
def evaluate_on_directory(self, directory):
val_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255)
val_generator = val_datagen.flow_from_directory(
directory,
target_size=(150, 150),
batch_size=32,
color_mode='grayscale',
class_mode='binary'
)
return self.model.evaluate(val_generator)
def split_directory(directory, train_size=0.75, test_size=0.2, val_size=0.05):
assert train_size + test_size + val_size == 1
assert 0 <= train_size <= 1 and 0 <= test_size <= 1 and 0 <= val_size <= 1
subdirs = next(os.walk(directory))[1]
if train_size > 0:
os.mkdir(directory + '/train')
for subdir in subdirs:
os.mkdir(directory + '/train/' + subdir)
if test_size > 0:
os.mkdir(directory + '/test')
for subdir in subdirs:
os.mkdir(directory + '/test/' + subdir)
if val_size > 0:
os.mkdir(directory + '/val')
for subdir in subdirs:
os.mkdir(directory + '/val/' + subdir)
pathlist = Path(directory).rglob('*.*')
for path in pathlist:
instance_path = str(path)
instance_properties = instance_path.split('/') if '/' in instance_path else instance_path.split('\\')
instance_name = instance_properties[-1]
instance_class = instance_properties[-2]
r = random.random()
if r < val_size:
subfolder = '/val/'
elif r < test_size + val_size:
subfolder = '/test/'
else:
subfolder = '/train/'
os.rename(instance_path, '/'.join(instance_properties[:-2]) + subfolder + instance_class + '/' + instance_name)
if __name__ == '__main__':
cnn = CNN()
cnn.load_weights()
cnn.load_last_training_history()
cnn.print_model_info()
|
normal
|
{
"blob_id": "9535335c70129f997d7b8739444a503d0b984ac8",
"index": 9753,
"step-1": "<mask token>\n\n\nclass CNN(object):\n\n def __init__(self):\n self.model = tf.keras.Sequential([tf.keras.layers.Conv2D(32, (3, 3),\n activation='relu', input_shape=(150, 150, 1)), tf.keras.layers.\n MaxPool2D((2, 2)), tf.keras.layers.Conv2D(64, (3, 3),\n activation='relu'), tf.keras.layers.MaxPool2D(2, 2), tf.keras.\n layers.Conv2D(128, (3, 3), activation='relu'), tf.keras.layers.\n MaxPool2D(2, 2), tf.keras.layers.Conv2D(128, (3, 3), activation\n ='relu'), tf.keras.layers.MaxPool2D(2, 2), tf.keras.layers.\n Flatten(), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(\n 512, activation='relu'), tf.keras.layers.Dense(1, activation=\n 'sigmoid')])\n self.last_training_history = {}\n\n def print_model_info(self):\n print(self.model.summary())\n\n def get_model(self):\n return self.model\n\n def load_weights(self, filepath='model.h5'):\n self.model.load_weights(filepath)\n self.model.compile(optimizer='adam', loss='binary_crossentropy',\n metrics=['acc'])\n\n def load_last_training_history(self, filepath='result.pk'):\n with open(filepath, 'rb') as f:\n self.last_training_history = pickle.load(f)\n\n def get_last_training_history(self):\n return self.last_training_history\n\n def plot_last_training_history(self, save_plot=False):\n for key in self.last_training_history:\n y = self.last_training_history[key]\n plt.plot([(i + 1) for i in range(len(y))], y, label=key)\n plt.legend()\n plt.grid()\n plt.xlabel('epoch')\n if save_plot:\n plt.savefig('training_history.png', dpi=300)\n else:\n plt.show()\n\n def train(self, directory, epochs=100, save_model=False, save_history=False\n ):\n train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale\n =1.0 / 255, rotation_range=20, width_shift_range=0.15,\n height_shift_range=0.15, shear_range=0.15, zoom_range=0.15,\n fill_mode='nearest', horizontal_flip=True, vertical_flip=False,\n brightness_range=None, channel_shift_range=0)\n test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale\n =1.0 / 255)\n train_generator = train_datagen.flow_from_directory(directory,\n target_size=(150, 150), batch_size=32, color_mode='grayscale',\n class_mode='binary')\n test_generator = test_datagen.flow_from_directory(directory,\n target_size=(150, 150), batch_size=32, color_mode='grayscale',\n class_mode='binary')\n self.model.compile(optimizer='adam', loss='binary_crossentropy',\n metrics=['acc'])\n history = self.model.fit(train_generator, epochs=epochs,\n validation_data=test_generator)\n if save_model:\n self.model.save('model.h5')\n if save_history:\n with open('result.pk', 'wb') as f:\n pickle.dump(history.history, f)\n self.last_training_history = history.history\n return history.history\n\n def predict_directory(self, directory, probabilities=True):\n if directory[-1] != '\\\\' and directory[-1] != '/':\n directory += '/'\n predictions = {}\n onlyfiles = [f for f in os.listdir(directory) if os.path.isfile(os.\n path.join(directory, f))]\n for image_file in onlyfiles:\n img = tf.keras.preprocessing.image.load_img(directory +\n image_file, target_size=(150, 150), color_mode='grayscale')\n x = tf.keras.preprocessing.image.img_to_array(img)\n x = x.reshape((1,) + x.shape)\n x = x / 255\n y = self.model.predict(x)[0][0]\n if probabilities:\n predictions[image_file] = y\n else:\n predictions[image_file] = y > 0.5\n return predictions\n\n def predict_single_image(self, file_url):\n self.load_weights()\n self.load_last_training_history()\n file_name = 'image.jpg'\n urllib.request.urlretrieve(file_url, file_name)\n img = 
tf.keras.preprocessing.image.load_img(file_name, target_size=\n (150, 150), color_mode='grayscale')\n x = tf.keras.preprocessing.image.img_to_array(img)\n x = x.reshape((1,) + x.shape)\n x = x / 255\n prediction = self.model.predict(x)[0][0]\n is_default_image = prediction < 0.5\n print(prediction)\n os.remove(file_name)\n return json.dumps(True) if is_default_image else json.dumps(False)\n\n\n<mask token>\n\n\ndef split_directory(directory, train_size=0.75, test_size=0.2, val_size=0.05):\n assert train_size + test_size + val_size == 1\n assert 0 <= train_size <= 1 and 0 <= test_size <= 1 and 0 <= val_size <= 1\n subdirs = next(os.walk(directory))[1]\n if train_size > 0:\n os.mkdir(directory + '/train')\n for subdir in subdirs:\n os.mkdir(directory + '/train/' + subdir)\n if test_size > 0:\n os.mkdir(directory + '/test')\n for subdir in subdirs:\n os.mkdir(directory + '/test/' + subdir)\n if val_size > 0:\n os.mkdir(directory + '/val')\n for subdir in subdirs:\n os.mkdir(directory + '/val/' + subdir)\n pathlist = Path(directory).rglob('*.*')\n for path in pathlist:\n instance_path = str(path)\n instance_properties = instance_path.split('/'\n ) if '/' in instance_path else instance_path.split('\\\\')\n instance_name = instance_properties[-1]\n instance_class = instance_properties[-2]\n r = random.random()\n if r < val_size:\n subfolder = '/val/'\n elif r < test_size + val_size:\n subfolder = '/test/'\n else:\n subfolder = '/train/'\n os.rename(instance_path, '/'.join(instance_properties[:-2]) +\n subfolder + instance_class + '/' + instance_name)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass CNN(object):\n\n def __init__(self):\n self.model = tf.keras.Sequential([tf.keras.layers.Conv2D(32, (3, 3),\n activation='relu', input_shape=(150, 150, 1)), tf.keras.layers.\n MaxPool2D((2, 2)), tf.keras.layers.Conv2D(64, (3, 3),\n activation='relu'), tf.keras.layers.MaxPool2D(2, 2), tf.keras.\n layers.Conv2D(128, (3, 3), activation='relu'), tf.keras.layers.\n MaxPool2D(2, 2), tf.keras.layers.Conv2D(128, (3, 3), activation\n ='relu'), tf.keras.layers.MaxPool2D(2, 2), tf.keras.layers.\n Flatten(), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(\n 512, activation='relu'), tf.keras.layers.Dense(1, activation=\n 'sigmoid')])\n self.last_training_history = {}\n\n def print_model_info(self):\n print(self.model.summary())\n\n def get_model(self):\n return self.model\n\n def load_weights(self, filepath='model.h5'):\n self.model.load_weights(filepath)\n self.model.compile(optimizer='adam', loss='binary_crossentropy',\n metrics=['acc'])\n\n def load_last_training_history(self, filepath='result.pk'):\n with open(filepath, 'rb') as f:\n self.last_training_history = pickle.load(f)\n\n def get_last_training_history(self):\n return self.last_training_history\n\n def plot_last_training_history(self, save_plot=False):\n for key in self.last_training_history:\n y = self.last_training_history[key]\n plt.plot([(i + 1) for i in range(len(y))], y, label=key)\n plt.legend()\n plt.grid()\n plt.xlabel('epoch')\n if save_plot:\n plt.savefig('training_history.png', dpi=300)\n else:\n plt.show()\n\n def train(self, directory, epochs=100, save_model=False, save_history=False\n ):\n train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale\n =1.0 / 255, rotation_range=20, width_shift_range=0.15,\n height_shift_range=0.15, shear_range=0.15, zoom_range=0.15,\n fill_mode='nearest', horizontal_flip=True, vertical_flip=False,\n brightness_range=None, channel_shift_range=0)\n test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale\n =1.0 / 255)\n train_generator = train_datagen.flow_from_directory(directory,\n target_size=(150, 150), batch_size=32, color_mode='grayscale',\n class_mode='binary')\n test_generator = test_datagen.flow_from_directory(directory,\n target_size=(150, 150), batch_size=32, color_mode='grayscale',\n class_mode='binary')\n self.model.compile(optimizer='adam', loss='binary_crossentropy',\n metrics=['acc'])\n history = self.model.fit(train_generator, epochs=epochs,\n validation_data=test_generator)\n if save_model:\n self.model.save('model.h5')\n if save_history:\n with open('result.pk', 'wb') as f:\n pickle.dump(history.history, f)\n self.last_training_history = history.history\n return history.history\n\n def predict_directory(self, directory, probabilities=True):\n if directory[-1] != '\\\\' and directory[-1] != '/':\n directory += '/'\n predictions = {}\n onlyfiles = [f for f in os.listdir(directory) if os.path.isfile(os.\n path.join(directory, f))]\n for image_file in onlyfiles:\n img = tf.keras.preprocessing.image.load_img(directory +\n image_file, target_size=(150, 150), color_mode='grayscale')\n x = tf.keras.preprocessing.image.img_to_array(img)\n x = x.reshape((1,) + x.shape)\n x = x / 255\n y = self.model.predict(x)[0][0]\n if probabilities:\n predictions[image_file] = y\n else:\n predictions[image_file] = y > 0.5\n return predictions\n\n def predict_single_image(self, file_url):\n self.load_weights()\n self.load_last_training_history()\n file_name = 'image.jpg'\n urllib.request.urlretrieve(file_url, file_name)\n img = 
tf.keras.preprocessing.image.load_img(file_name, target_size=\n (150, 150), color_mode='grayscale')\n x = tf.keras.preprocessing.image.img_to_array(img)\n x = x.reshape((1,) + x.shape)\n x = x / 255\n prediction = self.model.predict(x)[0][0]\n is_default_image = prediction < 0.5\n print(prediction)\n os.remove(file_name)\n return json.dumps(True) if is_default_image else json.dumps(False)\n\n\ndef evaluate_on_directory(self, directory):\n val_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=\n 1.0 / 255)\n val_generator = val_datagen.flow_from_directory(directory, target_size=\n (150, 150), batch_size=32, color_mode='grayscale', class_mode='binary')\n return self.model.evaluate(val_generator)\n\n\ndef split_directory(directory, train_size=0.75, test_size=0.2, val_size=0.05):\n assert train_size + test_size + val_size == 1\n assert 0 <= train_size <= 1 and 0 <= test_size <= 1 and 0 <= val_size <= 1\n subdirs = next(os.walk(directory))[1]\n if train_size > 0:\n os.mkdir(directory + '/train')\n for subdir in subdirs:\n os.mkdir(directory + '/train/' + subdir)\n if test_size > 0:\n os.mkdir(directory + '/test')\n for subdir in subdirs:\n os.mkdir(directory + '/test/' + subdir)\n if val_size > 0:\n os.mkdir(directory + '/val')\n for subdir in subdirs:\n os.mkdir(directory + '/val/' + subdir)\n pathlist = Path(directory).rglob('*.*')\n for path in pathlist:\n instance_path = str(path)\n instance_properties = instance_path.split('/'\n ) if '/' in instance_path else instance_path.split('\\\\')\n instance_name = instance_properties[-1]\n instance_class = instance_properties[-2]\n r = random.random()\n if r < val_size:\n subfolder = '/val/'\n elif r < test_size + val_size:\n subfolder = '/test/'\n else:\n subfolder = '/train/'\n os.rename(instance_path, '/'.join(instance_properties[:-2]) +\n subfolder + instance_class + '/' + instance_name)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass CNN(object):\n\n def __init__(self):\n self.model = tf.keras.Sequential([tf.keras.layers.Conv2D(32, (3, 3),\n activation='relu', input_shape=(150, 150, 1)), tf.keras.layers.\n MaxPool2D((2, 2)), tf.keras.layers.Conv2D(64, (3, 3),\n activation='relu'), tf.keras.layers.MaxPool2D(2, 2), tf.keras.\n layers.Conv2D(128, (3, 3), activation='relu'), tf.keras.layers.\n MaxPool2D(2, 2), tf.keras.layers.Conv2D(128, (3, 3), activation\n ='relu'), tf.keras.layers.MaxPool2D(2, 2), tf.keras.layers.\n Flatten(), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(\n 512, activation='relu'), tf.keras.layers.Dense(1, activation=\n 'sigmoid')])\n self.last_training_history = {}\n\n def print_model_info(self):\n print(self.model.summary())\n\n def get_model(self):\n return self.model\n\n def load_weights(self, filepath='model.h5'):\n self.model.load_weights(filepath)\n self.model.compile(optimizer='adam', loss='binary_crossentropy',\n metrics=['acc'])\n\n def load_last_training_history(self, filepath='result.pk'):\n with open(filepath, 'rb') as f:\n self.last_training_history = pickle.load(f)\n\n def get_last_training_history(self):\n return self.last_training_history\n\n def plot_last_training_history(self, save_plot=False):\n for key in self.last_training_history:\n y = self.last_training_history[key]\n plt.plot([(i + 1) for i in range(len(y))], y, label=key)\n plt.legend()\n plt.grid()\n plt.xlabel('epoch')\n if save_plot:\n plt.savefig('training_history.png', dpi=300)\n else:\n plt.show()\n\n def train(self, directory, epochs=100, save_model=False, save_history=False\n ):\n train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale\n =1.0 / 255, rotation_range=20, width_shift_range=0.15,\n height_shift_range=0.15, shear_range=0.15, zoom_range=0.15,\n fill_mode='nearest', horizontal_flip=True, vertical_flip=False,\n brightness_range=None, channel_shift_range=0)\n test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale\n =1.0 / 255)\n train_generator = train_datagen.flow_from_directory(directory,\n target_size=(150, 150), batch_size=32, color_mode='grayscale',\n class_mode='binary')\n test_generator = test_datagen.flow_from_directory(directory,\n target_size=(150, 150), batch_size=32, color_mode='grayscale',\n class_mode='binary')\n self.model.compile(optimizer='adam', loss='binary_crossentropy',\n metrics=['acc'])\n history = self.model.fit(train_generator, epochs=epochs,\n validation_data=test_generator)\n if save_model:\n self.model.save('model.h5')\n if save_history:\n with open('result.pk', 'wb') as f:\n pickle.dump(history.history, f)\n self.last_training_history = history.history\n return history.history\n\n def predict_directory(self, directory, probabilities=True):\n if directory[-1] != '\\\\' and directory[-1] != '/':\n directory += '/'\n predictions = {}\n onlyfiles = [f for f in os.listdir(directory) if os.path.isfile(os.\n path.join(directory, f))]\n for image_file in onlyfiles:\n img = tf.keras.preprocessing.image.load_img(directory +\n image_file, target_size=(150, 150), color_mode='grayscale')\n x = tf.keras.preprocessing.image.img_to_array(img)\n x = x.reshape((1,) + x.shape)\n x = x / 255\n y = self.model.predict(x)[0][0]\n if probabilities:\n predictions[image_file] = y\n else:\n predictions[image_file] = y > 0.5\n return predictions\n\n def predict_single_image(self, file_url):\n self.load_weights()\n self.load_last_training_history()\n file_name = 'image.jpg'\n urllib.request.urlretrieve(file_url, file_name)\n img = 
tf.keras.preprocessing.image.load_img(file_name, target_size=\n (150, 150), color_mode='grayscale')\n x = tf.keras.preprocessing.image.img_to_array(img)\n x = x.reshape((1,) + x.shape)\n x = x / 255\n prediction = self.model.predict(x)[0][0]\n is_default_image = prediction < 0.5\n print(prediction)\n os.remove(file_name)\n return json.dumps(True) if is_default_image else json.dumps(False)\n\n\ndef evaluate_on_directory(self, directory):\n val_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=\n 1.0 / 255)\n val_generator = val_datagen.flow_from_directory(directory, target_size=\n (150, 150), batch_size=32, color_mode='grayscale', class_mode='binary')\n return self.model.evaluate(val_generator)\n\n\ndef split_directory(directory, train_size=0.75, test_size=0.2, val_size=0.05):\n assert train_size + test_size + val_size == 1\n assert 0 <= train_size <= 1 and 0 <= test_size <= 1 and 0 <= val_size <= 1\n subdirs = next(os.walk(directory))[1]\n if train_size > 0:\n os.mkdir(directory + '/train')\n for subdir in subdirs:\n os.mkdir(directory + '/train/' + subdir)\n if test_size > 0:\n os.mkdir(directory + '/test')\n for subdir in subdirs:\n os.mkdir(directory + '/test/' + subdir)\n if val_size > 0:\n os.mkdir(directory + '/val')\n for subdir in subdirs:\n os.mkdir(directory + '/val/' + subdir)\n pathlist = Path(directory).rglob('*.*')\n for path in pathlist:\n instance_path = str(path)\n instance_properties = instance_path.split('/'\n ) if '/' in instance_path else instance_path.split('\\\\')\n instance_name = instance_properties[-1]\n instance_class = instance_properties[-2]\n r = random.random()\n if r < val_size:\n subfolder = '/val/'\n elif r < test_size + val_size:\n subfolder = '/test/'\n else:\n subfolder = '/train/'\n os.rename(instance_path, '/'.join(instance_properties[:-2]) +\n subfolder + instance_class + '/' + instance_name)\n\n\nif __name__ == '__main__':\n cnn = CNN()\n cnn.load_weights()\n cnn.load_last_training_history()\n cnn.print_model_info()\n",
"step-4": "import json\nimport os\nimport pickle\nimport random\nimport urllib.request\nfrom pathlib import Path\nimport tensorflow as tf\nfrom matplotlib import pyplot as plt\n\n\nclass CNN(object):\n\n def __init__(self):\n self.model = tf.keras.Sequential([tf.keras.layers.Conv2D(32, (3, 3),\n activation='relu', input_shape=(150, 150, 1)), tf.keras.layers.\n MaxPool2D((2, 2)), tf.keras.layers.Conv2D(64, (3, 3),\n activation='relu'), tf.keras.layers.MaxPool2D(2, 2), tf.keras.\n layers.Conv2D(128, (3, 3), activation='relu'), tf.keras.layers.\n MaxPool2D(2, 2), tf.keras.layers.Conv2D(128, (3, 3), activation\n ='relu'), tf.keras.layers.MaxPool2D(2, 2), tf.keras.layers.\n Flatten(), tf.keras.layers.Dropout(0.5), tf.keras.layers.Dense(\n 512, activation='relu'), tf.keras.layers.Dense(1, activation=\n 'sigmoid')])\n self.last_training_history = {}\n\n def print_model_info(self):\n print(self.model.summary())\n\n def get_model(self):\n return self.model\n\n def load_weights(self, filepath='model.h5'):\n self.model.load_weights(filepath)\n self.model.compile(optimizer='adam', loss='binary_crossentropy',\n metrics=['acc'])\n\n def load_last_training_history(self, filepath='result.pk'):\n with open(filepath, 'rb') as f:\n self.last_training_history = pickle.load(f)\n\n def get_last_training_history(self):\n return self.last_training_history\n\n def plot_last_training_history(self, save_plot=False):\n for key in self.last_training_history:\n y = self.last_training_history[key]\n plt.plot([(i + 1) for i in range(len(y))], y, label=key)\n plt.legend()\n plt.grid()\n plt.xlabel('epoch')\n if save_plot:\n plt.savefig('training_history.png', dpi=300)\n else:\n plt.show()\n\n def train(self, directory, epochs=100, save_model=False, save_history=False\n ):\n train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale\n =1.0 / 255, rotation_range=20, width_shift_range=0.15,\n height_shift_range=0.15, shear_range=0.15, zoom_range=0.15,\n fill_mode='nearest', horizontal_flip=True, vertical_flip=False,\n brightness_range=None, channel_shift_range=0)\n test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale\n =1.0 / 255)\n train_generator = train_datagen.flow_from_directory(directory,\n target_size=(150, 150), batch_size=32, color_mode='grayscale',\n class_mode='binary')\n test_generator = test_datagen.flow_from_directory(directory,\n target_size=(150, 150), batch_size=32, color_mode='grayscale',\n class_mode='binary')\n self.model.compile(optimizer='adam', loss='binary_crossentropy',\n metrics=['acc'])\n history = self.model.fit(train_generator, epochs=epochs,\n validation_data=test_generator)\n if save_model:\n self.model.save('model.h5')\n if save_history:\n with open('result.pk', 'wb') as f:\n pickle.dump(history.history, f)\n self.last_training_history = history.history\n return history.history\n\n def predict_directory(self, directory, probabilities=True):\n if directory[-1] != '\\\\' and directory[-1] != '/':\n directory += '/'\n predictions = {}\n onlyfiles = [f for f in os.listdir(directory) if os.path.isfile(os.\n path.join(directory, f))]\n for image_file in onlyfiles:\n img = tf.keras.preprocessing.image.load_img(directory +\n image_file, target_size=(150, 150), color_mode='grayscale')\n x = tf.keras.preprocessing.image.img_to_array(img)\n x = x.reshape((1,) + x.shape)\n x = x / 255\n y = self.model.predict(x)[0][0]\n if probabilities:\n predictions[image_file] = y\n else:\n predictions[image_file] = y > 0.5\n return predictions\n\n def predict_single_image(self, 
file_url):\n self.load_weights()\n self.load_last_training_history()\n file_name = 'image.jpg'\n urllib.request.urlretrieve(file_url, file_name)\n img = tf.keras.preprocessing.image.load_img(file_name, target_size=\n (150, 150), color_mode='grayscale')\n x = tf.keras.preprocessing.image.img_to_array(img)\n x = x.reshape((1,) + x.shape)\n x = x / 255\n prediction = self.model.predict(x)[0][0]\n is_default_image = prediction < 0.5\n print(prediction)\n os.remove(file_name)\n return json.dumps(True) if is_default_image else json.dumps(False)\n\n\ndef evaluate_on_directory(self, directory):\n val_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=\n 1.0 / 255)\n val_generator = val_datagen.flow_from_directory(directory, target_size=\n (150, 150), batch_size=32, color_mode='grayscale', class_mode='binary')\n return self.model.evaluate(val_generator)\n\n\ndef split_directory(directory, train_size=0.75, test_size=0.2, val_size=0.05):\n assert train_size + test_size + val_size == 1\n assert 0 <= train_size <= 1 and 0 <= test_size <= 1 and 0 <= val_size <= 1\n subdirs = next(os.walk(directory))[1]\n if train_size > 0:\n os.mkdir(directory + '/train')\n for subdir in subdirs:\n os.mkdir(directory + '/train/' + subdir)\n if test_size > 0:\n os.mkdir(directory + '/test')\n for subdir in subdirs:\n os.mkdir(directory + '/test/' + subdir)\n if val_size > 0:\n os.mkdir(directory + '/val')\n for subdir in subdirs:\n os.mkdir(directory + '/val/' + subdir)\n pathlist = Path(directory).rglob('*.*')\n for path in pathlist:\n instance_path = str(path)\n instance_properties = instance_path.split('/'\n ) if '/' in instance_path else instance_path.split('\\\\')\n instance_name = instance_properties[-1]\n instance_class = instance_properties[-2]\n r = random.random()\n if r < val_size:\n subfolder = '/val/'\n elif r < test_size + val_size:\n subfolder = '/test/'\n else:\n subfolder = '/train/'\n os.rename(instance_path, '/'.join(instance_properties[:-2]) +\n subfolder + instance_class + '/' + instance_name)\n\n\nif __name__ == '__main__':\n cnn = CNN()\n cnn.load_weights()\n cnn.load_last_training_history()\n cnn.print_model_info()\n",
"step-5": "import json\nimport os\nimport pickle\nimport random\nimport urllib.request\nfrom pathlib import Path\n\nimport tensorflow as tf\nfrom matplotlib import pyplot as plt\n\n\nclass CNN(object):\n\n def __init__(self):\n self.model = tf.keras.Sequential([\n tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 1)),\n tf.keras.layers.MaxPool2D((2, 2)),\n tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),\n tf.keras.layers.MaxPool2D(2, 2),\n tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),\n tf.keras.layers.MaxPool2D(2, 2),\n tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),\n tf.keras.layers.MaxPool2D(2, 2),\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dropout(0.5),\n tf.keras.layers.Dense(512, activation='relu'),\n tf.keras.layers.Dense(1, activation='sigmoid')\n ])\n self.last_training_history = {}\n\n def print_model_info(self):\n print(self.model.summary())\n\n def get_model(self):\n return self.model\n\n def load_weights(self, filepath='model.h5'):\n self.model.load_weights(filepath)\n self.model.compile(\n optimizer='adam',\n loss='binary_crossentropy',\n metrics=['acc']\n )\n\n def load_last_training_history(self, filepath='result.pk'):\n with open(filepath, 'rb') as f:\n self.last_training_history = pickle.load(f)\n\n def get_last_training_history(self):\n return self.last_training_history\n\n def plot_last_training_history(self, save_plot=False):\n for key in self.last_training_history:\n y = self.last_training_history[key]\n plt.plot([i + 1 for i in range(len(y))], y, label=key)\n plt.legend()\n plt.grid()\n plt.xlabel('epoch')\n if save_plot:\n plt.savefig('training_history.png', dpi=300)\n else:\n plt.show()\n\n def train(self, directory, epochs=100, save_model=False, save_history=False):\n train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(\n rescale=1. / 255,\n rotation_range=20,\n width_shift_range=0.15,\n height_shift_range=0.15,\n shear_range=0.15,\n zoom_range=0.15,\n fill_mode='nearest',\n horizontal_flip=True,\n vertical_flip=False,\n brightness_range=None,\n channel_shift_range=0\n )\n\n test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(\n rescale=1. 
/ 255) \n\n train_generator = train_datagen.flow_from_directory(\n directory,\n target_size=(150, 150),\n batch_size=32,\n color_mode='grayscale',\n class_mode='binary'\n )\n\n test_generator = test_datagen.flow_from_directory(\n directory,\n target_size=(150, 150),\n batch_size=32,\n color_mode='grayscale',\n class_mode='binary'\n )\n\n self.model.compile(\n optimizer='adam',\n loss='binary_crossentropy',\n metrics=['acc']\n )\n\n history = self.model.fit(\n train_generator,\n epochs=epochs,\n validation_data=test_generator\n )\n\n if save_model:\n self.model.save('model.h5')\n\n if save_history:\n with open('result.pk', 'wb') as f:\n pickle.dump(history.history, f)\n\n self.last_training_history = history.history\n\n return history.history\n\n def predict_directory(self, directory, probabilities=True):\n if directory[-1] != '\\\\' and directory[-1] != '/':\n directory += '/'\n predictions = {}\n onlyfiles = [f for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f))]\n for image_file in onlyfiles:\n img = tf.keras.preprocessing.image.load_img(directory + image_file, target_size=(150, 150),\n color_mode='grayscale')\n x = tf.keras.preprocessing.image.img_to_array(img, )\n x = x.reshape((1,) + x.shape)\n x = x / 255\n y = self.model.predict(x)[0][0]\n if probabilities:\n predictions[image_file] = y\n else:\n predictions[image_file] = y > 0.5\n return predictions\n\n def predict_single_image(self, file_url):\n self.load_weights()\n self.load_last_training_history()\n file_name = \"image.jpg\"\n urllib.request.urlretrieve(file_url, file_name)\n img = tf.keras.preprocessing.image.load_img(file_name, target_size=(150, 150),\n color_mode='grayscale')\n x = tf.keras.preprocessing.image.img_to_array(img, )\n x = x.reshape((1,) + x.shape)\n x = x / 255\n prediction = self.model.predict(x)[0][0]\n is_default_image = prediction < 0.5\n print(prediction)\n os.remove(file_name)\n\n return json.dumps(True) if is_default_image else json.dumps(False)\n\n\ndef evaluate_on_directory(self, directory):\n val_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1. 
/ 255)\n val_generator = val_datagen.flow_from_directory(\n directory,\n target_size=(150, 150),\n batch_size=32,\n color_mode='grayscale',\n class_mode='binary'\n )\n return self.model.evaluate(val_generator)\n\n\ndef split_directory(directory, train_size=0.75, test_size=0.2, val_size=0.05):\n assert train_size + test_size + val_size == 1\n assert 0 <= train_size <= 1 and 0 <= test_size <= 1 and 0 <= val_size <= 1\n subdirs = next(os.walk(directory))[1]\n if train_size > 0:\n os.mkdir(directory + '/train')\n for subdir in subdirs:\n os.mkdir(directory + '/train/' + subdir)\n if test_size > 0:\n os.mkdir(directory + '/test')\n for subdir in subdirs:\n os.mkdir(directory + '/test/' + subdir)\n if val_size > 0:\n os.mkdir(directory + '/val')\n for subdir in subdirs:\n os.mkdir(directory + '/val/' + subdir)\n pathlist = Path(directory).rglob('*.*')\n for path in pathlist:\n instance_path = str(path)\n instance_properties = instance_path.split('/') if '/' in instance_path else instance_path.split('\\\\')\n instance_name = instance_properties[-1]\n instance_class = instance_properties[-2]\n r = random.random()\n if r < val_size:\n subfolder = '/val/'\n elif r < test_size + val_size:\n subfolder = '/test/'\n else:\n subfolder = '/train/'\n os.rename(instance_path, '/'.join(instance_properties[:-2]) + subfolder + instance_class + '/' + instance_name)\n\n\nif __name__ == '__main__':\n\n cnn = CNN()\n cnn.load_weights()\n cnn.load_last_training_history()\n cnn.print_model_info()\n",
"step-ids": [
12,
13,
14,
15,
16
]
}
|
[
12,
13,
14,
15,
16
] |
import torch
import argparse
from DialogGenerator import DialogGenerator
from DialogDataset import DialogDataset
from DialogDiscriminator import DialogDiscriminator
from transformers import GPT2Tokenizer
import os
def prep_folder(args):
""" Append to slash to filepath if needed, and generate folder if it doesn't exist"""
if(args.save_folder[-1]!='/'):
args.save_folder += '/'
if(not os.path.isdir(args.save_folder)):
os.mkdir(args.save_folder)
if(__name__=="__main__"):
parser = argparse.ArgumentParser()
parser.add_argument('--epochs', type=int, default=3, dest="epochs", help='Number of epochs to run')
parser.add_argument('--batch-size', type=int, default=50, dest="batch_size", help='Batch size')
parser.add_argument('--max-out-length', type=int, default=128, dest="max_out_length", help='Maximum output length (outputs truncated if longer)')
parser.add_argument('--adversarial-model', type=str, default=None, dest="adv_model", help='Type of adversarial model to use. Will use traditional teacher forcing if None.')
parser.add_argument('--train-disc-only-steps', type=int, default=0, dest="train_disc_only_steps", help='Number of steps for which to train discriminator only (without updating generator)')
parser.add_argument('--gen_weight_decay', type=float, default=0, dest="gen_weight_decay", help='Weight decay for the generator\'s training scheduler')
parser.add_argument('--gen_lr', type=float, default=2e-5, dest="gen_lr", help='Learning rate for generator')
parser.add_argument('--gen_epsilon', type=float, default=1e-8, dest="gen_epsilon", help='Epsilon parameter for generator optimizer')
parser.add_argument('--gen_warmup_steps', type=int, default=0, dest="gen_warmup_steps", help='Number of warmup steps for training generator')
parser.add_argument('--disc_weight_decay', type=float, default=0, dest="disc_weight_decay", help='Weight decay for the discriminator\'s training scheduler')
parser.add_argument('--disc_lr', type=float, default=2e-5, dest="disc_lr", help='Learning rate for discriminator')
parser.add_argument('--disc_epsilon', type=float, default=1e-8, dest="disc_epsilon", help='Epsilon parameter for discriminator optimizer')
parser.add_argument('--disc_warmup_steps', type=int, default=0, dest="disc_warmup_steps", help='Number of warmup steps for training discriminator')
parser.add_argument('--train-data-path', type=str, dest="train_data_path", help="Filepath to preprocessed data")
parser.add_argument('--save-folder', type=str, dest="save_folder", help="Filepath to folder where checkpoints should be saved")
parser.add_argument('--pretrained-gen', type=str, default=None, dest="pretrained_gen", help="Filepath to trained generator. If None, will instantiate a default pretrained generator.")
parser.add_argument('--pretrained-disc', type=str, default=None, dest="pretrained_disc", help="Filepath to trained discriminator. If None, will instantiate a default pretrained discriminator of type specified by --adversarial-model option.")
args = parser.parse_args()
assert args.train_data_path is not None
assert args.save_folder is not None
prep_folder(args)
eos_token_id = GPT2Tokenizer.from_pretrained("gpt2").eos_token_id
train_dataset = DialogDataset(args.train_data_path, eos_token_id)
train_loader = train_dataset.get_loader(args.batch_size, shuffle=True)
gen_opt_params = {"weight_decay": args.gen_weight_decay,
"lr": args.gen_lr,
"warmup_steps": args.gen_warmup_steps,
"epsilon": args.gen_epsilon,
"total_steps": int(len(train_dataset) / args.batch_size) * args.epochs }
generator = DialogGenerator(args.pretrained_gen, args.save_folder, gen_opt_params)
if(args.adv_model is not None):
disc_opt_params = {"weight_decay": args.disc_weight_decay,
"lr": args.disc_lr,
"warmup_steps": args.disc_warmup_steps,
"epsilon": args.disc_epsilon,
"total_steps": int(len(train_dataset) / args.batch_size) * args.epochs }
discriminator = DialogDiscriminator(args.adv_model, args.pretrained_disc, args.save_folder, disc_opt_params)
generator.train_adversarial(train_loader, args.epochs, args.max_out_length, discriminator, args.train_disc_only_steps)
else:
generator.train_traditional(train_loader, args.epochs, args.max_out_length)
|
normal
|
{
"blob_id": "18be97061c65185fcebf10c628e0e51bb08522cf",
"index": 3609,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef prep_folder(args):\n \"\"\" Append to slash to filepath if needed, and generate folder if it doesn't exist\"\"\"\n if args.save_folder[-1] != '/':\n args.save_folder += '/'\n if not os.path.isdir(args.save_folder):\n os.mkdir(args.save_folder)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef prep_folder(args):\n \"\"\" Append to slash to filepath if needed, and generate folder if it doesn't exist\"\"\"\n if args.save_folder[-1] != '/':\n args.save_folder += '/'\n if not os.path.isdir(args.save_folder):\n os.mkdir(args.save_folder)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--epochs', type=int, default=3, dest='epochs',\n help='Number of epochs to run')\n parser.add_argument('--batch-size', type=int, default=50, dest=\n 'batch_size', help='Batch size')\n parser.add_argument('--max-out-length', type=int, default=128, dest=\n 'max_out_length', help=\n 'Maximum output length (outputs truncated if longer)')\n parser.add_argument('--adversarial-model', type=str, default=None, dest\n ='adv_model', help=\n 'Type of adversarial model to use. Will use traditional teacher forcing if None.'\n )\n parser.add_argument('--train-disc-only-steps', type=int, default=0,\n dest='train_disc_only_steps', help=\n 'Number of steps for which to train discriminator only (without updating generator)'\n )\n parser.add_argument('--gen_weight_decay', type=float, default=0, dest=\n 'gen_weight_decay', help=\n \"Weight decay for the generator's training scheduler\")\n parser.add_argument('--gen_lr', type=float, default=2e-05, dest=\n 'gen_lr', help='Learning rate for generator')\n parser.add_argument('--gen_epsilon', type=float, default=1e-08, dest=\n 'gen_epsilon', help='Epsilon parameter for generator optimizer')\n parser.add_argument('--gen_warmup_steps', type=int, default=0, dest=\n 'gen_warmup_steps', help=\n 'Number of warmup steps for training generator')\n parser.add_argument('--disc_weight_decay', type=float, default=0, dest=\n 'disc_weight_decay', help=\n \"Weight decay for the discriminator's training scheduler\")\n parser.add_argument('--disc_lr', type=float, default=2e-05, dest=\n 'disc_lr', help='Learning rate for discriminator')\n parser.add_argument('--disc_epsilon', type=float, default=1e-08, dest=\n 'disc_epsilon', help='Epsilon parameter for discriminator optimizer')\n parser.add_argument('--disc_warmup_steps', type=int, default=0, dest=\n 'disc_warmup_steps', help=\n 'Number of warmup steps for training discriminator')\n parser.add_argument('--train-data-path', type=str, dest=\n 'train_data_path', help='Filepath to preprocessed data')\n parser.add_argument('--save-folder', type=str, dest='save_folder', help\n ='Filepath to folder where checkpoints should be saved')\n parser.add_argument('--pretrained-gen', type=str, default=None, dest=\n 'pretrained_gen', help=\n 'Filepath to trained generator. If None, will instantiate a default pretrained generator.'\n )\n parser.add_argument('--pretrained-disc', type=str, default=None, dest=\n 'pretrained_disc', help=\n 'Filepath to trained discriminator. 
If None, will instantiate a default pretrained discriminator of type specified by --adversarial-model option.'\n )\n args = parser.parse_args()\n assert args.train_data_path is not None\n assert args.save_folder is not None\n prep_folder(args)\n eos_token_id = GPT2Tokenizer.from_pretrained('gpt2').eos_token_id\n train_dataset = DialogDataset(args.train_data_path, eos_token_id)\n train_loader = train_dataset.get_loader(args.batch_size, shuffle=True)\n gen_opt_params = {'weight_decay': args.gen_weight_decay, 'lr': args.\n gen_lr, 'warmup_steps': args.gen_warmup_steps, 'epsilon': args.\n gen_epsilon, 'total_steps': int(len(train_dataset) / args.\n batch_size) * args.epochs}\n generator = DialogGenerator(args.pretrained_gen, args.save_folder,\n gen_opt_params)\n if args.adv_model is not None:\n disc_opt_params = {'weight_decay': args.disc_weight_decay, 'lr':\n args.disc_lr, 'warmup_steps': args.disc_warmup_steps, 'epsilon':\n args.disc_epsilon, 'total_steps': int(len(train_dataset) / args\n .batch_size) * args.epochs}\n discriminator = DialogDiscriminator(args.adv_model, args.\n pretrained_disc, args.save_folder, disc_opt_params)\n generator.train_adversarial(train_loader, args.epochs, args.\n max_out_length, discriminator, args.train_disc_only_steps)\n else:\n generator.train_traditional(train_loader, args.epochs, args.\n max_out_length)\n",
"step-4": "import torch\nimport argparse\nfrom DialogGenerator import DialogGenerator\nfrom DialogDataset import DialogDataset\nfrom DialogDiscriminator import DialogDiscriminator\nfrom transformers import GPT2Tokenizer\nimport os\n\n\ndef prep_folder(args):\n \"\"\" Append to slash to filepath if needed, and generate folder if it doesn't exist\"\"\"\n if args.save_folder[-1] != '/':\n args.save_folder += '/'\n if not os.path.isdir(args.save_folder):\n os.mkdir(args.save_folder)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('--epochs', type=int, default=3, dest='epochs',\n help='Number of epochs to run')\n parser.add_argument('--batch-size', type=int, default=50, dest=\n 'batch_size', help='Batch size')\n parser.add_argument('--max-out-length', type=int, default=128, dest=\n 'max_out_length', help=\n 'Maximum output length (outputs truncated if longer)')\n parser.add_argument('--adversarial-model', type=str, default=None, dest\n ='adv_model', help=\n 'Type of adversarial model to use. Will use traditional teacher forcing if None.'\n )\n parser.add_argument('--train-disc-only-steps', type=int, default=0,\n dest='train_disc_only_steps', help=\n 'Number of steps for which to train discriminator only (without updating generator)'\n )\n parser.add_argument('--gen_weight_decay', type=float, default=0, dest=\n 'gen_weight_decay', help=\n \"Weight decay for the generator's training scheduler\")\n parser.add_argument('--gen_lr', type=float, default=2e-05, dest=\n 'gen_lr', help='Learning rate for generator')\n parser.add_argument('--gen_epsilon', type=float, default=1e-08, dest=\n 'gen_epsilon', help='Epsilon parameter for generator optimizer')\n parser.add_argument('--gen_warmup_steps', type=int, default=0, dest=\n 'gen_warmup_steps', help=\n 'Number of warmup steps for training generator')\n parser.add_argument('--disc_weight_decay', type=float, default=0, dest=\n 'disc_weight_decay', help=\n \"Weight decay for the discriminator's training scheduler\")\n parser.add_argument('--disc_lr', type=float, default=2e-05, dest=\n 'disc_lr', help='Learning rate for discriminator')\n parser.add_argument('--disc_epsilon', type=float, default=1e-08, dest=\n 'disc_epsilon', help='Epsilon parameter for discriminator optimizer')\n parser.add_argument('--disc_warmup_steps', type=int, default=0, dest=\n 'disc_warmup_steps', help=\n 'Number of warmup steps for training discriminator')\n parser.add_argument('--train-data-path', type=str, dest=\n 'train_data_path', help='Filepath to preprocessed data')\n parser.add_argument('--save-folder', type=str, dest='save_folder', help\n ='Filepath to folder where checkpoints should be saved')\n parser.add_argument('--pretrained-gen', type=str, default=None, dest=\n 'pretrained_gen', help=\n 'Filepath to trained generator. If None, will instantiate a default pretrained generator.'\n )\n parser.add_argument('--pretrained-disc', type=str, default=None, dest=\n 'pretrained_disc', help=\n 'Filepath to trained discriminator. 
If None, will instantiate a default pretrained discriminator of type specified by --adversarial-model option.'\n )\n args = parser.parse_args()\n assert args.train_data_path is not None\n assert args.save_folder is not None\n prep_folder(args)\n eos_token_id = GPT2Tokenizer.from_pretrained('gpt2').eos_token_id\n train_dataset = DialogDataset(args.train_data_path, eos_token_id)\n train_loader = train_dataset.get_loader(args.batch_size, shuffle=True)\n gen_opt_params = {'weight_decay': args.gen_weight_decay, 'lr': args.\n gen_lr, 'warmup_steps': args.gen_warmup_steps, 'epsilon': args.\n gen_epsilon, 'total_steps': int(len(train_dataset) / args.\n batch_size) * args.epochs}\n generator = DialogGenerator(args.pretrained_gen, args.save_folder,\n gen_opt_params)\n if args.adv_model is not None:\n disc_opt_params = {'weight_decay': args.disc_weight_decay, 'lr':\n args.disc_lr, 'warmup_steps': args.disc_warmup_steps, 'epsilon':\n args.disc_epsilon, 'total_steps': int(len(train_dataset) / args\n .batch_size) * args.epochs}\n discriminator = DialogDiscriminator(args.adv_model, args.\n pretrained_disc, args.save_folder, disc_opt_params)\n generator.train_adversarial(train_loader, args.epochs, args.\n max_out_length, discriminator, args.train_disc_only_steps)\n else:\n generator.train_traditional(train_loader, args.epochs, args.\n max_out_length)\n",
"step-5": "import torch\nimport argparse\nfrom DialogGenerator import DialogGenerator\nfrom DialogDataset import DialogDataset\nfrom DialogDiscriminator import DialogDiscriminator\nfrom transformers import GPT2Tokenizer\nimport os\n\ndef prep_folder(args):\n \"\"\" Append to slash to filepath if needed, and generate folder if it doesn't exist\"\"\"\n if(args.save_folder[-1]!='/'):\n args.save_folder += '/'\n if(not os.path.isdir(args.save_folder)):\n os.mkdir(args.save_folder)\n\nif(__name__==\"__main__\"):\n parser = argparse.ArgumentParser()\n parser.add_argument('--epochs', type=int, default=3, dest=\"epochs\", help='Number of epochs to run')\n parser.add_argument('--batch-size', type=int, default=50, dest=\"batch_size\", help='Batch size')\n parser.add_argument('--max-out-length', type=int, default=128, dest=\"max_out_length\", help='Maximum output length (outputs truncated if longer)')\n parser.add_argument('--adversarial-model', type=str, default=None, dest=\"adv_model\", help='Type of adversarial model to use. Will use traditional teacher forcing if None.')\n parser.add_argument('--train-disc-only-steps', type=int, default=0, dest=\"train_disc_only_steps\", help='Number of steps for which to train discriminator only (without updating generator)')\n\n parser.add_argument('--gen_weight_decay', type=float, default=0, dest=\"gen_weight_decay\", help='Weight decay for the generator\\'s training scheduler')\n parser.add_argument('--gen_lr', type=float, default=2e-5, dest=\"gen_lr\", help='Learning rate for generator')\n parser.add_argument('--gen_epsilon', type=float, default=1e-8, dest=\"gen_epsilon\", help='Epsilon parameter for generator optimizer')\n parser.add_argument('--gen_warmup_steps', type=int, default=0, dest=\"gen_warmup_steps\", help='Number of warmup steps for training generator')\n\n parser.add_argument('--disc_weight_decay', type=float, default=0, dest=\"disc_weight_decay\", help='Weight decay for the discriminator\\'s training scheduler')\n parser.add_argument('--disc_lr', type=float, default=2e-5, dest=\"disc_lr\", help='Learning rate for discriminator')\n parser.add_argument('--disc_epsilon', type=float, default=1e-8, dest=\"disc_epsilon\", help='Epsilon parameter for discriminator optimizer')\n parser.add_argument('--disc_warmup_steps', type=int, default=0, dest=\"disc_warmup_steps\", help='Number of warmup steps for training discriminator')\n\n parser.add_argument('--train-data-path', type=str, dest=\"train_data_path\", help=\"Filepath to preprocessed data\")\n parser.add_argument('--save-folder', type=str, dest=\"save_folder\", help=\"Filepath to folder where checkpoints should be saved\")\n parser.add_argument('--pretrained-gen', type=str, default=None, dest=\"pretrained_gen\", help=\"Filepath to trained generator. If None, will instantiate a default pretrained generator.\")\n parser.add_argument('--pretrained-disc', type=str, default=None, dest=\"pretrained_disc\", help=\"Filepath to trained discriminator. 
If None, will instantiate a default pretrained discriminator of type specified by --adversarial-model option.\")\n\n args = parser.parse_args()\n\n assert args.train_data_path is not None\n assert args.save_folder is not None\n\n prep_folder(args)\n \n eos_token_id = GPT2Tokenizer.from_pretrained(\"gpt2\").eos_token_id\n train_dataset = DialogDataset(args.train_data_path, eos_token_id)\n train_loader = train_dataset.get_loader(args.batch_size, shuffle=True)\n\n gen_opt_params = {\"weight_decay\": args.gen_weight_decay, \n \"lr\": args.gen_lr, \n \"warmup_steps\": args.gen_warmup_steps,\n \"epsilon\": args.gen_epsilon,\n \"total_steps\": int(len(train_dataset) / args.batch_size) * args.epochs }\n\n generator = DialogGenerator(args.pretrained_gen, args.save_folder, gen_opt_params)\n\n if(args.adv_model is not None):\n disc_opt_params = {\"weight_decay\": args.disc_weight_decay, \n \"lr\": args.disc_lr, \n \"warmup_steps\": args.disc_warmup_steps,\n \"epsilon\": args.disc_epsilon,\n \"total_steps\": int(len(train_dataset) / args.batch_size) * args.epochs }\n discriminator = DialogDiscriminator(args.adv_model, args.pretrained_disc, args.save_folder, disc_opt_params)\n \n generator.train_adversarial(train_loader, args.epochs, args.max_out_length, discriminator, args.train_disc_only_steps)\n else:\n generator.train_traditional(train_loader, args.epochs, args.max_out_length)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import json
import requests
import random
import boto3
from email.parser import BytesParser, Parser
from email.policy import default
##################################
endpoint = 'https://5295t8jcs0.execute-api.us-east-1.amazonaws.com/Prod'
##################################
def get_msg_body(msg):
type = msg.get_content_maintype()
if type == 'multipart':
for part in msg.get_payload():
if part.get_content_maintype() == 'text':
return part.get_payload()
elif type == 'text':
return msg.get_payload()
def lambda_handler(event, context):
s3_bucket = event['Records'][0]['s3']['bucket']['name']
s3_key = event['Records'][0]['s3']['object']['key']
# s3_bucket = 'hw3-storemails'
# s3_key = '097caauj2ee2puftdrlohllf5748p70e1seovc81'
client = boto3.client('s3')
data = client.get_object(Bucket=s3_bucket, Key=s3_key)
contents = data['Body'].read()
msg = Parser(policy=default).parsestr(contents.decode('ascii'))
frm = msg['from']
to = msg['to']
time = msg['date']
subject = msg['subject']
body = get_msg_body(msg)
body = " ".join(body.split()).strip()
print(time)
r = requests.post(endpoint, data = {'data':body}, headers = {'Content-Type': 'application/x-www-form-urlencoded'})
r = json.loads(r.text)
print(r)
label = int(float(r['predicted_label']))
if label == 1:
label = 'SPAM'
else: label = 'HAM'
p = float(r['predicted_probability'])
print(label, p)
if len(body)>250: body = body[0:250]
return_msg = 'We received your email sent at ' +\
        time + ' with the subject \'' + subject +\
        '\'.\n\nHere is a 250 character sample of the email body:\n\n' +\
body + '\n\nThe email was categorized as ' + label +\
' with a ' + str(p) + ' % confidence.'
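    # email the classification result back to the original sender via SES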
client = boto3.client('ses')
status = client.send_email(
Source='[email protected]',
Destination={
'ToAddresses': [
frm,
],
},
Message={
'Subject': {
'Data': 'Ham/Spam Analysis'
},
'Body': {
'Text': {
'Data': return_msg,
}
}
},
)
print(status)
return {
'statusCode': 200,
        'body': json.dumps('LF2 successful!')
}
|
normal
|
{
"blob_id": "cc99811321083147540a00e8029b792c8afc2ada",
"index": 3233,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_msg_body(msg):\n type = msg.get_content_maintype()\n if type == 'multipart':\n for part in msg.get_payload():\n if part.get_content_maintype() == 'text':\n return part.get_payload()\n elif type == 'text':\n return msg.get_payload()\n\n\ndef lambda_handler(event, context):\n s3_bucket = event['Records'][0]['s3']['bucket']['name']\n s3_key = event['Records'][0]['s3']['object']['key']\n client = boto3.client('s3')\n data = client.get_object(Bucket=s3_bucket, Key=s3_key)\n contents = data['Body'].read()\n msg = Parser(policy=default).parsestr(contents.decode('ascii'))\n frm = msg['from']\n to = msg['to']\n time = msg['date']\n subject = msg['subject']\n body = get_msg_body(msg)\n body = ' '.join(body.split()).strip()\n print(time)\n r = requests.post(endpoint, data={'data': body}, headers={\n 'Content-Type': 'application/x-www-form-urlencoded'})\n r = json.loads(r.text)\n print(r)\n label = int(float(r['predicted_label']))\n if label == 1:\n label = 'SPAM'\n else:\n label = 'HAM'\n p = float(r['predicted_probability'])\n print(label, p)\n if len(body) > 250:\n body = body[0:250]\n return_msg = ('We received your email sent at ' + time +\n \"with the subject '\" + subject +\n \"\"\"'.\n\nHere is a 240 character sample of the email body:\n\n\"\"\" +\n body + \"\"\"\n\nThe email was categorized as \"\"\" + label + ' with a ' +\n str(p) + ' % confidence.')\n client = boto3.client('ses')\n status = client.send_email(Source='[email protected]',\n Destination={'ToAddresses': [frm]}, Message={'Subject': {'Data':\n 'Ham/Spam Analysis'}, 'Body': {'Text': {'Data': return_msg}}})\n print(status)\n return {'statusCode': 200, 'body': json.dumps('LF2 successfull!')}\n",
"step-3": "<mask token>\nendpoint = 'https://5295t8jcs0.execute-api.us-east-1.amazonaws.com/Prod'\n\n\ndef get_msg_body(msg):\n type = msg.get_content_maintype()\n if type == 'multipart':\n for part in msg.get_payload():\n if part.get_content_maintype() == 'text':\n return part.get_payload()\n elif type == 'text':\n return msg.get_payload()\n\n\ndef lambda_handler(event, context):\n s3_bucket = event['Records'][0]['s3']['bucket']['name']\n s3_key = event['Records'][0]['s3']['object']['key']\n client = boto3.client('s3')\n data = client.get_object(Bucket=s3_bucket, Key=s3_key)\n contents = data['Body'].read()\n msg = Parser(policy=default).parsestr(contents.decode('ascii'))\n frm = msg['from']\n to = msg['to']\n time = msg['date']\n subject = msg['subject']\n body = get_msg_body(msg)\n body = ' '.join(body.split()).strip()\n print(time)\n r = requests.post(endpoint, data={'data': body}, headers={\n 'Content-Type': 'application/x-www-form-urlencoded'})\n r = json.loads(r.text)\n print(r)\n label = int(float(r['predicted_label']))\n if label == 1:\n label = 'SPAM'\n else:\n label = 'HAM'\n p = float(r['predicted_probability'])\n print(label, p)\n if len(body) > 250:\n body = body[0:250]\n return_msg = ('We received your email sent at ' + time +\n \"with the subject '\" + subject +\n \"\"\"'.\n\nHere is a 240 character sample of the email body:\n\n\"\"\" +\n body + \"\"\"\n\nThe email was categorized as \"\"\" + label + ' with a ' +\n str(p) + ' % confidence.')\n client = boto3.client('ses')\n status = client.send_email(Source='[email protected]',\n Destination={'ToAddresses': [frm]}, Message={'Subject': {'Data':\n 'Ham/Spam Analysis'}, 'Body': {'Text': {'Data': return_msg}}})\n print(status)\n return {'statusCode': 200, 'body': json.dumps('LF2 successfull!')}\n",
"step-4": "import json\nimport requests\nimport random\nimport boto3\nfrom email.parser import BytesParser, Parser\nfrom email.policy import default\nendpoint = 'https://5295t8jcs0.execute-api.us-east-1.amazonaws.com/Prod'\n\n\ndef get_msg_body(msg):\n type = msg.get_content_maintype()\n if type == 'multipart':\n for part in msg.get_payload():\n if part.get_content_maintype() == 'text':\n return part.get_payload()\n elif type == 'text':\n return msg.get_payload()\n\n\ndef lambda_handler(event, context):\n s3_bucket = event['Records'][0]['s3']['bucket']['name']\n s3_key = event['Records'][0]['s3']['object']['key']\n client = boto3.client('s3')\n data = client.get_object(Bucket=s3_bucket, Key=s3_key)\n contents = data['Body'].read()\n msg = Parser(policy=default).parsestr(contents.decode('ascii'))\n frm = msg['from']\n to = msg['to']\n time = msg['date']\n subject = msg['subject']\n body = get_msg_body(msg)\n body = ' '.join(body.split()).strip()\n print(time)\n r = requests.post(endpoint, data={'data': body}, headers={\n 'Content-Type': 'application/x-www-form-urlencoded'})\n r = json.loads(r.text)\n print(r)\n label = int(float(r['predicted_label']))\n if label == 1:\n label = 'SPAM'\n else:\n label = 'HAM'\n p = float(r['predicted_probability'])\n print(label, p)\n if len(body) > 250:\n body = body[0:250]\n return_msg = ('We received your email sent at ' + time +\n \"with the subject '\" + subject +\n \"\"\"'.\n\nHere is a 240 character sample of the email body:\n\n\"\"\" +\n body + \"\"\"\n\nThe email was categorized as \"\"\" + label + ' with a ' +\n str(p) + ' % confidence.')\n client = boto3.client('ses')\n status = client.send_email(Source='[email protected]',\n Destination={'ToAddresses': [frm]}, Message={'Subject': {'Data':\n 'Ham/Spam Analysis'}, 'Body': {'Text': {'Data': return_msg}}})\n print(status)\n return {'statusCode': 200, 'body': json.dumps('LF2 successfull!')}\n",
"step-5": "import json\nimport requests\nimport random\nimport boto3\nfrom email.parser import BytesParser, Parser\nfrom email.policy import default\n\n##################################\nendpoint = 'https://5295t8jcs0.execute-api.us-east-1.amazonaws.com/Prod'\n##################################\n\ndef get_msg_body(msg):\n type = msg.get_content_maintype()\n\n if type == 'multipart':\n for part in msg.get_payload():\n if part.get_content_maintype() == 'text':\n return part.get_payload()\n elif type == 'text':\n return msg.get_payload()\n\ndef lambda_handler(event, context):\n \n s3_bucket = event['Records'][0]['s3']['bucket']['name']\n s3_key = event['Records'][0]['s3']['object']['key']\n \n# s3_bucket = 'hw3-storemails'\n# s3_key = '097caauj2ee2puftdrlohllf5748p70e1seovc81'\n \n client = boto3.client('s3')\n data = client.get_object(Bucket=s3_bucket, Key=s3_key)\n contents = data['Body'].read()\n \n msg = Parser(policy=default).parsestr(contents.decode('ascii'))\n frm = msg['from']\n to = msg['to']\n time = msg['date']\n subject = msg['subject']\n \n body = get_msg_body(msg)\n body = \" \".join(body.split()).strip()\n \n print(time)\n \n r = requests.post(endpoint, data = {'data':body}, headers = {'Content-Type': 'application/x-www-form-urlencoded'})\n r = json.loads(r.text)\n \n print(r)\n\n label = int(float(r['predicted_label']))\n if label == 1:\n label = 'SPAM'\n else: label = 'HAM'\n p = float(r['predicted_probability'])\n \n print(label, p)\n \n if len(body)>250: body = body[0:250]\n \n return_msg = 'We received your email sent at ' +\\\n time + 'with the subject \\'' + subject +\\\n '\\'.\\n\\nHere is a 240 character sample of the email body:\\n\\n' +\\\n body + '\\n\\nThe email was categorized as ' + label +\\\n ' with a ' + str(p) + ' % confidence.'\n\n client = boto3.client('ses')\n\n status = client.send_email(\n Source='[email protected]',\n Destination={\n 'ToAddresses': [\n frm,\n ],\n },\n Message={\n 'Subject': {\n 'Data': 'Ham/Spam Analysis'\n \n },\n 'Body': {\n 'Text': {\n 'Data': return_msg,\n }\n }\n },\n )\n \n print(status)\n \n return {\n 'statusCode': 200,\n 'body': json.dumps('LF2 successfull!')\n }\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
# =============================================================================
# Created By : Mohsen Malmir
# Created Date: Fri Nov 09 8:10 PM EST 2018
# Purpose : this file implements the gui handling to interact with emulators
# =============================================================================
from AppKit import NSWorkspace,NSApplicationActivateIgnoringOtherApps
from Quartz import CGWindowListCopyWindowInfo,kCGWindowListOptionOnScreenOnly
from Quartz import kCGWindowListExcludeDesktopElements,kCGNullWindowID
# this is a list of pairs of (emulator, game) that is supported to interact with
supported_emus = ["OpenEmu"]
supported_games = ["Mortal Kombat 3"]
def activate_emu():
"""
This function scans all the open windows and returns a handle to the first known
and supported emulator-game pair.
Args:
None
    Returns:
    (rect, emu_name, game_name) for the first supported pair found, or None
    """
# get a list of all open windows
    windows = CGWindowListCopyWindowInfo(kCGWindowListOptionOnScreenOnly | kCGWindowListExcludeDesktopElements, kCGNullWindowID)  # list options combine with bitwise OR
winname_list = [w.get("kCGWindowName", u"Unknown") for w in windows]
winrect_list = [w["kCGWindowBounds"] for w in windows]
# first find the Emulator
ws = NSWorkspace.sharedWorkspace()
runningApps = ws.runningApplications()
# the running processes are checked by their localized name, e.g. "OpenEmu"
ra_names = [ra.localizedName() for ra in runningApps]
for ii, emu in enumerate(supported_emus):
if emu in ra_names: # if a supported emu is found, check for corresponding games
            if supported_games[ii] in winname_list: # we found a supported game of the target emu
# activate the emu window
emu_idx = ra_names.index(emu)
runningApps[emu_idx].activateWithOptions_(NSApplicationActivateIgnoringOtherApps)
# get the window coordinates
idx = winname_list.index(supported_games[ii])
rect = winrect_list[idx]
rect = [rect.get("X"),rect.get("Y"),rect.get("Width"),rect.get("Height")]
rect = list(map(int,rect))
return rect, emu, supported_games[ii]
return None
if __name__ == "__main__":
print(activate_emu())
|
normal
|
{
"blob_id": "043ea0efd490522de4f6ee4913c8d66029b34ff5",
"index": 5136,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef activate_emu():\n \"\"\"\n This function scans all the open windows and returns a handle to the first known\n and supported emulator-game pair.\n Args:\n None\n Returns:\n \n \"\"\"\n windows = CGWindowListCopyWindowInfo(kCGWindowListOptionOnScreenOnly &\n kCGWindowListExcludeDesktopElements, kCGNullWindowID)\n winname_list = [w.get('kCGWindowName', u'Unknown') for w in windows]\n winrect_list = [w['kCGWindowBounds'] for w in windows]\n ws = NSWorkspace.sharedWorkspace()\n runningApps = ws.runningApplications()\n ra_names = [ra.localizedName() for ra in runningApps]\n for ii, emu in enumerate(supported_emus):\n if emu in ra_names:\n if supported_games[ii] in winname_list:\n emu_idx = ra_names.index(emu)\n runningApps[emu_idx].activateWithOptions_(\n NSApplicationActivateIgnoringOtherApps)\n idx = winname_list.index(supported_games[ii])\n rect = winrect_list[idx]\n rect = [rect.get('X'), rect.get('Y'), rect.get('Width'),\n rect.get('Height')]\n rect = list(map(int, rect))\n return rect, emu, supported_games[ii]\n return None\n\n\nif __name__ == '__main__':\n print(activate_emu())\n",
"step-3": "<mask token>\nsupported_emus = ['OpenEmu']\nsupported_games = ['Mortal Kombat 3']\n\n\ndef activate_emu():\n \"\"\"\n This function scans all the open windows and returns a handle to the first known\n and supported emulator-game pair.\n Args:\n None\n Returns:\n \n \"\"\"\n windows = CGWindowListCopyWindowInfo(kCGWindowListOptionOnScreenOnly &\n kCGWindowListExcludeDesktopElements, kCGNullWindowID)\n winname_list = [w.get('kCGWindowName', u'Unknown') for w in windows]\n winrect_list = [w['kCGWindowBounds'] for w in windows]\n ws = NSWorkspace.sharedWorkspace()\n runningApps = ws.runningApplications()\n ra_names = [ra.localizedName() for ra in runningApps]\n for ii, emu in enumerate(supported_emus):\n if emu in ra_names:\n if supported_games[ii] in winname_list:\n emu_idx = ra_names.index(emu)\n runningApps[emu_idx].activateWithOptions_(\n NSApplicationActivateIgnoringOtherApps)\n idx = winname_list.index(supported_games[ii])\n rect = winrect_list[idx]\n rect = [rect.get('X'), rect.get('Y'), rect.get('Width'),\n rect.get('Height')]\n rect = list(map(int, rect))\n return rect, emu, supported_games[ii]\n return None\n\n\nif __name__ == '__main__':\n print(activate_emu())\n",
"step-4": "from AppKit import NSWorkspace, NSApplicationActivateIgnoringOtherApps\nfrom Quartz import CGWindowListCopyWindowInfo, kCGWindowListOptionOnScreenOnly\nfrom Quartz import kCGWindowListExcludeDesktopElements, kCGNullWindowID\nsupported_emus = ['OpenEmu']\nsupported_games = ['Mortal Kombat 3']\n\n\ndef activate_emu():\n \"\"\"\n This function scans all the open windows and returns a handle to the first known\n and supported emulator-game pair.\n Args:\n None\n Returns:\n \n \"\"\"\n windows = CGWindowListCopyWindowInfo(kCGWindowListOptionOnScreenOnly &\n kCGWindowListExcludeDesktopElements, kCGNullWindowID)\n winname_list = [w.get('kCGWindowName', u'Unknown') for w in windows]\n winrect_list = [w['kCGWindowBounds'] for w in windows]\n ws = NSWorkspace.sharedWorkspace()\n runningApps = ws.runningApplications()\n ra_names = [ra.localizedName() for ra in runningApps]\n for ii, emu in enumerate(supported_emus):\n if emu in ra_names:\n if supported_games[ii] in winname_list:\n emu_idx = ra_names.index(emu)\n runningApps[emu_idx].activateWithOptions_(\n NSApplicationActivateIgnoringOtherApps)\n idx = winname_list.index(supported_games[ii])\n rect = winrect_list[idx]\n rect = [rect.get('X'), rect.get('Y'), rect.get('Width'),\n rect.get('Height')]\n rect = list(map(int, rect))\n return rect, emu, supported_games[ii]\n return None\n\n\nif __name__ == '__main__':\n print(activate_emu())\n",
"step-5": "# =============================================================================\n# Created By : Mohsen Malmir\n# Created Date: Fri Nov 09 8:10 PM EST 2018\n# Purpose : this file implements the gui handling to interact with emulators\n# =============================================================================\n\nfrom AppKit import NSWorkspace,NSApplicationActivateIgnoringOtherApps\nfrom Quartz import CGWindowListCopyWindowInfo,kCGWindowListOptionOnScreenOnly\nfrom Quartz import kCGWindowListExcludeDesktopElements,kCGNullWindowID\n\n# this is a list of pairs of (emulator, game) that is supported to interact with\nsupported_emus = [\"OpenEmu\"]\nsupported_games = [\"Mortal Kombat 3\"]\n\n\ndef activate_emu():\n \"\"\"\n This function scans all the open windows and returns a handle to the first known\n and supported emulator-game pair.\n Args:\n None\n Returns:\n \n \"\"\"\n # get a list of all open windows\n windows = CGWindowListCopyWindowInfo(kCGWindowListOptionOnScreenOnly&kCGWindowListExcludeDesktopElements,kCGNullWindowID)\n winname_list = [w.get(\"kCGWindowName\", u\"Unknown\") for w in windows]\n winrect_list = [w[\"kCGWindowBounds\"] for w in windows]\n # first find the Emulator\n ws = NSWorkspace.sharedWorkspace()\n runningApps = ws.runningApplications()\n # the running processes are checked by their localized name, e.g. \"OpenEmu\"\n ra_names = [ra.localizedName() for ra in runningApps] \n for ii, emu in enumerate(supported_emus):\n if emu in ra_names: # if a supported emu is found, check for corresponding games\n if supported_games[ii] in winname_list: # we foudn a supported game of the target emu\n # activate the emu window\n emu_idx = ra_names.index(emu)\n runningApps[emu_idx].activateWithOptions_(NSApplicationActivateIgnoringOtherApps)\n # get the window coordinates\n idx = winname_list.index(supported_games[ii])\n rect = winrect_list[idx]\n rect = [rect.get(\"X\"),rect.get(\"Y\"),rect.get(\"Width\"),rect.get(\"Height\")]\n rect = list(map(int,rect))\n return rect, emu, supported_games[ii]\n return None\n\nif __name__ == \"__main__\":\n print(activate_emu())\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
from keras.models import Sequential
from keras.layers import Dense
import numpy as np
x = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
y = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
x2 = np.array([11, 12, 13, 14, 15])
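# minimal feed-forward regressor: 1 input -> Dense(5) -> Dense(3) -> Dense(1)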
model = Sequential()
model.add(Dense(5, input_dim=1, activation='relu'))
model.add(Dense(3))
model.add(Dense(1))
model.summary()
'''
model.compile(loss='mse', optimizer='adam',
metrics=['accuracy'])
model.fit(x, y, epochs=100)
loss, acc = model.evaluate(x, y)
print("acc : ", acc)
print("loss : ", loss)
y_predict = model.predict(x2)
print(y_predict)
'''
|
normal
|
{
"blob_id": "43d9edd9120351ce5065eb266d482ccaa2e56177",
"index": 2416,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmodel.add(Dense(5, input_dim=1, activation='relu'))\nmodel.add(Dense(3))\nmodel.add(Dense(1))\nmodel.summary()\n<mask token>\n",
"step-3": "<mask token>\nx = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\ny = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\nx2 = np.array([11, 12, 13, 14, 15])\nmodel = Sequential()\nmodel.add(Dense(5, input_dim=1, activation='relu'))\nmodel.add(Dense(3))\nmodel.add(Dense(1))\nmodel.summary()\n<mask token>\n",
"step-4": "from keras.models import Sequential\nfrom keras.layers import Dense\nimport numpy as np\nx = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\ny = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\nx2 = np.array([11, 12, 13, 14, 15])\nmodel = Sequential()\nmodel.add(Dense(5, input_dim=1, activation='relu'))\nmodel.add(Dense(3))\nmodel.add(Dense(1))\nmodel.summary()\n<mask token>\n",
"step-5": "from keras.models import Sequential\nfrom keras.layers import Dense\n\nimport numpy as np\nx = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\ny = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\nx2 = np.array([11, 12, 13, 14, 15])\n\nmodel = Sequential()\nmodel.add(Dense(5, input_dim=1, activation='relu'))\nmodel.add(Dense(3))\nmodel.add(Dense(1))\n\nmodel.summary()\n\n'''\nmodel.compile(loss='mse', optimizer='adam',\n metrics=['accuracy'])\nmodel.fit(x, y, epochs=100)\n\nloss, acc = model.evaluate(x, y)\nprint(\"acc : \", acc)\nprint(\"loss : \", loss)\n\ny_predict = model.predict(x2)\nprint(y_predict)\n'''\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
password = ["123456", "1111"]
pw = input("รหัสผ่านคือ>>>")  # prompt: "The password is>>>"
for data in password:
    if data != pw:
        pass
    else:
        print("พบข้อมูลรหัสผ่านนี้")  # "This password was found"
print("แล้วเจอกันใหม่")  # "See you again"
|
normal
|
{
"blob_id": "6f05b1352e776e20d6a9e0eb457d8914cbfc2d22",
"index": 2779,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor data in password:\n if data != pw:\n pass\n else:\n print('พบข้อมูลรหัสผ่านนี้')\nprint('แล้วเจอกันใหม่')\n",
"step-3": "password = ['123456', '1111']\npw = input('รหัสผ่านคือ>>>')\nfor data in password:\n if data != pw:\n pass\n else:\n print('พบข้อมูลรหัสผ่านนี้')\nprint('แล้วเจอกันใหม่')\n",
"step-4": "password = [\"123456\", \"1111\"]\r\npw = input(\"รหัสผ่านคือ>>>\")\r\nfor data in password:\r\n if data != pw:\r\n pass\r\n else:\r\n print(\"พบข้อมูลรหัสผ่านนี้\")\r\nprint(\"แล้วเจอกันใหม่\")\r\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from flask import *
app = Flask(__name__)
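# route and helper modules are imported after the app object exists to avoid circular imports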
from app import views
from app import admin_views
from app import usr_reg
from app import cookie
from app import db_connect
|
normal
|
{
"blob_id": "e736991f364ba9ff709348e4b1f612b1e9673281",
"index": 252,
"step-1": "<mask token>\n",
"step-2": "<mask token>\napp = Flask(__name__)\n<mask token>\n",
"step-3": "from flask import *\napp = Flask(__name__)\nfrom app import views\nfrom app import admin_views\nfrom app import usr_reg\nfrom app import cookie\nfrom app import db_connect\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import sys
import logging
import copy
import socket
from . import game_map
class GameUnix:
"""
:ivar map: Current map representation
:ivar initial_map: The initial version of the map before game starts
"""
def _send_string(self, s):
"""
Send data to the game. Call :function:`done_sending` once finished.
:param str s: String to send
:return: nothing
"""
self.sfile.write(s)
def _done_sending(self):
"""
Finish sending commands to the game.
:return: nothing
"""
self.sfile.write('\n')
self.sfile.flush()
def _get_string(self):
"""
Read input from the game.
:return: The input read from the Halite engine
:rtype: str
"""
result = self.sfile.readline().rstrip('\n')
return result
def send_command_queue(self, command_queue):
"""
Issue the given list of commands.
:param list[str] command_queue: List of commands to send the Halite engine
:return: nothing
"""
for command in command_queue:
self._send_string(command)
self._done_sending()
@staticmethod
def _set_up_logging(tag, name):
"""
Set up and truncate the log
:param tag: The user tag (used for naming the log)
:param name: The bot name (used for naming the log)
:return: nothing
"""
log_file = "{}_{}.log".format(tag, name)
logging.basicConfig(filename=log_file, level=logging.DEBUG, filemode='w')
logging.info("Initialized bot {}".format(name))
def __init__(self, name, socket_path="/dev/shm/bot.sock"):
"""
Initialize the bot with the given name.
:param name: The name of the bot.
"""
self.s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
connected = False
while not connected:
try:
self.s.connect(socket_path)
connected = True
except Exception:
pass # Do nothing, just try again
self.sfile = self.s.makefile('rw')
self._name = name
self._send_name = False
tag = int(self._get_string())
GameUnix._set_up_logging(tag, name)
width, height = [int(x) for x in self._get_string().strip().split()]
self.map = game_map.Map(tag, width, height)
self.update_map()
self.initial_map = copy.deepcopy(self.map)
self._send_name = True
self.done = False
def update_map(self):
"""
Parse the map given by the engine.
:return: new parsed map
:rtype: game_map.Map
"""
if self._send_name:
self._send_string(self._name)
self._done_sending()
self._send_name = False
logging.info("---NEW TURN---")
recv = self._get_string()
if recv == "":
self.close()
self.done = True
return self.map # last step map
self.map._parse(recv)
return self.map
def close(self):
self.sfile.close()
self.s.close()
class GameStdIO:
"""
:ivar map: Current map representation
:ivar initial_map: The initial version of the map before game starts
"""
def _send_string(self, s):
"""
Send data to the game. Call :function:`done_sending` once finished.
:param str s: String to send
:return: nothing
"""
sys.stdout.write(s)
def _done_sending(self):
"""
Finish sending commands to the game.
:return: nothing
"""
sys.stdout.write('\n')
sys.stdout.flush()
def _get_string(self):
"""
Read input from the game.
:return: The input read from the Halite engine
:rtype: str
"""
result = sys.stdin.readline().rstrip('\n')
return result
def send_command_queue(self, command_queue):
"""
Issue the given list of commands.
:param list[str] command_queue: List of commands to send the Halite engine
:return: nothing
"""
for command in command_queue:
self._send_string(command)
self._done_sending()
@staticmethod
def _set_up_logging(tag, name):
"""
Set up and truncate the log
:param tag: The user tag (used for naming the log)
:param name: The bot name (used for naming the log)
:return: nothing
"""
log_file = "{}_{}.log".format(tag, name)
logging.basicConfig(filename=log_file, level=logging.DEBUG, filemode='w')
logging.info("Initialized bot {}".format(name))
def __init__(self, name):
"""
Initialize the bot with the given name.
:param name: The name of the bot.
"""
self._name = name
self._send_name = False
tag = int(self._get_string())
GameStdIO._set_up_logging(tag, name)
width, height = [int(x) for x in self._get_string().strip().split()]
self.map = game_map.Map(tag, width, height)
self.update_map()
self.initial_map = copy.deepcopy(self.map)
self._send_name = True
self.done = False
def update_map(self):
"""
Parse the map given by the engine.
:return: new parsed map
:rtype: game_map.Map
"""
if self._send_name:
self._send_string(self._name)
self._done_sending()
self._send_name = False
logging.info("---NEW TURN---")
recv = self._get_string()
if recv == "":
self.close()
self.done = True
return self.map # last step map
self.map._parse(recv)
return self.map
def close(self):
pass
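# Minimal usage sketch (hypothetical bot name; the Halite engine drives the turn loop):
# game = GameStdIO("MyBot")
# while not game.done:
#     game_map = game.update_map()
#     game.send_command_queue([])  # no orders this turn; just acknowledge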
|
normal
|
{
"blob_id": "09d31df9c76975377b44470e1f2ba4a5c4b7bbde",
"index": 912,
"step-1": "<mask token>\n\n\nclass GameStdIO:\n <mask token>\n <mask token>\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = sys.stdin.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n\n def __init__(self, name):\n \"\"\"\n Initialize the bot with the given name.\n\n :param name: The name of the bot.\n \"\"\"\n self._name = name\n self._send_name = False\n tag = int(self._get_string())\n GameStdIO._set_up_logging(tag, name)\n width, height = [int(x) for x in self._get_string().strip().split()]\n self.map = game_map.Map(tag, width, height)\n self.update_map()\n self.initial_map = copy.deepcopy(self.map)\n self._send_name = True\n self.done = False\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n\n def close(self):\n pass\n",
"step-2": "<mask token>\n\n\nclass GameUnix:\n <mask token>\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n self.sfile.write(s)\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n self.sfile.write('\\n')\n self.sfile.flush()\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n <mask token>\n\n\nclass GameStdIO:\n \"\"\"\n :ivar map: Current map representation\n :ivar initial_map: The initial version of the map before game starts\n \"\"\"\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n sys.stdout.write(s)\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = sys.stdin.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n\n def __init__(self, name):\n \"\"\"\n Initialize the bot with the given name.\n\n :param name: The name of the bot.\n \"\"\"\n self._name = name\n self._send_name = False\n tag = int(self._get_string())\n GameStdIO._set_up_logging(tag, name)\n width, height = [int(x) for x in self._get_string().strip().split()]\n self.map = game_map.Map(tag, width, height)\n self.update_map()\n self.initial_map = copy.deepcopy(self.map)\n self._send_name = True\n self.done = False\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n\n def close(self):\n pass\n",
"step-3": "<mask token>\n\n\nclass GameUnix:\n <mask token>\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n self.sfile.write(s)\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n self.sfile.write('\\n')\n self.sfile.flush()\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = self.sfile.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n\n def __init__(self, name, socket_path='/dev/shm/bot.sock'):\n \"\"\"\n Initialize the bot with the given name.\n\n :param name: The name of the bot.\n \"\"\"\n self.s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n connected = False\n while not connected:\n try:\n self.s.connect(socket_path)\n connected = True\n except Exception:\n pass\n self.sfile = self.s.makefile('rw')\n self._name = name\n self._send_name = False\n tag = int(self._get_string())\n GameUnix._set_up_logging(tag, name)\n width, height = [int(x) for x in self._get_string().strip().split()]\n self.map = game_map.Map(tag, width, height)\n self.update_map()\n self.initial_map = copy.deepcopy(self.map)\n self._send_name = True\n self.done = False\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n <mask token>\n\n\nclass GameStdIO:\n \"\"\"\n :ivar map: Current map representation\n :ivar initial_map: The initial version of the map before game starts\n \"\"\"\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. 
Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n sys.stdout.write(s)\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = sys.stdin.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n\n def __init__(self, name):\n \"\"\"\n Initialize the bot with the given name.\n\n :param name: The name of the bot.\n \"\"\"\n self._name = name\n self._send_name = False\n tag = int(self._get_string())\n GameStdIO._set_up_logging(tag, name)\n width, height = [int(x) for x in self._get_string().strip().split()]\n self.map = game_map.Map(tag, width, height)\n self.update_map()\n self.initial_map = copy.deepcopy(self.map)\n self._send_name = True\n self.done = False\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n\n def close(self):\n pass\n",
"step-4": "import sys\nimport logging\nimport copy\nimport socket\nfrom . import game_map\n\n\nclass GameUnix:\n \"\"\"\n :ivar map: Current map representation\n :ivar initial_map: The initial version of the map before game starts\n \"\"\"\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n self.sfile.write(s)\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n self.sfile.write('\\n')\n self.sfile.flush()\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = self.sfile.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n\n def __init__(self, name, socket_path='/dev/shm/bot.sock'):\n \"\"\"\n Initialize the bot with the given name.\n\n :param name: The name of the bot.\n \"\"\"\n self.s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n connected = False\n while not connected:\n try:\n self.s.connect(socket_path)\n connected = True\n except Exception:\n pass\n self.sfile = self.s.makefile('rw')\n self._name = name\n self._send_name = False\n tag = int(self._get_string())\n GameUnix._set_up_logging(tag, name)\n width, height = [int(x) for x in self._get_string().strip().split()]\n self.map = game_map.Map(tag, width, height)\n self.update_map()\n self.initial_map = copy.deepcopy(self.map)\n self._send_name = True\n self.done = False\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n\n def close(self):\n self.sfile.close()\n self.s.close()\n\n\nclass GameStdIO:\n \"\"\"\n :ivar map: Current map representation\n :ivar initial_map: The initial version of the map before game starts\n \"\"\"\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. 
Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n sys.stdout.write(s)\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = sys.stdin.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n\n def __init__(self, name):\n \"\"\"\n Initialize the bot with the given name.\n\n :param name: The name of the bot.\n \"\"\"\n self._name = name\n self._send_name = False\n tag = int(self._get_string())\n GameStdIO._set_up_logging(tag, name)\n width, height = [int(x) for x in self._get_string().strip().split()]\n self.map = game_map.Map(tag, width, height)\n self.update_map()\n self.initial_map = copy.deepcopy(self.map)\n self._send_name = True\n self.done = False\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n\n def close(self):\n pass\n",
"step-5": "import sys\nimport logging\nimport copy\nimport socket\n\nfrom . import game_map\n\nclass GameUnix:\n \"\"\"\n :ivar map: Current map representation\n :ivar initial_map: The initial version of the map before game starts\n \"\"\"\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n self.sfile.write(s)\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n self.sfile.write('\\n')\n self.sfile.flush()\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = self.sfile.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = \"{}_{}.log\".format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG, filemode='w')\n logging.info(\"Initialized bot {}\".format(name))\n\n def __init__(self, name, socket_path=\"/dev/shm/bot.sock\"):\n \"\"\"\n Initialize the bot with the given name.\n\n :param name: The name of the bot.\n \"\"\"\n self.s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n connected = False\n while not connected:\n try:\n self.s.connect(socket_path)\n connected = True\n except Exception:\n pass # Do nothing, just try again\n self.sfile = self.s.makefile('rw')\n\n self._name = name\n self._send_name = False\n tag = int(self._get_string())\n GameUnix._set_up_logging(tag, name)\n width, height = [int(x) for x in self._get_string().strip().split()]\n self.map = game_map.Map(tag, width, height)\n self.update_map()\n self.initial_map = copy.deepcopy(self.map)\n self._send_name = True\n\n self.done = False\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info(\"---NEW TURN---\")\n recv = self._get_string()\n\n if recv == \"\":\n self.close()\n self.done = True\n return self.map # last step map\n\n self.map._parse(recv)\n return self.map\n \n def close(self):\n self.sfile.close()\n self.s.close()\n\nclass GameStdIO:\n \"\"\"\n :ivar map: Current map representation\n :ivar initial_map: The initial version of the map before game starts\n \"\"\"\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. 
Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n sys.stdout.write(s)\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = sys.stdin.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = \"{}_{}.log\".format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG, filemode='w')\n logging.info(\"Initialized bot {}\".format(name))\n\n def __init__(self, name):\n \"\"\"\n Initialize the bot with the given name.\n\n :param name: The name of the bot.\n \"\"\"\n\n self._name = name\n self._send_name = False\n tag = int(self._get_string())\n GameStdIO._set_up_logging(tag, name)\n width, height = [int(x) for x in self._get_string().strip().split()]\n self.map = game_map.Map(tag, width, height)\n self.update_map()\n self.initial_map = copy.deepcopy(self.map)\n self._send_name = True\n\n self.done = False\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info(\"---NEW TURN---\")\n recv = self._get_string()\n\n if recv == \"\":\n self.close()\n self.done = True\n return self.map # last step map\n\n self.map._parse(recv)\n return self.map\n \n def close(self):\n pass",
"step-ids": [
8,
14,
18,
21,
22
]
}
|
[
8,
14,
18,
21,
22
] |
"""
Contain meta-data related functions:
* accessing integration schema: fields, values, constraints on inputs/queries
* tracking fields available
* tracking known (input field) values
"""
# coding=utf-8
__author__ = 'vidma'
|
normal
|
{
"blob_id": "abdedad2c2b42b54cdba0e61e095ba3df0783b81",
"index": 1172,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n__author__ = 'vidma'\n",
"step-3": "\"\"\"\nContain meta-data related functions:\n\n* accessing integration schema: fields, values, constraints on inputs/queries\n* tracking fields available\n* tracking known (input field) values\n\"\"\"\n# coding=utf-8\n__author__ = 'vidma'\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import matplotlib.pyplot as plt
import numpy as np
import unittest
from ema_workbench.analysis import clusterer
from test import utilities
class ClusterTestCase(unittest.TestCase):
def test_cluster(self):
n = 10
experiments, outcomes = utilities.load_flu_data()
data = outcomes["infected fraction R1"][0:n, :]
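        # CID: complexity-invariant distance between each pair of the n series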
distances = clusterer.calculate_cid(data)
self.assertEqual(distances.shape, (n, n))
clusterer.plot_dendrogram(distances)
plt.draw()
assignment = clusterer.apply_agglomerative_clustering(distances, 2)
self.assertEqual(assignment.shape, (10,))
distances = clusterer.calculate_cid(data, condensed_form=True)
        self.assertEqual(distances.shape, (sum(np.arange(0, n)),))
clusterer.plot_dendrogram(distances)
plt.draw()
plt.close("all")
if __name__ == "__main__":
unittest.main()
|
normal
|
{
"blob_id": "a7e2b016131dfdb75e537e86875e1b2f19fb3d9d",
"index": 2580,
"step-1": "<mask token>\n\n\nclass ClusterTestCase(unittest.TestCase):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ClusterTestCase(unittest.TestCase):\n\n def test_cluster(self):\n n = 10\n experiments, outcomes = utilities.load_flu_data()\n data = outcomes['infected fraction R1'][0:n, :]\n distances = clusterer.calculate_cid(data)\n self.assertEqual(distances.shape, (n, n))\n clusterer.plot_dendrogram(distances)\n plt.draw()\n assignment = clusterer.apply_agglomerative_clustering(distances, 2)\n self.assertEqual(assignment.shape, (10,))\n distances = clusterer.calculate_cid(data, condensed_form=True)\n self.assertEqual(distances.shape, sum(np.arange(0, n)))\n clusterer.plot_dendrogram(distances)\n plt.draw()\n plt.close('all')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass ClusterTestCase(unittest.TestCase):\n\n def test_cluster(self):\n n = 10\n experiments, outcomes = utilities.load_flu_data()\n data = outcomes['infected fraction R1'][0:n, :]\n distances = clusterer.calculate_cid(data)\n self.assertEqual(distances.shape, (n, n))\n clusterer.plot_dendrogram(distances)\n plt.draw()\n assignment = clusterer.apply_agglomerative_clustering(distances, 2)\n self.assertEqual(assignment.shape, (10,))\n distances = clusterer.calculate_cid(data, condensed_form=True)\n self.assertEqual(distances.shape, sum(np.arange(0, n)))\n clusterer.plot_dendrogram(distances)\n plt.draw()\n plt.close('all')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-4": "import matplotlib.pyplot as plt\nimport numpy as np\nimport unittest\nfrom ema_workbench.analysis import clusterer\nfrom test import utilities\n\n\nclass ClusterTestCase(unittest.TestCase):\n\n def test_cluster(self):\n n = 10\n experiments, outcomes = utilities.load_flu_data()\n data = outcomes['infected fraction R1'][0:n, :]\n distances = clusterer.calculate_cid(data)\n self.assertEqual(distances.shape, (n, n))\n clusterer.plot_dendrogram(distances)\n plt.draw()\n assignment = clusterer.apply_agglomerative_clustering(distances, 2)\n self.assertEqual(assignment.shape, (10,))\n distances = clusterer.calculate_cid(data, condensed_form=True)\n self.assertEqual(distances.shape, sum(np.arange(0, n)))\n clusterer.plot_dendrogram(distances)\n plt.draw()\n plt.close('all')\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"step-5": "import matplotlib.pyplot as plt\nimport numpy as np\nimport unittest\n\nfrom ema_workbench.analysis import clusterer\nfrom test import utilities\n\n\nclass ClusterTestCase(unittest.TestCase):\n def test_cluster(self):\n n = 10\n experiments, outcomes = utilities.load_flu_data()\n data = outcomes[\"infected fraction R1\"][0:n, :]\n\n distances = clusterer.calculate_cid(data)\n self.assertEqual(distances.shape, (n, n))\n clusterer.plot_dendrogram(distances)\n plt.draw()\n\n assignment = clusterer.apply_agglomerative_clustering(distances, 2)\n self.assertEqual(assignment.shape, (10,))\n\n distances = clusterer.calculate_cid(data, condensed_form=True)\n self.assertEqual(distances.shape, sum(np.arange(0, n)))\n clusterer.plot_dendrogram(distances)\n plt.draw()\n\n plt.close(\"all\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
ba1466.pngMap = [
'11111111111111111111111111111100000000011111111111111111111111111000000000000000011111111111111111111111111111111111111111111111',
'11111111111111111111111111111110000000011111111111111111111111111000000000000000011111111111111111111111111111111111111111111111',
'11111111111111111111111111111110000000001111111111111111111111110000000000000000000011111111111111111111111111111111111111111111',
'11111111111111111111111111111110000000001111111111111111111111000000000000000000000011111111111111111111111111111111111111111111',
'11111111111111111111111111111111000000000011111111111111111101000000000000000000000000111111111111111111111111111111111111111111',
'11111111111111111111111111111111000000000011111111111111111100000000000000000000000000111111111111111111111111111111111111111111',
'11111111111111111111111111111111100000000100111111111111100000000000000000000000000000111111111111111111111111111111111111111111',
'11111111111111111111111111111111110000000000111111111111100000000000000000000000000000111111111111111111111111111111111111111111',
'11111111111111111111111111111111100000001111111111111100000000000000000000000000000111111111111111111111111111111111111111111111',
'11111111111111111111111111111111000000011111111111111100000000000000000000000000000011111111111111111111111111111111111111111111',
'11111111111111111111111111111110000000011111111111110000000000000000000000000000000011111111111111111111111111111111111111111111',
'11111111111111111111111111111111000000011111111111000000000000000000000000000000000011111111111111111111111111111111111111111111',
'11111111111111111111111111111111100000001111111100000000000000000000000000000000000011111111111111111111111111111111111111111111',
'11111111111111111111111111111111100000001111111100000000000000000000000000000000000011111111111111111111111111111111111111111111',
'11111111111111111111111111111111110000001111111100000000000000000000000000000000000011111111111111111111111111111111111111111111',
'11111111111111111111111111111111110000011111111000000000000000000000000000000000000011111111111111111111111111111111111111111111',
'11111111111111111111111111111111111110000000111000000000000000000000000000000000000111111111111111111111111111111111111111111100',
'11111111111111111111111111111111111100000000001000000000000000000000000000000000011111111111111111111111111111111111111110100000',
'11111111111111111111111111111111111111111111110000000000000000000000000000000001111111111111111111111111111111100000000000000000',
'11111111111111111111111111111111111111111111110000000000000000000000000000000000111111111111111111111111111110100000000000000000',
'11111111111111111111111111111111111111111111100000000000000000000000000000000000111111111111111111110000000000000000000000000000',
'11111111111111111111111111111111111111111111100000000000000000000000000000000001111111111111111111100000000000000000000000000000',
'11111111111111111111111111111111111111111111100001111111100000101100000000000000111111110000000000000000000000000000000000000000',
'11111111111111111111111111111111111111111111111110111111111111111110000000000000110111000000000000000000000000000000000000000000',
'11111111111111111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000000000000000000000000000',
'11111111111111111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000000000000000000000000000',
'11111111111111111111111111111111111111111111111100110000000000000000000000000000000000000000000000000000000000000000000000000000',
'11111111111111111111111111111111111111000111000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'11111111111111110000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'11010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111',
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001011111111111111',
'00000011000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111111111',
'11111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010111111111111111111',
'11111111111100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111',
'11111111111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111111111111',
'11111111111111111110000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111',
'11111111111111111111110010000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111',
'11111111111111111111111110000000000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111',
'11111111111111111111111111111111111000000000000000000000000000000000000000000000000000001111111111111111111111111111111111111111',
'11111111111111111111111111111111111100000000000000000000000000000000000000000000000000001111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111100111110000000000000000000001111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111100001000000011111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',
]
|
normal
|
{
"blob_id": "dbefca59376e567a6116dec4e07c44b1fe301ca9",
"index": 9911,
"step-1": "<mask token>\n",
"step-2": "ba1466.pngMap = [\n '11111111111111111111111111111100000000011111111111111111111111111000000000000000011111111111111111111111111111111111111111111111'\n ,\n '11111111111111111111111111111110000000011111111111111111111111111000000000000000011111111111111111111111111111111111111111111111'\n ,\n '11111111111111111111111111111110000000001111111111111111111111110000000000000000000011111111111111111111111111111111111111111111'\n ,\n '11111111111111111111111111111110000000001111111111111111111111000000000000000000000011111111111111111111111111111111111111111111'\n ,\n '11111111111111111111111111111111000000000011111111111111111101000000000000000000000000111111111111111111111111111111111111111111'\n ,\n '11111111111111111111111111111111000000000011111111111111111100000000000000000000000000111111111111111111111111111111111111111111'\n ,\n '11111111111111111111111111111111100000000100111111111111100000000000000000000000000000111111111111111111111111111111111111111111'\n ,\n '11111111111111111111111111111111110000000000111111111111100000000000000000000000000000111111111111111111111111111111111111111111'\n ,\n '11111111111111111111111111111111100000001111111111111100000000000000000000000000000111111111111111111111111111111111111111111111'\n ,\n '11111111111111111111111111111111000000011111111111111100000000000000000000000000000011111111111111111111111111111111111111111111'\n ,\n '11111111111111111111111111111110000000011111111111110000000000000000000000000000000011111111111111111111111111111111111111111111'\n ,\n '11111111111111111111111111111111000000011111111111000000000000000000000000000000000011111111111111111111111111111111111111111111'\n ,\n '11111111111111111111111111111111100000001111111100000000000000000000000000000000000011111111111111111111111111111111111111111111'\n ,\n '11111111111111111111111111111111100000001111111100000000000000000000000000000000000011111111111111111111111111111111111111111111'\n ,\n '11111111111111111111111111111111110000001111111100000000000000000000000000000000000011111111111111111111111111111111111111111111'\n ,\n '11111111111111111111111111111111110000011111111000000000000000000000000000000000000011111111111111111111111111111111111111111111'\n ,\n '11111111111111111111111111111111111110000000111000000000000000000000000000000000000111111111111111111111111111111111111111111100'\n ,\n '11111111111111111111111111111111111100000000001000000000000000000000000000000000011111111111111111111111111111111111111110100000'\n ,\n '11111111111111111111111111111111111111111111110000000000000000000000000000000001111111111111111111111111111111100000000000000000'\n ,\n '11111111111111111111111111111111111111111111110000000000000000000000000000000000111111111111111111111111111110100000000000000000'\n ,\n '11111111111111111111111111111111111111111111100000000000000000000000000000000000111111111111111111110000000000000000000000000000'\n ,\n '11111111111111111111111111111111111111111111100000000000000000000000000000000001111111111111111111100000000000000000000000000000'\n ,\n '11111111111111111111111111111111111111111111100001111111100000101100000000000000111111110000000000000000000000000000000000000000'\n ,\n '11111111111111111111111111111111111111111111111110111111111111111110000000000000110111000000000000000000000000000000000000000000'\n ,\n '11111111111111111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000000000000000000000000000'\n ,\n 
'11111111111111111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000000000000000000000000000'\n ,\n '11111111111111111111111111111111111111111111111100110000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '11111111111111111111111111111111111111000111000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '11111111111111110000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '11010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100111111'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111111'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111'\n ,\n '00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001011111111111111'\n ,\n '00000011000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111111111'\n ,\n 
'11111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010111111111111111111'\n ,\n '11111111111100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111'\n ,\n '11111111111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111111111111'\n ,\n '11111111111111111110000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111'\n ,\n '11111111111111111111110010000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111'\n ,\n '11111111111111111111111110000000000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111'\n ,\n '11111111111111111111111111111111111000000000000000000000000000000000000000000000000000001111111111111111111111111111111111111111'\n ,\n '11111111111111111111111111111111111100000000000000000000000000000000000000000000000000001111111111111111111111111111111111111111'\n ,\n '11111111111111111111111111111111111111111111111100111110000000000000000000001111111111111111111111111111111111111111111111111111'\n ,\n '11111111111111111111111111111111111111111111111111111111111100001000000011111111111111111111111111111111111111111111111111111111'\n ,\n '11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111'\n ,\n '11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111'\n ,\n '11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111'\n ,\n '11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111'\n ]\n",
"step-3": "ba1466.pngMap = [\n'11111111111111111111111111111100000000011111111111111111111111111000000000000000011111111111111111111111111111111111111111111111',\n'11111111111111111111111111111110000000011111111111111111111111111000000000000000011111111111111111111111111111111111111111111111',\n'11111111111111111111111111111110000000001111111111111111111111110000000000000000000011111111111111111111111111111111111111111111',\n'11111111111111111111111111111110000000001111111111111111111111000000000000000000000011111111111111111111111111111111111111111111',\n'11111111111111111111111111111111000000000011111111111111111101000000000000000000000000111111111111111111111111111111111111111111',\n'11111111111111111111111111111111000000000011111111111111111100000000000000000000000000111111111111111111111111111111111111111111',\n'11111111111111111111111111111111100000000100111111111111100000000000000000000000000000111111111111111111111111111111111111111111',\n'11111111111111111111111111111111110000000000111111111111100000000000000000000000000000111111111111111111111111111111111111111111',\n'11111111111111111111111111111111100000001111111111111100000000000000000000000000000111111111111111111111111111111111111111111111',\n'11111111111111111111111111111111000000011111111111111100000000000000000000000000000011111111111111111111111111111111111111111111',\n'11111111111111111111111111111110000000011111111111110000000000000000000000000000000011111111111111111111111111111111111111111111',\n'11111111111111111111111111111111000000011111111111000000000000000000000000000000000011111111111111111111111111111111111111111111',\n'11111111111111111111111111111111100000001111111100000000000000000000000000000000000011111111111111111111111111111111111111111111',\n'11111111111111111111111111111111100000001111111100000000000000000000000000000000000011111111111111111111111111111111111111111111',\n'11111111111111111111111111111111110000001111111100000000000000000000000000000000000011111111111111111111111111111111111111111111',\n'11111111111111111111111111111111110000011111111000000000000000000000000000000000000011111111111111111111111111111111111111111111',\n'11111111111111111111111111111111111110000000111000000000000000000000000000000000000111111111111111111111111111111111111111111100',\n'11111111111111111111111111111111111100000000001000000000000000000000000000000000011111111111111111111111111111111111111110100000',\n'11111111111111111111111111111111111111111111110000000000000000000000000000000001111111111111111111111111111111100000000000000000',\n'11111111111111111111111111111111111111111111110000000000000000000000000000000000111111111111111111111111111110100000000000000000',\n'11111111111111111111111111111111111111111111100000000000000000000000000000000000111111111111111111110000000000000000000000000000',\n'11111111111111111111111111111111111111111111100000000000000000000000000000000001111111111111111111100000000000000000000000000000',\n'11111111111111111111111111111111111111111111100001111111100000101100000000000000111111110000000000000000000000000000000000000000',\n'11111111111111111111111111111111111111111111111110111111111111111110000000000000110111000000000000000000000000000000000000000000',\n'11111111111111111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000000000000000000000000000',\n'11111111111111111111111111111111111111111111111111111111111111111110000000000000000000000000000000000000000000000000000000000000',\n'111111111111111111111111111111111111111111111111001100000000000000
00000000000000000000000000000000000000000000000000000000000000',\n'11111111111111111111111111111111111111000111000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'11111111111111110000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'11010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100111111',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111111',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111',\n'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001011111111111111',\n'00000011000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111111111111111',\n'11111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010111111111111111111',\n'11111111111100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111',\n'11111111111111111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000111111111111111111111',\n'111111111111111111100000000000
00000000000000000000000000000000000000000000000000000000000000000000000000001111111111111111111111',\n'11111111111111111111110010000000000000000000000000000000000000000000000000000000000000000000000000011111111111111111111111111111',\n'11111111111111111111111110000000000000000000000000000000000000000000000000000000000000000000000000111111111111111111111111111111',\n'11111111111111111111111111111111111000000000000000000000000000000000000000000000000000001111111111111111111111111111111111111111',\n'11111111111111111111111111111111111100000000000000000000000000000000000000000000000000001111111111111111111111111111111111111111',\n'11111111111111111111111111111111111111111111111100111110000000000000000000001111111111111111111111111111111111111111111111111111',\n'11111111111111111111111111111111111111111111111111111111111100001000000011111111111111111111111111111111111111111111111111111111',\n'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',\n'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',\n'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',\n'11111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111111',\n]\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from django.urls import path
from .views import FirstModelView
urlpatterns = [path('firstModel', FirstModelView.as_view())]
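# Hypothetical sketch of the imported view (it lives in .views, which is not shown here):
#   from django.views import View
#   class FirstModelView(View):
#       def get(self, request): ...
# as_view() above works with any such class-based view.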
|
normal
|
{
"blob_id": "4efd22d132accd0f5945a0c911b73b67654b92e4",
"index": 9358,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nurlpatterns = [path('firstModel', FirstModelView.as_view())]\n",
"step-3": "from django.urls import path\nfrom .views import FirstModelView\nurlpatterns = [path('firstModel', FirstModelView.as_view())]\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 25 12:07:32 2021
@author: yashv
"""
import numpy as np
X= [0.7, 1.5]
Y= [3.9,0.2]
def f(w,b,x): #sigmoid logistic function
return 1.0/(1.0 + np.exp(-(w*x +b)))
def error(w,b): #loss function
err=0.0
for x,y in zip(X,Y):
fx= f(w,b,x)
err += 0.5 * (fx - y) **2
return err
def grad_b(w,b,x,y):
fx= f(w,b,x)
return (fx - y)* fx * (1-fx)
def grad_w(w,b,x,y):
fx= f(w,b,x)
return (fx - y)* fx * (1-fx) * x
def do_gradient_descent():
    w, b, eta, max_epochs = 10, 10, 6.0, 1000
    for i in range(max_epochs):
        dw, db = 0, 0
        for x, y in zip(X, Y):
            dw += grad_w(w, b, x, y)
            db += grad_b(w, b, x, y)
        w = w - eta * dw
        b = b - eta * db  # fixed: b must be updated with its own gradient db, not dw
        print(w, b)
        print("e:", error(w, b))
do_gradient_descent()
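# Gradient sanity note (a sketch, not in the original): for L = 0.5*(f(x) - y)**2 with
# f = sigmoid(w*x + b), dL/db = (f - y) * f * (1 - f) and dL/dw = dL/db * x,
# which is exactly what grad_b and grad_w compute above.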
|
normal
|
{
"blob_id": "2387856757ad1c3ff911cf2a7537ca6df7786997",
"index": 9244,
"step-1": "<mask token>\n\n\ndef f(w, b, x):\n return 1.0 / (1.0 + np.exp(-(w * x + b)))\n\n\ndef error(w, b):\n err = 0.0\n for x, y in zip(X, Y):\n fx = f(w, b, x)\n err += 0.5 * (fx - y) ** 2\n return err\n\n\ndef grad_b(w, b, x, y):\n fx = f(w, b, x)\n return (fx - y) * fx * (1 - fx)\n\n\ndef grad_w(w, b, x, y):\n fx = f(w, b, x)\n return (fx - y) * fx * (1 - fx) * x\n\n\ndef do_gradient_descent():\n w, b, eta, max_epochs = 10, 10, 6.0, 1000\n for i in range(max_epochs):\n dw, db = 0, 0\n for x, y in zip(X, Y):\n dw += grad_w(w, b, x, y)\n db += grad_b(w, b, x, y)\n w = w - eta * dw\n b = b - eta * dw\n print(w, b)\n print('e:', error(w, b))\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef f(w, b, x):\n return 1.0 / (1.0 + np.exp(-(w * x + b)))\n\n\ndef error(w, b):\n err = 0.0\n for x, y in zip(X, Y):\n fx = f(w, b, x)\n err += 0.5 * (fx - y) ** 2\n return err\n\n\ndef grad_b(w, b, x, y):\n fx = f(w, b, x)\n return (fx - y) * fx * (1 - fx)\n\n\ndef grad_w(w, b, x, y):\n fx = f(w, b, x)\n return (fx - y) * fx * (1 - fx) * x\n\n\ndef do_gradient_descent():\n w, b, eta, max_epochs = 10, 10, 6.0, 1000\n for i in range(max_epochs):\n dw, db = 0, 0\n for x, y in zip(X, Y):\n dw += grad_w(w, b, x, y)\n db += grad_b(w, b, x, y)\n w = w - eta * dw\n b = b - eta * dw\n print(w, b)\n print('e:', error(w, b))\n\n\ndo_gradient_descent()\n",
"step-3": "<mask token>\nX = [0.7, 1.5]\nY = [3.9, 0.2]\n\n\ndef f(w, b, x):\n return 1.0 / (1.0 + np.exp(-(w * x + b)))\n\n\ndef error(w, b):\n err = 0.0\n for x, y in zip(X, Y):\n fx = f(w, b, x)\n err += 0.5 * (fx - y) ** 2\n return err\n\n\ndef grad_b(w, b, x, y):\n fx = f(w, b, x)\n return (fx - y) * fx * (1 - fx)\n\n\ndef grad_w(w, b, x, y):\n fx = f(w, b, x)\n return (fx - y) * fx * (1 - fx) * x\n\n\ndef do_gradient_descent():\n w, b, eta, max_epochs = 10, 10, 6.0, 1000\n for i in range(max_epochs):\n dw, db = 0, 0\n for x, y in zip(X, Y):\n dw += grad_w(w, b, x, y)\n db += grad_b(w, b, x, y)\n w = w - eta * dw\n b = b - eta * dw\n print(w, b)\n print('e:', error(w, b))\n\n\ndo_gradient_descent()\n",
"step-4": "<mask token>\nimport numpy as np\nX = [0.7, 1.5]\nY = [3.9, 0.2]\n\n\ndef f(w, b, x):\n return 1.0 / (1.0 + np.exp(-(w * x + b)))\n\n\ndef error(w, b):\n err = 0.0\n for x, y in zip(X, Y):\n fx = f(w, b, x)\n err += 0.5 * (fx - y) ** 2\n return err\n\n\ndef grad_b(w, b, x, y):\n fx = f(w, b, x)\n return (fx - y) * fx * (1 - fx)\n\n\ndef grad_w(w, b, x, y):\n fx = f(w, b, x)\n return (fx - y) * fx * (1 - fx) * x\n\n\ndef do_gradient_descent():\n w, b, eta, max_epochs = 10, 10, 6.0, 1000\n for i in range(max_epochs):\n dw, db = 0, 0\n for x, y in zip(X, Y):\n dw += grad_w(w, b, x, y)\n db += grad_b(w, b, x, y)\n w = w - eta * dw\n b = b - eta * dw\n print(w, b)\n print('e:', error(w, b))\n\n\ndo_gradient_descent()\n",
"step-5": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jan 25 12:07:32 2021\r\n\r\n@author: yashv\r\n\"\"\"\r\n\r\nimport numpy as np\r\n\r\nX= [0.7, 1.5]\r\nY= [3.9,0.2]\r\n\r\ndef f(w,b,x): #sigmoid logistic function\r\n return 1.0/(1.0 + np.exp(-(w*x +b)))\r\n\r\ndef error(w,b): #loss function\r\n err=0.0\r\n for x,y in zip(X,Y):\r\n fx= f(w,b,x)\r\n err += 0.5 * (fx - y) **2\r\n return err\r\n\r\ndef grad_b(w,b,x,y):\r\n fx= f(w,b,x)\r\n return (fx - y)* fx * (1-fx) \r\n\r\ndef grad_w(w,b,x,y):\r\n fx= f(w,b,x)\r\n return (fx - y)* fx * (1-fx) * x\r\n\r\ndef do_gradient_descent():\r\n w, b, eta, max_epochs = 10, 10, 6.0, 1000\r\n for i in range(max_epochs):\r\n dw, db = 0,0 \r\n for x,y in zip(X,Y):\r\n dw += grad_w(w,b,x,y)\r\n db += grad_b(w,b,x,y)\r\n w = w - eta * dw\r\n b = b - eta * dw\r\n print(w,b)\r\n print(\"e:\",error(w,b))\r\n \r\ndo_gradient_descent()\r\n\r\n\r\n\r\n",
"step-ids": [
5,
6,
7,
8,
9
]
}
|
[
5,
6,
7,
8,
9
] |
from datetime import datetime
from logging import Logger
from pathlib import Path
from typing import Dict
import ignite
import ignite.distributed as idist
import torch
from omegaconf import OmegaConf
from config_schema import ConfigSchema
def log_metrics(
logger: Logger, epoch: int, elapsed: float, tag: str, metrics: Dict[str, float]
):
logger.info(
"Epoch {} - elapsed: {:.5f} - {} metrics: {}".format(
epoch,
elapsed,
tag,
", ".join(["{}: {}".format(k, v) for k, v in metrics.items()]),
)
)
def log_basic_info(logger: Logger, config: ConfigSchema):
logger.info("Experiment: {}".format(config.experiment_name))
logger.info("- PyTorch version: {}".format(torch.__version__))
logger.info("- Ignite version: {}".format(ignite.__version__))
logger.info("\n")
logger.info("Configuration:")
for line in OmegaConf.to_yaml(config).split("\n"):
logger.info("\t" + line)
logger.info("\n")
if idist.get_world_size() > 1:
logger.info("\nDistributed setting:")
logger.info("\tbackend: {}".format(idist.backend()))
logger.info("\tworld size: {}".format(idist.get_world_size()))
logger.info("\n")
def prepare_output_directory(config: ConfigSchema) -> None:
formatted = datetime.now().strftime(config.output_path_format)
output_path = Path(formatted)
# force always to use a new directory to avoid overwriting existing ones
output_path.mkdir(parents=True, exist_ok=False)
config.output_path = output_path.as_posix()
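# Typical call order (a sketch, not part of the module): log_basic_info(logger, config)
# at startup, then prepare_output_directory(config) once per run; mkdir(exist_ok=False)
# makes a second run with an identical timestamp fail loudly instead of overwriting results.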
|
normal
|
{
"blob_id": "d8fb5aeb5453b986cc698165749992e4a7677257",
"index": 1506,
"step-1": "<mask token>\n\n\ndef prepare_output_directory(config: ConfigSchema) ->None:\n formatted = datetime.now().strftime(config.output_path_format)\n output_path = Path(formatted)\n output_path.mkdir(parents=True, exist_ok=False)\n config.output_path = output_path.as_posix()\n",
"step-2": "<mask token>\n\n\ndef log_basic_info(logger: Logger, config: ConfigSchema):\n logger.info('Experiment: {}'.format(config.experiment_name))\n logger.info('- PyTorch version: {}'.format(torch.__version__))\n logger.info('- Ignite version: {}'.format(ignite.__version__))\n logger.info('\\n')\n logger.info('Configuration:')\n for line in OmegaConf.to_yaml(config).split('\\n'):\n logger.info('\\t' + line)\n logger.info('\\n')\n if idist.get_world_size() > 1:\n logger.info('\\nDistributed setting:')\n logger.info('\\tbackend: {}'.format(idist.backend()))\n logger.info('\\tworld size: {}'.format(idist.get_world_size()))\n logger.info('\\n')\n\n\ndef prepare_output_directory(config: ConfigSchema) ->None:\n formatted = datetime.now().strftime(config.output_path_format)\n output_path = Path(formatted)\n output_path.mkdir(parents=True, exist_ok=False)\n config.output_path = output_path.as_posix()\n",
"step-3": "<mask token>\n\n\ndef log_metrics(logger: Logger, epoch: int, elapsed: float, tag: str,\n metrics: Dict[str, float]):\n logger.info('Epoch {} - elapsed: {:.5f} - {} metrics: {}'.format(epoch,\n elapsed, tag, ', '.join(['{}: {}'.format(k, v) for k, v in metrics.\n items()])))\n\n\ndef log_basic_info(logger: Logger, config: ConfigSchema):\n logger.info('Experiment: {}'.format(config.experiment_name))\n logger.info('- PyTorch version: {}'.format(torch.__version__))\n logger.info('- Ignite version: {}'.format(ignite.__version__))\n logger.info('\\n')\n logger.info('Configuration:')\n for line in OmegaConf.to_yaml(config).split('\\n'):\n logger.info('\\t' + line)\n logger.info('\\n')\n if idist.get_world_size() > 1:\n logger.info('\\nDistributed setting:')\n logger.info('\\tbackend: {}'.format(idist.backend()))\n logger.info('\\tworld size: {}'.format(idist.get_world_size()))\n logger.info('\\n')\n\n\ndef prepare_output_directory(config: ConfigSchema) ->None:\n formatted = datetime.now().strftime(config.output_path_format)\n output_path = Path(formatted)\n output_path.mkdir(parents=True, exist_ok=False)\n config.output_path = output_path.as_posix()\n",
"step-4": "from datetime import datetime\nfrom logging import Logger\nfrom pathlib import Path\nfrom typing import Dict\nimport ignite\nimport ignite.distributed as idist\nimport torch\nfrom omegaconf import OmegaConf\nfrom config_schema import ConfigSchema\n\n\ndef log_metrics(logger: Logger, epoch: int, elapsed: float, tag: str,\n metrics: Dict[str, float]):\n logger.info('Epoch {} - elapsed: {:.5f} - {} metrics: {}'.format(epoch,\n elapsed, tag, ', '.join(['{}: {}'.format(k, v) for k, v in metrics.\n items()])))\n\n\ndef log_basic_info(logger: Logger, config: ConfigSchema):\n logger.info('Experiment: {}'.format(config.experiment_name))\n logger.info('- PyTorch version: {}'.format(torch.__version__))\n logger.info('- Ignite version: {}'.format(ignite.__version__))\n logger.info('\\n')\n logger.info('Configuration:')\n for line in OmegaConf.to_yaml(config).split('\\n'):\n logger.info('\\t' + line)\n logger.info('\\n')\n if idist.get_world_size() > 1:\n logger.info('\\nDistributed setting:')\n logger.info('\\tbackend: {}'.format(idist.backend()))\n logger.info('\\tworld size: {}'.format(idist.get_world_size()))\n logger.info('\\n')\n\n\ndef prepare_output_directory(config: ConfigSchema) ->None:\n formatted = datetime.now().strftime(config.output_path_format)\n output_path = Path(formatted)\n output_path.mkdir(parents=True, exist_ok=False)\n config.output_path = output_path.as_posix()\n",
"step-5": "from datetime import datetime\nfrom logging import Logger\nfrom pathlib import Path\nfrom typing import Dict\n\nimport ignite\nimport ignite.distributed as idist\nimport torch\nfrom omegaconf import OmegaConf\n\nfrom config_schema import ConfigSchema\n\n\ndef log_metrics(\n logger: Logger, epoch: int, elapsed: float, tag: str, metrics: Dict[str, float]\n):\n logger.info(\n \"Epoch {} - elapsed: {:.5f} - {} metrics: {}\".format(\n epoch,\n elapsed,\n tag,\n \", \".join([\"{}: {}\".format(k, v) for k, v in metrics.items()]),\n )\n )\n\n\ndef log_basic_info(logger: Logger, config: ConfigSchema):\n logger.info(\"Experiment: {}\".format(config.experiment_name))\n logger.info(\"- PyTorch version: {}\".format(torch.__version__))\n logger.info(\"- Ignite version: {}\".format(ignite.__version__))\n\n logger.info(\"\\n\")\n logger.info(\"Configuration:\")\n for line in OmegaConf.to_yaml(config).split(\"\\n\"):\n logger.info(\"\\t\" + line)\n logger.info(\"\\n\")\n\n if idist.get_world_size() > 1:\n logger.info(\"\\nDistributed setting:\")\n logger.info(\"\\tbackend: {}\".format(idist.backend()))\n logger.info(\"\\tworld size: {}\".format(idist.get_world_size()))\n logger.info(\"\\n\")\n\n\ndef prepare_output_directory(config: ConfigSchema) -> None:\n formatted = datetime.now().strftime(config.output_path_format)\n\n output_path = Path(formatted)\n # force always to use a new directory to avoid overwriting existing ones\n output_path.mkdir(parents=True, exist_ok=False)\n config.output_path = output_path.as_posix()\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
__author__ = 'simsun'
|
normal
|
{
"blob_id": "2b746d89d34435eb5f3a5b04da61c5cc88178852",
"index": 8784,
"step-1": "<mask token>\n",
"step-2": "__author__ = 'simsun'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from com.kakao.cafe.menu.tea.milkTea import MilkTea
class MatchaMilkTea(MilkTea):
def __init__(self):
super().__init__()
self.__matcha = 1
self.__condensedMilk = 1
self.name = "MatchaMilkTea"
self.__price = 4500
self.__milk = 400
self.__blackTea = 2
def getName(self) -> str:
return self.name
def setName(self, name: str) -> None:
self.name = name
def getPrice(self) -> int:
return self.__price
def setPrice(self, price: int) -> None:
self.__price = price
    def isIced(self) -> bool:
        return self.iced
    def setIced(self, iced: bool) -> None:
        self.iced = iced  # fixed: the getter reads self.iced, so set the same attribute (was self._iced)
def getWater(self) -> int:
pass
def setWater(self, water: int) -> None:
pass
def getMilk(self) -> int:
return self.__milk
def setMilk(self, milk: int) -> None:
self.__milk = milk
def getBlackTea(self) -> int:
return self.__blackTea
def setBlackTea(self, blacktea: int) -> None:
self.__blackTea = blacktea
def getMatcha(self) -> int:
return self.__matcha
def setMatcha(self, matcha: int) -> None:
self.__matcha = matcha
def getCondensedMilk(self) -> int:
return self.__condensedMilk
def setCondensedMilk(self, condensedMilk: int) -> None:
self.__condensedMilk = condensedMilk
def addBlackTea(self, amount: int) -> None:
self.setBlackTea(self.getBlackTea() + amount)
self.setPrice(self.getPrice() + amount * 500)
    def subBlackTea(self, amount: int) -> None:
        if amount > self.__blackTea:
            # raise with the message attached; the old print after raise was unreachable
            raise ValueError("You can't subtract more blacktea.")
        else:
            self.setBlackTea(self.getBlackTea() - amount)
def addMatcha(self, amount: int) -> None:
self.setMatcha(self.getMatcha() + amount)
self.setPrice(self.getPrice() + amount * 400)
def addCondensedMilk(self, amount: int) -> None:
self.setCondensedMilk(self.getCondensedMilk() + amount)
self.setPrice(self.getPrice() + amount * 500)
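# Minimal usage sketch (assumes the MilkTea base class imported above):
#   tea = MatchaMilkTea()
#   tea.addMatcha(1)        # matcha 1 -> 2, price 4500 -> 4900
#   tea.subBlackTea(1)      # blackTea 2 -> 1
#   print(tea.getPrice())   # 4900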
|
normal
|
{
"blob_id": "96b113678a3453520cd2e62eb11efd9582710409",
"index": 2087,
"step-1": "<mask token>\n\n\nclass MatchaMilkTea(MilkTea):\n <mask token>\n\n def getName(self) ->str:\n return self.name\n <mask token>\n <mask token>\n\n def setPrice(self, price: int) ->None:\n self.__price = price\n <mask token>\n\n def setIced(self, iced: bool) ->None:\n self._iced = iced\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def getMatcha(self) ->int:\n return self.__matcha\n\n def setMatcha(self, matcha: int) ->None:\n self.__matcha = matcha\n <mask token>\n\n def setCondensedMilk(self, condensedMilk: int) ->None:\n self.__condensedMilk = condensedMilk\n <mask token>\n\n def subBlackTea(self, amount: int) ->None:\n if amount > self.__blackTea:\n raise ValueError\n print(\"You can't subtract more blacktea.\")\n else:\n self.setBlackTea(self.getBlackTea() - amount)\n <mask token>\n\n def addCondensedMilk(self, amount: int) ->None:\n self.setCondensedMilk(self.getCondensedMilk() + amount)\n self.setPrice(self.getPrice() + amount * 500)\n",
"step-2": "<mask token>\n\n\nclass MatchaMilkTea(MilkTea):\n\n def __init__(self):\n super().__init__()\n self.__matcha = 1\n self.__condensedMilk = 1\n self.name = 'MatchaMilkTea'\n self.__price = 4500\n self.__milk = 400\n self.__blackTea = 2\n\n def getName(self) ->str:\n return self.name\n <mask token>\n\n def getPrice(self) ->int:\n return self.__price\n\n def setPrice(self, price: int) ->None:\n self.__price = price\n <mask token>\n\n def setIced(self, iced: bool) ->None:\n self._iced = iced\n\n def getWater(self) ->int:\n pass\n\n def setWater(self, water: int) ->None:\n pass\n\n def getMilk(self) ->int:\n return self.__milk\n <mask token>\n\n def getBlackTea(self) ->int:\n return self.__blackTea\n <mask token>\n\n def getMatcha(self) ->int:\n return self.__matcha\n\n def setMatcha(self, matcha: int) ->None:\n self.__matcha = matcha\n <mask token>\n\n def setCondensedMilk(self, condensedMilk: int) ->None:\n self.__condensedMilk = condensedMilk\n <mask token>\n\n def subBlackTea(self, amount: int) ->None:\n if amount > self.__blackTea:\n raise ValueError\n print(\"You can't subtract more blacktea.\")\n else:\n self.setBlackTea(self.getBlackTea() - amount)\n <mask token>\n\n def addCondensedMilk(self, amount: int) ->None:\n self.setCondensedMilk(self.getCondensedMilk() + amount)\n self.setPrice(self.getPrice() + amount * 500)\n",
"step-3": "<mask token>\n\n\nclass MatchaMilkTea(MilkTea):\n\n def __init__(self):\n super().__init__()\n self.__matcha = 1\n self.__condensedMilk = 1\n self.name = 'MatchaMilkTea'\n self.__price = 4500\n self.__milk = 400\n self.__blackTea = 2\n\n def getName(self) ->str:\n return self.name\n\n def setName(self, name: str) ->None:\n self.name = name\n\n def getPrice(self) ->int:\n return self.__price\n\n def setPrice(self, price: int) ->None:\n self.__price = price\n <mask token>\n\n def setIced(self, iced: bool) ->None:\n self._iced = iced\n\n def getWater(self) ->int:\n pass\n\n def setWater(self, water: int) ->None:\n pass\n\n def getMilk(self) ->int:\n return self.__milk\n\n def setMilk(self, milk: int) ->None:\n self.__milk = milk\n\n def getBlackTea(self) ->int:\n return self.__blackTea\n\n def setBlackTea(self, blacktea: int) ->None:\n self.__blackTea = blacktea\n\n def getMatcha(self) ->int:\n return self.__matcha\n\n def setMatcha(self, matcha: int) ->None:\n self.__matcha = matcha\n <mask token>\n\n def setCondensedMilk(self, condensedMilk: int) ->None:\n self.__condensedMilk = condensedMilk\n\n def addBlackTea(self, amount: int) ->None:\n self.setBlackTea(self.getBlackTea() + amount)\n self.setPrice(self.getPrice() + amount * 500)\n\n def subBlackTea(self, amount: int) ->None:\n if amount > self.__blackTea:\n raise ValueError\n print(\"You can't subtract more blacktea.\")\n else:\n self.setBlackTea(self.getBlackTea() - amount)\n\n def addMatcha(self, amount: int) ->None:\n self.setMatcha(self.getMatcha() + amount)\n self.setPrice(self.getPrice() + amount * 400)\n\n def addCondensedMilk(self, amount: int) ->None:\n self.setCondensedMilk(self.getCondensedMilk() + amount)\n self.setPrice(self.getPrice() + amount * 500)\n",
"step-4": "from com.kakao.cafe.menu.tea.milkTea import MilkTea\n\n\nclass MatchaMilkTea(MilkTea):\n\n def __init__(self):\n super().__init__()\n self.__matcha = 1\n self.__condensedMilk = 1\n self.name = 'MatchaMilkTea'\n self.__price = 4500\n self.__milk = 400\n self.__blackTea = 2\n\n def getName(self) ->str:\n return self.name\n\n def setName(self, name: str) ->None:\n self.name = name\n\n def getPrice(self) ->int:\n return self.__price\n\n def setPrice(self, price: int) ->None:\n self.__price = price\n\n def isIced(self) ->bool:\n return self.iced\n\n def setIced(self, iced: bool) ->None:\n self._iced = iced\n\n def getWater(self) ->int:\n pass\n\n def setWater(self, water: int) ->None:\n pass\n\n def getMilk(self) ->int:\n return self.__milk\n\n def setMilk(self, milk: int) ->None:\n self.__milk = milk\n\n def getBlackTea(self) ->int:\n return self.__blackTea\n\n def setBlackTea(self, blacktea: int) ->None:\n self.__blackTea = blacktea\n\n def getMatcha(self) ->int:\n return self.__matcha\n\n def setMatcha(self, matcha: int) ->None:\n self.__matcha = matcha\n\n def getCondensedMilk(self) ->int:\n return self.__condensedMilk\n\n def setCondensedMilk(self, condensedMilk: int) ->None:\n self.__condensedMilk = condensedMilk\n\n def addBlackTea(self, amount: int) ->None:\n self.setBlackTea(self.getBlackTea() + amount)\n self.setPrice(self.getPrice() + amount * 500)\n\n def subBlackTea(self, amount: int) ->None:\n if amount > self.__blackTea:\n raise ValueError\n print(\"You can't subtract more blacktea.\")\n else:\n self.setBlackTea(self.getBlackTea() - amount)\n\n def addMatcha(self, amount: int) ->None:\n self.setMatcha(self.getMatcha() + amount)\n self.setPrice(self.getPrice() + amount * 400)\n\n def addCondensedMilk(self, amount: int) ->None:\n self.setCondensedMilk(self.getCondensedMilk() + amount)\n self.setPrice(self.getPrice() + amount * 500)\n",
"step-5": "from com.kakao.cafe.menu.tea.milkTea import MilkTea\n\n\nclass MatchaMilkTea(MilkTea):\n def __init__(self):\n super().__init__()\n\n self.__matcha = 1\n self.__condensedMilk = 1\n self.name = \"MatchaMilkTea\"\n self.__price = 4500\n self.__milk = 400\n self.__blackTea = 2\n\n def getName(self) -> str:\n return self.name\n\n def setName(self, name: str) -> None:\n self.name = name\n\n def getPrice(self) -> int:\n return self.__price\n\n def setPrice(self, price: int) -> None:\n self.__price = price\n\n def isIced(self) -> bool:\n return self.iced\n\n def setIced(self, iced: bool) -> None:\n self._iced = iced\n\n def getWater(self) -> int:\n pass\n\n def setWater(self, water: int) -> None:\n pass\n\n def getMilk(self) -> int:\n return self.__milk\n\n def setMilk(self, milk: int) -> None:\n self.__milk = milk\n\n def getBlackTea(self) -> int:\n return self.__blackTea\n\n def setBlackTea(self, blacktea: int) -> None:\n self.__blackTea = blacktea\n\n def getMatcha(self) -> int:\n return self.__matcha\n\n def setMatcha(self, matcha: int) -> None:\n self.__matcha = matcha\n\n def getCondensedMilk(self) -> int:\n return self.__condensedMilk\n\n def setCondensedMilk(self, condensedMilk: int) -> None:\n self.__condensedMilk = condensedMilk\n\n def addBlackTea(self, amount: int) -> None:\n self.setBlackTea(self.getBlackTea() + amount)\n self.setPrice(self.getPrice() + amount * 500)\n\n def subBlackTea(self, amount: int) -> None:\n if amount > self.__blackTea:\n raise ValueError\n print(\"You can't subtract more blacktea.\")\n else:\n self.setBlackTea(self.getBlackTea() - amount)\n\n def addMatcha(self, amount: int) -> None:\n self.setMatcha(self.getMatcha() + amount)\n self.setPrice(self.getPrice() + amount * 400)\n\n def addCondensedMilk(self, amount: int) -> None:\n self.setCondensedMilk(self.getCondensedMilk() + amount)\n self.setPrice(self.getPrice() + amount * 500)\n",
"step-ids": [
9,
15,
20,
23,
24
]
}
|
[
9,
15,
20,
23,
24
] |
ALPACA_KEY = 'Enter your apaca key here'
ALPACA_SECRET_KEY = 'Enter your apaca secret key here'
ALPACA_MARKET = 'enter alpaca market link here'
TWILIO_KEY = 'enter your twilio key here'
TWILIO_SECRET_KEY = 'enter your twilio secret key here'
YOUR_PHONE_NUMBER = 'Enter your phone number'
YOUR_TWILIO_NUMBER = 'Enter your twilio phone number'
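# A common alternative (sketch, not required by the snippet): read these from the
# environment instead of hardcoding, e.g. import os; ALPACA_KEY = os.environ['ALPACA_KEY']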
|
normal
|
{
"blob_id": "10cb4b59d1e1e823c56ae5ceea0514b1c1904292",
"index": 3769,
"step-1": "<mask token>\n",
"step-2": "ALPACA_KEY = 'Enter your apaca key here'\nALPACA_SECRET_KEY = 'Enter your apaca secret key here'\nALPACA_MARKET = 'enter alpaca market link here'\nTWILIO_KEY = 'enter your twilio key here'\nTWILIO_SECRET_KEY = 'enter your twilio secret key here'\nYOUR_PHONE_NUMBER = 'Enter your phone number'\nYOUR_TWILIO_NUMBER = 'Enter your twilio phone number'\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
class Solution:
    def uniquePaths(self, A, B):
        # A - rows
        # B - columns
        if A == 0 or B == 0:
            return 0
        # first row and first column each have exactly one path into every cell
        grid = [[1 for _ in range(B)] for _ in range(A)]
        for i in range(1, A):
            for j in range(1, B):
                # paths into (i, j) = paths from above + paths from the left
                grid[i][j] = grid[i-1][j] + grid[i][j-1]
        return grid[A-1][B-1]
s = Solution()
print(s.uniquePaths(2, 2))  # fixed: method is uniquePaths, and print needs parentheses in Python 3
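# Sanity check (a sketch, not in the original): the DP count matches the closed form
# C(A+B-2, A-1); e.g. from math import comb; comb(2, 1) == 2 == Solution().uniquePaths(2, 2)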
|
normal
|
{
"blob_id": "027e53d69cfece0672556e34fa901412e483bc3e",
"index": 8805,
"step-1": "class Solution:\n\n def uniquePaths(self, A, B):\n # A - rows\n # B - columns\n if A == 0 or B == 0:\n return 0\n\n grid = [[1 for _ in range(B)] for _ in range(A)]\n\n for i in range(1, A):\n for j in range(1, B):\n grid[i][j] = grid[i-1][j] + grid[i][j-1]\n\n return grid[A-1][B-1]\n\n\ns = Solution()\n\nprint s.uniquePath(2, 2)",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from flask import Flask, render_template, jsonify, request, make_response #BSD License
import requests #Apache 2.0
#StdLibs
import json
from os import path
import csv
####################################################
#Programmed by Alex Prosdocimo and Matteo Mirandola#
####################################################
application = Flask(__name__)
@application.route("/") # Index
def index():
return make_response(render_template("index.html"))
@application.route("/getGraph", methods=["POST", "GET"])
def getgraph():
    #POST method: responsible for getting the data in json format from the server.
    #The server expects a 'data' field containing the name of a file that exists on the server in the /static/jsons/ folder.
    #If it does not find the file, it returns a 404.
    #If it does not find the 'data' field, it returns a 400.
if request.method == "POST":
if('data' in request.form):
if(path.exists("static/jsons/" + request.form['data'] + ".json")):
with open("static/jsons/" + request.form['data'] + ".json", "r") as file:
jsonStr = file.read()
jsonStr = json.loads(jsonStr)
return jsonify(jsonStr)
else:
return "<h1>404 NOT FOUND"
else:
return "<h1>400 BAD REQUEST"
else:
        #GET method:
        #expects a 'graph' field containing one of the names listed below.
        #In the case of mf and emig it also expects a second field specifying
        #the university or the province.
        #Moreover, iscrittiAtn and mf MAY (but do not have to) carry
        #an extra field that filters the data by a specific year or a specific sex.
if 'graph' in request.args:
        # HBar graph for the provincial hourly wage by education level
if(request.args['graph'] == "pagaOra"):
return make_response(render_template("graphs/pagaOra.html"))
        # Line graph for university enrolments in Veneto by year
elif(request.args['graph'] == "iscrittiAtn"):
if('sex' in request.args):
return make_response(render_template("graphs/iscrittiAtn.html", sex=int(request.args['sex'])))
else:
return make_response(render_template("graphs/iscrittiAtn.html", sex=0))
elif(request.args['graph'] == "disoccupati"):
return make_response(render_template("graphs/disoccupatiGraph.html"))
elif(request.args['graph'] == "iscrittiProv"):
return make_response(render_template("graphs/iscrittiProv.html"))
        # Donut graph for the m/f distribution across universities in Veneto
elif(request.args['graph'] == "mf" and 'atn' in request.args):
dir = "graphs/mf/mf" + request.args['atn'] + ".html"
print(dir)
if(path.exists("templates/" + dir)):
if('year' in request.args):
return make_response(render_template(dir, year=int(request.args['year'])))
else:
return make_response(render_template(dir, year=0))
        # Polar area graph for students who emigrated to other regions
elif(request.args['graph'] == "emig" and "prov" in request.args):
dir = "graphs/emig/iscrittiEmig" + \
request.args['prov'] + ".html"
if(path.exists("templates/" + dir)):
return make_response(render_template(dir))
return "<h1>400 BAD REQUEST"
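    # Example calls (a sketch; assumes the matching templates/json files exist):
    # POST /getGraph with form field data=iscrittiAteneo -> returns static/jsons/iscrittiAteneo.json;
    # GET /getGraph?graph=mf&atn=Padova&year=2018 -> renders templates/graphs/mf/mfPadova.html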
#To update the datasets:
#Due to an error made by MIUR when creating the file on enrolments per university, that
#file cannot be downloaded dynamically and must be replaced manually.
#Likewise, the data obtained from Istat cannot be downloaded dynamically through the API,
#since its performance is limited (besides not allowing the filters needed to obtain the files).
#The provinces dataset is updated automatically every week. The others must be replaced manually.
#Static datasets go in the /static/notUpdating/ folder.
#The dataset on enrolments per university must be downloaded from this link http://dati.ustat.miur.it/dataset/3dd9ca7f-9cc9-4a1a-915c-e569b181dbd5/resource/32d26e28-a0b5-45f3-9152-6072164f3e63/download/iscrittixateneo.csv
#and renamed iscrittiAteneo.csv
#The dataset on students who emigrated from the region was built manually from other data and cannot be updated.
#The datasets on the unemployment rate and the average hourly wage are available on this portal http://dati.istat.it/
#Unfortunately the site's search function is very slow and limited; in any case the two datasets are "Tasso di Disoccupazione - Dati Provinciali"
#and "Retribuzione oraria media per titolo di studio". In both cases, the results must be filtered to the Veneto provinces only.
#The files must be renamed retribuzioneMedia.csv and taxDisocc.csv
#Fortunately, they are only updated yearly.
@application.route("/doUpdate")
def updateData():
    #File with enrolments per university
    #The data is inserted into a dictionary as arrays; the format is shown below
    with open('static/notUpdating/iscrittiAteneo.csv', newline='') as f: #The file name can be changed here if needed, as long as it is a well-formed csv
reader = csv.reader(f)
data = list(reader)[1:]
iscrittiAteneo = {
'Venezia CF': [],
'Verona': [],
'Venezia IUAV': [],
'Padova': []}
for row in data:
row = row[0].split(';')
if row[1] == 'Padova' or 'Venezia C' in row[1] or row[1] == 'Venezia Iuav' or row[1] == 'Verona':
tmp = row[1]
if 'Venezia C' in row[1]:
tmp = 'Venezia CF'
if tmp == 'Venezia Iuav':
tmp = 'Venezia IUAV'
iscrittiAteneo[tmp].append(
row[0] + ';' + row[3] + ';' + row[4])
iscrittiAteneoJson = json.dumps(iscrittiAteneo)
    # Format: {"universityName" : ["academicYear;maleEnrolments;femaleEnrolments",...,...],...,...}
open('static/jsons/iscrittiAteneo.json',
"wb").write(iscrittiAteneoJson.encode())
    # File with students who emigrated to other regions
    with open('static/notUpdating/iscrittiEmig.json', newline='') as f: #The file name can be changed here if needed, as long as it is well-formed json
reader = json.load(f)
iscrittiEmig = {
'vicenza': [],
'verona': [],
'venezia': [],
'padova': [],
'treviso': [],
'belluno': [],
'rovigo': []}
for row in reader['records']:
if row[4].lower() == 'padova' or row[4].lower() == 'vicenza' or row[4].lower() == 'venezia' or row[4].lower() == 'verona' or row[4].lower() == 'treviso' or row[4].lower() == 'belluno' or row[4].lower() == 'rovigo':
iscrittiEmig[row[4].lower()].append(
row[1] + ';' + row[4] + ';' + row[2] + ';' + str(row[6]))
lista = {
'vicenza': [],
'verona': [],
'venezia': [],
'padova': [],
'treviso': [],
'belluno': [],
'rovigo': []
}
count = 0
for key in iscrittiEmig.keys():
while len(iscrittiEmig[key]) > 2:
tmp = iscrittiEmig[key].pop(0).split(';')
if count == 0:
count = int(tmp[3])
tmp2 = iscrittiEmig[key][0].split(';')[2]
if tmp[2] == tmp2:
count += int(tmp[3])
else:
lista[tmp[1].lower()].append(
tmp[0] + ';' + tmp[2] + ';' + str(count))
count = 0
iscrittiEmigJson = json.dumps(lista)
    # Format: {"lowercaseCityName" : ["academicYear;UppercaseCityOfOrigin;RegionOfExodus;studentCount",...,...],...,...}
open('static/jsons/iscrittiEmig.json',
"wb").write(iscrittiEmigJson.encode())
    # File with the average hourly wage by education level
with open('static/notUpdating/retribuzioneMedia.csv', newline='') as f:
reader = csv.reader(f)
data = list(reader)[1:]
retribuzione = {
'Vicenza': [],
'Verona': [],
'Venezia': [],
'Padova': [],
'Treviso': [],
'Belluno': [],
'Rovigo': []}
for row in data:
if (row[1] == 'Padova' or row[1] == 'Vicenza' or row[1] == 'Venezia' or row[1] == 'Verona' or row[1] == 'Treviso' or row[1] == 'Belluno' or row[1] == 'Rovigo') and (row[5] != 'totale') and 'media)' in row[3]:
                # The list is split into education level, average hourly income
tmp = row[5]
if 'nessun' in tmp:
tmp = 'nessuno'
retribuzione[row[1]].append(tmp + ';' + str(row[8]))
retribuzioneMediaJson = json.dumps(retribuzione)
    # Format: {"cityName" : ["laurea;average", "diploma;average", "nessuno;average"],...,...}
open('static/jsons/retribuzioneMedia.json',
"wb").write(retribuzioneMediaJson.encode())
    # Unemployment rate file
    with open('static/notUpdating/taxDisocc.csv', newline='') as f: #The file name can be changed here if needed, as long as it is a well-formed csv
reader = csv.reader(f)
data = list(reader)[1:]
lavoro = {
'Vicenza': [],
'Verona': [],
'Venezia': [],
'Padova': [],
'Treviso': [],
'Belluno': [],
'Rovigo': []}
for row in data:
if (row[7] == '15-24 anni') and row[5] != 'totale':
if row[5] == 'femmine':
lavoro[row[1]].append(str(row[10]))
else:
lavoro[row[1]].append(str(row[8]) + ';' + str(row[10]))
for key in lavoro.keys():
tmp = lavoro[key][0] + ';' + lavoro[key][2]
tmp2 = lavoro[key][1] + ';' + lavoro[key][3]
lavoro[key].clear()
lavoro[key].append(tmp)
lavoro[key].append(tmp2)
disoccupazioneJson = json.dumps(lavoro)
    # Format: {"cityName" : ["year;malePerc;femalePerc","year;malePerc;femalePerc"],...,...}
open('static/jsons/disoccupazione.json',
"wb").write(disoccupazioneJson.encode())
    # File with total enrolments per province
iscritti = requests.get(
'http://dati.ustat.miur.it/dataset/3dd9ca7f-9cc9-4a1a-915c-e569b181dbd5/resource/eae4ee94-0797-41d2-b007-bc6dad3ef3e2/download/iscrittixresidenza.csv', allow_redirects=True)
    open('static/iscrittiProvincia.csv', 'wb').write(iscritti.content) #The file name can be changed here if needed, as long as it is a well-formed csv
with open('static/iscrittiProvincia.csv', newline='') as f:
reader = csv.reader(f)
data = list(reader)[1:]
iscrittiProvincia = {
'vicenza': [],
'verona': [],
'venezia': [],
'padova': [],
'treviso': [],
'belluno': [],
'rovigo': []}
for row in data:
row = row[0].split(';')
if row[2].lower() == 'padova' or row[2].lower() == 'vicenza' or row[2].lower() == 'venezia' or row[2].lower() == 'verona' or row[2].lower() == 'treviso' or row[2].lower() == 'belluno' or row[2].lower() == 'rovigo':
iscrittiProvincia[row[2].lower()].append(
str(row[0]) + ';' + str(int(row[3])+int(row[4])))
iscrittiProvinciaJson = json.dumps(iscrittiProvincia)
    # Format: {"cityName" : ["year;count"],...,...}
open('static/jsons/iscrittiProvincia.json',
"wb").write(iscrittiProvinciaJson.encode())
return "200"
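# The same refresh can also be triggered at runtime (sketch): a GET to /doUpdate
# rewrites the json files under static/jsons/ and returns "200".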
#########
#Startup#
#########
#On every forced restart of the application, the data is refreshed (it takes a few seconds at most)
updateData()
if __name__ == '__main__':
application.run(debug=True, port=80)
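# Note (sketch): debug=True and port 80 suit local testing; a production deployment
# would normally run behind a WSGI server (e.g. gunicorn) with debug disabled.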
|
normal
|
{
"blob_id": "14b9927435536a4b29b0930791ab4525acd80bc9",
"index": 5783,
"step-1": "<mask token>\n\n\[email protected]('/')\ndef index():\n return make_response(render_template('index.html'))\n\n\[email protected]('/getGraph', methods=['POST', 'GET'])\ndef getgraph():\n if request.method == 'POST':\n if 'data' in request.form:\n if path.exists('static/jsons/' + request.form['data'] + '.json'):\n with open('static/jsons/' + request.form['data'] + '.json', 'r'\n ) as file:\n jsonStr = file.read()\n jsonStr = json.loads(jsonStr)\n return jsonify(jsonStr)\n else:\n return '<h1>404 NOT FOUND'\n else:\n return '<h1>400 BAD REQUEST'\n elif 'graph' in request.args:\n if request.args['graph'] == 'pagaOra':\n return make_response(render_template('graphs/pagaOra.html'))\n elif request.args['graph'] == 'iscrittiAtn':\n if 'sex' in request.args:\n return make_response(render_template(\n 'graphs/iscrittiAtn.html', sex=int(request.args['sex'])))\n else:\n return make_response(render_template(\n 'graphs/iscrittiAtn.html', sex=0))\n elif request.args['graph'] == 'disoccupati':\n return make_response(render_template(\n 'graphs/disoccupatiGraph.html'))\n elif request.args['graph'] == 'iscrittiProv':\n return make_response(render_template('graphs/iscrittiProv.html'))\n elif request.args['graph'] == 'mf' and 'atn' in request.args:\n dir = 'graphs/mf/mf' + request.args['atn'] + '.html'\n print(dir)\n if path.exists('templates/' + dir):\n if 'year' in request.args:\n return make_response(render_template(dir, year=int(\n request.args['year'])))\n else:\n return make_response(render_template(dir, year=0))\n elif request.args['graph'] == 'emig' and 'prov' in request.args:\n dir = 'graphs/emig/iscrittiEmig' + request.args['prov'] + '.html'\n if path.exists('templates/' + dir):\n return make_response(render_template(dir))\n return '<h1>400 BAD REQUEST'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/')\ndef index():\n return make_response(render_template('index.html'))\n\n\[email protected]('/getGraph', methods=['POST', 'GET'])\ndef getgraph():\n if request.method == 'POST':\n if 'data' in request.form:\n if path.exists('static/jsons/' + request.form['data'] + '.json'):\n with open('static/jsons/' + request.form['data'] + '.json', 'r'\n ) as file:\n jsonStr = file.read()\n jsonStr = json.loads(jsonStr)\n return jsonify(jsonStr)\n else:\n return '<h1>404 NOT FOUND'\n else:\n return '<h1>400 BAD REQUEST'\n elif 'graph' in request.args:\n if request.args['graph'] == 'pagaOra':\n return make_response(render_template('graphs/pagaOra.html'))\n elif request.args['graph'] == 'iscrittiAtn':\n if 'sex' in request.args:\n return make_response(render_template(\n 'graphs/iscrittiAtn.html', sex=int(request.args['sex'])))\n else:\n return make_response(render_template(\n 'graphs/iscrittiAtn.html', sex=0))\n elif request.args['graph'] == 'disoccupati':\n return make_response(render_template(\n 'graphs/disoccupatiGraph.html'))\n elif request.args['graph'] == 'iscrittiProv':\n return make_response(render_template('graphs/iscrittiProv.html'))\n elif request.args['graph'] == 'mf' and 'atn' in request.args:\n dir = 'graphs/mf/mf' + request.args['atn'] + '.html'\n print(dir)\n if path.exists('templates/' + dir):\n if 'year' in request.args:\n return make_response(render_template(dir, year=int(\n request.args['year'])))\n else:\n return make_response(render_template(dir, year=0))\n elif request.args['graph'] == 'emig' and 'prov' in request.args:\n dir = 'graphs/emig/iscrittiEmig' + request.args['prov'] + '.html'\n if path.exists('templates/' + dir):\n return make_response(render_template(dir))\n return '<h1>400 BAD REQUEST'\n\n\[email protected]('/doUpdate')\ndef updateData():\n with open('static/notUpdating/iscrittiAteneo.csv', newline='') as f:\n reader = csv.reader(f)\n data = list(reader)[1:]\n iscrittiAteneo = {'Venezia CF': [], 'Verona': [], 'Venezia IUAV': [\n ], 'Padova': []}\n for row in data:\n row = row[0].split(';')\n if row[1] == 'Padova' or 'Venezia C' in row[1] or row[1\n ] == 'Venezia Iuav' or row[1] == 'Verona':\n tmp = row[1]\n if 'Venezia C' in row[1]:\n tmp = 'Venezia CF'\n if tmp == 'Venezia Iuav':\n tmp = 'Venezia IUAV'\n iscrittiAteneo[tmp].append(row[0] + ';' + row[3] + ';' + row[4]\n )\n iscrittiAteneoJson = json.dumps(iscrittiAteneo)\n open('static/jsons/iscrittiAteneo.json', 'wb').write(iscrittiAteneoJson\n .encode())\n with open('static/notUpdating/iscrittiEmig.json', newline='') as f:\n reader = json.load(f)\n iscrittiEmig = {'vicenza': [], 'verona': [], 'venezia': [],\n 'padova': [], 'treviso': [], 'belluno': [], 'rovigo': []}\n for row in reader['records']:\n if row[4].lower() == 'padova' or row[4].lower(\n ) == 'vicenza' or row[4].lower() == 'venezia' or row[4].lower(\n ) == 'verona' or row[4].lower() == 'treviso' or row[4].lower(\n ) == 'belluno' or row[4].lower() == 'rovigo':\n iscrittiEmig[row[4].lower()].append(row[1] + ';' + row[4] +\n ';' + row[2] + ';' + str(row[6]))\n lista = {'vicenza': [], 'verona': [], 'venezia': [], 'padova': [],\n 'treviso': [], 'belluno': [], 'rovigo': []}\n count = 0\n for key in iscrittiEmig.keys():\n while len(iscrittiEmig[key]) > 2:\n tmp = iscrittiEmig[key].pop(0).split(';')\n if count == 0:\n count = int(tmp[3])\n tmp2 = iscrittiEmig[key][0].split(';')[2]\n if tmp[2] == tmp2:\n count += int(tmp[3])\n else:\n lista[tmp[1].lower()].append(tmp[0] + ';' + tmp[2] +\n ';' + str(count))\n count = 
0\n iscrittiEmigJson = json.dumps(lista)\n open('static/jsons/iscrittiEmig.json', 'wb').write(iscrittiEmigJson.\n encode())\n with open('static/notUpdating/retribuzioneMedia.csv', newline='') as f:\n reader = csv.reader(f)\n data = list(reader)[1:]\n retribuzione = {'Vicenza': [], 'Verona': [], 'Venezia': [],\n 'Padova': [], 'Treviso': [], 'Belluno': [], 'Rovigo': []}\n for row in data:\n if (row[1] == 'Padova' or row[1] == 'Vicenza' or row[1] ==\n 'Venezia' or row[1] == 'Verona' or row[1] == 'Treviso' or \n row[1] == 'Belluno' or row[1] == 'Rovigo') and row[5\n ] != 'totale' and 'media)' in row[3]:\n tmp = row[5]\n if 'nessun' in tmp:\n tmp = 'nessuno'\n retribuzione[row[1]].append(tmp + ';' + str(row[8]))\n retribuzioneMediaJson = json.dumps(retribuzione)\n open('static/jsons/retribuzioneMedia.json', 'wb').write(\n retribuzioneMediaJson.encode())\n with open('static/notUpdating/taxDisocc.csv', newline='') as f:\n reader = csv.reader(f)\n data = list(reader)[1:]\n lavoro = {'Vicenza': [], 'Verona': [], 'Venezia': [], 'Padova': [],\n 'Treviso': [], 'Belluno': [], 'Rovigo': []}\n for row in data:\n if row[7] == '15-24 anni' and row[5] != 'totale':\n if row[5] == 'femmine':\n lavoro[row[1]].append(str(row[10]))\n else:\n lavoro[row[1]].append(str(row[8]) + ';' + str(row[10]))\n for key in lavoro.keys():\n tmp = lavoro[key][0] + ';' + lavoro[key][2]\n tmp2 = lavoro[key][1] + ';' + lavoro[key][3]\n lavoro[key].clear()\n lavoro[key].append(tmp)\n lavoro[key].append(tmp2)\n disoccupazioneJson = json.dumps(lavoro)\n open('static/jsons/disoccupazione.json', 'wb').write(disoccupazioneJson\n .encode())\n iscritti = requests.get(\n 'http://dati.ustat.miur.it/dataset/3dd9ca7f-9cc9-4a1a-915c-e569b181dbd5/resource/eae4ee94-0797-41d2-b007-bc6dad3ef3e2/download/iscrittixresidenza.csv'\n , allow_redirects=True)\n open('static/iscrittiProvincia.csv', 'wb').write(iscritti.content)\n with open('static/iscrittiProvincia.csv', newline='') as f:\n reader = csv.reader(f)\n data = list(reader)[1:]\n iscrittiProvincia = {'vicenza': [], 'verona': [], 'venezia': [],\n 'padova': [], 'treviso': [], 'belluno': [], 'rovigo': []}\n for row in data:\n row = row[0].split(';')\n if row[2].lower() == 'padova' or row[2].lower(\n ) == 'vicenza' or row[2].lower() == 'venezia' or row[2].lower(\n ) == 'verona' or row[2].lower() == 'treviso' or row[2].lower(\n ) == 'belluno' or row[2].lower() == 'rovigo':\n iscrittiProvincia[row[2].lower()].append(str(row[0]) + ';' +\n str(int(row[3]) + int(row[4])))\n iscrittiProvinciaJson = json.dumps(iscrittiProvincia)\n open('static/jsons/iscrittiProvincia.json', 'wb').write(\n iscrittiProvinciaJson.encode())\n return '200'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\[email protected]('/')\ndef index():\n return make_response(render_template('index.html'))\n\n\[email protected]('/getGraph', methods=['POST', 'GET'])\ndef getgraph():\n if request.method == 'POST':\n if 'data' in request.form:\n if path.exists('static/jsons/' + request.form['data'] + '.json'):\n with open('static/jsons/' + request.form['data'] + '.json', 'r'\n ) as file:\n jsonStr = file.read()\n jsonStr = json.loads(jsonStr)\n return jsonify(jsonStr)\n else:\n return '<h1>404 NOT FOUND'\n else:\n return '<h1>400 BAD REQUEST'\n elif 'graph' in request.args:\n if request.args['graph'] == 'pagaOra':\n return make_response(render_template('graphs/pagaOra.html'))\n elif request.args['graph'] == 'iscrittiAtn':\n if 'sex' in request.args:\n return make_response(render_template(\n 'graphs/iscrittiAtn.html', sex=int(request.args['sex'])))\n else:\n return make_response(render_template(\n 'graphs/iscrittiAtn.html', sex=0))\n elif request.args['graph'] == 'disoccupati':\n return make_response(render_template(\n 'graphs/disoccupatiGraph.html'))\n elif request.args['graph'] == 'iscrittiProv':\n return make_response(render_template('graphs/iscrittiProv.html'))\n elif request.args['graph'] == 'mf' and 'atn' in request.args:\n dir = 'graphs/mf/mf' + request.args['atn'] + '.html'\n print(dir)\n if path.exists('templates/' + dir):\n if 'year' in request.args:\n return make_response(render_template(dir, year=int(\n request.args['year'])))\n else:\n return make_response(render_template(dir, year=0))\n elif request.args['graph'] == 'emig' and 'prov' in request.args:\n dir = 'graphs/emig/iscrittiEmig' + request.args['prov'] + '.html'\n if path.exists('templates/' + dir):\n return make_response(render_template(dir))\n return '<h1>400 BAD REQUEST'\n\n\[email protected]('/doUpdate')\ndef updateData():\n with open('static/notUpdating/iscrittiAteneo.csv', newline='') as f:\n reader = csv.reader(f)\n data = list(reader)[1:]\n iscrittiAteneo = {'Venezia CF': [], 'Verona': [], 'Venezia IUAV': [\n ], 'Padova': []}\n for row in data:\n row = row[0].split(';')\n if row[1] == 'Padova' or 'Venezia C' in row[1] or row[1\n ] == 'Venezia Iuav' or row[1] == 'Verona':\n tmp = row[1]\n if 'Venezia C' in row[1]:\n tmp = 'Venezia CF'\n if tmp == 'Venezia Iuav':\n tmp = 'Venezia IUAV'\n iscrittiAteneo[tmp].append(row[0] + ';' + row[3] + ';' + row[4]\n )\n iscrittiAteneoJson = json.dumps(iscrittiAteneo)\n open('static/jsons/iscrittiAteneo.json', 'wb').write(iscrittiAteneoJson\n .encode())\n with open('static/notUpdating/iscrittiEmig.json', newline='') as f:\n reader = json.load(f)\n iscrittiEmig = {'vicenza': [], 'verona': [], 'venezia': [],\n 'padova': [], 'treviso': [], 'belluno': [], 'rovigo': []}\n for row in reader['records']:\n if row[4].lower() == 'padova' or row[4].lower(\n ) == 'vicenza' or row[4].lower() == 'venezia' or row[4].lower(\n ) == 'verona' or row[4].lower() == 'treviso' or row[4].lower(\n ) == 'belluno' or row[4].lower() == 'rovigo':\n iscrittiEmig[row[4].lower()].append(row[1] + ';' + row[4] +\n ';' + row[2] + ';' + str(row[6]))\n lista = {'vicenza': [], 'verona': [], 'venezia': [], 'padova': [],\n 'treviso': [], 'belluno': [], 'rovigo': []}\n count = 0\n for key in iscrittiEmig.keys():\n while len(iscrittiEmig[key]) > 2:\n tmp = iscrittiEmig[key].pop(0).split(';')\n if count == 0:\n count = int(tmp[3])\n tmp2 = iscrittiEmig[key][0].split(';')[2]\n if tmp[2] == tmp2:\n count += int(tmp[3])\n else:\n lista[tmp[1].lower()].append(tmp[0] + ';' + tmp[2] +\n ';' + str(count))\n count = 
0\n iscrittiEmigJson = json.dumps(lista)\n open('static/jsons/iscrittiEmig.json', 'wb').write(iscrittiEmigJson.\n encode())\n with open('static/notUpdating/retribuzioneMedia.csv', newline='') as f:\n reader = csv.reader(f)\n data = list(reader)[1:]\n retribuzione = {'Vicenza': [], 'Verona': [], 'Venezia': [],\n 'Padova': [], 'Treviso': [], 'Belluno': [], 'Rovigo': []}\n for row in data:\n if (row[1] == 'Padova' or row[1] == 'Vicenza' or row[1] ==\n 'Venezia' or row[1] == 'Verona' or row[1] == 'Treviso' or \n row[1] == 'Belluno' or row[1] == 'Rovigo') and row[5\n ] != 'totale' and 'media)' in row[3]:\n tmp = row[5]\n if 'nessun' in tmp:\n tmp = 'nessuno'\n retribuzione[row[1]].append(tmp + ';' + str(row[8]))\n retribuzioneMediaJson = json.dumps(retribuzione)\n open('static/jsons/retribuzioneMedia.json', 'wb').write(\n retribuzioneMediaJson.encode())\n with open('static/notUpdating/taxDisocc.csv', newline='') as f:\n reader = csv.reader(f)\n data = list(reader)[1:]\n lavoro = {'Vicenza': [], 'Verona': [], 'Venezia': [], 'Padova': [],\n 'Treviso': [], 'Belluno': [], 'Rovigo': []}\n for row in data:\n if row[7] == '15-24 anni' and row[5] != 'totale':\n if row[5] == 'femmine':\n lavoro[row[1]].append(str(row[10]))\n else:\n lavoro[row[1]].append(str(row[8]) + ';' + str(row[10]))\n for key in lavoro.keys():\n tmp = lavoro[key][0] + ';' + lavoro[key][2]\n tmp2 = lavoro[key][1] + ';' + lavoro[key][3]\n lavoro[key].clear()\n lavoro[key].append(tmp)\n lavoro[key].append(tmp2)\n disoccupazioneJson = json.dumps(lavoro)\n open('static/jsons/disoccupazione.json', 'wb').write(disoccupazioneJson\n .encode())\n iscritti = requests.get(\n 'http://dati.ustat.miur.it/dataset/3dd9ca7f-9cc9-4a1a-915c-e569b181dbd5/resource/eae4ee94-0797-41d2-b007-bc6dad3ef3e2/download/iscrittixresidenza.csv'\n , allow_redirects=True)\n open('static/iscrittiProvincia.csv', 'wb').write(iscritti.content)\n with open('static/iscrittiProvincia.csv', newline='') as f:\n reader = csv.reader(f)\n data = list(reader)[1:]\n iscrittiProvincia = {'vicenza': [], 'verona': [], 'venezia': [],\n 'padova': [], 'treviso': [], 'belluno': [], 'rovigo': []}\n for row in data:\n row = row[0].split(';')\n if row[2].lower() == 'padova' or row[2].lower(\n ) == 'vicenza' or row[2].lower() == 'venezia' or row[2].lower(\n ) == 'verona' or row[2].lower() == 'treviso' or row[2].lower(\n ) == 'belluno' or row[2].lower() == 'rovigo':\n iscrittiProvincia[row[2].lower()].append(str(row[0]) + ';' +\n str(int(row[3]) + int(row[4])))\n iscrittiProvinciaJson = json.dumps(iscrittiProvincia)\n open('static/jsons/iscrittiProvincia.json', 'wb').write(\n iscrittiProvinciaJson.encode())\n return '200'\n\n\nupdateData()\nif __name__ == '__main__':\n application.run(debug=True, port=80)\n",
"step-4": "from flask import Flask, render_template, jsonify, request, make_response\nimport requests\nimport json\nfrom os import path\nimport csv\napplication = Flask(__name__)\n\n\[email protected]('/')\ndef index():\n return make_response(render_template('index.html'))\n\n\[email protected]('/getGraph', methods=['POST', 'GET'])\ndef getgraph():\n if request.method == 'POST':\n if 'data' in request.form:\n if path.exists('static/jsons/' + request.form['data'] + '.json'):\n with open('static/jsons/' + request.form['data'] + '.json', 'r'\n ) as file:\n jsonStr = file.read()\n jsonStr = json.loads(jsonStr)\n return jsonify(jsonStr)\n else:\n return '<h1>404 NOT FOUND'\n else:\n return '<h1>400 BAD REQUEST'\n elif 'graph' in request.args:\n if request.args['graph'] == 'pagaOra':\n return make_response(render_template('graphs/pagaOra.html'))\n elif request.args['graph'] == 'iscrittiAtn':\n if 'sex' in request.args:\n return make_response(render_template(\n 'graphs/iscrittiAtn.html', sex=int(request.args['sex'])))\n else:\n return make_response(render_template(\n 'graphs/iscrittiAtn.html', sex=0))\n elif request.args['graph'] == 'disoccupati':\n return make_response(render_template(\n 'graphs/disoccupatiGraph.html'))\n elif request.args['graph'] == 'iscrittiProv':\n return make_response(render_template('graphs/iscrittiProv.html'))\n elif request.args['graph'] == 'mf' and 'atn' in request.args:\n dir = 'graphs/mf/mf' + request.args['atn'] + '.html'\n print(dir)\n if path.exists('templates/' + dir):\n if 'year' in request.args:\n return make_response(render_template(dir, year=int(\n request.args['year'])))\n else:\n return make_response(render_template(dir, year=0))\n elif request.args['graph'] == 'emig' and 'prov' in request.args:\n dir = 'graphs/emig/iscrittiEmig' + request.args['prov'] + '.html'\n if path.exists('templates/' + dir):\n return make_response(render_template(dir))\n return '<h1>400 BAD REQUEST'\n\n\[email protected]('/doUpdate')\ndef updateData():\n with open('static/notUpdating/iscrittiAteneo.csv', newline='') as f:\n reader = csv.reader(f)\n data = list(reader)[1:]\n iscrittiAteneo = {'Venezia CF': [], 'Verona': [], 'Venezia IUAV': [\n ], 'Padova': []}\n for row in data:\n row = row[0].split(';')\n if row[1] == 'Padova' or 'Venezia C' in row[1] or row[1\n ] == 'Venezia Iuav' or row[1] == 'Verona':\n tmp = row[1]\n if 'Venezia C' in row[1]:\n tmp = 'Venezia CF'\n if tmp == 'Venezia Iuav':\n tmp = 'Venezia IUAV'\n iscrittiAteneo[tmp].append(row[0] + ';' + row[3] + ';' + row[4]\n )\n iscrittiAteneoJson = json.dumps(iscrittiAteneo)\n open('static/jsons/iscrittiAteneo.json', 'wb').write(iscrittiAteneoJson\n .encode())\n with open('static/notUpdating/iscrittiEmig.json', newline='') as f:\n reader = json.load(f)\n iscrittiEmig = {'vicenza': [], 'verona': [], 'venezia': [],\n 'padova': [], 'treviso': [], 'belluno': [], 'rovigo': []}\n for row in reader['records']:\n if row[4].lower() == 'padova' or row[4].lower(\n ) == 'vicenza' or row[4].lower() == 'venezia' or row[4].lower(\n ) == 'verona' or row[4].lower() == 'treviso' or row[4].lower(\n ) == 'belluno' or row[4].lower() == 'rovigo':\n iscrittiEmig[row[4].lower()].append(row[1] + ';' + row[4] +\n ';' + row[2] + ';' + str(row[6]))\n lista = {'vicenza': [], 'verona': [], 'venezia': [], 'padova': [],\n 'treviso': [], 'belluno': [], 'rovigo': []}\n count = 0\n for key in iscrittiEmig.keys():\n while len(iscrittiEmig[key]) > 2:\n tmp = iscrittiEmig[key].pop(0).split(';')\n if count == 0:\n count = int(tmp[3])\n tmp2 = 
iscrittiEmig[key][0].split(';')[2]\n if tmp[2] == tmp2:\n count += int(tmp[3])\n else:\n lista[tmp[1].lower()].append(tmp[0] + ';' + tmp[2] +\n ';' + str(count))\n count = 0\n iscrittiEmigJson = json.dumps(lista)\n open('static/jsons/iscrittiEmig.json', 'wb').write(iscrittiEmigJson.\n encode())\n with open('static/notUpdating/retribuzioneMedia.csv', newline='') as f:\n reader = csv.reader(f)\n data = list(reader)[1:]\n retribuzione = {'Vicenza': [], 'Verona': [], 'Venezia': [],\n 'Padova': [], 'Treviso': [], 'Belluno': [], 'Rovigo': []}\n for row in data:\n if (row[1] == 'Padova' or row[1] == 'Vicenza' or row[1] ==\n 'Venezia' or row[1] == 'Verona' or row[1] == 'Treviso' or \n row[1] == 'Belluno' or row[1] == 'Rovigo') and row[5\n ] != 'totale' and 'media)' in row[3]:\n tmp = row[5]\n if 'nessun' in tmp:\n tmp = 'nessuno'\n retribuzione[row[1]].append(tmp + ';' + str(row[8]))\n retribuzioneMediaJson = json.dumps(retribuzione)\n open('static/jsons/retribuzioneMedia.json', 'wb').write(\n retribuzioneMediaJson.encode())\n with open('static/notUpdating/taxDisocc.csv', newline='') as f:\n reader = csv.reader(f)\n data = list(reader)[1:]\n lavoro = {'Vicenza': [], 'Verona': [], 'Venezia': [], 'Padova': [],\n 'Treviso': [], 'Belluno': [], 'Rovigo': []}\n for row in data:\n if row[7] == '15-24 anni' and row[5] != 'totale':\n if row[5] == 'femmine':\n lavoro[row[1]].append(str(row[10]))\n else:\n lavoro[row[1]].append(str(row[8]) + ';' + str(row[10]))\n for key in lavoro.keys():\n tmp = lavoro[key][0] + ';' + lavoro[key][2]\n tmp2 = lavoro[key][1] + ';' + lavoro[key][3]\n lavoro[key].clear()\n lavoro[key].append(tmp)\n lavoro[key].append(tmp2)\n disoccupazioneJson = json.dumps(lavoro)\n open('static/jsons/disoccupazione.json', 'wb').write(disoccupazioneJson\n .encode())\n iscritti = requests.get(\n 'http://dati.ustat.miur.it/dataset/3dd9ca7f-9cc9-4a1a-915c-e569b181dbd5/resource/eae4ee94-0797-41d2-b007-bc6dad3ef3e2/download/iscrittixresidenza.csv'\n , allow_redirects=True)\n open('static/iscrittiProvincia.csv', 'wb').write(iscritti.content)\n with open('static/iscrittiProvincia.csv', newline='') as f:\n reader = csv.reader(f)\n data = list(reader)[1:]\n iscrittiProvincia = {'vicenza': [], 'verona': [], 'venezia': [],\n 'padova': [], 'treviso': [], 'belluno': [], 'rovigo': []}\n for row in data:\n row = row[0].split(';')\n if row[2].lower() == 'padova' or row[2].lower(\n ) == 'vicenza' or row[2].lower() == 'venezia' or row[2].lower(\n ) == 'verona' or row[2].lower() == 'treviso' or row[2].lower(\n ) == 'belluno' or row[2].lower() == 'rovigo':\n iscrittiProvincia[row[2].lower()].append(str(row[0]) + ';' +\n str(int(row[3]) + int(row[4])))\n iscrittiProvinciaJson = json.dumps(iscrittiProvincia)\n open('static/jsons/iscrittiProvincia.json', 'wb').write(\n iscrittiProvinciaJson.encode())\n return '200'\n\n\nupdateData()\nif __name__ == '__main__':\n application.run(debug=True, port=80)\n",
"step-5": "from flask import Flask, render_template, jsonify, request, make_response #BSD License\r\nimport requests #Apache 2.0\r\n\r\n#StdLibs\r\nimport json\r\nfrom os import path\r\n\r\nimport csv\r\n\r\n###################################################\r\n#Programmato da Alex Prosdocimo e Matteo Mirandola#\r\n###################################################\r\n\r\napplication = Flask(__name__)\r\n\r\n\r\[email protected](\"/\") # Index\r\ndef index():\r\n return make_response(render_template(\"index.html\"))\r\n\r\n\r\[email protected](\"/getGraph\", methods=[\"POST\", \"GET\"])\r\ndef getgraph():\r\n #Metodo POST: responsabile di ottnere i dati in formato json dal server. \r\n #Il server si aspetta un campo data che contenga il nome di un file esistente nel server nella cartella /static/json/\r\n #Se non trova il file da un 404\r\n #Se non trova il campo data da un 400\r\n if request.method == \"POST\":\r\n if('data' in request.form):\r\n if(path.exists(\"static/jsons/\" + request.form['data'] + \".json\")):\r\n with open(\"static/jsons/\" + request.form['data'] + \".json\", \"r\") as file:\r\n jsonStr = file.read()\r\n jsonStr = json.loads(jsonStr)\r\n return jsonify(jsonStr)\r\n else:\r\n return \"<h1>404 NOT FOUND\"\r\n else:\r\n return \"<h1>400 BAD REQUEST\"\r\n else:\r\n #Metodo GET:\r\n #si aspetta un campo graph che contenga uno dei nomi sotto presenti\r\n #nel caso di mf e emig si aspetta anche un secondo campo che specifichi\r\n #l'università o la provincia-\r\n #Inoltre, iscrittiAtn e mf POSSONO (ma non devono necessariamente) avere\r\n #un campo aggiuntivo che filtri i dati di uno specifico anno o per uno specifico sesso2\r\n if 'graph' in request.args:\r\n\r\n # HBar Graph per la paga oraria provinciale a seconda del livello di istruzione\r\n if(request.args['graph'] == \"pagaOra\"):\r\n return make_response(render_template(\"graphs/pagaOra.html\"))\r\n\r\n # Line Graph per gli iscritti alle università nel veneto per anno\r\n elif(request.args['graph'] == \"iscrittiAtn\"):\r\n if('sex' in request.args):\r\n return make_response(render_template(\"graphs/iscrittiAtn.html\", sex=int(request.args['sex'])))\r\n else:\r\n return make_response(render_template(\"graphs/iscrittiAtn.html\", sex=0))\r\n\r\n elif(request.args['graph'] == \"disoccupati\"):\r\n return make_response(render_template(\"graphs/disoccupatiGraph.html\"))\r\n\r\n elif(request.args['graph'] == \"iscrittiProv\"):\r\n return make_response(render_template(\"graphs/iscrittiProv.html\"))\r\n\r\n # Donut Graph per la distribuzione di m/f nelle università in veneto\r\n elif(request.args['graph'] == \"mf\" and 'atn' in request.args):\r\n dir = \"graphs/mf/mf\" + request.args['atn'] + \".html\"\r\n print(dir)\r\n if(path.exists(\"templates/\" + dir)):\r\n if('year' in request.args):\r\n return make_response(render_template(dir, year=int(request.args['year'])))\r\n else:\r\n return make_response(render_template(dir, year=0))\r\n\r\n # Polar Area Graph per gli studenti emigrati in altre regioni\r\n elif(request.args['graph'] == \"emig\" and \"prov\" in request.args):\r\n dir = \"graphs/emig/iscrittiEmig\" + \\\r\n request.args['prov'] + \".html\"\r\n if(path.exists(\"templates/\" + dir)):\r\n return make_response(render_template(dir))\r\n\r\n return \"<h1>400 BAD REQUEST\"\r\n\r\n#Per aggiornare i dataset:\r\n#A causa di un errore nella creazione del file riguardante gli iscritti per ogni ateneo da parte del MIUR il file\r\n#riguardante gli iscritti per ateneo non sono scaricabili dinamicamente e va sostituito 
manualmente.\r\n#Allo stesso modo, i dati ottenuti tramite l'istat non sono scaricabili dinamicamente tramite la api in quanto\r\n#le sue prestazioni sono limitate (oltre a non permettere i filtri necessari per ottenere i file).\r\n#Il dataset delle provincie viene aggiornato automaticamente ogni settimana. Gli altri vanno sostituiti manualmente.\r\n\r\n#I dataset statici vanno inseriti nella cartella /static/notUpdating/\r\n#Il dataset riguardante gli iscritti per ateneo va scaricato a questo link http://dati.ustat.miur.it/dataset/3dd9ca7f-9cc9-4a1a-915c-e569b181dbd5/resource/32d26e28-a0b5-45f3-9152-6072164f3e63/download/iscrittixateneo.csv\r\n#e rinominato iscrittiAteneo.csv\r\n\r\n#Il dataset riguardante gli iscritti emigrati dalla regione è stato creato manualmente a partire da altri dati e non può essere aggiornato\r\n\r\n#I dataset riguardanti la percentuale di disoccupazione e la retribuzione oraria media sono reperibili a questo portale http://dati.istat.it/\r\n#Sfortunatamente la funzione di ricerca del sito è molto lenta e limitata, comunque sia i due data set sono \"Tasso di Disoccupazione - Dati Provinciali\"\r\n#e \"Retribuzione oraria media per titolo di studio\". In entrambi i casi, è necessario filtrare i risultati per le sole provincie del Veneto.\r\n#I file vanno rinominati retribuzioneMedia.csv e taxDisocc.csv\r\n\r\n#Fortunatamente, si aggiornano solo annualmente\r\n\r\[email protected](\"/doUpdate\")\r\ndef updateData():\r\n #File iscritti per ateneo\r\n #I dati vengono inseriti in un dizionario come array, il formato è più sotto\r\n with open('static/notUpdating/iscrittiAteneo.csv', newline='') as f: #Qui si può cambiare il nome del file se necessario, basta che sia in formato csv corretto\r\n reader = csv.reader(f)\r\n data = list(reader)[1:]\r\n iscrittiAteneo = {\r\n 'Venezia CF': [],\r\n 'Verona': [],\r\n 'Venezia IUAV': [],\r\n 'Padova': []}\r\n\r\n for row in data:\r\n row = row[0].split(';')\r\n if row[1] == 'Padova' or 'Venezia C' in row[1] or row[1] == 'Venezia Iuav' or row[1] == 'Verona':\r\n tmp = row[1]\r\n if 'Venezia C' in row[1]:\r\n tmp = 'Venezia CF'\r\n if tmp == 'Venezia Iuav':\r\n tmp = 'Venezia IUAV'\r\n iscrittiAteneo[tmp].append(\r\n row[0] + ';' + row[3] + ';' + row[4])\r\n\r\n iscrittiAteneoJson = json.dumps(iscrittiAteneo)\r\n # Formato: {\"nomeAteneo\" : [\"annoScolastico;numeroIscrittiMaschi;numeroIscrittiFemmine\",...,...],...,...}\r\n open('static/jsons/iscrittiAteneo.json',\r\n \"wb\").write(iscrittiAteneoJson.encode())\r\n\r\n # File iscritti emigrati in altre regioni\r\n with open('static/notUpdating/iscrittiEmig.json', newline='') as f: #Qui si può cambiare il nome del file se necessario, basta che sia in formato csv corretto\r\n\r\n reader = json.load(f)\r\n iscrittiEmig = {\r\n 'vicenza': [],\r\n 'verona': [],\r\n 'venezia': [],\r\n 'padova': [],\r\n 'treviso': [],\r\n 'belluno': [],\r\n 'rovigo': []}\r\n\r\n for row in reader['records']:\r\n if row[4].lower() == 'padova' or row[4].lower() == 'vicenza' or row[4].lower() == 'venezia' or row[4].lower() == 'verona' or row[4].lower() == 'treviso' or row[4].lower() == 'belluno' or row[4].lower() == 'rovigo':\r\n iscrittiEmig[row[4].lower()].append(\r\n row[1] + ';' + row[4] + ';' + row[2] + ';' + str(row[6]))\r\n lista = {\r\n 'vicenza': [],\r\n 'verona': [],\r\n 'venezia': [],\r\n 'padova': [],\r\n 'treviso': [],\r\n 'belluno': [],\r\n 'rovigo': []\r\n }\r\n count = 0\r\n\r\n for key in iscrittiEmig.keys():\r\n while len(iscrittiEmig[key]) > 2:\r\n tmp = 
iscrittiEmig[key].pop(0).split(';')\r\n if count == 0:\r\n count = int(tmp[3])\r\n tmp2 = iscrittiEmig[key][0].split(';')[2]\r\n if tmp[2] == tmp2:\r\n\r\n count += int(tmp[3])\r\n\r\n else:\r\n lista[tmp[1].lower()].append(\r\n tmp[0] + ';' + tmp[2] + ';' + str(count))\r\n count = 0\r\n\r\n iscrittiEmigJson = json.dumps(lista)\r\n # Formato: {\"cittàInMinuscolo\" : [\"annoScolastico;CittàDiProvenienzaInMaiuscolo;RegioneDiEsodo;NumeroStudenti\",...,...],...,...}\r\n open('static/jsons/iscrittiEmig.json',\r\n \"wb\").write(iscrittiEmigJson.encode())\r\n # File paga media oraria per titolo di studio\r\n with open('static/notUpdating/retribuzioneMedia.csv', newline='') as f:\r\n reader = csv.reader(f)\r\n data = list(reader)[1:]\r\n retribuzione = {\r\n 'Vicenza': [],\r\n 'Verona': [],\r\n 'Venezia': [],\r\n 'Padova': [],\r\n 'Treviso': [],\r\n 'Belluno': [],\r\n 'Rovigo': []}\r\n\r\n for row in data:\r\n if (row[1] == 'Padova' or row[1] == 'Vicenza' or row[1] == 'Venezia' or row[1] == 'Verona' or row[1] == 'Treviso' or row[1] == 'Belluno' or row[1] == 'Rovigo') and (row[5] != 'totale') and 'media)' in row[3]:\r\n # La lista è divisa in titolo di studio, reddito medio orario\r\n tmp = row[5]\r\n if 'nessun' in tmp:\r\n tmp = 'nessuno'\r\n retribuzione[row[1]].append(tmp + ';' + str(row[8]))\r\n\r\n retribuzioneMediaJson = json.dumps(retribuzione)\r\n # Formato: {\"nomeCittà\" : [\"laurea;media\", \"diploma;media\", \"nulla;media\"],...,...}\r\n open('static/jsons/retribuzioneMedia.json',\r\n \"wb\").write(retribuzioneMediaJson.encode())\r\n\r\n # File %disoccupazione\r\n with open('static/notUpdating/taxDisocc.csv', newline='') as f: #Qui si può cambiare il nome del file se necessario, basta che sia in formato csv corretto\r\n reader = csv.reader(f)\r\n data = list(reader)[1:]\r\n lavoro = {\r\n 'Vicenza': [],\r\n 'Verona': [],\r\n 'Venezia': [],\r\n 'Padova': [],\r\n 'Treviso': [],\r\n 'Belluno': [],\r\n 'Rovigo': []}\r\n\r\n for row in data:\r\n if (row[7] == '15-24 anni') and row[5] != 'totale':\r\n if row[5] == 'femmine':\r\n lavoro[row[1]].append(str(row[10]))\r\n else:\r\n lavoro[row[1]].append(str(row[8]) + ';' + str(row[10]))\r\n for key in lavoro.keys():\r\n tmp = lavoro[key][0] + ';' + lavoro[key][2]\r\n tmp2 = lavoro[key][1] + ';' + lavoro[key][3]\r\n lavoro[key].clear()\r\n lavoro[key].append(tmp)\r\n lavoro[key].append(tmp2)\r\n\r\n disoccupazioneJson = json.dumps(lavoro)\r\n # Formato: {\"nomeCittà\" : [\"anno;percMaschi;percFemmine\",\"anno;percMaschi;percFemmine\"x],...,...}\r\n open('static/jsons/disoccupazione.json',\r\n \"wb\").write(disoccupazioneJson.encode())\r\n\r\n # File iscritti totali per provincia\r\n iscritti = requests.get(\r\n 'http://dati.ustat.miur.it/dataset/3dd9ca7f-9cc9-4a1a-915c-e569b181dbd5/resource/eae4ee94-0797-41d2-b007-bc6dad3ef3e2/download/iscrittixresidenza.csv', allow_redirects=True)\r\n open('static/iscrittiProvincia.csv', 'wb').write(iscritti.content) #Qui si può cambiare il nome del file se necessario, basta che sia in formato csv corretto\r\n with open('static/iscrittiProvincia.csv', newline='') as f:\r\n reader = csv.reader(f)\r\n data = list(reader)[1:]\r\n\r\n iscrittiProvincia = {\r\n 'vicenza': [],\r\n 'verona': [],\r\n 'venezia': [],\r\n 'padova': [],\r\n 'treviso': [],\r\n 'belluno': [],\r\n 'rovigo': []}\r\n\r\n for row in data:\r\n row = row[0].split(';')\r\n if row[2].lower() == 'padova' or row[2].lower() == 'vicenza' or row[2].lower() == 'venezia' or row[2].lower() == 'verona' or row[2].lower() == 'treviso' or row[2].lower() == 
'belluno' or row[2].lower() == 'rovigo':\r\n iscrittiProvincia[row[2].lower()].append(\r\n str(row[0]) + ';' + str(int(row[3])+int(row[4])))\r\n iscrittiProvinciaJson = json.dumps(iscrittiProvincia)\r\n # Formato: {\"nomeCittà\" : [\"anno;numero\"],...,...}\r\n open('static/jsons/iscrittiProvincia.json',\r\n \"wb\").write(iscrittiProvinciaJson.encode())\r\n return \"200\"\r\n\r\n#########\r\n#Startup#\r\n#########\r\n\r\n#Ad ogni riavvio forzato dell'applicazione, i dati vengono aggiornati (ci impiega qualche secondo al maassimo)\r\n\r\nupdateData()\r\n\r\nif __name__ == '__main__':\r\n application.run(debug=True, port=80)\r\n",
"step-ids": [
2,
3,
4,
6,
7
]
}
|
[
2,
3,
4,
6,
7
] |
import hive
from ..bind import Instantiator as _Instantiator
from ..event import bind_info as event_bind_info
bind_infos = (event_bind_info,)
def build_scene_instantiator(i, ex, args, meta_args):
    bind_bases = tuple(b_i.environment_hive for b_i in bind_infos if b_i.is_enabled(meta_args))
    # Update bind environment to use new bases
    environment_class = i.bind_meta_class.start_value
    i.bind_meta_class.start_value = environment_class.extend("SceneBindEnvironment", bases=bind_bases)
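# Extend the generic Instantiator with the scene-aware builder and the bind
# hives contributed by each enabled bind info.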
Instantiator = _Instantiator.extend("Instantiator", build_scene_instantiator,
bases=tuple(b_i.bind_hive for b_i in bind_infos))
class SceneClass:
def __init__(self):
self._entities = {}
self.scene = None
def get_entity_id(self, identifier):
return self._entities[identifier]
def get_position_absolute(self, entity):
return tuple(entity.worldPosition)
def get_orientation_absolute(self, entity):
return tuple(entity.worldOrientation.to_quaternion())
def get_position_relative(self, entity, other):
return tuple(entity.worldPosition - other.worldPosition)
    def get_orientation_relative(self, entity, other):
        # Rotation difference between the two entities' world orientations.
        return tuple(entity.worldOrientation.to_quaternion().rotation_difference(other.worldOrientation.to_quaternion()))
def spawn_entity(self, class_name, identifier):
entity = self.scene.addObject(class_name, 'Empty')
# entity.worldTransform = entity.worldTransform.inverted() * entity.worldTransform
self._entities[identifier] = entity
return entity
def get_scene(self):
return self.scene
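# Builder: exposes the SceneClass methods as hive plugins under "entity.*"
# identifiers so other hives can bind to them.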
def build_scene(cls, i, ex, args):
i.bge_scene = hive.property(cls, "scene")
ex.get_entity_id = hive.plugin(cls.get_entity_id, identifier="entity.get")
ex.get_position_absolute = hive.plugin(cls.get_position_absolute, identifier="entity.position.absolute.get")
ex.get_position_relative = hive.plugin(cls.get_position_relative, identifier="entity.position.relative.get")
ex.get_orientation_absolute = hive.plugin(cls.get_orientation_absolute, identifier="entity.orientation.absolute.get")
ex.get_orientation_relative = hive.plugin(cls.get_orientation_relative, identifier="entity.orientation.relative.get")
ex.spawn_entity = hive.plugin(cls.spawn_entity, identifier="entity.spawn")
ex.get_scene = hive.plugin(cls.get_scene, identifier="entity.get_current")
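    # Local import, presumably to avoid a circular dependency at module load.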
import dragonfly
ex.on_tick = dragonfly.event.Tick()
def f(self):
print("I")
if not hasattr(self, 'a'):
self.a = 1
self.spawn_entity.plugin()("Cube", "c1")
i.mod_tick = hive.modifier(f)
hive.trigger(ex.on_tick, i.mod_tick)
Scene = hive.hive("Scene", build_scene, builder_cls=SceneClass)
|
normal
|
{
"blob_id": "23d4619527b5fce7fed0b0a66d834e26bb984129",
"index": 6443,
"step-1": "<mask token>\n\n\nclass SceneClass:\n\n def __init__(self):\n self._entities = {}\n self.scene = None\n\n def get_entity_id(self, identifier):\n return self._entities[identifier]\n\n def get_position_absolute(self, entity):\n return tuple(entity.worldPosition)\n\n def get_orientation_absolute(self, entity):\n return tuple(entity.worldOrientation.to_quaternion())\n\n def get_position_relative(self, entity, other):\n return tuple(entity.worldPosition - other.worldPosition)\n\n def get_orientation_relative(self, entity, other):\n return tuple(entity.worldOrientation.to_quaternion().\n rotation_difference(other.worldPosition.to_quaternion()))\n\n def spawn_entity(self, class_name, identifier):\n entity = self.scene.addObject(class_name, 'Empty')\n self._entities[identifier] = entity\n return entity\n\n def get_scene(self):\n return self.scene\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef build_scene_instantiator(i, ex, args, meta_args):\n bind_bases = tuple(b_i.environment_hive for b_i in bind_infos if b_i.\n is_enabled(meta_args))\n environment_class = i.bind_meta_class.start_value\n i.bind_meta_class.start_value = environment_class.extend(\n 'SceneBindEnvironment', bases=tuple(bind_bases))\n\n\n<mask token>\n\n\nclass SceneClass:\n\n def __init__(self):\n self._entities = {}\n self.scene = None\n\n def get_entity_id(self, identifier):\n return self._entities[identifier]\n\n def get_position_absolute(self, entity):\n return tuple(entity.worldPosition)\n\n def get_orientation_absolute(self, entity):\n return tuple(entity.worldOrientation.to_quaternion())\n\n def get_position_relative(self, entity, other):\n return tuple(entity.worldPosition - other.worldPosition)\n\n def get_orientation_relative(self, entity, other):\n return tuple(entity.worldOrientation.to_quaternion().\n rotation_difference(other.worldPosition.to_quaternion()))\n\n def spawn_entity(self, class_name, identifier):\n entity = self.scene.addObject(class_name, 'Empty')\n self._entities[identifier] = entity\n return entity\n\n def get_scene(self):\n return self.scene\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef build_scene_instantiator(i, ex, args, meta_args):\n bind_bases = tuple(b_i.environment_hive for b_i in bind_infos if b_i.\n is_enabled(meta_args))\n environment_class = i.bind_meta_class.start_value\n i.bind_meta_class.start_value = environment_class.extend(\n 'SceneBindEnvironment', bases=tuple(bind_bases))\n\n\n<mask token>\n\n\nclass SceneClass:\n\n def __init__(self):\n self._entities = {}\n self.scene = None\n\n def get_entity_id(self, identifier):\n return self._entities[identifier]\n\n def get_position_absolute(self, entity):\n return tuple(entity.worldPosition)\n\n def get_orientation_absolute(self, entity):\n return tuple(entity.worldOrientation.to_quaternion())\n\n def get_position_relative(self, entity, other):\n return tuple(entity.worldPosition - other.worldPosition)\n\n def get_orientation_relative(self, entity, other):\n return tuple(entity.worldOrientation.to_quaternion().\n rotation_difference(other.worldPosition.to_quaternion()))\n\n def spawn_entity(self, class_name, identifier):\n entity = self.scene.addObject(class_name, 'Empty')\n self._entities[identifier] = entity\n return entity\n\n def get_scene(self):\n return self.scene\n\n\ndef build_scene(cls, i, ex, args):\n i.bge_scene = hive.property(cls, 'scene')\n ex.get_entity_id = hive.plugin(cls.get_entity_id, identifier='entity.get')\n ex.get_position_absolute = hive.plugin(cls.get_position_absolute,\n identifier='entity.position.absolute.get')\n ex.get_position_relative = hive.plugin(cls.get_position_relative,\n identifier='entity.position.relative.get')\n ex.get_orientation_absolute = hive.plugin(cls.get_orientation_absolute,\n identifier='entity.orientation.absolute.get')\n ex.get_orientation_relative = hive.plugin(cls.get_orientation_relative,\n identifier='entity.orientation.relative.get')\n ex.spawn_entity = hive.plugin(cls.spawn_entity, identifier='entity.spawn')\n ex.get_scene = hive.plugin(cls.get_scene, identifier='entity.get_current')\n import dragonfly\n ex.on_tick = dragonfly.event.Tick()\n\n def f(self):\n print('I')\n if not hasattr(self, 'a'):\n self.a = 1\n self.spawn_entity.plugin()('Cube', 'c1')\n i.mod_tick = hive.modifier(f)\n hive.trigger(ex.on_tick, i.mod_tick)\n\n\n<mask token>\n",
"step-4": "<mask token>\nbind_infos = event_bind_info,\n\n\ndef build_scene_instantiator(i, ex, args, meta_args):\n bind_bases = tuple(b_i.environment_hive for b_i in bind_infos if b_i.\n is_enabled(meta_args))\n environment_class = i.bind_meta_class.start_value\n i.bind_meta_class.start_value = environment_class.extend(\n 'SceneBindEnvironment', bases=tuple(bind_bases))\n\n\nInstantiator = _Instantiator.extend('Instantiator',\n build_scene_instantiator, bases=tuple(b_i.bind_hive for b_i in bind_infos))\n\n\nclass SceneClass:\n\n def __init__(self):\n self._entities = {}\n self.scene = None\n\n def get_entity_id(self, identifier):\n return self._entities[identifier]\n\n def get_position_absolute(self, entity):\n return tuple(entity.worldPosition)\n\n def get_orientation_absolute(self, entity):\n return tuple(entity.worldOrientation.to_quaternion())\n\n def get_position_relative(self, entity, other):\n return tuple(entity.worldPosition - other.worldPosition)\n\n def get_orientation_relative(self, entity, other):\n return tuple(entity.worldOrientation.to_quaternion().\n rotation_difference(other.worldPosition.to_quaternion()))\n\n def spawn_entity(self, class_name, identifier):\n entity = self.scene.addObject(class_name, 'Empty')\n self._entities[identifier] = entity\n return entity\n\n def get_scene(self):\n return self.scene\n\n\ndef build_scene(cls, i, ex, args):\n i.bge_scene = hive.property(cls, 'scene')\n ex.get_entity_id = hive.plugin(cls.get_entity_id, identifier='entity.get')\n ex.get_position_absolute = hive.plugin(cls.get_position_absolute,\n identifier='entity.position.absolute.get')\n ex.get_position_relative = hive.plugin(cls.get_position_relative,\n identifier='entity.position.relative.get')\n ex.get_orientation_absolute = hive.plugin(cls.get_orientation_absolute,\n identifier='entity.orientation.absolute.get')\n ex.get_orientation_relative = hive.plugin(cls.get_orientation_relative,\n identifier='entity.orientation.relative.get')\n ex.spawn_entity = hive.plugin(cls.spawn_entity, identifier='entity.spawn')\n ex.get_scene = hive.plugin(cls.get_scene, identifier='entity.get_current')\n import dragonfly\n ex.on_tick = dragonfly.event.Tick()\n\n def f(self):\n print('I')\n if not hasattr(self, 'a'):\n self.a = 1\n self.spawn_entity.plugin()('Cube', 'c1')\n i.mod_tick = hive.modifier(f)\n hive.trigger(ex.on_tick, i.mod_tick)\n\n\nScene = hive.hive('Scene', build_scene, builder_cls=SceneClass)\n",
"step-5": "import hive\n\nfrom ..bind import Instantiator as _Instantiator\nfrom ..event import bind_info as event_bind_info\n\nbind_infos = (event_bind_info,)\n\n\ndef build_scene_instantiator(i, ex, args, meta_args):\n bind_bases = tuple((b_i.environment_hive for b_i in bind_infos if b_i.is_enabled(meta_args)))\n\n # Update bind environment to use new bases\n environment_class = i.bind_meta_class.start_value\n i.bind_meta_class.start_value = environment_class.extend(\"SceneBindEnvironment\", bases=tuple(bind_bases))\n\n\nInstantiator = _Instantiator.extend(\"Instantiator\", build_scene_instantiator,\n bases=tuple(b_i.bind_hive for b_i in bind_infos))\n\n\nclass SceneClass:\n\n def __init__(self):\n self._entities = {}\n self.scene = None\n\n def get_entity_id(self, identifier):\n return self._entities[identifier]\n\n def get_position_absolute(self, entity):\n return tuple(entity.worldPosition)\n\n def get_orientation_absolute(self, entity):\n return tuple(entity.worldOrientation.to_quaternion())\n\n def get_position_relative(self, entity, other):\n return tuple(entity.worldPosition - other.worldPosition)\n\n def get_orientation_relative(self, entity, other):\n return tuple(entity.worldOrientation.to_quaternion().rotation_difference(other.worldPosition.to_quaternion()))\n\n def spawn_entity(self, class_name, identifier):\n entity = self.scene.addObject(class_name, 'Empty')\n # entity.worldTransform = entity.worldTransform.inverted() * entity.worldTransform\n\n self._entities[identifier] = entity\n return entity\n\n def get_scene(self):\n return self.scene\n\n\ndef build_scene(cls, i, ex, args):\n i.bge_scene = hive.property(cls, \"scene\")\n\n ex.get_entity_id = hive.plugin(cls.get_entity_id, identifier=\"entity.get\")\n ex.get_position_absolute = hive.plugin(cls.get_position_absolute, identifier=\"entity.position.absolute.get\")\n ex.get_position_relative = hive.plugin(cls.get_position_relative, identifier=\"entity.position.relative.get\")\n ex.get_orientation_absolute = hive.plugin(cls.get_orientation_absolute, identifier=\"entity.orientation.absolute.get\")\n ex.get_orientation_relative = hive.plugin(cls.get_orientation_relative, identifier=\"entity.orientation.relative.get\")\n ex.spawn_entity = hive.plugin(cls.spawn_entity, identifier=\"entity.spawn\")\n ex.get_scene = hive.plugin(cls.get_scene, identifier=\"entity.get_current\")\n\n import dragonfly\n ex.on_tick = dragonfly.event.Tick()\n\n def f(self):\n print(\"I\")\n if not hasattr(self, 'a'):\n self.a = 1\n\n self.spawn_entity.plugin()(\"Cube\", \"c1\")\n\n i.mod_tick = hive.modifier(f)\n hive.trigger(ex.on_tick, i.mod_tick)\n\n\nScene = hive.hive(\"Scene\", build_scene, builder_cls=SceneClass)\n",
"step-ids": [
9,
10,
11,
12,
14
]
}
|
[
9,
10,
11,
12,
14
] |
import logging
from datetime import datetime
from preprocessing import death_preprocessing
from preprocessing_three_month import death_preprocessing_three_month
from death_rule_first_55 import death_rule_first_55
from death_rule_second import death_rule_second_new
from death_escalation import death_escalation
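# Pipeline: preprocess the last three months of death records, apply the two
# rule sets, then escalate the remaining cases.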
if __name__ == '__main__':
logging.basicConfig(filename='logfile.log', filemode='a', format='%(asctime)s - %(levelname)s - %(message)s',
level=logging.INFO)
logging.info('Start of the mortality analysis algorithm')
start_time_ALL = datetime.now()
print('Start of the mortality analysis algorithm')
try:
print('The month is over. Start forming tasks ...')
        # death_preprocessing(save_to_sql=True, save_to_excel=False)  # single-month variant, disabled in favor of the three-month run below
death_preprocessing_three_month(save_to_sql=True, save_to_excel=False)
death_rule_first_55(save_to_sql=True, save_to_excel=True)
death_rule_second_new(save_to_sql=True, save_to_excel=True)
death_escalation(save_to_sql=True, save_to_excel=False)
print(f'The end of the mortality analysis algorithm. elapsed time {datetime.now() - start_time_ALL}')
logging.info(f'The end of the mortality analysis algorithm. elapsed time {datetime.now() - start_time_ALL}')
    except Exception:
print('The execution of the mortality analysis algorithm was not completed due to an error')
logging.exception('Exception occurred')
logging.info('The execution of the mortality analysis algorithm was not completed due to an error')
|
normal
|
{
"blob_id": "f44a8837056eb77fbf0ff37b9c57891cc3a3d6b2",
"index": 6783,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n logging.basicConfig(filename='logfile.log', filemode='a', format=\n '%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)\n logging.info('Start of the mortality analysis algorithm')\n start_time_ALL = datetime.now()\n print('Start of the mortality analysis algorithm')\n try:\n print('The month is over. Start forming tasks ...')\n death_preprocessing_three_month(save_to_sql=True, save_to_excel=False)\n death_rule_first_55(save_to_sql=True, save_to_excel=True)\n death_rule_second_new(save_to_sql=True, save_to_excel=True)\n death_escalation(save_to_sql=True, save_to_excel=False)\n print(\n f'The end of the mortality analysis algorithm. elapsed time {datetime.now() - start_time_ALL}'\n )\n logging.info(\n f'The end of the mortality analysis algorithm. elapsed time {datetime.now() - start_time_ALL}'\n )\n except Exception as e:\n print(\n 'The execution of the mortality analysis algorithm was not completed due to an error'\n )\n logging.exception('Exception occurred')\n logging.info(\n 'The execution of the mortality analysis algorithm was not completed due to an error'\n )\n",
"step-3": "import logging\nfrom datetime import datetime\nfrom preprocessing import death_preprocessing\nfrom preprocessing_three_month import death_preprocessing_three_month\nfrom death_rule_first_55 import death_rule_first_55\nfrom death_rule_second import death_rule_second_new\nfrom death_escalation import death_escalation\nif __name__ == '__main__':\n logging.basicConfig(filename='logfile.log', filemode='a', format=\n '%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)\n logging.info('Start of the mortality analysis algorithm')\n start_time_ALL = datetime.now()\n print('Start of the mortality analysis algorithm')\n try:\n print('The month is over. Start forming tasks ...')\n death_preprocessing_three_month(save_to_sql=True, save_to_excel=False)\n death_rule_first_55(save_to_sql=True, save_to_excel=True)\n death_rule_second_new(save_to_sql=True, save_to_excel=True)\n death_escalation(save_to_sql=True, save_to_excel=False)\n print(\n f'The end of the mortality analysis algorithm. elapsed time {datetime.now() - start_time_ALL}'\n )\n logging.info(\n f'The end of the mortality analysis algorithm. elapsed time {datetime.now() - start_time_ALL}'\n )\n except Exception as e:\n print(\n 'The execution of the mortality analysis algorithm was not completed due to an error'\n )\n logging.exception('Exception occurred')\n logging.info(\n 'The execution of the mortality analysis algorithm was not completed due to an error'\n )\n",
"step-4": "import logging\nfrom datetime import datetime\n\nfrom preprocessing import death_preprocessing\nfrom preprocessing_three_month import death_preprocessing_three_month\nfrom death_rule_first_55 import death_rule_first_55\nfrom death_rule_second import death_rule_second_new\nfrom death_escalation import death_escalation\n\n\nif __name__ == '__main__':\n logging.basicConfig(filename='logfile.log', filemode='a', format='%(asctime)s - %(levelname)s - %(message)s',\n level=logging.INFO)\n logging.info('Start of the mortality analysis algorithm')\n start_time_ALL = datetime.now()\n print('Start of the mortality analysis algorithm')\n\n try:\n print('The month is over. Start forming tasks ...')\n # death_preprocessing(save_to_sql=True, save_to_excel=False)\n death_preprocessing_three_month(save_to_sql=True, save_to_excel=False)\n death_rule_first_55(save_to_sql=True, save_to_excel=True)\n death_rule_second_new(save_to_sql=True, save_to_excel=True)\n death_escalation(save_to_sql=True, save_to_excel=False)\n print(f'The end of the mortality analysis algorithm. elapsed time {datetime.now() - start_time_ALL}')\n logging.info(f'The end of the mortality analysis algorithm. elapsed time {datetime.now() - start_time_ALL}')\n\n except Exception as e:\n print('The execution of the mortality analysis algorithm was not completed due to an error')\n logging.exception('Exception occurred')\n logging.info('The execution of the mortality analysis algorithm was not completed due to an error')\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python3
# encoding: utf-8
"""
@version: ??
@author: ami
@license: Apache Licence
@file: dictTest.py
@time: 2019/9/25 18:26
@tools: PyCharm
"""
def func():
pass
class Main():
def __init__(self):
pass
if __name__ == '__main__':
pass
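# Basic dict literals, access, and in-place updates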
d = {'name': 'Bob', 'age': 40}
print(d)
d = {'spam': 2, 'ham': 1, 'eggs': 3}
print(d['spam'])
print(d)
print(len(d))
print('ham' in d)
print(list(d.keys()))
print(list(d.values()))
print(list(d.items()))
for i in d.items():
print(i)
d['ham'] = ['grill', 'bake', 'fry']
print(d)
del d['eggs']
print(d)
d['brunch'] = 'Bacon'
print(d)
print(list(d.values()))
print(list(d.keys()))
print(list(d.items()))
print(d.get('ham'))
print(d.get('toast'))
print(d.get('toast', 88))
print(d)
d2 = {'toast': 4, 'muffin': 5}
d.update(d2)
print(d)
print(d.pop('muffin'))
print(d.pop('toast'))
print(d)
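# Dicts as simple lookup tables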
table = {
'1975': 'Holy Grail',
'1979': 'Life of Brain',
'1983': 'The Meaning of Life'
}
year = '1983'
movie = table[year]
print(movie)
for year in table:
print(year + '\t' + table[year])
table2 = {
'Holy Grail': '1975',
'Life of Brain': '1979',
'The Meaning of Life': '1983'
}
print(table2['Holy Grail'])
print(list(table2.items()))
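# Reverse lookup: recover keys from a value with comprehensions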
year_ = [title for (title, year) in table2.items() if year == '1975']
print(year_)
K = 'Holy Grail'
print(table2[K])
V = '1975'
key = [key for (key, value) in table2.items() if value == V]
print(key)
key = [key for key in table2.keys() if table2[key] == V]
print(key)
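# Sparse multi-dimensional "matrix" keyed by coordinate tuples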
Matrix = {}
Matrix[(2, 3, 4)] = 88
Matrix[(7, 8, 9)] = 99
X = 2
Y = 3
Z = 4
z_ = Matrix[(X, Y, Z)]
print(z_)
print(Matrix)
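# Three ways to handle a missing key: membership test, try/except, get()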
if (2, 3, 6) in Matrix:
print(Matrix[(2, 3, 6)])
else:
print(0)
try:
print(Matrix[(2, 3, 6)])
except KeyError:
print(0)
print(Matrix.get((2, 3, 4), 0))
print(Matrix.get((2, 3, 6), 0))
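# Dicts as records; nesting gives lightweight structured data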
rec = {}
rec['name'] = 'Bob'
rec['age'] = 40.5
rec['job'] = 'developer/manager'
print(rec['name'])
rec = {
'name': 'Bob',
'jobs': ['developer', 'manager'],
'web': 'www.bobs.org/?Bob',
'home': {'state': 'Overworked', 'zip': 12345}
}
print(rec['name'])
print(rec['jobs'])
print(rec['jobs'][1])
print(rec['home']['zip'])
db = []
other = {
'name': 'other',
'jobs': ['hr', 'manager'],
'web': 'www.hr.org',
'home': {'state': 'Overworked', 'zip': 55555}
}
db.append(rec)
db.append(other)
print(db[0]['jobs'])
db = {}
db['bob'] = rec
db['sue'] = other
print(db['bob']['jobs'])
age_ = {'name': 'Bob', 'age': 40}
print(age_)
d = {}
d['name'] = 'sue'
d['age'] = 50
print(d)
di = dict(name='Bob', age=56)
print(di)
di = dict([('name', 'Bob'), ('age', 55)])
print(di)
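# Bulk construction with fromkeys and zip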
fromkeys = dict.fromkeys(['a', 'b'], 0)
print(fromkeys)
iterator = zip(['a', 'b', 'c'], [1, 2, 3])
print(iterator)
d = dict(zip(['a', 'b', 'c'], [1, 2, 3]))
print(d)
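# Dict comprehensions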
d = {k: v for (k, v) in zip(['a', 'b', 'c'], [1, 2, 3])}
print(d)
d = {x: x ** 2 for x in [1, 2, 3, 4]}
print(d)
d2 = {x: x ** 2 for x in range(4)}
print(d2)
d = {c: c * 4 for c in 'SPAM'}
print(d)
d = {c.lower(): c + '!' for c in ['spam', 'eggs', 'ham']}
print(d)
d = dict.fromkeys(['a', 'b', 'c'], 0)
print(d)
d = {k: 0 for k in ['a', 'b', 'c']}
print(d)
d = dict.fromkeys('spam')
print(d)
d = dict.fromkeys('spam', 0)
print(d)
d = {k: None for k in 'spam'}
print(d)
d = dict(a=1, b=2, c=3)
print(d)
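# View objects: keys(), values() and items() are iterables, not lists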
k = d.keys()
print(k)
# print(k[0])  # TypeError: dict view objects do not support indexing
print(list(k)[0])
v = d.values()
print(v)
print(list(v))
print(d.items())
print(list(d.items()))
for k in d.keys(): print(k)
for key in d: print(key)
# Sorting the keys of {'a': 1, 'b': 2, 'c': 3}
print(d)
Ks = d.keys()
print(Ks)
Ks = list(Ks)
Ks.sort()
print(Ks)
for k in Ks: print(k, d[k])
print("-------"*6)
D = {'b': 2, 'c': 3, 'a': 1}
Ks = D.keys()
for k in sorted(Ks): print(k, D[k])
|
normal
|
{
"blob_id": "797cedc9dc2a47713b9554e4f5975a4505ecf6d3",
"index": 9568,
"step-1": "<mask token>\n\n\nclass Main:\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef func():\n pass\n\n\nclass Main:\n\n def __init__(self):\n pass\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef func():\n pass\n\n\nclass Main:\n\n def __init__(self):\n pass\n\n\nif __name__ == '__main__':\n pass\n<mask token>\nprint(d)\n<mask token>\nprint(d['spam'])\nprint(d)\nprint(len(d))\nprint('ham' in d)\nprint(list(d.keys()))\nprint(list(d.values()))\nprint(list(d.items()))\nfor i in d.items():\n print(i)\n<mask token>\nprint(d)\ndel d['eggs']\nprint(d)\n<mask token>\nprint(d)\nprint(list(d.values()))\nprint(list(d.keys()))\nprint(list(d.items()))\nprint(d.get('ham'))\nprint(d.get('toast'))\nprint(d.get('toast', 88))\nprint(d)\n<mask token>\nd.update(d2)\nprint(d)\nprint(d.pop('muffin'))\nprint(d.pop('toast'))\nprint(d)\n<mask token>\nprint(movie)\nfor year in table:\n print(year + '\\t' + table[year])\n<mask token>\nprint(table2['Holy Grail'])\nprint(list(table2.items()))\n<mask token>\nprint(year_)\n<mask token>\nprint(table2[K])\n<mask token>\nprint(key)\n<mask token>\nprint(key)\n<mask token>\nprint(z_)\nprint(Matrix)\nif (2, 3, 6) in Matrix:\n print(Matrix[2, 3, 6])\nelse:\n print(0)\ntry:\n print(Matrix[2, 3, 6])\nexcept KeyError:\n print(0)\nprint(Matrix.get((2, 3, 4), 0))\nprint(Matrix.get((2, 3, 6), 0))\n<mask token>\nprint(rec['name'])\n<mask token>\nprint(rec['name'])\nprint(rec['jobs'])\nprint(rec['jobs'][1])\nprint(rec['home']['zip'])\n<mask token>\ndb.append(rec)\ndb.append(other)\nprint(db[0]['jobs'])\n<mask token>\ndb['bob']['jobs']\n<mask token>\nprint(age_)\n<mask token>\nprint(d)\n<mask token>\nprint(di)\n<mask token>\nprint(di)\n<mask token>\nprint(fromkeys)\n<mask token>\nprint(iterator)\n<mask token>\nprint(d)\n<mask token>\nprint(d)\n<mask token>\nprint(d)\n<mask token>\nprint(d2)\n<mask token>\nprint(d)\n<mask token>\nprint(d)\n<mask token>\nprint(d)\n<mask token>\nprint(d)\n<mask token>\nprint(d)\n<mask token>\nprint(d)\n<mask token>\nprint(d)\n<mask token>\nprint(d)\n<mask token>\nprint(k)\nprint(list(k)[0])\n<mask token>\nprint(v)\nprint(list(v))\nprint(d.items())\nprint(list(d.items()))\nfor k in d.keys():\n print(k)\nfor key in d:\n print(key)\nprint(d)\n<mask token>\nprint(Ks)\n<mask token>\nKs.sort()\nprint(Ks)\nfor k in Ks:\n print(k, d[k])\nprint('-------' * 6)\n<mask token>\nfor k in sorted(Ks):\n print(k, D[k])\n",
"step-4": "<mask token>\n\n\ndef func():\n pass\n\n\nclass Main:\n\n def __init__(self):\n pass\n\n\nif __name__ == '__main__':\n pass\nd = {'name': 'Bob', 'age': 40}\nprint(d)\nd = {'spam': 2, 'ham': 1, 'eggs': 3}\nprint(d['spam'])\nprint(d)\nprint(len(d))\nprint('ham' in d)\nprint(list(d.keys()))\nprint(list(d.values()))\nprint(list(d.items()))\nfor i in d.items():\n print(i)\nd['ham'] = ['grill', 'bake', 'fry']\nprint(d)\ndel d['eggs']\nprint(d)\nd['brunch'] = 'Bacon'\nprint(d)\nprint(list(d.values()))\nprint(list(d.keys()))\nprint(list(d.items()))\nprint(d.get('ham'))\nprint(d.get('toast'))\nprint(d.get('toast', 88))\nprint(d)\nd2 = {'toast': 4, 'muffin': 5}\nd.update(d2)\nprint(d)\nprint(d.pop('muffin'))\nprint(d.pop('toast'))\nprint(d)\ntable = {'1975': 'Holy Grail', '1979': 'Life of Brain', '1983':\n 'The Meaning of Life'}\nyear = '1983'\nmovie = table[year]\nprint(movie)\nfor year in table:\n print(year + '\\t' + table[year])\ntable2 = {'Holy Grail': '1975', 'Life of Brain': '1979',\n 'The Meaning of Life': '1983'}\nprint(table2['Holy Grail'])\nprint(list(table2.items()))\nyear_ = [title for title, year in table2.items() if year == '1975']\nprint(year_)\nK = 'Holy Grail'\nprint(table2[K])\nV = '1975'\nkey = [key for key, value in table2.items() if value == V]\nprint(key)\nkey = [key for key in table2.keys() if table2[key] == V]\nprint(key)\nMatrix = {}\nMatrix[2, 3, 4] = 88\nMatrix[7, 8, 9] = 99\nX = 2\nY = 3\nZ = 4\nz_ = Matrix[X, Y, Z]\nprint(z_)\nprint(Matrix)\nif (2, 3, 6) in Matrix:\n print(Matrix[2, 3, 6])\nelse:\n print(0)\ntry:\n print(Matrix[2, 3, 6])\nexcept KeyError:\n print(0)\nprint(Matrix.get((2, 3, 4), 0))\nprint(Matrix.get((2, 3, 6), 0))\nrec = {}\nrec['name'] = 'Bob'\nrec['age'] = 40.5\nrec['job'] = 'developer/manager'\nprint(rec['name'])\nrec = {'name': 'Bob', 'jobs': ['developer', 'manager'], 'web':\n 'www.bobs.org/?Bob', 'home': {'state': 'Overworked', 'zip': 12345}}\nprint(rec['name'])\nprint(rec['jobs'])\nprint(rec['jobs'][1])\nprint(rec['home']['zip'])\ndb = []\nother = {'name': 'other', 'jobs': ['hr', 'manager'], 'web': 'www.hr.org',\n 'home': {'state': 'Overworked', 'zip': 55555}}\ndb.append(rec)\ndb.append(other)\nprint(db[0]['jobs'])\ndb = {}\ndb['bob'] = rec\ndb['sue'] = other\ndb['bob']['jobs']\nage_ = {'name': 'Bob', 'age': 40}\nprint(age_)\nd = {}\nd['name'] = 'sue'\nd['age'] = 50\nprint(d)\ndi = dict(name='Bob', age=56)\nprint(di)\ndi = dict([('name', 'Bob'), ('age', 55)])\nprint(di)\nfromkeys = dict.fromkeys(['a', 'b'], 0)\nprint(fromkeys)\niterator = zip(['a', 'b', 'c'], [1, 2, 3])\nprint(iterator)\nd = dict(zip(['a', 'b', 'c'], [1, 2, 3]))\nprint(d)\nd = {k: v for k, v in zip(['a', 'b', 'c'], [1, 2, 3])}\nprint(d)\nd = {x: (x ** 2) for x in [1, 2, 3, 4]}\nprint(d)\nd2 = {x: (x ** 2) for x in range(4)}\nprint(d2)\nd = {c: (c * 4) for c in 'SPAM'}\nprint(d)\nd = {c.lower(): (c + '!') for c in ['spam', 'eggs', 'ham']}\nprint(d)\nd = dict.fromkeys(['a', 'b', 'c'], 0)\nprint(d)\nd = {k: (0) for k in ['a', 'b', 'c']}\nprint(d)\nd = dict.fromkeys('spam')\nprint(d)\nd = dict.fromkeys('spam', 0)\nprint(d)\nd = {k: None for k in 'spam'}\nprint(d)\nd = dict(a=1, b=2, c=3)\nprint(d)\nk = d.keys()\nprint(k)\nprint(list(k)[0])\nv = d.values()\nprint(v)\nprint(list(v))\nprint(d.items())\nprint(list(d.items()))\nfor k in d.keys():\n print(k)\nfor key in d:\n print(key)\nprint(d)\nKs = d.keys()\nprint(Ks)\nKs = list(Ks)\nKs.sort()\nprint(Ks)\nfor k in Ks:\n print(k, d[k])\nprint('-------' * 6)\nD = {'b': 2, 'c': 3, 'a': 1}\nKs = D.keys()\nfor k in sorted(Ks):\n 
print(k, D[k])\n",
"step-5": "#!/usr/bin/env python3\r\n# encoding: utf-8\r\n\r\n\"\"\"\r\n@version: ??\r\n@author: ami\r\n@license: Apache Licence \r\n@file: dictTest.py\r\n@time: 2019/9/25 18:26\r\n@tools: PyCharm\r\n\"\"\"\r\n\r\n\r\ndef func():\r\n pass\r\n\r\n\r\nclass Main():\r\n def __init__(self):\r\n pass\r\n\r\n\r\nif __name__ == '__main__':\r\n pass\r\n\r\nd = {'name': 'Bob', 'age': 40}\r\nprint(d)\r\n\r\nd = {'spam': 2, 'ham': 1, 'eggs': 3}\r\nprint(d['spam'])\r\nprint(d)\r\n\r\nprint(len(d))\r\nprint('ham' in d)\r\nprint(list(d.keys()))\r\nprint(list(d.values()))\r\nprint(list(d.items()))\r\n\r\nfor i in d.items():\r\n print(i)\r\n\r\nd['ham'] = ['grill', 'bake', 'fry']\r\nprint(d)\r\n\r\ndel d['eggs']\r\nprint(d)\r\nd['brunch'] = 'Bacon'\r\nprint(d)\r\n\r\nprint(list(d.values()))\r\nprint(list(d.keys()))\r\nprint(list(d.items()))\r\n\r\nprint(d.get('ham'))\r\nprint(d.get('toast'))\r\nprint(d.get('toast', 88))\r\nprint(d)\r\nd2 = {'toast': 4, 'muffin': 5}\r\nd.update(d2)\r\nprint(d)\r\nprint(d.pop('muffin'))\r\nprint(d.pop('toast'))\r\nprint(d)\r\n\r\ntable = {\r\n '1975': 'Holy Grail',\r\n '1979': 'Life of Brain',\r\n '1983': 'The Meaning of Life'\r\n}\r\nyear = '1983'\r\nmovie = table[year]\r\nprint(movie)\r\n\r\nfor year in table:\r\n print(year + '\\t' + table[year])\r\n\r\ntable2 = {\r\n 'Holy Grail': '1975',\r\n 'Life of Brain': '1979',\r\n 'The Meaning of Life': '1983'\r\n}\r\nprint(table2['Holy Grail'])\r\nprint(list(table2.items()))\r\n\r\nyear_ = [title for (title, year) in table2.items() if year == '1975']\r\nprint(year_)\r\n\r\nK = 'Holy Grail'\r\nprint(table2[K])\r\nV = '1975'\r\nkey = [key for (key, value) in table2.items() if value == V]\r\nprint(key)\r\nkey = [key for key in table2.keys() if table2[key] == V]\r\nprint(key)\r\n\r\nMatrix = {}\r\nMatrix[(2, 3, 4)] = 88\r\nMatrix[(7, 8, 9)] = 99\r\nX = 2\r\nY = 3\r\nZ = 4\r\nz_ = Matrix[(X, Y, Z)]\r\nprint(z_)\r\nprint(Matrix)\r\n\r\nif (2, 3, 6) in Matrix:\r\n print(Matrix[(2, 3, 6)])\r\nelse:\r\n print(0)\r\n\r\ntry:\r\n print(Matrix[(2, 3, 6)])\r\nexcept KeyError:\r\n print(0)\r\n\r\nprint(Matrix.get((2, 3, 4), 0))\r\nprint(Matrix.get((2, 3, 6), 0))\r\n\r\nrec = {}\r\nrec['name'] = 'Bob'\r\nrec['age'] = 40.5\r\nrec['job'] = 'developer/manager'\r\nprint(rec['name'])\r\n\r\nrec = {\r\n 'name': 'Bob',\r\n 'jobs': ['developer', 'manager'],\r\n 'web': 'www.bobs.org/?Bob',\r\n 'home': {'state': 'Overworked', 'zip': 12345}\r\n}\r\nprint(rec['name'])\r\nprint(rec['jobs'])\r\nprint(rec['jobs'][1])\r\nprint(rec['home']['zip'])\r\ndb = []\r\nother = {\r\n 'name': 'other',\r\n 'jobs': ['hr', 'manager'],\r\n 'web': 'www.hr.org',\r\n 'home': {'state': 'Overworked', 'zip': 55555}\r\n}\r\ndb.append(rec)\r\ndb.append(other)\r\nprint(db[0]['jobs'])\r\n\r\ndb = {}\r\ndb['bob'] = rec\r\ndb['sue'] = other\r\ndb['bob']['jobs']\r\n\r\nage_ = {'name': 'Bob', 'age': 40}\r\nprint(age_)\r\n\r\nd = {}\r\nd['name'] = 'sue'\r\nd['age'] = 50\r\nprint(d)\r\n\r\ndi = dict(name='Bob', age=56)\r\nprint(di)\r\n\r\ndi = dict([('name', 'Bob'), ('age', 55)])\r\nprint(di)\r\n\r\nfromkeys = dict.fromkeys(['a', 'b'], 0)\r\nprint(fromkeys)\r\n\r\niterator = zip(['a', 'b', 'c'], [1, 2, 3])\r\nprint(iterator)\r\nd = dict(zip(['a', 'b', 'c'], [1, 2, 3]))\r\nprint(d)\r\n\r\nd = {k: v for (k, v) in zip(['a', 'b', 'c'], [1, 2, 3])}\r\nprint(d)\r\n\r\nd = {x: x ** 2 for x in [1, 2, 3, 4]}\r\nprint(d)\r\nd2 = {x: x ** 2 for x in range(4)}\r\nprint(d2)\r\n\r\nd = {c: c * 4 for c in 'SPAM'}\r\nprint(d)\r\n\r\nd = {c.lower(): c + '!' 
for c in ['spam', 'eggs', 'ham']}\r\nprint(d)\r\n\r\nd = dict.fromkeys(['a', 'b', 'c'], 0)\r\nprint(d)\r\n\r\nd = {k: 0 for k in ['a', 'b', 'c']}\r\nprint(d)\r\n\r\nd = dict.fromkeys('spam')\r\nprint(d)\r\nd = dict.fromkeys('spam', 0)\r\nprint(d)\r\n\r\nd = {k: None for k in 'spam'}\r\nprint(d)\r\n\r\nd = dict(a=1, b=2, c=3)\r\nprint(d)\r\nk = d.keys()\r\nprint(k)\r\n# print(k[0])\r\nprint(list(k)[0])\r\nv = d.values()\r\nprint(v)\r\nprint(list(v))\r\nprint(d.items())\r\nprint(list(d.items()))\r\n\r\nfor k in d.keys(): print(k)\r\n\r\nfor key in d: print(key)\r\n\r\n# 排序{'a': 1, 'b': 2, 'c': 3}\r\nprint(d)\r\nKs = d.keys()\r\nprint(Ks)\r\nKs = list(Ks)\r\nKs.sort()\r\nprint(Ks)\r\nfor k in Ks: print(k, d[k])\r\n\r\nprint(\"-------\"*6)\r\nD = {'b': 2, 'c': 3, 'a': 1}\r\nKs = D.keys()\r\nfor k in sorted(Ks): print(k, D[k])\r\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
from datetime import datetime
from sqlalchemy import Column, Integer, String, ForeignKey, DateTime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
Base = declarative_base()
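# ORM models for a bus-tracking schema: static line/route/stop definitions
# plus live position and per-trip bookkeeping tables.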
class BusLine(Base):
__tablename__ = "bus_lines"
id = Column(Integer, primary_key=True)
line_id = Column(Integer)
line_description = Column(String)
class BusRoute(Base):
__tablename__ = "bus_routes"
id = Column(Integer, primary_key=True)
bus_line_id = Column(Integer)
route_id = Column(Integer)
route_description = Column(String)
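    # Serialize every mapped column into a plain dict (handy for JSON
    # responses); the same helper is repeated on the models below.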
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class BusRoutePos(Base):
__tablename__ = "bus_route_pos"
id = Column(Integer, primary_key=True)
route_id = Column(Integer, ForeignKey("bus_routes.route_id"), nullable=False)
lat = Column(String)
lon = Column(String)
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class BusPos(Base):
__tablename__ = "bus_pos"
id = Column(Integer, primary_key=True)
bus_line_id = Column(Integer, ForeignKey("bus_lines.line_id"), nullable=False)
bus_internal_id = Column(Integer)
lat = Column(String)
lon = Column(String)
orientation = Column(Integer)
timestamp = Column(Integer)
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class BusStop(Base):
__tablename__ = "bus_stops"
id = Column(Integer, primary_key=True)
route_id = Column(Integer, ForeignKey("bus_routes.route_id"), nullable=False)
lat = Column(String)
lon = Column(String)
stop_code = Column(String)
def as_dict(self):
return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class BusTrip(Base):
__tablename__ = "bus_trip"
id = Column(Integer, primary_key=True)
bus_line_id = Column(Integer)
bus_internal_id = Column(Integer)
route_id = Column(Integer)
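    # Bookkeeping for the most recent update seen for this trip.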
last_updated = Column(DateTime, default=datetime.utcnow)
last_pos_timestamp = Column(Integer, default=0)
|
normal
|
{
"blob_id": "9e896d935cc57e580ed46cd501b41053bbaab38f",
"index": 6490,
"step-1": "<mask token>\n\n\nclass BusRoute(Base):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass BusRoutePos(Base):\n __tablename__ = 'bus_route_pos'\n id = Column(Integer, primary_key=True)\n route_id = Column(Integer, ForeignKey('bus_routes.route_id'), nullable=\n False)\n lat = Column(String)\n lon = Column(String)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusPos(Base):\n __tablename__ = 'bus_pos'\n id = Column(Integer, primary_key=True)\n bus_line_id = Column(Integer, ForeignKey('bus_lines.line_id'), nullable\n =False)\n bus_internal_id = Column(Integer)\n lat = Column(String)\n lon = Column(String)\n orientation = Column(Integer)\n timestamp = Column(Integer)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusStop(Base):\n __tablename__ = 'bus_stops'\n id = Column(Integer, primary_key=True)\n route_id = Column(Integer, ForeignKey('bus_routes.route_id'), nullable=\n False)\n lat = Column(String)\n lon = Column(String)\n stop_code = Column(String)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusTrip(Base):\n __tablename__ = 'bus_trip'\n id = Column(Integer, primary_key=True)\n bus_line_id = Column(Integer)\n bus_internal_id = Column(Integer)\n route_id = Column(Integer)\n last_updated = Column(DateTime, default=datetime.utcnow)\n last_pos_timestamp = Column(Integer, default=0)\n",
"step-2": "<mask token>\n\n\nclass BusLine(Base):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass BusRoute(Base):\n __tablename__ = 'bus_routes'\n id = Column(Integer, primary_key=True)\n bus_line_id = Column(Integer)\n route_id = Column(Integer)\n route_description = Column(String)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusRoutePos(Base):\n __tablename__ = 'bus_route_pos'\n id = Column(Integer, primary_key=True)\n route_id = Column(Integer, ForeignKey('bus_routes.route_id'), nullable=\n False)\n lat = Column(String)\n lon = Column(String)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusPos(Base):\n __tablename__ = 'bus_pos'\n id = Column(Integer, primary_key=True)\n bus_line_id = Column(Integer, ForeignKey('bus_lines.line_id'), nullable\n =False)\n bus_internal_id = Column(Integer)\n lat = Column(String)\n lon = Column(String)\n orientation = Column(Integer)\n timestamp = Column(Integer)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusStop(Base):\n __tablename__ = 'bus_stops'\n id = Column(Integer, primary_key=True)\n route_id = Column(Integer, ForeignKey('bus_routes.route_id'), nullable=\n False)\n lat = Column(String)\n lon = Column(String)\n stop_code = Column(String)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusTrip(Base):\n __tablename__ = 'bus_trip'\n id = Column(Integer, primary_key=True)\n bus_line_id = Column(Integer)\n bus_internal_id = Column(Integer)\n route_id = Column(Integer)\n last_updated = Column(DateTime, default=datetime.utcnow)\n last_pos_timestamp = Column(Integer, default=0)\n",
"step-3": "<mask token>\n\n\nclass BusLine(Base):\n __tablename__ = 'bus_lines'\n id = Column(Integer, primary_key=True)\n line_id = Column(Integer)\n line_description = Column(String)\n\n\nclass BusRoute(Base):\n __tablename__ = 'bus_routes'\n id = Column(Integer, primary_key=True)\n bus_line_id = Column(Integer)\n route_id = Column(Integer)\n route_description = Column(String)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusRoutePos(Base):\n __tablename__ = 'bus_route_pos'\n id = Column(Integer, primary_key=True)\n route_id = Column(Integer, ForeignKey('bus_routes.route_id'), nullable=\n False)\n lat = Column(String)\n lon = Column(String)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusPos(Base):\n __tablename__ = 'bus_pos'\n id = Column(Integer, primary_key=True)\n bus_line_id = Column(Integer, ForeignKey('bus_lines.line_id'), nullable\n =False)\n bus_internal_id = Column(Integer)\n lat = Column(String)\n lon = Column(String)\n orientation = Column(Integer)\n timestamp = Column(Integer)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusStop(Base):\n __tablename__ = 'bus_stops'\n id = Column(Integer, primary_key=True)\n route_id = Column(Integer, ForeignKey('bus_routes.route_id'), nullable=\n False)\n lat = Column(String)\n lon = Column(String)\n stop_code = Column(String)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusTrip(Base):\n __tablename__ = 'bus_trip'\n id = Column(Integer, primary_key=True)\n bus_line_id = Column(Integer)\n bus_internal_id = Column(Integer)\n route_id = Column(Integer)\n last_updated = Column(DateTime, default=datetime.utcnow)\n last_pos_timestamp = Column(Integer, default=0)\n",
"step-4": "<mask token>\nBase = declarative_base()\n\n\nclass BusLine(Base):\n __tablename__ = 'bus_lines'\n id = Column(Integer, primary_key=True)\n line_id = Column(Integer)\n line_description = Column(String)\n\n\nclass BusRoute(Base):\n __tablename__ = 'bus_routes'\n id = Column(Integer, primary_key=True)\n bus_line_id = Column(Integer)\n route_id = Column(Integer)\n route_description = Column(String)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusRoutePos(Base):\n __tablename__ = 'bus_route_pos'\n id = Column(Integer, primary_key=True)\n route_id = Column(Integer, ForeignKey('bus_routes.route_id'), nullable=\n False)\n lat = Column(String)\n lon = Column(String)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusPos(Base):\n __tablename__ = 'bus_pos'\n id = Column(Integer, primary_key=True)\n bus_line_id = Column(Integer, ForeignKey('bus_lines.line_id'), nullable\n =False)\n bus_internal_id = Column(Integer)\n lat = Column(String)\n lon = Column(String)\n orientation = Column(Integer)\n timestamp = Column(Integer)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusStop(Base):\n __tablename__ = 'bus_stops'\n id = Column(Integer, primary_key=True)\n route_id = Column(Integer, ForeignKey('bus_routes.route_id'), nullable=\n False)\n lat = Column(String)\n lon = Column(String)\n stop_code = Column(String)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusTrip(Base):\n __tablename__ = 'bus_trip'\n id = Column(Integer, primary_key=True)\n bus_line_id = Column(Integer)\n bus_internal_id = Column(Integer)\n route_id = Column(Integer)\n last_updated = Column(DateTime, default=datetime.utcnow)\n last_pos_timestamp = Column(Integer, default=0)\n",
"step-5": "from datetime import datetime\n\nfrom sqlalchemy import Column, Integer, String, ForeignKey, DateTime\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import relationship\n\nBase = declarative_base()\n\n\nclass BusLine(Base):\n __tablename__ = \"bus_lines\"\n id = Column(Integer, primary_key=True)\n line_id = Column(Integer)\n line_description = Column(String)\n\n\nclass BusRoute(Base):\n __tablename__ = \"bus_routes\"\n id = Column(Integer, primary_key=True)\n bus_line_id = Column(Integer)\n route_id = Column(Integer)\n route_description = Column(String)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusRoutePos(Base):\n __tablename__ = \"bus_route_pos\"\n id = Column(Integer, primary_key=True)\n route_id = Column(Integer, ForeignKey(\"bus_routes.route_id\"), nullable=False)\n lat = Column(String)\n lon = Column(String)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusPos(Base):\n __tablename__ = \"bus_pos\"\n id = Column(Integer, primary_key=True)\n bus_line_id = Column(Integer, ForeignKey(\"bus_lines.line_id\"), nullable=False)\n bus_internal_id = Column(Integer)\n lat = Column(String)\n lon = Column(String)\n orientation = Column(Integer)\n timestamp = Column(Integer)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusStop(Base):\n __tablename__ = \"bus_stops\"\n id = Column(Integer, primary_key=True)\n route_id = Column(Integer, ForeignKey(\"bus_routes.route_id\"), nullable=False)\n lat = Column(String)\n lon = Column(String)\n stop_code = Column(String)\n\n def as_dict(self):\n return {c.name: getattr(self, c.name) for c in self.__table__.columns}\n\n\nclass BusTrip(Base):\n __tablename__ = \"bus_trip\"\n id = Column(Integer, primary_key=True)\n\n bus_line_id = Column(Integer)\n bus_internal_id = Column(Integer)\n\n route_id = Column(Integer)\n last_updated = Column(DateTime, default=datetime.utcnow)\n last_pos_timestamp = Column(Integer, default=0)\n",
"step-ids": [
12,
15,
16,
17,
19
]
}
|
[
12,
15,
16,
17,
19
] |
#coding:utf-8
# basic string operations
# rstrip() removes the specified characters from the end of the string (whitespace by default);
# if the end contains several matching characters, all of them are removed
str1="djcc"
str2="adcd"
print("this's rstrip() function---------")
print(str1.rstrip("c"))
print(str1.rstrip("d"))
# replace() swaps the specified substring for a new one: str.replace(old, new[, max]);
# max caps how many occurrences are replaced; if omitted, all occurrences are replaced
str3="this is history,it is not fake"
print("this's replace function----------")
print(str3.replace("is","was"))
print(str3.replace("is","was",3))  # the third argument is the maximum number of replacements (the first 3 occurrences here), not an index
|
normal
|
{
"blob_id": "59170e6b0b0705b9908ed1c32bbea87373126594",
"index": 9484,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(\"this's rstrip() function---------\")\nprint(str1.rstrip('c'))\nprint(str1.rstrip('d'))\n<mask token>\nprint(\"this's replace function----------\")\nprint(str3.replace('is', 'was'))\nprint(str3.replace('is', 'was', 3))\n",
"step-3": "str1 = 'djcc'\nstr2 = 'adcd'\nprint(\"this's rstrip() function---------\")\nprint(str1.rstrip('c'))\nprint(str1.rstrip('d'))\nstr3 = 'this is history,it is not fake'\nprint(\"this's replace function----------\")\nprint(str3.replace('is', 'was'))\nprint(str3.replace('is', 'was', 3))\n",
"step-4": "#coding:utf-8\n\n#base string opeate\n\n#rstrip()删除字符串末尾被指定的字符,默认是空格,如末尾有多个相同的字符,则一并删除\nstr1=\"djcc\"\nstr2=\"adcd\"\nprint(\"this's rstrip() function---------\")\nprint(str1.rstrip(\"c\"))\nprint(str1.rstrip(\"d\"))\n\n\n#replace()用新字符替换字符串中被指定的字符,str.replace(old, new[, max]),max表示替换多少个,如不指定,全部替换\n\nstr3=\"this is history,it is not fake\"\nprint(\"this's replace function----------\")\nprint(str3.replace(\"is\",\"was\"))\nprint(str3.replace(\"is\",\"was\",3))#索引从1开始,0不算\n\n#\n\n\n\n\n\n\n\n\n\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import logging
import ibmsecurity.utilities.tools
import os.path
logger = logging.getLogger(__name__)
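# Helpers for managing configuration snapshots on an ISAM appliance via the
# ibmsecurity REST wrapper: list, search, create, delete, apply, download,
# upload, and compare.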
def get(isamAppliance, check_mode=False, force=False):
"""
Get information on existing snapshots
"""
return isamAppliance.invoke_get("Retrieving snapshots", "/snapshots")
def get_latest(isamAppliance, check_mode=False, force=False):
"""
Retrieve id of latest found snapshot
"""
ret_obj_id = isamAppliance.create_return_object()
ret_obj = get(isamAppliance)
    # Get the snapshot with the lowest 'index' value - that is the most recent one
snaps = min(ret_obj['data'], key=lambda snap: snap['index'])
ret_obj_id['data'] = snaps['id']
return ret_obj_id
def search(isamAppliance, comment, check_mode=False, force=False):
"""
    Retrieve snapshots whose comment contains the given string
"""
ret_obj = isamAppliance.create_return_object()
ret_obj_all = get(isamAppliance)
for obj in ret_obj_all['data']:
if comment in obj['comment']:
logger.debug("Snapshot comment \"{0}\" has this string \"{1}\" in it.".format(obj['comment'], comment))
if ret_obj['data'] == {}:
ret_obj['data'] = [obj['id']]
else:
ret_obj['data'].append(obj['id'])
return ret_obj
def create(isamAppliance, comment='', check_mode=False, force=False):
"""
Create a new snapshot
"""
if force is True or _check(isamAppliance, comment=comment) is False:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_post("Creating snapshot", "/snapshots",
{
'comment': comment
})
return isamAppliance.create_return_object()
def _check(isamAppliance, comment='', id=None):
"""
    Check whether a snapshot exists with the given id, or (otherwise) one with the exact comment
:param isamAppliance:
:param comment:
:return:
"""
ret_obj = get(isamAppliance)
    if id is not None:
for snaps in ret_obj['data']:
if snaps['id'] == id:
logger.debug("Found id: {}".format(id))
return True
else:
for snaps in ret_obj['data']:
if snaps['comment'] == comment:
logger.debug("Found comment: {}".format(comment))
return True
return False
def delete(isamAppliance, id=None, comment=None, check_mode=False, force=False):
"""
Delete snapshot(s) - check id before processing comment. id can be a list
"""
ids = []
delete_flag = False
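    # Accept a single id, a list of ids, or a comment to match against.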
    if isinstance(id, list):
        for i in id:
            if _check(isamAppliance, id=i) is True:
                delete_flag = True
                ids.append(i)
    elif _check(isamAppliance, id=id) is True:
        delete_flag = True
        ids.append(id)
    elif comment is not None:
ret_obj = search(isamAppliance, comment=comment)
if ret_obj != {} and ret_obj['data'] != {}:
delete_flag = True
ids = ret_obj['data']
logger.info("Deleting the following list of IDs: {}".format(ids))
if force is True or delete_flag is True:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_delete("Deleting snapshot",
"/snapshots/multi_destroy?record_ids=" + ",".join(ids))
return isamAppliance.create_return_object()
def multi_delete(isamAppliance, ids=None, comment=None, check_mode=False, force=False):
    """
    Delete multiple snapshots based on id or comment
    """
    ids = [] if ids is None else ids  # avoid a mutable default argument
    if comment is not None:
        ret_obj = search(isamAppliance, comment=comment)
        if ret_obj['data'] == {}:
            return isamAppliance.create_return_object(changed=False)
        else:
            if ids == []:
                ids = ret_obj['data']
            else:
                for snaps in ret_obj['data']:
                    ids.append(snaps)
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True)
    else:
        return isamAppliance.invoke_delete("Deleting one or multiple snapshots",
                                           "/snapshots/multi_destroy?record_ids=" + ",".join(ids))
def modify(isamAppliance, id, comment, check_mode=False, force=False):
"""
Modify the snapshot comment
"""
if force is True or _check(isamAppliance, id=id) is True:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_put("Modifying snapshot", "/snapshots/" + id,
{
'comment': comment
})
return isamAppliance.create_return_object()
def apply(isamAppliance, id=None, comment=None, check_mode=False, force=False):
"""
Apply a snapshot
There is a priority in the parameter to be used for snapshot applying: id > comment
"""
apply_flag = False
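    # Prefer an explicit id; otherwise fall back to a comment search, which
    # must match exactly one snapshot.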
if id is not None:
apply_flag = _check(isamAppliance, id=id)
elif comment is not None:
ret_obj = search(isamAppliance, comment)
if ret_obj['data'] != {}:
if len(ret_obj['data']) == 1:
id = ret_obj['data'][0]
apply_flag = True
            else:
                logger.warning(
                    "There are multiple files with matching comments. Only one snapshot at a time can be applied!")
    else:
        logger.warning("No snapshot detail provided - neither an id nor a comment.")
if force is True or apply_flag is True:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_post_snapshot_id("Applying snapshot", "/snapshots/apply/" + id,
{"snapshot_id": id})
return isamAppliance.create_return_object()
def download(isamAppliance, filename, id=None, comment=None, check_mode=False, force=False):
"""
Download one snapshot file to a zip file.
    Multiple file download is now supported. Simply pass a list of ids.
For backwards compatibility the id parameter and old behaviour is checked at the beginning.
"""
ids = []
download_flag = False
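    # Mirrors delete(): accept a single id, a list of ids, or a comment.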
    if isinstance(id, list):
        for i in id:
            if _check(isamAppliance, id=i) is True:
                download_flag = True
                ids.append(i)
    elif _check(isamAppliance, id=id) is True:
        download_flag = True
        ids.append(id)
    elif comment is not None:
ret_obj = search(isamAppliance, comment=comment)
if ret_obj != {} and ret_obj['data'] != {}:
download_flag = True
ids = ret_obj['data']
logger.info("Downloading the following list of IDs: {}".format(ids))
    if force is True or (
            os.path.exists(filename) is False and download_flag is True):  # don't overwrite an existing file unless forced to
        if check_mode is False:  # in check_mode, skip the actual download
            # Download all ids collected so far
return isamAppliance.invoke_get_file("Downloading multiple snapshots",
"/snapshots/download?record_ids=" + ",".join(ids), filename)
return isamAppliance.create_return_object()
def download_latest(isamAppliance, dir='.', check_mode=False, force=False):
"""
Download latest snapshot file to a zip file.
"""
ret_obj = get(isamAppliance)
    # Get the snapshot with the lowest 'index' value - that is the most recent one
snaps = min(ret_obj['data'], key=lambda snap: snap['index'])
id = snaps['id']
file = snaps['filename']
filename = os.path.join(dir, file)
    return download(isamAppliance, filename, id=id, check_mode=check_mode, force=force)
def apply_latest(isamAppliance, check_mode=False, force=False):
"""
Apply latest snapshot file (revert to latest)
"""
ret_obj = get(isamAppliance)
    # Get the snapshot with the lowest 'index' value - that is the most recent one
snaps = min(ret_obj['data'], key=lambda snap: snap['index'])
id = snaps['id']
    return apply(isamAppliance, id=id, check_mode=check_mode, force=force)
def upload(isamAppliance, file, comment=None, check_mode=False, force=False):
"""
Upload Snapshot file
"""
if comment is None:
import zipfile
zFile = zipfile.ZipFile(file)
if "Comment" in zFile.namelist():
comment = zFile.open("Comment")
if force is True or _check(isamAppliance, comment=comment) is False:
if check_mode is True:
return isamAppliance.create_return_object(changed=True)
else:
return isamAppliance.invoke_post_files(
"Upload Snapshot",
"/snapshots",
[{
'file_formfield': 'uploadedfile',
'filename': file,
'mimetype': 'application/octet-stream'
}],
{
                    'comment': comment if comment is not None else ''
}, json_response=False)
return isamAppliance.create_return_object()
def compare(isamAppliance1, isamAppliance2):
"""
Compare list of snapshots between 2 appliances
"""
ret_obj1 = get(isamAppliance1)
ret_obj2 = get(isamAppliance2)
# id of snapshot is uniquely generated on appliance and should therefore be ignored in comparison.
# filename of snapshot is generated based on exact date/time and will differ even if 2 snapshots are taken near the
# same time. Therefore, filename should be ignored in comparison
for snapshot in ret_obj1['data']:
del snapshot['id']
del snapshot['filename']
for snapshot in ret_obj2['data']:
del snapshot['id']
del snapshot['filename']
return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2, deleted_keys=['id', 'filename'])
|
normal
|
{
"blob_id": "23066cd644826bcfef1ef41f154924ac89e12069",
"index": 2081,
"step-1": "<mask token>\n\n\ndef get(isamAppliance, check_mode=False, force=False):\n \"\"\"\n Get information on existing snapshots\n \"\"\"\n return isamAppliance.invoke_get('Retrieving snapshots', '/snapshots')\n\n\n<mask token>\n\n\ndef search(isamAppliance, comment, check_mode=False, force=False):\n \"\"\"\n Retrieve snapshots with given comment contained\n \"\"\"\n ret_obj = isamAppliance.create_return_object()\n ret_obj_all = get(isamAppliance)\n for obj in ret_obj_all['data']:\n if comment in obj['comment']:\n logger.debug('Snapshot comment \"{0}\" has this string \"{1}\" in it.'\n .format(obj['comment'], comment))\n if ret_obj['data'] == {}:\n ret_obj['data'] = [obj['id']]\n else:\n ret_obj['data'].append(obj['id'])\n return ret_obj\n\n\n<mask token>\n\n\ndef _check(isamAppliance, comment='', id=None):\n \"\"\"\n Check if the last created snapshot has the exact same comment or id exists\n\n :param isamAppliance:\n :param comment:\n :return:\n \"\"\"\n ret_obj = get(isamAppliance)\n if id != None:\n for snaps in ret_obj['data']:\n if snaps['id'] == id:\n logger.debug('Found id: {}'.format(id))\n return True\n else:\n for snaps in ret_obj['data']:\n if snaps['comment'] == comment:\n logger.debug('Found comment: {}'.format(comment))\n return True\n return False\n\n\ndef delete(isamAppliance, id=None, comment=None, check_mode=False, force=False\n ):\n \"\"\"\n Delete snapshot(s) - check id before processing comment. id can be a list\n \"\"\"\n ids = []\n delete_flag = False\n if isinstance(id, list):\n for i in id:\n if _check(isamAppliance, id=i) is True:\n delete_flag = True\n ids.append(i)\n elif _check(isamAppliance, id=id) is True:\n delete_flag = True\n ids.append(id)\n elif comment is not None:\n ret_obj = search(isamAppliance, comment=comment)\n if ret_obj != {} and ret_obj['data'] != {}:\n delete_flag = True\n ids = ret_obj['data']\n logger.info('Deleting the following list of IDs: {}'.format(ids))\n if force is True or delete_flag is True:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_delete('Deleting snapshot', \n '/snapshots/multi_destroy?record_ids=' + ','.join(ids))\n return isamAppliance.create_return_object()\n\n\n<mask token>\n\n\ndef modify(isamAppliance, id, comment, check_mode=False, force=False):\n \"\"\"\n Modify the snapshot comment\n \"\"\"\n if force is True or _check(isamAppliance, id=id) is True:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_put('Modifying snapshot', \n '/snapshots/' + id, {'comment': comment})\n return isamAppliance.create_return_object()\n\n\ndef apply(isamAppliance, id=None, comment=None, check_mode=False, force=False):\n \"\"\"\n Apply a snapshot\n There is a priority in the parameter to be used for snapshot applying: id > comment\n \"\"\"\n apply_flag = False\n if id is not None:\n apply_flag = _check(isamAppliance, id=id)\n elif comment is not None:\n ret_obj = search(isamAppliance, comment)\n if ret_obj['data'] != {}:\n if len(ret_obj['data']) == 1:\n id = ret_obj['data'][0]\n apply_flag = True\n else:\n logger.warn(\n 'There are multiple files with matching comments. 
Only one snapshot at a time can be applied !'\n )\n else:\n logger.warn('No snapshot detail provided - no id nor comment.')\n if force is True or apply_flag is True:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_post_snapshot_id('Applying snapshot',\n '/snapshots/apply/' + id, {'snapshot_id': id})\n return isamAppliance.create_return_object()\n\n\ndef download(isamAppliance, filename, id=None, comment=None, check_mode=\n False, force=False):\n \"\"\"\n Download one snapshot file to a zip file.\n Multiple file download is now supported. Simply pass a list of id.\n For backwards compatibility the id parameter and old behaviour is checked at the beginning.\n \"\"\"\n ids = []\n download_flag = False\n if isinstance(id, list):\n for i in id:\n if _check(isamAppliance, id=i) is True:\n download_flag = True\n ids.append(i)\n elif _check(isamAppliance, id=id) is True:\n download_flag = True\n ids.append(id)\n elif comment is not None:\n ret_obj = search(isamAppliance, comment=comment)\n if ret_obj != {} and ret_obj['data'] != {}:\n download_flag = True\n ids = ret_obj['data']\n logger.info('Downloading the following list of IDs: {}'.format(ids))\n if force is True or os.path.exists(filename\n ) is False and download_flag is True:\n if check_mode is False:\n return isamAppliance.invoke_get_file(\n 'Downloading multiple snapshots', \n '/snapshots/download?record_ids=' + ','.join(ids), filename)\n return isamAppliance.create_return_object()\n\n\n<mask token>\n\n\ndef apply_latest(isamAppliance, check_mode=False, force=False):\n \"\"\"\n Apply latest snapshot file (revert to latest)\n \"\"\"\n ret_obj = get(isamAppliance)\n snaps = min(ret_obj['data'], key=lambda snap: snap['index'])\n id = snaps['id']\n return apply(isamAppliance, id, check_mode, force)\n\n\ndef upload(isamAppliance, file, comment=None, check_mode=False, force=False):\n \"\"\"\n Upload Snapshot file\n \"\"\"\n if comment is None:\n import zipfile\n zFile = zipfile.ZipFile(file)\n if 'Comment' in zFile.namelist():\n comment = zFile.open('Comment')\n if force is True or _check(isamAppliance, comment=comment) is False:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_post_files('Upload Snapshot',\n '/snapshots', [{'file_formfield': 'uploadedfile',\n 'filename': file, 'mimetype': 'application/octet-stream'}],\n {'comment': comment if comment != None else ''},\n json_response=False)\n return isamAppliance.create_return_object()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get(isamAppliance, check_mode=False, force=False):\n \"\"\"\n Get information on existing snapshots\n \"\"\"\n return isamAppliance.invoke_get('Retrieving snapshots', '/snapshots')\n\n\ndef get_latest(isamAppliance, check_mode=False, force=False):\n \"\"\"\n Retrieve id of latest found snapshot\n \"\"\"\n ret_obj_id = isamAppliance.create_return_object()\n ret_obj = get(isamAppliance)\n snaps = min(ret_obj['data'], key=lambda snap: snap['index'])\n ret_obj_id['data'] = snaps['id']\n return ret_obj_id\n\n\ndef search(isamAppliance, comment, check_mode=False, force=False):\n \"\"\"\n Retrieve snapshots with given comment contained\n \"\"\"\n ret_obj = isamAppliance.create_return_object()\n ret_obj_all = get(isamAppliance)\n for obj in ret_obj_all['data']:\n if comment in obj['comment']:\n logger.debug('Snapshot comment \"{0}\" has this string \"{1}\" in it.'\n .format(obj['comment'], comment))\n if ret_obj['data'] == {}:\n ret_obj['data'] = [obj['id']]\n else:\n ret_obj['data'].append(obj['id'])\n return ret_obj\n\n\ndef create(isamAppliance, comment='', check_mode=False, force=False):\n \"\"\"\n Create a new snapshot\n \"\"\"\n if force is True or _check(isamAppliance, comment=comment) is False:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_post('Creating snapshot',\n '/snapshots', {'comment': comment})\n return isamAppliance.create_return_object()\n\n\ndef _check(isamAppliance, comment='', id=None):\n \"\"\"\n Check if the last created snapshot has the exact same comment or id exists\n\n :param isamAppliance:\n :param comment:\n :return:\n \"\"\"\n ret_obj = get(isamAppliance)\n if id != None:\n for snaps in ret_obj['data']:\n if snaps['id'] == id:\n logger.debug('Found id: {}'.format(id))\n return True\n else:\n for snaps in ret_obj['data']:\n if snaps['comment'] == comment:\n logger.debug('Found comment: {}'.format(comment))\n return True\n return False\n\n\ndef delete(isamAppliance, id=None, comment=None, check_mode=False, force=False\n ):\n \"\"\"\n Delete snapshot(s) - check id before processing comment. 
id can be a list\n \"\"\"\n ids = []\n delete_flag = False\n if isinstance(id, list):\n for i in id:\n if _check(isamAppliance, id=i) is True:\n delete_flag = True\n ids.append(i)\n elif _check(isamAppliance, id=id) is True:\n delete_flag = True\n ids.append(id)\n elif comment is not None:\n ret_obj = search(isamAppliance, comment=comment)\n if ret_obj != {} and ret_obj['data'] != {}:\n delete_flag = True\n ids = ret_obj['data']\n logger.info('Deleting the following list of IDs: {}'.format(ids))\n if force is True or delete_flag is True:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_delete('Deleting snapshot', \n '/snapshots/multi_destroy?record_ids=' + ','.join(ids))\n return isamAppliance.create_return_object()\n\n\ndef multi_delete(isamAppliance, ids=[], comment=None, check_mode=False,\n force=False):\n \"\"\"\n Delete multiple snapshots based on id or comment\n \"\"\"\n if comment != None:\n ret_obj = search(isamAppliance, comment=comment)\n if ret_obj['data'] == {}:\n return isamAppliance.create_return_object(changed=False)\n elif ids == []:\n ids = ret_obj['data']\n else:\n for snaps in ret_obj['data']:\n ids.append(snaps)\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_delete('Deleting one or multiple snapshots'\n , '/snapshots/multi_destroy?record_ids=' + ','.join(ids))\n return isamAppliance.create_return_object()\n\n\ndef modify(isamAppliance, id, comment, check_mode=False, force=False):\n \"\"\"\n Modify the snapshot comment\n \"\"\"\n if force is True or _check(isamAppliance, id=id) is True:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_put('Modifying snapshot', \n '/snapshots/' + id, {'comment': comment})\n return isamAppliance.create_return_object()\n\n\ndef apply(isamAppliance, id=None, comment=None, check_mode=False, force=False):\n \"\"\"\n Apply a snapshot\n There is a priority in the parameter to be used for snapshot applying: id > comment\n \"\"\"\n apply_flag = False\n if id is not None:\n apply_flag = _check(isamAppliance, id=id)\n elif comment is not None:\n ret_obj = search(isamAppliance, comment)\n if ret_obj['data'] != {}:\n if len(ret_obj['data']) == 1:\n id = ret_obj['data'][0]\n apply_flag = True\n else:\n logger.warn(\n 'There are multiple files with matching comments. Only one snapshot at a time can be applied !'\n )\n else:\n logger.warn('No snapshot detail provided - no id nor comment.')\n if force is True or apply_flag is True:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_post_snapshot_id('Applying snapshot',\n '/snapshots/apply/' + id, {'snapshot_id': id})\n return isamAppliance.create_return_object()\n\n\ndef download(isamAppliance, filename, id=None, comment=None, check_mode=\n False, force=False):\n \"\"\"\n Download one snapshot file to a zip file.\n Multiple file download is now supported. 
Simply pass a list of id.\n For backwards compatibility the id parameter and old behaviour is checked at the beginning.\n \"\"\"\n ids = []\n download_flag = False\n if isinstance(id, list):\n for i in id:\n if _check(isamAppliance, id=i) is True:\n download_flag = True\n ids.append(i)\n elif _check(isamAppliance, id=id) is True:\n download_flag = True\n ids.append(id)\n elif comment is not None:\n ret_obj = search(isamAppliance, comment=comment)\n if ret_obj != {} and ret_obj['data'] != {}:\n download_flag = True\n ids = ret_obj['data']\n logger.info('Downloading the following list of IDs: {}'.format(ids))\n if force is True or os.path.exists(filename\n ) is False and download_flag is True:\n if check_mode is False:\n return isamAppliance.invoke_get_file(\n 'Downloading multiple snapshots', \n '/snapshots/download?record_ids=' + ','.join(ids), filename)\n return isamAppliance.create_return_object()\n\n\n<mask token>\n\n\ndef apply_latest(isamAppliance, check_mode=False, force=False):\n \"\"\"\n Apply latest snapshot file (revert to latest)\n \"\"\"\n ret_obj = get(isamAppliance)\n snaps = min(ret_obj['data'], key=lambda snap: snap['index'])\n id = snaps['id']\n return apply(isamAppliance, id, check_mode, force)\n\n\ndef upload(isamAppliance, file, comment=None, check_mode=False, force=False):\n \"\"\"\n Upload Snapshot file\n \"\"\"\n if comment is None:\n import zipfile\n zFile = zipfile.ZipFile(file)\n if 'Comment' in zFile.namelist():\n comment = zFile.open('Comment')\n if force is True or _check(isamAppliance, comment=comment) is False:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_post_files('Upload Snapshot',\n '/snapshots', [{'file_formfield': 'uploadedfile',\n 'filename': file, 'mimetype': 'application/octet-stream'}],\n {'comment': comment if comment != None else ''},\n json_response=False)\n return isamAppliance.create_return_object()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef get(isamAppliance, check_mode=False, force=False):\n \"\"\"\n Get information on existing snapshots\n \"\"\"\n return isamAppliance.invoke_get('Retrieving snapshots', '/snapshots')\n\n\ndef get_latest(isamAppliance, check_mode=False, force=False):\n \"\"\"\n Retrieve id of latest found snapshot\n \"\"\"\n ret_obj_id = isamAppliance.create_return_object()\n ret_obj = get(isamAppliance)\n snaps = min(ret_obj['data'], key=lambda snap: snap['index'])\n ret_obj_id['data'] = snaps['id']\n return ret_obj_id\n\n\ndef search(isamAppliance, comment, check_mode=False, force=False):\n \"\"\"\n Retrieve snapshots with given comment contained\n \"\"\"\n ret_obj = isamAppliance.create_return_object()\n ret_obj_all = get(isamAppliance)\n for obj in ret_obj_all['data']:\n if comment in obj['comment']:\n logger.debug('Snapshot comment \"{0}\" has this string \"{1}\" in it.'\n .format(obj['comment'], comment))\n if ret_obj['data'] == {}:\n ret_obj['data'] = [obj['id']]\n else:\n ret_obj['data'].append(obj['id'])\n return ret_obj\n\n\ndef create(isamAppliance, comment='', check_mode=False, force=False):\n \"\"\"\n Create a new snapshot\n \"\"\"\n if force is True or _check(isamAppliance, comment=comment) is False:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_post('Creating snapshot',\n '/snapshots', {'comment': comment})\n return isamAppliance.create_return_object()\n\n\ndef _check(isamAppliance, comment='', id=None):\n \"\"\"\n Check if the last created snapshot has the exact same comment or id exists\n\n :param isamAppliance:\n :param comment:\n :return:\n \"\"\"\n ret_obj = get(isamAppliance)\n if id != None:\n for snaps in ret_obj['data']:\n if snaps['id'] == id:\n logger.debug('Found id: {}'.format(id))\n return True\n else:\n for snaps in ret_obj['data']:\n if snaps['comment'] == comment:\n logger.debug('Found comment: {}'.format(comment))\n return True\n return False\n\n\ndef delete(isamAppliance, id=None, comment=None, check_mode=False, force=False\n ):\n \"\"\"\n Delete snapshot(s) - check id before processing comment. 
id can be a list\n \"\"\"\n ids = []\n delete_flag = False\n if isinstance(id, list):\n for i in id:\n if _check(isamAppliance, id=i) is True:\n delete_flag = True\n ids.append(i)\n elif _check(isamAppliance, id=id) is True:\n delete_flag = True\n ids.append(id)\n elif comment is not None:\n ret_obj = search(isamAppliance, comment=comment)\n if ret_obj != {} and ret_obj['data'] != {}:\n delete_flag = True\n ids = ret_obj['data']\n logger.info('Deleting the following list of IDs: {}'.format(ids))\n if force is True or delete_flag is True:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_delete('Deleting snapshot', \n '/snapshots/multi_destroy?record_ids=' + ','.join(ids))\n return isamAppliance.create_return_object()\n\n\ndef multi_delete(isamAppliance, ids=[], comment=None, check_mode=False,\n force=False):\n \"\"\"\n Delete multiple snapshots based on id or comment\n \"\"\"\n if comment != None:\n ret_obj = search(isamAppliance, comment=comment)\n if ret_obj['data'] == {}:\n return isamAppliance.create_return_object(changed=False)\n elif ids == []:\n ids = ret_obj['data']\n else:\n for snaps in ret_obj['data']:\n ids.append(snaps)\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_delete('Deleting one or multiple snapshots'\n , '/snapshots/multi_destroy?record_ids=' + ','.join(ids))\n return isamAppliance.create_return_object()\n\n\ndef modify(isamAppliance, id, comment, check_mode=False, force=False):\n \"\"\"\n Modify the snapshot comment\n \"\"\"\n if force is True or _check(isamAppliance, id=id) is True:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_put('Modifying snapshot', \n '/snapshots/' + id, {'comment': comment})\n return isamAppliance.create_return_object()\n\n\ndef apply(isamAppliance, id=None, comment=None, check_mode=False, force=False):\n \"\"\"\n Apply a snapshot\n There is a priority in the parameter to be used for snapshot applying: id > comment\n \"\"\"\n apply_flag = False\n if id is not None:\n apply_flag = _check(isamAppliance, id=id)\n elif comment is not None:\n ret_obj = search(isamAppliance, comment)\n if ret_obj['data'] != {}:\n if len(ret_obj['data']) == 1:\n id = ret_obj['data'][0]\n apply_flag = True\n else:\n logger.warn(\n 'There are multiple files with matching comments. Only one snapshot at a time can be applied !'\n )\n else:\n logger.warn('No snapshot detail provided - no id nor comment.')\n if force is True or apply_flag is True:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_post_snapshot_id('Applying snapshot',\n '/snapshots/apply/' + id, {'snapshot_id': id})\n return isamAppliance.create_return_object()\n\n\ndef download(isamAppliance, filename, id=None, comment=None, check_mode=\n False, force=False):\n \"\"\"\n Download one snapshot file to a zip file.\n Multiple file download is now supported. 
Simply pass a list of id.\n For backwards compatibility the id parameter and old behaviour is checked at the beginning.\n \"\"\"\n ids = []\n download_flag = False\n if isinstance(id, list):\n for i in id:\n if _check(isamAppliance, id=i) is True:\n download_flag = True\n ids.append(i)\n elif _check(isamAppliance, id=id) is True:\n download_flag = True\n ids.append(id)\n elif comment is not None:\n ret_obj = search(isamAppliance, comment=comment)\n if ret_obj != {} and ret_obj['data'] != {}:\n download_flag = True\n ids = ret_obj['data']\n logger.info('Downloading the following list of IDs: {}'.format(ids))\n if force is True or os.path.exists(filename\n ) is False and download_flag is True:\n if check_mode is False:\n return isamAppliance.invoke_get_file(\n 'Downloading multiple snapshots', \n '/snapshots/download?record_ids=' + ','.join(ids), filename)\n return isamAppliance.create_return_object()\n\n\n<mask token>\n\n\ndef apply_latest(isamAppliance, check_mode=False, force=False):\n \"\"\"\n Apply latest snapshot file (revert to latest)\n \"\"\"\n ret_obj = get(isamAppliance)\n snaps = min(ret_obj['data'], key=lambda snap: snap['index'])\n id = snaps['id']\n return apply(isamAppliance, id, check_mode, force)\n\n\ndef upload(isamAppliance, file, comment=None, check_mode=False, force=False):\n \"\"\"\n Upload Snapshot file\n \"\"\"\n if comment is None:\n import zipfile\n zFile = zipfile.ZipFile(file)\n if 'Comment' in zFile.namelist():\n comment = zFile.open('Comment')\n if force is True or _check(isamAppliance, comment=comment) is False:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_post_files('Upload Snapshot',\n '/snapshots', [{'file_formfield': 'uploadedfile',\n 'filename': file, 'mimetype': 'application/octet-stream'}],\n {'comment': comment if comment != None else ''},\n json_response=False)\n return isamAppliance.create_return_object()\n\n\ndef compare(isamAppliance1, isamAppliance2):\n \"\"\"\n Compare list of snapshots between 2 appliances\n \"\"\"\n ret_obj1 = get(isamAppliance1)\n ret_obj2 = get(isamAppliance2)\n for snapshot in ret_obj1['data']:\n del snapshot['id']\n del snapshot['filename']\n for snapshot in ret_obj2['data']:\n del snapshot['id']\n del snapshot['filename']\n return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2,\n deleted_keys=['id', 'filename'])\n",
"step-4": "<mask token>\n\n\ndef get(isamAppliance, check_mode=False, force=False):\n \"\"\"\n Get information on existing snapshots\n \"\"\"\n return isamAppliance.invoke_get('Retrieving snapshots', '/snapshots')\n\n\ndef get_latest(isamAppliance, check_mode=False, force=False):\n \"\"\"\n Retrieve id of latest found snapshot\n \"\"\"\n ret_obj_id = isamAppliance.create_return_object()\n ret_obj = get(isamAppliance)\n snaps = min(ret_obj['data'], key=lambda snap: snap['index'])\n ret_obj_id['data'] = snaps['id']\n return ret_obj_id\n\n\ndef search(isamAppliance, comment, check_mode=False, force=False):\n \"\"\"\n Retrieve snapshots with given comment contained\n \"\"\"\n ret_obj = isamAppliance.create_return_object()\n ret_obj_all = get(isamAppliance)\n for obj in ret_obj_all['data']:\n if comment in obj['comment']:\n logger.debug('Snapshot comment \"{0}\" has this string \"{1}\" in it.'\n .format(obj['comment'], comment))\n if ret_obj['data'] == {}:\n ret_obj['data'] = [obj['id']]\n else:\n ret_obj['data'].append(obj['id'])\n return ret_obj\n\n\ndef create(isamAppliance, comment='', check_mode=False, force=False):\n \"\"\"\n Create a new snapshot\n \"\"\"\n if force is True or _check(isamAppliance, comment=comment) is False:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_post('Creating snapshot',\n '/snapshots', {'comment': comment})\n return isamAppliance.create_return_object()\n\n\ndef _check(isamAppliance, comment='', id=None):\n \"\"\"\n Check if the last created snapshot has the exact same comment or id exists\n\n :param isamAppliance:\n :param comment:\n :return:\n \"\"\"\n ret_obj = get(isamAppliance)\n if id != None:\n for snaps in ret_obj['data']:\n if snaps['id'] == id:\n logger.debug('Found id: {}'.format(id))\n return True\n else:\n for snaps in ret_obj['data']:\n if snaps['comment'] == comment:\n logger.debug('Found comment: {}'.format(comment))\n return True\n return False\n\n\ndef delete(isamAppliance, id=None, comment=None, check_mode=False, force=False\n ):\n \"\"\"\n Delete snapshot(s) - check id before processing comment. 
id can be a list\n \"\"\"\n ids = []\n delete_flag = False\n if isinstance(id, list):\n for i in id:\n if _check(isamAppliance, id=i) is True:\n delete_flag = True\n ids.append(i)\n elif _check(isamAppliance, id=id) is True:\n delete_flag = True\n ids.append(id)\n elif comment is not None:\n ret_obj = search(isamAppliance, comment=comment)\n if ret_obj != {} and ret_obj['data'] != {}:\n delete_flag = True\n ids = ret_obj['data']\n logger.info('Deleting the following list of IDs: {}'.format(ids))\n if force is True or delete_flag is True:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_delete('Deleting snapshot', \n '/snapshots/multi_destroy?record_ids=' + ','.join(ids))\n return isamAppliance.create_return_object()\n\n\ndef multi_delete(isamAppliance, ids=[], comment=None, check_mode=False,\n force=False):\n \"\"\"\n Delete multiple snapshots based on id or comment\n \"\"\"\n if comment != None:\n ret_obj = search(isamAppliance, comment=comment)\n if ret_obj['data'] == {}:\n return isamAppliance.create_return_object(changed=False)\n elif ids == []:\n ids = ret_obj['data']\n else:\n for snaps in ret_obj['data']:\n ids.append(snaps)\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_delete('Deleting one or multiple snapshots'\n , '/snapshots/multi_destroy?record_ids=' + ','.join(ids))\n return isamAppliance.create_return_object()\n\n\ndef modify(isamAppliance, id, comment, check_mode=False, force=False):\n \"\"\"\n Modify the snapshot comment\n \"\"\"\n if force is True or _check(isamAppliance, id=id) is True:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_put('Modifying snapshot', \n '/snapshots/' + id, {'comment': comment})\n return isamAppliance.create_return_object()\n\n\ndef apply(isamAppliance, id=None, comment=None, check_mode=False, force=False):\n \"\"\"\n Apply a snapshot\n There is a priority in the parameter to be used for snapshot applying: id > comment\n \"\"\"\n apply_flag = False\n if id is not None:\n apply_flag = _check(isamAppliance, id=id)\n elif comment is not None:\n ret_obj = search(isamAppliance, comment)\n if ret_obj['data'] != {}:\n if len(ret_obj['data']) == 1:\n id = ret_obj['data'][0]\n apply_flag = True\n else:\n logger.warn(\n 'There are multiple files with matching comments. Only one snapshot at a time can be applied !'\n )\n else:\n logger.warn('No snapshot detail provided - no id nor comment.')\n if force is True or apply_flag is True:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_post_snapshot_id('Applying snapshot',\n '/snapshots/apply/' + id, {'snapshot_id': id})\n return isamAppliance.create_return_object()\n\n\ndef download(isamAppliance, filename, id=None, comment=None, check_mode=\n False, force=False):\n \"\"\"\n Download one snapshot file to a zip file.\n Multiple file download is now supported. 
Simply pass a list of id.\n For backwards compatibility the id parameter and old behaviour is checked at the beginning.\n \"\"\"\n ids = []\n download_flag = False\n if isinstance(id, list):\n for i in id:\n if _check(isamAppliance, id=i) is True:\n download_flag = True\n ids.append(i)\n elif _check(isamAppliance, id=id) is True:\n download_flag = True\n ids.append(id)\n elif comment is not None:\n ret_obj = search(isamAppliance, comment=comment)\n if ret_obj != {} and ret_obj['data'] != {}:\n download_flag = True\n ids = ret_obj['data']\n logger.info('Downloading the following list of IDs: {}'.format(ids))\n if force is True or os.path.exists(filename\n ) is False and download_flag is True:\n if check_mode is False:\n return isamAppliance.invoke_get_file(\n 'Downloading multiple snapshots', \n '/snapshots/download?record_ids=' + ','.join(ids), filename)\n return isamAppliance.create_return_object()\n\n\ndef download_latest(isamAppliance, dir='.', check_mode=False, force=False):\n \"\"\"\n Download latest snapshot file to a zip file.\n \"\"\"\n ret_obj = get(isamAppliance)\n snaps = min(ret_obj['data'], key=lambda snap: snap['index'])\n id = snaps['id']\n file = snaps['filename']\n filename = os.path.join(dir, file)\n return download(isamAppliance, filename, id, check_mode, force)\n\n\ndef apply_latest(isamAppliance, check_mode=False, force=False):\n \"\"\"\n Apply latest snapshot file (revert to latest)\n \"\"\"\n ret_obj = get(isamAppliance)\n snaps = min(ret_obj['data'], key=lambda snap: snap['index'])\n id = snaps['id']\n return apply(isamAppliance, id, check_mode, force)\n\n\ndef upload(isamAppliance, file, comment=None, check_mode=False, force=False):\n \"\"\"\n Upload Snapshot file\n \"\"\"\n if comment is None:\n import zipfile\n zFile = zipfile.ZipFile(file)\n if 'Comment' in zFile.namelist():\n comment = zFile.open('Comment')\n if force is True or _check(isamAppliance, comment=comment) is False:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_post_files('Upload Snapshot',\n '/snapshots', [{'file_formfield': 'uploadedfile',\n 'filename': file, 'mimetype': 'application/octet-stream'}],\n {'comment': comment if comment != None else ''},\n json_response=False)\n return isamAppliance.create_return_object()\n\n\ndef compare(isamAppliance1, isamAppliance2):\n \"\"\"\n Compare list of snapshots between 2 appliances\n \"\"\"\n ret_obj1 = get(isamAppliance1)\n ret_obj2 = get(isamAppliance2)\n for snapshot in ret_obj1['data']:\n del snapshot['id']\n del snapshot['filename']\n for snapshot in ret_obj2['data']:\n del snapshot['id']\n del snapshot['filename']\n return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2,\n deleted_keys=['id', 'filename'])\n",
"step-5": "import logging\nimport ibmsecurity.utilities.tools\nimport os.path\n\nlogger = logging.getLogger(__name__)\n\n\ndef get(isamAppliance, check_mode=False, force=False):\n \"\"\"\n Get information on existing snapshots\n \"\"\"\n return isamAppliance.invoke_get(\"Retrieving snapshots\", \"/snapshots\")\n\n\ndef get_latest(isamAppliance, check_mode=False, force=False):\n \"\"\"\n Retrieve id of latest found snapshot\n \"\"\"\n ret_obj_id = isamAppliance.create_return_object()\n ret_obj = get(isamAppliance)\n\n # Get snapshot with lowest 'id' value - that will be latest one\n snaps = min(ret_obj['data'], key=lambda snap: snap['index'])\n ret_obj_id['data'] = snaps['id']\n\n return ret_obj_id\n\n\ndef search(isamAppliance, comment, check_mode=False, force=False):\n \"\"\"\n Retrieve snapshots with given comment contained\n \"\"\"\n ret_obj = isamAppliance.create_return_object()\n ret_obj_all = get(isamAppliance)\n\n for obj in ret_obj_all['data']:\n if comment in obj['comment']:\n logger.debug(\"Snapshot comment \\\"{0}\\\" has this string \\\"{1}\\\" in it.\".format(obj['comment'], comment))\n if ret_obj['data'] == {}:\n ret_obj['data'] = [obj['id']]\n else:\n ret_obj['data'].append(obj['id'])\n\n return ret_obj\n\n\ndef create(isamAppliance, comment='', check_mode=False, force=False):\n \"\"\"\n Create a new snapshot\n \"\"\"\n if force is True or _check(isamAppliance, comment=comment) is False:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_post(\"Creating snapshot\", \"/snapshots\",\n {\n 'comment': comment\n })\n\n return isamAppliance.create_return_object()\n\n\ndef _check(isamAppliance, comment='', id=None):\n \"\"\"\n Check if the last created snapshot has the exact same comment or id exists\n\n :param isamAppliance:\n :param comment:\n :return:\n \"\"\"\n ret_obj = get(isamAppliance)\n\n if id != None:\n for snaps in ret_obj['data']:\n if snaps['id'] == id:\n logger.debug(\"Found id: {}\".format(id))\n return True\n else:\n for snaps in ret_obj['data']:\n if snaps['comment'] == comment:\n logger.debug(\"Found comment: {}\".format(comment))\n return True\n\n return False\n\n\ndef delete(isamAppliance, id=None, comment=None, check_mode=False, force=False):\n \"\"\"\n Delete snapshot(s) - check id before processing comment. 
id can be a list\n \"\"\"\n ids = []\n delete_flag = False\n if (isinstance(id, list)):\n for i in id:\n if _check(isamAppliance, id=i) is True:\n delete_flag = True\n ids.append(i)\n elif (_check(isamAppliance, id=id) is True):\n delete_flag = True\n ids.append(id)\n elif (comment is not None):\n ret_obj = search(isamAppliance, comment=comment)\n if ret_obj != {} and ret_obj['data'] != {}:\n delete_flag = True\n ids = ret_obj['data']\n logger.info(\"Deleting the following list of IDs: {}\".format(ids))\n if force is True or delete_flag is True:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_delete(\"Deleting snapshot\",\n \"/snapshots/multi_destroy?record_ids=\" + \",\".join(ids))\n\n return isamAppliance.create_return_object()\n\ndef multi_delete(isamAppliance, ids=[], comment=None, check_mode=False, force=False):\n \"\"\"\n Delete multiple snapshots based on id or comment\n \"\"\"\n if comment != None:\n ret_obj = search(isamAppliance, comment=comment)\n if ret_obj['data'] == {}:\n return isamAppliance.create_return_object(changed=False)\n else:\n if ids == []:\n ids = ret_obj['data']\n else:\n for snaps in ret_obj['data']:\n ids.append(snaps)\n\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_delete(\"Deleting one or multiple snapshots\", \"/snapshots/multi_destroy?record_ids=\" + \",\".join(ids))\n\n return isamAppliance.create_return_object()\n\ndef modify(isamAppliance, id, comment, check_mode=False, force=False):\n \"\"\"\n Modify the snapshot comment\n \"\"\"\n if force is True or _check(isamAppliance, id=id) is True:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_put(\"Modifying snapshot\", \"/snapshots/\" + id,\n {\n 'comment': comment\n })\n\n return isamAppliance.create_return_object()\n\n\ndef apply(isamAppliance, id=None, comment=None, check_mode=False, force=False):\n \"\"\"\n Apply a snapshot\n There is a priority in the parameter to be used for snapshot applying: id > comment\n \"\"\"\n apply_flag = False\n if id is not None:\n apply_flag = _check(isamAppliance, id=id)\n elif comment is not None:\n ret_obj = search(isamAppliance, comment)\n if ret_obj['data'] != {}:\n if len(ret_obj['data']) == 1:\n id = ret_obj['data'][0]\n apply_flag = True\n else:\n logger.warn(\n \"There are multiple files with matching comments. Only one snapshot at a time can be applied !\")\n else:\n logger.warn(\"No snapshot detail provided - no id nor comment.\")\n if force is True or apply_flag is True:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_post_snapshot_id(\"Applying snapshot\", \"/snapshots/apply/\" + id,\n {\"snapshot_id\": id})\n\n return isamAppliance.create_return_object()\n\n\ndef download(isamAppliance, filename, id=None, comment=None, check_mode=False, force=False):\n \"\"\"\n Download one snapshot file to a zip file.\n Multiple file download is now supported. 
Simply pass a list of id.\n For backwards compatibility the id parameter and old behaviour is checked at the beginning.\n \"\"\"\n ids = []\n download_flag = False\n if (isinstance(id, list)):\n for i in id:\n if _check(isamAppliance, id=i) is True:\n download_flag = True\n ids.append(i)\n elif (_check(isamAppliance, id=id) is True):\n download_flag = True\n ids.append(id)\n elif (comment is not None):\n ret_obj = search(isamAppliance, comment=comment)\n if ret_obj != {} and ret_obj['data'] != {}:\n download_flag = True\n ids = ret_obj['data']\n logger.info(\"Downloading the following list of IDs: {}\".format(ids))\n\n if force is True or (\n os.path.exists(filename) is False and download_flag is True): # Don't overwrite if not forced to\n if check_mode is False: # We are in check_mode but would try to download named ids\n # Download all ids known so far\n return isamAppliance.invoke_get_file(\"Downloading multiple snapshots\",\n \"/snapshots/download?record_ids=\" + \",\".join(ids), filename)\n\n return isamAppliance.create_return_object()\n\n\ndef download_latest(isamAppliance, dir='.', check_mode=False, force=False):\n \"\"\"\n Download latest snapshot file to a zip file.\n \"\"\"\n ret_obj = get(isamAppliance)\n\n # Get snapshot with lowest 'id' value - that will be latest one\n snaps = min(ret_obj['data'], key=lambda snap: snap['index'])\n id = snaps['id']\n file = snaps['filename']\n filename = os.path.join(dir, file)\n\n return download(isamAppliance, filename, id, check_mode, force)\n\n\ndef apply_latest(isamAppliance, check_mode=False, force=False):\n \"\"\"\n Apply latest snapshot file (revert to latest)\n \"\"\"\n ret_obj = get(isamAppliance)\n\n # Get snapshot with lowest 'id' value - that will be latest one\n snaps = min(ret_obj['data'], key=lambda snap: snap['index'])\n id = snaps['id']\n\n return apply(isamAppliance, id, check_mode, force)\n\n\ndef upload(isamAppliance, file, comment=None, check_mode=False, force=False):\n \"\"\"\n Upload Snapshot file\n \"\"\"\n if comment is None:\n import zipfile\n\n zFile = zipfile.ZipFile(file)\n if \"Comment\" in zFile.namelist():\n comment = zFile.open(\"Comment\")\n\n if force is True or _check(isamAppliance, comment=comment) is False:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n\n return isamAppliance.invoke_post_files(\n \"Upload Snapshot\",\n \"/snapshots\",\n [{\n 'file_formfield': 'uploadedfile',\n 'filename': file,\n 'mimetype': 'application/octet-stream'\n }],\n {\n 'comment': comment if comment != None else ''\n }, json_response=False)\n\n return isamAppliance.create_return_object()\n\n\ndef compare(isamAppliance1, isamAppliance2):\n \"\"\"\n Compare list of snapshots between 2 appliances\n \"\"\"\n ret_obj1 = get(isamAppliance1)\n ret_obj2 = get(isamAppliance2)\n\n # id of snapshot is uniquely generated on appliance and should therefore be ignored in comparison.\n # filename of snapshot is generated based on exact date/time and will differ even if 2 snapshots are taken near the\n # same time. Therefore, filename should be ignored in comparison\n for snapshot in ret_obj1['data']:\n del snapshot['id']\n del snapshot['filename']\n\n for snapshot in ret_obj2['data']:\n del snapshot['id']\n del snapshot['filename']\n\n return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2, deleted_keys=['id', 'filename'])\n",
"step-ids": [
9,
12,
13,
14,
17
]
}
|
[
9,
12,
13,
14,
17
] |
from django.urls import path, include
from django.conf.urls import url, re_path
#from rest_framework.urlpatterns import format_suffix_patterns
from .views import (HomePageView,
WordViewSet, WordNormalViewSet,
TextViewSet, TextNormalViewSet, TextTagViewSet,
TagSetViewSet, TagViewSet, TokenViewSet, TokenTagViewSet,
ValidatorViewSet, NormalizerViewSet,
TaggerViewSet,
)
from rest_framework.routers import DefaultRouter, SimpleRouter
class OptionalSlashRouter(DefaultRouter):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.trailing_slash = '/?'
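# With trailing_slash set to the regex '/?', each registered route matches both
# '/api/words' and '/api/words/' without issuing a redirect.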
router = OptionalSlashRouter()
router.register(r'words', WordViewSet)
router.register(r'word-normals', WordNormalViewSet)
router.register(r'texts', TextViewSet)
router.register(r'text-normals', TextNormalViewSet)
router.register(r'text-tags', TextTagViewSet)
router.register(r'tag-sets', TagSetViewSet)
router.register(r'tags', TagViewSet)
router.register(r'tokens', TokenViewSet)
router.register(r'token-tags', TokenTagViewSet)
router.register(r'validators', ValidatorViewSet)
router.register(r'normalizers', NormalizerViewSet)
router.register(r'taggers', TaggerViewSet)
# router.register(r'sentences', SentenceViewSet)
# router.register(r'normal-sentences', NormalSentenceViewSet)
# router.register(r'tagged-sentences', TaggedSentenceViewSet)
# router.register(r'rules/translation-characters', TranslationCharacterViewSet)
# router.register(r'rules/refinement-patterns', RefinementPatternViewSet)
urlpatterns = [
re_path(r'^$', HomePageView.as_view(), name='home'),
re_path(r'^api/', include(router.urls)),
]
# urlpatterns = [
# # url('', HomePageView.as_view(), name = 'home'),
# path('', views.index, name='home'),
# path('word/', WordCreateView.as_view(), name="words"),
# path('word/<int:pk>/', WordDetailsView.as_view(), name="word"),
# path('text/fix/', views.fix_text, name="fix_text"),
# ]
#urlpatterns = format_suffix_patterns(urlpatterns)
# class OptionalSlashRouter(SimpleRouter):
# def __init__(self):
# self.trailing_slash = '/?'
# super().__init__()
# # super(SimpleRouter, self).__init__()
# router.register(r'', HomeViewSet, basename='home')
# router.register(r'api', router.APIRootView, basename='api')
# router.register(r'schema', router.APISchemaView, basename='schema')
|
normal
|
{
"blob_id": "991b124d365443744c946b258504c97e9076dcea",
"index": 7627,
"step-1": "<mask token>\n\n\nclass OptionalSlashRouter(DefaultRouter):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.trailing_slash = '/?'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass OptionalSlashRouter(DefaultRouter):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.trailing_slash = '/?'\n\n\n<mask token>\nrouter.register('words', WordViewSet)\nrouter.register('word-normals', WordNormalViewSet)\nrouter.register('texts', TextViewSet)\nrouter.register('text-normals', TextNormalViewSet)\nrouter.register('text-tags', TextTagViewSet)\nrouter.register('tag-sets', TagSetViewSet)\nrouter.register('tags', TagViewSet)\nrouter.register('tokens', TokenViewSet)\nrouter.register('token-tags', TokenTagViewSet)\nrouter.register('validators', ValidatorViewSet)\nrouter.register('normalizers', NormalizerViewSet)\nrouter.register('taggers', TaggerViewSet)\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass OptionalSlashRouter(DefaultRouter):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.trailing_slash = '/?'\n\n\nrouter = OptionalSlashRouter()\nrouter.register('words', WordViewSet)\nrouter.register('word-normals', WordNormalViewSet)\nrouter.register('texts', TextViewSet)\nrouter.register('text-normals', TextNormalViewSet)\nrouter.register('text-tags', TextTagViewSet)\nrouter.register('tag-sets', TagSetViewSet)\nrouter.register('tags', TagViewSet)\nrouter.register('tokens', TokenViewSet)\nrouter.register('token-tags', TokenTagViewSet)\nrouter.register('validators', ValidatorViewSet)\nrouter.register('normalizers', NormalizerViewSet)\nrouter.register('taggers', TaggerViewSet)\nurlpatterns = [re_path('^$', HomePageView.as_view(), name='home'), re_path(\n '^api/', include(router.urls))]\n",
"step-4": "from django.urls import path, include\nfrom django.conf.urls import url, re_path\nfrom .views import HomePageView, WordViewSet, WordNormalViewSet, TextViewSet, TextNormalViewSet, TextTagViewSet, TagSetViewSet, TagViewSet, TokenViewSet, TokenTagViewSet, ValidatorViewSet, NormalizerViewSet, TaggerViewSet\nfrom rest_framework.routers import DefaultRouter, SimpleRouter\n\n\nclass OptionalSlashRouter(DefaultRouter):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.trailing_slash = '/?'\n\n\nrouter = OptionalSlashRouter()\nrouter.register('words', WordViewSet)\nrouter.register('word-normals', WordNormalViewSet)\nrouter.register('texts', TextViewSet)\nrouter.register('text-normals', TextNormalViewSet)\nrouter.register('text-tags', TextTagViewSet)\nrouter.register('tag-sets', TagSetViewSet)\nrouter.register('tags', TagViewSet)\nrouter.register('tokens', TokenViewSet)\nrouter.register('token-tags', TokenTagViewSet)\nrouter.register('validators', ValidatorViewSet)\nrouter.register('normalizers', NormalizerViewSet)\nrouter.register('taggers', TaggerViewSet)\nurlpatterns = [re_path('^$', HomePageView.as_view(), name='home'), re_path(\n '^api/', include(router.urls))]\n",
"step-5": "from django.urls import path, include\nfrom django.conf.urls import url, re_path\n#from rest_framework.urlpatterns import format_suffix_patterns\nfrom .views import (HomePageView, \n WordViewSet, WordNormalViewSet,\n TextViewSet, TextNormalViewSet, TextTagViewSet, \n TagSetViewSet, TagViewSet, TokenViewSet, TokenTagViewSet,\n ValidatorViewSet, NormalizerViewSet, \n TaggerViewSet,\n )\nfrom rest_framework.routers import DefaultRouter, SimpleRouter\n\n\nclass OptionalSlashRouter(DefaultRouter): \n def __init__(self, *args, **kwargs): \n super().__init__(*args, **kwargs)\n self.trailing_slash = '/?'\n\nrouter = OptionalSlashRouter()\nrouter.register(r'words', WordViewSet)\nrouter.register(r'word-normals', WordNormalViewSet)\n\nrouter.register(r'texts', TextViewSet)\nrouter.register(r'text-normals', TextNormalViewSet)\nrouter.register(r'text-tags', TextTagViewSet)\n\nrouter.register(r'tag-sets', TagSetViewSet)\nrouter.register(r'tags', TagViewSet)\nrouter.register(r'tokens', TokenViewSet)\nrouter.register(r'token-tags', TokenTagViewSet)\n\nrouter.register(r'validators', ValidatorViewSet)\nrouter.register(r'normalizers', NormalizerViewSet)\nrouter.register(r'taggers', TaggerViewSet)\n\n\n# router.register(r'sentences', SentenceViewSet)\n# router.register(r'normal-sentences', NormalSentenceViewSet)\n# router.register(r'tagged-sentences', TaggedSentenceViewSet)\n# router.register(r'rules/translation-characters', TranslationCharacterViewSet)\n# router.register(r'rules/refinement-patt/erns', RefinementPatternViewSet)\n\nurlpatterns = [\n re_path(r'^$', HomePageView.as_view(), name='home'),\n re_path(r'^api/', include(router.urls)),\n]\n\n# urlpatterns = [\n# # url('', HomePageView.as_view(), name = 'home'),\n# path('', views.index, name='home'),\n# path('word/', WordCreateView.as_view(), name=\"words\"),\n# path('word/<int:pk>/', WordDetailsView.as_view(), name=\"word\"),\n# path('text/fix/', views.fix_text, name=\"fix_text\"),\n# ]\n\n#urlpatterns = format_suffix_patterns(urlpatterns)\n\n\n# class OptionalSlashRouter(SimpleRouter):\n\n# def __init__(self):\n# self.trailing_slash = '/?'\n# super().__init__()\n# # super(SimpleRouter, self).__init__()\n\n# router.register(r'', HomeViewSet, basename='home')\n# router.register(r'api', router.APIRootView, basename='api')\n# router.register(r'schema', router.APISchemaView, basename='schema')",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#! /usr/bin/env python3
from PIL import Image
from imtools import *
import os
cwd = os.getcwd()
filelist = get_imlist(os.getcwd())
print(filelist)
for infile in filelist:
outfile = os.path.splitext(infile)[0] + ".jpg"
if infile != outfile:
try:
Image.open(infile).save(outfile)
except IOError:
print("cannot convert", infile)
|
normal
|
{
"blob_id": "31416f1ba9f3c44a7aa740365e05b5db49e70444",
"index": 9106,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(filelist)\nfor infile in filelist:\n outfile = os.path.splitext(infile)[0] + '.jpg'\n if infile != outfile:\n try:\n Image.open(infile).save(outfile)\n except IOError:\n print('cannot convert', infile)\n",
"step-3": "<mask token>\ncwd = os.getcwd()\nfilelist = get_imlist(os.getcwd())\nprint(filelist)\nfor infile in filelist:\n outfile = os.path.splitext(infile)[0] + '.jpg'\n if infile != outfile:\n try:\n Image.open(infile).save(outfile)\n except IOError:\n print('cannot convert', infile)\n",
"step-4": "from PIL import Image\nfrom imtools import *\nimport os\ncwd = os.getcwd()\nfilelist = get_imlist(os.getcwd())\nprint(filelist)\nfor infile in filelist:\n outfile = os.path.splitext(infile)[0] + '.jpg'\n if infile != outfile:\n try:\n Image.open(infile).save(outfile)\n except IOError:\n print('cannot convert', infile)\n",
"step-5": "#! /usr/bin/env python3\n\nfrom PIL import Image\nfrom imtools import *\nimport os\n\ncwd = os.getcwd()\n\nfilelist = get_imlist(os.getcwd())\n\nprint(filelist)\n\nfor infile in filelist:\n\toutfile = os.path.splitext(infile)[0] + \".jpg\"\n\tif infile != outfile:\n\t\ttry:\n\t\t\tImage.open(infile).save(outfile)\n\t\texcept IOError:\n\t\t\tprint(\"cannot convert\", infile)\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
Function of main.py:

config loader
hparams loader
feature extraction
Call model training and validation
Model Save and Load
Call model validation
"""
"""A very simple MNIST classifier.
See extensive documentation at
https://www.tensorflow.org/get_started/mnist/beginners
usage: main.py [options]
options:
--data_dir=<dir> Where to get training data [default: ./datasets/MNIST/].
--base_log_dir=<dir> Where to save models [default: ./generated/logdir/].
  --model                Which model to use [default: deep_mnist_AdamW].
  --experiment_name      Name of the experiment; defines the log path [default: deep_mnist_AdamW_wd1e4].
--load_model=<dir> Where to load checkpoint, if necessary [default: None]
--total_epoch Max num of training epochs [default: by the model].
--eval_per_epoch Model eval per n epoch [default: by the model].
--save_per_epoch Model save per n epoch [default: by the model].
--batch_size Batch size [default: by the model].
-h, --help Show this help message and exit
"""
import argparse
import sys
import datetime
from tqdm import tqdm
import numpy as np
import os
import tensorflow as tf
from model.model_example import model_example
from model.deep_mnist import deep_mnist
from model.VAE.autoencoder_vae import autoencoder
from model.deep_mnist_with_Res import deep_mnist_with_Res
from preprocessing_util import autoencoder_vae_add_noise
from training_util import save,load
import params
FLAGS = None
def prepare_params(FLAGS):
if FLAGS.experiment_name == "default":
now=datetime.datetime.now()
FLAGS.experiment_name=now.strftime('%Y%m%d%H%M%S')
FLAGS.log_dir = FLAGS.base_log_dir+FLAGS.experiment_name+'_'+FLAGS.model+'/'
return FLAGS
def main():
#Avoid tensorboard error on IPython
tf.reset_default_graph()
# Prepare data
train_data = np.load(os.path.join(FLAGS.data_dir, 'train_data.npy'))
train_labels = np.load(os.path.join(FLAGS.data_dir, 'train_labels.npy'))
test_data = np.load(os.path.join(FLAGS.data_dir, 'test_data.npy'))
test_labels = np.load(os.path.join(FLAGS.data_dir, 'test_labels.npy'))
train_set = tf.data.Dataset.from_tensor_slices((train_data, train_labels))
test_set = tf.data.Dataset.from_tensor_slices((test_data, test_labels))
if FLAGS.model == "autoencoder_vae":
train_set = train_set.map(autoencoder_vae_add_noise)
test_set = test_set.map(autoencoder_vae_add_noise)
# Do reshuffle to avoid biased estimation when model reloaded
train_set = train_set.shuffle(
FLAGS.batch_size,reshuffle_each_iteration=True).batch(
FLAGS.batch_size).repeat(10)
test_set = test_set.shuffle(
FLAGS.batch_size,reshuffle_each_iteration=True).batch(
FLAGS.batch_size).repeat(10)
trainIter = train_set.make_initializable_iterator()
next_examples, next_labels = trainIter.get_next()
testIter = test_set.make_initializable_iterator()
test_examples, text_labels = testIter.get_next()
# Create the model
if FLAGS.model == "deep_mnist":
hp = params.Deep_MNIST_model_params
x = tf.placeholder(tf.float32, [None, hp.input_dim])
y = tf.placeholder(tf.float32, [None, hp.output_dim])
keep_probe = tf.placeholder(tf.float32)
model = deep_mnist(hp, x ,y, keep_probe)
train_fetch_list = [model.train_step,model.merged]
test_fetch_list = [model.accuracy,model.merged]
if FLAGS.model == "deep_mnist_AdamW":
hp = params.Deep_MNIST_model_params
x = tf.placeholder(tf.float32, [None, hp.input_dim])
y = tf.placeholder(tf.float32, [None, hp.output_dim])
keep_probe = tf.placeholder(tf.float32)
model = deep_mnist(hp, x ,y, keep_probe,use_adamW = True)
train_fetch_list = [model.train_step,model.merged]
test_fetch_list = [model.accuracy,model.merged]
if FLAGS.model == "deep_mnist_with_Res":
hp = params.Deep_MNIST_model_params
x = tf.placeholder(tf.float32, [None, hp.input_dim])
y = tf.placeholder(tf.float32, [None, hp.output_dim])
keep_probe = tf.placeholder(tf.float32)
model = deep_mnist_with_Res(hp, x ,y, keep_probe)
train_fetch_list = [model.train_step,model.merged]
test_fetch_list = [model.accuracy,model.merged]
if FLAGS.model == "autoencoder_vae":
hp = params.autoencoder_vae_model_params
x = tf.placeholder(tf.float32, [None, hp.input_dim])
x_hat = tf.placeholder(tf.float32, [None, hp.input_dim])
keep_probe = tf.placeholder(tf.float32)
model = autoencoder(hp, x ,x_hat, keep_probe)
y=x_hat
train_fetch_list = [model.train_step,model.merged]
test_fetch_list = [model.loss_mean,model.merged]
#Prepare tensorboard
train_writer = tf.summary.FileWriter(FLAGS.log_dir+'/train',model.train_step.graph)
test_writer = tf.summary.FileWriter(FLAGS.log_dir+'/test')
print('checkout result of this time with "tensorboard --logdir={}"'.format(FLAGS.log_dir))
print('For result compare run "tensorboard --logdir={}"'.format(FLAGS.base_log_dir))
session_conf = tf.ConfigProto(
gpu_options=tf.GPUOptions(
allow_growth=True,
),
)
saver = tf.train.Saver()
#Start tf session
with tf.Session(config=session_conf) as sess:
try:
sess.run(tf.global_variables_initializer())
sess.run(trainIter.initializer)
sess.run(testIter.initializer)
# Restore variables from disk.
if FLAGS.load_model != None:
load(saver, sess, FLAGS.load_model)
            epoch = 0  # keep epoch defined for the save in the finally block
            for epoch in tqdm(range(FLAGS.total_epoch)):
batch_xs, batch_ys = sess.run([next_examples, next_labels])
train_feed_dict={x: batch_xs,
y: batch_ys,
keep_probe: hp.keep_probe}
_,summary = sess.run(train_fetch_list, feed_dict=train_feed_dict)
if epoch % 10 == 0:
train_writer.add_summary(summary, epoch)
if epoch % FLAGS.eval_per_epoch == 0:
batch_xs, batch_ys = sess.run([test_examples, text_labels])
test_feed_dict={x: batch_xs,
y: batch_ys,
keep_probe: hp.keep_probe_test}
mertics,summary = sess.run(test_fetch_list, feed_dict=test_feed_dict)
test_writer.add_summary(summary, epoch)
if epoch % FLAGS.save_per_epoch == 0:
save(saver, sess, FLAGS.log_dir, epoch)
        except Exception as e:
            print('training stopped early:', e)
finally:
save(saver, sess, FLAGS.log_dir, epoch)
train_writer.close()
test_writer.close()
if __name__ == '__main__':
default_hp=params.default_hyper_params
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default="./datasets/MNIST/")
parser.add_argument('--experiment_name', type=str, default="deep_mnist_AdamW_wd1e4")
parser.add_argument('--base_log_dir', type=str, default="./generated/logdir/")
parser.add_argument('--model', type=str, default="deep_mnist_AdamW")
parser.add_argument('--load_model', type=str, default=None)
parser.add_argument('--total_epoch', type=int, default=default_hp.num_epochs)
parser.add_argument('--eval_per_epoch', type=int, default=default_hp.eval_per_epoch)
parser.add_argument('--save_per_epoch', type=int, default=default_hp.save_per_epoch)
parser.add_argument('--batch_size', type=int, default=default_hp.batch_size)
FLAGS, unparsed = parser.parse_known_args()
FLAGS = prepare_params(FLAGS)
main()
|
normal
|
{
"blob_id": "c6174fae929366cabb8da3d810df705b19895c1c",
"index": 2763,
"step-1": "\"\"\"\nFunction of main.py:\n\nconfig loader\nhprams loader\nfeature extraction\nCall model training and validation\nModel Save and Load\nCall model validation\n\n载入训练参数\n载入指定模型超参数\n调用特征提取\n调用模型训练和验证\n模型保存与载入\n调用模型验证\n\"\"\"\n\n\"\"\"A very simple MNIST classifier.\nSee extensive documentation at\nhttps://www.tensorflow.org/get_started/mnist/beginners\nusage: main.py [options] \noptions:\n --data_dir=<dir> Where to get training data [default: ./datasets/MNIST/].\n --base_log_dir=<dir> Where to save models [default: ./generated/logdir/].\n --model Which model to use [default: autoencoder_vae].\n --experiment_name Name of experiment defines the log path [default: Date-of-now].\n --load_model=<dir> Where to load checkpoint, if necessary [default: None]\n --total_epoch Max num of training epochs [default: by the model].\n --eval_per_epoch Model eval per n epoch [default: by the model].\n --save_per_epoch Model save per n epoch [default: by the model].\n --batch_size Batch size [default: by the model].\n -h, --help Show this help message and exit\n\"\"\"\n\nimport argparse\nimport sys\nimport datetime\nfrom tqdm import tqdm\nimport numpy as np\nimport os\n\nimport tensorflow as tf\n\nfrom model.model_example import model_example\nfrom model.deep_mnist import deep_mnist\nfrom model.VAE.autoencoder_vae import autoencoder\nfrom model.deep_mnist_with_Res import deep_mnist_with_Res\n\nfrom preprocessing_util import autoencoder_vae_add_noise\nfrom training_util import save,load\nimport params \n\nFLAGS = None\n\ndef prepare_params(FLAGS):\n if FLAGS.experiment_name == \"default\":\n now=datetime.datetime.now()\n FLAGS.experiment_name=now.strftime('%Y%m%d%H%M%S')\n FLAGS.log_dir = FLAGS.base_log_dir+FLAGS.experiment_name+'_'+FLAGS.model+'/'\n return FLAGS\n\n\ndef main():\n #Avoid tensorboard error on IPython\n tf.reset_default_graph()\n \n # Prepare data\n train_data = np.load(os.path.join(FLAGS.data_dir, 'train_data.npy'))\n train_labels = np.load(os.path.join(FLAGS.data_dir, 'train_labels.npy'))\n test_data = np.load(os.path.join(FLAGS.data_dir, 'test_data.npy'))\n test_labels = np.load(os.path.join(FLAGS.data_dir, 'test_labels.npy'))\n \n train_set = tf.data.Dataset.from_tensor_slices((train_data, train_labels))\n test_set = tf.data.Dataset.from_tensor_slices((test_data, test_labels))\n \n if FLAGS.model == \"autoencoder_vae\":\n train_set = train_set.map(autoencoder_vae_add_noise)\n test_set = test_set.map(autoencoder_vae_add_noise)\n \n # Do reshuffle to avoid biased estimation when model reloaded\n train_set = train_set.shuffle(\n FLAGS.batch_size,reshuffle_each_iteration=True).batch(\n FLAGS.batch_size).repeat(10)\n test_set = test_set.shuffle(\n FLAGS.batch_size,reshuffle_each_iteration=True).batch(\n FLAGS.batch_size).repeat(10)\n \n trainIter = train_set.make_initializable_iterator()\n next_examples, next_labels = trainIter.get_next()\n \n testIter = test_set.make_initializable_iterator()\n test_examples, text_labels = testIter.get_next()\n \n # Create the model\n \n if FLAGS.model == \"deep_mnist\":\n hp = params.Deep_MNIST_model_params\n \n x = tf.placeholder(tf.float32, [None, hp.input_dim])\n y = tf.placeholder(tf.float32, [None, hp.output_dim])\n keep_probe = tf.placeholder(tf.float32)\n \n model = deep_mnist(hp, x ,y, keep_probe)\n \n train_fetch_list = [model.train_step,model.merged]\n test_fetch_list = [model.accuracy,model.merged]\n \n if FLAGS.model == \"deep_mnist_AdamW\":\n hp = params.Deep_MNIST_model_params\n \n x = tf.placeholder(tf.float32, [None, 
hp.input_dim])\n y = tf.placeholder(tf.float32, [None, hp.output_dim])\n keep_probe = tf.placeholder(tf.float32)\n \n model = deep_mnist(hp, x ,y, keep_probe,use_adamW = True)\n \n train_fetch_list = [model.train_step,model.merged]\n test_fetch_list = [model.accuracy,model.merged]\n \n if FLAGS.model == \"deep_mnist_with_Res\":\n hp = params.Deep_MNIST_model_params\n \n x = tf.placeholder(tf.float32, [None, hp.input_dim])\n y = tf.placeholder(tf.float32, [None, hp.output_dim])\n keep_probe = tf.placeholder(tf.float32)\n \n model = deep_mnist_with_Res(hp, x ,y, keep_probe)\n \n train_fetch_list = [model.train_step,model.merged]\n test_fetch_list = [model.accuracy,model.merged]\n \n if FLAGS.model == \"autoencoder_vae\":\n hp = params.autoencoder_vae_model_params\n \n x = tf.placeholder(tf.float32, [None, hp.input_dim])\n x_hat = tf.placeholder(tf.float32, [None, hp.input_dim])\n keep_probe = tf.placeholder(tf.float32)\n \n model = autoencoder(hp, x ,x_hat, keep_probe)\n \n y=x_hat\n train_fetch_list = [model.train_step,model.merged]\n test_fetch_list = [model.loss_mean,model.merged]\n \n #Prepare tensorboard\n train_writer = tf.summary.FileWriter(FLAGS.log_dir+'/train',model.train_step.graph)\n test_writer = tf.summary.FileWriter(FLAGS.log_dir+'/test')\n print('checkout result of this time with \"tensorboard --logdir={}\"'.format(FLAGS.log_dir))\n print('For result compare run \"tensorboard --logdir={}\"'.format(FLAGS.base_log_dir))\n \n \n session_conf = tf.ConfigProto(\n gpu_options=tf.GPUOptions(\n allow_growth=True,\n ),\n )\n saver = tf.train.Saver()\n\n #Start tf session\n with tf.Session(config=session_conf) as sess:\n try:\n sess.run(tf.global_variables_initializer())\n sess.run(trainIter.initializer)\n sess.run(testIter.initializer)\n \n # Restore variables from disk.\n if FLAGS.load_model != None:\n load(saver, sess, FLAGS.load_model)\n \n \n for epoch in tqdm(range(FLAGS.total_epoch)):\n batch_xs, batch_ys = sess.run([next_examples, next_labels])\n train_feed_dict={x: batch_xs,\n y: batch_ys,\n keep_probe: hp.keep_probe}\n _,summary = sess.run(train_fetch_list, feed_dict=train_feed_dict)\n \n if epoch % 10 == 0:\n train_writer.add_summary(summary, epoch)\n \n if epoch % FLAGS.eval_per_epoch == 0:\n batch_xs, batch_ys = sess.run([test_examples, text_labels])\n test_feed_dict={x: batch_xs,\n y: batch_ys,\n keep_probe: hp.keep_probe_test}\n mertics,summary = sess.run(test_fetch_list, feed_dict=test_feed_dict)\n test_writer.add_summary(summary, epoch)\n \n if epoch % FLAGS.save_per_epoch == 0:\n save(saver, sess, FLAGS.log_dir, epoch)\n \n except:\n pass\n finally:\n save(saver, sess, FLAGS.log_dir, epoch)\n train_writer.close()\n test_writer.close()\n\n \n \nif __name__ == '__main__':\n default_hp=params.default_hyper_params\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_dir', type=str, default=\"./datasets/MNIST/\")\n parser.add_argument('--experiment_name', type=str, default=\"deep_mnist_AdamW_wd1e4\")\n parser.add_argument('--base_log_dir', type=str, default=\"./generated/logdir/\")\n parser.add_argument('--model', type=str, default=\"deep_mnist_AdamW\")\n parser.add_argument('--load_model', type=str, default=None)\n parser.add_argument('--total_epoch', type=int, default=default_hp.num_epochs)\n parser.add_argument('--eval_per_epoch', type=int, default=default_hp.eval_per_epoch)\n parser.add_argument('--save_per_epoch', type=int, default=default_hp.save_per_epoch)\n parser.add_argument('--batch_size', type=int, default=default_hp.batch_size)\n \n FLAGS, 
unparsed = parser.parse_known_args()\n FLAGS = prepare_params(FLAGS)\n main()",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
ii = [('LeakWTI2.py', 6)]
|
normal
|
{
"blob_id": "997b68e42547b8f8a1059776c55c3ad16df494da",
"index": 1468,
"step-1": "<mask token>\n",
"step-2": "ii = [('LeakWTI2.py', 6)]\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from django.contrib import admin
from .models import Profile, Address
admin.site.register(Profile)
admin.site.register(Address)
|
normal
|
{
"blob_id": "4cc6a9c48e174b33ed93d7bda159fcc3a7b59d4c",
"index": 6727,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(Profile)\nadmin.site.register(Address)\n",
"step-3": "from django.contrib import admin\nfrom .models import Profile, Address\nadmin.site.register(Profile)\nadmin.site.register(Address)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
def clear_firefox_driver_session(firefox_driver):
firefox_driver.delete_all_cookies()
# Note this only works if the browser is set to a location.
firefox_driver.execute_script('window.localStorage.clear();')
firefox_driver.execute_script('window.sessionStorage.clear();')
class LocationNotSet(Exception):
pass
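# Usage sketch (driver construction is assumed here, not shown above):
# from selenium import webdriver
# driver = webdriver.Firefox()
# driver.get('https://example.com')  # the storage APIs need a loaded page
# clear_firefox_driver_session(driver)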
|
normal
|
{
"blob_id": "6d0b9523668bd0b302fdbc196d3d7ff25be10b23",
"index": 5045,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass LocationNotSet(Exception):\n pass\n",
"step-3": "def clear_firefox_driver_session(firefox_driver):\n firefox_driver.delete_all_cookies()\n firefox_driver.execute_script('window.localStorage.clear();')\n firefox_driver.execute_script('window.sessionStorage.clear();')\n\n\nclass LocationNotSet(Exception):\n pass\n",
"step-4": "def clear_firefox_driver_session(firefox_driver):\n firefox_driver.delete_all_cookies()\n # Note this only works if the browser is set to a location.\n firefox_driver.execute_script('window.localStorage.clear();')\n firefox_driver.execute_script('window.sessionStorage.clear();')\n\n\nclass LocationNotSet(Exception):\n pass\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import pandas as pd
import json
import spacy
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF
nlp = spacy.load('en_core_web_sm')
list_data = []
list_data_only_reviews = []
list_data_reviewerid = []
result = []
l = []
for line in open('Automotive_5.json', 'r'):
list_data.append(json.loads(line))
for item in list_data:
list_data_only_reviews.append(item['reviewText'])
list_data_reviewerid.append(item['reviewerID'])
# opening the csv file in 'w+' mode
file = open('review_file.csv', 'w+')
# writing the data into the file
with file:
df = pd.DataFrame(list_data_only_reviews, columns=['Reviews'])
df.to_csv(file,index=False)
npr = pd.read_csv('review_file.csv')
tfidf = TfidfVectorizer(max_df=0.8,min_df=5,stop_words='english')
dtm = tfidf.fit_transform(npr['Reviews'].values.astype('U'))
nmf_model = NMF(n_components=20,random_state=50)
nmf_model.fit(dtm)
# topic.argsort() returns the index positions that sort the array;
# the last 30 indices are the highest-weighted words in each topic
for i,topic in enumerate(nmf_model.components_):
print(f"THE TOP 30 WORDS FOR TOPIC #{i}")
print([tfidf.get_feature_names()[i] for i in topic.argsort()[-30:] if len(tfidf.get_feature_names()[i]) > 5])
print('\n')
#probability of a document belonging to a topic
topic_results = nmf_model.transform(dtm)
npr['Topic'] = topic_results.argmax(axis=1)
topic_label = {0:'plastic', 1:'winter batteries', 2:'engines', 3:'liquid', 4:'wind', 5:'shipping', 6:'light',
7:'quality', 8:'instructions', 9:'worked', 10:'rubber', 11:'cleaning', 12:'pressure', 13:'washing',
14:'recommendation', 15:'advertise', 16:'bucket', 17:'camp', 18:'brush', 19:'travel'}
npr['Topic Label'] = npr['Topic'].map(topic_label)
npr = npr.assign(Reviews=list_data_reviewerid)
npr.to_csv('classified_output.csv')
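# A quick sanity check one could append (hypothetical, not part of the original run):
# print(npr['Topic Label'].value_counts().head(10))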
|
normal
|
{
"blob_id": "43b519d7db2e46a0bf9317eddac1f5cf6b7b79e3",
"index": 6417,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor line in open('Automotive_5.json', 'r'):\n list_data.append(json.loads(line))\nfor item in list_data:\n list_data_only_reviews.append(item['reviewText'])\n list_data_reviewerid.append(item['reviewerID'])\n<mask token>\nwith file:\n df = pd.DataFrame(list_data_only_reviews, columns=['Reviews'])\n df.to_csv(file, index=False)\n<mask token>\nnmf_model.fit(dtm)\nfor i, topic in enumerate(nmf_model.components_):\n print(f'THE TOP 30 WORDS FOR TOPIC #{i}')\n print([tfidf.get_feature_names()[i] for i in topic.argsort()[-30:] if \n len(tfidf.get_feature_names()[i]) > 5])\n print('\\n')\n<mask token>\nnpr.to_csv('classified_output.csv')\n",
"step-3": "<mask token>\nnlp = spacy.load('en_core_web_sm')\nlist_data = []\nlist_data_only_reviews = []\nlist_data_reviewerid = []\nresult = []\nl = []\nfor line in open('Automotive_5.json', 'r'):\n list_data.append(json.loads(line))\nfor item in list_data:\n list_data_only_reviews.append(item['reviewText'])\n list_data_reviewerid.append(item['reviewerID'])\nfile = open('review_file.csv', 'w+')\nwith file:\n df = pd.DataFrame(list_data_only_reviews, columns=['Reviews'])\n df.to_csv(file, index=False)\nnpr = pd.read_csv('review_file.csv')\ntfidf = TfidfVectorizer(max_df=0.8, min_df=5, stop_words='english')\ndtm = tfidf.fit_transform(npr['Reviews'].values.astype('U'))\nnmf_model = NMF(n_components=20, random_state=50)\nnmf_model.fit(dtm)\nfor i, topic in enumerate(nmf_model.components_):\n print(f'THE TOP 30 WORDS FOR TOPIC #{i}')\n print([tfidf.get_feature_names()[i] for i in topic.argsort()[-30:] if \n len(tfidf.get_feature_names()[i]) > 5])\n print('\\n')\ntopic_results = nmf_model.transform(dtm)\nnpr['Topic'] = topic_results.argmax(axis=1)\ntopic_label = {(0): 'plastic', (1): 'winter batteries', (2): 'engines', (3):\n 'liquid', (4): 'wind', (5): 'shipping', (6): 'light', (7): 'quality', (\n 8): 'instructions', (9): 'worked', (10): 'rubber', (11): 'cleaning', (\n 12): 'pressure', (13): 'washing', (14): 'recommendation', (15):\n 'advertise', (16): 'bucket', (17): 'camp', (18): 'brush', (19): 'travel'}\nnpr['Topic Label'] = npr['Topic'].map(topic_label)\nnpr = npr.assign(Reviews=list_data_reviewerid)\nnpr.to_csv('classified_output.csv')\n",
"step-4": "import pandas as pd\nimport json\nimport spacy\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.decomposition import NMF\nnlp = spacy.load('en_core_web_sm')\nlist_data = []\nlist_data_only_reviews = []\nlist_data_reviewerid = []\nresult = []\nl = []\nfor line in open('Automotive_5.json', 'r'):\n list_data.append(json.loads(line))\nfor item in list_data:\n list_data_only_reviews.append(item['reviewText'])\n list_data_reviewerid.append(item['reviewerID'])\nfile = open('review_file.csv', 'w+')\nwith file:\n df = pd.DataFrame(list_data_only_reviews, columns=['Reviews'])\n df.to_csv(file, index=False)\nnpr = pd.read_csv('review_file.csv')\ntfidf = TfidfVectorizer(max_df=0.8, min_df=5, stop_words='english')\ndtm = tfidf.fit_transform(npr['Reviews'].values.astype('U'))\nnmf_model = NMF(n_components=20, random_state=50)\nnmf_model.fit(dtm)\nfor i, topic in enumerate(nmf_model.components_):\n print(f'THE TOP 30 WORDS FOR TOPIC #{i}')\n print([tfidf.get_feature_names()[i] for i in topic.argsort()[-30:] if \n len(tfidf.get_feature_names()[i]) > 5])\n print('\\n')\ntopic_results = nmf_model.transform(dtm)\nnpr['Topic'] = topic_results.argmax(axis=1)\ntopic_label = {(0): 'plastic', (1): 'winter batteries', (2): 'engines', (3):\n 'liquid', (4): 'wind', (5): 'shipping', (6): 'light', (7): 'quality', (\n 8): 'instructions', (9): 'worked', (10): 'rubber', (11): 'cleaning', (\n 12): 'pressure', (13): 'washing', (14): 'recommendation', (15):\n 'advertise', (16): 'bucket', (17): 'camp', (18): 'brush', (19): 'travel'}\nnpr['Topic Label'] = npr['Topic'].map(topic_label)\nnpr = npr.assign(Reviews=list_data_reviewerid)\nnpr.to_csv('classified_output.csv')\n",
"step-5": "import pandas as pd\nimport json\nimport spacy\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.decomposition import NMF\n\n\n\nnlp = spacy.load('en_core_web_sm')\nlist_data = []\nlist_data_only_reviews = []\nlist_data_reviewerid = []\nresult = []\nl = []\n\nfor line in open('Automotive_5.json', 'r'):\n list_data.append(json.loads(line))\n\nfor item in list_data:\n list_data_only_reviews.append(item['reviewText'])\n list_data_reviewerid.append(item['reviewerID'])\n \n\n# opening the csv file in 'w+' mode \nfile = open('review_file.csv', 'w+') \n \n# writing the data into the file \nwith file: \n df = pd.DataFrame(list_data_only_reviews, columns=['Reviews'])\n df.to_csv(file,index=False)\n\nnpr = pd.read_csv('review_file.csv')\n\n\ntfidf = TfidfVectorizer(max_df=0.8,min_df=5,stop_words='english')\n\ndtm = tfidf.fit_transform(npr['Reviews'].values.astype('U'))\n\nnmf_model = NMF(n_components=20,random_state=50)\nnmf_model.fit(dtm)\n\n#returns index positions that sort the array\n#checking which word in the topic has high probability\nfor i,topic in enumerate(nmf_model.components_):\n print(f\"THE TOP 30 WORDS FOR TOPIC #{i}\")\n print([tfidf.get_feature_names()[i] for i in topic.argsort()[-30:] if len(tfidf.get_feature_names()[i]) > 5])\n print('\\n')\n\n#probability of a document belonging to a topic\ntopic_results = nmf_model.transform(dtm)\n\n\nnpr['Topic'] = topic_results.argmax(axis=1)\n\ntopic_label = {0:'plastic', 1:'winter batteries', 2:'engines', 3:'liquid', 4:'wind', 5:'shipping', 6:'light',\n 7:'quality', 8:'instructions', 9:'worked', 10:'rubber', 11:'cleaning', 12:'pressure', 13:'washing',\n 14:'recommendation', 15:'advertise', 16:'bucket', 17:'camp', 18:'brush', 19:'travel'}\nnpr['Topic Label'] = npr['Topic'].map(topic_label)\n\nnpr = npr.assign(Reviews=list_data_reviewerid)\n\nnpr.to_csv('classified_output.csv')\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.contrib import admin
from coupon.models import Coupon, Games
admin.site.register(Coupon)
admin.site.register(Games)
|
normal
|
{
"blob_id": "6c10213c2e866ec84f229aa426c7122aa817d167",
"index": 4239,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(Coupon)\nadmin.site.register(Games)\n",
"step-3": "from django.contrib import admin\nfrom coupon.models import Coupon, Games\nadmin.site.register(Coupon)\nadmin.site.register(Games)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import requests
from bs4 import BeautifulSoup
from urllib.request import urlretrieve
import json
import re
import time
# Feature 1: download a single song and its lyrics
def single_song(song_id,path,song_name):     # download one song; inputs: song id, save path, song name
    song_url = "http://music.163.com/song/media/outer/url?id=%s" % song_id
    down_path = path +'\\'+ song_name + '.mp3'
    urlretrieve(song_url,down_path)
    print("Song downloaded: "+song_name)
def save2txt(songname, lyric,path):  # write the lyric to the given path; inputs: song name, lyric text, save path
    # print('Saving song: {}'.format(songname))
    print("Lyric downloaded: "+songname)
lyric_path=path+'\\'+songname+'.txt'
with open(lyric_path, 'a', encoding='utf-8')as f:
f.write(lyric)
def single_song_lyric(song_id,path,song_name):   # download one song's lyric; inputs: song id, save path, song name
url = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(song_id)
headers = {
'User-Agent':
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}
html = requests.get(url, headers=headers).text
json_obj = json.loads(html)
initial_lyric = json_obj['lrc']['lyric']
reg = re.compile(r'\[.*\]')
lyric = re.sub(reg, '', initial_lyric).strip()
save2txt(song_name, lyric, path)
time.sleep(1)
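# Usage sketch for Feature 1 (the id and path are illustrative placeholders):
# single_song('000000', r'.', 'demo_song')        # fetch the mp3
# single_song_lyric('000000', r'.', 'demo_song')  # fetch the lyric text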
# Feature 2: download from a playlist URL
def songs_from_list(url,path):   # url: playlist URL; path: local save directory. Downloads every song in the playlist (artist pages and charts work too)
new_url = url.replace('/#', '')
header = {
'Host': 'music.163.com',
'Referer': 'https://music.163.com/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'
}
res = requests.get(new_url, headers=header).text
r = BeautifulSoup(res, "html.parser")
music_dict = {}
result = r.find('ul', {'class', 'f-hide'}).find_all('a')
for music in result:
print(music)
music_id = music.get('href').strip('/song?id=')
music_name = music.text
music_dict[music_id] = music_name
for song_id in music_dict:
song_url = "http://music.163.com/song/media/outer/url?id=%s" % song_id
down_path=path+'\\'+music_dict[song_id]+'.mp3'
# path = "C:\\Users\\ming-\\Downloads\\%s.mp3" % music_dict[song_id]
        # add data
        print("Downloading: %s" % music_dict[song_id])
# text.see(END)
# text.update()
urlretrieve(song_url, down_path)
def get_lyrics(songids):    # fetch the lyric for a song id
url = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(songids)
headers = {
'User-Agent':
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}
html = requests.get(url, headers=headers).text
json_obj = json.loads(html)
initial_lyric = json_obj['lrc']['lyric']
reg = re.compile(r'\[.*\]')
lyric = re.sub(reg, '', initial_lyric).strip()
return lyric
def lyrics_from_list(url,path):   # download lyrics for every song in a playlist
new_url = url.replace('/#', '')
header = {
'Host': 'music.163.com',
'Referer': 'https://music.163.com/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'
}
res = requests.get(new_url, headers=header).text
r = BeautifulSoup(res, "html.parser")
music_dict = {}
result = r.find('ul', {'class', 'f-hide'}).find_all('a')
for music in result:
print(music)
music_id = music.get('href').strip('/song?id=')
music_name = music.text
music_dict[music_id] = music_name
songids=music_dict.keys()
for i in songids:
lyric=get_lyrics(i)
save2txt(music_dict[i],lyric,path)
time.sleep(1)
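# Usage sketch for Feature 2 (the playlist id is an illustrative placeholder):
# playlist_url = 'https://music.163.com/#/playlist?id=000000'
# songs_from_list(playlist_url, r'.')   # audio for every track
# lyrics_from_list(playlist_url, r'.')  # matching lyric .txt files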
# Feature 3: download by artist
# fetch artist names and ids
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
import csv
import re
# chrome_driver = "D:\\software\\chromedriver_win32\\chromedriver.exe" # location of the chromedriver binary
# browser = webdriver.Chrome(executable_path = chrome_driver)
# wait = WebDriverWait(browser, 5) # set the wait time
def get_singer(url):  # returns artist names and ids; input is the artist catalogue page
    chrome_driver = "D:\\software\\chromedriver_win32\\chromedriver.exe"  # location of the chromedriver binary
    browser = webdriver.Chrome(executable_path=chrome_driver)
    wait = WebDriverWait(browser, 5)  # set the wait time
browser.get(url)
browser.switch_to.frame('g_iframe')
html = browser.page_source
soup = BeautifulSoup(html, 'lxml')
info = soup.select('.nm.nm-icn.f-thide.s-fc0')
singername = []
singerid = []
for snames in info:
name = snames.get_text()
        songid = str(re.findall('href="(.*?)"', str(snames))).split('=')[1].split('\'')[0] # extract the id with a regex
singername.append(name)
singerid.append(songid)
return zip(singername, singerid)
def get_data(url):
data = []
for singernames, singerids in get_singer(url):
info = {}
        info['singer_name'] = singernames
        info['singer_id'] = singerids
data.append(info)
return data
def save2csv(url):
    print('Saving artist info... check back shortly')
    with open('singer.csv', 'a', newline='', encoding='utf-8-sig') as f:
        # basic CSV write uses 'w'; use mode 'a' to append
        fieldnames = ['singer_name', 'singer_id']
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writeheader()
data = get_data(url)
print(data)
writer.writerows(data)
        print('Saved successfully')
def download_singer():
idlist = [1001, 1002, 1003, 2001, 2002, 2003, 4001, 4002, 4003, 6001, 6002, 6003, 7001, 7002, 7003]
for id in idlist:
url = 'https://music.163.com/#/discover/artist/cat?id={}&initial=-1'.format(id)
save2csv(url)
def get_id(singer_name):  # look up the artist id by artist name
file = "lib\\singer_info.csv"
with open(file, 'r',encoding='utf-8-sig') as f:
reader = csv.reader(f)
name = []
id = []
for i in reader:
name.append(i[0])
id.append(i[1])
a=name.index(singer_name)
return id[a]
# download by artist name
def get_html(url):  # fetch a page through a proxy; input is the target URL
    proxy_addr = {'http': '61.135.217.7:80'}
    # proxy IP in use; if it gets banned or expires, pick a new one at http://www.xicidaili.com/
headers = {
'User-Agent':
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}
try:
html = requests.get(url, headers=headers, proxies=proxy_addr).text
return html
except BaseException:
print('request error')
pass
def get_top50(html):  # grab the 50 hottest songs; returns song names and ids; input is the artist page HTML
soup = BeautifulSoup(html, 'lxml')
info = soup.select('.f-hide #song-list-pre-cache a')
songname = []
songids = []
for sn in info:
songnames = sn.getText()
songname.append(songnames)
for si in info:
        songid = str(re.findall('href="(.*?)"', str(si))).strip().split('=')[-1].split('\'')[0]  # search with re; the search target must be a str
songids.append(songid)
return zip(songname, songids)
def lyrics_from_singername(name,path):   # download lyrics of the artist's 50 hottest songs by artist name
id=get_id(name)
top50url = 'https://music.163.com/artist?id={}'.format(id)
html = get_html(top50url)
singer_infos = get_top50(html)
for singer_info in singer_infos:
lyric = get_lyrics(singer_info[1])
save2txt(singer_info[0], lyric, path)
time.sleep(1)
def save_song(songurl, path,songname):   # download the song at the given link to the given path; inputs: download URL, save path, song name
    try:
        urlretrieve(songurl, path)
        print('Song downloaded: ' + songname)
    except BaseException:
        print('Download failed: ' + songname)
pass
def songs_from_singername(name,path):   # download the artist's songs to the given path; inputs: artist name, save path
id=get_id(name)
top50url = 'https://music.163.com/artist?id={}'.format(id)
html = get_html(top50url)
singer_infos = get_top50(html)
for singer_info in singer_infos:
songid = singer_info[1]
songurl = 'http://music.163.com/song/media/outer/url?id={}.mp3'.format(songid)
songname = singer_info[0]
# path = 'D:\\code_new\\pycharm\\yunmusic\\song' + songname + '.mp3'
down_path=path+'\\'+songname+'.mp3'
save_song(songurl, down_path,songname)
time.sleep(1)
def lyrics_from_singerid(id,path):    # download lyrics by artist id; inputs: artist id, local save path
top50url = 'https://music.163.com/artist?id={}'.format(id)
html = get_html(top50url)
singer_infos = get_top50(html)
for singer_info in singer_infos:
lyric = get_lyrics(singer_info[1])
save2txt(singer_info[0], lyric, path)
time.sleep(1)
def songs_from_singerid(id,path):   # download song audio by artist id; inputs: artist id, local save path
top50url = 'https://music.163.com/artist?id={}'.format(id)
html = get_html(top50url)
singer_infos = get_top50(html)
for singer_info in singer_infos:
songid = singer_info[1]
songurl = 'http://music.163.com/song/media/outer/url?id={}.mp3'.format(songid)
songname = singer_info[0]
# path = 'D:\\code_new\\pycharm\\yunmusic\\song' + songname + '.mp3'
down_path = path + '\\' + songname + '.mp3'
save_song(songurl, down_path, songname)
time.sleep(1)
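# Usage sketch for Feature 3; each entry point covers the artist's 50 hottest songs.
# Name-based variants require the name to exist in lib\singer_info.csv:
# songs_from_singername('some artist', r'.')
# lyrics_from_singerid('000000', r'.')  # the id is an illustrative placeholder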
# Feature 4: download MVs
import requests
import os
import sys
from urllib.parse import urlparse,parse_qs
def http_get(api):
my_cookie = {
"version":0,
"name":'appver',
"value":'1.5.0.75771',
"port":None,
# "port_specified":False,
"domain":'www.mydomain.com',
# "domain_specified":False,
# "domain_initial_dot":False,
"path":'/',
# "path_specified":True,
"secure":False,
"expires":None,
"discard":True,
"comment":None,
"comment_url":None,
"rest":{},
"rfc2109":False
}
s = requests.Session()
s.headers.update({'Referer': "http://music.163.com/"})
s.cookies.set(**my_cookie)
response = s.get(api)
json_data = json.loads(response.text)
return json_data
def download_single_mv(id):  # download one MV by mv id
size = "720" #default 720p
api = "http://music.163.com/api/mv/detail?id="+str(id)+"&type=mp4"
json_data = http_get(api)
if json_data["code"]==200:
a = list(json_data["data"]["brs"].keys())
if size not in a:
            size = a[0]   # if 720p is unavailable, fall back to the smallest version
        mvurl = json_data["data"]["brs"][size]    # MV URL
        artist = json_data["data"]["artistName"]  # artist name
        song = json_data["data"]["name"]          # song title
filename = '%s/[%s]%s.mp4' %(artist,size,song)
if os.path.exists(filename)==False:
if os.path.exists(artist)==False:
os.makedirs(artist)
def reporthook(blocknum, blocksize, totalsize):
readsofar = blocknum * blocksize
if totalsize > 0:
percent = readsofar * 1e2 / totalsize
s = "\r%5.1f%% %*d / %d" % (
percent, len(str(totalsize)), readsofar, totalsize)
sys.stderr.write(s)
if readsofar >= totalsize: # near the end
sys.stderr.write("\n")
else: # total size is unknown
sys.stderr.write("read %d\n" % (readsofar,))
print("downloading "+filename)
urlretrieve(mvurl,filename,reporthook)
def download_mv_from_list(url):  # batch-download the MVs of a playlist or album
input=url.replace("#","")
id = parse_qs(urlparse(input).query)["id"][0]
if "playlist" in input:
playlist_api = "http://music.163.com/api/playlist/detail?id=%s" % (id)
json_data = http_get(playlist_api)
        for idx, mv in enumerate(json_data["result"]["tracks"]):  # track info, including the mv id
download_single_mv(mv["mvid"])
print("downloaded:" + str(idx))
elif "album" in input:
playlist_api = "http://music.163.com/api/album/%s" % (id)
json_data = http_get(playlist_api)
for idx, mv in enumerate(json_data["album"]["songs"]):
if mv["mvid"] != None and mv["mvid"] != 0:
download_single_mv(mv["mvid"])
print("downloaded:" + str(idx))
    else:
        download_single_mv(id)  # a plain MV link: treat the id as an mv id
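# Usage sketch for Feature 4 (ids/urls are illustrative placeholders):
# download_single_mv(0)  # one MV by mv id
# download_mv_from_list('https://music.163.com/#/playlist?id=000000')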
# Feature 5: crawl song comments and build a word cloud
from jieba import posseg
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import wordcloud
def _content_generator(music_id):   # yield the comments for a song id
url = 'http://music.163.com/api/v1/resource/comments/R_SO_4_%s' % music_id
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cache-Control': 'max-age=0',
'Host': 'music.163.com',
'Proxy-Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1',
'Cookie': '__f_=1544879495065; _ntes_nnid=ec5f372598a44f7d45726f800d3c244b,1544879496275; _ntes_nuid=ec5f372598a44f7d45726f800d3c244b; _iuqxldmzr_=32; __utmc=94650624; WM_TID=SjPgpIfajWhEUVQQAVYoLv%2BJSutc41%2BE; __utma=94650624.1212198154.1546091705.1546142549.1546173830.4; __utmz=94650624.1546173830.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; WM_NI=fjy1sURvfoc29LFwx6VN7rVC6wTgq5EA1go8oNGPt2OIoPoLBInGAKxG9Rc6%2BZ%2F6HQPKefTD2kdeQesFU899HSQfRmRPbGmc6lxhGHcRpZAVtsYhGxIWtlaVLL1c0Z7HYUc%3D; WM_NIKE=9ca17ae2e6ffcda170e2e6ee89ef48839ff7a3f0668abc8aa3d15b938b8abab76ab6afbab4db5aacaea290c52af0fea7c3b92aa6b6b7d2f25f92aaaa90e23afb948a98fb3e9692f993d549f6a99c88f43f879fff88ee34ad9289b1f73a8d97a1b1ee488297a2a8c441bc99f7b3e23ee986e1d7cb5b9495ab87d750f2b5ac86d46fb19a9bd9bc338c8d9f87d1679290aea8f069f6b4b889c644a18ec0bbc45eb8ad9789c6748b89bc8de45e9094ff84b352f59897b6e237e2a3; __utmb=94650624.8.10.1546173830; JSESSIONID-WYYY=JhDousUg2D2BV1f%2Bvq6Ka6iQHAWfFvQOPdvf5%5CPMQISbc5nnfzqQAJDcQsezW82Cup2H5n1grdeIxXp79veCgoKA68D6CSkgCXcOFkI04Hv8hEXG9tWSMKuRx0XZ4Bp%5C%5CSbZzeRs6ey4FxADkuPVlIIVSGn%2BTq8mYstxPYBIg0f2quO%5C%3A1546177369761',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36',
}
limit = 20
offset = 0
compiler = re.compile(r'[^\u4E00-\u9FA5^\u3000-\u303F^\uFF00-\uFFEF^0-9^a-z^A-Z]')
while True:
params = {
'limit': limit,
'offset': offset,
}
offset += limit
r = requests.get(url, headers=headers, params=params)
comments = r.json()['comments']
has_more = r.json()['more']
for t in comments:
yield compiler.subn('', t['content'])[0]
if not has_more:
break
class WangYiMusicWordCloud:   # custom class that builds the word-cloud image
    stop_words = ['首歌']   # seed stopword ("this song"), common filler in comments
def __init__(self, music_id, mask=None, font_path=None, stop_words=None):
        self.music_id = music_id    # song id
        self.mask = mask            # mask/background image
        self.font_path = font_path  # font file
if not stop_words is None:
self.stop_words+=stop_words
self.img_wordcloud = None
    def _cut_word(self, comment):  # tokenize a comment
word_pairs = posseg.lcut(comment, HMM=False)
result = []
for t in word_pairs:
if not (t.word in result or t.word in self.stop_words):
result.append(t.word)
return '/'.join(result)
    def get_words_text(self):  # read the cached comment file if it exists, otherwise crawl and save
        if os.path.isfile(f'{self.music_id}.txt'):
            print('Comment file already exists, reading it...')
            with open(f'{self.music_id}.txt', 'r', encoding='utf-8') as f:
                return f.read()
        else:
            print('No cached comment file, start crawling comments...')
count = 0
text = []
comments = _content_generator(self.music_id)
for t in comments:
text.append(self._cut_word(t))
count += 1
                print(f'\rCrawled {count} comments', end='')
                if count % 100 == 0:
                    print(f'\rCrawled {count} comments, resting 2s', end='')
time.sleep(2)
str_text = '\n'.join(text)
with open(f'{self.music_id}.txt', 'w', encoding='utf-8') as f:
f.write(str_text)
            print(f'\rCrawled {count} comments in total, written to {self.music_id}.txt')
return str_text
def generate(self, **kwargs):
default_kwargs = {
'background_color': "white",
'width': 1000,
'height': 860,
'margin': 2,
'max_words': 50,
'stopwords': wordcloud.STOPWORDS,
}
if not self.mask is None:
default_kwargs['mask'] = np.array(Image.open(self.mask))
if not self.font_path is None:
default_kwargs['font_path'] = self.font_path
elif 'font_path' not in kwargs:
            raise ValueError('missing argument font_path')
default_kwargs.update(kwargs)
str_text = self.get_words_text()
self.wordcloud = wordcloud.WordCloud(**default_kwargs)
self.img_wordcloud = self.wordcloud.generate(str_text)
    def show_wordcloud(self):   # render the word cloud
if self.img_wordcloud is None:
self.generate()
plt.axis('off')
plt.imshow(self.img_wordcloud)
plt.show()
    def to_file(self, filename):  # save the word-cloud image to disk
if not hasattr(self, 'wordcloud'):
self.generate()
self.wordcloud.to_file(filename)
def get_wordcloud(music_id,mask,font,path):   # driver: build, show, and save the word cloud
wordcloud_obj = WangYiMusicWordCloud(music_id, mask=mask, font_path=font)
wordcloud_obj.show_wordcloud()
result=path+'\\'+'result.jpg'
wordcloud_obj.to_file(result)
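# Usage sketch for Feature 5 (mask, font, and path are illustrative placeholders):
# get_wordcloud('000000', mask='mask.jpg', font='msyh.ttc', path=r'.')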
|
normal
|
{
"blob_id": "3b11d514b15775e4c818a7a2adf9a80e89dca968",
"index": 5801,
"step-1": "<mask token>\n\n\ndef save2txt(songname, lyric, path):\n print('歌词下载完成:' + songname)\n lyric_path = path + '\\\\' + songname + '.txt'\n with open(lyric_path, 'a', encoding='utf-8') as f:\n f.write(lyric)\n\n\n<mask token>\n\n\ndef get_lyrics(songids):\n url = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(\n songids)\n headers = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\n }\n html = requests.get(url, headers=headers).text\n json_obj = json.loads(html)\n initial_lyric = json_obj['lrc']['lyric']\n reg = re.compile('\\\\[.*\\\\]')\n lyric = re.sub(reg, '', initial_lyric).strip()\n return lyric\n\n\n<mask token>\n\n\ndef get_singer(url):\n chrome_driver = 'D:\\\\software\\\\chromedriver_win32\\\\chromedriver.exe'\n browser = webdriver.Chrome(executable_path=chrome_driver)\n wait = WebDriverWait(browser, 5)\n browser.get(url)\n browser.switch_to.frame('g_iframe')\n html = browser.page_source\n soup = BeautifulSoup(html, 'lxml')\n info = soup.select('.nm.nm-icn.f-thide.s-fc0')\n singername = []\n singerid = []\n for snames in info:\n name = snames.get_text()\n songid = str(re.findall('href=\"(.*?)\"', str(snames))).split('=')[1\n ].split(\"'\")[0]\n singername.append(name)\n singerid.append(songid)\n return zip(singername, singerid)\n\n\n<mask token>\n\n\ndef lyrics_from_singername(name, path):\n id = get_id(name)\n top50url = 'https://music.163.com/artist?id={}'.format(id)\n html = get_html(top50url)\n singer_infos = get_top50(html)\n for singer_info in singer_infos:\n lyric = get_lyrics(singer_info[1])\n save2txt(singer_info[0], lyric, path)\n time.sleep(1)\n\n\ndef save_song(songurl, path, songname):\n try:\n urlretrieve(songurl, path)\n print('歌曲下载完成:' + songname)\n except BaseException:\n print('下载失败:' + songname)\n pass\n\n\n<mask token>\n\n\ndef lyrics_from_singerid(id, path):\n top50url = 'https://music.163.com/artist?id={}'.format(id)\n html = get_html(top50url)\n singer_infos = get_top50(html)\n for singer_info in singer_infos:\n lyric = get_lyrics(singer_info[1])\n save2txt(singer_info[0], lyric, path)\n time.sleep(1)\n\n\n<mask token>\n\n\ndef http_get(api):\n my_cookie = {'version': 0, 'name': 'appver', 'value': '1.5.0.75771',\n 'port': None, 'domain': 'www.mydomain.com', 'path': '/', 'secure': \n False, 'expires': None, 'discard': True, 'comment': None,\n 'comment_url': None, 'rest': {}, 'rfc2109': False}\n s = requests.Session()\n s.headers.update({'Referer': 'http://music.163.com/'})\n s.cookies.set(**my_cookie)\n response = s.get(api)\n json_data = json.loads(response.text)\n return json_data\n\n\ndef download_single_mv(id):\n size = '720'\n api = 'http://music.163.com/api/mv/detail?id=' + str(id) + '&type=mp4'\n json_data = http_get(api)\n if json_data['code'] == 200:\n a = list(json_data['data']['brs'].keys())\n if size not in a:\n size = a[0]\n mvurl = json_data['data']['brs'][size]\n artist = json_data['data']['artistName']\n song = json_data['data']['name']\n filename = '%s/[%s]%s.mp4' % (artist, size, song)\n if os.path.exists(filename) == False:\n if os.path.exists(artist) == False:\n os.makedirs(artist)\n\n def reporthook(blocknum, blocksize, totalsize):\n readsofar = blocknum * blocksize\n if totalsize > 0:\n percent = readsofar * 100.0 / totalsize\n s = '\\r%5.1f%% %*d / %d' % (percent, len(str(totalsize)\n ), readsofar, totalsize)\n sys.stderr.write(s)\n if readsofar >= totalsize:\n sys.stderr.write('\\n')\n else:\n sys.stderr.write('read %d\\n' % 
(readsofar,))\n print('downloading ' + filename)\n urlretrieve(mvurl, filename, reporthook)\n\n\n<mask token>\n\n\ndef _content_generator(music_id):\n url = 'http://music.163.com/api/v1/resource/comments/R_SO_4_%s' % music_id\n headers = {'Accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'Accept-Encoding': 'gzip, deflate', 'Accept-Language':\n 'zh-CN,zh;q=0.9', 'Cache-Control': 'max-age=0', 'Host':\n 'music.163.com', 'Proxy-Connection': 'keep-alive',\n 'Upgrade-Insecure-Requests': '1', 'Cookie':\n '__f_=1544879495065; _ntes_nnid=ec5f372598a44f7d45726f800d3c244b,1544879496275; _ntes_nuid=ec5f372598a44f7d45726f800d3c244b; _iuqxldmzr_=32; __utmc=94650624; WM_TID=SjPgpIfajWhEUVQQAVYoLv%2BJSutc41%2BE; __utma=94650624.1212198154.1546091705.1546142549.1546173830.4; __utmz=94650624.1546173830.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; WM_NI=fjy1sURvfoc29LFwx6VN7rVC6wTgq5EA1go8oNGPt2OIoPoLBInGAKxG9Rc6%2BZ%2F6HQPKefTD2kdeQesFU899HSQfRmRPbGmc6lxhGHcRpZAVtsYhGxIWtlaVLL1c0Z7HYUc%3D; WM_NIKE=9ca17ae2e6ffcda170e2e6ee89ef48839ff7a3f0668abc8aa3d15b938b8abab76ab6afbab4db5aacaea290c52af0fea7c3b92aa6b6b7d2f25f92aaaa90e23afb948a98fb3e9692f993d549f6a99c88f43f879fff88ee34ad9289b1f73a8d97a1b1ee488297a2a8c441bc99f7b3e23ee986e1d7cb5b9495ab87d750f2b5ac86d46fb19a9bd9bc338c8d9f87d1679290aea8f069f6b4b889c644a18ec0bbc45eb8ad9789c6748b89bc8de45e9094ff84b352f59897b6e237e2a3; __utmb=94650624.8.10.1546173830; JSESSIONID-WYYY=JhDousUg2D2BV1f%2Bvq6Ka6iQHAWfFvQOPdvf5%5CPMQISbc5nnfzqQAJDcQsezW82Cup2H5n1grdeIxXp79veCgoKA68D6CSkgCXcOFkI04Hv8hEXG9tWSMKuRx0XZ4Bp%5C%5CSbZzeRs6ey4FxADkuPVlIIVSGn%2BTq8mYstxPYBIg0f2quO%5C%3A1546177369761'\n , 'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'\n }\n limit = 20\n offset = 0\n compiler = re.compile(\n '[^\\\\u4E00-\\\\u9FA5^\\\\u3000-\\\\u303F^\\\\uFF00-\\\\uFFEF^0-9^a-z^A-Z]')\n while True:\n params = {'limit': limit, 'offset': offset}\n offset += limit\n r = requests.get(url, headers=headers, params=params)\n comments = r.json()['comments']\n has_more = r.json()['more']\n for t in comments:\n yield compiler.subn('', t['content'])[0]\n if not has_more:\n break\n\n\nclass WangYiMusicWordCloud:\n stop_words = ['首歌']\n\n def __init__(self, music_id, mask=None, font_path=None, stop_words=None):\n self.music_id = music_id\n self.mask = mask\n self.font_path = font_path\n if not stop_words is None:\n self.stop_words += stop_words\n self.img_wordcloud = None\n\n def _cut_word(self, comment):\n word_pairs = posseg.lcut(comment, HMM=False)\n result = []\n for t in word_pairs:\n if not (t.word in result or t.word in self.stop_words):\n result.append(t.word)\n return '/'.join(result)\n\n def get_words_text(self):\n if os.path.isfile(f'{self.music_id}.txt'):\n print('评论文件已存在,读取文件...')\n with open(f'{self.music_id}.txt', 'r', encoding='utf-8') as f:\n return f.read()\n else:\n print('没有默认评论文件,开始爬取评论...')\n count = 0\n text = []\n comments = _content_generator(self.music_id)\n for t in comments:\n text.append(self._cut_word(t))\n count += 1\n print(f'\\r已爬取 {count}条评论', end='')\n if count % 100 == 0:\n print(f'\\r已爬取 {count}条评论, 休息 2s', end='')\n time.sleep(2)\n str_text = '\\n'.join(text)\n with open(f'{self.music_id}.txt', 'w', encoding='utf-8') as f:\n f.write(str_text)\n print(f'\\r共爬取 {count}条评论,已写入文件 {self.music_id}.txt')\n return str_text\n\n def generate(self, **kwargs):\n default_kwargs = {'background_color': 'white', 'width': 1000,\n 'height': 860, 
'margin': 2, 'max_words': 50, 'stopwords':\n wordcloud.STOPWORDS}\n if not self.mask is None:\n default_kwargs['mask'] = np.array(Image.open(self.mask))\n if not self.font_path is None:\n default_kwargs['font_path'] = self.font_path\n elif 'font_path' not in kwargs:\n raise ValueError('缺少参数 font_path')\n default_kwargs.update(kwargs)\n str_text = self.get_words_text()\n self.wordcloud = wordcloud.WordCloud(**default_kwargs)\n self.img_wordcloud = self.wordcloud.generate(str_text)\n\n def show_wordcloud(self):\n if self.img_wordcloud is None:\n self.generate()\n plt.axis('off')\n plt.imshow(self.img_wordcloud)\n plt.show()\n\n def to_file(self, filename):\n if not hasattr(self, 'wordcloud'):\n self.generate()\n self.wordcloud.to_file(filename)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef save2txt(songname, lyric, path):\n print('歌词下载完成:' + songname)\n lyric_path = path + '\\\\' + songname + '.txt'\n with open(lyric_path, 'a', encoding='utf-8') as f:\n f.write(lyric)\n\n\n<mask token>\n\n\ndef get_lyrics(songids):\n url = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(\n songids)\n headers = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\n }\n html = requests.get(url, headers=headers).text\n json_obj = json.loads(html)\n initial_lyric = json_obj['lrc']['lyric']\n reg = re.compile('\\\\[.*\\\\]')\n lyric = re.sub(reg, '', initial_lyric).strip()\n return lyric\n\n\ndef lyrics_from_list(url, path):\n new_url = url.replace('/#', '')\n header = {'Host': 'music.163.com', 'Referer': 'https://music.163.com/',\n 'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'\n }\n res = requests.get(new_url, headers=header).text\n r = BeautifulSoup(res, 'html.parser')\n music_dict = {}\n result = r.find('ul', {'class', 'f-hide'}).find_all('a')\n for music in result:\n print(music)\n music_id = music.get('href').strip('/song?id=')\n music_name = music.text\n music_dict[music_id] = music_name\n songids = music_dict.keys()\n for i in songids:\n lyric = get_lyrics(i)\n save2txt(music_dict[i], lyric, path)\n time.sleep(1)\n\n\n<mask token>\n\n\ndef get_singer(url):\n chrome_driver = 'D:\\\\software\\\\chromedriver_win32\\\\chromedriver.exe'\n browser = webdriver.Chrome(executable_path=chrome_driver)\n wait = WebDriverWait(browser, 5)\n browser.get(url)\n browser.switch_to.frame('g_iframe')\n html = browser.page_source\n soup = BeautifulSoup(html, 'lxml')\n info = soup.select('.nm.nm-icn.f-thide.s-fc0')\n singername = []\n singerid = []\n for snames in info:\n name = snames.get_text()\n songid = str(re.findall('href=\"(.*?)\"', str(snames))).split('=')[1\n ].split(\"'\")[0]\n singername.append(name)\n singerid.append(songid)\n return zip(singername, singerid)\n\n\n<mask token>\n\n\ndef download_singer():\n idlist = [1001, 1002, 1003, 2001, 2002, 2003, 4001, 4002, 4003, 6001, \n 6002, 6003, 7001, 7002, 7003]\n for id in idlist:\n url = ('https://music.163.com/#/discover/artist/cat?id={}&initial=-1'\n .format(id))\n save2csv(url)\n\n\n<mask token>\n\n\ndef get_html(url):\n proxy_addr = {'http': '61.135.217.7:80'}\n headers = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\n }\n try:\n html = requests.get(url, headers=headers, proxies=proxy_addr).text\n return html\n except BaseException:\n print('request error')\n pass\n\n\n<mask token>\n\n\ndef lyrics_from_singername(name, path):\n id = get_id(name)\n top50url = 'https://music.163.com/artist?id={}'.format(id)\n html = get_html(top50url)\n singer_infos = get_top50(html)\n for singer_info in singer_infos:\n lyric = get_lyrics(singer_info[1])\n save2txt(singer_info[0], lyric, path)\n time.sleep(1)\n\n\ndef save_song(songurl, path, songname):\n try:\n urlretrieve(songurl, path)\n print('歌曲下载完成:' + songname)\n except BaseException:\n print('下载失败:' + songname)\n pass\n\n\n<mask token>\n\n\ndef lyrics_from_singerid(id, path):\n top50url = 'https://music.163.com/artist?id={}'.format(id)\n html = get_html(top50url)\n singer_infos = get_top50(html)\n for singer_info in singer_infos:\n lyric = get_lyrics(singer_info[1])\n save2txt(singer_info[0], lyric, 
path)\n time.sleep(1)\n\n\n<mask token>\n\n\ndef http_get(api):\n my_cookie = {'version': 0, 'name': 'appver', 'value': '1.5.0.75771',\n 'port': None, 'domain': 'www.mydomain.com', 'path': '/', 'secure': \n False, 'expires': None, 'discard': True, 'comment': None,\n 'comment_url': None, 'rest': {}, 'rfc2109': False}\n s = requests.Session()\n s.headers.update({'Referer': 'http://music.163.com/'})\n s.cookies.set(**my_cookie)\n response = s.get(api)\n json_data = json.loads(response.text)\n return json_data\n\n\ndef download_single_mv(id):\n size = '720'\n api = 'http://music.163.com/api/mv/detail?id=' + str(id) + '&type=mp4'\n json_data = http_get(api)\n if json_data['code'] == 200:\n a = list(json_data['data']['brs'].keys())\n if size not in a:\n size = a[0]\n mvurl = json_data['data']['brs'][size]\n artist = json_data['data']['artistName']\n song = json_data['data']['name']\n filename = '%s/[%s]%s.mp4' % (artist, size, song)\n if os.path.exists(filename) == False:\n if os.path.exists(artist) == False:\n os.makedirs(artist)\n\n def reporthook(blocknum, blocksize, totalsize):\n readsofar = blocknum * blocksize\n if totalsize > 0:\n percent = readsofar * 100.0 / totalsize\n s = '\\r%5.1f%% %*d / %d' % (percent, len(str(totalsize)\n ), readsofar, totalsize)\n sys.stderr.write(s)\n if readsofar >= totalsize:\n sys.stderr.write('\\n')\n else:\n sys.stderr.write('read %d\\n' % (readsofar,))\n print('downloading ' + filename)\n urlretrieve(mvurl, filename, reporthook)\n\n\n<mask token>\n\n\ndef _content_generator(music_id):\n url = 'http://music.163.com/api/v1/resource/comments/R_SO_4_%s' % music_id\n headers = {'Accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'Accept-Encoding': 'gzip, deflate', 'Accept-Language':\n 'zh-CN,zh;q=0.9', 'Cache-Control': 'max-age=0', 'Host':\n 'music.163.com', 'Proxy-Connection': 'keep-alive',\n 'Upgrade-Insecure-Requests': '1', 'Cookie':\n '__f_=1544879495065; _ntes_nnid=ec5f372598a44f7d45726f800d3c244b,1544879496275; _ntes_nuid=ec5f372598a44f7d45726f800d3c244b; _iuqxldmzr_=32; __utmc=94650624; WM_TID=SjPgpIfajWhEUVQQAVYoLv%2BJSutc41%2BE; __utma=94650624.1212198154.1546091705.1546142549.1546173830.4; __utmz=94650624.1546173830.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; WM_NI=fjy1sURvfoc29LFwx6VN7rVC6wTgq5EA1go8oNGPt2OIoPoLBInGAKxG9Rc6%2BZ%2F6HQPKefTD2kdeQesFU899HSQfRmRPbGmc6lxhGHcRpZAVtsYhGxIWtlaVLL1c0Z7HYUc%3D; WM_NIKE=9ca17ae2e6ffcda170e2e6ee89ef48839ff7a3f0668abc8aa3d15b938b8abab76ab6afbab4db5aacaea290c52af0fea7c3b92aa6b6b7d2f25f92aaaa90e23afb948a98fb3e9692f993d549f6a99c88f43f879fff88ee34ad9289b1f73a8d97a1b1ee488297a2a8c441bc99f7b3e23ee986e1d7cb5b9495ab87d750f2b5ac86d46fb19a9bd9bc338c8d9f87d1679290aea8f069f6b4b889c644a18ec0bbc45eb8ad9789c6748b89bc8de45e9094ff84b352f59897b6e237e2a3; __utmb=94650624.8.10.1546173830; JSESSIONID-WYYY=JhDousUg2D2BV1f%2Bvq6Ka6iQHAWfFvQOPdvf5%5CPMQISbc5nnfzqQAJDcQsezW82Cup2H5n1grdeIxXp79veCgoKA68D6CSkgCXcOFkI04Hv8hEXG9tWSMKuRx0XZ4Bp%5C%5CSbZzeRs6ey4FxADkuPVlIIVSGn%2BTq8mYstxPYBIg0f2quO%5C%3A1546177369761'\n , 'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'\n }\n limit = 20\n offset = 0\n compiler = re.compile(\n '[^\\\\u4E00-\\\\u9FA5^\\\\u3000-\\\\u303F^\\\\uFF00-\\\\uFFEF^0-9^a-z^A-Z]')\n while True:\n params = {'limit': limit, 'offset': offset}\n offset += limit\n r = requests.get(url, headers=headers, params=params)\n comments = r.json()['comments']\n has_more = r.json()['more']\n for t 
in comments:\n yield compiler.subn('', t['content'])[0]\n if not has_more:\n break\n\n\nclass WangYiMusicWordCloud:\n stop_words = ['首歌']\n\n def __init__(self, music_id, mask=None, font_path=None, stop_words=None):\n self.music_id = music_id\n self.mask = mask\n self.font_path = font_path\n if not stop_words is None:\n self.stop_words += stop_words\n self.img_wordcloud = None\n\n def _cut_word(self, comment):\n word_pairs = posseg.lcut(comment, HMM=False)\n result = []\n for t in word_pairs:\n if not (t.word in result or t.word in self.stop_words):\n result.append(t.word)\n return '/'.join(result)\n\n def get_words_text(self):\n if os.path.isfile(f'{self.music_id}.txt'):\n print('评论文件已存在,读取文件...')\n with open(f'{self.music_id}.txt', 'r', encoding='utf-8') as f:\n return f.read()\n else:\n print('没有默认评论文件,开始爬取评论...')\n count = 0\n text = []\n comments = _content_generator(self.music_id)\n for t in comments:\n text.append(self._cut_word(t))\n count += 1\n print(f'\\r已爬取 {count}条评论', end='')\n if count % 100 == 0:\n print(f'\\r已爬取 {count}条评论, 休息 2s', end='')\n time.sleep(2)\n str_text = '\\n'.join(text)\n with open(f'{self.music_id}.txt', 'w', encoding='utf-8') as f:\n f.write(str_text)\n print(f'\\r共爬取 {count}条评论,已写入文件 {self.music_id}.txt')\n return str_text\n\n def generate(self, **kwargs):\n default_kwargs = {'background_color': 'white', 'width': 1000,\n 'height': 860, 'margin': 2, 'max_words': 50, 'stopwords':\n wordcloud.STOPWORDS}\n if not self.mask is None:\n default_kwargs['mask'] = np.array(Image.open(self.mask))\n if not self.font_path is None:\n default_kwargs['font_path'] = self.font_path\n elif 'font_path' not in kwargs:\n raise ValueError('缺少参数 font_path')\n default_kwargs.update(kwargs)\n str_text = self.get_words_text()\n self.wordcloud = wordcloud.WordCloud(**default_kwargs)\n self.img_wordcloud = self.wordcloud.generate(str_text)\n\n def show_wordcloud(self):\n if self.img_wordcloud is None:\n self.generate()\n plt.axis('off')\n plt.imshow(self.img_wordcloud)\n plt.show()\n\n def to_file(self, filename):\n if not hasattr(self, 'wordcloud'):\n self.generate()\n self.wordcloud.to_file(filename)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef save2txt(songname, lyric, path):\n print('歌词下载完成:' + songname)\n lyric_path = path + '\\\\' + songname + '.txt'\n with open(lyric_path, 'a', encoding='utf-8') as f:\n f.write(lyric)\n\n\n<mask token>\n\n\ndef get_lyrics(songids):\n url = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(\n songids)\n headers = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\n }\n html = requests.get(url, headers=headers).text\n json_obj = json.loads(html)\n initial_lyric = json_obj['lrc']['lyric']\n reg = re.compile('\\\\[.*\\\\]')\n lyric = re.sub(reg, '', initial_lyric).strip()\n return lyric\n\n\ndef lyrics_from_list(url, path):\n new_url = url.replace('/#', '')\n header = {'Host': 'music.163.com', 'Referer': 'https://music.163.com/',\n 'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'\n }\n res = requests.get(new_url, headers=header).text\n r = BeautifulSoup(res, 'html.parser')\n music_dict = {}\n result = r.find('ul', {'class', 'f-hide'}).find_all('a')\n for music in result:\n print(music)\n music_id = music.get('href').strip('/song?id=')\n music_name = music.text\n music_dict[music_id] = music_name\n songids = music_dict.keys()\n for i in songids:\n lyric = get_lyrics(i)\n save2txt(music_dict[i], lyric, path)\n time.sleep(1)\n\n\n<mask token>\n\n\ndef get_singer(url):\n chrome_driver = 'D:\\\\software\\\\chromedriver_win32\\\\chromedriver.exe'\n browser = webdriver.Chrome(executable_path=chrome_driver)\n wait = WebDriverWait(browser, 5)\n browser.get(url)\n browser.switch_to.frame('g_iframe')\n html = browser.page_source\n soup = BeautifulSoup(html, 'lxml')\n info = soup.select('.nm.nm-icn.f-thide.s-fc0')\n singername = []\n singerid = []\n for snames in info:\n name = snames.get_text()\n songid = str(re.findall('href=\"(.*?)\"', str(snames))).split('=')[1\n ].split(\"'\")[0]\n singername.append(name)\n singerid.append(songid)\n return zip(singername, singerid)\n\n\n<mask token>\n\n\ndef download_singer():\n idlist = [1001, 1002, 1003, 2001, 2002, 2003, 4001, 4002, 4003, 6001, \n 6002, 6003, 7001, 7002, 7003]\n for id in idlist:\n url = ('https://music.163.com/#/discover/artist/cat?id={}&initial=-1'\n .format(id))\n save2csv(url)\n\n\n<mask token>\n\n\ndef get_html(url):\n proxy_addr = {'http': '61.135.217.7:80'}\n headers = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\n }\n try:\n html = requests.get(url, headers=headers, proxies=proxy_addr).text\n return html\n except BaseException:\n print('request error')\n pass\n\n\ndef get_top50(html):\n soup = BeautifulSoup(html, 'lxml')\n info = soup.select('.f-hide #song-list-pre-cache a')\n songname = []\n songids = []\n for sn in info:\n songnames = sn.getText()\n songname.append(songnames)\n for si in info:\n songid = str(re.findall('href=\"(.*?)\"', str(si))).strip().split('=')[-1\n ].split(\"'\")[0]\n songids.append(songid)\n return zip(songname, songids)\n\n\ndef lyrics_from_singername(name, path):\n id = get_id(name)\n top50url = 'https://music.163.com/artist?id={}'.format(id)\n html = get_html(top50url)\n singer_infos = get_top50(html)\n for singer_info in singer_infos:\n lyric = get_lyrics(singer_info[1])\n save2txt(singer_info[0], lyric, path)\n time.sleep(1)\n\n\ndef save_song(songurl, path, songname):\n try:\n urlretrieve(songurl, path)\n 
print('歌曲下载完成:' + songname)\n except BaseException:\n print('下载失败:' + songname)\n pass\n\n\n<mask token>\n\n\ndef lyrics_from_singerid(id, path):\n top50url = 'https://music.163.com/artist?id={}'.format(id)\n html = get_html(top50url)\n singer_infos = get_top50(html)\n for singer_info in singer_infos:\n lyric = get_lyrics(singer_info[1])\n save2txt(singer_info[0], lyric, path)\n time.sleep(1)\n\n\n<mask token>\n\n\ndef http_get(api):\n my_cookie = {'version': 0, 'name': 'appver', 'value': '1.5.0.75771',\n 'port': None, 'domain': 'www.mydomain.com', 'path': '/', 'secure': \n False, 'expires': None, 'discard': True, 'comment': None,\n 'comment_url': None, 'rest': {}, 'rfc2109': False}\n s = requests.Session()\n s.headers.update({'Referer': 'http://music.163.com/'})\n s.cookies.set(**my_cookie)\n response = s.get(api)\n json_data = json.loads(response.text)\n return json_data\n\n\ndef download_single_mv(id):\n size = '720'\n api = 'http://music.163.com/api/mv/detail?id=' + str(id) + '&type=mp4'\n json_data = http_get(api)\n if json_data['code'] == 200:\n a = list(json_data['data']['brs'].keys())\n if size not in a:\n size = a[0]\n mvurl = json_data['data']['brs'][size]\n artist = json_data['data']['artistName']\n song = json_data['data']['name']\n filename = '%s/[%s]%s.mp4' % (artist, size, song)\n if os.path.exists(filename) == False:\n if os.path.exists(artist) == False:\n os.makedirs(artist)\n\n def reporthook(blocknum, blocksize, totalsize):\n readsofar = blocknum * blocksize\n if totalsize > 0:\n percent = readsofar * 100.0 / totalsize\n s = '\\r%5.1f%% %*d / %d' % (percent, len(str(totalsize)\n ), readsofar, totalsize)\n sys.stderr.write(s)\n if readsofar >= totalsize:\n sys.stderr.write('\\n')\n else:\n sys.stderr.write('read %d\\n' % (readsofar,))\n print('downloading ' + filename)\n urlretrieve(mvurl, filename, reporthook)\n\n\n<mask token>\n\n\ndef _content_generator(music_id):\n url = 'http://music.163.com/api/v1/resource/comments/R_SO_4_%s' % music_id\n headers = {'Accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'Accept-Encoding': 'gzip, deflate', 'Accept-Language':\n 'zh-CN,zh;q=0.9', 'Cache-Control': 'max-age=0', 'Host':\n 'music.163.com', 'Proxy-Connection': 'keep-alive',\n 'Upgrade-Insecure-Requests': '1', 'Cookie':\n '__f_=1544879495065; _ntes_nnid=ec5f372598a44f7d45726f800d3c244b,1544879496275; _ntes_nuid=ec5f372598a44f7d45726f800d3c244b; _iuqxldmzr_=32; __utmc=94650624; WM_TID=SjPgpIfajWhEUVQQAVYoLv%2BJSutc41%2BE; __utma=94650624.1212198154.1546091705.1546142549.1546173830.4; __utmz=94650624.1546173830.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; WM_NI=fjy1sURvfoc29LFwx6VN7rVC6wTgq5EA1go8oNGPt2OIoPoLBInGAKxG9Rc6%2BZ%2F6HQPKefTD2kdeQesFU899HSQfRmRPbGmc6lxhGHcRpZAVtsYhGxIWtlaVLL1c0Z7HYUc%3D; WM_NIKE=9ca17ae2e6ffcda170e2e6ee89ef48839ff7a3f0668abc8aa3d15b938b8abab76ab6afbab4db5aacaea290c52af0fea7c3b92aa6b6b7d2f25f92aaaa90e23afb948a98fb3e9692f993d549f6a99c88f43f879fff88ee34ad9289b1f73a8d97a1b1ee488297a2a8c441bc99f7b3e23ee986e1d7cb5b9495ab87d750f2b5ac86d46fb19a9bd9bc338c8d9f87d1679290aea8f069f6b4b889c644a18ec0bbc45eb8ad9789c6748b89bc8de45e9094ff84b352f59897b6e237e2a3; __utmb=94650624.8.10.1546173830; JSESSIONID-WYYY=JhDousUg2D2BV1f%2Bvq6Ka6iQHAWfFvQOPdvf5%5CPMQISbc5nnfzqQAJDcQsezW82Cup2H5n1grdeIxXp79veCgoKA68D6CSkgCXcOFkI04Hv8hEXG9tWSMKuRx0XZ4Bp%5C%5CSbZzeRs6ey4FxADkuPVlIIVSGn%2BTq8mYstxPYBIg0f2quO%5C%3A1546177369761'\n , 'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) 
Chrome/70.0.3538.67 Safari/537.36'\n }\n limit = 20\n offset = 0\n compiler = re.compile(\n '[^\\\\u4E00-\\\\u9FA5^\\\\u3000-\\\\u303F^\\\\uFF00-\\\\uFFEF^0-9^a-z^A-Z]')\n while True:\n params = {'limit': limit, 'offset': offset}\n offset += limit\n r = requests.get(url, headers=headers, params=params)\n comments = r.json()['comments']\n has_more = r.json()['more']\n for t in comments:\n yield compiler.subn('', t['content'])[0]\n if not has_more:\n break\n\n\nclass WangYiMusicWordCloud:\n stop_words = ['首歌']\n\n def __init__(self, music_id, mask=None, font_path=None, stop_words=None):\n self.music_id = music_id\n self.mask = mask\n self.font_path = font_path\n if not stop_words is None:\n self.stop_words += stop_words\n self.img_wordcloud = None\n\n def _cut_word(self, comment):\n word_pairs = posseg.lcut(comment, HMM=False)\n result = []\n for t in word_pairs:\n if not (t.word in result or t.word in self.stop_words):\n result.append(t.word)\n return '/'.join(result)\n\n def get_words_text(self):\n if os.path.isfile(f'{self.music_id}.txt'):\n print('评论文件已存在,读取文件...')\n with open(f'{self.music_id}.txt', 'r', encoding='utf-8') as f:\n return f.read()\n else:\n print('没有默认评论文件,开始爬取评论...')\n count = 0\n text = []\n comments = _content_generator(self.music_id)\n for t in comments:\n text.append(self._cut_word(t))\n count += 1\n print(f'\\r已爬取 {count}条评论', end='')\n if count % 100 == 0:\n print(f'\\r已爬取 {count}条评论, 休息 2s', end='')\n time.sleep(2)\n str_text = '\\n'.join(text)\n with open(f'{self.music_id}.txt', 'w', encoding='utf-8') as f:\n f.write(str_text)\n print(f'\\r共爬取 {count}条评论,已写入文件 {self.music_id}.txt')\n return str_text\n\n def generate(self, **kwargs):\n default_kwargs = {'background_color': 'white', 'width': 1000,\n 'height': 860, 'margin': 2, 'max_words': 50, 'stopwords':\n wordcloud.STOPWORDS}\n if not self.mask is None:\n default_kwargs['mask'] = np.array(Image.open(self.mask))\n if not self.font_path is None:\n default_kwargs['font_path'] = self.font_path\n elif 'font_path' not in kwargs:\n raise ValueError('缺少参数 font_path')\n default_kwargs.update(kwargs)\n str_text = self.get_words_text()\n self.wordcloud = wordcloud.WordCloud(**default_kwargs)\n self.img_wordcloud = self.wordcloud.generate(str_text)\n\n def show_wordcloud(self):\n if self.img_wordcloud is None:\n self.generate()\n plt.axis('off')\n plt.imshow(self.img_wordcloud)\n plt.show()\n\n def to_file(self, filename):\n if not hasattr(self, 'wordcloud'):\n self.generate()\n self.wordcloud.to_file(filename)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef save2txt(songname, lyric, path):\n print('歌词下载完成:' + songname)\n lyric_path = path + '\\\\' + songname + '.txt'\n with open(lyric_path, 'a', encoding='utf-8') as f:\n f.write(lyric)\n\n\ndef single_song_lyric(song_id, path, song_name):\n url = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(\n song_id)\n headers = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\n }\n html = requests.get(url, headers=headers).text\n json_obj = json.loads(html)\n initial_lyric = json_obj['lrc']['lyric']\n reg = re.compile('\\\\[.*\\\\]')\n lyric = re.sub(reg, '', initial_lyric).strip()\n save2txt(song_name, lyric, path)\n time.sleep(1)\n\n\n<mask token>\n\n\ndef get_lyrics(songids):\n url = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(\n songids)\n headers = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\n }\n html = requests.get(url, headers=headers).text\n json_obj = json.loads(html)\n initial_lyric = json_obj['lrc']['lyric']\n reg = re.compile('\\\\[.*\\\\]')\n lyric = re.sub(reg, '', initial_lyric).strip()\n return lyric\n\n\ndef lyrics_from_list(url, path):\n new_url = url.replace('/#', '')\n header = {'Host': 'music.163.com', 'Referer': 'https://music.163.com/',\n 'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'\n }\n res = requests.get(new_url, headers=header).text\n r = BeautifulSoup(res, 'html.parser')\n music_dict = {}\n result = r.find('ul', {'class', 'f-hide'}).find_all('a')\n for music in result:\n print(music)\n music_id = music.get('href').strip('/song?id=')\n music_name = music.text\n music_dict[music_id] = music_name\n songids = music_dict.keys()\n for i in songids:\n lyric = get_lyrics(i)\n save2txt(music_dict[i], lyric, path)\n time.sleep(1)\n\n\n<mask token>\n\n\ndef get_singer(url):\n chrome_driver = 'D:\\\\software\\\\chromedriver_win32\\\\chromedriver.exe'\n browser = webdriver.Chrome(executable_path=chrome_driver)\n wait = WebDriverWait(browser, 5)\n browser.get(url)\n browser.switch_to.frame('g_iframe')\n html = browser.page_source\n soup = BeautifulSoup(html, 'lxml')\n info = soup.select('.nm.nm-icn.f-thide.s-fc0')\n singername = []\n singerid = []\n for snames in info:\n name = snames.get_text()\n songid = str(re.findall('href=\"(.*?)\"', str(snames))).split('=')[1\n ].split(\"'\")[0]\n singername.append(name)\n singerid.append(songid)\n return zip(singername, singerid)\n\n\ndef get_data(url):\n data = []\n for singernames, singerids in get_singer(url):\n info = {}\n info['歌手名字'] = singernames\n info['歌手ID'] = singerids\n data.append(info)\n return data\n\n\n<mask token>\n\n\ndef download_singer():\n idlist = [1001, 1002, 1003, 2001, 2002, 2003, 4001, 4002, 4003, 6001, \n 6002, 6003, 7001, 7002, 7003]\n for id in idlist:\n url = ('https://music.163.com/#/discover/artist/cat?id={}&initial=-1'\n .format(id))\n save2csv(url)\n\n\n<mask token>\n\n\ndef get_html(url):\n proxy_addr = {'http': '61.135.217.7:80'}\n headers = {'User-Agent':\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'\n }\n try:\n html = requests.get(url, headers=headers, proxies=proxy_addr).text\n return html\n except BaseException:\n print('request error')\n pass\n\n\ndef get_top50(html):\n soup = BeautifulSoup(html, 
'lxml')\n info = soup.select('.f-hide #song-list-pre-cache a')\n songname = []\n songids = []\n for sn in info:\n songnames = sn.getText()\n songname.append(songnames)\n for si in info:\n songid = str(re.findall('href=\"(.*?)\"', str(si))).strip().split('=')[-1\n ].split(\"'\")[0]\n songids.append(songid)\n return zip(songname, songids)\n\n\ndef lyrics_from_singername(name, path):\n id = get_id(name)\n top50url = 'https://music.163.com/artist?id={}'.format(id)\n html = get_html(top50url)\n singer_infos = get_top50(html)\n for singer_info in singer_infos:\n lyric = get_lyrics(singer_info[1])\n save2txt(singer_info[0], lyric, path)\n time.sleep(1)\n\n\ndef save_song(songurl, path, songname):\n try:\n urlretrieve(songurl, path)\n print('歌曲下载完成:' + songname)\n except BaseException:\n print('下载失败:' + songname)\n pass\n\n\ndef songs_from_singername(name, path):\n id = get_id(name)\n top50url = 'https://music.163.com/artist?id={}'.format(id)\n html = get_html(top50url)\n singer_infos = get_top50(html)\n for singer_info in singer_infos:\n songid = singer_info[1]\n songurl = 'http://music.163.com/song/media/outer/url?id={}.mp3'.format(\n songid)\n songname = singer_info[0]\n down_path = path + '\\\\' + songname + '.mp3'\n save_song(songurl, down_path, songname)\n time.sleep(1)\n\n\ndef lyrics_from_singerid(id, path):\n top50url = 'https://music.163.com/artist?id={}'.format(id)\n html = get_html(top50url)\n singer_infos = get_top50(html)\n for singer_info in singer_infos:\n lyric = get_lyrics(singer_info[1])\n save2txt(singer_info[0], lyric, path)\n time.sleep(1)\n\n\n<mask token>\n\n\ndef http_get(api):\n my_cookie = {'version': 0, 'name': 'appver', 'value': '1.5.0.75771',\n 'port': None, 'domain': 'www.mydomain.com', 'path': '/', 'secure': \n False, 'expires': None, 'discard': True, 'comment': None,\n 'comment_url': None, 'rest': {}, 'rfc2109': False}\n s = requests.Session()\n s.headers.update({'Referer': 'http://music.163.com/'})\n s.cookies.set(**my_cookie)\n response = s.get(api)\n json_data = json.loads(response.text)\n return json_data\n\n\ndef download_single_mv(id):\n size = '720'\n api = 'http://music.163.com/api/mv/detail?id=' + str(id) + '&type=mp4'\n json_data = http_get(api)\n if json_data['code'] == 200:\n a = list(json_data['data']['brs'].keys())\n if size not in a:\n size = a[0]\n mvurl = json_data['data']['brs'][size]\n artist = json_data['data']['artistName']\n song = json_data['data']['name']\n filename = '%s/[%s]%s.mp4' % (artist, size, song)\n if os.path.exists(filename) == False:\n if os.path.exists(artist) == False:\n os.makedirs(artist)\n\n def reporthook(blocknum, blocksize, totalsize):\n readsofar = blocknum * blocksize\n if totalsize > 0:\n percent = readsofar * 100.0 / totalsize\n s = '\\r%5.1f%% %*d / %d' % (percent, len(str(totalsize)\n ), readsofar, totalsize)\n sys.stderr.write(s)\n if readsofar >= totalsize:\n sys.stderr.write('\\n')\n else:\n sys.stderr.write('read %d\\n' % (readsofar,))\n print('downloading ' + filename)\n urlretrieve(mvurl, filename, reporthook)\n\n\n<mask token>\n\n\ndef _content_generator(music_id):\n url = 'http://music.163.com/api/v1/resource/comments/R_SO_4_%s' % music_id\n headers = {'Accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'Accept-Encoding': 'gzip, deflate', 'Accept-Language':\n 'zh-CN,zh;q=0.9', 'Cache-Control': 'max-age=0', 'Host':\n 'music.163.com', 'Proxy-Connection': 'keep-alive',\n 'Upgrade-Insecure-Requests': '1', 'Cookie':\n '__f_=1544879495065; 
_ntes_nnid=ec5f372598a44f7d45726f800d3c244b,1544879496275; _ntes_nuid=ec5f372598a44f7d45726f800d3c244b; _iuqxldmzr_=32; __utmc=94650624; WM_TID=SjPgpIfajWhEUVQQAVYoLv%2BJSutc41%2BE; __utma=94650624.1212198154.1546091705.1546142549.1546173830.4; __utmz=94650624.1546173830.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; WM_NI=fjy1sURvfoc29LFwx6VN7rVC6wTgq5EA1go8oNGPt2OIoPoLBInGAKxG9Rc6%2BZ%2F6HQPKefTD2kdeQesFU899HSQfRmRPbGmc6lxhGHcRpZAVtsYhGxIWtlaVLL1c0Z7HYUc%3D; WM_NIKE=9ca17ae2e6ffcda170e2e6ee89ef48839ff7a3f0668abc8aa3d15b938b8abab76ab6afbab4db5aacaea290c52af0fea7c3b92aa6b6b7d2f25f92aaaa90e23afb948a98fb3e9692f993d549f6a99c88f43f879fff88ee34ad9289b1f73a8d97a1b1ee488297a2a8c441bc99f7b3e23ee986e1d7cb5b9495ab87d750f2b5ac86d46fb19a9bd9bc338c8d9f87d1679290aea8f069f6b4b889c644a18ec0bbc45eb8ad9789c6748b89bc8de45e9094ff84b352f59897b6e237e2a3; __utmb=94650624.8.10.1546173830; JSESSIONID-WYYY=JhDousUg2D2BV1f%2Bvq6Ka6iQHAWfFvQOPdvf5%5CPMQISbc5nnfzqQAJDcQsezW82Cup2H5n1grdeIxXp79veCgoKA68D6CSkgCXcOFkI04Hv8hEXG9tWSMKuRx0XZ4Bp%5C%5CSbZzeRs6ey4FxADkuPVlIIVSGn%2BTq8mYstxPYBIg0f2quO%5C%3A1546177369761'\n , 'User-Agent':\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'\n }\n limit = 20\n offset = 0\n compiler = re.compile(\n '[^\\\\u4E00-\\\\u9FA5^\\\\u3000-\\\\u303F^\\\\uFF00-\\\\uFFEF^0-9^a-z^A-Z]')\n while True:\n params = {'limit': limit, 'offset': offset}\n offset += limit\n r = requests.get(url, headers=headers, params=params)\n comments = r.json()['comments']\n has_more = r.json()['more']\n for t in comments:\n yield compiler.subn('', t['content'])[0]\n if not has_more:\n break\n\n\nclass WangYiMusicWordCloud:\n stop_words = ['首歌']\n\n def __init__(self, music_id, mask=None, font_path=None, stop_words=None):\n self.music_id = music_id\n self.mask = mask\n self.font_path = font_path\n if not stop_words is None:\n self.stop_words += stop_words\n self.img_wordcloud = None\n\n def _cut_word(self, comment):\n word_pairs = posseg.lcut(comment, HMM=False)\n result = []\n for t in word_pairs:\n if not (t.word in result or t.word in self.stop_words):\n result.append(t.word)\n return '/'.join(result)\n\n def get_words_text(self):\n if os.path.isfile(f'{self.music_id}.txt'):\n print('评论文件已存在,读取文件...')\n with open(f'{self.music_id}.txt', 'r', encoding='utf-8') as f:\n return f.read()\n else:\n print('没有默认评论文件,开始爬取评论...')\n count = 0\n text = []\n comments = _content_generator(self.music_id)\n for t in comments:\n text.append(self._cut_word(t))\n count += 1\n print(f'\\r已爬取 {count}条评论', end='')\n if count % 100 == 0:\n print(f'\\r已爬取 {count}条评论, 休息 2s', end='')\n time.sleep(2)\n str_text = '\\n'.join(text)\n with open(f'{self.music_id}.txt', 'w', encoding='utf-8') as f:\n f.write(str_text)\n print(f'\\r共爬取 {count}条评论,已写入文件 {self.music_id}.txt')\n return str_text\n\n def generate(self, **kwargs):\n default_kwargs = {'background_color': 'white', 'width': 1000,\n 'height': 860, 'margin': 2, 'max_words': 50, 'stopwords':\n wordcloud.STOPWORDS}\n if not self.mask is None:\n default_kwargs['mask'] = np.array(Image.open(self.mask))\n if not self.font_path is None:\n default_kwargs['font_path'] = self.font_path\n elif 'font_path' not in kwargs:\n raise ValueError('缺少参数 font_path')\n default_kwargs.update(kwargs)\n str_text = self.get_words_text()\n self.wordcloud = wordcloud.WordCloud(**default_kwargs)\n self.img_wordcloud = self.wordcloud.generate(str_text)\n\n def show_wordcloud(self):\n if self.img_wordcloud is None:\n self.generate()\n 
plt.axis('off')\n plt.imshow(self.img_wordcloud)\n plt.show()\n\n def to_file(self, filename):\n if not hasattr(self, 'wordcloud'):\n self.generate()\n self.wordcloud.to_file(filename)\n\n\n<mask token>\n",
"step-5": "import requests\r\nfrom bs4 import BeautifulSoup\r\nfrom urllib.request import urlretrieve\r\nimport json\r\nimport time\r\n\r\n#功能一:下载单一歌曲、歌词\r\n\r\ndef single_song(song_id,path,song_name): #下载单一歌曲,输入为歌曲id,保存路径,歌曲名称\r\n song_url = \"http://music.163.com/song/media/outer/url?id=%s\" % song_id\r\n down_path = path +'\\\\'+ song_name + '.mp3'\r\n urlretrieve(song_url,down_path)\r\n print(\"歌曲下载完成:\"+song_name)\r\n\r\ndef save2txt(songname, lyric,path): #写进歌词到指定路径,并保存,输入为歌曲名称、歌词信息、保存路径\r\n # print('正在保存歌曲:{}'.format(songname))\r\n print(\"歌词下载完成:\"+songname)\r\n lyric_path=path+'\\\\'+songname+'.txt'\r\n with open(lyric_path, 'a', encoding='utf-8')as f:\r\n f.write(lyric)\r\n\r\ndef single_song_lyric(song_id,path,song_name): #下载单一歌曲的歌词,输入为歌曲id,保存路径,歌曲名称\r\n url = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(song_id)\r\n headers = {\r\n 'User-Agent':\r\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}\r\n html = requests.get(url, headers=headers).text\r\n json_obj = json.loads(html)\r\n initial_lyric = json_obj['lrc']['lyric']\r\n reg = re.compile(r'\\[.*\\]')\r\n lyric = re.sub(reg, '', initial_lyric).strip()\r\n save2txt(song_name, lyric, path)\r\n time.sleep(1)\r\n\r\n\r\n#功能二:根据歌单url下载\r\n\r\ndef songs_from_list(url,path): #url:歌单网址;path:本地保存目录 下载某一歌单的所有歌曲(包括歌手页、排行榜)\r\n new_url = url.replace('/#', '')\r\n\r\n header = {\r\n 'Host': 'music.163.com',\r\n 'Referer': 'https://music.163.com/',\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'\r\n }\r\n\r\n res = requests.get(new_url, headers=header).text\r\n\r\n r = BeautifulSoup(res, \"html.parser\")\r\n music_dict = {}\r\n result = r.find('ul', {'class', 'f-hide'}).find_all('a')\r\n for music in result:\r\n print(music)\r\n music_id = music.get('href').strip('/song?id=')\r\n music_name = music.text\r\n music_dict[music_id] = music_name\r\n for song_id in music_dict:\r\n song_url = \"http://music.163.com/song/media/outer/url?id=%s\" % song_id\r\n down_path=path+'\\\\'+music_dict[song_id]+'.mp3'\r\n\r\n # path = \"C:\\\\Users\\\\ming-\\\\Downloads\\\\%s.mp3\" % music_dict[song_id]\r\n\r\n # 添加数据\r\n print( \"正在下载:%s\" % music_dict[song_id])\r\n # text.see(END)\r\n # text.update()\r\n\r\n urlretrieve(song_url, down_path)\r\n\r\ndef get_lyrics(songids): #根据歌曲id获取歌词,输入为歌曲Id\r\n url = 'http://music.163.com/api/song/lyric?id={}&lv=-1&kv=-1&tv=-1'.format(songids)\r\n headers = {\r\n 'User-Agent':\r\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}\r\n html = requests.get(url, headers=headers).text\r\n json_obj = json.loads(html)\r\n initial_lyric = json_obj['lrc']['lyric']\r\n reg = re.compile(r'\\[.*\\]')\r\n lyric = re.sub(reg, '', initial_lyric).strip()\r\n return lyric\r\n\r\ndef lyrics_from_list(url,path): #根据歌单下载歌曲歌词\r\n new_url = url.replace('/#', '')\r\n\r\n header = {\r\n 'Host': 'music.163.com',\r\n 'Referer': 'https://music.163.com/',\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0'\r\n }\r\n\r\n res = requests.get(new_url, headers=header).text\r\n\r\n r = BeautifulSoup(res, \"html.parser\")\r\n music_dict = {}\r\n result = r.find('ul', {'class', 'f-hide'}).find_all('a')\r\n for music in result:\r\n print(music)\r\n music_id = music.get('href').strip('/song?id=')\r\n music_name = 
music.text\r\n music_dict[music_id] = music_name\r\n songids=music_dict.keys()\r\n for i in songids:\r\n lyric=get_lyrics(i)\r\n save2txt(music_dict[i],lyric,path)\r\n time.sleep(1)\r\n\r\n\r\n#功能三:根据歌手下载\r\n\r\n#获取歌手信息和id\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nimport csv\r\nimport re\r\n# chrome_driver = \"D:\\\\software\\\\chromedriver_win32\\\\chromedriver.exe\" #chromedriver的文件位置\r\n# browser = webdriver.Chrome(executable_path = chrome_driver)\r\n# wait = WebDriverWait(browser, 5) # 设置等待时间\r\ndef get_singer(url): # 返回歌手名字和歌手id,输入为歌手详情页\r\n chrome_driver = \"D:\\\\software\\\\chromedriver_win32\\\\chromedriver.exe\" # chromedriver的文件位置\r\n browser = webdriver.Chrome(executable_path=chrome_driver)\r\n wait = WebDriverWait(browser, 5) # 设置等待时间\r\n browser.get(url)\r\n browser.switch_to.frame('g_iframe')\r\n html = browser.page_source\r\n soup = BeautifulSoup(html, 'lxml')\r\n info = soup.select('.nm.nm-icn.f-thide.s-fc0')\r\n singername = []\r\n singerid = []\r\n for snames in info:\r\n name = snames.get_text()\r\n songid = str(re.findall('href=\"(.*?)\"', str(snames))).split('=')[1].split('\\'')[0] #正则表达式获取歌曲id\r\n singername.append(name)\r\n singerid.append(songid)\r\n return zip(singername, singerid)\r\n\r\ndef get_data(url):\r\n data = []\r\n for singernames, singerids in get_singer(url):\r\n info = {}\r\n info['歌手名字'] = singernames\r\n info['歌手ID'] = singerids\r\n data.append(info)\r\n return data\r\n\r\ndef save2csv(url):\r\n print('保存歌手信息中...请稍后查看')\r\n with open('singer.csv', 'a', newline='', encoding='utf-8-sig') as f:\r\n # CSV 基本写入用 w,追加改模式 w 为 a\r\n fieldnames = ['歌手名字', '歌手ID']\r\n writer = csv.DictWriter(f, fieldnames=fieldnames)\r\n writer.writeheader()\r\n data = get_data(url)\r\n print(data)\r\n writer.writerows(data)\r\n print('保存成功')\r\n\r\ndef download_singer():\r\n idlist = [1001, 1002, 1003, 2001, 2002, 2003, 4001, 4002, 4003, 6001, 6002, 6003, 7001, 7002, 7003]\r\n for id in idlist:\r\n url = 'https://music.163.com/#/discover/artist/cat?id={}&initial=-1'.format(id)\r\n save2csv(url)\r\n\r\ndef get_id(singer_name): #根据歌手姓名获取对应的歌手id,输入为歌手姓名\r\n file = \"lib\\\\singer_info.csv\"\r\n with open(file, 'r',encoding='utf-8-sig') as f:\r\n reader = csv.reader(f)\r\n name = []\r\n id = []\r\n for i in reader:\r\n name.append(i[0])\r\n id.append(i[1])\r\n a=name.index(singer_name)\r\n return id[a]\r\n\r\n\r\n#根据歌手姓名下载\r\ndef get_html(url): #通过代理获取网页信息,输入为指定网页url\r\n proxy_addr = {'http': '61.135.217.7:80'}\r\n # 用的代理 ip,如果被封或者失效,在http://www.xicidaili.com/换一个\r\n headers = {\r\n 'User-Agent':\r\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'}\r\n try:\r\n html = requests.get(url, headers=headers, proxies=proxy_addr).text\r\n return html\r\n except BaseException:\r\n print('request error')\r\n pass\r\n\r\ndef get_top50(html): #获取热度前50名的歌曲,并返回对应的歌曲名称和歌曲id,输入为歌手详情页\r\n soup = BeautifulSoup(html, 'lxml')\r\n info = soup.select('.f-hide #song-list-pre-cache a')\r\n songname = []\r\n songids = []\r\n for sn in info:\r\n songnames = sn.getText()\r\n songname.append(songnames)\r\n for si in info:\r\n songid = str(re.findall('href=\"(.*?)\"', str(si))).strip().split('=')[-1].split('\\'')[0] # 用re查找,查找对象一定要是str类型\r\n songids.append(songid)\r\n return zip(songname, songids)\r\n\r\ndef lyrics_from_singername(name,path): #根据歌手姓名下载热度前50名歌曲的歌词\r\n id=get_id(name)\r\n top50url = 'https://music.163.com/artist?id={}'.format(id)\r\n html = get_html(top50url)\r\n singer_infos = 
get_top50(html)\r\n for singer_info in singer_infos:\r\n lyric = get_lyrics(singer_info[1])\r\n save2txt(singer_info[0], lyric, path)\r\n time.sleep(1)\r\n\r\ndef save_song(songurl, path,songname): #下载指定链接的歌曲,并保存到指定路径,输入为歌曲下载链接、保存路径、歌曲名称\r\n try:\r\n urlretrieve(songurl, path)\r\n print('歌曲下载完成:' + songname)\r\n except BaseException:\r\n print('下载失败:' + songname)\r\n pass\r\n\r\ndef songs_from_singername(name,path): #根据歌手姓名下载歌曲到指定路径,输入为歌手姓名和保存路径\r\n id=get_id(name)\r\n top50url = 'https://music.163.com/artist?id={}'.format(id)\r\n html = get_html(top50url)\r\n singer_infos = get_top50(html)\r\n for singer_info in singer_infos:\r\n songid = singer_info[1]\r\n songurl = 'http://music.163.com/song/media/outer/url?id={}.mp3'.format(songid)\r\n songname = singer_info[0]\r\n # path = 'D:\\\\code_new\\\\pycharm\\\\yunmusic\\\\song' + songname + '.mp3'\r\n down_path=path+'\\\\'+songname+'.mp3'\r\n save_song(songurl, down_path,songname)\r\n time.sleep(1)\r\n\r\ndef lyrics_from_singerid(id,path): #根据歌手id下载歌词,输入为歌手id和本地保存路径\r\n top50url = 'https://music.163.com/artist?id={}'.format(id)\r\n html = get_html(top50url)\r\n singer_infos = get_top50(html)\r\n for singer_info in singer_infos:\r\n lyric = get_lyrics(singer_info[1])\r\n save2txt(singer_info[0], lyric, path)\r\n time.sleep(1)\r\n\r\ndef songs_from_singerid(id,path): #根据歌手id下载歌曲音频,输入为歌手id和本地保存路径\r\n top50url = 'https://music.163.com/artist?id={}'.format(id)\r\n html = get_html(top50url)\r\n singer_infos = get_top50(html)\r\n for singer_info in singer_infos:\r\n songid = singer_info[1]\r\n songurl = 'http://music.163.com/song/media/outer/url?id={}.mp3'.format(songid)\r\n songname = singer_info[0]\r\n # path = 'D:\\\\code_new\\\\pycharm\\\\yunmusic\\\\song' + songname + '.mp3'\r\n down_path = path + '\\\\' + songname + '.mp3'\r\n save_song(songurl, down_path, songname)\r\n time.sleep(1)\r\n\r\n#功能四:下载mv\r\nimport requests\r\nimport os\r\nimport sys\r\nfrom urllib.parse import urlparse,parse_qs\r\n\r\ndef http_get(api):\r\n my_cookie = {\r\n \"version\":0,\r\n \"name\":'appver',\r\n \"value\":'1.5.0.75771',\r\n \"port\":None,\r\n # \"port_specified\":False,\r\n \"domain\":'www.mydomain.com',\r\n # \"domain_specified\":False,\r\n # \"domain_initial_dot\":False,\r\n \"path\":'/',\r\n # \"path_specified\":True,\r\n \"secure\":False,\r\n \"expires\":None,\r\n \"discard\":True,\r\n \"comment\":None,\r\n \"comment_url\":None,\r\n \"rest\":{},\r\n \"rfc2109\":False\r\n }\r\n\r\n s = requests.Session()\r\n s.headers.update({'Referer': \"http://music.163.com/\"})\r\n s.cookies.set(**my_cookie)\r\n response = s.get(api)\r\n json_data = json.loads(response.text)\r\n return json_data\r\n\r\ndef download_single_mv(id): #根据mvid下载\r\n size = \"720\" #default 720p\r\n api = \"http://music.163.com/api/mv/detail?id=\"+str(id)+\"&type=mp4\"\r\n json_data = http_get(api)\r\n if json_data[\"code\"]==200:\r\n a = list(json_data[\"data\"][\"brs\"].keys())\r\n if size not in a:\r\n size = a[0] #如果没有720p,则选择最小的版本\r\n mvurl = json_data[\"data\"][\"brs\"][size] #mv网址\r\n artist = json_data[\"data\"][\"artistName\"] #歌手信息\r\n song = json_data[\"data\"][\"name\"] #歌曲信息\r\n\r\n filename = '%s/[%s]%s.mp4' %(artist,size,song)\r\n\r\n if os.path.exists(filename)==False:\r\n if os.path.exists(artist)==False:\r\n os.makedirs(artist)\r\n def reporthook(blocknum, blocksize, totalsize):\r\n readsofar = blocknum * blocksize\r\n if totalsize > 0:\r\n percent = readsofar * 1e2 / totalsize\r\n s = \"\\r%5.1f%% %*d / %d\" % (\r\n percent, len(str(totalsize)), readsofar, totalsize)\r\n 
sys.stderr.write(s)\r\n if readsofar >= totalsize: # near the end\r\n sys.stderr.write(\"\\n\")\r\n else: # total size is unknown\r\n sys.stderr.write(\"read %d\\n\" % (readsofar,))\r\n print(\"downloading \"+filename)\r\n urlretrieve(mvurl,filename,reporthook)\r\n\r\ndef download_mv_from_list(url): #批量下载歌单的mv资源\r\n input=url.replace(\"#\",\"\")\r\n id = parse_qs(urlparse(input).query)[\"id\"][0]\r\n if \"playlist\" in input:\r\n playlist_api = \"http://music.163.com/api/playlist/detail?id=%s\" % (id)\r\n json_data = http_get(playlist_api)\r\n for idx, mv in enumerate(json_data[\"result\"][\"tracks\"]): #mv信息\r\n download_single_mv(mv[\"mvid\"])\r\n print(\"downloaded:\" + str(idx))\r\n elif \"album\" in input:\r\n playlist_api = \"http://music.163.com/api/album/%s\" % (id)\r\n json_data = http_get(playlist_api)\r\n for idx, mv in enumerate(json_data[\"album\"][\"songs\"]):\r\n if mv[\"mvid\"] != None and mv[\"mvid\"] != 0:\r\n download_single_mv(mv[\"mvid\"])\r\n print(\"downloaded:\" + str(idx))\r\n download_single_mv(id)\r\n\r\n\r\n#功能五:爬取歌曲评论并生成词云图\r\nfrom jieba import posseg\r\nfrom PIL import Image\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport wordcloud\r\n\r\ndef _content_generator(music_id): #根据歌曲id获取评论信息\r\n url = 'http://music.163.com/api/v1/resource/comments/R_SO_4_%s' % music_id\r\n headers = {\r\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\r\n 'Accept-Encoding': 'gzip, deflate',\r\n 'Accept-Language': 'zh-CN,zh;q=0.9',\r\n 'Cache-Control': 'max-age=0',\r\n 'Host': 'music.163.com',\r\n 'Proxy-Connection': 'keep-alive',\r\n 'Upgrade-Insecure-Requests': '1',\r\n 'Cookie': '__f_=1544879495065; _ntes_nnid=ec5f372598a44f7d45726f800d3c244b,1544879496275; _ntes_nuid=ec5f372598a44f7d45726f800d3c244b; _iuqxldmzr_=32; __utmc=94650624; WM_TID=SjPgpIfajWhEUVQQAVYoLv%2BJSutc41%2BE; __utma=94650624.1212198154.1546091705.1546142549.1546173830.4; __utmz=94650624.1546173830.4.4.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; WM_NI=fjy1sURvfoc29LFwx6VN7rVC6wTgq5EA1go8oNGPt2OIoPoLBInGAKxG9Rc6%2BZ%2F6HQPKefTD2kdeQesFU899HSQfRmRPbGmc6lxhGHcRpZAVtsYhGxIWtlaVLL1c0Z7HYUc%3D; WM_NIKE=9ca17ae2e6ffcda170e2e6ee89ef48839ff7a3f0668abc8aa3d15b938b8abab76ab6afbab4db5aacaea290c52af0fea7c3b92aa6b6b7d2f25f92aaaa90e23afb948a98fb3e9692f993d549f6a99c88f43f879fff88ee34ad9289b1f73a8d97a1b1ee488297a2a8c441bc99f7b3e23ee986e1d7cb5b9495ab87d750f2b5ac86d46fb19a9bd9bc338c8d9f87d1679290aea8f069f6b4b889c644a18ec0bbc45eb8ad9789c6748b89bc8de45e9094ff84b352f59897b6e237e2a3; __utmb=94650624.8.10.1546173830; JSESSIONID-WYYY=JhDousUg2D2BV1f%2Bvq6Ka6iQHAWfFvQOPdvf5%5CPMQISbc5nnfzqQAJDcQsezW82Cup2H5n1grdeIxXp79veCgoKA68D6CSkgCXcOFkI04Hv8hEXG9tWSMKuRx0XZ4Bp%5C%5CSbZzeRs6ey4FxADkuPVlIIVSGn%2BTq8mYstxPYBIg0f2quO%5C%3A1546177369761',\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36',\r\n }\r\n limit = 20\r\n offset = 0\r\n compiler = re.compile(r'[^\\u4E00-\\u9FA5^\\u3000-\\u303F^\\uFF00-\\uFFEF^0-9^a-z^A-Z]')\r\n\r\n while True:\r\n params = {\r\n 'limit': limit,\r\n 'offset': offset,\r\n }\r\n offset += limit\r\n r = requests.get(url, headers=headers, params=params)\r\n comments = r.json()['comments']\r\n has_more = r.json()['more']\r\n\r\n for t in comments:\r\n yield compiler.subn('', t['content'])[0]\r\n\r\n if not has_more:\r\n break\r\n\r\n\r\nclass WangYiMusicWordCloud: #自定义类,生成词云图\r\n stop_words = ['首歌']\r\n def __init__(self, music_id, mask=None, font_path=None, 
stop_words=None):\r\n self.music_id = music_id #歌曲信息\r\n self.mask = mask #背景图片\r\n self.font_path = font_path #字体\r\n\r\n if not stop_words is None:\r\n self.stop_words+=stop_words\r\n\r\n self.img_wordcloud = None\r\n\r\n def _cut_word(self, comment): #分词\r\n word_pairs = posseg.lcut(comment, HMM=False)\r\n result = []\r\n for t in word_pairs:\r\n if not (t.word in result or t.word in self.stop_words):\r\n result.append(t.word)\r\n return '/'.join(result)\r\n\r\n\r\n def get_words_text(self): #若已有评论文件则读取,若没有则爬取评论并保存\r\n if os.path.isfile(f'{self.music_id}.txt'):\r\n print('评论文件已存在,读取文件...')\r\n with open(f'{self.music_id}.txt', 'r', encoding='utf-8') as f:\r\n return f.read()\r\n else:\r\n print('没有默认评论文件,开始爬取评论...')\r\n count = 0\r\n text = []\r\n comments = _content_generator(self.music_id)\r\n for t in comments:\r\n text.append(self._cut_word(t))\r\n\r\n count += 1\r\n print(f'\\r已爬取 {count}条评论', end='')\r\n if count % 100 == 0:\r\n print(f'\\r已爬取 {count}条评论, 休息 2s', end='')\r\n time.sleep(2)\r\n\r\n str_text = '\\n'.join(text)\r\n with open(f'{self.music_id}.txt', 'w', encoding='utf-8') as f:\r\n f.write(str_text)\r\n print(f'\\r共爬取 {count}条评论,已写入文件 {self.music_id}.txt')\r\n return str_text\r\n\r\n def generate(self, **kwargs):\r\n default_kwargs = {\r\n 'background_color': \"white\",\r\n 'width': 1000,\r\n 'height': 860,\r\n 'margin': 2,\r\n 'max_words': 50,\r\n 'stopwords': wordcloud.STOPWORDS,\r\n }\r\n if not self.mask is None:\r\n default_kwargs['mask'] = np.array(Image.open(self.mask))\r\n if not self.font_path is None:\r\n default_kwargs['font_path'] = self.font_path\r\n elif 'font_path' not in kwargs:\r\n raise ValueError('缺少参数 font_path')\r\n default_kwargs.update(kwargs)\r\n\r\n str_text = self.get_words_text()\r\n self.wordcloud = wordcloud.WordCloud(**default_kwargs)\r\n self.img_wordcloud = self.wordcloud.generate(str_text)\r\n\r\n def show_wordcloud(self): #生成词云图\r\n if self.img_wordcloud is None:\r\n self.generate()\r\n\r\n plt.axis('off')\r\n plt.imshow(self.img_wordcloud)\r\n plt.show()\r\n\r\n def to_file(self, filename): #保存到本地\r\n if not hasattr(self, 'wordcloud'):\r\n self.generate()\r\n self.wordcloud.to_file(filename)\r\n\r\ndef get_wordcloud(music_id,mask,font,path): #执行函数\r\n wordcloud_obj = WangYiMusicWordCloud(music_id, mask=mask, font_path=font)\r\n wordcloud_obj.show_wordcloud()\r\n result=path+'\\\\'+'result.jpg'\r\n wordcloud_obj.to_file(result)\r\n\r\n\r\n",
"step-ids": [
17,
20,
21,
24,
33
]
}
|
[
17,
20,
21,
24,
33
] |
#!/usr/bin/python
import errno
import fuse
import stat
import time
#from multiprocessing import Queue
from functools import wraps
from processfs.svcmanager import Manager
import processfs.svcmanager as svcmanager
fuse.fuse_python_api = (0, 2)
_vfiles = ['stdin', 'stdout', 'stderr', 'cmdline', 'control', 'status']
def has_ent (func):
@wraps(func)
def wrapper(self, path, *args,**kwargs):
print 'called %s %s %s' % (func, path, args)
print self._svcmanager.procs.keys()
vpaths = ['%s/%s' % (x,z) for x in self._svcmanager.procs.keys() \
for z in _vfiles]
vpaths.append('/')
vpaths.extend(self._svcmanager.procs.keys())
if path not in vpaths:
return -errno.ENOENT
return func(self, path, *args,**kwargs)
return wrapper
class processfs(fuse.Fuse):
def __init__(self, *args, **kw):
fuse.Fuse.__init__(self, *args, **kw)
self._svcmanager = Manager()
self._svc_queue = self._svcmanager.queue
print type(self._svc_queue)
# start the process manager thread
print 'starting svc manager'
self._svcmanager.start()
## NEED - returns dir and file stat struct
@has_ent
def getattr(self, path):
print 'getattr(%s)' % path
st = fuse.Stat()
if path in self._svcmanager.procs.keys() or path == '/':
st.st_nlink = 2
st.st_mode = stat.S_IFDIR | 0777
else:
st.st_mode = stat.S_IFREG | 0600
st.st_nlink = 1
st.st_atime = int(time.time())
st.st_mtime = st.st_atime
st.st_ctime = st.st_atime
st.st_size = 100
return st
# returns the contents of a directory
def readdir(self, path, offset):
## always return . and ..
for p in ['.', '..']:
yield fuse.Direntry(p)
procs = self._svcmanager.procs.keys()
if path == '/':
for p in procs:
yield fuse.Direntry(p[1:])
elif path in procs:
for p in _vfiles:
yield fuse.Direntry(p)
# obvious - see the syscall
# Note, offset is always ignored. There'll be no appending here
## if we are not creating a new file, buf should be sent to proc
## stdin
@has_ent
def write(self, path, buf, offset):
print 'write(%s, %s)' % (path, buf.strip())
if path not in ['%s/%s' % (x,z) \
for x in self._svcmanager.procs.keys() \
for z in _vfiles]:
return -errno.EOPNOTSUPP
else:
# Implement later
return -errno.EACCES
# obvious - see the syscall
@has_ent
def open(self, path, flags):
print 'open(%s)' % path
return 0
# called after create to set times
@has_ent
def utime(self, path, times):
print 'utime(%s)' % path
# called after write to "commit" data to "disk"
@has_ent
def flush(self, path):
print 'flush(%s)' % path
# should connect to proc ring buffer
@has_ent
    def read(self, path, size, offset):
        # NOTE: the original indexed self.files, which is never defined on
        # this class (and shadowed the len builtin with its parameter name);
        # return an empty buffer until the per-process ring buffer is wired up.
        return ''
@has_ent
def unlink(self, path):
print 'unlink(%s)' % path
# another noop - makes some file writes happy
@has_ent
def truncate(self, path, size):
print 'truncate(%s)' % path
return 0
def mkdir(self, path, mode):
print 'mkdir(%s, %s)' % (path, mode)
self._svc_queue.put([svcmanager.MKPROC, path])
self._svc_queue.join()
return 0
def fsdestroy(self, *args, **kw):
self._svcmanager.stop()
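# A hypothetical entry point (not part of the original module): with
# fuse-python, mounting a filesystem class like this usually looks like
# the sketch below.
if __name__ == '__main__':
    server = processfs(version="%prog " + fuse.__version__,
                       usage='processfs: expose managed processes as files',
                       dash_s_do='setsingle')
    server.parse(errex=1)
    server.main()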
|
normal
|
{
"blob_id": "028c2193e180ccdbfdcc51e5d061904ea1d6164e",
"index": 3536,
"step-1": "#!/usr/bin/python\n\nimport errno\nimport fuse\nimport stat\nimport time\n#from multiprocessing import Queue\nfrom functools import wraps\n\nfrom processfs.svcmanager import Manager\nimport processfs.svcmanager as svcmanager\n\nfuse.fuse_python_api = (0, 2)\n\n_vfiles = ['stdin', 'stdout', 'stderr', 'cmdline', 'control', 'status']\n\ndef has_ent (func):\n @wraps(func)\n def wrapper(self, path, *args,**kwargs):\n print 'called %s %s %s' % (func, path, args)\n print self._svcmanager.procs.keys()\n vpaths = ['%s/%s' % (x,z) for x in self._svcmanager.procs.keys() \\\n for z in _vfiles]\n vpaths.append('/')\n vpaths.extend(self._svcmanager.procs.keys())\n if path not in vpaths:\n return -errno.ENOENT\n return func(self, path, *args,**kwargs)\n return wrapper\n\nclass processfs(fuse.Fuse):\n def __init__(self, *args, **kw):\n fuse.Fuse.__init__(self, *args, **kw)\n\n self._svcmanager = Manager()\n self._svc_queue = self._svcmanager.queue\n\n print type(self._svc_queue)\n\n # start the process manager thread\n print 'starting svc manager'\n self._svcmanager.start()\n\n ## NEED - returns dir and file stat struct\n @has_ent\n def getattr(self, path):\n print 'getattr(%s)' % path\n\n st = fuse.Stat()\n\n if path in self._svcmanager.procs.keys() or path == '/':\n st.st_nlink = 2\n st.st_mode = stat.S_IFDIR | 0777\n else:\n st.st_mode = stat.S_IFREG | 0600\n st.st_nlink = 1\n\n st.st_atime = int(time.time())\n st.st_mtime = st.st_atime\n st.st_ctime = st.st_atime\n st.st_size = 100\n\n return st\n\n # returns the contents of a directory\n def readdir(self, path, offset):\n ## always return . and ..\n for p in ['.', '..']:\n yield fuse.Direntry(p)\n procs = self._svcmanager.procs.keys()\n if path == '/':\n for p in procs:\n yield fuse.Direntry(p[1:])\n elif path in procs:\n for p in _vfiles:\n yield fuse.Direntry(p)\n\n # obvious - see the syscall\n # Note, offset is always ignored. There'll be no appending here\n ## if we are not creating a new file, buf should be sent to proc\n ## stdin\n @has_ent\n def write(self, path, buf, offset):\n print 'write(%s, %s)' % (path, buf.strip())\n\n if path not in ['%s/%s' % (x,z) \\\n for x in self._svcmanager.procs.keys() \\\n for z in _vfiles]:\n return -errno.EOPNOTSUPP\n\n else:\n # Implement later\n return -errno.EACCES\n\n # obvious - see the syscall\n @has_ent\n def open(self, path, flags):\n print 'open(%s)' % path\n return 0\n\n # called after create to set times\n @has_ent\n def utime(self, path, times):\n print 'utime(%s)' % path\n\n # called after write to \"commit\" data to \"disk\"\n @has_ent\n def flush(self, path):\n print 'flush(%s)' % path\n\n # should connect to proc ring buffer\n @has_ent\n def read(self, path, len, offset):\n\n return self.files[path]['process'][offset:offset+len]\n\n @has_ent\n def unlink(self, path):\n print 'unlink(%s)' % path\n\n # another noop - makes some file writes happy\n @has_ent\n def truncate(self, path, size):\n print 'truncate(%s)' % path\n return 0\n\n def mkdir(self, path, mode):\n print 'mkdir(%s, %s)' % (path, mode)\n self._svc_queue.put([svcmanager.MKPROC, path])\n self._svc_queue.join()\n return 0\n\n def fsdestroy(self, *args, **kw):\n self._svcmanager.stop()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from django.conf.urls.defaults import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'foo.views.home', name='home'),
# url(r'^foo/', include('foo.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
# required url to login so you can authorize token
url(r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}),
)
# piston, oauth urls
urlpatterns += patterns(
'piston.authentication',
url(r'^oauth/request_token/$','oauth_request_token'),
url(r'^oauth/authorize/$','oauth_user_auth'),
url(r'^oauth/access_token/$','oauth_access_token'),
)
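# The three piston endpoints above make up the standard OAuth 1.0a
# three-legged flow: a consumer fetches a request token, the user approves
# it at /oauth/authorize/ (hence the login url above), and the consumer
# then trades the authorized token for an access token.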
|
normal
|
{
"blob_id": "266ce1aaa3283cf2aaa271a317a80c3860880a49",
"index": 4901,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.autodiscover()\n<mask token>\nurlpatterns += patterns('piston.authentication', url(\n '^oauth/request_token/$', 'oauth_request_token'), url(\n '^oauth/authorize/$', 'oauth_user_auth'), url('^oauth/access_token/$',\n 'oauth_access_token'))\n",
"step-3": "<mask token>\nadmin.autodiscover()\nurlpatterns = patterns('', url('^admin/', include(admin.site.urls)), url(\n '^accounts/login/$', 'django.contrib.auth.views.login', {\n 'template_name': 'login.html'}))\nurlpatterns += patterns('piston.authentication', url(\n '^oauth/request_token/$', 'oauth_request_token'), url(\n '^oauth/authorize/$', 'oauth_user_auth'), url('^oauth/access_token/$',\n 'oauth_access_token'))\n",
"step-4": "from django.conf.urls.defaults import patterns, include, url\nfrom django.contrib import admin\nadmin.autodiscover()\nurlpatterns = patterns('', url('^admin/', include(admin.site.urls)), url(\n '^accounts/login/$', 'django.contrib.auth.views.login', {\n 'template_name': 'login.html'}))\nurlpatterns += patterns('piston.authentication', url(\n '^oauth/request_token/$', 'oauth_request_token'), url(\n '^oauth/authorize/$', 'oauth_user_auth'), url('^oauth/access_token/$',\n 'oauth_access_token'))\n",
"step-5": "from django.conf.urls.defaults import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'foo.views.home', name='home'),\n # url(r'^foo/', include('foo.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n url(r'^admin/', include(admin.site.urls)),\n\n # required url to login so you can authorize token\n url(r'^accounts/login/$', 'django.contrib.auth.views.login', {'template_name': 'login.html'}),\n)\n\n# piston, oauth urls\nurlpatterns += patterns(\n 'piston.authentication',\n url(r'^oauth/request_token/$','oauth_request_token'),\n url(r'^oauth/authorize/$','oauth_user_auth'),\n url(r'^oauth/access_token/$','oauth_access_token'),\n)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import numpy as np
import tensorflow as tf
x_data = np.random.rand(100)
y_data = x_data * 10 + 5
# build a linear model y = k*x_data + b
b = tf.Variable(0.)
k = tf.Variable(0.)
y=k*x_data+b
# quadratic (mean-squared-error) cost; tf.square squares elementwise
loss= tf.reduce_mean(tf.square(y_data-y))
# gradient-descent optimizer (learning rate 0.2) to minimize the cost
optimizer=tf.train.GradientDescentOptimizer(.2)
train=optimizer.minimize(loss)
init=tf.global_variables_initializer()
with tf.Session() as ss:
ss.run(init)
for step in range(201):
ss.run(train)
if step %10==0:
print(step,ss.run([k,b]))
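# Expected behavior: since y_data = 10*x_data + 5, the fitted k and b should
# converge toward 10 and 5; by step 200 the printed pair is close to
# [10.0, 5.0] (exact values vary with the random x_data draw).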
|
normal
|
{
"blob_id": "ba7f66a0f9cf1028add778315033d596e10d6f16",
"index": 3197,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nwith tf.Session() as ss:\n ss.run(init)\n for step in range(201):\n ss.run(train)\n if step % 10 == 0:\n print(step, ss.run([k, b]))\n",
"step-3": "<mask token>\nx_data = np.random.rand(100)\ny_data = x_data * 10 + 5\nb = tf.Variable(0.0)\nk = tf.Variable(0.0)\ny = k * x_data + b\nloss = tf.reduce_mean(tf.square(y_data - y))\noptimizer = tf.train.GradientDescentOptimizer(0.2)\ntrain = optimizer.minimize(loss)\ninit = tf.global_variables_initializer()\nwith tf.Session() as ss:\n ss.run(init)\n for step in range(201):\n ss.run(train)\n if step % 10 == 0:\n print(step, ss.run([k, b]))\n",
"step-4": "import numpy as np\nimport tensorflow as tf\nx_data = np.random.rand(100)\ny_data = x_data * 10 + 5\nb = tf.Variable(0.0)\nk = tf.Variable(0.0)\ny = k * x_data + b\nloss = tf.reduce_mean(tf.square(y_data - y))\noptimizer = tf.train.GradientDescentOptimizer(0.2)\ntrain = optimizer.minimize(loss)\ninit = tf.global_variables_initializer()\nwith tf.Session() as ss:\n ss.run(init)\n for step in range(201):\n ss.run(train)\n if step % 10 == 0:\n print(step, ss.run([k, b]))\n",
"step-5": "import numpy as np\nimport tensorflow as tf\n\nx_data = np.random.rand(100)\ny_data = x_data * 10 + 5\n\n#构造线性模型\nb = tf.Variable(0.)\nk = tf.Variable(0.)\ny=k*x_data+b\n\n\n#二次代价函数 square求平方\nloss= tf.reduce_mean(tf.square(y_data-y))\n\n#定义一个梯度下降法来进行训练的优化器\n\noptimizer=tf.train.GradientDescentOptimizer(.2)\n\ntrain=optimizer.minimize(loss)\n\ninit=tf.global_variables_initializer()\n\nwith tf.Session() as ss:\n ss.run(init)\n for step in range(201):\n ss.run(train)\n if step %10==0:\n print(step,ss.run([k,b]))\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#coding=UTF-8
import random
list=[]
s=0
for i in range(1,5):
    for j in range(1,5):
        for k in range(1,5):
            if i!=j and j!=k and i!=k:  # all three digits must differ
                list.append(str(i)+str(j)+str(k))
                s=s+1
print len(list)
print s
if len(list)==s:
    print "they are equal!"
else:
    print "not equal!"
print random.choice(list)  # random.choice avoids the off-by-one in randrange(1, ...)
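# Check: 4 choices for the first digit, 3 for the second, 2 for the third
# gives 4*3*2 = 24 distinct three-digit numbers.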
import math
for n in range(1,10000):  # search bound is an assumption; the answer is small
    i=math.sqrt(n+100)
    j=math.sqrt(n+268)
    if i==int(i) and j==int(j):  # both n+100 and n+268 are perfect squares
        print n
        break
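# Check: n = 21 works, since 21+100 = 121 = 11**2 and 21+268 = 289 = 17**2.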
import time
#print help(time.strftime)
print time.strftime("%Y")
list=[90,19,8,99,87,45,109]
list.sort()
print u"sort排序输出:",list
list=[90,19,8,99,87,45,109]
i=len(list)
for b in range(1,i):
i=i-1
for a in range(0,i):
if list[a+1]<list[a]:
temp=list[a+1]
list[a+1]=list[a]
list[a]=temp
print u"冒泡排序输出:",list
print '*'*10
for i in range(5):
print "* *"
print '*'*10
import sys
#sys.stdout.write(chr(1))
temp=0   # pairs that are mature and breeding
temp1=0  # pairs one month away from breeding
temp2=1  # pairs two months away from breeding
m=12     # int(raw_input("number of months: "))
for i in range(1,m+1):
    temp=temp+temp1
    temp22=temp2
    temp2=temp
    temp1=temp22
print "rabbit pairs after %d months:" % m, temp+temp1+temp2
f1=1
f2=1
for i in range(1,24):
    print "%12d%12d" % (f1,f2),   # trailing comma keeps the line open: two pairs per row
    if (i%2)==0:
        print ''
    f1=f1+f2
    f2=f1+f2
for i in range(1,10):
for j in range(0,10):
for k in range(0,10):
if i**3+j**3+k**3==int(str(i)+str(j)+str(k)):
print int(str(i)+str(j)+str(k))
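# The classic three-digit narcissistic numbers: 153, 370, 371, 407
# (e.g. 153 = 1**3 + 5**3 + 3**3).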
n=45
print 'value: n=%d' % n
list=[]
i=2
while n>1:        # trial division: peel off prime factors smallest-first
    if n%i==0:
        list.append(str(i))
        n=n/i
    else:
        i=i+1
print "*".join(list)
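# Expected output for n = 45: 3*3*5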
h=100.0     # drop height in meters; each rebound reaches half the previous
total=h     # distance of the initial fall
for i in range(2,11):
    h=h/2.0
    total=total+2*h   # up and back down for landings 2 through 10
print h/2.0   # height of the 10th rebound
print total   # total distance when the ball lands the 10th time
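# Expected output: 0.09765625 (height of the 10th rebound, in meters)
# and 299.609375 (total distance travelled, in meters).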
|
normal
|
{
"blob_id": "fa07553477e3bb2ecbeb87bd1383a2194282579c",
"index": 4081,
"step-1": "#coding=UTF-8\nimport random\nimport random\nlist=[]\ns=0\nfor i in range(1,5):\n for j in range(1,5):\n for k in range(1,5):\n if i!=j and j<>k:\n list.append(str(i)+str(j)+str(k))\n s=s+1\nprint len(list)\nprint s\nif len(list)==s:\n print \"是相等的!\"\nelse:\n print \"不相等!\"\nprint list[random.randrange(1,len(list))]\n\n\nimport math\nfor n in range(1,1):\n i=math.sqrt(n+100)\n print i\n j=math.sqrt(n+268)\n print j\n if i/2.0==int(i/2) and j/2.0==int(j/2):\n print n\n break\n \nimport time\n#print help(time.strftime)\nprint time.strftime(\"%Y\")\n\n\n\nlist=[90,19,8,99,87,45,109]\nlist.sort()\nprint u\"sort排序输出:\",list\nlist=[90,19,8,99,87,45,109]\ni=len(list)\nfor b in range(1,i):\n i=i-1\n for a in range(0,i):\n if list[a+1]<list[a]:\n temp=list[a+1]\n list[a+1]=list[a] \n list[a]=temp\nprint u\"冒泡排序输出:\",list\n\n\n\n\nprint '*'*10\nfor i in range(5):\n print \"* *\"\nprint '*'*10\n\n\n\nimport sys\n#sys.stdout.write(chr(1))\n\n\n\n\n\ntemp=0#正常产仔的兔子\ntemp1=0#剩余一个月产仔的兔子\ntemp2=1#剩余2个月产仔的兔子\nm=12#int(raw_input(u\"请输入月份:\"))\nfor i in range(1,m+1):\n temp=temp+temp1\n temp22=temp2\n temp2=temp\n temp1=temp22\nprint \"24个月后的兔子数量:\",temp+temp1+temp2\n\nf1=1\nf2=1\nfor i in range(1,24): \n #print \"%12d%12d\"%(f1,f1)\n if (i%2)==0:\n print ''\n f1=f1+f2\n f2=f1+f2\n\nfor i in range(1,10):\n for j in range(0,10):\n for k in range(0,10):\n if i**3+j**3+k**3==int(str(i)+str(j)+str(k)):\n print int(str(i)+str(j)+str(k))\n\nimport sys\nfrom sys import stdout\nn=45\nprint '数值:n=%d'%n\nlist=[]\nfor i in range(2,n+1):\n while n!=0:\n if n%i==0:\n list.append(str(i))\n sys.stdout.write(str(i))\n sys.stdout.write(\"*\")\n n=n/i\n else:\n break\n print \"%d\"%n\nfor i in range(0,len(list)):\n if i<len(list)-1:\n sys.stdout.write(list[i]+\"*\")\n else:\n sys.stdout.write(list[i])\n\nh=100\nsum=0\nfor i in range(1,11):\n if i==1:\n print ''\n sum=sum+h\n h=h/2.0\n sum=sum+2*h\nprint h\nprint sum\n\n\n\n\n \n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import numpy as np
import pandas as pd
import geopandas as gp
from sklearn.cluster import KMeans
import shapely
from descartes import PolygonPatch
import matplotlib.pyplot as plt
# -- load the data
data = pd.read_csv('/scratch/share/gdobler/parqa/output/Tables/'
'ParkQualityScores/QualityArea_ZipCode_FiscalYears.csv')
zips = gp.GeoDataFrame.from_file('/scratch/share/gdobler/parqa/output/'
'ShapeData/ZIPCODE_Modified_Final.shp')
# -- prepare the data
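# quality-score columns for fiscal years 2004 through 2015 (F2004 ... F2015)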
cols = ['F2{0:03}'.format(i) for i in range(4,16)]
vals = data[cols].values
# z-score each zip code's trajectory across the fiscal years
vals -= vals[:,np.newaxis].mean(-1)
vals /= vals[:,np.newaxis].std(-1)
# -- cluster
km = KMeans(n_clusters=5)
km.fit(vals)
# -- assign clusters to zips
zips['cluster'] = np.zeros(len(zips),dtype=int)-1
dzips = [i for i in data.ZIPCODE]
for ii in range(len(zips)):
tzip = int(zips.ZIPCODE[ii])
if tzip in dzips:
zips['cluster'][ii] = km.labels_[dzips.index(tzip)]
# -- assign color
zips['color'] = np.zeros(len(zips),dtype=str)
for tcluster in range(km.n_clusters):
print("tcluster = " + str(tcluster))
zips['color'][zips['cluster']==tcluster] = 'red'
zips['color'][zips['cluster']!=tcluster] = 'none'
    # -- plot (inside the cluster loop so each cluster gets its own figure)
    plt.close('all')
    yrs = range(2004,2016)
    fig, ax = plt.subplots(1,2,figsize=[10,5])
    fig.set_facecolor('white')
    ax[1].set_xlim([-74.26,-74.26+0.6])
    ax[1].set_ylim([40.4,40.4+0.6])
    ax[1].axis('off')
    for ii in range(len(zips)):
        geo = zips['geometry'][ii]
        tzip = zips.ZIPCODE[ii]
        if type(geo)==shapely.geometry.polygon.Polygon:
            ax[1].add_patch(PolygonPatch(geo,fc=zips['color'][ii],
                                         linewidth=0.2))
    ax[0].plot(yrs,vals[km.labels_==tcluster].T,color='k',lw=0.1)
    ax[0].plot(yrs,km.cluster_centers_[tcluster],color='indianred')
    ax[0].set_title('Cluster {0}'.format(tcluster))
    fig.canvas.draw()
    fig.savefig('../Outputs/cluster_{0}_{1}.png'.format(tcluster,
                                                        km.n_clusters),
                clobber=True)
|
normal
|
{
"blob_id": "2c181a33c84ce262404c192abdc515924a1916a9",
"index": 6165,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nvals -= vals[:, np.newaxis].mean(-1)\nvals /= vals[:, np.newaxis].std(-1)\n<mask token>\nkm.fit(vals)\n<mask token>\nfor ii in range(len(zips)):\n tzip = int(zips.ZIPCODE[ii])\n if tzip in dzips:\n zips['cluster'][ii] = km.labels_[dzips.index(tzip)]\n<mask token>\nfor tcluster in range(km.n_clusters):\n print('tcluster = ' + str(tcluster))\n zips['color'][zips['cluster'] == tcluster] = 'red'\n zips['color'][zips['cluster'] != tcluster] = 'none'\n close('all')\n yrs = range(2004, 2016)\n fig, ax = plt.subplots(1, 2, figsize=[10, 5])\n fig.set_facecolor('white')\n ax[1].set_xlim([-74.26, -74.26 + 0.6])\n ax[1].set_ylim([40.4, 40.4 + 0.6])\n ax[1].axis('off')\n for ii in range(len(zips)):\n geo = zips['geometry'][ii]\n tzip = zips.ZIPCODE[ii]\n if type(geo) == shapely.geometry.polygon.Polygon:\n ax[1].add_patch(PolygonPatch(geo, fc=zips['color'][ii],\n linewidth=0.2))\n ax[0].plot(yrs, vals[km.labels_ == tcluster].T, color='k', lw=0.1)\n ax[0].plot(yrs, km.cluster_centers_[tcluster], color='indianred')\n ax[0].set_title('Cluster {0}'.format(tcluster))\n fig.canvas.draw()\n fig.savefig('../Outputs/cluster_{0}_{1}.png'.format(tcluster, km.\n n_clusters), clobber=True)\n",
"step-3": "<mask token>\ndata = pd.read_csv(\n '/scratch/share/gdobler/parqa/output/Tables/ParkQualityScores/QualityArea_ZipCode_FiscalYears.csv'\n )\nzips = gp.GeoDataFrame.from_file(\n '/scratch/share/gdobler/parqa/output/ShapeData/ZIPCODE_Modified_Final.shp')\ncols = ['F2{0:03}'.format(i) for i in range(4, 16)]\nvals = data[cols].values\nvals -= vals[:, np.newaxis].mean(-1)\nvals /= vals[:, np.newaxis].std(-1)\nkm = KMeans(n_clusters=5)\nkm.fit(vals)\nzips['cluster'] = np.zeros(len(zips), dtype=int) - 1\ndzips = [i for i in data.ZIPCODE]\nfor ii in range(len(zips)):\n tzip = int(zips.ZIPCODE[ii])\n if tzip in dzips:\n zips['cluster'][ii] = km.labels_[dzips.index(tzip)]\nzips['color'] = np.zeros(len(zips), dtype=str)\nfor tcluster in range(km.n_clusters):\n print('tcluster = ' + str(tcluster))\n zips['color'][zips['cluster'] == tcluster] = 'red'\n zips['color'][zips['cluster'] != tcluster] = 'none'\n close('all')\n yrs = range(2004, 2016)\n fig, ax = plt.subplots(1, 2, figsize=[10, 5])\n fig.set_facecolor('white')\n ax[1].set_xlim([-74.26, -74.26 + 0.6])\n ax[1].set_ylim([40.4, 40.4 + 0.6])\n ax[1].axis('off')\n for ii in range(len(zips)):\n geo = zips['geometry'][ii]\n tzip = zips.ZIPCODE[ii]\n if type(geo) == shapely.geometry.polygon.Polygon:\n ax[1].add_patch(PolygonPatch(geo, fc=zips['color'][ii],\n linewidth=0.2))\n ax[0].plot(yrs, vals[km.labels_ == tcluster].T, color='k', lw=0.1)\n ax[0].plot(yrs, km.cluster_centers_[tcluster], color='indianred')\n ax[0].set_title('Cluster {0}'.format(tcluster))\n fig.canvas.draw()\n fig.savefig('../Outputs/cluster_{0}_{1}.png'.format(tcluster, km.\n n_clusters), clobber=True)\n",
"step-4": "import numpy as np\nimport pandas as pd\nimport geopandas as gp\nfrom sklearn.cluster import KMeans\nimport shapely\nfrom descartes import PolygonPatch\ndata = pd.read_csv(\n '/scratch/share/gdobler/parqa/output/Tables/ParkQualityScores/QualityArea_ZipCode_FiscalYears.csv'\n )\nzips = gp.GeoDataFrame.from_file(\n '/scratch/share/gdobler/parqa/output/ShapeData/ZIPCODE_Modified_Final.shp')\ncols = ['F2{0:03}'.format(i) for i in range(4, 16)]\nvals = data[cols].values\nvals -= vals[:, np.newaxis].mean(-1)\nvals /= vals[:, np.newaxis].std(-1)\nkm = KMeans(n_clusters=5)\nkm.fit(vals)\nzips['cluster'] = np.zeros(len(zips), dtype=int) - 1\ndzips = [i for i in data.ZIPCODE]\nfor ii in range(len(zips)):\n tzip = int(zips.ZIPCODE[ii])\n if tzip in dzips:\n zips['cluster'][ii] = km.labels_[dzips.index(tzip)]\nzips['color'] = np.zeros(len(zips), dtype=str)\nfor tcluster in range(km.n_clusters):\n print('tcluster = ' + str(tcluster))\n zips['color'][zips['cluster'] == tcluster] = 'red'\n zips['color'][zips['cluster'] != tcluster] = 'none'\n close('all')\n yrs = range(2004, 2016)\n fig, ax = plt.subplots(1, 2, figsize=[10, 5])\n fig.set_facecolor('white')\n ax[1].set_xlim([-74.26, -74.26 + 0.6])\n ax[1].set_ylim([40.4, 40.4 + 0.6])\n ax[1].axis('off')\n for ii in range(len(zips)):\n geo = zips['geometry'][ii]\n tzip = zips.ZIPCODE[ii]\n if type(geo) == shapely.geometry.polygon.Polygon:\n ax[1].add_patch(PolygonPatch(geo, fc=zips['color'][ii],\n linewidth=0.2))\n ax[0].plot(yrs, vals[km.labels_ == tcluster].T, color='k', lw=0.1)\n ax[0].plot(yrs, km.cluster_centers_[tcluster], color='indianred')\n ax[0].set_title('Cluster {0}'.format(tcluster))\n fig.canvas.draw()\n fig.savefig('../Outputs/cluster_{0}_{1}.png'.format(tcluster, km.\n n_clusters), clobber=True)\n",
"step-5": "import numpy as np\nimport pandas as pd\nimport geopandas as gp\nfrom sklearn.cluster import KMeans\nimport shapely\nfrom descartes import PolygonPatch\n\n\n# -- load the data\ndata = pd.read_csv('/scratch/share/gdobler/parqa/output/Tables/'\n 'ParkQualityScores/QualityArea_ZipCode_FiscalYears.csv')\n\nzips = gp.GeoDataFrame.from_file('/scratch/share/gdobler/parqa/output/'\n 'ShapeData/ZIPCODE_Modified_Final.shp')\n\n# -- prepare the data\ncols = ['F2{0:03}'.format(i) for i in range(4,16)]\nvals = data[cols].values\nvals -=vals[:,np.newaxis].mean(-1)\nvals /=vals[:,np.newaxis].std(-1)\n\n# -- cluster\nkm = KMeans(n_clusters=5)\nkm.fit(vals)\n\n# -- assign clusters to zips\nzips['cluster'] = np.zeros(len(zips),dtype=int)-1\ndzips = [i for i in data.ZIPCODE]\n\nfor ii in range(len(zips)):\n tzip = int(zips.ZIPCODE[ii])\n if tzip in dzips:\n zips['cluster'][ii] = km.labels_[dzips.index(tzip)]\n\n\n# -- assign color\nzips['color'] = np.zeros(len(zips),dtype=str)\nfor tcluster in range(km.n_clusters):\n print(\"tcluster = \" + str(tcluster))\n zips['color'][zips['cluster']==tcluster] = 'red'\n zips['color'][zips['cluster']!=tcluster] = 'none'\n\n # -- plot\n close('all')\n yrs = range(2004,2016)\n fig, ax = plt.subplots(1,2,figsize=[10,5])\n fig.set_facecolor('white')\n ax[1].set_xlim([-74.26,-74.26+0.6])\n ax[1].set_ylim([40.4,40.4+0.6])\n ax[1].axis('off')\n for ii in range(len(zips)):\n geo = zips['geometry'][ii]\n tzip = zips.ZIPCODE[ii]\n if type(geo)==shapely.geometry.polygon.Polygon:\n ax[1].add_patch(PolygonPatch(geo,fc=zips['color'][ii],\n linewidth=0.2))\n\n ax[0].plot(yrs,vals[km.labels_==tcluster].T,color='k',lw=0.1)\n ax[0].plot(yrs,km.cluster_centers_[tcluster],color='indianred')\n ax[0].set_title('Cluster {0}'.format(tcluster))\n fig.canvas.draw()\n fig.savefig('../Outputs/cluster_{0}_{1}.png'.format(tcluster,\n km.n_clusters),\n clobber=True)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# This script created by Joseph Aaron Campbell - 10/2020
""" With Help from Agisoft Forum @:
https://www.agisoft.com/forum/index.php?topic=12027.msg53791#msg53791
"""
""" Set up Working Environment """
# import Metashape library module
import Metashape
# create a reference to the current project via Document Class
doc = Metashape.app.document
# set reference for the currently active chunk
activeChunk = Metashape.app.document.chunk
# get the current Chunks label ( name )
currentChunkLabel = activeChunk.label
# get the current (saved) project's parent folder path via python3 pathlib
# this path variable is used when exporting the 3D model later in the script.
# 'parent' will return the parent folder the project lives in
# 'name' will return the saved project name and extension
# 'stem' will return just the project name without extension
from pathlib import Path
parentFolderPath = str(Path(Metashape.app.document.path).parent)
print("parent Folder is : " + parentFolderPath)
# set reference to the output folders as string
outputFolder = Path(str(parentFolderPath) + "\\" + "_Output")
outputChunkFolder = Path(str(outputFolder) + "\\" + "_" + str(currentChunkLabel))
outputMaskfolder = Path(str(outputChunkFolder) + "\\" + "_Masks")
print("output folder: " + str(outputFolder))
print("output chunk folder: " + str(outputChunkFolder))
print("mask output folder is: " + str(outputMaskfolder))
# create an '_Output' sub-folder for data exported from the project,
# plus per-chunk and mask sub-folders within it
# mkdir(exist_ok=True) creates each folder if it doesn't exist and does nothing if it does
Path(outputFolder).mkdir(exist_ok=True)
Path(outputChunkFolder).mkdir(exist_ok=True)
Path(outputMaskfolder).mkdir(exist_ok=True)
# export masks to output mask folder
# this uses the Metashape Task class, otherwise loop through every camera in chunk and save mask as image file
# create a reference to the Tasks ExportMasks method
mask_task = Metashape.Tasks.ExportMasks()
# define which cameras to export masks for
mask_task.cameras = activeChunk.cameras
# define the output path for the exported mask files
mask_task.path = str(str(outputMaskfolder) + "\\" + "{filename}.png")
# activate the task for the active chunk to export the masks
mask_task.apply(object=activeChunk)
|
normal
|
{
"blob_id": "dcfc6d76730ba3b33e64cc8f2c166f739bbde5ff",
"index": 3655,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('parent Folder is : ' + parentFolderPath)\n<mask token>\nprint('output folder: ' + str(outputFolder))\nprint('output chunk folder: ' + str(outputChunkFolder))\nprint('mask output folder is: ' + str(outputMaskfolder))\nPath(outputFolder).mkdir(exist_ok=True)\nPath(outputChunkFolder).mkdir(exist_ok=True)\nPath(outputMaskfolder).mkdir(exist_ok=True)\n<mask token>\nmask_task.apply(object=activeChunk)\n",
"step-3": "<mask token>\ndoc = Metashape.app.document\nactiveChunk = Metashape.app.document.chunk\ncurrentChunkLabel = activeChunk.label\n<mask token>\nparentFolderPath = str(Path(Metashape.app.document.path).parent)\nprint('parent Folder is : ' + parentFolderPath)\noutputFolder = Path(str(parentFolderPath) + '\\\\' + '_Output')\noutputChunkFolder = Path(str(outputFolder) + '\\\\' + '_' + str(\n currentChunkLabel))\noutputMaskfolder = Path(str(outputChunkFolder) + '\\\\' + '_Masks')\nprint('output folder: ' + str(outputFolder))\nprint('output chunk folder: ' + str(outputChunkFolder))\nprint('mask output folder is: ' + str(outputMaskfolder))\nPath(outputFolder).mkdir(exist_ok=True)\nPath(outputChunkFolder).mkdir(exist_ok=True)\nPath(outputMaskfolder).mkdir(exist_ok=True)\nmask_task = Metashape.Tasks.ExportMasks()\nmask_task.cameras = activeChunk.cameras\nmask_task.path = str(str(outputMaskfolder) + '\\\\' + '{filename}.png')\nmask_task.apply(object=activeChunk)\n",
"step-4": "<mask token>\nimport Metashape\ndoc = Metashape.app.document\nactiveChunk = Metashape.app.document.chunk\ncurrentChunkLabel = activeChunk.label\nfrom pathlib import Path\nparentFolderPath = str(Path(Metashape.app.document.path).parent)\nprint('parent Folder is : ' + parentFolderPath)\noutputFolder = Path(str(parentFolderPath) + '\\\\' + '_Output')\noutputChunkFolder = Path(str(outputFolder) + '\\\\' + '_' + str(\n currentChunkLabel))\noutputMaskfolder = Path(str(outputChunkFolder) + '\\\\' + '_Masks')\nprint('output folder: ' + str(outputFolder))\nprint('output chunk folder: ' + str(outputChunkFolder))\nprint('mask output folder is: ' + str(outputMaskfolder))\nPath(outputFolder).mkdir(exist_ok=True)\nPath(outputChunkFolder).mkdir(exist_ok=True)\nPath(outputMaskfolder).mkdir(exist_ok=True)\nmask_task = Metashape.Tasks.ExportMasks()\nmask_task.cameras = activeChunk.cameras\nmask_task.path = str(str(outputMaskfolder) + '\\\\' + '{filename}.png')\nmask_task.apply(object=activeChunk)\n",
"step-5": "# This script created by Joseph Aaron Campbell - 10/2020\r\n\r\n\"\"\" With Help from Agisoft Forum @:\r\nhttps://www.agisoft.com/forum/index.php?topic=12027.msg53791#msg53791\r\n\"\"\"\r\n\r\n\"\"\" Set up Working Environment \"\"\"\r\n# import Metashape library module\r\nimport Metashape\r\n# create a reference to the current project via Document Class\r\ndoc = Metashape.app.document\r\n# set reference for the currently active chunk\r\nactiveChunk = Metashape.app.document.chunk\r\n\r\n# get the current Chunks label ( name )\r\ncurrentChunkLabel = activeChunk.label\r\n\r\n# get the current (saved) project's parent folder URL via python3 pathLib\r\n# this path variable is used when exporting the 3D model later in the script.\r\n# 'parent' will return the parent folder the project lives in\r\n# 'name' will return the saved project name and extension\r\n# 'stem' will return just the project name without extension\r\nfrom pathlib import Path\r\nparentFolderPath = str(Path(Metashape.app.document.path).parent)\r\nprint(\"parent Folder is : \" + parentFolderPath)\r\n\r\n# set reference to the output folders as string\r\noutputFolder = Path(str(parentFolderPath) + \"\\\\\" + \"_Output\")\r\noutputChunkFolder = Path(str(outputFolder) + \"\\\\\" + \"_\" + str(currentChunkLabel))\r\noutputMaskfolder = Path(str(outputChunkFolder) + \"\\\\\" + \"_Masks\")\r\n\r\nprint(\"output folder: \" + str(outputFolder))\r\nprint(\"output chunk folder: \" + str(outputChunkFolder))\r\nprint(\"mask output folder is: \" + str(outputMaskfolder))\r\n\r\n# create an 'output' sub-folder for exported data from project\r\n# also create sub-folder for model export within 'output' sub-folder\r\n# this method will create the folder if doesnt exist, and also do nothing if it does exist\r\nPath(outputFolder).mkdir(exist_ok=True)\r\nPath(outputChunkFolder).mkdir(exist_ok=True)\r\nPath(outputMaskfolder).mkdir(exist_ok=True)\r\n\r\n# export masks to output mask folder\r\n# this uses the Metashape Task class, otherwise loop through every camera in chunk and save mask as image file\r\n# create a reference to the Tasks ExportMasks method\r\nmask_task = Metashape.Tasks.ExportMasks()\r\n# define which cameras to export masks for\r\nmask_task.cameras = activeChunk.cameras\r\n# define the output path for the exported mask files\r\nmask_task.path = str(str(outputMaskfolder) + \"\\\\\" + \"{filename}.png\")\r\n# activate the task for the active chunk to export the masks\r\nmask_task.apply(object=activeChunk)\r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
class subset:
def __init__(self, weight, itemSet, size, setNum):
self.weight = weight
self.itemSet = itemSet
self.size = size
self.setNum = setNum
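# recursively try to extend 'base' with pairwise-disjoint subsets until
# every element of 1..rangeOfVal is covered; returns the covering subset or None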
def findCover(base, arr):
	uniq = []  # subsets still disjoint from base (candidates for further unions)
	uni = []   # subsets already unioned with base
if len(base.itemSet) == rangeOfVal:
# print("COVER:", base.itemSet)
return base
remain = rangeOfVal
# Search through arr to find all potential subsets
for i in arr:
# print("compare: ", i.itemSet)
if base.itemSet.isdisjoint(i.itemSet) == True:
# Unique array
uniq.append(i)
remain = remain - len(i.itemSet)
# print("uniq: ", len(uniq))
addedSub = subset(base.weight + i.weight,
base.itemSet.union(i.itemSet),
base.size + i.size,
str(base.setNum) + " " + str(i.setNum))
# Union array
uni.append(addedSub)
print("added:", addedSub.itemSet)
if addedSub.size == rangeOfVal:
# print("COVER:", addedSub.itemSet)
return addedSub
print()
	for j in uni:
		if remain == len(base.itemSet):
			result = findCover(j, uniq)
			if result is not None:  # propagate a cover found deeper in the recursion
				return result
	return None
# fileName="./inputs/input_group115.txt"
fileName="Input_attempt3.txt"
f=open(fileName, "r")
rangeOfVal=int(f.readline()) # n
numOfSub=int(f.readline()) # m
num=0
minWeight=500001
minCover=[]
subsetList=[]
# Loop to read through file and set up the data structures
# to hold all the values
while True:
itemSet=f.readline()
if itemSet == "":
break
else:
weight=int(f.readline())
arrItems=itemSet.split(" ")
i=0
# Convert each item into an int and delete any \n
for item in arrItems:
if item != "\n":
arrItems[i]=int(item)
i += 1
else:
arrItems.remove("\n")
arrItems.sort()
s=subset(weight, set(arrItems), len(arrItems), num)
subsetList.append(s)
num += 1
# print("---------------------------------------------")
# for s in subsetList:
# print(s.itemSet)
# print("---------------------------------------------")
covers = []
inc = 1
for base in subsetList:
# print()
print("base:", base.setNum)
o = findCover(base, subsetList[inc:len(subsetList)])
if o != None:
print("here!")
covers.append(o)
# print(o.setNum)
inc += 1
for w in covers:
if w.weight < minWeight:
minWeight = w.weight
# if type(s.setNum) == int: continue
# else: minCover = (s.setNum).split(" ").sort()
minCover = w.setNum
print(minWeight)
print(minCover)
# for cov in covers:
# print(cov.itemSet)
# #
|
normal
|
{
"blob_id": "b865c37623f405f67592d1eabc620d11ff87827e",
"index": 3378,
"step-1": "class subset:\n <mask token>\n\n\n<mask token>\n",
"step-2": "class subset:\n\n def __init__(self, weight, itemSet, size, setNum):\n self.weight = weight\n self.itemSet = itemSet\n self.size = size\n self.setNum = setNum\n\n\ndef findCover(base, arr):\n uniq = []\n uni = []\n if len(base.itemSet) == rangeOfVal:\n return base\n remain = rangeOfVal\n for i in arr:\n if base.itemSet.isdisjoint(i.itemSet) == True:\n uniq.append(i)\n remain = remain - len(i.itemSet)\n addedSub = subset(base.weight + i.weight, base.itemSet.union(i.\n itemSet), base.size + i.size, str(base.setNum) + ' ' + str(\n i.setNum))\n uni.append(addedSub)\n print('added:', addedSub.itemSet)\n if addedSub.size == rangeOfVal:\n return addedSub\n print()\n for j in uni:\n if remain == len(base.itemSet):\n findCover(j, uniq)\n return\n\n\n<mask token>\n",
"step-3": "class subset:\n\n def __init__(self, weight, itemSet, size, setNum):\n self.weight = weight\n self.itemSet = itemSet\n self.size = size\n self.setNum = setNum\n\n\ndef findCover(base, arr):\n uniq = []\n uni = []\n if len(base.itemSet) == rangeOfVal:\n return base\n remain = rangeOfVal\n for i in arr:\n if base.itemSet.isdisjoint(i.itemSet) == True:\n uniq.append(i)\n remain = remain - len(i.itemSet)\n addedSub = subset(base.weight + i.weight, base.itemSet.union(i.\n itemSet), base.size + i.size, str(base.setNum) + ' ' + str(\n i.setNum))\n uni.append(addedSub)\n print('added:', addedSub.itemSet)\n if addedSub.size == rangeOfVal:\n return addedSub\n print()\n for j in uni:\n if remain == len(base.itemSet):\n findCover(j, uniq)\n return\n\n\n<mask token>\nwhile True:\n itemSet = f.readline()\n if itemSet == '':\n break\n else:\n weight = int(f.readline())\n arrItems = itemSet.split(' ')\n i = 0\n for item in arrItems:\n if item != '\\n':\n arrItems[i] = int(item)\n i += 1\n else:\n arrItems.remove('\\n')\n arrItems.sort()\n s = subset(weight, set(arrItems), len(arrItems), num)\n subsetList.append(s)\n num += 1\n<mask token>\nfor base in subsetList:\n print('base:', base.setNum)\n o = findCover(base, subsetList[inc:len(subsetList)])\n if o != None:\n print('here!')\n covers.append(o)\n inc += 1\nfor w in covers:\n if w.weight < minWeight:\n minWeight = w.weight\n minCover = w.setNum\nprint(minWeight)\nprint(minCover)\n",
"step-4": "class subset:\n\n def __init__(self, weight, itemSet, size, setNum):\n self.weight = weight\n self.itemSet = itemSet\n self.size = size\n self.setNum = setNum\n\n\ndef findCover(base, arr):\n uniq = []\n uni = []\n if len(base.itemSet) == rangeOfVal:\n return base\n remain = rangeOfVal\n for i in arr:\n if base.itemSet.isdisjoint(i.itemSet) == True:\n uniq.append(i)\n remain = remain - len(i.itemSet)\n addedSub = subset(base.weight + i.weight, base.itemSet.union(i.\n itemSet), base.size + i.size, str(base.setNum) + ' ' + str(\n i.setNum))\n uni.append(addedSub)\n print('added:', addedSub.itemSet)\n if addedSub.size == rangeOfVal:\n return addedSub\n print()\n for j in uni:\n if remain == len(base.itemSet):\n findCover(j, uniq)\n return\n\n\nfileName = 'Input_attempt3.txt'\nf = open(fileName, 'r')\nrangeOfVal = int(f.readline())\nnumOfSub = int(f.readline())\nnum = 0\nminWeight = 500001\nminCover = []\nsubsetList = []\nwhile True:\n itemSet = f.readline()\n if itemSet == '':\n break\n else:\n weight = int(f.readline())\n arrItems = itemSet.split(' ')\n i = 0\n for item in arrItems:\n if item != '\\n':\n arrItems[i] = int(item)\n i += 1\n else:\n arrItems.remove('\\n')\n arrItems.sort()\n s = subset(weight, set(arrItems), len(arrItems), num)\n subsetList.append(s)\n num += 1\ncovers = []\ninc = 1\nfor base in subsetList:\n print('base:', base.setNum)\n o = findCover(base, subsetList[inc:len(subsetList)])\n if o != None:\n print('here!')\n covers.append(o)\n inc += 1\nfor w in covers:\n if w.weight < minWeight:\n minWeight = w.weight\n minCover = w.setNum\nprint(minWeight)\nprint(minCover)\n",
"step-5": "class subset:\n\tdef __init__(self, weight, itemSet, size, setNum):\n\t\tself.weight = weight\n\t\tself.itemSet = itemSet\n\t\tself.size = size\n\t\tself.setNum = setNum\n\n\ndef findCover(base, arr):\n\tuniq = [] #array that can be union\n\tuni = [] #array has been unionized w/ base\n\tif len(base.itemSet) == rangeOfVal:\n\t\t# print(\"COVER:\", base.itemSet)\n\t\treturn base\n\tremain = rangeOfVal\n\t# Search through arr to find all potential subsets\n\tfor i in arr:\n\t\t# print(\"compare: \", i.itemSet)\n\t\tif base.itemSet.isdisjoint(i.itemSet) == True:\n\t\t\t# Unique array\n\t\t\tuniq.append(i)\n\t\t\tremain = remain - len(i.itemSet)\n\t\t\t# print(\"uniq: \", len(uniq))\n\t\t\taddedSub = subset(base.weight + i.weight,\n\t\t\t\t\t\t\tbase.itemSet.union(i.itemSet),\n\t\t\t\t\t\t\tbase.size + i.size,\n\t\t\t\t\t\t\tstr(base.setNum) + \" \" + str(i.setNum))\n\t\t\t# Union array\n\t\t\tuni.append(addedSub)\n\t\t\tprint(\"added:\", addedSub.itemSet)\n\t\t\tif addedSub.size == rangeOfVal:\n\t\t\t\t# print(\"COVER:\", addedSub.itemSet)\n\t\t\t\treturn addedSub\n\tprint()\n\tfor j in uni:\n\t\t# print(j.setNum)\n\t\tif remain == len(base.itemSet):\n\t\t\tfindCover(j, uniq)\n\t# print(\"_____________________________NONE_______________________________\")\t\t \n\treturn\n\n\n\n# fileName=\"./inputs/input_group115.txt\"\nfileName=\"Input_attempt3.txt\"\nf=open(fileName, \"r\")\n\nrangeOfVal=int(f.readline()) # n\nnumOfSub=int(f.readline()) # m\nnum=0\nminWeight=500001\nminCover=[]\nsubsetList=[]\n# Loop to read through file and set up the data structures\n# to hold all the values\nwhile True:\n\titemSet=f.readline()\n\tif itemSet == \"\":\n\t\tbreak\n\telse:\n\t\tweight=int(f.readline())\n\t\tarrItems=itemSet.split(\" \")\n\t\ti=0\n\t\t# Convert each item into an int and delete any \\n\n\t\tfor item in arrItems:\n\t\t\tif item != \"\\n\":\n\t\t\t\tarrItems[i]=int(item)\n\t\t\t\ti += 1\n\t\t\telse:\n\t\t\t\tarrItems.remove(\"\\n\")\n\t\tarrItems.sort()\n\t\ts=subset(weight, set(arrItems), len(arrItems), num)\n\t\tsubsetList.append(s)\n\tnum += 1\n\n# print(\"---------------------------------------------\")\n# for s in subsetList:\n# \tprint(s.itemSet)\n# print(\"---------------------------------------------\")\n\ncovers = []\ninc = 1\nfor base in subsetList:\n\t# print()\n\tprint(\"base:\", base.setNum)\n\to = findCover(base, subsetList[inc:len(subsetList)])\n\tif o != None:\n\t\tprint(\"here!\")\n\t\tcovers.append(o)\n\t\t# print(o.setNum)\n\tinc += 1\nfor w in covers:\n\tif w.weight < minWeight:\n\t\tminWeight = w.weight\n\t\t# if type(s.setNum) == int: continue\n\t\t# else: minCover = (s.setNum).split(\" \").sort()\n\t\tminCover = w.setNum\n\nprint(minWeight)\nprint(minCover)\n\n\n# for cov in covers:\n# \tprint(cov.itemSet)\n\n# # \n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
date = input()
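# DEC 25 is Christmas and OCT 31 is Halloween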
if date == ("DEC 25") or date == ("OCT 31"):
print("yup")
else:
print("nope")
|
normal
|
{
"blob_id": "bc5b368a710b8dfc4492b996c42c46638e1f538c",
"index": 9811,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif date == 'DEC 25' or date == 'OCT 31':\n print('yup')\nelse:\n print('nope')\n",
"step-3": "date = input()\nif date == 'DEC 25' or date == 'OCT 31':\n print('yup')\nelse:\n print('nope')\n",
"step-4": "date = input()\nif date == (\"DEC 25\") or date == (\"OCT 31\"):\n print(\"yup\")\n\nelse:\n print(\"nope\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from pyspark import SparkContext
from pyspark.sql import SQLContext
from pyspark.sql.types import *
sc = SparkContext("local", "weblog app")
effective_care = sc.textFile('file:///data/exercise1/effective_care').map(lambda l:l.encode().split(',')).map(lambda x: (x[0], x[1:]))
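# re-key each record by its measure ID, keeping the hospital ID and the score
# (assumed column layout: hospital ID first, then measure fields)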
procedure_care = effective_care.map(lambda p:(p[1][1], [p[0], p[1][2]]))
procedure_care_grouped = procedure_care.groupByKey()
def range_func(measures):
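	# spread (max - min) of the parseable integer scores for one measure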
scores = []
for entry in measures:
try:
curr = int(entry[1])
except:
curr = None
if curr is not None:
scores.append(curr)
if len(scores) < 1:
return 0
return max(scores) - min(scores)
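# key the measure table by its second column so it can be joined onto the score ranges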
measure_dates = sc.textFile('file:///data/exercise1/measure_dates').map(lambda l:l.encode().split(',')).map(lambda x: (x[1], x[0]))
procedure_score_range = procedure_care_grouped.map(lambda p:(p[0], range_func(p[1]))).join(measure_dates)
sorted_ranges = procedure_score_range.sortBy(lambda x:x[1], False)
top = sorted_ranges.take(10)
print(top)
|
normal
|
{
"blob_id": "4c60fd123f591bf2a88ca0affe14a3c3ec0d3cf6",
"index": 60,
"step-1": "<mask token>\n\n\ndef range_func(measures):\n scores = []\n for entry in measures:\n try:\n curr = int(entry[1])\n except:\n curr = None\n if curr is not None:\n scores.append(curr)\n if len(scores) < 1:\n return 0\n return max(scores) - min(scores)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef range_func(measures):\n scores = []\n for entry in measures:\n try:\n curr = int(entry[1])\n except:\n curr = None\n if curr is not None:\n scores.append(curr)\n if len(scores) < 1:\n return 0\n return max(scores) - min(scores)\n\n\n<mask token>\nprint(top)\n",
"step-3": "<mask token>\nsc = SparkContext('local', 'weblog app')\neffective_care = sc.textFile('file:///data/exercise1/effective_care').map(\n lambda l: l.encode().split(',')).map(lambda x: (x[0], x[1:]))\nprocedure_care = effective_care.map(lambda p: (p[1][1], [p[0], p[1][2]]))\nprocedure_care_grouped = procedure_care.groupByKey()\n\n\ndef range_func(measures):\n scores = []\n for entry in measures:\n try:\n curr = int(entry[1])\n except:\n curr = None\n if curr is not None:\n scores.append(curr)\n if len(scores) < 1:\n return 0\n return max(scores) - min(scores)\n\n\nmeasure_dates = sc.textFile('file:///data/exercise1/measure_dates').map(lambda\n l: l.encode().split(',')).map(lambda x: (x[1], x[0]))\nprocedure_score_range = procedure_care_grouped.map(lambda p: (p[0],\n range_func(p[1]))).join(measure_dates)\nsorted_ranges = procedure_score_range.sortBy(lambda x: x[1], False)\ntop = sorted_ranges.take(10)\nprint(top)\n",
"step-4": "from pyspark import SparkContext\nfrom pyspark.sql import SQLContext\nfrom pyspark.sql.types import *\nsc = SparkContext('local', 'weblog app')\neffective_care = sc.textFile('file:///data/exercise1/effective_care').map(\n lambda l: l.encode().split(',')).map(lambda x: (x[0], x[1:]))\nprocedure_care = effective_care.map(lambda p: (p[1][1], [p[0], p[1][2]]))\nprocedure_care_grouped = procedure_care.groupByKey()\n\n\ndef range_func(measures):\n scores = []\n for entry in measures:\n try:\n curr = int(entry[1])\n except:\n curr = None\n if curr is not None:\n scores.append(curr)\n if len(scores) < 1:\n return 0\n return max(scores) - min(scores)\n\n\nmeasure_dates = sc.textFile('file:///data/exercise1/measure_dates').map(lambda\n l: l.encode().split(',')).map(lambda x: (x[1], x[0]))\nprocedure_score_range = procedure_care_grouped.map(lambda p: (p[0],\n range_func(p[1]))).join(measure_dates)\nsorted_ranges = procedure_score_range.sortBy(lambda x: x[1], False)\ntop = sorted_ranges.take(10)\nprint(top)\n",
"step-5": "from pyspark import SparkContext\nfrom pyspark.sql import SQLContext\nfrom pyspark.sql.types import *\nsc = SparkContext(\"local\", \"weblog app\")\n\neffective_care = sc.textFile('file:///data/exercise1/effective_care').map(lambda l:l.encode().split(',')).map(lambda x: (x[0], x[1:]))\nprocedure_care = effective_care.map(lambda p:(p[1][1], [p[0], p[1][2]]))\nprocedure_care_grouped = procedure_care.groupByKey()\n\ndef range_func(measures):\n\tscores = []\n\tfor entry in measures:\n\t\ttry:\n\t\t\tcurr = int(entry[1])\n\t\texcept:\n\t\t\tcurr = None\n\t\tif curr is not None:\n\t\t\tscores.append(curr)\n\tif len(scores) < 1:\n\t\treturn 0\n\treturn max(scores) - min(scores)\n\nmeasure_dates = sc.textFile('file:///data/exercise1/measure_dates').map(lambda l:l.encode().split(',')).map(lambda x: (x[1], x[0]))\nprocedure_score_range = procedure_care_grouped.map(lambda p:(p[0], range_func(p[1]))).join(measure_dates)\nsorted_ranges = procedure_score_range.sortBy(lambda x:x[1], False)\ntop = sorted_ranges.take(10)\nprint(top)\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from scipy.stats import mannwhitneyu
import matplotlib.patches as patches
import os
import numpy
import pandas
from matplotlib.gridspec import GridSpec
from scipy.cluster.hierarchy import fcluster, linkage, dendrogram
from scipy.spatial.distance import squareform
import seaborn as sns
from scipy.stats import spearmanr
from statsmodels.stats.multitest import multipletests
import matplotlib.pyplot as plt
from config import base_path, out_path
MIN_OLIS = 200
THROW_BAD_OLIS = True
MIN_APPEAR = 0.02
CLUST_TH = 0.7
MIN_CLUST = 10
def get_clusters(link, dn, inds, th=0.7):
clst = fcluster(link, criterion='distance', t=th)
return pandas.Series(index=inds, data=clst).iloc[dn['leaves']]
def draw_significant_groups(groups, dn_ax, color='white'):
# Draw boxes around clusters
for group in groups:
rect = patches.Rectangle((group[0][0], group[1][0]), group[0][1] - group[0][0], group[1][1] - group[1][0],
linewidth=1, edgecolor=color, facecolor='none')
dn_ax.add_patch(rect)
def draw_legume_group(group, ax):
y_values = ax.get_ylim()
x_values = ax.get_xlim()
rect = patches.Rectangle((0, 0), x_values[1], group[0], linewidth=1, edgecolor='white',
facecolor='white', alpha=0.6)
ax.add_patch(rect)
rect = patches.Rectangle((0, group[1]), x_values[1], y_values[0] - group[1], linewidth=1, edgecolor='white',
facecolor='white', alpha=0.6)
ax.add_patch(rect)
def get_groups(clst, clust_above=MIN_CLUST):
groups = []
v = -1
for i in range(len(clst)):
if clst[i] == v:
continue
if v == -1:
groups.append([i])
v = clst[i]
continue
if (i - groups[-1][0]) >= clust_above:
groups[-1].append(i)
groups.append([i])
else:
groups[-1][0] = i
v = clst[i]
groups = groups[:-1]
return groups
if __name__ == "__main__":
os.makedirs(out_path, exist_ok=True)
df_info = pandas.read_csv(os.path.join(base_path, "library_contents.csv"), index_col=0, low_memory=False)
df_info = df_info[df_info.is_allergens & (df_info['num_copy'] == 1)]
inds = df_info.index
l_base = len(inds)
meta_df = pandas.read_csv(os.path.join(base_path, "cohort.csv"), index_col=0, low_memory=False)
meta_df = meta_df[(meta_df.timepoint == 1) & (meta_df.num_passed >= MIN_OLIS)]
fold_df = pandas.read_csv(os.path.join(base_path, "fold_data.csv"), index_col=[0, 1],
low_memory=False).loc[meta_df.index].unstack()
fold_df.columns = fold_df.columns.get_level_values(1)
fold_df = fold_df[fold_df.columns.intersection(inds)]
if THROW_BAD_OLIS:
drop = fold_df.columns[(fold_df == -1).sum() > 0]
fold_df = fold_df[fold_df.columns.difference(drop)].fillna(1)
inds = df_info.index.difference(drop)
df_info = df_info.loc[inds]
fold_df = fold_df[fold_df.columns[(fold_df > 1).sum() > (MIN_APPEAR * len(fold_df))]]
fold_df = numpy.log(fold_df.fillna(1))
df_info = df_info.loc[fold_df.columns]
th = CLUST_TH
# Oligos level correlations
corr = fold_df.corr('spearman')
link = linkage(squareform(1 - corr), method='average')
dn = dendrogram(link, no_plot=True)
clst = get_clusters(link, dn, corr.columns, th)
groups = get_groups(clst)
# Samples level correlations
corr1 = fold_df.T.corr('spearman')
link1 = linkage(squareform(1 - corr1), method='average')
dn1 = dendrogram(link1, no_plot=True)
clst1 = get_clusters(link1, dn1, corr1.columns, th)
groups1 = get_groups(clst1)
# Define figure
fig = plt.figure(figsize=[9.2, 12])
gs = GridSpec(1, 3, width_ratios=[0.2, 3, 1])
# Plot heatmap
bar_ax = fig.add_subplot(gs[0])
dendogram_ax = fig.add_subplot(gs[1])
sns.heatmap(fold_df.iloc[dn1['leaves'], dn['leaves']], cmap=sns.color_palette('flare', as_cmap=True),
ax=dendogram_ax, yticklabels=False, xticklabels=False, cbar_ax=bar_ax)
dendogram_ax.set_xlabel("oligos")
dendogram_ax.set_ylabel("samples")
# Plot sample level bars
mt = 'normalized mt_1342'
bar_axis1 = fig.add_subplot(gs[2], sharey=dendogram_ax)
meta_df['yob'] = (meta_df['yob'] - 1944) / 60
use_columns = ['gender', 'yob']
sample_extra_info = pandas.merge(meta_df[use_columns], meta_df[mt], left_index=True,
right_index=True, how='left')
sample_extra_info[mt] = ((sample_extra_info[mt] - sample_extra_info[mt].min()) /
(sample_extra_info[mt].max() - sample_extra_info[mt].min())).astype(float)
sample_extra_info.rename(columns={mt: 'norm mt_1342'}, inplace=True)
mt = 'norm mt_1342'
sample_extra_info = sample_extra_info.iloc[dn1['leaves']]
sns.heatmap(data=sample_extra_info, xticklabels=sample_extra_info.columns, yticklabels=False,
ax=bar_axis1, cmap=sns.color_palette("viridis", as_cmap=True))
# Compute significant shared groups
fold_df = fold_df.iloc[dn1['leaves'], dn['leaves']].copy()
significant_groups = []
for oligo_subgroup in groups:
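        # rank the sample clusters by their mean fold signal on this oligo cluster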
sample_group_means = sorted(enumerate(
[fold_df.iloc[range(*sample_group), range(*oligo_subgroup)].mean().mean() for sample_group in groups1]),
key=lambda x: -x[1])
if sample_group_means[0][1] > 2 * sample_group_means[1][1]:
significant_groups.append([oligo_subgroup, groups1[sample_group_means[0][0]]])
draw_significant_groups(significant_groups, dendogram_ax)
mt_scores = pandas.Series([mannwhitneyu(sample_extra_info.iloc[range(*sample_group)][mt].dropna(),
sample_extra_info.iloc[list(range(0, sample_group[0])) +
list(range(sample_group[1], len(sample_extra_info)))]
[mt].dropna())[1]
for oligos_group, sample_group in significant_groups])
mt_group = significant_groups[mt_scores.idxmin()]
mt_pval = mt_scores.min()
draw_significant_groups([mt_group], dendogram_ax, color='blue')
draw_legume_group(mt_group[1], bar_axis1)
plt.suptitle('For group marked in blue the %s level\nof samples in group vs those not in group\n' % mt +
'got MW p-value of %g' % mt_pval)
plt.savefig(os.path.join(out_path, "legumes.png"))
res = {}
inds = sample_extra_info[mt].dropna().index
for i in range(*mt_group[0]):
col = fold_df.columns[i]
res[col] = spearmanr(sample_extra_info.loc[inds][mt], fold_df.loc[inds, col].values)
res = pandas.DataFrame(res, index=['stat', 'pval']).T.sort_values('pval')
res["Bonf"] = res['pval'] * len(res)
FDR = multipletests(res.pval.values.tolist(), method='fdr_by')
res["FDR_BY"] = FDR[0]
res['FDR_BY_qval'] = FDR[1]
FDR = multipletests(res.pval.values.tolist(), method='fdr_bh')
res["FDR_BH"] = FDR[0]
res['FDR_BH_qval'] = FDR[1]
res['allergens_common_name'] = df_info.loc[res.index].allergens_common_name
print("Of %d oligos in the blue group %d pass FDR (BY) vs %s" % (len(res), len(res[res.FDR_BY]), mt))
res.to_csv(os.path.join(out_path, "mt_1342.csv"))
|
normal
|
{
"blob_id": "bfd31d0b80511721ee5117daced04eaf63679fd8",
"index": 2230,
"step-1": "<mask token>\n\n\ndef get_clusters(link, dn, inds, th=0.7):\n clst = fcluster(link, criterion='distance', t=th)\n return pandas.Series(index=inds, data=clst).iloc[dn['leaves']]\n\n\ndef draw_significant_groups(groups, dn_ax, color='white'):\n for group in groups:\n rect = patches.Rectangle((group[0][0], group[1][0]), group[0][1] -\n group[0][0], group[1][1] - group[1][0], linewidth=1, edgecolor=\n color, facecolor='none')\n dn_ax.add_patch(rect)\n\n\ndef draw_legume_group(group, ax):\n y_values = ax.get_ylim()\n x_values = ax.get_xlim()\n rect = patches.Rectangle((0, 0), x_values[1], group[0], linewidth=1,\n edgecolor='white', facecolor='white', alpha=0.6)\n ax.add_patch(rect)\n rect = patches.Rectangle((0, group[1]), x_values[1], y_values[0] -\n group[1], linewidth=1, edgecolor='white', facecolor='white', alpha=0.6)\n ax.add_patch(rect)\n\n\ndef get_groups(clst, clust_above=MIN_CLUST):\n groups = []\n v = -1\n for i in range(len(clst)):\n if clst[i] == v:\n continue\n if v == -1:\n groups.append([i])\n v = clst[i]\n continue\n if i - groups[-1][0] >= clust_above:\n groups[-1].append(i)\n groups.append([i])\n else:\n groups[-1][0] = i\n v = clst[i]\n groups = groups[:-1]\n return groups\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_clusters(link, dn, inds, th=0.7):\n clst = fcluster(link, criterion='distance', t=th)\n return pandas.Series(index=inds, data=clst).iloc[dn['leaves']]\n\n\ndef draw_significant_groups(groups, dn_ax, color='white'):\n for group in groups:\n rect = patches.Rectangle((group[0][0], group[1][0]), group[0][1] -\n group[0][0], group[1][1] - group[1][0], linewidth=1, edgecolor=\n color, facecolor='none')\n dn_ax.add_patch(rect)\n\n\ndef draw_legume_group(group, ax):\n y_values = ax.get_ylim()\n x_values = ax.get_xlim()\n rect = patches.Rectangle((0, 0), x_values[1], group[0], linewidth=1,\n edgecolor='white', facecolor='white', alpha=0.6)\n ax.add_patch(rect)\n rect = patches.Rectangle((0, group[1]), x_values[1], y_values[0] -\n group[1], linewidth=1, edgecolor='white', facecolor='white', alpha=0.6)\n ax.add_patch(rect)\n\n\ndef get_groups(clst, clust_above=MIN_CLUST):\n groups = []\n v = -1\n for i in range(len(clst)):\n if clst[i] == v:\n continue\n if v == -1:\n groups.append([i])\n v = clst[i]\n continue\n if i - groups[-1][0] >= clust_above:\n groups[-1].append(i)\n groups.append([i])\n else:\n groups[-1][0] = i\n v = clst[i]\n groups = groups[:-1]\n return groups\n\n\nif __name__ == '__main__':\n os.makedirs(out_path, exist_ok=True)\n df_info = pandas.read_csv(os.path.join(base_path,\n 'library_contents.csv'), index_col=0, low_memory=False)\n df_info = df_info[df_info.is_allergens & (df_info['num_copy'] == 1)]\n inds = df_info.index\n l_base = len(inds)\n meta_df = pandas.read_csv(os.path.join(base_path, 'cohort.csv'),\n index_col=0, low_memory=False)\n meta_df = meta_df[(meta_df.timepoint == 1) & (meta_df.num_passed >=\n MIN_OLIS)]\n fold_df = pandas.read_csv(os.path.join(base_path, 'fold_data.csv'),\n index_col=[0, 1], low_memory=False).loc[meta_df.index].unstack()\n fold_df.columns = fold_df.columns.get_level_values(1)\n fold_df = fold_df[fold_df.columns.intersection(inds)]\n if THROW_BAD_OLIS:\n drop = fold_df.columns[(fold_df == -1).sum() > 0]\n fold_df = fold_df[fold_df.columns.difference(drop)].fillna(1)\n inds = df_info.index.difference(drop)\n df_info = df_info.loc[inds]\n fold_df = fold_df[fold_df.columns[(fold_df > 1).sum() > MIN_APPEAR *\n len(fold_df)]]\n fold_df = numpy.log(fold_df.fillna(1))\n df_info = df_info.loc[fold_df.columns]\n th = CLUST_TH\n corr = fold_df.corr('spearman')\n link = linkage(squareform(1 - corr), method='average')\n dn = dendrogram(link, no_plot=True)\n clst = get_clusters(link, dn, corr.columns, th)\n groups = get_groups(clst)\n corr1 = fold_df.T.corr('spearman')\n link1 = linkage(squareform(1 - corr1), method='average')\n dn1 = dendrogram(link1, no_plot=True)\n clst1 = get_clusters(link1, dn1, corr1.columns, th)\n groups1 = get_groups(clst1)\n fig = plt.figure(figsize=[9.2, 12])\n gs = GridSpec(1, 3, width_ratios=[0.2, 3, 1])\n bar_ax = fig.add_subplot(gs[0])\n dendogram_ax = fig.add_subplot(gs[1])\n sns.heatmap(fold_df.iloc[dn1['leaves'], dn['leaves']], cmap=sns.\n color_palette('flare', as_cmap=True), ax=dendogram_ax, yticklabels=\n False, xticklabels=False, cbar_ax=bar_ax)\n dendogram_ax.set_xlabel('oligos')\n dendogram_ax.set_ylabel('samples')\n mt = 'normalized mt_1342'\n bar_axis1 = fig.add_subplot(gs[2], sharey=dendogram_ax)\n meta_df['yob'] = (meta_df['yob'] - 1944) / 60\n use_columns = ['gender', 'yob']\n sample_extra_info = pandas.merge(meta_df[use_columns], meta_df[mt],\n left_index=True, right_index=True, how='left')\n sample_extra_info[mt] = ((sample_extra_info[mt] - sample_extra_info[mt]\n .min()) / 
(sample_extra_info[mt].max() - sample_extra_info[mt].min())\n ).astype(float)\n sample_extra_info.rename(columns={mt: 'norm mt_1342'}, inplace=True)\n mt = 'norm mt_1342'\n sample_extra_info = sample_extra_info.iloc[dn1['leaves']]\n sns.heatmap(data=sample_extra_info, xticklabels=sample_extra_info.\n columns, yticklabels=False, ax=bar_axis1, cmap=sns.color_palette(\n 'viridis', as_cmap=True))\n fold_df = fold_df.iloc[dn1['leaves'], dn['leaves']].copy()\n significant_groups = []\n for oligo_subgroup in groups:\n sample_group_means = sorted(enumerate([fold_df.iloc[range(*\n sample_group), range(*oligo_subgroup)].mean().mean() for\n sample_group in groups1]), key=lambda x: -x[1])\n if sample_group_means[0][1] > 2 * sample_group_means[1][1]:\n significant_groups.append([oligo_subgroup, groups1[\n sample_group_means[0][0]]])\n draw_significant_groups(significant_groups, dendogram_ax)\n mt_scores = pandas.Series([mannwhitneyu(sample_extra_info.iloc[range(*\n sample_group)][mt].dropna(), sample_extra_info.iloc[list(range(0,\n sample_group[0])) + list(range(sample_group[1], len(\n sample_extra_info)))][mt].dropna())[1] for oligos_group,\n sample_group in significant_groups])\n mt_group = significant_groups[mt_scores.idxmin()]\n mt_pval = mt_scores.min()\n draw_significant_groups([mt_group], dendogram_ax, color='blue')\n draw_legume_group(mt_group[1], bar_axis1)\n plt.suptitle(\n \"\"\"For group marked in blue the %s level\nof samples in group vs those not in group\n\"\"\"\n % mt + 'got MW p-value of %g' % mt_pval)\n plt.savefig(os.path.join(out_path, 'legumes.png'))\n res = {}\n inds = sample_extra_info[mt].dropna().index\n for i in range(*mt_group[0]):\n col = fold_df.columns[i]\n res[col] = spearmanr(sample_extra_info.loc[inds][mt], fold_df.loc[\n inds, col].values)\n res = pandas.DataFrame(res, index=['stat', 'pval']).T.sort_values('pval')\n res['Bonf'] = res['pval'] * len(res)\n FDR = multipletests(res.pval.values.tolist(), method='fdr_by')\n res['FDR_BY'] = FDR[0]\n res['FDR_BY_qval'] = FDR[1]\n FDR = multipletests(res.pval.values.tolist(), method='fdr_bh')\n res['FDR_BH'] = FDR[0]\n res['FDR_BH_qval'] = FDR[1]\n res['allergens_common_name'] = df_info.loc[res.index].allergens_common_name\n print('Of %d oligos in the blue group %d pass FDR (BY) vs %s' % (len(\n res), len(res[res.FDR_BY]), mt))\n res.to_csv(os.path.join(out_path, 'mt_1342.csv'))\n",
"step-3": "<mask token>\nMIN_OLIS = 200\nTHROW_BAD_OLIS = True\nMIN_APPEAR = 0.02\nCLUST_TH = 0.7\nMIN_CLUST = 10\n\n\ndef get_clusters(link, dn, inds, th=0.7):\n clst = fcluster(link, criterion='distance', t=th)\n return pandas.Series(index=inds, data=clst).iloc[dn['leaves']]\n\n\ndef draw_significant_groups(groups, dn_ax, color='white'):\n for group in groups:\n rect = patches.Rectangle((group[0][0], group[1][0]), group[0][1] -\n group[0][0], group[1][1] - group[1][0], linewidth=1, edgecolor=\n color, facecolor='none')\n dn_ax.add_patch(rect)\n\n\ndef draw_legume_group(group, ax):\n y_values = ax.get_ylim()\n x_values = ax.get_xlim()\n rect = patches.Rectangle((0, 0), x_values[1], group[0], linewidth=1,\n edgecolor='white', facecolor='white', alpha=0.6)\n ax.add_patch(rect)\n rect = patches.Rectangle((0, group[1]), x_values[1], y_values[0] -\n group[1], linewidth=1, edgecolor='white', facecolor='white', alpha=0.6)\n ax.add_patch(rect)\n\n\ndef get_groups(clst, clust_above=MIN_CLUST):\n groups = []\n v = -1\n for i in range(len(clst)):\n if clst[i] == v:\n continue\n if v == -1:\n groups.append([i])\n v = clst[i]\n continue\n if i - groups[-1][0] >= clust_above:\n groups[-1].append(i)\n groups.append([i])\n else:\n groups[-1][0] = i\n v = clst[i]\n groups = groups[:-1]\n return groups\n\n\nif __name__ == '__main__':\n os.makedirs(out_path, exist_ok=True)\n df_info = pandas.read_csv(os.path.join(base_path,\n 'library_contents.csv'), index_col=0, low_memory=False)\n df_info = df_info[df_info.is_allergens & (df_info['num_copy'] == 1)]\n inds = df_info.index\n l_base = len(inds)\n meta_df = pandas.read_csv(os.path.join(base_path, 'cohort.csv'),\n index_col=0, low_memory=False)\n meta_df = meta_df[(meta_df.timepoint == 1) & (meta_df.num_passed >=\n MIN_OLIS)]\n fold_df = pandas.read_csv(os.path.join(base_path, 'fold_data.csv'),\n index_col=[0, 1], low_memory=False).loc[meta_df.index].unstack()\n fold_df.columns = fold_df.columns.get_level_values(1)\n fold_df = fold_df[fold_df.columns.intersection(inds)]\n if THROW_BAD_OLIS:\n drop = fold_df.columns[(fold_df == -1).sum() > 0]\n fold_df = fold_df[fold_df.columns.difference(drop)].fillna(1)\n inds = df_info.index.difference(drop)\n df_info = df_info.loc[inds]\n fold_df = fold_df[fold_df.columns[(fold_df > 1).sum() > MIN_APPEAR *\n len(fold_df)]]\n fold_df = numpy.log(fold_df.fillna(1))\n df_info = df_info.loc[fold_df.columns]\n th = CLUST_TH\n corr = fold_df.corr('spearman')\n link = linkage(squareform(1 - corr), method='average')\n dn = dendrogram(link, no_plot=True)\n clst = get_clusters(link, dn, corr.columns, th)\n groups = get_groups(clst)\n corr1 = fold_df.T.corr('spearman')\n link1 = linkage(squareform(1 - corr1), method='average')\n dn1 = dendrogram(link1, no_plot=True)\n clst1 = get_clusters(link1, dn1, corr1.columns, th)\n groups1 = get_groups(clst1)\n fig = plt.figure(figsize=[9.2, 12])\n gs = GridSpec(1, 3, width_ratios=[0.2, 3, 1])\n bar_ax = fig.add_subplot(gs[0])\n dendogram_ax = fig.add_subplot(gs[1])\n sns.heatmap(fold_df.iloc[dn1['leaves'], dn['leaves']], cmap=sns.\n color_palette('flare', as_cmap=True), ax=dendogram_ax, yticklabels=\n False, xticklabels=False, cbar_ax=bar_ax)\n dendogram_ax.set_xlabel('oligos')\n dendogram_ax.set_ylabel('samples')\n mt = 'normalized mt_1342'\n bar_axis1 = fig.add_subplot(gs[2], sharey=dendogram_ax)\n meta_df['yob'] = (meta_df['yob'] - 1944) / 60\n use_columns = ['gender', 'yob']\n sample_extra_info = pandas.merge(meta_df[use_columns], meta_df[mt],\n left_index=True, right_index=True, 
how='left')\n sample_extra_info[mt] = ((sample_extra_info[mt] - sample_extra_info[mt]\n .min()) / (sample_extra_info[mt].max() - sample_extra_info[mt].min())\n ).astype(float)\n sample_extra_info.rename(columns={mt: 'norm mt_1342'}, inplace=True)\n mt = 'norm mt_1342'\n sample_extra_info = sample_extra_info.iloc[dn1['leaves']]\n sns.heatmap(data=sample_extra_info, xticklabels=sample_extra_info.\n columns, yticklabels=False, ax=bar_axis1, cmap=sns.color_palette(\n 'viridis', as_cmap=True))\n fold_df = fold_df.iloc[dn1['leaves'], dn['leaves']].copy()\n significant_groups = []\n for oligo_subgroup in groups:\n sample_group_means = sorted(enumerate([fold_df.iloc[range(*\n sample_group), range(*oligo_subgroup)].mean().mean() for\n sample_group in groups1]), key=lambda x: -x[1])\n if sample_group_means[0][1] > 2 * sample_group_means[1][1]:\n significant_groups.append([oligo_subgroup, groups1[\n sample_group_means[0][0]]])\n draw_significant_groups(significant_groups, dendogram_ax)\n mt_scores = pandas.Series([mannwhitneyu(sample_extra_info.iloc[range(*\n sample_group)][mt].dropna(), sample_extra_info.iloc[list(range(0,\n sample_group[0])) + list(range(sample_group[1], len(\n sample_extra_info)))][mt].dropna())[1] for oligos_group,\n sample_group in significant_groups])\n mt_group = significant_groups[mt_scores.idxmin()]\n mt_pval = mt_scores.min()\n draw_significant_groups([mt_group], dendogram_ax, color='blue')\n draw_legume_group(mt_group[1], bar_axis1)\n plt.suptitle(\n \"\"\"For group marked in blue the %s level\nof samples in group vs those not in group\n\"\"\"\n % mt + 'got MW p-value of %g' % mt_pval)\n plt.savefig(os.path.join(out_path, 'legumes.png'))\n res = {}\n inds = sample_extra_info[mt].dropna().index\n for i in range(*mt_group[0]):\n col = fold_df.columns[i]\n res[col] = spearmanr(sample_extra_info.loc[inds][mt], fold_df.loc[\n inds, col].values)\n res = pandas.DataFrame(res, index=['stat', 'pval']).T.sort_values('pval')\n res['Bonf'] = res['pval'] * len(res)\n FDR = multipletests(res.pval.values.tolist(), method='fdr_by')\n res['FDR_BY'] = FDR[0]\n res['FDR_BY_qval'] = FDR[1]\n FDR = multipletests(res.pval.values.tolist(), method='fdr_bh')\n res['FDR_BH'] = FDR[0]\n res['FDR_BH_qval'] = FDR[1]\n res['allergens_common_name'] = df_info.loc[res.index].allergens_common_name\n print('Of %d oligos in the blue group %d pass FDR (BY) vs %s' % (len(\n res), len(res[res.FDR_BY]), mt))\n res.to_csv(os.path.join(out_path, 'mt_1342.csv'))\n",
"step-4": "from scipy.stats import mannwhitneyu\nimport matplotlib.patches as patches\nimport os\nimport numpy\nimport pandas\nfrom matplotlib.gridspec import GridSpec\nfrom scipy.cluster.hierarchy import fcluster, linkage, dendrogram\nfrom scipy.spatial.distance import squareform\nimport seaborn as sns\nfrom scipy.stats import spearmanr\nfrom statsmodels.stats.multitest import multipletests\nimport matplotlib.pyplot as plt\nfrom config import base_path, out_path\nMIN_OLIS = 200\nTHROW_BAD_OLIS = True\nMIN_APPEAR = 0.02\nCLUST_TH = 0.7\nMIN_CLUST = 10\n\n\ndef get_clusters(link, dn, inds, th=0.7):\n clst = fcluster(link, criterion='distance', t=th)\n return pandas.Series(index=inds, data=clst).iloc[dn['leaves']]\n\n\ndef draw_significant_groups(groups, dn_ax, color='white'):\n for group in groups:\n rect = patches.Rectangle((group[0][0], group[1][0]), group[0][1] -\n group[0][0], group[1][1] - group[1][0], linewidth=1, edgecolor=\n color, facecolor='none')\n dn_ax.add_patch(rect)\n\n\ndef draw_legume_group(group, ax):\n y_values = ax.get_ylim()\n x_values = ax.get_xlim()\n rect = patches.Rectangle((0, 0), x_values[1], group[0], linewidth=1,\n edgecolor='white', facecolor='white', alpha=0.6)\n ax.add_patch(rect)\n rect = patches.Rectangle((0, group[1]), x_values[1], y_values[0] -\n group[1], linewidth=1, edgecolor='white', facecolor='white', alpha=0.6)\n ax.add_patch(rect)\n\n\ndef get_groups(clst, clust_above=MIN_CLUST):\n groups = []\n v = -1\n for i in range(len(clst)):\n if clst[i] == v:\n continue\n if v == -1:\n groups.append([i])\n v = clst[i]\n continue\n if i - groups[-1][0] >= clust_above:\n groups[-1].append(i)\n groups.append([i])\n else:\n groups[-1][0] = i\n v = clst[i]\n groups = groups[:-1]\n return groups\n\n\nif __name__ == '__main__':\n os.makedirs(out_path, exist_ok=True)\n df_info = pandas.read_csv(os.path.join(base_path,\n 'library_contents.csv'), index_col=0, low_memory=False)\n df_info = df_info[df_info.is_allergens & (df_info['num_copy'] == 1)]\n inds = df_info.index\n l_base = len(inds)\n meta_df = pandas.read_csv(os.path.join(base_path, 'cohort.csv'),\n index_col=0, low_memory=False)\n meta_df = meta_df[(meta_df.timepoint == 1) & (meta_df.num_passed >=\n MIN_OLIS)]\n fold_df = pandas.read_csv(os.path.join(base_path, 'fold_data.csv'),\n index_col=[0, 1], low_memory=False).loc[meta_df.index].unstack()\n fold_df.columns = fold_df.columns.get_level_values(1)\n fold_df = fold_df[fold_df.columns.intersection(inds)]\n if THROW_BAD_OLIS:\n drop = fold_df.columns[(fold_df == -1).sum() > 0]\n fold_df = fold_df[fold_df.columns.difference(drop)].fillna(1)\n inds = df_info.index.difference(drop)\n df_info = df_info.loc[inds]\n fold_df = fold_df[fold_df.columns[(fold_df > 1).sum() > MIN_APPEAR *\n len(fold_df)]]\n fold_df = numpy.log(fold_df.fillna(1))\n df_info = df_info.loc[fold_df.columns]\n th = CLUST_TH\n corr = fold_df.corr('spearman')\n link = linkage(squareform(1 - corr), method='average')\n dn = dendrogram(link, no_plot=True)\n clst = get_clusters(link, dn, corr.columns, th)\n groups = get_groups(clst)\n corr1 = fold_df.T.corr('spearman')\n link1 = linkage(squareform(1 - corr1), method='average')\n dn1 = dendrogram(link1, no_plot=True)\n clst1 = get_clusters(link1, dn1, corr1.columns, th)\n groups1 = get_groups(clst1)\n fig = plt.figure(figsize=[9.2, 12])\n gs = GridSpec(1, 3, width_ratios=[0.2, 3, 1])\n bar_ax = fig.add_subplot(gs[0])\n dendogram_ax = fig.add_subplot(gs[1])\n sns.heatmap(fold_df.iloc[dn1['leaves'], dn['leaves']], cmap=sns.\n color_palette('flare', 
as_cmap=True), ax=dendogram_ax, yticklabels=\n False, xticklabels=False, cbar_ax=bar_ax)\n dendogram_ax.set_xlabel('oligos')\n dendogram_ax.set_ylabel('samples')\n mt = 'normalized mt_1342'\n bar_axis1 = fig.add_subplot(gs[2], sharey=dendogram_ax)\n meta_df['yob'] = (meta_df['yob'] - 1944) / 60\n use_columns = ['gender', 'yob']\n sample_extra_info = pandas.merge(meta_df[use_columns], meta_df[mt],\n left_index=True, right_index=True, how='left')\n sample_extra_info[mt] = ((sample_extra_info[mt] - sample_extra_info[mt]\n .min()) / (sample_extra_info[mt].max() - sample_extra_info[mt].min())\n ).astype(float)\n sample_extra_info.rename(columns={mt: 'norm mt_1342'}, inplace=True)\n mt = 'norm mt_1342'\n sample_extra_info = sample_extra_info.iloc[dn1['leaves']]\n sns.heatmap(data=sample_extra_info, xticklabels=sample_extra_info.\n columns, yticklabels=False, ax=bar_axis1, cmap=sns.color_palette(\n 'viridis', as_cmap=True))\n fold_df = fold_df.iloc[dn1['leaves'], dn['leaves']].copy()\n significant_groups = []\n for oligo_subgroup in groups:\n sample_group_means = sorted(enumerate([fold_df.iloc[range(*\n sample_group), range(*oligo_subgroup)].mean().mean() for\n sample_group in groups1]), key=lambda x: -x[1])\n if sample_group_means[0][1] > 2 * sample_group_means[1][1]:\n significant_groups.append([oligo_subgroup, groups1[\n sample_group_means[0][0]]])\n draw_significant_groups(significant_groups, dendogram_ax)\n mt_scores = pandas.Series([mannwhitneyu(sample_extra_info.iloc[range(*\n sample_group)][mt].dropna(), sample_extra_info.iloc[list(range(0,\n sample_group[0])) + list(range(sample_group[1], len(\n sample_extra_info)))][mt].dropna())[1] for oligos_group,\n sample_group in significant_groups])\n mt_group = significant_groups[mt_scores.idxmin()]\n mt_pval = mt_scores.min()\n draw_significant_groups([mt_group], dendogram_ax, color='blue')\n draw_legume_group(mt_group[1], bar_axis1)\n plt.suptitle(\n \"\"\"For group marked in blue the %s level\nof samples in group vs those not in group\n\"\"\"\n % mt + 'got MW p-value of %g' % mt_pval)\n plt.savefig(os.path.join(out_path, 'legumes.png'))\n res = {}\n inds = sample_extra_info[mt].dropna().index\n for i in range(*mt_group[0]):\n col = fold_df.columns[i]\n res[col] = spearmanr(sample_extra_info.loc[inds][mt], fold_df.loc[\n inds, col].values)\n res = pandas.DataFrame(res, index=['stat', 'pval']).T.sort_values('pval')\n res['Bonf'] = res['pval'] * len(res)\n FDR = multipletests(res.pval.values.tolist(), method='fdr_by')\n res['FDR_BY'] = FDR[0]\n res['FDR_BY_qval'] = FDR[1]\n FDR = multipletests(res.pval.values.tolist(), method='fdr_bh')\n res['FDR_BH'] = FDR[0]\n res['FDR_BH_qval'] = FDR[1]\n res['allergens_common_name'] = df_info.loc[res.index].allergens_common_name\n print('Of %d oligos in the blue group %d pass FDR (BY) vs %s' % (len(\n res), len(res[res.FDR_BY]), mt))\n res.to_csv(os.path.join(out_path, 'mt_1342.csv'))\n",
"step-5": "from scipy.stats import mannwhitneyu\nimport matplotlib.patches as patches\nimport os\nimport numpy\nimport pandas\nfrom matplotlib.gridspec import GridSpec\nfrom scipy.cluster.hierarchy import fcluster, linkage, dendrogram\nfrom scipy.spatial.distance import squareform\nimport seaborn as sns\nfrom scipy.stats import spearmanr\nfrom statsmodels.stats.multitest import multipletests\nimport matplotlib.pyplot as plt\n\nfrom config import base_path, out_path\n\nMIN_OLIS = 200\nTHROW_BAD_OLIS = True\nMIN_APPEAR = 0.02\nCLUST_TH = 0.7\nMIN_CLUST = 10\n\n\ndef get_clusters(link, dn, inds, th=0.7):\n clst = fcluster(link, criterion='distance', t=th)\n return pandas.Series(index=inds, data=clst).iloc[dn['leaves']]\n\n\ndef draw_significant_groups(groups, dn_ax, color='white'):\n # Draw boxes around clusters\n for group in groups:\n rect = patches.Rectangle((group[0][0], group[1][0]), group[0][1] - group[0][0], group[1][1] - group[1][0],\n linewidth=1, edgecolor=color, facecolor='none')\n dn_ax.add_patch(rect)\n\n\ndef draw_legume_group(group, ax):\n y_values = ax.get_ylim()\n x_values = ax.get_xlim()\n rect = patches.Rectangle((0, 0), x_values[1], group[0], linewidth=1, edgecolor='white',\n facecolor='white', alpha=0.6)\n ax.add_patch(rect)\n rect = patches.Rectangle((0, group[1]), x_values[1], y_values[0] - group[1], linewidth=1, edgecolor='white',\n facecolor='white', alpha=0.6)\n ax.add_patch(rect)\n\n\ndef get_groups(clst, clust_above=MIN_CLUST):\n groups = []\n v = -1\n for i in range(len(clst)):\n if clst[i] == v:\n continue\n if v == -1:\n groups.append([i])\n v = clst[i]\n continue\n if (i - groups[-1][0]) >= clust_above:\n groups[-1].append(i)\n groups.append([i])\n else:\n groups[-1][0] = i\n v = clst[i]\n groups = groups[:-1]\n return groups\n\n\nif __name__ == \"__main__\":\n os.makedirs(out_path, exist_ok=True)\n\n df_info = pandas.read_csv(os.path.join(base_path, \"library_contents.csv\"), index_col=0, low_memory=False)\n df_info = df_info[df_info.is_allergens & (df_info['num_copy'] == 1)]\n inds = df_info.index\n l_base = len(inds)\n\n meta_df = pandas.read_csv(os.path.join(base_path, \"cohort.csv\"), index_col=0, low_memory=False)\n meta_df = meta_df[(meta_df.timepoint == 1) & (meta_df.num_passed >= MIN_OLIS)]\n\n fold_df = pandas.read_csv(os.path.join(base_path, \"fold_data.csv\"), index_col=[0, 1],\n low_memory=False).loc[meta_df.index].unstack()\n fold_df.columns = fold_df.columns.get_level_values(1)\n fold_df = fold_df[fold_df.columns.intersection(inds)]\n\n if THROW_BAD_OLIS:\n drop = fold_df.columns[(fold_df == -1).sum() > 0]\n fold_df = fold_df[fold_df.columns.difference(drop)].fillna(1)\n inds = df_info.index.difference(drop)\n df_info = df_info.loc[inds]\n\n fold_df = fold_df[fold_df.columns[(fold_df > 1).sum() > (MIN_APPEAR * len(fold_df))]]\n fold_df = numpy.log(fold_df.fillna(1))\n df_info = df_info.loc[fold_df.columns]\n\n th = CLUST_TH\n\n # Oligos level correlations\n corr = fold_df.corr('spearman')\n link = linkage(squareform(1 - corr), method='average')\n dn = dendrogram(link, no_plot=True)\n clst = get_clusters(link, dn, corr.columns, th)\n groups = get_groups(clst)\n\n # Samples level correlations\n corr1 = fold_df.T.corr('spearman')\n link1 = linkage(squareform(1 - corr1), method='average')\n dn1 = dendrogram(link1, no_plot=True)\n clst1 = get_clusters(link1, dn1, corr1.columns, th)\n groups1 = get_groups(clst1)\n\n # Define figure\n fig = plt.figure(figsize=[9.2, 12])\n gs = GridSpec(1, 3, width_ratios=[0.2, 3, 1])\n\n # Plot heatmap\n bar_ax = 
fig.add_subplot(gs[0])\n dendogram_ax = fig.add_subplot(gs[1])\n sns.heatmap(fold_df.iloc[dn1['leaves'], dn['leaves']], cmap=sns.color_palette('flare', as_cmap=True),\n ax=dendogram_ax, yticklabels=False, xticklabels=False, cbar_ax=bar_ax)\n\n dendogram_ax.set_xlabel(\"oligos\")\n dendogram_ax.set_ylabel(\"samples\")\n\n # Plot sample level bars\n mt = 'normalized mt_1342'\n bar_axis1 = fig.add_subplot(gs[2], sharey=dendogram_ax)\n meta_df['yob'] = (meta_df['yob'] - 1944) / 60\n use_columns = ['gender', 'yob']\n sample_extra_info = pandas.merge(meta_df[use_columns], meta_df[mt], left_index=True,\n right_index=True, how='left')\n sample_extra_info[mt] = ((sample_extra_info[mt] - sample_extra_info[mt].min()) /\n (sample_extra_info[mt].max() - sample_extra_info[mt].min())).astype(float)\n sample_extra_info.rename(columns={mt: 'norm mt_1342'}, inplace=True)\n mt = 'norm mt_1342'\n sample_extra_info = sample_extra_info.iloc[dn1['leaves']]\n sns.heatmap(data=sample_extra_info, xticklabels=sample_extra_info.columns, yticklabels=False,\n ax=bar_axis1, cmap=sns.color_palette(\"viridis\", as_cmap=True))\n\n # Compute significant shared groups\n fold_df = fold_df.iloc[dn1['leaves'], dn['leaves']].copy()\n significant_groups = []\n for oligo_subgroup in groups:\n sample_group_means = sorted(enumerate(\n [fold_df.iloc[range(*sample_group), range(*oligo_subgroup)].mean().mean() for sample_group in groups1]),\n key=lambda x: -x[1])\n if sample_group_means[0][1] > 2 * sample_group_means[1][1]:\n significant_groups.append([oligo_subgroup, groups1[sample_group_means[0][0]]])\n draw_significant_groups(significant_groups, dendogram_ax)\n\n mt_scores = pandas.Series([mannwhitneyu(sample_extra_info.iloc[range(*sample_group)][mt].dropna(),\n sample_extra_info.iloc[list(range(0, sample_group[0])) +\n list(range(sample_group[1], len(sample_extra_info)))]\n [mt].dropna())[1]\n for oligos_group, sample_group in significant_groups])\n mt_group = significant_groups[mt_scores.idxmin()]\n mt_pval = mt_scores.min()\n draw_significant_groups([mt_group], dendogram_ax, color='blue')\n draw_legume_group(mt_group[1], bar_axis1)\n plt.suptitle('For group marked in blue the %s level\\nof samples in group vs those not in group\\n' % mt +\n 'got MW p-value of %g' % mt_pval)\n\n plt.savefig(os.path.join(out_path, \"legumes.png\"))\n\n res = {}\n inds = sample_extra_info[mt].dropna().index\n for i in range(*mt_group[0]):\n col = fold_df.columns[i]\n res[col] = spearmanr(sample_extra_info.loc[inds][mt], fold_df.loc[inds, col].values)\n res = pandas.DataFrame(res, index=['stat', 'pval']).T.sort_values('pval')\n res[\"Bonf\"] = res['pval'] * len(res)\n FDR = multipletests(res.pval.values.tolist(), method='fdr_by')\n res[\"FDR_BY\"] = FDR[0]\n res['FDR_BY_qval'] = FDR[1]\n FDR = multipletests(res.pval.values.tolist(), method='fdr_bh')\n res[\"FDR_BH\"] = FDR[0]\n res['FDR_BH_qval'] = FDR[1]\n res['allergens_common_name'] = df_info.loc[res.index].allergens_common_name\n\n print(\"Of %d oligos in the blue group %d pass FDR (BY) vs %s\" % (len(res), len(res[res.FDR_BY]), mt))\n res.to_csv(os.path.join(out_path, \"mt_1342.csv\"))\n\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
class people:
def __init__(self, name):
self.name = name
self.purchase_descrip = []
self.purchase_price_descrip = []
self.purchases = []
self.total_spent = 0
self.debt = 0
self.debt_temp = 0
self.pay = []
self.pay_out = []
self.pay_who = []
def add_purchase(self, purchase):
self.purchases.append(purchase)
def add_description(self, description):
self.purchase_descrip.append(description)
def get_purchase(self):
return self.purchases
def get_description(self):
return self.purchase_descrip
def set_total(self):
self.total_spent = 0
for items in self.purchases:
self.total_spent = self.total_spent+float(items)
def get_total(self):
return self.total_spent
def get_name(self):
return self.name
def add_purchase_descrip(self, price, description):
self.purchase_price_descrip.append("$"+str(price)+" "+description)
def get_purchase_descrip(self):
return self.purchase_price_descrip
def set_debt(self, cost_per_person):
self.debt = float(self.total_spent)-cost_per_person
def get_debt(self):
return self.debt
def add_payment(self, payment):
self.pay.append(payment)
def get_pay(self):
return self.pay
def add_pay_who(self, who_to_pay):
self.pay_who.append(who_to_pay)
def get_pay_who(self):
return self.pay_who
def set_debt_temp(self):
self.debt_temp = self.debt
def get_temp_debt(self):
return self.debt_temp
def update_temp_debt(self, payment):
self.debt_temp = self.debt_temp+payment*-1
def pay_temp_debt(self, payment):
self.debt_temp-payment
def round_payments(self):
for x in range(0, len(self.pay)):
self.pay[x] = round(self.pay[x], 2)
def round_purchases(self):
for x in range(0, len(self.purchases)):
self.purchases[x] = round(float(self.purchases[x]), 2)
|
normal
|
{
"blob_id": "bdda42665acfefccad45a2b49f5436a186140579",
"index": 8576,
"step-1": "class people:\n <mask token>\n\n def add_purchase(self, purchase):\n self.purchases.append(purchase)\n\n def add_description(self, description):\n self.purchase_descrip.append(description)\n <mask token>\n <mask token>\n\n def set_total(self):\n self.total_spent = 0\n for items in self.purchases:\n self.total_spent = self.total_spent + float(items)\n <mask token>\n <mask token>\n\n def add_purchase_descrip(self, price, description):\n self.purchase_price_descrip.append('$' + str(price) + ' ' +\n description)\n\n def get_purchase_descrip(self):\n return self.purchase_price_descrip\n\n def set_debt(self, cost_per_person):\n self.debt = float(self.total_spent) - cost_per_person\n\n def get_debt(self):\n return self.debt\n <mask token>\n <mask token>\n <mask token>\n\n def get_pay_who(self):\n return self.pay_who\n\n def set_debt_temp(self):\n self.debt_temp = self.debt\n\n def get_temp_debt(self):\n return self.debt_temp\n <mask token>\n\n def pay_temp_debt(self, payment):\n self.debt_temp - payment\n\n def round_payments(self):\n for x in range(0, len(self.pay)):\n self.pay[x] = round(self.pay[x], 2)\n <mask token>\n",
"step-2": "class people:\n <mask token>\n\n def add_purchase(self, purchase):\n self.purchases.append(purchase)\n\n def add_description(self, description):\n self.purchase_descrip.append(description)\n\n def get_purchase(self):\n return self.purchases\n <mask token>\n\n def set_total(self):\n self.total_spent = 0\n for items in self.purchases:\n self.total_spent = self.total_spent + float(items)\n\n def get_total(self):\n return self.total_spent\n <mask token>\n\n def add_purchase_descrip(self, price, description):\n self.purchase_price_descrip.append('$' + str(price) + ' ' +\n description)\n\n def get_purchase_descrip(self):\n return self.purchase_price_descrip\n\n def set_debt(self, cost_per_person):\n self.debt = float(self.total_spent) - cost_per_person\n\n def get_debt(self):\n return self.debt\n\n def add_payment(self, payment):\n self.pay.append(payment)\n <mask token>\n\n def add_pay_who(self, who_to_pay):\n self.pay_who.append(who_to_pay)\n\n def get_pay_who(self):\n return self.pay_who\n\n def set_debt_temp(self):\n self.debt_temp = self.debt\n\n def get_temp_debt(self):\n return self.debt_temp\n\n def update_temp_debt(self, payment):\n self.debt_temp = self.debt_temp + payment * -1\n\n def pay_temp_debt(self, payment):\n self.debt_temp - payment\n\n def round_payments(self):\n for x in range(0, len(self.pay)):\n self.pay[x] = round(self.pay[x], 2)\n <mask token>\n",
"step-3": "class people:\n\n def __init__(self, name):\n self.name = name\n self.purchase_descrip = []\n self.purchase_price_descrip = []\n self.purchases = []\n self.total_spent = 0\n self.debt = 0\n self.debt_temp = 0\n self.pay = []\n self.pay_out = []\n self.pay_who = []\n\n def add_purchase(self, purchase):\n self.purchases.append(purchase)\n\n def add_description(self, description):\n self.purchase_descrip.append(description)\n\n def get_purchase(self):\n return self.purchases\n <mask token>\n\n def set_total(self):\n self.total_spent = 0\n for items in self.purchases:\n self.total_spent = self.total_spent + float(items)\n\n def get_total(self):\n return self.total_spent\n <mask token>\n\n def add_purchase_descrip(self, price, description):\n self.purchase_price_descrip.append('$' + str(price) + ' ' +\n description)\n\n def get_purchase_descrip(self):\n return self.purchase_price_descrip\n\n def set_debt(self, cost_per_person):\n self.debt = float(self.total_spent) - cost_per_person\n\n def get_debt(self):\n return self.debt\n\n def add_payment(self, payment):\n self.pay.append(payment)\n <mask token>\n\n def add_pay_who(self, who_to_pay):\n self.pay_who.append(who_to_pay)\n\n def get_pay_who(self):\n return self.pay_who\n\n def set_debt_temp(self):\n self.debt_temp = self.debt\n\n def get_temp_debt(self):\n return self.debt_temp\n\n def update_temp_debt(self, payment):\n self.debt_temp = self.debt_temp + payment * -1\n\n def pay_temp_debt(self, payment):\n self.debt_temp - payment\n\n def round_payments(self):\n for x in range(0, len(self.pay)):\n self.pay[x] = round(self.pay[x], 2)\n\n def round_purchases(self):\n for x in range(0, len(self.purchases)):\n self.purchases[x] = round(float(self.purchases[x]), 2)\n",
"step-4": "class people:\n\n def __init__(self, name):\n self.name = name\n self.purchase_descrip = []\n self.purchase_price_descrip = []\n self.purchases = []\n self.total_spent = 0\n self.debt = 0\n self.debt_temp = 0\n self.pay = []\n self.pay_out = []\n self.pay_who = []\n\n def add_purchase(self, purchase):\n self.purchases.append(purchase)\n\n def add_description(self, description):\n self.purchase_descrip.append(description)\n\n def get_purchase(self):\n return self.purchases\n <mask token>\n\n def set_total(self):\n self.total_spent = 0\n for items in self.purchases:\n self.total_spent = self.total_spent + float(items)\n\n def get_total(self):\n return self.total_spent\n\n def get_name(self):\n return self.name\n\n def add_purchase_descrip(self, price, description):\n self.purchase_price_descrip.append('$' + str(price) + ' ' +\n description)\n\n def get_purchase_descrip(self):\n return self.purchase_price_descrip\n\n def set_debt(self, cost_per_person):\n self.debt = float(self.total_spent) - cost_per_person\n\n def get_debt(self):\n return self.debt\n\n def add_payment(self, payment):\n self.pay.append(payment)\n\n def get_pay(self):\n return self.pay\n\n def add_pay_who(self, who_to_pay):\n self.pay_who.append(who_to_pay)\n\n def get_pay_who(self):\n return self.pay_who\n\n def set_debt_temp(self):\n self.debt_temp = self.debt\n\n def get_temp_debt(self):\n return self.debt_temp\n\n def update_temp_debt(self, payment):\n self.debt_temp = self.debt_temp + payment * -1\n\n def pay_temp_debt(self, payment):\n self.debt_temp - payment\n\n def round_payments(self):\n for x in range(0, len(self.pay)):\n self.pay[x] = round(self.pay[x], 2)\n\n def round_purchases(self):\n for x in range(0, len(self.purchases)):\n self.purchases[x] = round(float(self.purchases[x]), 2)\n",
"step-5": "class people:\n\n def __init__(self, name):\n self.name = name\n self.purchase_descrip = []\n self.purchase_price_descrip = []\n self.purchases = []\n self.total_spent = 0\n self.debt = 0\n self.debt_temp = 0\n self.pay = []\n self.pay_out = []\n self.pay_who = []\n\n def add_purchase(self, purchase):\n self.purchases.append(purchase)\n\n def add_description(self, description):\n self.purchase_descrip.append(description)\n\n def get_purchase(self):\n return self.purchases\n\n def get_description(self):\n return self.purchase_descrip\n\n def set_total(self):\n self.total_spent = 0\n for items in self.purchases:\n self.total_spent = self.total_spent+float(items)\n\n def get_total(self):\n return self.total_spent\n\n def get_name(self):\n return self.name\n\n def add_purchase_descrip(self, price, description):\n self.purchase_price_descrip.append(\"$\"+str(price)+\" \"+description)\n\n def get_purchase_descrip(self):\n return self.purchase_price_descrip\n\n def set_debt(self, cost_per_person):\n self.debt = float(self.total_spent)-cost_per_person\n\n def get_debt(self):\n return self.debt\n\n def add_payment(self, payment):\n self.pay.append(payment)\n\n def get_pay(self):\n return self.pay\n\n def add_pay_who(self, who_to_pay):\n self.pay_who.append(who_to_pay)\n\n def get_pay_who(self):\n return self.pay_who\n\n def set_debt_temp(self):\n self.debt_temp = self.debt\n\n def get_temp_debt(self):\n return self.debt_temp\n\n def update_temp_debt(self, payment):\n self.debt_temp = self.debt_temp+payment*-1\n\n def pay_temp_debt(self, payment):\n self.debt_temp-payment\n\n def round_payments(self):\n for x in range(0, len(self.pay)):\n self.pay[x] = round(self.pay[x], 2)\n\n def round_purchases(self):\n for x in range(0, len(self.purchases)):\n self.purchases[x] = round(float(self.purchases[x]), 2)\n\n\n\n",
"step-ids": [
13,
18,
20,
22,
24
]
}
|
[
13,
18,
20,
22,
24
] |
from django.shortcuts import render
from django.views.generic import ListView, DetailView
from django.views.generic.edit import CreateView, UpdateView
from django.urls import reverse_lazy
from django.utils import timezone
from time import time
import json
from .models import Attendance, Disciple
from users.models import CustomUser
class AttendanceListView(ListView):
model = Attendance
template_name = 'attendance_list.html'
def get_queryset(self):
return self.model.objects.order_by('-date')
class AttendanceDetailView(DetailView):
model = Attendance
template_name = 'attendance_detail.html'
"""
class AttendanceCreateView(CreateView):
model = Attendance
template_name = 'attendance_new.html'
fields = ['title', 'document']
def form_valid(self, form):
obj = form.save(commit=False)
obj.author = self.request.user
obj.date = timezone.now()
obj.save()
return super().form_valid(form)
"""
class AttendanceCreateView(CreateView):
model = Attendance
template_name = 'attendance_new.html'
fields = ['group', 'disciple']
def get_context_data(self, *args, **kwargs):
groups_choices = [
'ИУ1',
'ИУ2',
'ИУ3',
'ИУ4',
'ИУ5',
'ИУ6',
'ИУ7',
'ИУ8',
]
context = super(AttendanceCreateView, self).get_context_data(*args, **kwargs)
context['students'] = CustomUser.objects.filter(student_group='ИУ6')
context['disciples'] = Disciple.objects.all()
context['groups'] = groups_choices
return context
def form_valid(self, form):
obj = form.save(commit=False)
obj.author = self.request.user
obj.date = timezone.now()
#obj.disciple =
fname = f'Журнал-{obj.disciple.name}-{timezone.now()}.csv'
#print(fname)
form_data = self.request.POST
print(form_data)
#Process form_data; mk csv_file based on it; save it to obj.document
#obj.document = doc
obj.save()
print(form, dir(form))
return super().form_valid(form)
|
normal
|
{
"blob_id": "38c78a51a50ee9844aec8b8cdcdd42b858748518",
"index": 2552,
"step-1": "<mask token>\n\n\nclass AttendanceDetailView(DetailView):\n <mask token>\n <mask token>\n\n\n<mask token>\n\n\nclass AttendanceCreateView(CreateView):\n model = Attendance\n template_name = 'attendance_new.html'\n fields = ['group', 'disciple']\n\n def get_context_data(self, *args, **kwargs):\n groups_choices = ['ИУ1', 'ИУ2', 'ИУ3', 'ИУ4', 'ИУ5', 'ИУ6', 'ИУ7',\n 'ИУ8']\n context = super(AttendanceCreateView, self).get_context_data(*args,\n **kwargs)\n context['students'] = CustomUser.objects.filter(student_group='ИУ6')\n context['disciples'] = Disciple.objects.all()\n context['groups'] = groups_choices\n return context\n\n def form_valid(self, form):\n obj = form.save(commit=False)\n obj.author = self.request.user\n obj.date = timezone.now()\n fname = f'Журнал-{obj.disciple.name}-{timezone.now()}.csv'\n form_data = self.request.POST\n print(form_data)\n obj.save()\n print(form, dir(form))\n return super().form_valid(form)\n",
"step-2": "<mask token>\n\n\nclass AttendanceListView(ListView):\n <mask token>\n <mask token>\n\n def get_queryset(self):\n return self.model.objects.order_by('-date')\n\n\nclass AttendanceDetailView(DetailView):\n model = Attendance\n template_name = 'attendance_detail.html'\n\n\n<mask token>\n\n\nclass AttendanceCreateView(CreateView):\n model = Attendance\n template_name = 'attendance_new.html'\n fields = ['group', 'disciple']\n\n def get_context_data(self, *args, **kwargs):\n groups_choices = ['ИУ1', 'ИУ2', 'ИУ3', 'ИУ4', 'ИУ5', 'ИУ6', 'ИУ7',\n 'ИУ8']\n context = super(AttendanceCreateView, self).get_context_data(*args,\n **kwargs)\n context['students'] = CustomUser.objects.filter(student_group='ИУ6')\n context['disciples'] = Disciple.objects.all()\n context['groups'] = groups_choices\n return context\n\n def form_valid(self, form):\n obj = form.save(commit=False)\n obj.author = self.request.user\n obj.date = timezone.now()\n fname = f'Журнал-{obj.disciple.name}-{timezone.now()}.csv'\n form_data = self.request.POST\n print(form_data)\n obj.save()\n print(form, dir(form))\n return super().form_valid(form)\n",
"step-3": "<mask token>\n\n\nclass AttendanceListView(ListView):\n model = Attendance\n template_name = 'attendance_list.html'\n\n def get_queryset(self):\n return self.model.objects.order_by('-date')\n\n\nclass AttendanceDetailView(DetailView):\n model = Attendance\n template_name = 'attendance_detail.html'\n\n\n<mask token>\n\n\nclass AttendanceCreateView(CreateView):\n model = Attendance\n template_name = 'attendance_new.html'\n fields = ['group', 'disciple']\n\n def get_context_data(self, *args, **kwargs):\n groups_choices = ['ИУ1', 'ИУ2', 'ИУ3', 'ИУ4', 'ИУ5', 'ИУ6', 'ИУ7',\n 'ИУ8']\n context = super(AttendanceCreateView, self).get_context_data(*args,\n **kwargs)\n context['students'] = CustomUser.objects.filter(student_group='ИУ6')\n context['disciples'] = Disciple.objects.all()\n context['groups'] = groups_choices\n return context\n\n def form_valid(self, form):\n obj = form.save(commit=False)\n obj.author = self.request.user\n obj.date = timezone.now()\n fname = f'Журнал-{obj.disciple.name}-{timezone.now()}.csv'\n form_data = self.request.POST\n print(form_data)\n obj.save()\n print(form, dir(form))\n return super().form_valid(form)\n",
"step-4": "from django.shortcuts import render\nfrom django.views.generic import ListView, DetailView\nfrom django.views.generic.edit import CreateView, UpdateView\nfrom django.urls import reverse_lazy\nfrom django.utils import timezone\nfrom time import time\nimport json\nfrom .models import Attendance, Disciple\nfrom users.models import CustomUser\n\n\nclass AttendanceListView(ListView):\n model = Attendance\n template_name = 'attendance_list.html'\n\n def get_queryset(self):\n return self.model.objects.order_by('-date')\n\n\nclass AttendanceDetailView(DetailView):\n model = Attendance\n template_name = 'attendance_detail.html'\n\n\n<mask token>\n\n\nclass AttendanceCreateView(CreateView):\n model = Attendance\n template_name = 'attendance_new.html'\n fields = ['group', 'disciple']\n\n def get_context_data(self, *args, **kwargs):\n groups_choices = ['ИУ1', 'ИУ2', 'ИУ3', 'ИУ4', 'ИУ5', 'ИУ6', 'ИУ7',\n 'ИУ8']\n context = super(AttendanceCreateView, self).get_context_data(*args,\n **kwargs)\n context['students'] = CustomUser.objects.filter(student_group='ИУ6')\n context['disciples'] = Disciple.objects.all()\n context['groups'] = groups_choices\n return context\n\n def form_valid(self, form):\n obj = form.save(commit=False)\n obj.author = self.request.user\n obj.date = timezone.now()\n fname = f'Журнал-{obj.disciple.name}-{timezone.now()}.csv'\n form_data = self.request.POST\n print(form_data)\n obj.save()\n print(form, dir(form))\n return super().form_valid(form)\n",
"step-5": "from django.shortcuts import render\nfrom django.views.generic import ListView, DetailView\nfrom django.views.generic.edit import CreateView, UpdateView\nfrom django.urls import reverse_lazy\nfrom django.utils import timezone\n\nfrom time import time\nimport json\n\nfrom .models import Attendance, Disciple\nfrom users.models import CustomUser\n\n\nclass AttendanceListView(ListView):\n model = Attendance\n template_name = 'attendance_list.html'\n\n def get_queryset(self):\n return self.model.objects.order_by('-date')\n\nclass AttendanceDetailView(DetailView):\n model = Attendance\n template_name = 'attendance_detail.html'\n\n\n\"\"\"\nclass AttendanceCreateView(CreateView):\n model = Attendance\n template_name = 'attendance_new.html'\n fields = ['title', 'document']\n\n def form_valid(self, form):\n obj = form.save(commit=False)\n obj.author = self.request.user\n obj.date = timezone.now()\n obj.save()\n\n return super().form_valid(form)\n\"\"\"\n\n\nclass AttendanceCreateView(CreateView):\n model = Attendance\n template_name = 'attendance_new.html'\n fields = ['group', 'disciple']\n\n def get_context_data(self, *args, **kwargs):\n groups_choices = [\n 'ИУ1',\n 'ИУ2',\n 'ИУ3',\n 'ИУ4',\n 'ИУ5',\n 'ИУ6',\n 'ИУ7',\n 'ИУ8',\n ]\n\n\n context = super(AttendanceCreateView, self).get_context_data(*args, **kwargs)\n context['students'] = CustomUser.objects.filter(student_group='ИУ6')\n context['disciples'] = Disciple.objects.all()\n context['groups'] = groups_choices\n\n return context\n\n def form_valid(self, form):\n obj = form.save(commit=False)\n obj.author = self.request.user\n obj.date = timezone.now()\n #obj.disciple =\n\n\n fname = f'Журнал-{obj.disciple.name}-{timezone.now()}.csv'\n #print(fname)\n form_data = self.request.POST\n print(form_data)\n\n #Process form_data; mk csv_file based on it; save it to obj.document\n\n #obj.document = doc\n\n obj.save()\n print(form, dir(form))\n\n return super().form_valid(form)\n",
"step-ids": [
5,
8,
9,
10,
11
]
}
|
[
5,
8,
9,
10,
11
] |
# Sets up directories
MusicDir = "AudioFiles\\"
ModelsDir = "Models\\"
MonstersDir = "Models\\Monsters\\"
|
normal
|
{
"blob_id": "a929bfbe2be6d8f93cafa5b6cc66c7506037ffca",
"index": 4735,
"step-1": "<mask token>\n",
"step-2": "MusicDir = 'AudioFiles\\\\'\nModelsDir = 'Models\\\\'\nMonstersDir = 'Models\\\\Monsters\\\\'\n",
"step-3": "# Sets up directories\nMusicDir = \"AudioFiles\\\\\"\nModelsDir = \"Models\\\\\"\nMonstersDir = \"Models\\\\Monsters\\\\\"",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from django import forms
class CriteriaForm(forms.Form):
query = forms.CharField(widget=forms.Textarea)
|
normal
|
{
"blob_id": "b6529dc77d89cdf2d49c689dc583b78c94e31c4d",
"index": 4716,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass CriteriaForm(forms.Form):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass CriteriaForm(forms.Form):\n query = forms.CharField(widget=forms.Textarea)\n",
"step-4": "from django import forms\n\n\nclass CriteriaForm(forms.Form):\n query = forms.CharField(widget=forms.Textarea)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
__author__ = 'ldd'
# -*- coding: utf-8 -*-
from view.api_doc import handler_define, api_define, Param
from view.base import BaseHandler,CachedPlusHandler
@handler_define
class HelloWorld(BaseHandler):
@api_define("HelloWorld", r'/', [
], description="HelloWorld")
def get(self):
self.write({'status':"HelloWorld"})
|
normal
|
{
"blob_id": "3c738a07d71338ab838e4f1d683e631252d50a30",
"index": 4085,
"step-1": "<mask token>\n\n\n@handler_define\nclass HelloWorld(BaseHandler):\n <mask token>\n",
"step-2": "<mask token>\n\n\n@handler_define\nclass HelloWorld(BaseHandler):\n\n @api_define('HelloWorld', '/', [], description='HelloWorld')\n def get(self):\n self.write({'status': 'HelloWorld'})\n",
"step-3": "__author__ = 'ldd'\n<mask token>\n\n\n@handler_define\nclass HelloWorld(BaseHandler):\n\n @api_define('HelloWorld', '/', [], description='HelloWorld')\n def get(self):\n self.write({'status': 'HelloWorld'})\n",
"step-4": "__author__ = 'ldd'\nfrom view.api_doc import handler_define, api_define, Param\nfrom view.base import BaseHandler, CachedPlusHandler\n\n\n@handler_define\nclass HelloWorld(BaseHandler):\n\n @api_define('HelloWorld', '/', [], description='HelloWorld')\n def get(self):\n self.write({'status': 'HelloWorld'})\n",
"step-5": "__author__ = 'ldd'\n# -*- coding: utf-8 -*-\n\nfrom view.api_doc import handler_define, api_define, Param\nfrom view.base import BaseHandler,CachedPlusHandler\n\n@handler_define\nclass HelloWorld(BaseHandler):\n @api_define(\"HelloWorld\", r'/', [\n ], description=\"HelloWorld\")\n def get(self):\n self.write({'status':\"HelloWorld\"})",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import cv2
import numpy
import os
import glob
import ntpath
from backSub import *
from ConfigParser import SafeConfigParser
filepath = "./tl3Pictures/" # where the input files are
pathRGB = ".diff/" # where the result is saved
extension = "*.jpg" # only jpg files considered
batchCount = 0
backSubInstance = backSub()
if not os.path.exists(filepath + pathRGB):
os.makedirs(filepath+pathRGB) #create the result folder if it
# is not there
backSubInstance.setConfig('sample.cfg') # load the backSub parameters
# from the configuration file
for filename in glob.glob(filepath + extension):
#print(filename) #full file name and path
pathAndFile = os.path.splitext(filename)[0]
#print(pathAndFile) #file name and path without extension
latestFilename = ntpath.basename(pathAndFile)
#print(latestFilename) #only file name
image = cv2.imread(filepath + latestFilename + ".jpg",\
cv2.CV_LOAD_IMAGE_COLOR) #read the image from the source
print(latestFilename)
diffImage = backSubInstance.getDiff(image) # get the difference image
resultFileName = filepath + pathRGB + latestFilename + "motion"+ \
str(batchCount) + ".jpg" #contruct the path where to save diffImage
cv2.imwrite(resultFileName, diffImage) # write the image to the
# destination
batchCount +=1
|
normal
|
{
"blob_id": "506d33587ff6c8b2c3d9bc546307996d2f518d86",
"index": 2060,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif not os.path.exists(filepath + pathRGB):\n os.makedirs(filepath + pathRGB)\nbackSubInstance.setConfig('sample.cfg')\nfor filename in glob.glob(filepath + extension):\n pathAndFile = os.path.splitext(filename)[0]\n latestFilename = ntpath.basename(pathAndFile)\n image = cv2.imread(filepath + latestFilename + '.jpg', cv2.\n CV_LOAD_IMAGE_COLOR)\n print(latestFilename)\n diffImage = backSubInstance.getDiff(image)\n resultFileName = filepath + pathRGB + latestFilename + 'motion' + str(\n batchCount) + '.jpg'\n cv2.imwrite(resultFileName, diffImage)\n batchCount += 1\n",
"step-3": "<mask token>\nfilepath = './tl3Pictures/'\npathRGB = '.diff/'\nextension = '*.jpg'\nbatchCount = 0\nbackSubInstance = backSub()\nif not os.path.exists(filepath + pathRGB):\n os.makedirs(filepath + pathRGB)\nbackSubInstance.setConfig('sample.cfg')\nfor filename in glob.glob(filepath + extension):\n pathAndFile = os.path.splitext(filename)[0]\n latestFilename = ntpath.basename(pathAndFile)\n image = cv2.imread(filepath + latestFilename + '.jpg', cv2.\n CV_LOAD_IMAGE_COLOR)\n print(latestFilename)\n diffImage = backSubInstance.getDiff(image)\n resultFileName = filepath + pathRGB + latestFilename + 'motion' + str(\n batchCount) + '.jpg'\n cv2.imwrite(resultFileName, diffImage)\n batchCount += 1\n",
"step-4": "import cv2\nimport numpy\nimport os\nimport glob\nimport ntpath\nfrom backSub import *\nfrom ConfigParser import SafeConfigParser\nfilepath = './tl3Pictures/'\npathRGB = '.diff/'\nextension = '*.jpg'\nbatchCount = 0\nbackSubInstance = backSub()\nif not os.path.exists(filepath + pathRGB):\n os.makedirs(filepath + pathRGB)\nbackSubInstance.setConfig('sample.cfg')\nfor filename in glob.glob(filepath + extension):\n pathAndFile = os.path.splitext(filename)[0]\n latestFilename = ntpath.basename(pathAndFile)\n image = cv2.imread(filepath + latestFilename + '.jpg', cv2.\n CV_LOAD_IMAGE_COLOR)\n print(latestFilename)\n diffImage = backSubInstance.getDiff(image)\n resultFileName = filepath + pathRGB + latestFilename + 'motion' + str(\n batchCount) + '.jpg'\n cv2.imwrite(resultFileName, diffImage)\n batchCount += 1\n",
"step-5": "import cv2\r\nimport numpy\r\nimport os \r\nimport glob\r\nimport ntpath\r\nfrom backSub import *\r\nfrom ConfigParser import SafeConfigParser\r\n\r\n\r\nfilepath = \"./tl3Pictures/\" # where the input files are\r\npathRGB = \".diff/\" # where the result is saved\r\n\r\nextension = \"*.jpg\" # only jpg files considered\r\nbatchCount = 0\r\nbackSubInstance = backSub()\r\n\r\n\r\nif not os.path.exists(filepath + pathRGB):\r\n\tos.makedirs(filepath+pathRGB) #create the result folder if it \r\n\t\t\t\t\t\t\t\t # is not there \r\n\r\nbackSubInstance.setConfig('sample.cfg') # load the backSub parameters \r\n\t\t\t\t\t\t\t\t # from the configuration file\t\r\n\r\nfor filename in glob.glob(filepath + extension): \r\n\t#print(filename) #full file name and path\r\n\tpathAndFile = os.path.splitext(filename)[0]\r\n\t#print(pathAndFile)\t#file name and path without extension \r\n\tlatestFilename = ntpath.basename(pathAndFile)\r\n\t#print(latestFilename) #only file name\r\n\r\n\timage = cv2.imread(filepath + latestFilename + \".jpg\",\\\r\n\t\tcv2.CV_LOAD_IMAGE_COLOR) #read the image from the source\r\n\tprint(latestFilename)\r\n\tdiffImage = backSubInstance.getDiff(image) # get the difference image\r\n\r\n\tresultFileName = filepath + pathRGB + latestFilename + \"motion\"+ \\\r\n\t str(batchCount) + \".jpg\" #contruct the path where to save diffImage\r\n\tcv2.imwrite(resultFileName, diffImage) # write the image to the\r\n\t \t\t\t\t\t\t\t\t\t\t# destination\r\n\tbatchCount +=1 \r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |