| index (int64, 0 to 100k) | blob_id (string, length 40) | code (string, 7 to 7.27M chars) | steps (list, 1 to 1.25k entries) | error (bool, 2 classes) |
|---|---|---|---|---|
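A minimal sketch of loading a dataset with this schema via the Hugging Face `datasets` library. The dataset path `user/python-code-steps` is a placeholder, not the real identifier; the final assertion reflects the rows shown below, where the first `steps` entry is the raw source file.

```python
from datasets import load_dataset

ds = load_dataset('user/python-code-steps', split='train')  # hypothetical path
row = ds[0]
print(row['blob_id'], row['error'])    # 40-char blob id, bool error flag
print(len(row['steps']))               # number of abstraction steps for this file
assert row['steps'][0] == row['code']  # first step duplicates the raw source (per the rows below)
```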
99,300 | 6f9e1aad4a3275b21c448c663af52ff44b1988d6 |
"""A3 - TEST functions using pytest.raises"""
import pytest
def test_expect_zerodivisionerror_raised(): # passes
with pytest.raises(ZeroDivisionError):
2 / 0
def test_expect_zerodivisionerror_not_raised(): # fails
with pytest.raises(ZeroDivisionError):
2 / 1
def test_expect_zerodivisionerror_raised_other(): # fails
with pytest.raises(ZeroDivisionError):
2 / "not a number"
def test_expect_typeerror_raised(): # passes
with pytest.raises(TypeError):
2 / "not a number"
|
[
"\"\"\"A3 - TEST functions using pytest.raises\"\"\"\n\nimport pytest\n\n\ndef test_expect_zerodivisionerror_raised(): # passes\n with pytest.raises(ZeroDivisionError):\n 2 / 0\n\n\ndef test_expect_zerodivisionerror_not_raised(): # fails\n with pytest.raises(ZeroDivisionError):\n 2 / 1\n\n\ndef test_expect_zerodivisionerror_raised_other(): # fails\n with pytest.raises(ZeroDivisionError):\n 2 / \"not a number\"\n\n\ndef test_expect_typeerror_raised(): # passes\n with pytest.raises(TypeError):\n 2 / \"not a number\"\n",
"<docstring token>\nimport pytest\n\n\ndef test_expect_zerodivisionerror_raised():\n with pytest.raises(ZeroDivisionError):\n 2 / 0\n\n\ndef test_expect_zerodivisionerror_not_raised():\n with pytest.raises(ZeroDivisionError):\n 2 / 1\n\n\ndef test_expect_zerodivisionerror_raised_other():\n with pytest.raises(ZeroDivisionError):\n 2 / 'not a number'\n\n\ndef test_expect_typeerror_raised():\n with pytest.raises(TypeError):\n 2 / 'not a number'\n",
"<docstring token>\n<import token>\n\n\ndef test_expect_zerodivisionerror_raised():\n with pytest.raises(ZeroDivisionError):\n 2 / 0\n\n\ndef test_expect_zerodivisionerror_not_raised():\n with pytest.raises(ZeroDivisionError):\n 2 / 1\n\n\ndef test_expect_zerodivisionerror_raised_other():\n with pytest.raises(ZeroDivisionError):\n 2 / 'not a number'\n\n\ndef test_expect_typeerror_raised():\n with pytest.raises(TypeError):\n 2 / 'not a number'\n",
"<docstring token>\n<import token>\n\n\ndef test_expect_zerodivisionerror_raised():\n with pytest.raises(ZeroDivisionError):\n 2 / 0\n\n\ndef test_expect_zerodivisionerror_not_raised():\n with pytest.raises(ZeroDivisionError):\n 2 / 1\n\n\n<function token>\n\n\ndef test_expect_typeerror_raised():\n with pytest.raises(TypeError):\n 2 / 'not a number'\n",
"<docstring token>\n<import token>\n<function token>\n\n\ndef test_expect_zerodivisionerror_not_raised():\n with pytest.raises(ZeroDivisionError):\n 2 / 1\n\n\n<function token>\n\n\ndef test_expect_typeerror_raised():\n with pytest.raises(TypeError):\n 2 / 'not a number'\n",
"<docstring token>\n<import token>\n<function token>\n\n\ndef test_expect_zerodivisionerror_not_raised():\n with pytest.raises(ZeroDivisionError):\n 2 / 1\n\n\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
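Each row's `steps` list starts from the raw file and progressively replaces docstrings, imports, and whole functions with placeholder tokens (`<docstring token>`, `<import token>`, `<function token>`), as seen above. A rough sketch of the first such step, using the stdlib `ast` module; this is my reading of the column, not the dataset's actual pipeline:

```python
import ast

def mask_docstring(source: str) -> str:
    """Replace a leading module docstring with a placeholder token."""
    tree = ast.parse(source)
    if (tree.body and isinstance(tree.body[0], ast.Expr)
            and isinstance(tree.body[0].value, ast.Constant)
            and isinstance(tree.body[0].value.value, str)):
        lines = source.splitlines()
        end = tree.body[0].end_lineno  # last line of the docstring expression
        return '\n'.join(['<docstring token>'] + lines[end:]) + '\n'
    return source

code = '"""demo"""\nimport os\nprint(os.sep)\n'
print(mask_docstring(code))
# <docstring token>
# import os
# print(os.sep)
```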
99,301 | ccebac993b84b2edae8ff37e8c7060349f57b29e |
import inspect
from common.base_page import BasePage
from selenium.webdriver.common.by import By


class Content(BasePage):

    def search(self, audit_state: int = None, shelve_state: int = None):
        """
        Search.
        :param audit_state: 2 = normal, 4 = awaiting manual review, 7 = manually rejected
        :param shelve_state: 2 = on shelf, 3 = shelving soon, 4 = off shelf
        :return: audit state of the first result
        """
        self.log_name = 'Content.search'
        _elem = {
            '审核状态': 'span[title="审核状态"]',            # audit-state dropdown
            '上架状态': 'span[title="上架状态"]',            # shelve-state dropdown
            '状态选择': 'li.select2-results__option',        # state option in the dropdown
            '搜索按钮': '//button[text()="Search"]',         # search button
            '搜索结果审核状态': 'td.field-state',            # audit state in the results table
            '搜索结果上架状态': 'td.field-upshelve_status',  # shelve state in the results table
        }
        self.find(locator=_elem['审核状态']).click()
        self.finds(locator=_elem['状态选择'])[audit_state].click()
        if inspect.stack()[1][3] != 'test_topic_discuss_search':
            self.find(locator=_elem['上架状态']).click()
            self.finds(locator=_elem['状态选择'])[shelve_state].click()
        self.find(By.XPATH, _elem['搜索按钮']).click()
        try:
            res = self.finds(locator=_elem['搜索结果审核状态'])[0].text
        except TypeError:
            return None  # no search results
        else:
            return res

    def refuse_audit(self):
        """
        Reject the first item under review.
        :return: audit state
        """
        self.log_name = 'Content.refuse_audit'
        _elem = {
            '内容选中': 'input[name="_selected_action"]',        # row checkbox
            '操作框': 'select[name="action"]',                   # action dropdown
            '拒绝审核': 'option[value="reject_published"]',      # reject option
            'GO': 'button[title = "Run the selected action"]',
            '搜索结果审核状态': 'td.field-state',                # audit state in the results table
        }
        self.finds(locator=_elem['内容选中'])[0].click()
        self.find(locator=_elem['操作框']).click()
        self.find(locator=_elem['拒绝审核']).click()
        self.find(locator=_elem['GO']).click()
        res = self.finds(locator=_elem['搜索结果审核状态'])[0].text
        return res

    def agree_audit(self):
        """
        Approve the first item under review.
        :return: audit state
        """
        self.log_name = 'Content.agree_audit'
        _elem = {
            '内容选中': 'input[name="_selected_action"]',        # row checkbox
            '操作框': 'select[name="action"]',                   # action dropdown
            '通过审核': 'option[value="make_published"]',        # approve option
            'GO': 'button[title = "Run the selected action"]',
            '搜索结果审核状态': 'td.field-state',                # audit state in the results table
        }
        self.finds(locator=_elem['内容选中'])[0].click()
        self.find(locator=_elem['操作框']).click()
        self.find(locator=_elem['通过审核']).click()
        self.find(locator=_elem['GO']).click()
        res = self.finds(locator=_elem['搜索结果审核状态'])[0].text
        return res

    def preview(self, audit_state):
        """
        Preview.
        :param audit_state: audit state
        :return: True if the preview page title matches the expected title
        """
        self.log_name = 'Content.preview'
        _elem = {
            '标题': 'td.field-title',                            # title cell
            '话题': 'td.field-topic',                            # topic cell
            '审核状态': 'span[title="审核状态"]',                # audit-state dropdown
            '状态选择': 'li.select2-results__option',            # state option
            '搜索按钮': '//button[text()="Search"]',             # search button
            '预览': '//a[text()="预览"]',                        # preview link
            '头像': 'img._2MPl-uOjTQqEjiB7-jYa9c',               # avatar
            '当前内容无法查看': '//h2[text()="当前内容无法查看~"]'  # "content not viewable" banner
        }
        self.find(locator=_elem['审核状态']).click()
        self.finds(locator=_elem['状态选择'])[audit_state].click()
        self.find(By.XPATH, _elem['搜索按钮']).click()
        if inspect.stack()[1][3] == 'test_topic_discuss_preview':
            exc_title = '呼啦宝贝-' + self.finds(locator=_elem['话题'])[0].text  # expected page title
        else:
            exc_title = '呼啦宝贝-' + self.finds(locator=_elem['标题'])[0].text  # expected page title
        self.finds(by=By.XPATH, locator=_elem['预览'])[0].click()  # opens a new tab
        self.driver.switch_to.window(self.driver.window_handles[-1])  # switch to the new tab
        res_title = self.driver.title  # page title of the preview
        self.driver.switch_to.window(self.driver.window_handles[0])  # switch back to the first tab
        return exc_title == res_title
|
[
"import inspect\nfrom common.base_page import BasePage\nfrom selenium.webdriver.common.by import By\n\n\nclass Content(BasePage):\n\n def search(self, audit_state: int = None, shelve_state: int = None):\n \"\"\"\n 搜索\n :param audit_state: 2 正常,4 等待手动审核,7 手动审核拒绝\n :param shelve_state: 2 上架,3 即将上架,4 已下架\n :return: 审核状态\n \"\"\"\n self.log_name = 'Content.search'\n _elem = {\n '审核状态': 'span[title=\"审核状态\"]',\n '上架状态': 'span[title=\"上架状态\"]',\n '状态选择': 'li.select2-results__option',\n '搜索按钮': '//button[text()=\"Search\"]',\n '搜索结果审核状态': 'td.field-state',\n '搜索结果上架状态': 'td.field-upshelve_status',\n }\n self.find(locator=_elem['审核状态']).click()\n self.finds(locator=_elem['状态选择'])[audit_state].click()\n if inspect.stack()[1][3] != 'test_topic_discuss_search':\n self.find(locator=_elem['上架状态']).click()\n self.finds(locator=_elem['状态选择'])[shelve_state].click()\n self.find(By.XPATH, _elem['搜索按钮']).click()\n try:\n res = self.finds(locator=_elem['搜索结果审核状态'])[0].text\n except TypeError:\n return None\n else:\n return res\n\n def refuse_audit(self):\n \"\"\"\n 拒绝审核第一篇\n :return: 审核状态\n \"\"\"\n self.log_name = 'Content.refuse_audit'\n _elem = {\n '内容选中': 'input[name=\"_selected_action\"]',\n '操作框': 'select[name=\"action\"]',\n '拒绝审核': 'option[value=\"reject_published\"]',\n 'GO': 'button[title = \"Run the selected action\"]',\n '搜索结果审核状态': 'td.field-state',\n }\n self.finds(locator=_elem['内容选中'])[0].click()\n self.find(locator=_elem['操作框']).click()\n self.find(locator=_elem['拒绝审核']).click()\n self.find(locator=_elem['GO']).click()\n res = self.finds(locator=_elem['搜索结果审核状态'])[0].text\n return res\n\n def agree_audit(self):\n \"\"\"\n 通过审核第一篇\n :return: 审核状态\n \"\"\"\n self.log_name = 'Content.agree_audit'\n _elem = {\n '内容选中': 'input[name=\"_selected_action\"]',\n '操作框': 'select[name=\"action\"]',\n '通过审核': 'option[value=\"make_published\"]',\n 'GO': 'button[title = \"Run the selected action\"]',\n '搜索结果审核状态': 'td.field-state',\n }\n self.finds(locator=_elem['内容选中'])[0].click()\n self.find(locator=_elem['操作框']).click()\n self.find(locator=_elem['通过审核']).click()\n self.find(locator=_elem['GO']).click()\n res = self.finds(locator=_elem['搜索结果审核状态'])[0].text\n return res\n\n def preview(self, audit_state):\n \"\"\"\n 预览\n :param audit_state: 审核状态\n :return:\n \"\"\"\n self.log_name = 'Content.preview'\n _elem = {\n '标题': 'td.field-title',\n '话题': 'td.field-topic',\n '审核状态': 'span[title=\"审核状态\"]',\n '状态选择': 'li.select2-results__option',\n '搜索按钮': '//button[text()=\"Search\"]',\n '预览': '//a[text()=\"预览\"]',\n '头像': 'img._2MPl-uOjTQqEjiB7-jYa9c',\n '当前内容无法查看': '//h2[text()=\"当前内容无法查看~\"]'\n }\n self.find(locator=_elem['审核状态']).click()\n self.finds(locator=_elem['状态选择'])[audit_state].click()\n self.find(By.XPATH, _elem['搜索按钮']).click()\n if inspect.stack()[1][3] == 'test_topic_discuss_preview':\n exc_title = '呼啦宝贝-' + self.finds(locator=_elem['话题'])[0].text # 获取当前内容title\n else:\n exc_title = '呼啦宝贝-' + self.finds(locator=_elem['标题'])[0].text # 获取当前内容title\n self.finds(by=By.XPATH, locator=_elem['预览'])[0].click() # 打开了新的标签页\n self.driver.switch_to.window(self.driver.window_handles[-1]) # 切换到第2个标签页\n res_title = self.driver.title # 获取页面title\n self.driver.switch_to.window(self.driver.window_handles[0]) # 切换到第1个标签页\n return exc_title == res_title\n",
"import inspect\nfrom common.base_page import BasePage\nfrom selenium.webdriver.common.by import By\n\n\nclass Content(BasePage):\n\n def search(self, audit_state: int=None, shelve_state: int=None):\n \"\"\"\n 搜索\n :param audit_state: 2 正常,4 等待手动审核,7 手动审核拒绝\n :param shelve_state: 2 上架,3 即将上架,4 已下架\n :return: 审核状态\n \"\"\"\n self.log_name = 'Content.search'\n _elem = {'审核状态': 'span[title=\"审核状态\"]', '上架状态': 'span[title=\"上架状态\"]',\n '状态选择': 'li.select2-results__option', '搜索按钮':\n '//button[text()=\"Search\"]', '搜索结果审核状态': 'td.field-state',\n '搜索结果上架状态': 'td.field-upshelve_status'}\n self.find(locator=_elem['审核状态']).click()\n self.finds(locator=_elem['状态选择'])[audit_state].click()\n if inspect.stack()[1][3] != 'test_topic_discuss_search':\n self.find(locator=_elem['上架状态']).click()\n self.finds(locator=_elem['状态选择'])[shelve_state].click()\n self.find(By.XPATH, _elem['搜索按钮']).click()\n try:\n res = self.finds(locator=_elem['搜索结果审核状态'])[0].text\n except TypeError:\n return None\n else:\n return res\n\n def refuse_audit(self):\n \"\"\"\n 拒绝审核第一篇\n :return: 审核状态\n \"\"\"\n self.log_name = 'Content.refuse_audit'\n _elem = {'内容选中': 'input[name=\"_selected_action\"]', '操作框':\n 'select[name=\"action\"]', '拒绝审核':\n 'option[value=\"reject_published\"]', 'GO':\n 'button[title = \"Run the selected action\"]', '搜索结果审核状态':\n 'td.field-state'}\n self.finds(locator=_elem['内容选中'])[0].click()\n self.find(locator=_elem['操作框']).click()\n self.find(locator=_elem['拒绝审核']).click()\n self.find(locator=_elem['GO']).click()\n res = self.finds(locator=_elem['搜索结果审核状态'])[0].text\n return res\n\n def agree_audit(self):\n \"\"\"\n 通过审核第一篇\n :return: 审核状态\n \"\"\"\n self.log_name = 'Content.agree_audit'\n _elem = {'内容选中': 'input[name=\"_selected_action\"]', '操作框':\n 'select[name=\"action\"]', '通过审核':\n 'option[value=\"make_published\"]', 'GO':\n 'button[title = \"Run the selected action\"]', '搜索结果审核状态':\n 'td.field-state'}\n self.finds(locator=_elem['内容选中'])[0].click()\n self.find(locator=_elem['操作框']).click()\n self.find(locator=_elem['通过审核']).click()\n self.find(locator=_elem['GO']).click()\n res = self.finds(locator=_elem['搜索结果审核状态'])[0].text\n return res\n\n def preview(self, audit_state):\n \"\"\"\n 预览\n :param audit_state: 审核状态\n :return:\n \"\"\"\n self.log_name = 'Content.preview'\n _elem = {'标题': 'td.field-title', '话题': 'td.field-topic', '审核状态':\n 'span[title=\"审核状态\"]', '状态选择': 'li.select2-results__option',\n '搜索按钮': '//button[text()=\"Search\"]', '预览': '//a[text()=\"预览\"]',\n '头像': 'img._2MPl-uOjTQqEjiB7-jYa9c', '当前内容无法查看':\n '//h2[text()=\"当前内容无法查看~\"]'}\n self.find(locator=_elem['审核状态']).click()\n self.finds(locator=_elem['状态选择'])[audit_state].click()\n self.find(By.XPATH, _elem['搜索按钮']).click()\n if inspect.stack()[1][3] == 'test_topic_discuss_preview':\n exc_title = '呼啦宝贝-' + self.finds(locator=_elem['话题'])[0].text\n else:\n exc_title = '呼啦宝贝-' + self.finds(locator=_elem['标题'])[0].text\n self.finds(by=By.XPATH, locator=_elem['预览'])[0].click()\n self.driver.switch_to.window(self.driver.window_handles[-1])\n res_title = self.driver.title\n self.driver.switch_to.window(self.driver.window_handles[0])\n return exc_title == res_title\n",
"<import token>\n\n\nclass Content(BasePage):\n\n def search(self, audit_state: int=None, shelve_state: int=None):\n \"\"\"\n 搜索\n :param audit_state: 2 正常,4 等待手动审核,7 手动审核拒绝\n :param shelve_state: 2 上架,3 即将上架,4 已下架\n :return: 审核状态\n \"\"\"\n self.log_name = 'Content.search'\n _elem = {'审核状态': 'span[title=\"审核状态\"]', '上架状态': 'span[title=\"上架状态\"]',\n '状态选择': 'li.select2-results__option', '搜索按钮':\n '//button[text()=\"Search\"]', '搜索结果审核状态': 'td.field-state',\n '搜索结果上架状态': 'td.field-upshelve_status'}\n self.find(locator=_elem['审核状态']).click()\n self.finds(locator=_elem['状态选择'])[audit_state].click()\n if inspect.stack()[1][3] != 'test_topic_discuss_search':\n self.find(locator=_elem['上架状态']).click()\n self.finds(locator=_elem['状态选择'])[shelve_state].click()\n self.find(By.XPATH, _elem['搜索按钮']).click()\n try:\n res = self.finds(locator=_elem['搜索结果审核状态'])[0].text\n except TypeError:\n return None\n else:\n return res\n\n def refuse_audit(self):\n \"\"\"\n 拒绝审核第一篇\n :return: 审核状态\n \"\"\"\n self.log_name = 'Content.refuse_audit'\n _elem = {'内容选中': 'input[name=\"_selected_action\"]', '操作框':\n 'select[name=\"action\"]', '拒绝审核':\n 'option[value=\"reject_published\"]', 'GO':\n 'button[title = \"Run the selected action\"]', '搜索结果审核状态':\n 'td.field-state'}\n self.finds(locator=_elem['内容选中'])[0].click()\n self.find(locator=_elem['操作框']).click()\n self.find(locator=_elem['拒绝审核']).click()\n self.find(locator=_elem['GO']).click()\n res = self.finds(locator=_elem['搜索结果审核状态'])[0].text\n return res\n\n def agree_audit(self):\n \"\"\"\n 通过审核第一篇\n :return: 审核状态\n \"\"\"\n self.log_name = 'Content.agree_audit'\n _elem = {'内容选中': 'input[name=\"_selected_action\"]', '操作框':\n 'select[name=\"action\"]', '通过审核':\n 'option[value=\"make_published\"]', 'GO':\n 'button[title = \"Run the selected action\"]', '搜索结果审核状态':\n 'td.field-state'}\n self.finds(locator=_elem['内容选中'])[0].click()\n self.find(locator=_elem['操作框']).click()\n self.find(locator=_elem['通过审核']).click()\n self.find(locator=_elem['GO']).click()\n res = self.finds(locator=_elem['搜索结果审核状态'])[0].text\n return res\n\n def preview(self, audit_state):\n \"\"\"\n 预览\n :param audit_state: 审核状态\n :return:\n \"\"\"\n self.log_name = 'Content.preview'\n _elem = {'标题': 'td.field-title', '话题': 'td.field-topic', '审核状态':\n 'span[title=\"审核状态\"]', '状态选择': 'li.select2-results__option',\n '搜索按钮': '//button[text()=\"Search\"]', '预览': '//a[text()=\"预览\"]',\n '头像': 'img._2MPl-uOjTQqEjiB7-jYa9c', '当前内容无法查看':\n '//h2[text()=\"当前内容无法查看~\"]'}\n self.find(locator=_elem['审核状态']).click()\n self.finds(locator=_elem['状态选择'])[audit_state].click()\n self.find(By.XPATH, _elem['搜索按钮']).click()\n if inspect.stack()[1][3] == 'test_topic_discuss_preview':\n exc_title = '呼啦宝贝-' + self.finds(locator=_elem['话题'])[0].text\n else:\n exc_title = '呼啦宝贝-' + self.finds(locator=_elem['标题'])[0].text\n self.finds(by=By.XPATH, locator=_elem['预览'])[0].click()\n self.driver.switch_to.window(self.driver.window_handles[-1])\n res_title = self.driver.title\n self.driver.switch_to.window(self.driver.window_handles[0])\n return exc_title == res_title\n",
"<import token>\n\n\nclass Content(BasePage):\n\n def search(self, audit_state: int=None, shelve_state: int=None):\n \"\"\"\n 搜索\n :param audit_state: 2 正常,4 等待手动审核,7 手动审核拒绝\n :param shelve_state: 2 上架,3 即将上架,4 已下架\n :return: 审核状态\n \"\"\"\n self.log_name = 'Content.search'\n _elem = {'审核状态': 'span[title=\"审核状态\"]', '上架状态': 'span[title=\"上架状态\"]',\n '状态选择': 'li.select2-results__option', '搜索按钮':\n '//button[text()=\"Search\"]', '搜索结果审核状态': 'td.field-state',\n '搜索结果上架状态': 'td.field-upshelve_status'}\n self.find(locator=_elem['审核状态']).click()\n self.finds(locator=_elem['状态选择'])[audit_state].click()\n if inspect.stack()[1][3] != 'test_topic_discuss_search':\n self.find(locator=_elem['上架状态']).click()\n self.finds(locator=_elem['状态选择'])[shelve_state].click()\n self.find(By.XPATH, _elem['搜索按钮']).click()\n try:\n res = self.finds(locator=_elem['搜索结果审核状态'])[0].text\n except TypeError:\n return None\n else:\n return res\n <function token>\n\n def agree_audit(self):\n \"\"\"\n 通过审核第一篇\n :return: 审核状态\n \"\"\"\n self.log_name = 'Content.agree_audit'\n _elem = {'内容选中': 'input[name=\"_selected_action\"]', '操作框':\n 'select[name=\"action\"]', '通过审核':\n 'option[value=\"make_published\"]', 'GO':\n 'button[title = \"Run the selected action\"]', '搜索结果审核状态':\n 'td.field-state'}\n self.finds(locator=_elem['内容选中'])[0].click()\n self.find(locator=_elem['操作框']).click()\n self.find(locator=_elem['通过审核']).click()\n self.find(locator=_elem['GO']).click()\n res = self.finds(locator=_elem['搜索结果审核状态'])[0].text\n return res\n\n def preview(self, audit_state):\n \"\"\"\n 预览\n :param audit_state: 审核状态\n :return:\n \"\"\"\n self.log_name = 'Content.preview'\n _elem = {'标题': 'td.field-title', '话题': 'td.field-topic', '审核状态':\n 'span[title=\"审核状态\"]', '状态选择': 'li.select2-results__option',\n '搜索按钮': '//button[text()=\"Search\"]', '预览': '//a[text()=\"预览\"]',\n '头像': 'img._2MPl-uOjTQqEjiB7-jYa9c', '当前内容无法查看':\n '//h2[text()=\"当前内容无法查看~\"]'}\n self.find(locator=_elem['审核状态']).click()\n self.finds(locator=_elem['状态选择'])[audit_state].click()\n self.find(By.XPATH, _elem['搜索按钮']).click()\n if inspect.stack()[1][3] == 'test_topic_discuss_preview':\n exc_title = '呼啦宝贝-' + self.finds(locator=_elem['话题'])[0].text\n else:\n exc_title = '呼啦宝贝-' + self.finds(locator=_elem['标题'])[0].text\n self.finds(by=By.XPATH, locator=_elem['预览'])[0].click()\n self.driver.switch_to.window(self.driver.window_handles[-1])\n res_title = self.driver.title\n self.driver.switch_to.window(self.driver.window_handles[0])\n return exc_title == res_title\n",
"<import token>\n\n\nclass Content(BasePage):\n\n def search(self, audit_state: int=None, shelve_state: int=None):\n \"\"\"\n 搜索\n :param audit_state: 2 正常,4 等待手动审核,7 手动审核拒绝\n :param shelve_state: 2 上架,3 即将上架,4 已下架\n :return: 审核状态\n \"\"\"\n self.log_name = 'Content.search'\n _elem = {'审核状态': 'span[title=\"审核状态\"]', '上架状态': 'span[title=\"上架状态\"]',\n '状态选择': 'li.select2-results__option', '搜索按钮':\n '//button[text()=\"Search\"]', '搜索结果审核状态': 'td.field-state',\n '搜索结果上架状态': 'td.field-upshelve_status'}\n self.find(locator=_elem['审核状态']).click()\n self.finds(locator=_elem['状态选择'])[audit_state].click()\n if inspect.stack()[1][3] != 'test_topic_discuss_search':\n self.find(locator=_elem['上架状态']).click()\n self.finds(locator=_elem['状态选择'])[shelve_state].click()\n self.find(By.XPATH, _elem['搜索按钮']).click()\n try:\n res = self.finds(locator=_elem['搜索结果审核状态'])[0].text\n except TypeError:\n return None\n else:\n return res\n <function token>\n\n def agree_audit(self):\n \"\"\"\n 通过审核第一篇\n :return: 审核状态\n \"\"\"\n self.log_name = 'Content.agree_audit'\n _elem = {'内容选中': 'input[name=\"_selected_action\"]', '操作框':\n 'select[name=\"action\"]', '通过审核':\n 'option[value=\"make_published\"]', 'GO':\n 'button[title = \"Run the selected action\"]', '搜索结果审核状态':\n 'td.field-state'}\n self.finds(locator=_elem['内容选中'])[0].click()\n self.find(locator=_elem['操作框']).click()\n self.find(locator=_elem['通过审核']).click()\n self.find(locator=_elem['GO']).click()\n res = self.finds(locator=_elem['搜索结果审核状态'])[0].text\n return res\n <function token>\n",
"<import token>\n\n\nclass Content(BasePage):\n\n def search(self, audit_state: int=None, shelve_state: int=None):\n \"\"\"\n 搜索\n :param audit_state: 2 正常,4 等待手动审核,7 手动审核拒绝\n :param shelve_state: 2 上架,3 即将上架,4 已下架\n :return: 审核状态\n \"\"\"\n self.log_name = 'Content.search'\n _elem = {'审核状态': 'span[title=\"审核状态\"]', '上架状态': 'span[title=\"上架状态\"]',\n '状态选择': 'li.select2-results__option', '搜索按钮':\n '//button[text()=\"Search\"]', '搜索结果审核状态': 'td.field-state',\n '搜索结果上架状态': 'td.field-upshelve_status'}\n self.find(locator=_elem['审核状态']).click()\n self.finds(locator=_elem['状态选择'])[audit_state].click()\n if inspect.stack()[1][3] != 'test_topic_discuss_search':\n self.find(locator=_elem['上架状态']).click()\n self.finds(locator=_elem['状态选择'])[shelve_state].click()\n self.find(By.XPATH, _elem['搜索按钮']).click()\n try:\n res = self.finds(locator=_elem['搜索结果审核状态'])[0].text\n except TypeError:\n return None\n else:\n return res\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\nclass Content(BasePage):\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
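The page object above branches on `inspect.stack()[1][3]`, i.e. the name of the calling test function. A minimal, self-contained demonstration of that trick (the function names here are illustrative): index 1 is the caller's frame record, and index 3 of a frame record is the function name.

```python
import inspect

def page_action():
    caller = inspect.stack()[1][3]  # name of the calling function
    return f'called from {caller}'

def test_topic_discuss_search():
    return page_action()

print(test_topic_discuss_search())  # called from test_topic_discuss_search
```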
99,302 | d1d312dc34751afd9fc5c47daeb7344376af6812 |
"""
实现一个 MyCalendar 类来存放你的日程安排。如果要添加的时间内没有其他安排,则可以存储这个新的日程安排。
MyCalendar 有一个 book(int start, int end)方法。它意味着在 start 到 end 时间内增加一个日程安排,注意,这里的时间是半开区间,即 [start, end), 实数 x 的范围为, start <= x < end。
当两个日程安排有一些时间上的交叉时(例如两个日程安排都在同一时间内),就会产生重复预订。
每次调用 MyCalendar.book方法时,如果可以将日程安排成功添加到日历中而不会导致重复预订,返回 true。否则,返回 false 并且不要将该日程安排添加到日历中。
请按照以下步骤调用 MyCalendar 类: MyCalendar cal = new MyCalendar(); MyCalendar.book(start, end)
示例 1:
MyCalendar();
MyCalendar.book(10, 20); // returns true
MyCalendar.book(15, 25); // returns false
MyCalendar.book(20, 30); // returns true
解释:
第一个日程安排可以添加到日历中. 第二个日程安排不能添加到日历中,因为时间 15 已经被第一个日程安排预定了。
第三个日程安排可以添加到日历中,因为第一个日程安排并不包含时间 20 。
"""
# 2021.03.10 处理时间冲突,我是一流的
class MyCalendar:
def __init__(self):
self.books = []
def book(self, start: int, end: int) -> bool:
for i in self.books:
if end > i[0] and start < i[1]:
return False
self.books.append([start, end])
return True
# Your MyCalendar object will be instantiated and called as such:
# obj = MyCalendar()
# param_1 = obj.book(start,end)
# 2021.03.10 平衡树
# 反正我是没看懂
class Node:
__slots__ = 'start', 'end', 'left', 'right'
def __init__(self, start, end):
self.start = start
self.end = end
self.left = self.right = None
def insert(self, node):
if node.start >= self.end:
if not self.right:
self.right = node
return True
return self.right.insert(node)
elif node.end <= self.start:
if not self.left:
self.left = node
return True
return self.left.insert(node)
else:
return False
class MyCalendar2(object):
def __init__(self):
self.root = None
def book(self, start, end):
if self.root is None:
self.root = Node(start, end)
return True
return self.root.insert(Node(start, end))
|
[
"\"\"\"\n\n实现一个 MyCalendar 类来存放你的日程安排。如果要添加的时间内没有其他安排,则可以存储这个新的日程安排。\n\nMyCalendar 有一个 book(int start, int end)方法。它意味着在 start 到 end 时间内增加一个日程安排,注意,这里的时间是半开区间,即 [start, end), 实数 x 的范围为, start <= x < end。\n\n当两个日程安排有一些时间上的交叉时(例如两个日程安排都在同一时间内),就会产生重复预订。\n\n每次调用 MyCalendar.book方法时,如果可以将日程安排成功添加到日历中而不会导致重复预订,返回 true。否则,返回 false 并且不要将该日程安排添加到日历中。\n\n请按照以下步骤调用 MyCalendar 类: MyCalendar cal = new MyCalendar(); MyCalendar.book(start, end)\n\n示例 1:\n\nMyCalendar();\nMyCalendar.book(10, 20); // returns true\nMyCalendar.book(15, 25); // returns false\nMyCalendar.book(20, 30); // returns true\n解释: \n第一个日程安排可以添加到日历中. 第二个日程安排不能添加到日历中,因为时间 15 已经被第一个日程安排预定了。\n第三个日程安排可以添加到日历中,因为第一个日程安排并不包含时间 20 。\n\"\"\"\n\n# 2021.03.10 处理时间冲突,我是一流的\nclass MyCalendar:\n\n def __init__(self):\n self.books = []\n\n def book(self, start: int, end: int) -> bool:\n for i in self.books:\n if end > i[0] and start < i[1]:\n return False \n self.books.append([start, end])\n return True\n\n# Your MyCalendar object will be instantiated and called as such:\n# obj = MyCalendar()\n# param_1 = obj.book(start,end)\n\n\n# 2021.03.10 平衡树\n# 反正我是没看懂\nclass Node:\n __slots__ = 'start', 'end', 'left', 'right'\n def __init__(self, start, end):\n self.start = start\n self.end = end\n self.left = self.right = None\n\n def insert(self, node):\n if node.start >= self.end:\n if not self.right:\n self.right = node\n return True\n return self.right.insert(node)\n elif node.end <= self.start:\n if not self.left:\n self.left = node\n return True\n return self.left.insert(node)\n else:\n return False\n\nclass MyCalendar2(object):\n def __init__(self):\n self.root = None\n\n def book(self, start, end):\n if self.root is None:\n self.root = Node(start, end)\n return True\n return self.root.insert(Node(start, end))\n",
"<docstring token>\n\n\nclass MyCalendar:\n\n def __init__(self):\n self.books = []\n\n def book(self, start: int, end: int) ->bool:\n for i in self.books:\n if end > i[0] and start < i[1]:\n return False\n self.books.append([start, end])\n return True\n\n\nclass Node:\n __slots__ = 'start', 'end', 'left', 'right'\n\n def __init__(self, start, end):\n self.start = start\n self.end = end\n self.left = self.right = None\n\n def insert(self, node):\n if node.start >= self.end:\n if not self.right:\n self.right = node\n return True\n return self.right.insert(node)\n elif node.end <= self.start:\n if not self.left:\n self.left = node\n return True\n return self.left.insert(node)\n else:\n return False\n\n\nclass MyCalendar2(object):\n\n def __init__(self):\n self.root = None\n\n def book(self, start, end):\n if self.root is None:\n self.root = Node(start, end)\n return True\n return self.root.insert(Node(start, end))\n",
"<docstring token>\n\n\nclass MyCalendar:\n\n def __init__(self):\n self.books = []\n <function token>\n\n\nclass Node:\n __slots__ = 'start', 'end', 'left', 'right'\n\n def __init__(self, start, end):\n self.start = start\n self.end = end\n self.left = self.right = None\n\n def insert(self, node):\n if node.start >= self.end:\n if not self.right:\n self.right = node\n return True\n return self.right.insert(node)\n elif node.end <= self.start:\n if not self.left:\n self.left = node\n return True\n return self.left.insert(node)\n else:\n return False\n\n\nclass MyCalendar2(object):\n\n def __init__(self):\n self.root = None\n\n def book(self, start, end):\n if self.root is None:\n self.root = Node(start, end)\n return True\n return self.root.insert(Node(start, end))\n",
"<docstring token>\n\n\nclass MyCalendar:\n <function token>\n <function token>\n\n\nclass Node:\n __slots__ = 'start', 'end', 'left', 'right'\n\n def __init__(self, start, end):\n self.start = start\n self.end = end\n self.left = self.right = None\n\n def insert(self, node):\n if node.start >= self.end:\n if not self.right:\n self.right = node\n return True\n return self.right.insert(node)\n elif node.end <= self.start:\n if not self.left:\n self.left = node\n return True\n return self.left.insert(node)\n else:\n return False\n\n\nclass MyCalendar2(object):\n\n def __init__(self):\n self.root = None\n\n def book(self, start, end):\n if self.root is None:\n self.root = Node(start, end)\n return True\n return self.root.insert(Node(start, end))\n",
"<docstring token>\n<class token>\n\n\nclass Node:\n __slots__ = 'start', 'end', 'left', 'right'\n\n def __init__(self, start, end):\n self.start = start\n self.end = end\n self.left = self.right = None\n\n def insert(self, node):\n if node.start >= self.end:\n if not self.right:\n self.right = node\n return True\n return self.right.insert(node)\n elif node.end <= self.start:\n if not self.left:\n self.left = node\n return True\n return self.left.insert(node)\n else:\n return False\n\n\nclass MyCalendar2(object):\n\n def __init__(self):\n self.root = None\n\n def book(self, start, end):\n if self.root is None:\n self.root = Node(start, end)\n return True\n return self.root.insert(Node(start, end))\n",
"<docstring token>\n<class token>\n\n\nclass Node:\n <assignment token>\n\n def __init__(self, start, end):\n self.start = start\n self.end = end\n self.left = self.right = None\n\n def insert(self, node):\n if node.start >= self.end:\n if not self.right:\n self.right = node\n return True\n return self.right.insert(node)\n elif node.end <= self.start:\n if not self.left:\n self.left = node\n return True\n return self.left.insert(node)\n else:\n return False\n\n\nclass MyCalendar2(object):\n\n def __init__(self):\n self.root = None\n\n def book(self, start, end):\n if self.root is None:\n self.root = Node(start, end)\n return True\n return self.root.insert(Node(start, end))\n",
"<docstring token>\n<class token>\n\n\nclass Node:\n <assignment token>\n <function token>\n\n def insert(self, node):\n if node.start >= self.end:\n if not self.right:\n self.right = node\n return True\n return self.right.insert(node)\n elif node.end <= self.start:\n if not self.left:\n self.left = node\n return True\n return self.left.insert(node)\n else:\n return False\n\n\nclass MyCalendar2(object):\n\n def __init__(self):\n self.root = None\n\n def book(self, start, end):\n if self.root is None:\n self.root = Node(start, end)\n return True\n return self.root.insert(Node(start, end))\n",
"<docstring token>\n<class token>\n\n\nclass Node:\n <assignment token>\n <function token>\n <function token>\n\n\nclass MyCalendar2(object):\n\n def __init__(self):\n self.root = None\n\n def book(self, start, end):\n if self.root is None:\n self.root = Node(start, end)\n return True\n return self.root.insert(Node(start, end))\n",
"<docstring token>\n<class token>\n<class token>\n\n\nclass MyCalendar2(object):\n\n def __init__(self):\n self.root = None\n\n def book(self, start, end):\n if self.root is None:\n self.root = Node(start, end)\n return True\n return self.root.insert(Node(start, end))\n",
"<docstring token>\n<class token>\n<class token>\n\n\nclass MyCalendar2(object):\n <function token>\n\n def book(self, start, end):\n if self.root is None:\n self.root = Node(start, end)\n return True\n return self.root.insert(Node(start, end))\n",
"<docstring token>\n<class token>\n<class token>\n\n\nclass MyCalendar2(object):\n <function token>\n <function token>\n",
"<docstring token>\n<class token>\n<class token>\n<class token>\n"
] | false |
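The first solution scans every stored interval, so each booking is O(n); the tree in the second does no rebalancing, so its worst case also degrades to O(n) per booking. An alternative sketch (my own variant, not the author's code) keeps the disjoint intervals in sorted lists and uses the stdlib bisect module, giving an O(log n) search plus an O(n) list insert:

```python
import bisect

class MyCalendarBisect:
    """Disjoint intervals kept sorted by start; binary-search the insert point."""

    def __init__(self):
        self.starts = []
        self.ends = []

    def book(self, start: int, end: int) -> bool:
        i = bisect.bisect_right(self.starts, start)
        # previous interval must end at or before `start`
        if i > 0 and self.ends[i - 1] > start:
            return False
        # next interval must begin at or after `end`
        if i < len(self.starts) and self.starts[i] < end:
            return False
        self.starts.insert(i, start)
        self.ends.insert(i, end)
        return True

cal = MyCalendarBisect()
assert cal.book(10, 20) is True
assert cal.book(15, 25) is False
assert cal.book(20, 30) is True
```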
99,303 | e36bb110da59364a82e8a123e3a440ab2a200d40 |
#!/usr/bin/env python3
# encoding=utf8
import argparse
import os
from time import time
from zipfile import ZipFile, BadZipFile
import shutil
from mylib.ext.ostk import clipboard as cb
from mylib.easy import ez_thread_factory
from mylib.__deprecated__ import list_files
from queue import Queue

ap = argparse.ArgumentParser()
ap.add_argument('-s', '--src', nargs='*')
ap.add_argument('-d', '--dest-dir')
ap.add_argument('-r', '--recursive')  # takes a value; store_true would be more typical
args = ap.parse_args()

src = args.src
dest = args.dest_dir
recursive = args.recursive

print(f'-> {dest}')
q = Queue()


def progress():
    # single-line progress display, throttled to one repaint per 0.2 s
    w = shutil.get_terminal_size()[0] - 1
    m = (w - 5) // 4
    t0 = time()
    while True:
        p = q.get()
        if p is None:  # sentinel: stop the progress thread
            break
        ps = f'{" " * w}\r{p[:m]} ... {p[-m:]}'
        t1 = time()
        if t1 - t0 > 0.2:
            print(ps, end='\r')
            t0 = t1


t = ez_thread_factory(daemon=True)(progress)
t.start()  # start() runs progress() in the background; run() would block this thread
files_l = list_files(src or cb, recursive=recursive, progress_queue=q)
x, y, z = 0, 0, 0  # moved archives, valid zips, total files
print()
for fp in files_l:
    z += 1
    try:
        zf = ZipFile(fp)
    except BadZipFile:
        continue
    y += 1
    for f in zf.namelist():
        if f.endswith('.webp'):
            break
    else:
        # no .webp member found: move this archive to the destination dir
        zf.close()
        dfp = os.path.join(dest, os.path.split(fp)[-1])
        shutil.move(fp, dfp)
        x += 1
        print(f'* {fp}')
    print(f'| {x} | {y} | {z} |', end='\r')
|
[
"#!/usr/bin/env python3\n# encoding=utf8\nimport argparse\nimport os\nfrom time import time\nfrom zipfile import ZipFile, BadZipFile\nimport shutil\nfrom mylib.ext.ostk import clipboard as cb\nfrom mylib.easy import ez_thread_factory\nfrom mylib.__deprecated__ import list_files\nfrom queue import Queue\n\nap = argparse.ArgumentParser()\nap.add_argument('-s', '--src', nargs='*')\nap.add_argument('-d', '--dest-dir')\nap.add_argument('-r', '--recursive')\nargs = ap.parse_args()\n\nsrc = args.src\ndest = args.dest_dir\nrecursive = args.recursive\n\nprint(f'-> {dest}')\nq = Queue()\n\n\ndef progress():\n w = shutil.get_terminal_size()[0] - 1\n m = (w - 5) // 4\n t0 = time()\n while True:\n p = q.get()\n if p is None:\n break\n ps = f'{\" \" * w}\\r{p[:m]} ... {p[-m:]}'\n t1 = time()\n if t1 - t0 > 0.2:\n print(ps, end='\\r')\n t0 = t1\n\n\nt = ez_thread_factory(daemon=True)(progress)\nt.run()\nfiles_l = list_files(src or cb, recursive=recursive, progress_queue=q)\nx, y, z = 0, 0, 0\nprint()\nfor fp in files_l:\n z += 1\n try:\n zf = ZipFile(fp)\n except BadZipFile:\n continue\n y += 1\n for f in zf.namelist():\n if f.endswith('.webp'):\n break\n else:\n zf.close()\n dfp = os.path.join(dest, os.path.split(fp)[-1])\n shutil.move(fp, dfp)\n x += 1\n print(f'* {fp}')\n print(f'| {x} | {y} | {z} |', end='\\r')\n",
"import argparse\nimport os\nfrom time import time\nfrom zipfile import ZipFile, BadZipFile\nimport shutil\nfrom mylib.ext.ostk import clipboard as cb\nfrom mylib.easy import ez_thread_factory\nfrom mylib.__deprecated__ import list_files\nfrom queue import Queue\nap = argparse.ArgumentParser()\nap.add_argument('-s', '--src', nargs='*')\nap.add_argument('-d', '--dest-dir')\nap.add_argument('-r', '--recursive')\nargs = ap.parse_args()\nsrc = args.src\ndest = args.dest_dir\nrecursive = args.recursive\nprint(f'-> {dest}')\nq = Queue()\n\n\ndef progress():\n w = shutil.get_terminal_size()[0] - 1\n m = (w - 5) // 4\n t0 = time()\n while True:\n p = q.get()\n if p is None:\n break\n ps = f\"{' ' * w}\\r{p[:m]} ... {p[-m:]}\"\n t1 = time()\n if t1 - t0 > 0.2:\n print(ps, end='\\r')\n t0 = t1\n\n\nt = ez_thread_factory(daemon=True)(progress)\nt.run()\nfiles_l = list_files(src or cb, recursive=recursive, progress_queue=q)\nx, y, z = 0, 0, 0\nprint()\nfor fp in files_l:\n z += 1\n try:\n zf = ZipFile(fp)\n except BadZipFile:\n continue\n y += 1\n for f in zf.namelist():\n if f.endswith('.webp'):\n break\n else:\n zf.close()\n dfp = os.path.join(dest, os.path.split(fp)[-1])\n shutil.move(fp, dfp)\n x += 1\n print(f'* {fp}')\n print(f'| {x} | {y} | {z} |', end='\\r')\n",
"<import token>\nap = argparse.ArgumentParser()\nap.add_argument('-s', '--src', nargs='*')\nap.add_argument('-d', '--dest-dir')\nap.add_argument('-r', '--recursive')\nargs = ap.parse_args()\nsrc = args.src\ndest = args.dest_dir\nrecursive = args.recursive\nprint(f'-> {dest}')\nq = Queue()\n\n\ndef progress():\n w = shutil.get_terminal_size()[0] - 1\n m = (w - 5) // 4\n t0 = time()\n while True:\n p = q.get()\n if p is None:\n break\n ps = f\"{' ' * w}\\r{p[:m]} ... {p[-m:]}\"\n t1 = time()\n if t1 - t0 > 0.2:\n print(ps, end='\\r')\n t0 = t1\n\n\nt = ez_thread_factory(daemon=True)(progress)\nt.run()\nfiles_l = list_files(src or cb, recursive=recursive, progress_queue=q)\nx, y, z = 0, 0, 0\nprint()\nfor fp in files_l:\n z += 1\n try:\n zf = ZipFile(fp)\n except BadZipFile:\n continue\n y += 1\n for f in zf.namelist():\n if f.endswith('.webp'):\n break\n else:\n zf.close()\n dfp = os.path.join(dest, os.path.split(fp)[-1])\n shutil.move(fp, dfp)\n x += 1\n print(f'* {fp}')\n print(f'| {x} | {y} | {z} |', end='\\r')\n",
"<import token>\n<assignment token>\nap.add_argument('-s', '--src', nargs='*')\nap.add_argument('-d', '--dest-dir')\nap.add_argument('-r', '--recursive')\n<assignment token>\nprint(f'-> {dest}')\n<assignment token>\n\n\ndef progress():\n w = shutil.get_terminal_size()[0] - 1\n m = (w - 5) // 4\n t0 = time()\n while True:\n p = q.get()\n if p is None:\n break\n ps = f\"{' ' * w}\\r{p[:m]} ... {p[-m:]}\"\n t1 = time()\n if t1 - t0 > 0.2:\n print(ps, end='\\r')\n t0 = t1\n\n\n<assignment token>\nt.run()\n<assignment token>\nprint()\nfor fp in files_l:\n z += 1\n try:\n zf = ZipFile(fp)\n except BadZipFile:\n continue\n y += 1\n for f in zf.namelist():\n if f.endswith('.webp'):\n break\n else:\n zf.close()\n dfp = os.path.join(dest, os.path.split(fp)[-1])\n shutil.move(fp, dfp)\n x += 1\n print(f'* {fp}')\n print(f'| {x} | {y} | {z} |', end='\\r')\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef progress():\n w = shutil.get_terminal_size()[0] - 1\n m = (w - 5) // 4\n t0 = time()\n while True:\n p = q.get()\n if p is None:\n break\n ps = f\"{' ' * w}\\r{p[:m]} ... {p[-m:]}\"\n t1 = time()\n if t1 - t0 > 0.2:\n print(ps, end='\\r')\n t0 = t1\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
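The script above leans on `mylib` helpers for file listing and threading. A stdlib-only sketch of the same move-archives-without-.webp idea; the function name and the `*.zip` walking strategy are my own (the original also accepts clipboard input and arbitrary file lists):

```python
from pathlib import Path
from zipfile import ZipFile, BadZipFile
import shutil

def move_zips_without_webp(src_dir: str, dest_dir: str) -> None:
    """Move every readable .zip under src_dir that contains no .webp member."""
    dest = Path(dest_dir)
    dest.mkdir(parents=True, exist_ok=True)
    for fp in Path(src_dir).rglob('*.zip'):
        try:
            with ZipFile(fp) as zf:
                names = zf.namelist()
        except BadZipFile:
            continue  # skip corrupt archives
        if not any(n.endswith('.webp') for n in names):
            shutil.move(str(fp), dest / fp.name)
```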
99,304 | 180cb9ef6f552bd477a548217f8acf539a92d4ca |
"""
Provides serialization for API responses.
See `DRF serializer documentation <http://www.django-rest-framework.org/api-guide/serializers/>`_
Used by the View classes api/views.py to serialize API responses as JSON or HTML.
See DEFAULT_RENDERER_CLASSES setting in core.settings.contrib for the enabled renderers.
"""
# -*- coding: utf-8 -*-
import cPickle
import json
import logging
from rest_framework_gis import serializers as geo_serializers
from django.contrib.gis.geos import GEOSGeometry
from django.utils import timezone
from django.utils.translation import ugettext as _
from rest_framework import serializers
import validators
from jobs.models import (
ExportConfig, ExportFormat, Job, Region, RegionMask, Tag
)
from tasks.models import (
ExportRun, ExportTask, ExportTaskException, ExportTaskResult
)
try:
from collections import OrderedDict
# python 2.6
except ImportError:
from ordereddict import OrderedDict
# Get an instance of a logger
logger = logging.getLogger(__name__)
class TagSerializer(serializers.ModelSerializer):
"""Serialize the Tag model."""
class Meta:
model = Tag
fields = ('key', 'value', 'data_model', 'geom_types')
class SimpleExportConfigSerializer(serializers.Serializer):
"""Return a sub-set of ExportConfig model attributes."""
uid = serializers.UUIDField(read_only=True)
name = serializers.CharField()
config_type = serializers.CharField()
filename = serializers.CharField()
published = serializers.BooleanField()
created = serializers.SerializerMethodField()
url = serializers.HyperlinkedIdentityField(
view_name='api:configs-detail',
lookup_field='uid'
)
def get_created(self, obj):
return obj.created_at
class ExportConfigSerializer(serializers.Serializer):
"""Return the full set of ExportConfig model attributes."""
uid = serializers.UUIDField(read_only=True)
url = serializers.HyperlinkedIdentityField(
view_name='api:configs-detail',
lookup_field='uid'
)
name = serializers.CharField(max_length=255)
config_type = serializers.ChoiceField(['PRESET', 'TRANSLATION', 'TRANSFORM'])
filename = serializers.CharField(max_length=255, read_only=True, default='')
size = serializers.SerializerMethodField()
content_type = serializers.CharField(max_length=50, read_only=True)
upload = serializers.FileField(allow_empty_file=False, max_length=100)
published = serializers.BooleanField()
created = serializers.SerializerMethodField()
owner = serializers.SerializerMethodField(read_only=True)
user = serializers.HiddenField(
default=serializers.CurrentUserDefault()
)
def create(self, validated_data):
"""Create an ExportConfig instance."""
return ExportConfig.objects.create(**validated_data)
def update(self, instance, validated_data):
"""Update an ExportConfig instance."""
instance.config_type = validated_data.get('config_type', instance.config_type)
instance.upload.delete(False) # delete the old file..
instance.upload = validated_data.get('upload', instance.upload)
instance.name = validated_data.get('name', instance.name)
instance.filename = validated_data.get('filename', instance.filename)
instance.content_type = validated_data.get('content_type', instance.content_type)
instance.updated_at = timezone.now()
instance.save()
return instance
def validate(self, data):
"""Validate the form data."""
logger.debug(data)
upload = data['upload']
config_type = data['config_type']
content_type = validators.validate_content_type(upload, config_type)
if config_type == 'PRESET':
validators.validate_preset(upload)
data['content_type'] = content_type
fname = data['upload'].name
data['filename'] = fname.replace(' ', '_').lower()
return data
def get_size(self, obj):
size = obj.upload.size
return size
def get_created(self, obj):
return obj.created_at
def get_owner(self, obj):
return obj.user.username
class ExportTaskResultSerializer(serializers.ModelSerializer):
"""Serialize ExportTaskResult models."""
url = serializers.SerializerMethodField()
size = serializers.SerializerMethodField()
class Meta:
model = ExportTaskResult
fields = ('filename', 'size', 'url',)
def get_url(self, obj):
request = self.context['request']
return request.build_absolute_uri(obj.download_url)
def get_size(self, obj):
return "{0:.3f} MB".format(obj.size)
class ExportTaskExceptionSerializer(serializers.ModelSerializer):
"""Serialize ExportTaskExceptions."""
exception = serializers.SerializerMethodField()
class Meta:
model = ExportTaskException
fields = ('exception',)
def get_exception(self, obj):
exc_info = cPickle.loads(str(obj.exception)).exc_info
return str(exc_info[1])
class ExportTaskSerializer(serializers.ModelSerializer):
"""Serialize ExportTasks models."""
result = serializers.SerializerMethodField()
errors = serializers.SerializerMethodField()
started_at = serializers.SerializerMethodField()
finished_at = serializers.SerializerMethodField()
duration = serializers.SerializerMethodField()
url = serializers.HyperlinkedIdentityField(
view_name='api:tasks-detail',
lookup_field='uid'
)
class Meta:
model = ExportTask
fields = ('uid', 'url', 'name', 'status', 'started_at', 'finished_at', 'duration', 'result', 'errors',)
def get_result(self, obj):
"""Serialize the ExportTaskResult for this ExportTask."""
try:
result = obj.result
serializer = ExportTaskResultSerializer(result, many=False, context=self.context)
return serializer.data
except ExportTaskResult.DoesNotExist as e:
return None # no result yet
def get_errors(self, obj):
"""Serialize the ExportTaskExceptions for this ExportTask."""
try:
errors = obj.exceptions
serializer = ExportTaskExceptionSerializer(errors, many=True, context=self.context)
return serializer.data
except ExportTaskException.DoesNotExist as e:
return None
def get_started_at(self, obj):
if (not obj.started_at):
return None # not started yet
else:
return obj.started_at
def get_finished_at(self, obj):
if (not obj.finished_at):
return None # not finished yet
else:
return obj.finished_at
def get_duration(self, obj):
"""Get the duration for this ExportTask."""
started = obj.started_at
finished = obj.finished_at
if started and finished:
return str(finished - started)
else:
return None # can't compute yet
class SimpleJobSerializer(serializers.Serializer):
"""Return a sub-set of Job model attributes."""
uid = serializers.SerializerMethodField()
name = serializers.CharField()
description = serializers.CharField()
url = serializers.HyperlinkedIdentityField(
view_name='api:jobs-detail',
lookup_field='uid'
)
extent = serializers.SerializerMethodField()
def get_uid(self, obj):
return obj.uid
def get_extent(self, obj):
"""Return the Job's extent as a GeoJSON Feature."""
uid = str(obj.uid)
name = obj.name
geom = obj.the_geom
geometry = json.loads(GEOSGeometry(geom).geojson)
feature = OrderedDict()
feature['type'] = 'Feature'
feature['properties'] = {'uid': uid, 'name': name}
feature['geometry'] = geometry
return feature
class ExportRunSerializer(serializers.ModelSerializer):
"""Serialize ExportRun."""
url = serializers.HyperlinkedIdentityField(
view_name='api:runs-detail',
lookup_field='uid'
)
job = SimpleJobSerializer() # nest the job details
tasks = ExportTaskSerializer(many=True)
finished_at = serializers.SerializerMethodField()
duration = serializers.SerializerMethodField()
user = serializers.SerializerMethodField()
class Meta:
model = ExportRun
fields = ('uid', 'url', 'started_at', 'finished_at', 'duration', 'user', 'status', 'job', 'tasks')
def get_finished_at(self, obj):
if (not obj.finished_at):
return {}
else:
return obj.finished_at
def get_duration(self, obj):
"""Return the duration of the the run."""
started = obj.started_at
finished = obj.finished_at
if started and finished:
return str(finished - started)
else:
return None
def get_user(self, obj):
return obj.user.username
class UserSerializer(serializers.Serializer):
id = serializers.IntegerField()
class RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):
"""Return a GeoJSON representation of the region mask."""
class Meta:
model = RegionMask
geo_field = 'the_geom'
fields = ('the_geom',)
class RegionSerializer(geo_serializers.GeoFeatureModelSerializer):
"""Serializer returning GeoJSON representation of Regions."""
url = serializers.HyperlinkedIdentityField(
view_name='api:regions-detail',
lookup_field='uid'
)
id = serializers.SerializerMethodField()
class Meta:
model = Region
geo_field = 'the_geom'
fields = ('id', 'uid', 'name', 'description', 'url', 'the_geom')
def get_id(self, obj):
return obj.uid
class SimpleRegionSerializer(serializers.ModelSerializer):
"""Serializer for returning Region model data without geometry."""
url = serializers.HyperlinkedIdentityField(
view_name='api:regions-detail',
lookup_field='uid'
)
class Meta:
model = Region
fields = ('uid', 'name', 'description', 'url')
class ExportFormatSerializer(serializers.ModelSerializer):
"""Return a representation of the ExportFormat model."""
url = serializers.HyperlinkedIdentityField(
view_name='api:formats-detail',
lookup_field='slug'
)
class Meta:
model = ExportFormat
fields = ('uid', 'url', 'slug', 'name', 'description')
class ListJobSerializer(serializers.Serializer):
"""
Return a sub-set of Job model attributes.
Provides a stripped down set of export attributes.
Removes the selected Tags from the Job representation.
Used to display the list of exports in the export browser
where tag info is not required.
"""
uid = serializers.SerializerMethodField()
url = serializers.HyperlinkedIdentityField(
view_name='api:jobs-detail',
lookup_field='uid'
)
name = serializers.CharField()
description = serializers.CharField()
event = serializers.CharField()
created_at = serializers.DateTimeField(read_only=True)
owner = serializers.SerializerMethodField(read_only=True)
extent = serializers.SerializerMethodField()
region = SimpleRegionSerializer(read_only=True)
published = serializers.BooleanField()
def get_uid(self, obj):
return obj.uid
def get_extent(self, obj):
"""Return the export extent as a GeoJSON Feature."""
uid = str(obj.uid)
name = obj.name
geom = obj.the_geom
geometry = json.loads(GEOSGeometry(geom).geojson)
feature = OrderedDict()
feature['type'] = 'Feature'
feature['properties'] = {'uid': uid, 'name': name}
feature['geometry'] = geometry
return feature
def get_owner(self, obj):
return obj.user.username
class JobSerializer(serializers.Serializer):
"""
Return a full representation of an export Job.
This is the core representation of the API.
"""
"""
List of the available Export Formats.
This list should be updated to add support for
additional export formats.
"""
EXPORT_FORMAT_CHOICES = (
('shp', 'Shapefile Format'),
('obf', 'OBF Format'),
('kml', 'KML Format'),
('garmin', 'Garmin Format'),
('sqlite', 'SQLITE Format'),
('thematic', 'Thematic Shapefile Format')
)
formats = serializers.MultipleChoiceField(
choices=EXPORT_FORMAT_CHOICES,
allow_blank=False,
write_only=True,
error_messages={
'invalid_choice': _("invalid export format."),
'not_a_list': _('Expected a list of items but got type "{input_type}".')
}
)
uid = serializers.UUIDField(read_only=True)
url = serializers.HyperlinkedIdentityField(
view_name='api:jobs-detail',
lookup_field='uid'
)
name = serializers.CharField(
max_length=100,
)
description = serializers.CharField(
max_length=255,
)
event = serializers.CharField(
max_length=100,
allow_blank=True,
required=False
)
created_at = serializers.DateTimeField(read_only=True)
owner = serializers.SerializerMethodField(read_only=True)
exports = serializers.SerializerMethodField()
configurations = serializers.SerializerMethodField()
published = serializers.BooleanField(required=False)
feature_save = serializers.BooleanField(required=False)
feature_pub = serializers.BooleanField(required=False)
xmin = serializers.FloatField(
max_value=180, min_value=-180, write_only=True,
error_messages={
'required': _('xmin is required.'),
'invalid': _('invalid xmin value.'),
}
)
ymin = serializers.FloatField(
max_value=90, min_value=-90, write_only=True,
error_messages={
'required': _('ymin is required.'),
'invalid': _('invalid ymin value.'),
}
)
xmax = serializers.FloatField(
max_value=180, min_value=-180, write_only=True,
error_messages={
'required': _('xmax is required.'),
'invalid': _('invalid xmax value.'),
}
)
ymax = serializers.FloatField(
max_value=90, min_value=-90, write_only=True,
error_messages={
'required': _('ymax is required.'),
'invalid': _('invalid ymax value.'),
}
)
region = SimpleRegionSerializer(read_only=True)
extent = serializers.SerializerMethodField(read_only=True)
user = serializers.HiddenField(
default=serializers.CurrentUserDefault()
)
tags = serializers.SerializerMethodField()
def create(self, validated_data):
"""Creates an export Job."""
return Job.objects.create(**validated_data)
def update(self, instance, validated_data):
"""Not implemented as Jobs are cloned rather than updated."""
pass
def validate(self, data):
"""
Validates the data submitted during Job creation.
See api/validators.py for validation code.
"""
user = data['user']
validators.validate_formats(data)
extents = validators.validate_bbox_params(data)
the_geom = validators.validate_bbox(extents, user=user)
data['the_geom'] = the_geom
regions = Region.objects.filter(the_geom__intersects=the_geom).intersection(the_geom, field_name='the_geom')
# sort the returned regions by area of intersection, largest first.
sorted_regions = sorted(regions.all(), key=lambda a: a.intersection.area, reverse=True)
data['region'] = validators.validate_region(sorted_regions)
# remove unwanted fields, these are pulled from the request in the view if the serializer is valid
data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'), data.pop('formats')
return data
def get_extent(self, obj):
"""Return the export extent as a GeoJSON Feature."""
uid = str(obj.uid)
name = obj.name
geom = obj.the_geom
geometry = json.loads(GEOSGeometry(geom).geojson)
feature = OrderedDict()
feature['type'] = 'Feature'
feature['properties'] = {'uid': uid, 'name': name}
feature['geometry'] = geometry
return feature
def get_exports(self, obj):
"""Return the export formats selected for this export."""
formats = [format for format in obj.formats.all()]
serializer = ExportFormatSerializer(formats, many=True, context={'request': self.context['request']})
return serializer.data
def get_configurations(self, obj):
"""Return the configurations selected for this export."""
configs = obj.configs.all()
serializer = SimpleExportConfigSerializer(configs, many=True,
context={'request': self.context['request']})
return serializer.data
def get_tags(self, obj):
"""Return the Tags selected for this export."""
tags = obj.tags.all()
serializer = TagSerializer(tags, many=True)
return serializer.data
def get_owner(self, obj):
"""Return the username for the owner of this export."""
return obj.user.username
|
[
"\"\"\"\nProvides serialization for API responses.\n\nSee `DRF serializer documentation <http://www.django-rest-framework.org/api-guide/serializers/>`_\nUsed by the View classes api/views.py to serialize API responses as JSON or HTML.\nSee DEFAULT_RENDERER_CLASSES setting in core.settings.contrib for the enabled renderers.\n\"\"\"\n# -*- coding: utf-8 -*-\nimport cPickle\nimport json\nimport logging\n\nfrom rest_framework_gis import serializers as geo_serializers\n\nfrom django.contrib.gis.geos import GEOSGeometry\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext as _\n\nfrom rest_framework import serializers\n\nimport validators\nfrom jobs.models import (\n ExportConfig, ExportFormat, Job, Region, RegionMask, Tag\n)\nfrom tasks.models import (\n ExportRun, ExportTask, ExportTaskException, ExportTaskResult\n)\n\ntry:\n from collections import OrderedDict\n# python 2.6\nexcept ImportError:\n from ordereddict import OrderedDict\n\n# Get an instance of a logger\nlogger = logging.getLogger(__name__)\n\n\nclass TagSerializer(serializers.ModelSerializer):\n \"\"\"Serialize the Tag model.\"\"\"\n class Meta:\n model = Tag\n fields = ('key', 'value', 'data_model', 'geom_types')\n\n\nclass SimpleExportConfigSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of ExportConfig model attributes.\"\"\"\n uid = serializers.UUIDField(read_only=True)\n name = serializers.CharField()\n config_type = serializers.CharField()\n filename = serializers.CharField()\n published = serializers.BooleanField()\n created = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(\n view_name='api:configs-detail',\n lookup_field='uid'\n )\n\n def get_created(self, obj):\n return obj.created_at\n\n\nclass ExportConfigSerializer(serializers.Serializer):\n \"\"\"Return the full set of ExportConfig model attributes.\"\"\"\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(\n view_name='api:configs-detail',\n lookup_field='uid'\n )\n name = serializers.CharField(max_length=255)\n config_type = serializers.ChoiceField(['PRESET', 'TRANSLATION', 'TRANSFORM'])\n filename = serializers.CharField(max_length=255, read_only=True, default='')\n size = serializers.SerializerMethodField()\n content_type = serializers.CharField(max_length=50, read_only=True)\n upload = serializers.FileField(allow_empty_file=False, max_length=100)\n published = serializers.BooleanField()\n created = serializers.SerializerMethodField()\n owner = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(\n default=serializers.CurrentUserDefault()\n )\n\n def create(self, validated_data):\n \"\"\"Create an ExportConfig instance.\"\"\"\n return ExportConfig.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Update an ExportConfig instance.\"\"\"\n instance.config_type = validated_data.get('config_type', instance.config_type)\n instance.upload.delete(False) # delete the old file..\n instance.upload = validated_data.get('upload', instance.upload)\n instance.name = validated_data.get('name', instance.name)\n instance.filename = validated_data.get('filename', instance.filename)\n instance.content_type = validated_data.get('content_type', instance.content_type)\n instance.updated_at = timezone.now()\n instance.save()\n return instance\n\n def validate(self, data):\n \"\"\"Validate the form data.\"\"\"\n logger.debug(data)\n upload = data['upload']\n config_type = data['config_type']\n content_type 
= validators.validate_content_type(upload, config_type)\n if config_type == 'PRESET':\n validators.validate_preset(upload)\n data['content_type'] = content_type\n fname = data['upload'].name\n data['filename'] = fname.replace(' ', '_').lower()\n return data\n\n def get_size(self, obj):\n size = obj.upload.size\n return size\n\n def get_created(self, obj):\n return obj.created_at\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass ExportTaskResultSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskResult models.\"\"\"\n url = serializers.SerializerMethodField()\n size = serializers.SerializerMethodField()\n\n class Meta:\n model = ExportTaskResult\n fields = ('filename', 'size', 'url',)\n\n def get_url(self, obj):\n request = self.context['request']\n return request.build_absolute_uri(obj.download_url)\n\n def get_size(self, obj):\n return \"{0:.3f} MB\".format(obj.size)\n\n\nclass ExportTaskExceptionSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskExceptions.\"\"\"\n exception = serializers.SerializerMethodField()\n\n class Meta:\n model = ExportTaskException\n fields = ('exception',)\n\n def get_exception(self, obj):\n exc_info = cPickle.loads(str(obj.exception)).exc_info\n return str(exc_info[1])\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTasks models.\"\"\"\n result = serializers.SerializerMethodField()\n errors = serializers.SerializerMethodField()\n started_at = serializers.SerializerMethodField()\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(\n view_name='api:tasks-detail',\n lookup_field='uid'\n )\n\n class Meta:\n model = ExportTask\n fields = ('uid', 'url', 'name', 'status', 'started_at', 'finished_at', 'duration', 'result', 'errors',)\n\n def get_result(self, obj):\n \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n try:\n result = obj.result\n serializer = ExportTaskResultSerializer(result, many=False, context=self.context)\n return serializer.data\n except ExportTaskResult.DoesNotExist as e:\n return None # no result yet\n\n def get_errors(self, obj):\n \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True, context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None\n\n def get_started_at(self, obj):\n if (not obj.started_at):\n return None # not started yet\n else:\n return obj.started_at\n\n def get_finished_at(self, obj):\n if (not obj.finished_at):\n return None # not finished yet\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Get the duration for this ExportTask.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None # can't compute yet\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(\n view_name='api:jobs-detail',\n lookup_field='uid'\n )\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = 
obj.the_geom\n        geometry = json.loads(GEOSGeometry(geom).geojson)\n        feature = OrderedDict()\n        feature['type'] = 'Feature'\n        feature['properties'] = {'uid': uid, 'name': name}\n        feature['geometry'] = geometry\n        return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n    \"\"\"Serialize ExportRun.\"\"\"\n    url = serializers.HyperlinkedIdentityField(\n        view_name='api:runs-detail',\n        lookup_field='uid'\n    )\n    job = SimpleJobSerializer() # nest the job details\n    tasks = ExportTaskSerializer(many=True)\n    finished_at = serializers.SerializerMethodField()\n    duration = serializers.SerializerMethodField()\n    user = serializers.SerializerMethodField()\n\n    class Meta:\n        model = ExportRun\n        fields = ('uid', 'url', 'started_at', 'finished_at', 'duration', 'user', 'status', 'job', 'tasks')\n\n    def get_finished_at(self, obj):\n        if (not obj.finished_at):\n            return {}\n        else:\n            return obj.finished_at\n\n    def get_duration(self, obj):\n        \"\"\"Return the duration of the run.\"\"\"\n        started = obj.started_at\n        finished = obj.finished_at\n        if started and finished:\n            return str(finished - started)\n        else:\n            return None\n\n    def get_user(self, obj):\n        return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n    id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n    \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n    class Meta:\n        model = RegionMask\n        geo_field = 'the_geom'\n        fields = ('the_geom',)\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n    \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n    url = serializers.HyperlinkedIdentityField(\n        view_name='api:regions-detail',\n        lookup_field='uid'\n    )\n    id = serializers.SerializerMethodField()\n\n    class Meta:\n        model = Region\n        geo_field = 'the_geom'\n        fields = ('id', 'uid', 'name', 'description', 'url', 'the_geom')\n\n    def get_id(self, obj):\n        return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n    \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n    url = serializers.HyperlinkedIdentityField(\n        view_name='api:regions-detail',\n        lookup_field='uid'\n    )\n\n    class Meta:\n        model = Region\n        fields = ('uid', 'name', 'description', 'url')\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n    \"\"\"Return a representation of the ExportFormat model.\"\"\"\n    url = serializers.HyperlinkedIdentityField(\n        view_name='api:formats-detail',\n        lookup_field='slug'\n    )\n\n    class Meta:\n        model = ExportFormat\n        fields = ('uid', 'url', 'slug', 'name', 'description')\n\n\nclass ListJobSerializer(serializers.Serializer):\n    \"\"\"\n    Return a sub-set of Job model attributes.\n\n    Provides a stripped down set of export attributes.\n    Removes the selected Tags from the Job representation.\n    Used to display the list of exports in the export browser\n    where tag info is not required.\n    \"\"\"\n    uid = serializers.SerializerMethodField()\n    url = serializers.HyperlinkedIdentityField(\n        view_name='api:jobs-detail',\n        lookup_field='uid'\n    )\n    name = serializers.CharField()\n    description = serializers.CharField()\n    event = serializers.CharField()\n    created_at = serializers.DateTimeField(read_only=True)\n    owner = serializers.SerializerMethodField(read_only=True)\n    extent = serializers.SerializerMethodField()\n    region = SimpleRegionSerializer(read_only=True)\n    published = serializers.BooleanField()\n\n    def get_uid(self, obj):\n        return obj.uid\n\n    def get_extent(self, obj):\n        \"\"\"Return the export extent 
as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = (\n ('shp', 'Shapefile Format'),\n ('obf', 'OBF Format'),\n ('kml', 'KML Format'),\n ('garmin', 'Garmin Format'),\n ('sqlite', 'SQLITE Format'),\n ('thematic', 'Thematic Shapefile Format')\n )\n\n formats = serializers.MultipleChoiceField(\n choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False,\n write_only=True,\n error_messages={\n 'invalid_choice': _(\"invalid export format.\"),\n 'not_a_list': _('Expected a list of items but got type \"{input_type}\".')\n }\n )\n\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(\n view_name='api:jobs-detail',\n lookup_field='uid'\n )\n name = serializers.CharField(\n max_length=100,\n )\n description = serializers.CharField(\n max_length=255,\n )\n event = serializers.CharField(\n max_length=100,\n allow_blank=True,\n required=False\n )\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(\n max_value=180, min_value=-180, write_only=True,\n error_messages={\n 'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.'),\n }\n )\n ymin = serializers.FloatField(\n max_value=90, min_value=-90, write_only=True,\n error_messages={\n 'required': _('ymin is required.'),\n 'invalid': _('invalid ymin value.'),\n }\n )\n xmax = serializers.FloatField(\n max_value=180, min_value=-180, write_only=True,\n error_messages={\n 'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.'),\n }\n )\n ymax = serializers.FloatField(\n max_value=90, min_value=-90, write_only=True,\n error_messages={\n 'required': _('ymax is required.'),\n 'invalid': _('invalid ymax value.'),\n }\n )\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(\n default=serializers.CurrentUserDefault()\n )\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = 
Region.objects.filter(the_geom__intersects=the_geom).intersection(the_geom, field_name='the_geom')\n # sort the returned regions by area of intersection, largest first.\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection.area, reverse=True) \n data['region'] = validators.validate_region(sorted_regions)\n # remove unwanted fields, these are pulled from the request in the view if the serializer is valid\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
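A pattern worth noting in the step above: every serializer that exposes a job extent builds the GeoJSON Feature by hand with an OrderedDict rather than delegating to rest_framework_gis. A minimal, dependency-free sketch of that get_extent logic — the geometry string below is a made-up stand-in for GEOSGeometry(geom).geojson, which would need a GeoDjango setup:

import json
from collections import OrderedDict

def build_extent_feature(uid, name, geometry_geojson):
    # Mirrors JobSerializer.get_extent / ListJobSerializer.get_extent: wrap the
    # geometry in a Feature whose properties carry the uid and name.
    feature = OrderedDict()
    feature['type'] = 'Feature'
    feature['properties'] = {'uid': uid, 'name': name}
    feature['geometry'] = json.loads(geometry_geojson)
    return feature

# Hypothetical inputs; the serializers read these from obj.uid, obj.name, obj.the_geom.
bbox = '{"type": "Polygon", "coordinates": [[[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]]]}'
print(json.dumps(build_extent_feature('a1b2c3', 'demo export', bbox)))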
"<docstring token>\nimport cPickle\nimport json\nimport logging\nfrom rest_framework_gis import serializers as geo_serializers\nfrom django.contrib.gis.geos import GEOSGeometry\nfrom django.utils import timezone\nfrom django.utils.translation import ugettext as _\nfrom rest_framework import serializers\nimport validators\nfrom jobs.models import ExportConfig, ExportFormat, Job, Region, RegionMask, Tag\nfrom tasks.models import ExportRun, ExportTask, ExportTaskException, ExportTaskResult\ntry:\n from collections import OrderedDict\nexcept ImportError:\n from ordereddict import OrderedDict\nlogger = logging.getLogger(__name__)\n\n\nclass TagSerializer(serializers.ModelSerializer):\n \"\"\"Serialize the Tag model.\"\"\"\n\n\n class Meta:\n model = Tag\n fields = 'key', 'value', 'data_model', 'geom_types'\n\n\nclass SimpleExportConfigSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of ExportConfig model attributes.\"\"\"\n uid = serializers.UUIDField(read_only=True)\n name = serializers.CharField()\n config_type = serializers.CharField()\n filename = serializers.CharField()\n published = serializers.BooleanField()\n created = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:configs-detail', lookup_field='uid')\n\n def get_created(self, obj):\n return obj.created_at\n\n\nclass ExportConfigSerializer(serializers.Serializer):\n \"\"\"Return the full set of ExportConfig model attributes.\"\"\"\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:configs-detail', lookup_field='uid')\n name = serializers.CharField(max_length=255)\n config_type = serializers.ChoiceField(['PRESET', 'TRANSLATION',\n 'TRANSFORM'])\n filename = serializers.CharField(max_length=255, read_only=True, default=''\n )\n size = serializers.SerializerMethodField()\n content_type = serializers.CharField(max_length=50, read_only=True)\n upload = serializers.FileField(allow_empty_file=False, max_length=100)\n published = serializers.BooleanField()\n created = serializers.SerializerMethodField()\n owner = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n\n def create(self, validated_data):\n \"\"\"Create an ExportConfig instance.\"\"\"\n return ExportConfig.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Update an ExportConfig instance.\"\"\"\n instance.config_type = validated_data.get('config_type', instance.\n config_type)\n instance.upload.delete(False)\n instance.upload = validated_data.get('upload', instance.upload)\n instance.name = validated_data.get('name', instance.name)\n instance.filename = validated_data.get('filename', instance.filename)\n instance.content_type = validated_data.get('content_type', instance\n .content_type)\n instance.updated_at = timezone.now()\n instance.save()\n return instance\n\n def validate(self, data):\n \"\"\"Validate the form data.\"\"\"\n logger.debug(data)\n upload = data['upload']\n config_type = data['config_type']\n content_type = validators.validate_content_type(upload, config_type)\n if config_type == 'PRESET':\n validators.validate_preset(upload)\n data['content_type'] = content_type\n fname = data['upload'].name\n data['filename'] = fname.replace(' ', '_').lower()\n return data\n\n def get_size(self, obj):\n size = obj.upload.size\n return size\n\n def get_created(self, obj):\n return obj.created_at\n\n def get_owner(self, obj):\n return 
obj.user.username\n\n\nclass ExportTaskResultSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskResult models.\"\"\"\n url = serializers.SerializerMethodField()\n size = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskResult\n fields = 'filename', 'size', 'url'\n\n def get_url(self, obj):\n request = self.context['request']\n return request.build_absolute_uri(obj.download_url)\n\n def get_size(self, obj):\n return '{0:.3f} MB'.format(obj.size)\n\n\nclass ExportTaskExceptionSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskExceptions.\"\"\"\n exception = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskException\n fields = 'exception',\n\n def get_exception(self, obj):\n exc_info = cPickle.loads(str(obj.exception)).exc_info\n return str(exc_info[1])\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTasks models.\"\"\"\n result = serializers.SerializerMethodField()\n errors = serializers.SerializerMethodField()\n started_at = serializers.SerializerMethodField()\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:tasks-detail',\n lookup_field='uid')\n\n\n class Meta:\n model = ExportTask\n fields = ('uid', 'url', 'name', 'status', 'started_at',\n 'finished_at', 'duration', 'result', 'errors')\n\n def get_result(self, obj):\n \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n try:\n result = obj.result\n serializer = ExportTaskResultSerializer(result, many=False,\n context=self.context)\n return serializer.data\n except ExportTaskResult.DoesNotExist as e:\n return None\n\n def get_errors(self, obj):\n \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True,\n context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None\n\n def get_started_at(self, obj):\n if not obj.started_at:\n return None\n else:\n return obj.started_at\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return None\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Get the duration for this ExportTask.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = 
serializers.SerializerMethodField()\n    duration = serializers.SerializerMethodField()\n    user = serializers.SerializerMethodField()\n\n\n    class Meta:\n        model = ExportRun\n        fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n            'user', 'status', 'job', 'tasks')\n\n    def get_finished_at(self, obj):\n        if not obj.finished_at:\n            return {}\n        else:\n            return obj.finished_at\n\n    def get_duration(self, obj):\n        \"\"\"Return the duration of the run.\"\"\"\n        started = obj.started_at\n        finished = obj.finished_at\n        if started and finished:\n            return str(finished - started)\n        else:\n            return None\n\n    def get_user(self, obj):\n        return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n    id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n    \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n    class Meta:\n        model = RegionMask\n        geo_field = 'the_geom'\n        fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n    \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n    url = serializers.HyperlinkedIdentityField(view_name=\n        'api:regions-detail', lookup_field='uid')\n    id = serializers.SerializerMethodField()\n\n\n    class Meta:\n        model = Region\n        geo_field = 'the_geom'\n        fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n    def get_id(self, obj):\n        return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n    \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n    url = serializers.HyperlinkedIdentityField(view_name=\n        'api:regions-detail', lookup_field='uid')\n\n\n    class Meta:\n        model = Region\n        fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n    \"\"\"Return a representation of the ExportFormat model.\"\"\"\n    url = serializers.HyperlinkedIdentityField(view_name=\n        'api:formats-detail', lookup_field='slug')\n\n\n    class Meta:\n        model = ExportFormat\n        fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n    \"\"\"\n    Return a sub-set of Job model attributes.\n\n    Provides a stripped down set of export attributes.\n    Removes the selected Tags from the Job representation.\n    Used to display the list of exports in the export browser\n    where tag info is not required.\n    \"\"\"\n    uid = serializers.SerializerMethodField()\n    url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n        lookup_field='uid')\n    name = serializers.CharField()\n    description = serializers.CharField()\n    event = serializers.CharField()\n    created_at = serializers.DateTimeField(read_only=True)\n    owner = serializers.SerializerMethodField(read_only=True)\n    extent = serializers.SerializerMethodField()\n    region = SimpleRegionSerializer(read_only=True)\n    published = serializers.BooleanField()\n\n    def get_uid(self, obj):\n        return obj.uid\n\n    def get_extent(self, obj):\n        \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n        uid = str(obj.uid)\n        name = obj.name\n        geom = obj.the_geom\n        geometry = json.loads(GEOSGeometry(geom).geojson)\n        feature = OrderedDict()\n        feature['type'] = 'Feature'\n        feature['properties'] = {'uid': uid, 'name': name}\n        feature['geometry'] = geometry\n        return feature\n\n    def get_owner(self, obj):\n        return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n    \"\"\"\n    Return a full representation of an export Job.\n\n    This is the core representation of the API.\n    \"\"\"\n    \"\"\"\n    List of the 
available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return 
feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\ntry:\n from collections import OrderedDict\nexcept ImportError:\n from ordereddict import OrderedDict\nlogger = logging.getLogger(__name__)\n\n\nclass TagSerializer(serializers.ModelSerializer):\n \"\"\"Serialize the Tag model.\"\"\"\n\n\n class Meta:\n model = Tag\n fields = 'key', 'value', 'data_model', 'geom_types'\n\n\nclass SimpleExportConfigSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of ExportConfig model attributes.\"\"\"\n uid = serializers.UUIDField(read_only=True)\n name = serializers.CharField()\n config_type = serializers.CharField()\n filename = serializers.CharField()\n published = serializers.BooleanField()\n created = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:configs-detail', lookup_field='uid')\n\n def get_created(self, obj):\n return obj.created_at\n\n\nclass ExportConfigSerializer(serializers.Serializer):\n \"\"\"Return the full set of ExportConfig model attributes.\"\"\"\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:configs-detail', lookup_field='uid')\n name = serializers.CharField(max_length=255)\n config_type = serializers.ChoiceField(['PRESET', 'TRANSLATION',\n 'TRANSFORM'])\n filename = serializers.CharField(max_length=255, read_only=True, default=''\n )\n size = serializers.SerializerMethodField()\n content_type = serializers.CharField(max_length=50, read_only=True)\n upload = serializers.FileField(allow_empty_file=False, max_length=100)\n published = serializers.BooleanField()\n created = serializers.SerializerMethodField()\n owner = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n\n def create(self, validated_data):\n \"\"\"Create an ExportConfig instance.\"\"\"\n return ExportConfig.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Update an ExportConfig instance.\"\"\"\n instance.config_type = validated_data.get('config_type', instance.\n config_type)\n instance.upload.delete(False)\n instance.upload = validated_data.get('upload', instance.upload)\n instance.name = validated_data.get('name', instance.name)\n instance.filename = validated_data.get('filename', instance.filename)\n instance.content_type = validated_data.get('content_type', instance\n .content_type)\n instance.updated_at = timezone.now()\n instance.save()\n return instance\n\n def validate(self, data):\n \"\"\"Validate the form data.\"\"\"\n logger.debug(data)\n upload = data['upload']\n config_type = data['config_type']\n content_type = validators.validate_content_type(upload, config_type)\n if config_type == 'PRESET':\n validators.validate_preset(upload)\n data['content_type'] = content_type\n fname = data['upload'].name\n data['filename'] = fname.replace(' ', '_').lower()\n return data\n\n def get_size(self, obj):\n size = obj.upload.size\n return size\n\n def get_created(self, obj):\n return obj.created_at\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass ExportTaskResultSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskResult models.\"\"\"\n url = serializers.SerializerMethodField()\n size = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskResult\n fields = 'filename', 'size', 'url'\n\n def get_url(self, obj):\n request = self.context['request']\n return request.build_absolute_uri(obj.download_url)\n\n def get_size(self, obj):\n return '{0:.3f} 
MB'.format(obj.size)\n\n\nclass ExportTaskExceptionSerializer(serializers.ModelSerializer):\n    \"\"\"Serialize ExportTaskExceptions.\"\"\"\n    exception = serializers.SerializerMethodField()\n\n\n    class Meta:\n        model = ExportTaskException\n        fields = 'exception',\n\n    def get_exception(self, obj):\n        exc_info = cPickle.loads(str(obj.exception)).exc_info\n        return str(exc_info[1])\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n    \"\"\"Serialize ExportTasks models.\"\"\"\n    result = serializers.SerializerMethodField()\n    errors = serializers.SerializerMethodField()\n    started_at = serializers.SerializerMethodField()\n    finished_at = serializers.SerializerMethodField()\n    duration = serializers.SerializerMethodField()\n    url = serializers.HyperlinkedIdentityField(view_name='api:tasks-detail',\n        lookup_field='uid')\n\n\n    class Meta:\n        model = ExportTask\n        fields = ('uid', 'url', 'name', 'status', 'started_at',\n            'finished_at', 'duration', 'result', 'errors')\n\n    def get_result(self, obj):\n        \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n        try:\n            result = obj.result\n            serializer = ExportTaskResultSerializer(result, many=False,\n                context=self.context)\n            return serializer.data\n        except ExportTaskResult.DoesNotExist as e:\n            return None\n\n    def get_errors(self, obj):\n        \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n        try:\n            errors = obj.exceptions\n            serializer = ExportTaskExceptionSerializer(errors, many=True,\n                context=self.context)\n            return serializer.data\n        except ExportTaskException.DoesNotExist as e:\n            return None\n\n    def get_started_at(self, obj):\n        if not obj.started_at:\n            return None\n        else:\n            return obj.started_at\n\n    def get_finished_at(self, obj):\n        if not obj.finished_at:\n            return None\n        else:\n            return obj.finished_at\n\n    def get_duration(self, obj):\n        \"\"\"Get the duration for this ExportTask.\"\"\"\n        started = obj.started_at\n        finished = obj.finished_at\n        if started and finished:\n            return str(finished - started)\n        else:\n            return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n    \"\"\"Return a sub-set of Job model attributes.\"\"\"\n    uid = serializers.SerializerMethodField()\n    name = serializers.CharField()\n    description = serializers.CharField()\n    url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n        lookup_field='uid')\n    extent = serializers.SerializerMethodField()\n\n    def get_uid(self, obj):\n        return obj.uid\n\n    def get_extent(self, obj):\n        \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n        uid = str(obj.uid)\n        name = obj.name\n        geom = obj.the_geom\n        geometry = json.loads(GEOSGeometry(geom).geojson)\n        feature = OrderedDict()\n        feature['type'] = 'Feature'\n        feature['properties'] = {'uid': uid, 'name': name}\n        feature['geometry'] = geometry\n        return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n    \"\"\"Serialize ExportRun.\"\"\"\n    url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n        lookup_field='uid')\n    job = SimpleJobSerializer()\n    tasks = ExportTaskSerializer(many=True)\n    finished_at = serializers.SerializerMethodField()\n    duration = serializers.SerializerMethodField()\n    user = serializers.SerializerMethodField()\n\n\n    class Meta:\n        model = ExportRun\n        fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n            'user', 'status', 'job', 'tasks')\n\n    def get_finished_at(self, obj):\n        if not obj.finished_at:\n            return {}\n        else:\n            return obj.finished_at\n\n    def get_duration(self, obj):\n        \"\"\"Return the duration of the run.\"\"\"\n        started = 
obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': 
_('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = 
SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
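The two get_duration implementations in the step above both return str(finished - started), i.e. Python's default timedelta rendering, or None when either timestamp is missing. A quick illustration with made-up timestamps:

from datetime import datetime

started = datetime(2016, 3, 1, 12, 0, 0)
finished = datetime(2016, 3, 1, 12, 42, 7)
print(str(finished - started))  # -> 0:42:07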
"<docstring token>\n<import token>\ntry:\n from collections import OrderedDict\nexcept ImportError:\n from ordereddict import OrderedDict\n<assignment token>\n\n\nclass TagSerializer(serializers.ModelSerializer):\n \"\"\"Serialize the Tag model.\"\"\"\n\n\n class Meta:\n model = Tag\n fields = 'key', 'value', 'data_model', 'geom_types'\n\n\nclass SimpleExportConfigSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of ExportConfig model attributes.\"\"\"\n uid = serializers.UUIDField(read_only=True)\n name = serializers.CharField()\n config_type = serializers.CharField()\n filename = serializers.CharField()\n published = serializers.BooleanField()\n created = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:configs-detail', lookup_field='uid')\n\n def get_created(self, obj):\n return obj.created_at\n\n\nclass ExportConfigSerializer(serializers.Serializer):\n \"\"\"Return the full set of ExportConfig model attributes.\"\"\"\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:configs-detail', lookup_field='uid')\n name = serializers.CharField(max_length=255)\n config_type = serializers.ChoiceField(['PRESET', 'TRANSLATION',\n 'TRANSFORM'])\n filename = serializers.CharField(max_length=255, read_only=True, default=''\n )\n size = serializers.SerializerMethodField()\n content_type = serializers.CharField(max_length=50, read_only=True)\n upload = serializers.FileField(allow_empty_file=False, max_length=100)\n published = serializers.BooleanField()\n created = serializers.SerializerMethodField()\n owner = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n\n def create(self, validated_data):\n \"\"\"Create an ExportConfig instance.\"\"\"\n return ExportConfig.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Update an ExportConfig instance.\"\"\"\n instance.config_type = validated_data.get('config_type', instance.\n config_type)\n instance.upload.delete(False)\n instance.upload = validated_data.get('upload', instance.upload)\n instance.name = validated_data.get('name', instance.name)\n instance.filename = validated_data.get('filename', instance.filename)\n instance.content_type = validated_data.get('content_type', instance\n .content_type)\n instance.updated_at = timezone.now()\n instance.save()\n return instance\n\n def validate(self, data):\n \"\"\"Validate the form data.\"\"\"\n logger.debug(data)\n upload = data['upload']\n config_type = data['config_type']\n content_type = validators.validate_content_type(upload, config_type)\n if config_type == 'PRESET':\n validators.validate_preset(upload)\n data['content_type'] = content_type\n fname = data['upload'].name\n data['filename'] = fname.replace(' ', '_').lower()\n return data\n\n def get_size(self, obj):\n size = obj.upload.size\n return size\n\n def get_created(self, obj):\n return obj.created_at\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass ExportTaskResultSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskResult models.\"\"\"\n url = serializers.SerializerMethodField()\n size = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskResult\n fields = 'filename', 'size', 'url'\n\n def get_url(self, obj):\n request = self.context['request']\n return request.build_absolute_uri(obj.download_url)\n\n def get_size(self, obj):\n return '{0:.3f} 
MB'.format(obj.size)\n\n\nclass ExportTaskExceptionSerializer(serializers.ModelSerializer):\n    \"\"\"Serialize ExportTaskExceptions.\"\"\"\n    exception = serializers.SerializerMethodField()\n\n\n    class Meta:\n        model = ExportTaskException\n        fields = 'exception',\n\n    def get_exception(self, obj):\n        exc_info = cPickle.loads(str(obj.exception)).exc_info\n        return str(exc_info[1])\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n    \"\"\"Serialize ExportTasks models.\"\"\"\n    result = serializers.SerializerMethodField()\n    errors = serializers.SerializerMethodField()\n    started_at = serializers.SerializerMethodField()\n    finished_at = serializers.SerializerMethodField()\n    duration = serializers.SerializerMethodField()\n    url = serializers.HyperlinkedIdentityField(view_name='api:tasks-detail',\n        lookup_field='uid')\n\n\n    class Meta:\n        model = ExportTask\n        fields = ('uid', 'url', 'name', 'status', 'started_at',\n            'finished_at', 'duration', 'result', 'errors')\n\n    def get_result(self, obj):\n        \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n        try:\n            result = obj.result\n            serializer = ExportTaskResultSerializer(result, many=False,\n                context=self.context)\n            return serializer.data\n        except ExportTaskResult.DoesNotExist as e:\n            return None\n\n    def get_errors(self, obj):\n        \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n        try:\n            errors = obj.exceptions\n            serializer = ExportTaskExceptionSerializer(errors, many=True,\n                context=self.context)\n            return serializer.data\n        except ExportTaskException.DoesNotExist as e:\n            return None\n\n    def get_started_at(self, obj):\n        if not obj.started_at:\n            return None\n        else:\n            return obj.started_at\n\n    def get_finished_at(self, obj):\n        if not obj.finished_at:\n            return None\n        else:\n            return obj.finished_at\n\n    def get_duration(self, obj):\n        \"\"\"Get the duration for this ExportTask.\"\"\"\n        started = obj.started_at\n        finished = obj.finished_at\n        if started and finished:\n            return str(finished - started)\n        else:\n            return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n    \"\"\"Return a sub-set of Job model attributes.\"\"\"\n    uid = serializers.SerializerMethodField()\n    name = serializers.CharField()\n    description = serializers.CharField()\n    url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n        lookup_field='uid')\n    extent = serializers.SerializerMethodField()\n\n    def get_uid(self, obj):\n        return obj.uid\n\n    def get_extent(self, obj):\n        \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n        uid = str(obj.uid)\n        name = obj.name\n        geom = obj.the_geom\n        geometry = json.loads(GEOSGeometry(geom).geojson)\n        feature = OrderedDict()\n        feature['type'] = 'Feature'\n        feature['properties'] = {'uid': uid, 'name': name}\n        feature['geometry'] = geometry\n        return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n    \"\"\"Serialize ExportRun.\"\"\"\n    url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n        lookup_field='uid')\n    job = SimpleJobSerializer()\n    tasks = ExportTaskSerializer(many=True)\n    finished_at = serializers.SerializerMethodField()\n    duration = serializers.SerializerMethodField()\n    user = serializers.SerializerMethodField()\n\n\n    class Meta:\n        model = ExportRun\n        fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n            'user', 'status', 'job', 'tasks')\n\n    def get_finished_at(self, obj):\n        if not obj.finished_at:\n            return {}\n        else:\n            return obj.finished_at\n\n    def get_duration(self, obj):\n        \"\"\"Return the duration of the run.\"\"\"\n        started = 
obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': 
_('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = 
SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
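JobSerializer.validate in each step folds the four bbox floats into the_geom and a region lookup, then pops them (and formats) so only derived fields reach the view. A dict-level sketch of that cleanup idiom, with placeholder values:

data = {'xmin': -1.0, 'ymin': -1.0, 'xmax': 1.0, 'ymax': 1.0,
        'formats': ['shp'], 'the_geom': '<validated geometry>'}
# Same tuple-of-pops one-liner as the serializer; only derived fields survive.
data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'), data.pop('formats')
print(data)  # {'the_geom': '<validated geometry>'}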
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n\n\nclass TagSerializer(serializers.ModelSerializer):\n \"\"\"Serialize the Tag model.\"\"\"\n\n\n class Meta:\n model = Tag\n fields = 'key', 'value', 'data_model', 'geom_types'\n\n\nclass SimpleExportConfigSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of ExportConfig model attributes.\"\"\"\n uid = serializers.UUIDField(read_only=True)\n name = serializers.CharField()\n config_type = serializers.CharField()\n filename = serializers.CharField()\n published = serializers.BooleanField()\n created = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:configs-detail', lookup_field='uid')\n\n def get_created(self, obj):\n return obj.created_at\n\n\nclass ExportConfigSerializer(serializers.Serializer):\n \"\"\"Return the full set of ExportConfig model attributes.\"\"\"\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:configs-detail', lookup_field='uid')\n name = serializers.CharField(max_length=255)\n config_type = serializers.ChoiceField(['PRESET', 'TRANSLATION',\n 'TRANSFORM'])\n filename = serializers.CharField(max_length=255, read_only=True, default=''\n )\n size = serializers.SerializerMethodField()\n content_type = serializers.CharField(max_length=50, read_only=True)\n upload = serializers.FileField(allow_empty_file=False, max_length=100)\n published = serializers.BooleanField()\n created = serializers.SerializerMethodField()\n owner = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n\n def create(self, validated_data):\n \"\"\"Create an ExportConfig instance.\"\"\"\n return ExportConfig.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Update an ExportConfig instance.\"\"\"\n instance.config_type = validated_data.get('config_type', instance.\n config_type)\n instance.upload.delete(False)\n instance.upload = validated_data.get('upload', instance.upload)\n instance.name = validated_data.get('name', instance.name)\n instance.filename = validated_data.get('filename', instance.filename)\n instance.content_type = validated_data.get('content_type', instance\n .content_type)\n instance.updated_at = timezone.now()\n instance.save()\n return instance\n\n def validate(self, data):\n \"\"\"Validate the form data.\"\"\"\n logger.debug(data)\n upload = data['upload']\n config_type = data['config_type']\n content_type = validators.validate_content_type(upload, config_type)\n if config_type == 'PRESET':\n validators.validate_preset(upload)\n data['content_type'] = content_type\n fname = data['upload'].name\n data['filename'] = fname.replace(' ', '_').lower()\n return data\n\n def get_size(self, obj):\n size = obj.upload.size\n return size\n\n def get_created(self, obj):\n return obj.created_at\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass ExportTaskResultSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskResult models.\"\"\"\n url = serializers.SerializerMethodField()\n size = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskResult\n fields = 'filename', 'size', 'url'\n\n def get_url(self, obj):\n request = self.context['request']\n return request.build_absolute_uri(obj.download_url)\n\n def get_size(self, obj):\n return '{0:.3f} MB'.format(obj.size)\n\n\nclass ExportTaskExceptionSerializer(serializers.ModelSerializer):\n \"\"\"Serialize 
ExportTaskExceptions.\"\"\"\n    exception = serializers.SerializerMethodField()\n\n\n    class Meta:\n        model = ExportTaskException\n        fields = 'exception',\n\n    def get_exception(self, obj):\n        exc_info = cPickle.loads(str(obj.exception)).exc_info\n        return str(exc_info[1])\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n    \"\"\"Serialize ExportTasks models.\"\"\"\n    result = serializers.SerializerMethodField()\n    errors = serializers.SerializerMethodField()\n    started_at = serializers.SerializerMethodField()\n    finished_at = serializers.SerializerMethodField()\n    duration = serializers.SerializerMethodField()\n    url = serializers.HyperlinkedIdentityField(view_name='api:tasks-detail',\n        lookup_field='uid')\n\n\n    class Meta:\n        model = ExportTask\n        fields = ('uid', 'url', 'name', 'status', 'started_at',\n            'finished_at', 'duration', 'result', 'errors')\n\n    def get_result(self, obj):\n        \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n        try:\n            result = obj.result\n            serializer = ExportTaskResultSerializer(result, many=False,\n                context=self.context)\n            return serializer.data\n        except ExportTaskResult.DoesNotExist as e:\n            return None\n\n    def get_errors(self, obj):\n        \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n        try:\n            errors = obj.exceptions\n            serializer = ExportTaskExceptionSerializer(errors, many=True,\n                context=self.context)\n            return serializer.data\n        except ExportTaskException.DoesNotExist as e:\n            return None\n\n    def get_started_at(self, obj):\n        if not obj.started_at:\n            return None\n        else:\n            return obj.started_at\n\n    def get_finished_at(self, obj):\n        if not obj.finished_at:\n            return None\n        else:\n            return obj.finished_at\n\n    def get_duration(self, obj):\n        \"\"\"Get the duration for this ExportTask.\"\"\"\n        started = obj.started_at\n        finished = obj.finished_at\n        if started and finished:\n            return str(finished - started)\n        else:\n            return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n    \"\"\"Return a sub-set of Job model attributes.\"\"\"\n    uid = serializers.SerializerMethodField()\n    name = serializers.CharField()\n    description = serializers.CharField()\n    url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n        lookup_field='uid')\n    extent = serializers.SerializerMethodField()\n\n    def get_uid(self, obj):\n        return obj.uid\n\n    def get_extent(self, obj):\n        \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n        uid = str(obj.uid)\n        name = obj.name\n        geom = obj.the_geom\n        geometry = json.loads(GEOSGeometry(geom).geojson)\n        feature = OrderedDict()\n        feature['type'] = 'Feature'\n        feature['properties'] = {'uid': uid, 'name': name}\n        feature['geometry'] = geometry\n        return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n    \"\"\"Serialize ExportRun.\"\"\"\n    url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n        lookup_field='uid')\n    job = SimpleJobSerializer()\n    tasks = ExportTaskSerializer(many=True)\n    finished_at = serializers.SerializerMethodField()\n    duration = serializers.SerializerMethodField()\n    user = serializers.SerializerMethodField()\n\n\n    class Meta:\n        model = ExportRun\n        fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n            'user', 'status', 'job', 'tasks')\n\n    def get_finished_at(self, obj):\n        if not obj.finished_at:\n            return {}\n        else:\n            return obj.finished_at\n\n    def get_duration(self, obj):\n        \"\"\"Return the duration of the run.\"\"\"\n        started = obj.started_at\n        finished = obj.finished_at\n        if started and finished:\n            return str(finished - started)\n        else:\n            
return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = 
serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n 
\"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n\n\nclass TagSerializer(serializers.ModelSerializer):\n <docstring token>\n\n\n class Meta:\n model = Tag\n fields = 'key', 'value', 'data_model', 'geom_types'\n\n\nclass SimpleExportConfigSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of ExportConfig model attributes.\"\"\"\n uid = serializers.UUIDField(read_only=True)\n name = serializers.CharField()\n config_type = serializers.CharField()\n filename = serializers.CharField()\n published = serializers.BooleanField()\n created = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:configs-detail', lookup_field='uid')\n\n def get_created(self, obj):\n return obj.created_at\n\n\nclass ExportConfigSerializer(serializers.Serializer):\n \"\"\"Return the full set of ExportConfig model attributes.\"\"\"\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:configs-detail', lookup_field='uid')\n name = serializers.CharField(max_length=255)\n config_type = serializers.ChoiceField(['PRESET', 'TRANSLATION',\n 'TRANSFORM'])\n filename = serializers.CharField(max_length=255, read_only=True, default=''\n )\n size = serializers.SerializerMethodField()\n content_type = serializers.CharField(max_length=50, read_only=True)\n upload = serializers.FileField(allow_empty_file=False, max_length=100)\n published = serializers.BooleanField()\n created = serializers.SerializerMethodField()\n owner = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n\n def create(self, validated_data):\n \"\"\"Create an ExportConfig instance.\"\"\"\n return ExportConfig.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Update an ExportConfig instance.\"\"\"\n instance.config_type = validated_data.get('config_type', instance.\n config_type)\n instance.upload.delete(False)\n instance.upload = validated_data.get('upload', instance.upload)\n instance.name = validated_data.get('name', instance.name)\n instance.filename = validated_data.get('filename', instance.filename)\n instance.content_type = validated_data.get('content_type', instance\n .content_type)\n instance.updated_at = timezone.now()\n instance.save()\n return instance\n\n def validate(self, data):\n \"\"\"Validate the form data.\"\"\"\n logger.debug(data)\n upload = data['upload']\n config_type = data['config_type']\n content_type = validators.validate_content_type(upload, config_type)\n if config_type == 'PRESET':\n validators.validate_preset(upload)\n data['content_type'] = content_type\n fname = data['upload'].name\n data['filename'] = fname.replace(' ', '_').lower()\n return data\n\n def get_size(self, obj):\n size = obj.upload.size\n return size\n\n def get_created(self, obj):\n return obj.created_at\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass ExportTaskResultSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskResult models.\"\"\"\n url = serializers.SerializerMethodField()\n size = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskResult\n fields = 'filename', 'size', 'url'\n\n def get_url(self, obj):\n request = self.context['request']\n return request.build_absolute_uri(obj.download_url)\n\n def get_size(self, obj):\n return '{0:.3f} MB'.format(obj.size)\n\n\nclass ExportTaskExceptionSerializer(serializers.ModelSerializer):\n \"\"\"Serialize 
ExportTaskExceptions.\"\"\"\n exception = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskException\n fields = 'exception',\n\n def get_exception(self, obj):\n exc_info = cPickle.loads(str(obj.exception)).exc_info\n return str(exc_info[1])\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTasks models.\"\"\"\n result = serializers.SerializerMethodField()\n errors = serializers.SerializerMethodField()\n started_at = serializers.SerializerMethodField()\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:tasks-detail',\n lookup_field='uid')\n\n\n class Meta:\n model = ExportTask\n fields = ('uid', 'url', 'name', 'status', 'started_at',\n 'finished_at', 'duration', 'result', 'errors')\n\n def get_result(self, obj):\n \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n try:\n result = obj.result\n serializer = ExportTaskResultSerializer(result, many=False,\n context=self.context)\n return serializer.data\n except ExportTaskResult.DoesNotExist as e:\n return None\n\n def get_errors(self, obj):\n \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True,\n context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None\n\n def get_started_at(self, obj):\n if not obj.started_at:\n return None\n else:\n return obj.started_at\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return None\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Get the duration for this ExportTask.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n user = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n 
return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = 
serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n 
\"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n\n\nclass SimpleExportConfigSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of ExportConfig model attributes.\"\"\"\n uid = serializers.UUIDField(read_only=True)\n name = serializers.CharField()\n config_type = serializers.CharField()\n filename = serializers.CharField()\n published = serializers.BooleanField()\n created = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:configs-detail', lookup_field='uid')\n\n def get_created(self, obj):\n return obj.created_at\n\n\nclass ExportConfigSerializer(serializers.Serializer):\n \"\"\"Return the full set of ExportConfig model attributes.\"\"\"\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:configs-detail', lookup_field='uid')\n name = serializers.CharField(max_length=255)\n config_type = serializers.ChoiceField(['PRESET', 'TRANSLATION',\n 'TRANSFORM'])\n filename = serializers.CharField(max_length=255, read_only=True, default=''\n )\n size = serializers.SerializerMethodField()\n content_type = serializers.CharField(max_length=50, read_only=True)\n upload = serializers.FileField(allow_empty_file=False, max_length=100)\n published = serializers.BooleanField()\n created = serializers.SerializerMethodField()\n owner = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n\n def create(self, validated_data):\n \"\"\"Create an ExportConfig instance.\"\"\"\n return ExportConfig.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Update an ExportConfig instance.\"\"\"\n instance.config_type = validated_data.get('config_type', instance.\n config_type)\n instance.upload.delete(False)\n instance.upload = validated_data.get('upload', instance.upload)\n instance.name = validated_data.get('name', instance.name)\n instance.filename = validated_data.get('filename', instance.filename)\n instance.content_type = validated_data.get('content_type', instance\n .content_type)\n instance.updated_at = timezone.now()\n instance.save()\n return instance\n\n def validate(self, data):\n \"\"\"Validate the form data.\"\"\"\n logger.debug(data)\n upload = data['upload']\n config_type = data['config_type']\n content_type = validators.validate_content_type(upload, config_type)\n if config_type == 'PRESET':\n validators.validate_preset(upload)\n data['content_type'] = content_type\n fname = data['upload'].name\n data['filename'] = fname.replace(' ', '_').lower()\n return data\n\n def get_size(self, obj):\n size = obj.upload.size\n return size\n\n def get_created(self, obj):\n return obj.created_at\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass ExportTaskResultSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskResult models.\"\"\"\n url = serializers.SerializerMethodField()\n size = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskResult\n fields = 'filename', 'size', 'url'\n\n def get_url(self, obj):\n request = self.context['request']\n return request.build_absolute_uri(obj.download_url)\n\n def get_size(self, obj):\n return '{0:.3f} MB'.format(obj.size)\n\n\nclass ExportTaskExceptionSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskExceptions.\"\"\"\n exception = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskException\n fields = 'exception',\n\n def 
get_exception(self, obj):\n exc_info = cPickle.loads(str(obj.exception)).exc_info\n return str(exc_info[1])\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTasks models.\"\"\"\n result = serializers.SerializerMethodField()\n errors = serializers.SerializerMethodField()\n started_at = serializers.SerializerMethodField()\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:tasks-detail',\n lookup_field='uid')\n\n\n class Meta:\n model = ExportTask\n fields = ('uid', 'url', 'name', 'status', 'started_at',\n 'finished_at', 'duration', 'result', 'errors')\n\n def get_result(self, obj):\n \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n try:\n result = obj.result\n serializer = ExportTaskResultSerializer(result, many=False,\n context=self.context)\n return serializer.data\n except ExportTaskResult.DoesNotExist as e:\n return None\n\n def get_errors(self, obj):\n \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True,\n context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None\n\n def get_started_at(self, obj):\n if not obj.started_at:\n return None\n else:\n return obj.started_at\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return None\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Get the duration for this ExportTask.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n user = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = 
serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n 
lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, 
many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n\n\nclass SimpleExportConfigSerializer(serializers.Serializer):\n <docstring token>\n uid = serializers.UUIDField(read_only=True)\n name = serializers.CharField()\n config_type = serializers.CharField()\n filename = serializers.CharField()\n published = serializers.BooleanField()\n created = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:configs-detail', lookup_field='uid')\n\n def get_created(self, obj):\n return obj.created_at\n\n\nclass ExportConfigSerializer(serializers.Serializer):\n \"\"\"Return the full set of ExportConfig model attributes.\"\"\"\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:configs-detail', lookup_field='uid')\n name = serializers.CharField(max_length=255)\n config_type = serializers.ChoiceField(['PRESET', 'TRANSLATION',\n 'TRANSFORM'])\n filename = serializers.CharField(max_length=255, read_only=True, default=''\n )\n size = serializers.SerializerMethodField()\n content_type = serializers.CharField(max_length=50, read_only=True)\n upload = serializers.FileField(allow_empty_file=False, max_length=100)\n published = serializers.BooleanField()\n created = serializers.SerializerMethodField()\n owner = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n\n def create(self, validated_data):\n \"\"\"Create an ExportConfig instance.\"\"\"\n return ExportConfig.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Update an ExportConfig instance.\"\"\"\n instance.config_type = validated_data.get('config_type', instance.\n config_type)\n instance.upload.delete(False)\n instance.upload = validated_data.get('upload', instance.upload)\n instance.name = validated_data.get('name', instance.name)\n instance.filename = validated_data.get('filename', instance.filename)\n instance.content_type = validated_data.get('content_type', instance\n .content_type)\n instance.updated_at = timezone.now()\n instance.save()\n return instance\n\n def validate(self, data):\n \"\"\"Validate the form data.\"\"\"\n logger.debug(data)\n upload = data['upload']\n config_type = data['config_type']\n content_type = validators.validate_content_type(upload, config_type)\n if config_type == 'PRESET':\n validators.validate_preset(upload)\n data['content_type'] = content_type\n fname = data['upload'].name\n data['filename'] = fname.replace(' ', '_').lower()\n return data\n\n def get_size(self, obj):\n size = obj.upload.size\n return size\n\n def get_created(self, obj):\n return obj.created_at\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass ExportTaskResultSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskResult models.\"\"\"\n url = serializers.SerializerMethodField()\n size = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskResult\n fields = 'filename', 'size', 'url'\n\n def get_url(self, obj):\n request = self.context['request']\n return request.build_absolute_uri(obj.download_url)\n\n def get_size(self, obj):\n return '{0:.3f} MB'.format(obj.size)\n\n\nclass ExportTaskExceptionSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskExceptions.\"\"\"\n exception = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskException\n fields = 'exception',\n\n def get_exception(self, obj):\n exc_info = 
cPickle.loads(str(obj.exception)).exc_info\n return str(exc_info[1])\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTasks models.\"\"\"\n result = serializers.SerializerMethodField()\n errors = serializers.SerializerMethodField()\n started_at = serializers.SerializerMethodField()\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:tasks-detail',\n lookup_field='uid')\n\n\n class Meta:\n model = ExportTask\n fields = ('uid', 'url', 'name', 'status', 'started_at',\n 'finished_at', 'duration', 'result', 'errors')\n\n def get_result(self, obj):\n \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n try:\n result = obj.result\n serializer = ExportTaskResultSerializer(result, many=False,\n context=self.context)\n return serializer.data\n except ExportTaskResult.DoesNotExist as e:\n return None\n\n def get_errors(self, obj):\n \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True,\n context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None\n\n def get_started_at(self, obj):\n if not obj.started_at:\n return None\n else:\n return obj.started_at\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return None\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Get the duration for this ExportTask.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n user = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass 
RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = 
serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return 
serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n\n\nclass SimpleExportConfigSerializer(serializers.Serializer):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def get_created(self, obj):\n return obj.created_at\n\n\nclass ExportConfigSerializer(serializers.Serializer):\n \"\"\"Return the full set of ExportConfig model attributes.\"\"\"\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:configs-detail', lookup_field='uid')\n name = serializers.CharField(max_length=255)\n config_type = serializers.ChoiceField(['PRESET', 'TRANSLATION',\n 'TRANSFORM'])\n filename = serializers.CharField(max_length=255, read_only=True, default=''\n )\n size = serializers.SerializerMethodField()\n content_type = serializers.CharField(max_length=50, read_only=True)\n upload = serializers.FileField(allow_empty_file=False, max_length=100)\n published = serializers.BooleanField()\n created = serializers.SerializerMethodField()\n owner = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n\n def create(self, validated_data):\n \"\"\"Create an ExportConfig instance.\"\"\"\n return ExportConfig.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Update an ExportConfig instance.\"\"\"\n instance.config_type = validated_data.get('config_type', instance.\n config_type)\n instance.upload.delete(False)\n instance.upload = validated_data.get('upload', instance.upload)\n instance.name = validated_data.get('name', instance.name)\n instance.filename = validated_data.get('filename', instance.filename)\n instance.content_type = validated_data.get('content_type', instance\n .content_type)\n instance.updated_at = timezone.now()\n instance.save()\n return instance\n\n def validate(self, data):\n \"\"\"Validate the form data.\"\"\"\n logger.debug(data)\n upload = data['upload']\n config_type = data['config_type']\n content_type = validators.validate_content_type(upload, config_type)\n if config_type == 'PRESET':\n validators.validate_preset(upload)\n data['content_type'] = content_type\n fname = data['upload'].name\n data['filename'] = fname.replace(' ', '_').lower()\n return data\n\n def get_size(self, obj):\n size = obj.upload.size\n return size\n\n def get_created(self, obj):\n return obj.created_at\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass ExportTaskResultSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskResult models.\"\"\"\n url = serializers.SerializerMethodField()\n size = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskResult\n fields = 'filename', 'size', 'url'\n\n def get_url(self, obj):\n request = self.context['request']\n return request.build_absolute_uri(obj.download_url)\n\n def get_size(self, obj):\n return '{0:.3f} MB'.format(obj.size)\n\n\nclass ExportTaskExceptionSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskExceptions.\"\"\"\n exception = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskException\n fields = 'exception',\n\n def get_exception(self, obj):\n exc_info = cPickle.loads(str(obj.exception)).exc_info\n return str(exc_info[1])\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTasks models.\"\"\"\n result = 
serializers.SerializerMethodField()\n errors = serializers.SerializerMethodField()\n started_at = serializers.SerializerMethodField()\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:tasks-detail',\n lookup_field='uid')\n\n\n class Meta:\n model = ExportTask\n fields = ('uid', 'url', 'name', 'status', 'started_at',\n 'finished_at', 'duration', 'result', 'errors')\n\n def get_result(self, obj):\n \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n try:\n result = obj.result\n serializer = ExportTaskResultSerializer(result, many=False,\n context=self.context)\n return serializer.data\n except ExportTaskResult.DoesNotExist as e:\n return None\n\n def get_errors(self, obj):\n \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True,\n context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None\n\n def get_started_at(self, obj):\n if not obj.started_at:\n return None\n else:\n return obj.started_at\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return None\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Get the duration for this ExportTask.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n user = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 
'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = 
serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
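A note on the module above: the get_extent() methods on SimpleJobSerializer, ListJobSerializer, and JobSerializer all hand-build the same GeoJSON Feature wrapper. The sketch below isolates that assembly as plain Python; the bbox dict and the uid/name values are hypothetical stand-ins for json.loads(GEOSGeometry(obj.the_geom).geojson) and the model fields, so it runs without Django or GEOS installed.

import json
from collections import OrderedDict


def extent_feature(uid, name, geometry):
    """Wrap a geometry dict in a GeoJSON Feature, mirroring get_extent()."""
    feature = OrderedDict()
    feature['type'] = 'Feature'
    feature['properties'] = {'uid': uid, 'name': name}
    feature['geometry'] = geometry
    return feature


# Hypothetical polygon standing in for an export Job's the_geom extent.
bbox = {'type': 'Polygon', 'coordinates': [[[7.0, 16.1], [7.0, 27.6],
    [23.9, 27.6], [23.9, 16.1], [7.0, 16.1]]]}
print(json.dumps(extent_feature('some-uid', 'Example Export', bbox)))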
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n\n\nclass SimpleExportConfigSerializer(serializers.Serializer):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass ExportConfigSerializer(serializers.Serializer):\n \"\"\"Return the full set of ExportConfig model attributes.\"\"\"\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:configs-detail', lookup_field='uid')\n name = serializers.CharField(max_length=255)\n config_type = serializers.ChoiceField(['PRESET', 'TRANSLATION',\n 'TRANSFORM'])\n filename = serializers.CharField(max_length=255, read_only=True, default=''\n )\n size = serializers.SerializerMethodField()\n content_type = serializers.CharField(max_length=50, read_only=True)\n upload = serializers.FileField(allow_empty_file=False, max_length=100)\n published = serializers.BooleanField()\n created = serializers.SerializerMethodField()\n owner = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n\n def create(self, validated_data):\n \"\"\"Create an ExportConfig instance.\"\"\"\n return ExportConfig.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Update an ExportConfig instance.\"\"\"\n instance.config_type = validated_data.get('config_type', instance.\n config_type)\n instance.upload.delete(False)\n instance.upload = validated_data.get('upload', instance.upload)\n instance.name = validated_data.get('name', instance.name)\n instance.filename = validated_data.get('filename', instance.filename)\n instance.content_type = validated_data.get('content_type', instance\n .content_type)\n instance.updated_at = timezone.now()\n instance.save()\n return instance\n\n def validate(self, data):\n \"\"\"Validate the form data.\"\"\"\n logger.debug(data)\n upload = data['upload']\n config_type = data['config_type']\n content_type = validators.validate_content_type(upload, config_type)\n if config_type == 'PRESET':\n validators.validate_preset(upload)\n data['content_type'] = content_type\n fname = data['upload'].name\n data['filename'] = fname.replace(' ', '_').lower()\n return data\n\n def get_size(self, obj):\n size = obj.upload.size\n return size\n\n def get_created(self, obj):\n return obj.created_at\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass ExportTaskResultSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskResult models.\"\"\"\n url = serializers.SerializerMethodField()\n size = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskResult\n fields = 'filename', 'size', 'url'\n\n def get_url(self, obj):\n request = self.context['request']\n return request.build_absolute_uri(obj.download_url)\n\n def get_size(self, obj):\n return '{0:.3f} MB'.format(obj.size)\n\n\nclass ExportTaskExceptionSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskExceptions.\"\"\"\n exception = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskException\n fields = 'exception',\n\n def get_exception(self, obj):\n exc_info = cPickle.loads(str(obj.exception)).exc_info\n return str(exc_info[1])\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTasks models.\"\"\"\n result = serializers.SerializerMethodField()\n errors = 
serializers.SerializerMethodField()\n    started_at = serializers.SerializerMethodField()\n    finished_at = serializers.SerializerMethodField()\n    duration = serializers.SerializerMethodField()\n    url = serializers.HyperlinkedIdentityField(view_name='api:tasks-detail',\n        lookup_field='uid')\n\n\n    class Meta:\n        model = ExportTask\n        fields = ('uid', 'url', 'name', 'status', 'started_at',\n            'finished_at', 'duration', 'result', 'errors')\n\n    def get_result(self, obj):\n        \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n        try:\n            result = obj.result\n            serializer = ExportTaskResultSerializer(result, many=False,\n                context=self.context)\n            return serializer.data\n        except ExportTaskResult.DoesNotExist as e:\n            return None\n\n    def get_errors(self, obj):\n        \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n        try:\n            errors = obj.exceptions\n            serializer = ExportTaskExceptionSerializer(errors, many=True,\n                context=self.context)\n            return serializer.data\n        except ExportTaskException.DoesNotExist as e:\n            return None\n\n    def get_started_at(self, obj):\n        if not obj.started_at:\n            return None\n        else:\n            return obj.started_at\n\n    def get_finished_at(self, obj):\n        if not obj.finished_at:\n            return None\n        else:\n            return obj.finished_at\n\n    def get_duration(self, obj):\n        \"\"\"Get the duration for this ExportTask.\"\"\"\n        started = obj.started_at\n        finished = obj.finished_at\n        if started and finished:\n            return str(finished - started)\n        else:\n            return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n    \"\"\"Return a sub-set of Job model attributes.\"\"\"\n    uid = serializers.SerializerMethodField()\n    name = serializers.CharField()\n    description = serializers.CharField()\n    url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n        lookup_field='uid')\n    extent = serializers.SerializerMethodField()\n\n    def get_uid(self, obj):\n        return obj.uid\n\n    def get_extent(self, obj):\n        \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n        uid = str(obj.uid)\n        name = obj.name\n        geom = obj.the_geom\n        geometry = json.loads(GEOSGeometry(geom).geojson)\n        feature = OrderedDict()\n        feature['type'] = 'Feature'\n        feature['properties'] = {'uid': uid, 'name': name}\n        feature['geometry'] = geometry\n        return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n    \"\"\"Serialize ExportRun.\"\"\"\n    url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n        lookup_field='uid')\n    job = SimpleJobSerializer()\n    tasks = ExportTaskSerializer(many=True)\n    finished_at = serializers.SerializerMethodField()\n    duration = serializers.SerializerMethodField()\n    user = serializers.SerializerMethodField()\n\n\n    class Meta:\n        model = ExportRun\n        fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n            'user', 'status', 'job', 'tasks')\n\n    def get_finished_at(self, obj):\n        if not obj.finished_at:\n            return {}\n        else:\n            return obj.finished_at\n\n    def get_duration(self, obj):\n        \"\"\"Return the duration of the run.\"\"\"\n        started = obj.started_at\n        finished = obj.finished_at\n        if started and finished:\n            return str(finished - started)\n        else:\n            return None\n\n    def get_user(self, obj):\n        return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n    id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n    \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n    class Meta:\n        model = RegionMask\n        geo_field = 'the_geom'\n        fields = 'the_geom',\n\n\nclass 
RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = 
serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass ExportConfigSerializer(serializers.Serializer):\n \"\"\"Return the full set of ExportConfig model attributes.\"\"\"\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:configs-detail', lookup_field='uid')\n name = serializers.CharField(max_length=255)\n config_type = serializers.ChoiceField(['PRESET', 'TRANSLATION',\n 'TRANSFORM'])\n filename = serializers.CharField(max_length=255, read_only=True, default=''\n )\n size = serializers.SerializerMethodField()\n content_type = serializers.CharField(max_length=50, read_only=True)\n upload = serializers.FileField(allow_empty_file=False, max_length=100)\n published = serializers.BooleanField()\n created = serializers.SerializerMethodField()\n owner = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n\n def create(self, validated_data):\n \"\"\"Create an ExportConfig instance.\"\"\"\n return ExportConfig.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Update an ExportConfig instance.\"\"\"\n instance.config_type = validated_data.get('config_type', instance.\n config_type)\n instance.upload.delete(False)\n instance.upload = validated_data.get('upload', instance.upload)\n instance.name = validated_data.get('name', instance.name)\n instance.filename = validated_data.get('filename', instance.filename)\n instance.content_type = validated_data.get('content_type', instance\n .content_type)\n instance.updated_at = timezone.now()\n instance.save()\n return instance\n\n def validate(self, data):\n \"\"\"Validate the form data.\"\"\"\n logger.debug(data)\n upload = data['upload']\n config_type = data['config_type']\n content_type = validators.validate_content_type(upload, config_type)\n if config_type == 'PRESET':\n validators.validate_preset(upload)\n data['content_type'] = content_type\n fname = data['upload'].name\n data['filename'] = fname.replace(' ', '_').lower()\n return data\n\n def get_size(self, obj):\n size = obj.upload.size\n return size\n\n def get_created(self, obj):\n return obj.created_at\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass ExportTaskResultSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskResult models.\"\"\"\n url = serializers.SerializerMethodField()\n size = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskResult\n fields = 'filename', 'size', 'url'\n\n def get_url(self, obj):\n request = self.context['request']\n return request.build_absolute_uri(obj.download_url)\n\n def get_size(self, obj):\n return '{0:.3f} MB'.format(obj.size)\n\n\nclass ExportTaskExceptionSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskExceptions.\"\"\"\n exception = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskException\n fields = 'exception',\n\n def get_exception(self, obj):\n exc_info = cPickle.loads(str(obj.exception)).exc_info\n return str(exc_info[1])\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTasks models.\"\"\"\n result = serializers.SerializerMethodField()\n errors = serializers.SerializerMethodField()\n started_at = serializers.SerializerMethodField()\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n url = 
serializers.HyperlinkedIdentityField(view_name='api:tasks-detail',\n        lookup_field='uid')\n\n\n    class Meta:\n        model = ExportTask\n        fields = ('uid', 'url', 'name', 'status', 'started_at',\n            'finished_at', 'duration', 'result', 'errors')\n\n    def get_result(self, obj):\n        \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n        try:\n            result = obj.result\n            serializer = ExportTaskResultSerializer(result, many=False,\n                context=self.context)\n            return serializer.data\n        except ExportTaskResult.DoesNotExist as e:\n            return None\n\n    def get_errors(self, obj):\n        \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n        try:\n            errors = obj.exceptions\n            serializer = ExportTaskExceptionSerializer(errors, many=True,\n                context=self.context)\n            return serializer.data\n        except ExportTaskException.DoesNotExist as e:\n            return None\n\n    def get_started_at(self, obj):\n        if not obj.started_at:\n            return None\n        else:\n            return obj.started_at\n\n    def get_finished_at(self, obj):\n        if not obj.finished_at:\n            return None\n        else:\n            return obj.finished_at\n\n    def get_duration(self, obj):\n        \"\"\"Get the duration for this ExportTask.\"\"\"\n        started = obj.started_at\n        finished = obj.finished_at\n        if started and finished:\n            return str(finished - started)\n        else:\n            return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n    \"\"\"Return a sub-set of Job model attributes.\"\"\"\n    uid = serializers.SerializerMethodField()\n    name = serializers.CharField()\n    description = serializers.CharField()\n    url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n        lookup_field='uid')\n    extent = serializers.SerializerMethodField()\n\n    def get_uid(self, obj):\n        return obj.uid\n\n    def get_extent(self, obj):\n        \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n        uid = str(obj.uid)\n        name = obj.name\n        geom = obj.the_geom\n        geometry = json.loads(GEOSGeometry(geom).geojson)\n        feature = OrderedDict()\n        feature['type'] = 'Feature'\n        feature['properties'] = {'uid': uid, 'name': name}\n        feature['geometry'] = geometry\n        return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n    \"\"\"Serialize ExportRun.\"\"\"\n    url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n        lookup_field='uid')\n    job = SimpleJobSerializer()\n    tasks = ExportTaskSerializer(many=True)\n    finished_at = serializers.SerializerMethodField()\n    duration = serializers.SerializerMethodField()\n    user = serializers.SerializerMethodField()\n\n\n    class Meta:\n        model = ExportRun\n        fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n            'user', 'status', 'job', 'tasks')\n\n    def get_finished_at(self, obj):\n        if not obj.finished_at:\n            return {}\n        else:\n            return obj.finished_at\n\n    def get_duration(self, obj):\n        \"\"\"Return the duration of the run.\"\"\"\n        started = obj.started_at\n        finished = obj.finished_at\n        if started and finished:\n            return str(finished - started)\n        else:\n            return None\n\n    def get_user(self, obj):\n        return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n    id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n    \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n    class Meta:\n        model = RegionMask\n        geo_field = 'the_geom'\n        fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n    \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n    url = serializers.HyperlinkedIdentityField(view_name=\n        'api:regions-detail', lookup_field='uid')\n    id 
= serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = 
serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass ExportConfigSerializer(serializers.Serializer):\n <docstring token>\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:configs-detail', lookup_field='uid')\n name = serializers.CharField(max_length=255)\n config_type = serializers.ChoiceField(['PRESET', 'TRANSLATION',\n 'TRANSFORM'])\n filename = serializers.CharField(max_length=255, read_only=True, default=''\n )\n size = serializers.SerializerMethodField()\n content_type = serializers.CharField(max_length=50, read_only=True)\n upload = serializers.FileField(allow_empty_file=False, max_length=100)\n published = serializers.BooleanField()\n created = serializers.SerializerMethodField()\n owner = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n\n def create(self, validated_data):\n \"\"\"Create an ExportConfig instance.\"\"\"\n return ExportConfig.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Update an ExportConfig instance.\"\"\"\n instance.config_type = validated_data.get('config_type', instance.\n config_type)\n instance.upload.delete(False)\n instance.upload = validated_data.get('upload', instance.upload)\n instance.name = validated_data.get('name', instance.name)\n instance.filename = validated_data.get('filename', instance.filename)\n instance.content_type = validated_data.get('content_type', instance\n .content_type)\n instance.updated_at = timezone.now()\n instance.save()\n return instance\n\n def validate(self, data):\n \"\"\"Validate the form data.\"\"\"\n logger.debug(data)\n upload = data['upload']\n config_type = data['config_type']\n content_type = validators.validate_content_type(upload, config_type)\n if config_type == 'PRESET':\n validators.validate_preset(upload)\n data['content_type'] = content_type\n fname = data['upload'].name\n data['filename'] = fname.replace(' ', '_').lower()\n return data\n\n def get_size(self, obj):\n size = obj.upload.size\n return size\n\n def get_created(self, obj):\n return obj.created_at\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass ExportTaskResultSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskResult models.\"\"\"\n url = serializers.SerializerMethodField()\n size = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskResult\n fields = 'filename', 'size', 'url'\n\n def get_url(self, obj):\n request = self.context['request']\n return request.build_absolute_uri(obj.download_url)\n\n def get_size(self, obj):\n return '{0:.3f} MB'.format(obj.size)\n\n\nclass ExportTaskExceptionSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskExceptions.\"\"\"\n exception = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskException\n fields = 'exception',\n\n def get_exception(self, obj):\n exc_info = cPickle.loads(str(obj.exception)).exc_info\n return str(exc_info[1])\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTasks models.\"\"\"\n result = serializers.SerializerMethodField()\n errors = serializers.SerializerMethodField()\n started_at = serializers.SerializerMethodField()\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:tasks-detail',\n lookup_field='uid')\n\n\n class 
Meta:\n        model = ExportTask\n        fields = ('uid', 'url', 'name', 'status', 'started_at',\n            'finished_at', 'duration', 'result', 'errors')\n\n    def get_result(self, obj):\n        \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n        try:\n            result = obj.result\n            serializer = ExportTaskResultSerializer(result, many=False,\n                context=self.context)\n            return serializer.data\n        except ExportTaskResult.DoesNotExist as e:\n            return None\n\n    def get_errors(self, obj):\n        \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n        try:\n            errors = obj.exceptions\n            serializer = ExportTaskExceptionSerializer(errors, many=True,\n                context=self.context)\n            return serializer.data\n        except ExportTaskException.DoesNotExist as e:\n            return None\n\n    def get_started_at(self, obj):\n        if not obj.started_at:\n            return None\n        else:\n            return obj.started_at\n\n    def get_finished_at(self, obj):\n        if not obj.finished_at:\n            return None\n        else:\n            return obj.finished_at\n\n    def get_duration(self, obj):\n        \"\"\"Get the duration for this ExportTask.\"\"\"\n        started = obj.started_at\n        finished = obj.finished_at\n        if started and finished:\n            return str(finished - started)\n        else:\n            return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n    \"\"\"Return a sub-set of Job model attributes.\"\"\"\n    uid = serializers.SerializerMethodField()\n    name = serializers.CharField()\n    description = serializers.CharField()\n    url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n        lookup_field='uid')\n    extent = serializers.SerializerMethodField()\n\n    def get_uid(self, obj):\n        return obj.uid\n\n    def get_extent(self, obj):\n        \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n        uid = str(obj.uid)\n        name = obj.name\n        geom = obj.the_geom\n        geometry = json.loads(GEOSGeometry(geom).geojson)\n        feature = OrderedDict()\n        feature['type'] = 'Feature'\n        feature['properties'] = {'uid': uid, 'name': name}\n        feature['geometry'] = geometry\n        return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n    \"\"\"Serialize ExportRun.\"\"\"\n    url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n        lookup_field='uid')\n    job = SimpleJobSerializer()\n    tasks = ExportTaskSerializer(many=True)\n    finished_at = serializers.SerializerMethodField()\n    duration = serializers.SerializerMethodField()\n    user = serializers.SerializerMethodField()\n\n\n    class Meta:\n        model = ExportRun\n        fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n            'user', 'status', 'job', 'tasks')\n\n    def get_finished_at(self, obj):\n        if not obj.finished_at:\n            return {}\n        else:\n            return obj.finished_at\n\n    def get_duration(self, obj):\n        \"\"\"Return the duration of the run.\"\"\"\n        started = obj.started_at\n        finished = obj.finished_at\n        if started and finished:\n            return str(finished - started)\n        else:\n            return None\n\n    def get_user(self, obj):\n        return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n    id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n    \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n    class Meta:\n        model = RegionMask\n        geo_field = 'the_geom'\n        fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n    \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n    url = serializers.HyperlinkedIdentityField(view_name=\n        'api:regions-detail', lookup_field='uid')\n    id = serializers.SerializerMethodField()\n\n\n    class Meta:\n        model = Region\n        geo_field = 'the_geom'\n        
fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = 
serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass ExportConfigSerializer(serializers.Serializer):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def create(self, validated_data):\n \"\"\"Create an ExportConfig instance.\"\"\"\n return ExportConfig.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Update an ExportConfig instance.\"\"\"\n instance.config_type = validated_data.get('config_type', instance.\n config_type)\n instance.upload.delete(False)\n instance.upload = validated_data.get('upload', instance.upload)\n instance.name = validated_data.get('name', instance.name)\n instance.filename = validated_data.get('filename', instance.filename)\n instance.content_type = validated_data.get('content_type', instance\n .content_type)\n instance.updated_at = timezone.now()\n instance.save()\n return instance\n\n def validate(self, data):\n \"\"\"Validate the form data.\"\"\"\n logger.debug(data)\n upload = data['upload']\n config_type = data['config_type']\n content_type = validators.validate_content_type(upload, config_type)\n if config_type == 'PRESET':\n validators.validate_preset(upload)\n data['content_type'] = content_type\n fname = data['upload'].name\n data['filename'] = fname.replace(' ', '_').lower()\n return data\n\n def get_size(self, obj):\n size = obj.upload.size\n return size\n\n def get_created(self, obj):\n return obj.created_at\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass ExportTaskResultSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskResult models.\"\"\"\n url = serializers.SerializerMethodField()\n size = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskResult\n fields = 'filename', 'size', 'url'\n\n def get_url(self, obj):\n request = self.context['request']\n return request.build_absolute_uri(obj.download_url)\n\n def get_size(self, obj):\n return '{0:.3f} MB'.format(obj.size)\n\n\nclass ExportTaskExceptionSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskExceptions.\"\"\"\n exception = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskException\n fields = 'exception',\n\n def get_exception(self, obj):\n exc_info = cPickle.loads(str(obj.exception)).exc_info\n return str(exc_info[1])\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTasks models.\"\"\"\n result = serializers.SerializerMethodField()\n errors = serializers.SerializerMethodField()\n started_at = serializers.SerializerMethodField()\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:tasks-detail',\n lookup_field='uid')\n\n\n class Meta:\n model = ExportTask\n fields = ('uid', 'url', 'name', 'status', 'started_at',\n 'finished_at', 'duration', 'result', 'errors')\n\n def get_result(self, obj):\n \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n try:\n result = obj.result\n serializer = ExportTaskResultSerializer(result, many=False,\n context=self.context)\n return serializer.data\n except ExportTaskResult.DoesNotExist as e:\n return None\n\n def get_errors(self, obj):\n \"\"\"Serialize the ExportTaskExceptions for this 
ExportTask.\"\"\"\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True,\n context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None\n\n def get_started_at(self, obj):\n if not obj.started_at:\n return None\n else:\n return obj.started_at\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return None\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Get the duration for this ExportTask.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n user = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a 
representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = 
serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
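The serializers in this step lean heavily on SerializerMethodField, which DRF resolves to a get_<field_name> method on the serializer class (get_owner, get_extent, get_duration and so on above). A dependency-free sketch of that lookup follows; the Mini* names are hypothetical stand-ins for illustration, not DRF's actual classes.

class MiniMethodField:
    # Stand-in for serializers.SerializerMethodField: remember the field
    # name at bind time, then call get_<name> on the owning serializer.
    def bind(self, name, serializer):
        self.method_name = 'get_' + name
        self.serializer = serializer

    def to_representation(self, obj):
        return getattr(self.serializer, self.method_name)(obj)


class MiniSerializer:
    fields = {}

    def __init__(self, instance):
        self.instance = instance
        for name, field in self.fields.items():
            field.bind(name, self)

    @property
    def data(self):
        return {name: field.to_representation(self.instance)
                for name, field in self.fields.items()}


class OwnerOnly(MiniSerializer):
    fields = {'owner': MiniMethodField()}

    def get_owner(self, obj):
        return obj.user.username


class _User: username = 'demo'      # hypothetical fixture objects
class _Job: user = _User()
print(OwnerOnly(_Job()).data)       # -> {'owner': 'demo'}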
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass ExportConfigSerializer(serializers.Serializer):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def create(self, validated_data):\n \"\"\"Create an ExportConfig instance.\"\"\"\n return ExportConfig.objects.create(**validated_data)\n <function token>\n\n def validate(self, data):\n \"\"\"Validate the form data.\"\"\"\n logger.debug(data)\n upload = data['upload']\n config_type = data['config_type']\n content_type = validators.validate_content_type(upload, config_type)\n if config_type == 'PRESET':\n validators.validate_preset(upload)\n data['content_type'] = content_type\n fname = data['upload'].name\n data['filename'] = fname.replace(' ', '_').lower()\n return data\n\n def get_size(self, obj):\n size = obj.upload.size\n return size\n\n def get_created(self, obj):\n return obj.created_at\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass ExportTaskResultSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskResult models.\"\"\"\n url = serializers.SerializerMethodField()\n size = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskResult\n fields = 'filename', 'size', 'url'\n\n def get_url(self, obj):\n request = self.context['request']\n return request.build_absolute_uri(obj.download_url)\n\n def get_size(self, obj):\n return '{0:.3f} MB'.format(obj.size)\n\n\nclass ExportTaskExceptionSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskExceptions.\"\"\"\n exception = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskException\n fields = 'exception',\n\n def get_exception(self, obj):\n exc_info = cPickle.loads(str(obj.exception)).exc_info\n return str(exc_info[1])\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTasks models.\"\"\"\n result = serializers.SerializerMethodField()\n errors = serializers.SerializerMethodField()\n started_at = serializers.SerializerMethodField()\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:tasks-detail',\n lookup_field='uid')\n\n\n class Meta:\n model = ExportTask\n fields = ('uid', 'url', 'name', 'status', 'started_at',\n 'finished_at', 'duration', 'result', 'errors')\n\n def get_result(self, obj):\n \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n try:\n result = obj.result\n serializer = ExportTaskResultSerializer(result, many=False,\n context=self.context)\n return serializer.data\n except ExportTaskResult.DoesNotExist as e:\n return None\n\n def get_errors(self, obj):\n \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True,\n context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None\n\n def get_started_at(self, obj):\n if not obj.started_at:\n return None\n else:\n return obj.started_at\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return None\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Get the duration for this ExportTask.\"\"\"\n started = obj.started_at\n 
finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n user = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n 
\"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, 
validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
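Four separate get_extent methods in this module build the identical GeoJSON Feature by hand. The shared shape, extracted into one function as a sketch: the plain geometry dict below stands in for json.loads(GEOSGeometry(geom).geojson) so it runs without GeoDjango, and the sample values are hypothetical.

import json
from collections import OrderedDict

def extent_feature(uid, name, geometry):
    # Same key order the serializers emit: type, properties, geometry.
    feature = OrderedDict()
    feature['type'] = 'Feature'
    feature['properties'] = {'uid': uid, 'name': name}
    feature['geometry'] = geometry
    return feature

bbox = {'type': 'Polygon', 'coordinates': [[[7.0, 16.1], [25.0, 16.1],
        [25.0, 27.6], [7.0, 27.6], [7.0, 16.1]]]}   # hypothetical extent
print(json.dumps(extent_feature('a-uid', 'demo job', bbox)))

Hoisting the feature construction into a helper or mixin like this would remove the four copies without changing the serialized output.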
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass ExportConfigSerializer(serializers.Serializer):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def create(self, validated_data):\n \"\"\"Create an ExportConfig instance.\"\"\"\n return ExportConfig.objects.create(**validated_data)\n <function token>\n\n def validate(self, data):\n \"\"\"Validate the form data.\"\"\"\n logger.debug(data)\n upload = data['upload']\n config_type = data['config_type']\n content_type = validators.validate_content_type(upload, config_type)\n if config_type == 'PRESET':\n validators.validate_preset(upload)\n data['content_type'] = content_type\n fname = data['upload'].name\n data['filename'] = fname.replace(' ', '_').lower()\n return data\n\n def get_size(self, obj):\n size = obj.upload.size\n return size\n\n def get_created(self, obj):\n return obj.created_at\n <function token>\n\n\nclass ExportTaskResultSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskResult models.\"\"\"\n url = serializers.SerializerMethodField()\n size = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskResult\n fields = 'filename', 'size', 'url'\n\n def get_url(self, obj):\n request = self.context['request']\n return request.build_absolute_uri(obj.download_url)\n\n def get_size(self, obj):\n return '{0:.3f} MB'.format(obj.size)\n\n\nclass ExportTaskExceptionSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskExceptions.\"\"\"\n exception = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskException\n fields = 'exception',\n\n def get_exception(self, obj):\n exc_info = cPickle.loads(str(obj.exception)).exc_info\n return str(exc_info[1])\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTasks models.\"\"\"\n result = serializers.SerializerMethodField()\n errors = serializers.SerializerMethodField()\n started_at = serializers.SerializerMethodField()\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:tasks-detail',\n lookup_field='uid')\n\n\n class Meta:\n model = ExportTask\n fields = ('uid', 'url', 'name', 'status', 'started_at',\n 'finished_at', 'duration', 'result', 'errors')\n\n def get_result(self, obj):\n \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n try:\n result = obj.result\n serializer = ExportTaskResultSerializer(result, many=False,\n context=self.context)\n return serializer.data\n except ExportTaskResult.DoesNotExist as e:\n return None\n\n def get_errors(self, obj):\n \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True,\n context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None\n\n def get_started_at(self, obj):\n if not obj.started_at:\n return None\n else:\n return obj.started_at\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return None\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Get the duration for this ExportTask.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started 
and finished:\n return str(finished - started)\n else:\n return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n user = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = 
serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not 
implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
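Both get_duration implementations rely on plain datetime arithmetic: subtracting two datetimes yields a timedelta, and str() of a timedelta is the 'H:MM:SS' text the API returns. A quick check with hypothetical timestamps:

from datetime import datetime, timedelta

started = datetime(2016, 3, 1, 12, 0, 0)              # hypothetical values
finished = started + timedelta(minutes=3, seconds=42)
print(str(finished - started))                        # -> 0:03:42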
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass ExportConfigSerializer(serializers.Serializer):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def create(self, validated_data):\n \"\"\"Create an ExportConfig instance.\"\"\"\n return ExportConfig.objects.create(**validated_data)\n <function token>\n <function token>\n\n def get_size(self, obj):\n size = obj.upload.size\n return size\n\n def get_created(self, obj):\n return obj.created_at\n <function token>\n\n\nclass ExportTaskResultSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskResult models.\"\"\"\n url = serializers.SerializerMethodField()\n size = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskResult\n fields = 'filename', 'size', 'url'\n\n def get_url(self, obj):\n request = self.context['request']\n return request.build_absolute_uri(obj.download_url)\n\n def get_size(self, obj):\n return '{0:.3f} MB'.format(obj.size)\n\n\nclass ExportTaskExceptionSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskExceptions.\"\"\"\n exception = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskException\n fields = 'exception',\n\n def get_exception(self, obj):\n exc_info = cPickle.loads(str(obj.exception)).exc_info\n return str(exc_info[1])\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTasks models.\"\"\"\n result = serializers.SerializerMethodField()\n errors = serializers.SerializerMethodField()\n started_at = serializers.SerializerMethodField()\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:tasks-detail',\n lookup_field='uid')\n\n\n class Meta:\n model = ExportTask\n fields = ('uid', 'url', 'name', 'status', 'started_at',\n 'finished_at', 'duration', 'result', 'errors')\n\n def get_result(self, obj):\n \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n try:\n result = obj.result\n serializer = ExportTaskResultSerializer(result, many=False,\n context=self.context)\n return serializer.data\n except ExportTaskResult.DoesNotExist as e:\n return None\n\n def get_errors(self, obj):\n \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True,\n context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None\n\n def get_started_at(self, obj):\n if not obj.started_at:\n return None\n else:\n return obj.started_at\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return None\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Get the duration for this ExportTask.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = 
serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n user = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = 
SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n 
regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
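JobSerializer.validate turns four write-only floats into a geometry and then pops them (plus formats) so that create() only ever sees model fields. The control flow in isolation, as a sketch with the project's validators module stubbed by a simple closed-ring builder:

def bbox_ring(xmin, ymin, xmax, ymax):
    # Stub for validators.validate_bbox: a closed ring in GeoJSON order.
    return [[(xmin, ymin), (xmax, ymin), (xmax, ymax),
             (xmin, ymax), (xmin, ymin)]]

def validate(data):
    data['the_geom'] = bbox_ring(data['xmin'], data['ymin'],
                                 data['xmax'], data['ymax'])
    for key in ('xmin', 'ymin', 'xmax', 'ymax', 'formats'):
        data.pop(key)            # write-only inputs never reach create()
    return data

print(validate({'xmin': 7.0, 'ymin': 16.1, 'xmax': 25.0, 'ymax': 27.6,
                'formats': {'shp'}}))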
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass ExportConfigSerializer(serializers.Serializer):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n def get_size(self, obj):\n size = obj.upload.size\n return size\n\n def get_created(self, obj):\n return obj.created_at\n <function token>\n\n\nclass ExportTaskResultSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskResult models.\"\"\"\n url = serializers.SerializerMethodField()\n size = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskResult\n fields = 'filename', 'size', 'url'\n\n def get_url(self, obj):\n request = self.context['request']\n return request.build_absolute_uri(obj.download_url)\n\n def get_size(self, obj):\n return '{0:.3f} MB'.format(obj.size)\n\n\nclass ExportTaskExceptionSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskExceptions.\"\"\"\n exception = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskException\n fields = 'exception',\n\n def get_exception(self, obj):\n exc_info = cPickle.loads(str(obj.exception)).exc_info\n return str(exc_info[1])\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTasks models.\"\"\"\n result = serializers.SerializerMethodField()\n errors = serializers.SerializerMethodField()\n started_at = serializers.SerializerMethodField()\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:tasks-detail',\n lookup_field='uid')\n\n\n class Meta:\n model = ExportTask\n fields = ('uid', 'url', 'name', 'status', 'started_at',\n 'finished_at', 'duration', 'result', 'errors')\n\n def get_result(self, obj):\n \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n try:\n result = obj.result\n serializer = ExportTaskResultSerializer(result, many=False,\n context=self.context)\n return serializer.data\n except ExportTaskResult.DoesNotExist as e:\n return None\n\n def get_errors(self, obj):\n \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True,\n context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None\n\n def get_started_at(self, obj):\n if not obj.started_at:\n return None\n else:\n return obj.started_at\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return None\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Get the duration for this ExportTask.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n 
\"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n user = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n 
def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = 
sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
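The later entries in this steps list progressively swap docstrings, imports and whole functions for <docstring token>-style placeholders. One way such a pass could be written with the standard ast module (Python 3.9+ for ast.unparse); the placeholder strings match this dump, but the transform itself is an illustrative guess, not the pipeline's actual code.

import ast

SOURCE = '''
"""Serialize ExportRun."""
def get_user(self, obj):
    return obj.user.username
'''

out = []
for node in ast.parse(SOURCE).body:
    if isinstance(node, ast.Expr) and isinstance(node.value, ast.Constant):
        out.append('<docstring token>')   # module docstring elided
    elif isinstance(node, (ast.Import, ast.ImportFrom)):
        out.append('<import token>')      # import statement elided
    elif isinstance(node, ast.FunctionDef):
        out.append('<function token>')    # whole function elided
    else:
        out.append(ast.unparse(node))     # everything else kept verbatim
print('\n'.join(out))   # prints the docstring and function placeholders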
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass ExportConfigSerializer(serializers.Serializer):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def get_created(self, obj):\n return obj.created_at\n <function token>\n\n\nclass ExportTaskResultSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskResult models.\"\"\"\n url = serializers.SerializerMethodField()\n size = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskResult\n fields = 'filename', 'size', 'url'\n\n def get_url(self, obj):\n request = self.context['request']\n return request.build_absolute_uri(obj.download_url)\n\n def get_size(self, obj):\n return '{0:.3f} MB'.format(obj.size)\n\n\nclass ExportTaskExceptionSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskExceptions.\"\"\"\n exception = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskException\n fields = 'exception',\n\n def get_exception(self, obj):\n exc_info = cPickle.loads(str(obj.exception)).exc_info\n return str(exc_info[1])\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTasks models.\"\"\"\n result = serializers.SerializerMethodField()\n errors = serializers.SerializerMethodField()\n started_at = serializers.SerializerMethodField()\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:tasks-detail',\n lookup_field='uid')\n\n\n class Meta:\n model = ExportTask\n fields = ('uid', 'url', 'name', 'status', 'started_at',\n 'finished_at', 'duration', 'result', 'errors')\n\n def get_result(self, obj):\n \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n try:\n result = obj.result\n serializer = ExportTaskResultSerializer(result, many=False,\n context=self.context)\n return serializer.data\n except ExportTaskResult.DoesNotExist as e:\n return None\n\n def get_errors(self, obj):\n \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True,\n context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None\n\n def get_started_at(self, obj):\n if not obj.started_at:\n return None\n else:\n return obj.started_at\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return None\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Get the duration for this ExportTask.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON 
Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n user = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return 
the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: 
a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
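The serializers recorded above lean heavily on DRF's SerializerMethodField, which resolves a declared field to a get_<field_name> method on the serializer at representation time. A minimal pure-Python sketch of that dispatch, with no Django/DRF dependency (SketchSerializer and the sample run dict are illustrative, not part of the source):

import datetime

class SketchSerializer:
    # Pure-Python mimic of the get_<field> lookup DRF performs for
    # SerializerMethodField; this field list is hypothetical.
    method_fields = ('user', 'duration')

    def to_representation(self, obj):
        data = {}
        for name in self.method_fields:
            data[name] = getattr(self, 'get_' + name)(obj)
        return data

    def get_user(self, obj):
        return obj['user']

    def get_duration(self, obj):
        started, finished = obj['started_at'], obj['finished_at']
        return str(finished - started) if started and finished else None

run = {'user': 'demo',
       'started_at': datetime.datetime(2016, 1, 1, 10, 0),
       'finished_at': datetime.datetime(2016, 1, 1, 10, 45)}
print(SketchSerializer().to_representation(run))  # {'user': 'demo', 'duration': '0:45:00'}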
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass ExportConfigSerializer(serializers.Serializer):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass ExportTaskResultSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskResult models.\"\"\"\n url = serializers.SerializerMethodField()\n size = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskResult\n fields = 'filename', 'size', 'url'\n\n def get_url(self, obj):\n request = self.context['request']\n return request.build_absolute_uri(obj.download_url)\n\n def get_size(self, obj):\n return '{0:.3f} MB'.format(obj.size)\n\n\nclass ExportTaskExceptionSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskExceptions.\"\"\"\n exception = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskException\n fields = 'exception',\n\n def get_exception(self, obj):\n exc_info = cPickle.loads(str(obj.exception)).exc_info\n return str(exc_info[1])\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTasks models.\"\"\"\n result = serializers.SerializerMethodField()\n errors = serializers.SerializerMethodField()\n started_at = serializers.SerializerMethodField()\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:tasks-detail',\n lookup_field='uid')\n\n\n class Meta:\n model = ExportTask\n fields = ('uid', 'url', 'name', 'status', 'started_at',\n 'finished_at', 'duration', 'result', 'errors')\n\n def get_result(self, obj):\n \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n try:\n result = obj.result\n serializer = ExportTaskResultSerializer(result, many=False,\n context=self.context)\n return serializer.data\n except ExportTaskResult.DoesNotExist as e:\n return None\n\n def get_errors(self, obj):\n \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True,\n context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None\n\n def get_started_at(self, obj):\n if not obj.started_at:\n return None\n else:\n return obj.started_at\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return None\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Get the duration for this ExportTask.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = 
obj.name\n        geom = obj.the_geom\n        geometry = json.loads(GEOSGeometry(geom).geojson)\n        feature = OrderedDict()\n        feature['type'] = 'Feature'\n        feature['properties'] = {'uid': uid, 'name': name}\n        feature['geometry'] = geometry\n        return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n    \"\"\"Serialize ExportRun.\"\"\"\n    url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n        lookup_field='uid')\n    job = SimpleJobSerializer()\n    tasks = ExportTaskSerializer(many=True)\n    finished_at = serializers.SerializerMethodField()\n    duration = serializers.SerializerMethodField()\n    user = serializers.SerializerMethodField()\n\n\n    class Meta:\n        model = ExportRun\n        fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n            'user', 'status', 'job', 'tasks')\n\n    def get_finished_at(self, obj):\n        if not obj.finished_at:\n            return {}\n        else:\n            return obj.finished_at\n\n    def get_duration(self, obj):\n        \"\"\"Return the duration of the run.\"\"\"\n        started = obj.started_at\n        finished = obj.finished_at\n        if started and finished:\n            return str(finished - started)\n        else:\n            return None\n\n    def get_user(self, obj):\n        return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n    id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n    \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n    class Meta:\n        model = RegionMask\n        geo_field = 'the_geom'\n        fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n    \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n    url = serializers.HyperlinkedIdentityField(view_name=\n        'api:regions-detail', lookup_field='uid')\n    id = serializers.SerializerMethodField()\n\n\n    class Meta:\n        model = Region\n        geo_field = 'the_geom'\n        fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n    def get_id(self, obj):\n        return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n    \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n    url = serializers.HyperlinkedIdentityField(view_name=\n        'api:regions-detail', lookup_field='uid')\n\n\n    class Meta:\n        model = Region\n        fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n    \"\"\"Return a representation of the ExportFormat model.\"\"\"\n    url = serializers.HyperlinkedIdentityField(view_name=\n        'api:formats-detail', lookup_field='slug')\n\n\n    class Meta:\n        model = ExportFormat\n        fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n    \"\"\"\n    Return a sub-set of Job model attributes.\n\n    Provides a stripped down set of export attributes.\n    Removes the selected Tags from the Job representation.\n    Used to display the list of exports in the export browser\n    where tag info is not required.\n    \"\"\"\n    uid = serializers.SerializerMethodField()\n    url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n        lookup_field='uid')\n    name = serializers.CharField()\n    description = serializers.CharField()\n    event = serializers.CharField()\n    created_at = serializers.DateTimeField(read_only=True)\n    owner = serializers.SerializerMethodField(read_only=True)\n    extent = serializers.SerializerMethodField()\n    region = SimpleRegionSerializer(read_only=True)\n    published = serializers.BooleanField()\n\n    def get_uid(self, obj):\n        return obj.uid\n\n    def get_extent(self, obj):\n        \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n        
uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = 
validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
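The various get_extent methods assemble a GeoJSON Feature by hand rather than delegating to a geo serializer. A standalone sketch of the same OrderedDict assembly; the polygon stands in for json.loads(GEOSGeometry(geom).geojson), and the uid/name values are made up:

import json
from collections import OrderedDict

# Stand-in for json.loads(GEOSGeometry(geom).geojson); coordinates are hypothetical.
geometry = {'type': 'Polygon', 'coordinates': [
    [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.0, 0.0]]]}

feature = OrderedDict()
feature['type'] = 'Feature'
feature['properties'] = {'uid': 'a1b2c3', 'name': 'demo-export'}  # hypothetical values
feature['geometry'] = geometry
print(json.dumps(feature))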
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass ExportTaskResultSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskResult models.\"\"\"\n url = serializers.SerializerMethodField()\n size = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskResult\n fields = 'filename', 'size', 'url'\n\n def get_url(self, obj):\n request = self.context['request']\n return request.build_absolute_uri(obj.download_url)\n\n def get_size(self, obj):\n return '{0:.3f} MB'.format(obj.size)\n\n\nclass ExportTaskExceptionSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskExceptions.\"\"\"\n exception = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskException\n fields = 'exception',\n\n def get_exception(self, obj):\n exc_info = cPickle.loads(str(obj.exception)).exc_info\n return str(exc_info[1])\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTasks models.\"\"\"\n result = serializers.SerializerMethodField()\n errors = serializers.SerializerMethodField()\n started_at = serializers.SerializerMethodField()\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:tasks-detail',\n lookup_field='uid')\n\n\n class Meta:\n model = ExportTask\n fields = ('uid', 'url', 'name', 'status', 'started_at',\n 'finished_at', 'duration', 'result', 'errors')\n\n def get_result(self, obj):\n \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n try:\n result = obj.result\n serializer = ExportTaskResultSerializer(result, many=False,\n context=self.context)\n return serializer.data\n except ExportTaskResult.DoesNotExist as e:\n return None\n\n def get_errors(self, obj):\n \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True,\n context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None\n\n def get_started_at(self, obj):\n if not obj.started_at:\n return None\n else:\n return obj.started_at\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return None\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Get the duration for this ExportTask.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n 
job = SimpleJobSerializer()\n    tasks = ExportTaskSerializer(many=True)\n    finished_at = serializers.SerializerMethodField()\n    duration = serializers.SerializerMethodField()\n    user = serializers.SerializerMethodField()\n\n\n    class Meta:\n        model = ExportRun\n        fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n            'user', 'status', 'job', 'tasks')\n\n    def get_finished_at(self, obj):\n        if not obj.finished_at:\n            return {}\n        else:\n            return obj.finished_at\n\n    def get_duration(self, obj):\n        \"\"\"Return the duration of the run.\"\"\"\n        started = obj.started_at\n        finished = obj.finished_at\n        if started and finished:\n            return str(finished - started)\n        else:\n            return None\n\n    def get_user(self, obj):\n        return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n    id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n    \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n    class Meta:\n        model = RegionMask\n        geo_field = 'the_geom'\n        fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n    \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n    url = serializers.HyperlinkedIdentityField(view_name=\n        'api:regions-detail', lookup_field='uid')\n    id = serializers.SerializerMethodField()\n\n\n    class Meta:\n        model = Region\n        geo_field = 'the_geom'\n        fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n    def get_id(self, obj):\n        return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n    \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n    url = serializers.HyperlinkedIdentityField(view_name=\n        'api:regions-detail', lookup_field='uid')\n\n\n    class Meta:\n        model = Region\n        fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n    \"\"\"Return a representation of the ExportFormat model.\"\"\"\n    url = serializers.HyperlinkedIdentityField(view_name=\n        'api:formats-detail', lookup_field='slug')\n\n\n    class Meta:\n        model = ExportFormat\n        fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n    \"\"\"\n    Return a sub-set of Job model attributes.\n\n    Provides a stripped down set of export attributes.\n    Removes the selected Tags from the Job representation.\n    Used to display the list of exports in the export browser\n    where tag info is not required.\n    \"\"\"\n    uid = serializers.SerializerMethodField()\n    url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n        lookup_field='uid')\n    name = serializers.CharField()\n    description = serializers.CharField()\n    event = serializers.CharField()\n    created_at = serializers.DateTimeField(read_only=True)\n    owner = serializers.SerializerMethodField(read_only=True)\n    extent = serializers.SerializerMethodField()\n    region = SimpleRegionSerializer(read_only=True)\n    published = serializers.BooleanField()\n\n    def get_uid(self, obj):\n        return obj.uid\n\n    def get_extent(self, obj):\n        \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n        uid = str(obj.uid)\n        name = obj.name\n        geom = obj.the_geom\n        geometry = json.loads(GEOSGeometry(geom).geojson)\n        feature = OrderedDict()\n        feature['type'] = 'Feature'\n        feature['properties'] = {'uid': uid, 'name': name}\n        feature['geometry'] = geometry\n        return feature\n\n    def get_owner(self, obj):\n        return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n    \"\"\"\n    Return a full representation of an export 
Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n 
feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
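get_result and get_errors convert a missing related object into None instead of letting the ORM lookup raise. A self-contained sketch of that pattern, with DoesNotExist standing in for the Django per-model exception and Task for the ORM instance:

class DoesNotExist(Exception):
    """Stand-in for the Django ORM's per-model DoesNotExist exception."""

class Task(object):
    @property
    def result(self):
        raise DoesNotExist('no result recorded yet')

def get_result(task):
    # Mirror of the serializer pattern: an absent relation serializes to None.
    try:
        return task.result
    except DoesNotExist:
        return None

print(get_result(Task()))  # None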
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass ExportTaskResultSerializer(serializers.ModelSerializer):\n <docstring token>\n url = serializers.SerializerMethodField()\n size = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskResult\n fields = 'filename', 'size', 'url'\n\n def get_url(self, obj):\n request = self.context['request']\n return request.build_absolute_uri(obj.download_url)\n\n def get_size(self, obj):\n return '{0:.3f} MB'.format(obj.size)\n\n\nclass ExportTaskExceptionSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskExceptions.\"\"\"\n exception = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskException\n fields = 'exception',\n\n def get_exception(self, obj):\n exc_info = cPickle.loads(str(obj.exception)).exc_info\n return str(exc_info[1])\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTasks models.\"\"\"\n result = serializers.SerializerMethodField()\n errors = serializers.SerializerMethodField()\n started_at = serializers.SerializerMethodField()\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:tasks-detail',\n lookup_field='uid')\n\n\n class Meta:\n model = ExportTask\n fields = ('uid', 'url', 'name', 'status', 'started_at',\n 'finished_at', 'duration', 'result', 'errors')\n\n def get_result(self, obj):\n \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n try:\n result = obj.result\n serializer = ExportTaskResultSerializer(result, many=False,\n context=self.context)\n return serializer.data\n except ExportTaskResult.DoesNotExist as e:\n return None\n\n def get_errors(self, obj):\n \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True,\n context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None\n\n def get_started_at(self, obj):\n if not obj.started_at:\n return None\n else:\n return obj.started_at\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return None\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Get the duration for this ExportTask.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = 
SimpleJobSerializer()\n    tasks = ExportTaskSerializer(many=True)\n    finished_at = serializers.SerializerMethodField()\n    duration = serializers.SerializerMethodField()\n    user = serializers.SerializerMethodField()\n\n\n    class Meta:\n        model = ExportRun\n        fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n            'user', 'status', 'job', 'tasks')\n\n    def get_finished_at(self, obj):\n        if not obj.finished_at:\n            return {}\n        else:\n            return obj.finished_at\n\n    def get_duration(self, obj):\n        \"\"\"Return the duration of the run.\"\"\"\n        started = obj.started_at\n        finished = obj.finished_at\n        if started and finished:\n            return str(finished - started)\n        else:\n            return None\n\n    def get_user(self, obj):\n        return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n    id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n    \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n    class Meta:\n        model = RegionMask\n        geo_field = 'the_geom'\n        fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n    \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n    url = serializers.HyperlinkedIdentityField(view_name=\n        'api:regions-detail', lookup_field='uid')\n    id = serializers.SerializerMethodField()\n\n\n    class Meta:\n        model = Region\n        geo_field = 'the_geom'\n        fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n    def get_id(self, obj):\n        return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n    \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n    url = serializers.HyperlinkedIdentityField(view_name=\n        'api:regions-detail', lookup_field='uid')\n\n\n    class Meta:\n        model = Region\n        fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n    \"\"\"Return a representation of the ExportFormat model.\"\"\"\n    url = serializers.HyperlinkedIdentityField(view_name=\n        'api:formats-detail', lookup_field='slug')\n\n\n    class Meta:\n        model = ExportFormat\n        fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n    \"\"\"\n    Return a sub-set of Job model attributes.\n\n    Provides a stripped down set of export attributes.\n    Removes the selected Tags from the Job representation.\n    Used to display the list of exports in the export browser\n    where tag info is not required.\n    \"\"\"\n    uid = serializers.SerializerMethodField()\n    url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n        lookup_field='uid')\n    name = serializers.CharField()\n    description = serializers.CharField()\n    event = serializers.CharField()\n    created_at = serializers.DateTimeField(read_only=True)\n    owner = serializers.SerializerMethodField(read_only=True)\n    extent = serializers.SerializerMethodField()\n    region = SimpleRegionSerializer(read_only=True)\n    published = serializers.BooleanField()\n\n    def get_uid(self, obj):\n        return obj.uid\n\n    def get_extent(self, obj):\n        \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n        uid = str(obj.uid)\n        name = obj.name\n        geom = obj.the_geom\n        geometry = json.loads(GEOSGeometry(geom).geojson)\n        feature = OrderedDict()\n        feature['type'] = 'Feature'\n        feature['properties'] = {'uid': uid, 'name': name}\n        feature['geometry'] = geometry\n        return feature\n\n    def get_owner(self, obj):\n        return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n    \"\"\"\n    Return a full representation of an export Job.\n\n    
This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] 
= {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
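JobSerializer restricts the requested formats to EXPORT_FORMAT_CHOICES via a MultipleChoiceField. A rough pure-Python equivalent of that membership check (the error wording is paraphrased from the field's error_messages, and check_formats is a hypothetical helper, not validators.validate_formats):

EXPORT_FORMAT_CHOICES = (('shp', 'Shapefile Format'), ('obf', 'OBF Format'),
                         ('kml', 'KML Format'), ('garmin', 'Garmin Format'),
                         ('sqlite', 'SQLITE Format'),
                         ('thematic', 'Thematic Shapefile Format'))
VALID_SLUGS = {slug for slug, label in EXPORT_FORMAT_CHOICES}

def check_formats(formats):
    # Reject non-list input, then reject any slug outside the choices.
    if not isinstance(formats, (list, tuple, set)):
        raise ValueError('Expected a list of items but got type "%s".'
                         % type(formats).__name__)
    invalid = [f for f in formats if f not in VALID_SLUGS]
    if invalid:
        raise ValueError('invalid export format: %s' % ', '.join(invalid))
    return set(formats)

print(sorted(check_formats(['shp', 'kml'])))  # ['kml', 'shp']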
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass ExportTaskResultSerializer(serializers.ModelSerializer):\n <docstring token>\n <assignment token>\n <assignment token>\n\n\n class Meta:\n model = ExportTaskResult\n fields = 'filename', 'size', 'url'\n\n def get_url(self, obj):\n request = self.context['request']\n return request.build_absolute_uri(obj.download_url)\n\n def get_size(self, obj):\n return '{0:.3f} MB'.format(obj.size)\n\n\nclass ExportTaskExceptionSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskExceptions.\"\"\"\n exception = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskException\n fields = 'exception',\n\n def get_exception(self, obj):\n exc_info = cPickle.loads(str(obj.exception)).exc_info\n return str(exc_info[1])\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTasks models.\"\"\"\n result = serializers.SerializerMethodField()\n errors = serializers.SerializerMethodField()\n started_at = serializers.SerializerMethodField()\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:tasks-detail',\n lookup_field='uid')\n\n\n class Meta:\n model = ExportTask\n fields = ('uid', 'url', 'name', 'status', 'started_at',\n 'finished_at', 'duration', 'result', 'errors')\n\n def get_result(self, obj):\n \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n try:\n result = obj.result\n serializer = ExportTaskResultSerializer(result, many=False,\n context=self.context)\n return serializer.data\n except ExportTaskResult.DoesNotExist as e:\n return None\n\n def get_errors(self, obj):\n \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True,\n context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None\n\n def get_started_at(self, obj):\n if not obj.started_at:\n return None\n else:\n return obj.started_at\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return None\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Get the duration for this ExportTask.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n 
finished_at = serializers.SerializerMethodField()\n    duration = serializers.SerializerMethodField()\n    user = serializers.SerializerMethodField()\n\n\n    class Meta:\n        model = ExportRun\n        fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n            'user', 'status', 'job', 'tasks')\n\n    def get_finished_at(self, obj):\n        if not obj.finished_at:\n            return {}\n        else:\n            return obj.finished_at\n\n    def get_duration(self, obj):\n        \"\"\"Return the duration of the run.\"\"\"\n        started = obj.started_at\n        finished = obj.finished_at\n        if started and finished:\n            return str(finished - started)\n        else:\n            return None\n\n    def get_user(self, obj):\n        return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n    id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n    \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n    class Meta:\n        model = RegionMask\n        geo_field = 'the_geom'\n        fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n    \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n    url = serializers.HyperlinkedIdentityField(view_name=\n        'api:regions-detail', lookup_field='uid')\n    id = serializers.SerializerMethodField()\n\n\n    class Meta:\n        model = Region\n        geo_field = 'the_geom'\n        fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n    def get_id(self, obj):\n        return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n    \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n    url = serializers.HyperlinkedIdentityField(view_name=\n        'api:regions-detail', lookup_field='uid')\n\n\n    class Meta:\n        model = Region\n        fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n    \"\"\"Return a representation of the ExportFormat model.\"\"\"\n    url = serializers.HyperlinkedIdentityField(view_name=\n        'api:formats-detail', lookup_field='slug')\n\n\n    class Meta:\n        model = ExportFormat\n        fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n    \"\"\"\n    Return a sub-set of Job model attributes.\n\n    Provides a stripped down set of export attributes.\n    Removes the selected Tags from the Job representation.\n    Used to display the list of exports in the export browser\n    where tag info is not required.\n    \"\"\"\n    uid = serializers.SerializerMethodField()\n    url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n        lookup_field='uid')\n    name = serializers.CharField()\n    description = serializers.CharField()\n    event = serializers.CharField()\n    created_at = serializers.DateTimeField(read_only=True)\n    owner = serializers.SerializerMethodField(read_only=True)\n    extent = serializers.SerializerMethodField()\n    region = SimpleRegionSerializer(read_only=True)\n    published = serializers.BooleanField()\n\n    def get_uid(self, obj):\n        return obj.uid\n\n    def get_extent(self, obj):\n        \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n        uid = str(obj.uid)\n        name = obj.name\n        geom = obj.the_geom\n        geometry = json.loads(GEOSGeometry(geom).geojson)\n        feature = OrderedDict()\n        feature['type'] = 'Feature'\n        feature['properties'] = {'uid': uid, 'name': name}\n        feature['geometry'] = geometry\n        return feature\n\n    def get_owner(self, obj):\n        return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n    \"\"\"\n    Return a full representation of an export Job.\n\n    This is the core representation of the API.\n    \"\"\"\n    \"\"\"\n    List 
of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n 
return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
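validate() hands the four bounding-box floats to validators.validate_bbox_params before popping them from the payload. A minimal sketch of the range checks implied by the FloatField declarations (xmin/xmax in [-180, 180], ymin/ymax in [-90, 90]); the min-less-than-max rule is an assumption about the validator's behavior, and check_bbox_params is a hypothetical name:

def check_bbox_params(data):
    # Range limits come from the serializer field declarations.
    xmin, ymin, xmax, ymax = (data[k] for k in ('xmin', 'ymin', 'xmax', 'ymax'))
    if not (-180 <= xmin <= 180 and -180 <= xmax <= 180):
        raise ValueError('invalid xmin/xmax value.')
    if not (-90 <= ymin <= 90 and -90 <= ymax <= 90):
        raise ValueError('invalid ymin/ymax value.')
    # Assumed: the validator also requires a non-degenerate box.
    if xmin >= xmax or ymin >= ymax:
        raise ValueError('bbox min values must be smaller than max values.')
    return xmin, ymin, xmax, ymax

print(check_bbox_params({'xmin': 7.0, 'ymin': 16.1, 'xmax': 7.5, 'ymax': 16.5}))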
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass ExportTaskResultSerializer(serializers.ModelSerializer):\n <docstring token>\n <assignment token>\n <assignment token>\n\n\n class Meta:\n model = ExportTaskResult\n fields = 'filename', 'size', 'url'\n <function token>\n\n def get_size(self, obj):\n return '{0:.3f} MB'.format(obj.size)\n\n\nclass ExportTaskExceptionSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskExceptions.\"\"\"\n exception = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskException\n fields = 'exception',\n\n def get_exception(self, obj):\n exc_info = cPickle.loads(str(obj.exception)).exc_info\n return str(exc_info[1])\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTasks models.\"\"\"\n result = serializers.SerializerMethodField()\n errors = serializers.SerializerMethodField()\n started_at = serializers.SerializerMethodField()\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:tasks-detail',\n lookup_field='uid')\n\n\n class Meta:\n model = ExportTask\n fields = ('uid', 'url', 'name', 'status', 'started_at',\n 'finished_at', 'duration', 'result', 'errors')\n\n def get_result(self, obj):\n \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n try:\n result = obj.result\n serializer = ExportTaskResultSerializer(result, many=False,\n context=self.context)\n return serializer.data\n except ExportTaskResult.DoesNotExist as e:\n return None\n\n def get_errors(self, obj):\n \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True,\n context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None\n\n def get_started_at(self, obj):\n if not obj.started_at:\n return None\n else:\n return obj.started_at\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return None\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Get the duration for this ExportTask.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n 
user = serializers.SerializerMethodField()\n\n\n    class Meta:\n        model = ExportRun\n        fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n            'user', 'status', 'job', 'tasks')\n\n    def get_finished_at(self, obj):\n        if not obj.finished_at:\n            return {}\n        else:\n            return obj.finished_at\n\n    def get_duration(self, obj):\n        \"\"\"Return the duration of the run.\"\"\"\n        started = obj.started_at\n        finished = obj.finished_at\n        if started and finished:\n            return str(finished - started)\n        else:\n            return None\n\n    def get_user(self, obj):\n        return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n    id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n    \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n    class Meta:\n        model = RegionMask\n        geo_field = 'the_geom'\n        fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n    \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n    url = serializers.HyperlinkedIdentityField(view_name=\n        'api:regions-detail', lookup_field='uid')\n    id = serializers.SerializerMethodField()\n\n\n    class Meta:\n        model = Region\n        geo_field = 'the_geom'\n        fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n    def get_id(self, obj):\n        return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n    \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n    url = serializers.HyperlinkedIdentityField(view_name=\n        'api:regions-detail', lookup_field='uid')\n\n\n    class Meta:\n        model = Region\n        fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n    \"\"\"Return a representation of the ExportFormat model.\"\"\"\n    url = serializers.HyperlinkedIdentityField(view_name=\n        'api:formats-detail', lookup_field='slug')\n\n\n    class Meta:\n        model = ExportFormat\n        fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n    \"\"\"\n    Return a sub-set of Job model attributes.\n\n    Provides a stripped down set of export attributes.\n    Removes the selected Tags from the Job representation.\n    Used to display the list of exports in the export browser\n    where tag info is not required.\n    \"\"\"\n    uid = serializers.SerializerMethodField()\n    url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n        lookup_field='uid')\n    name = serializers.CharField()\n    description = serializers.CharField()\n    event = serializers.CharField()\n    created_at = serializers.DateTimeField(read_only=True)\n    owner = serializers.SerializerMethodField(read_only=True)\n    extent = serializers.SerializerMethodField()\n    region = SimpleRegionSerializer(read_only=True)\n    published = serializers.BooleanField()\n\n    def get_uid(self, obj):\n        return obj.uid\n\n    def get_extent(self, obj):\n        \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n        uid = str(obj.uid)\n        name = obj.name\n        geom = obj.the_geom\n        geometry = json.loads(GEOSGeometry(geom).geojson)\n        feature = OrderedDict()\n        feature['type'] = 'Feature'\n        feature['properties'] = {'uid': uid, 'name': name}\n        feature['geometry'] = geometry\n        return feature\n\n    def get_owner(self, obj):\n        return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n    \"\"\"\n    Return a full representation of an export Job.\n\n    This is the core representation of the API.\n    \"\"\"\n    \"\"\"\n    List of the available Export Formats.\n    \n    This list should be updated to add support for\n    additional 
export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this 
export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass ExportTaskResultSerializer(serializers.ModelSerializer):\n <docstring token>\n <assignment token>\n <assignment token>\n\n\n class Meta:\n model = ExportTaskResult\n fields = 'filename', 'size', 'url'\n <function token>\n <function token>\n\n\nclass ExportTaskExceptionSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskExceptions.\"\"\"\n exception = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskException\n fields = 'exception',\n\n def get_exception(self, obj):\n exc_info = cPickle.loads(str(obj.exception)).exc_info\n return str(exc_info[1])\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTasks models.\"\"\"\n result = serializers.SerializerMethodField()\n errors = serializers.SerializerMethodField()\n started_at = serializers.SerializerMethodField()\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:tasks-detail',\n lookup_field='uid')\n\n\n class Meta:\n model = ExportTask\n fields = ('uid', 'url', 'name', 'status', 'started_at',\n 'finished_at', 'duration', 'result', 'errors')\n\n def get_result(self, obj):\n \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n try:\n result = obj.result\n serializer = ExportTaskResultSerializer(result, many=False,\n context=self.context)\n return serializer.data\n except ExportTaskResult.DoesNotExist as e:\n return None\n\n def get_errors(self, obj):\n \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True,\n context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None\n\n def get_started_at(self, obj):\n if not obj.started_at:\n return None\n else:\n return obj.started_at\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return None\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Get the duration for this ExportTask.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n user = serializers.SerializerMethodField()\n\n\n 
class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = 
('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in 
obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
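`get_extent` appears on several of the serializers recorded above and always assembles the same GeoJSON `Feature` dict. A dependency-free sketch, with a hand-written polygon standing in for `json.loads(GEOSGeometry(geom).geojson)` (the bounding-box coordinates below are illustrative only):

```python
import json
from collections import OrderedDict

def extent_feature(uid, name, geometry):
    # Key order mirrors the serializer: type, then properties, then geometry.
    feature = OrderedDict()
    feature['type'] = 'Feature'
    feature['properties'] = {'uid': uid, 'name': name}
    feature['geometry'] = geometry
    return feature

bbox_polygon = {'type': 'Polygon',
                'coordinates': [[[7.0, 16.1], [7.0, 27.6], [25.0, 27.6],
                                 [25.0, 16.1], [7.0, 16.1]]]}
print(json.dumps(extent_feature('a-job-uid', 'My export', bbox_polygon)))
```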
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ExportTaskExceptionSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTaskExceptions.\"\"\"\n exception = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskException\n fields = 'exception',\n\n def get_exception(self, obj):\n exc_info = cPickle.loads(str(obj.exception)).exc_info\n return str(exc_info[1])\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTasks models.\"\"\"\n result = serializers.SerializerMethodField()\n errors = serializers.SerializerMethodField()\n started_at = serializers.SerializerMethodField()\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:tasks-detail',\n lookup_field='uid')\n\n\n class Meta:\n model = ExportTask\n fields = ('uid', 'url', 'name', 'status', 'started_at',\n 'finished_at', 'duration', 'result', 'errors')\n\n def get_result(self, obj):\n \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n try:\n result = obj.result\n serializer = ExportTaskResultSerializer(result, many=False,\n context=self.context)\n return serializer.data\n except ExportTaskResult.DoesNotExist as e:\n return None\n\n def get_errors(self, obj):\n \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True,\n context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None\n\n def get_started_at(self, obj):\n if not obj.started_at:\n return None\n else:\n return obj.started_at\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return None\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Get the duration for this ExportTask.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n user = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return 
obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = 
serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n 
\"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ExportTaskExceptionSerializer(serializers.ModelSerializer):\n <docstring token>\n exception = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportTaskException\n fields = 'exception',\n\n def get_exception(self, obj):\n exc_info = cPickle.loads(str(obj.exception)).exc_info\n return str(exc_info[1])\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTasks models.\"\"\"\n result = serializers.SerializerMethodField()\n errors = serializers.SerializerMethodField()\n started_at = serializers.SerializerMethodField()\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:tasks-detail',\n lookup_field='uid')\n\n\n class Meta:\n model = ExportTask\n fields = ('uid', 'url', 'name', 'status', 'started_at',\n 'finished_at', 'duration', 'result', 'errors')\n\n def get_result(self, obj):\n \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n try:\n result = obj.result\n serializer = ExportTaskResultSerializer(result, many=False,\n context=self.context)\n return serializer.data\n except ExportTaskResult.DoesNotExist as e:\n return None\n\n def get_errors(self, obj):\n \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True,\n context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None\n\n def get_started_at(self, obj):\n if not obj.started_at:\n return None\n else:\n return obj.started_at\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return None\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Get the duration for this ExportTask.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n user = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return obj.finished_at\n\n def 
get_duration(self, obj):\n \"\"\"Return the duration of the the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = 
serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n 
\"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ExportTaskExceptionSerializer(serializers.ModelSerializer):\n <docstring token>\n <assignment token>\n\n\n class Meta:\n model = ExportTaskException\n fields = 'exception',\n\n def get_exception(self, obj):\n exc_info = cPickle.loads(str(obj.exception)).exc_info\n return str(exc_info[1])\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTasks models.\"\"\"\n result = serializers.SerializerMethodField()\n errors = serializers.SerializerMethodField()\n started_at = serializers.SerializerMethodField()\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:tasks-detail',\n lookup_field='uid')\n\n\n class Meta:\n model = ExportTask\n fields = ('uid', 'url', 'name', 'status', 'started_at',\n 'finished_at', 'duration', 'result', 'errors')\n\n def get_result(self, obj):\n \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n try:\n result = obj.result\n serializer = ExportTaskResultSerializer(result, many=False,\n context=self.context)\n return serializer.data\n except ExportTaskResult.DoesNotExist as e:\n return None\n\n def get_errors(self, obj):\n \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True,\n context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None\n\n def get_started_at(self, obj):\n if not obj.started_at:\n return None\n else:\n return obj.started_at\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return None\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Get the duration for this ExportTask.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n user = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Return 
the duration of the the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, 
write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = 
obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
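`JobSerializer.validate` ranks candidate regions by the area of their intersection with the job geometry (`sorted(regions.all(), key=lambda a: a.intersection.area, reverse=True)`). A no-dependency sketch using axis-aligned boxes in place of GeoDjango geometries; the region extents below are rough illustrative numbers, not real data:

```python
def overlap_area(a, b):
    # Overlap of two (xmin, ymin, xmax, ymax) boxes, 0.0 when disjoint.
    w = min(a[2], b[2]) - max(a[0], b[0])
    h = min(a[3], b[3]) - max(a[1], b[1])
    return w * h if w > 0 and h > 0 else 0.0

job_bbox = (7.0, 16.1, 25.0, 27.6)
regions = {'Chad': (13.3, 7.3, 24.1, 23.5),
           'Sudan': (21.8, 8.6, 39.1, 23.2)}

# Mirrors the serializer's sort: best-covering region first.
ranked = sorted(regions, reverse=True,
                key=lambda name: overlap_area(job_bbox, regions[name]))
print(ranked)  # ['Chad', 'Sudan']
```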
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ExportTaskExceptionSerializer(serializers.ModelSerializer):\n <docstring token>\n <assignment token>\n\n\n class Meta:\n model = ExportTaskException\n fields = 'exception',\n <function token>\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTasks models.\"\"\"\n result = serializers.SerializerMethodField()\n errors = serializers.SerializerMethodField()\n started_at = serializers.SerializerMethodField()\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:tasks-detail',\n lookup_field='uid')\n\n\n class Meta:\n model = ExportTask\n fields = ('uid', 'url', 'name', 'status', 'started_at',\n 'finished_at', 'duration', 'result', 'errors')\n\n def get_result(self, obj):\n \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n try:\n result = obj.result\n serializer = ExportTaskResultSerializer(result, many=False,\n context=self.context)\n return serializer.data\n except ExportTaskResult.DoesNotExist as e:\n return None\n\n def get_errors(self, obj):\n \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True,\n context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None\n\n def get_started_at(self, obj):\n if not obj.started_at:\n return None\n else:\n return obj.started_at\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return None\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Get the duration for this ExportTask.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n user = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if 
started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 
'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': 
self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
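The get_extent helpers repeated across the serializers above all build the same GeoJSON Feature by hand. A minimal standalone sketch of that pattern, with the GEOSGeometry round trip replaced by a hand-written geometry dict so it runs without GeoDjango (the uid, name, and coordinates are invented for illustration):

```python
# Minimal sketch of the get_extent() pattern used by the serializers
# above. In the real code the geometry comes from
# json.loads(GEOSGeometry(obj.the_geom).geojson); a literal Polygon
# stands in here so the example runs without GeoDjango.
import json
from collections import OrderedDict


def make_feature(uid, name, geometry):
    feature = OrderedDict()
    feature['type'] = 'Feature'
    feature['properties'] = {'uid': uid, 'name': name}
    feature['geometry'] = geometry
    return feature


bbox = {'type': 'Polygon',
        'coordinates': [[[13.0, 7.4], [13.0, 23.4], [24.0, 23.4],
                         [24.0, 7.4], [13.0, 7.4]]]}
print(json.dumps(make_feature('a1b2c3', 'example export', bbox)))
```

The OrderedDict only makes the serialized output read 'type' first; GeoJSON consumers do not depend on key order.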
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportTasks models.\"\"\"\n result = serializers.SerializerMethodField()\n errors = serializers.SerializerMethodField()\n started_at = serializers.SerializerMethodField()\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:tasks-detail',\n lookup_field='uid')\n\n\n class Meta:\n model = ExportTask\n fields = ('uid', 'url', 'name', 'status', 'started_at',\n 'finished_at', 'duration', 'result', 'errors')\n\n def get_result(self, obj):\n \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n try:\n result = obj.result\n serializer = ExportTaskResultSerializer(result, many=False,\n context=self.context)\n return serializer.data\n except ExportTaskResult.DoesNotExist as e:\n return None\n\n def get_errors(self, obj):\n \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True,\n context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None\n\n def get_started_at(self, obj):\n if not obj.started_at:\n return None\n else:\n return obj.started_at\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return None\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Get the duration for this ExportTask.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n user = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = 
serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n 
lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, 
many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
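get_result and get_errors in ExportTaskSerializer share one guard: read a related object inside try/except and map its DoesNotExist to None instead of letting the request fail. The essence, reduced to runnable plain Python with stand-ins for the Django pieces (the relation is apparently a reverse one-to-one, which is an inference from the exception being raised on attribute access):

```python
# Core of the get_result()/get_errors() guard above. DoesNotExist
# mimics the exception Django raises when a reverse one-to-one
# relation has no row yet; PendingTask mimics a task whose
# ExportTaskResult has not been created.
class DoesNotExist(Exception):
    pass


class PendingTask:
    @property
    def result(self):
        raise DoesNotExist  # no ExportTaskResult row yet


def get_result(obj):
    try:
        return {'output_url': obj.result}  # serialized form, simplified
    except DoesNotExist:
        return None                        # unfinished task renders as null


print(get_result(PendingTask()))  # -> None
```

In the real serializer the try body wraps ExportTaskResultSerializer / ExportTaskExceptionSerializer rather than a plain dict.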
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n <docstring token>\n result = serializers.SerializerMethodField()\n errors = serializers.SerializerMethodField()\n started_at = serializers.SerializerMethodField()\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:tasks-detail',\n lookup_field='uid')\n\n\n class Meta:\n model = ExportTask\n fields = ('uid', 'url', 'name', 'status', 'started_at',\n 'finished_at', 'duration', 'result', 'errors')\n\n def get_result(self, obj):\n \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n try:\n result = obj.result\n serializer = ExportTaskResultSerializer(result, many=False,\n context=self.context)\n return serializer.data\n except ExportTaskResult.DoesNotExist as e:\n return None\n\n def get_errors(self, obj):\n \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True,\n context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None\n\n def get_started_at(self, obj):\n if not obj.started_at:\n return None\n else:\n return obj.started_at\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return None\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Get the duration for this ExportTask.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n user = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = 
serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n 
lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, 
many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
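All of the get_duration methods in these steps reduce to the same datetime arithmetic, worth seeing in isolation: subtract two datetimes and stringify the resulting timedelta, returning None when either endpoint is missing.

```python
# Standalone restatement of the get_duration() logic shown above.
from datetime import datetime


def duration(started_at, finished_at):
    if started_at and finished_at:
        # str() of a timedelta renders as 'H:MM:SS[.ffffff]'
        return str(finished_at - started_at)
    return None


print(duration(datetime(2016, 1, 1, 12, 0, 0),
               datetime(2016, 1, 1, 12, 3, 30)))  # -> 0:03:30
print(duration(None, datetime(2016, 1, 1)))       # -> None
```

Note that ExportRunSerializer.get_finished_at above falls back to {} where the task-level serializer returns None; the sketch keeps the None convention.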
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\n class Meta:\n model = ExportTask\n fields = ('uid', 'url', 'name', 'status', 'started_at',\n 'finished_at', 'duration', 'result', 'errors')\n\n def get_result(self, obj):\n \"\"\"Serialize the ExportTaskResult for this ExportTask.\"\"\"\n try:\n result = obj.result\n serializer = ExportTaskResultSerializer(result, many=False,\n context=self.context)\n return serializer.data\n except ExportTaskResult.DoesNotExist as e:\n return None\n\n def get_errors(self, obj):\n \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True,\n context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None\n\n def get_started_at(self, obj):\n if not obj.started_at:\n return None\n else:\n return obj.started_at\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return None\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Get the duration for this ExportTask.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n user = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n 
fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = 
serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
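JobSerializer's formats field leans on MultipleChoiceField to reject unknown export formats with the custom 'invalid_choice' message. A quick standalone check of that behaviour, assuming only djangorestframework is installed; the settings stub is boilerplate needed to import DRF outside a project, and the two-item choices list is trimmed from the full EXPORT_FORMAT_CHOICES above:

```python
# Standalone check of the MultipleChoiceField behaviour JobSerializer
# relies on. settings.configure()/django.setup() only exist here so
# DRF can be imported without a Django project.
import django
from django.conf import settings

settings.configure()
django.setup()

from rest_framework import serializers

formats = serializers.MultipleChoiceField(
    choices=[('shp', 'Shapefile Format'), ('kml', 'KML Format')],
    error_messages={'invalid_choice': 'invalid export format.'})

print(sorted(formats.run_validation(['shp', 'kml'])))  # ['kml', 'shp']
try:
    formats.run_validation(['gpx'])                    # not a valid choice
except serializers.ValidationError as exc:
    print(exc.detail[0])                               # invalid export format.
```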
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\n class Meta:\n model = ExportTask\n fields = ('uid', 'url', 'name', 'status', 'started_at',\n 'finished_at', 'duration', 'result', 'errors')\n <function token>\n\n def get_errors(self, obj):\n \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True,\n context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None\n\n def get_started_at(self, obj):\n if not obj.started_at:\n return None\n else:\n return obj.started_at\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return None\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Get the duration for this ExportTask.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n user = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = 
serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = 
serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
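validate() above resolves the export region by sorting the candidate regions on the area of their intersection with the requested geometry, largest first, before handing the list to validators.validate_region. Once the GeoQuerySet has annotated each row with an intersection, the selection itself is plain Python; here SimpleNamespace stands in for Region rows and the names and areas are invented:

```python
# Plain-Python restatement of the region sort in validate() above.
from types import SimpleNamespace

regions = [
    SimpleNamespace(name='Central Africa',
                    intersection=SimpleNamespace(area=2.5)),
    SimpleNamespace(name='Chad',
                    intersection=SimpleNamespace(area=9.1)),
]
sorted_regions = sorted(regions, key=lambda a: a.intersection.area,
                        reverse=True)
print([r.name for r in sorted_regions])  # ['Chad', 'Central Africa']
```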
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\n class Meta:\n model = ExportTask\n fields = ('uid', 'url', 'name', 'status', 'started_at',\n 'finished_at', 'duration', 'result', 'errors')\n <function token>\n\n def get_errors(self, obj):\n \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True,\n context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None\n\n def get_started_at(self, obj):\n if not obj.started_at:\n return None\n else:\n return obj.started_at\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return None\n else:\n return obj.finished_at\n <function token>\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n user = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass 
SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 
'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
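The four bbox FloatFields above pin longitudes to [-180, 180] and latitudes to [-90, 90]; cross-field checks are delegated to validators.validate_bbox_params, which is referenced but not shown in this record. A plain restatement of the field-level bounds, with the min < max rule added as an assumption about what that external validator enforces:

```python
# Field-level bbox bounds from JobSerializer, restated as asserts.
# The min < max rule is an assumption about
# validators.validate_bbox_params, whose source is not shown here.
def check_extents(xmin, ymin, xmax, ymax):
    assert -180 <= xmin <= 180 and -180 <= xmax <= 180, 'invalid longitude'
    assert -90 <= ymin <= 90 and -90 <= ymax <= 90, 'invalid latitude'
    assert xmin < xmax and ymin < ymax, 'inverted or empty bbox'
    return xmin, ymin, xmax, ymax


print(check_extents(13.0, 7.4, 24.0, 23.4))  # (13.0, 7.4, 24.0, 23.4)
```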
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\n class Meta:\n model = ExportTask\n fields = ('uid', 'url', 'name', 'status', 'started_at',\n 'finished_at', 'duration', 'result', 'errors')\n <function token>\n\n def get_errors(self, obj):\n \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True,\n context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None\n\n def get_started_at(self, obj):\n if not obj.started_at:\n return None\n else:\n return obj.started_at\n <function token>\n <function token>\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n user = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning 
Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, 
min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
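Each step above repeats the same `get_extent` pattern: load the stored geometry, convert it to GeoJSON, and wrap it in a Feature dict with `uid`/`name` properties. A minimal runnable sketch of that pattern, assuming GeoDjango's GEOS bindings (with GDAL backing the `.geojson` property) are installed; the WKT polygon is illustrative only:

```python
import json
from collections import OrderedDict

from django.contrib.gis.geos import GEOSGeometry


def as_feature(uid, name, geom):
    """Wrap a GEOS geometry in a GeoJSON Feature carrying uid/name properties."""
    feature = OrderedDict()
    feature['type'] = 'Feature'
    feature['properties'] = {'uid': uid, 'name': name}
    # GEOSGeometry.geojson returns a GeoJSON string; parse it back to a dict
    feature['geometry'] = json.loads(geom.geojson)
    return feature


# Illustrative geometry standing in for Job.the_geom
polygon = GEOSGeometry('POLYGON((7 22, 7 23, 8 23, 8 22, 7 22))')
print(json.dumps(as_feature('abc-123', 'demo export', polygon)))
```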
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\n class Meta:\n model = ExportTask\n fields = ('uid', 'url', 'name', 'status', 'started_at',\n 'finished_at', 'duration', 'result', 'errors')\n <function token>\n\n def get_errors(self, obj):\n \"\"\"Serialize the ExportTaskExceptions for this ExportTask.\"\"\"\n try:\n errors = obj.exceptions\n serializer = ExportTaskExceptionSerializer(errors, many=True,\n context=self.context)\n return serializer.data\n except ExportTaskException.DoesNotExist as e:\n return None\n <function token>\n <function token>\n <function token>\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n user = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = 
serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, 
error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
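`get_duration` leans on plain datetime arithmetic: subtracting two `datetime` objects yields a `timedelta`, and `str()` of a timedelta gives the `H:MM:SS` text the serializer returns. A self-contained check with made-up timestamps:

```python
from datetime import datetime

# Stand-ins for ExportRun.started_at / finished_at
started = datetime(2016, 3, 1, 12, 0, 0)
finished = datetime(2016, 3, 1, 12, 4, 30)

duration = str(finished - started)  # timedelta -> 'H:MM:SS'
print(duration)  # 0:04:30
```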
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ExportTaskSerializer(serializers.ModelSerializer):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\n class Meta:\n model = ExportTask\n fields = ('uid', 'url', 'name', 'status', 'started_at',\n 'finished_at', 'duration', 'result', 'errors')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n user = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = 
serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, 
write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
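Most read-only fields in this module are `SerializerMethodField`s, which DRF resolves by naming convention: a field named `owner` is computed by `get_owner(self, obj)`. A small sketch of that convention, assuming djangorestframework is installed; settings are stubbed so the snippet runs outside a project:

```python
import django
from django.conf import settings

settings.configure()  # bare settings so DRF can be used outside a project
django.setup()

from rest_framework import serializers


class OwnerSerializer(serializers.Serializer):
    owner = serializers.SerializerMethodField()  # resolved via get_owner()

    def get_owner(self, obj):
        return obj['user']['username']


print(OwnerSerializer({'user': {'username': 'demo'}}).data)  # {'owner': 'demo'}
```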
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n \"\"\"Return a sub-set of Job model attributes.\"\"\"\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n user = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the 
export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return 
Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
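`validate()` turns the four write-only corner fields into a geometry before discarding them. A rough, hypothetical sketch of what the bbox helpers in api/validators.py might reduce to, again assuming GeoDjango's GEOS bindings; the function name `bbox_to_geom` is invented for illustration:

```python
from django.contrib.gis.geos import Polygon


def bbox_to_geom(xmin, ymin, xmax, ymax):
    """Build a GEOS polygon from bbox corners, rejecting degenerate extents."""
    if xmin >= xmax or ymin >= ymax:
        raise ValueError('invalid extents: min values must be below max values')
    return Polygon.from_bbox((xmin, ymin, xmax, ymax))


print(bbox_to_geom(7.0, 22.0, 8.0, 23.0).wkt)
```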
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n <docstring token>\n uid = serializers.SerializerMethodField()\n name = serializers.CharField()\n description = serializers.CharField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n extent = serializers.SerializerMethodField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n user = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is 
not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, 
instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
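The region-selection step in `validate()` ranks candidate regions by the area of their intersection with the requested extent and keeps the largest. The same sort, demonstrated with plain stand-in objects:

```python
from types import SimpleNamespace

# Stand-ins for Region rows annotated with .intersection by the GeoQuerySet
regions = [
    SimpleNamespace(name='A', intersection=SimpleNamespace(area=0.2)),
    SimpleNamespace(name='B', intersection=SimpleNamespace(area=0.7)),
    SimpleNamespace(name='C', intersection=SimpleNamespace(area=0.5)),
]

ranked = sorted(regions, key=lambda r: r.intersection.area, reverse=True)
print([r.name for r in ranked])  # ['B', 'C', 'A'] -- largest overlap first
```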
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the Job's extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n user = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n 
lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n 
Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
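`formats` is a `MultipleChoiceField` validated against `EXPORT_FORMAT_CHOICES`, a tuple of `(slug, label)` pairs. Choice tuples like this double as a lookup table; a quick check of a requested set against the same data:

```python
EXPORT_FORMAT_CHOICES = (
    ('shp', 'Shapefile Format'),
    ('obf', 'OBF Format'),
    ('kml', 'KML Format'),
    ('garmin', 'Garmin Format'),
    ('sqlite', 'SQLITE Format'),
    ('thematic', 'Thematic Shapefile Format'),
)

labels = dict(EXPORT_FORMAT_CHOICES)  # slug -> human-readable label
requested = {'shp', 'kml', 'bogus'}
invalid = requested - labels.keys()   # anything outside the declared choices
print(sorted(invalid))                # ['bogus']
print([labels[s] for s in sorted(requested - invalid)])
```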
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def get_uid(self, obj):\n return obj.uid\n <function token>\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n user = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = 
serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = 
Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
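After deriving `the_geom`, `validate()` drops the write-only bbox fields and `formats` with a comma-separated row of `pop` calls, which builds and discards a throwaway tuple. A plainer equivalent of that cleanup:

```python
# Validated payload after the geometry has been derived (values illustrative)
data = {'xmin': 7.0, 'ymin': 22.0, 'xmax': 8.0, 'ymax': 23.0,
        'formats': ['shp'], 'name': 'demo'}

for key in ('xmin', 'ymin', 'xmax', 'ymax', 'formats'):
    data.pop(key)  # same effect as the tuple-of-pops one-liner

print(data)  # {'name': 'demo'}
```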
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass SimpleJobSerializer(serializers.Serializer):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n user = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = 
serializers.BooleanField()\n\n    def get_uid(self, obj):\n        return obj.uid\n\n    def get_extent(self, obj):\n        \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n        uid = str(obj.uid)\n        name = obj.name\n        geom = obj.the_geom\n        geometry = json.loads(GEOSGeometry(geom).geojson)\n        feature = OrderedDict()\n        feature['type'] = 'Feature'\n        feature['properties'] = {'uid': uid, 'name': name}\n        feature['geometry'] = geometry\n        return feature\n\n    def get_owner(self, obj):\n        return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n    \"\"\"\n    Return a full representation of an export Job.\n\n    This is the core representation of the API.\n    \"\"\"\n    \"\"\"\n    List of the available Export Formats.\n    \n    This list should be updated to add support for\n    additional export formats.\n    \"\"\"\n    EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n        ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n        'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n    formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n        allow_blank=False, write_only=True, error_messages={\n        'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n        'Expected a list of items but got type \"{input_type}\".')})\n    uid = serializers.UUIDField(read_only=True)\n    url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n        lookup_field='uid')\n    name = serializers.CharField(max_length=100)\n    description = serializers.CharField(max_length=255)\n    event = serializers.CharField(max_length=100, allow_blank=True,\n        required=False)\n    created_at = serializers.DateTimeField(read_only=True)\n    owner = serializers.SerializerMethodField(read_only=True)\n    exports = serializers.SerializerMethodField()\n    configurations = serializers.SerializerMethodField()\n    published = serializers.BooleanField(required=False)\n    feature_save = serializers.BooleanField(required=False)\n    feature_pub = serializers.BooleanField(required=False)\n    xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n        =True, error_messages={'required': _('xmin is required.'),\n        'invalid': _('invalid xmin value.')})\n    ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n        True, error_messages={'required': _('ymin is required.'), 'invalid':\n        _('invalid ymin value.')})\n    xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n        =True, error_messages={'required': _('xmax is required.'),\n        'invalid': _('invalid xmax value.')})\n    ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n        True, error_messages={'required': _('ymax is required.'), 'invalid':\n        _('invalid ymax value.')})\n    region = SimpleRegionSerializer(read_only=True)\n    extent = serializers.SerializerMethodField(read_only=True)\n    user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n    tags = serializers.SerializerMethodField()\n\n    def create(self, validated_data):\n        \"\"\"Creates an export Job.\"\"\"\n        return Job.objects.create(**validated_data)\n\n    def update(self, instance, validated_data):\n        \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n        pass\n\n    def validate(self, data):\n        \"\"\"\n        Validates the data submitted during Job creation.\n\n        See api/validators.py for validation code.\n        \"\"\"\n        user = data['user']\n        validators.validate_formats(data)\n        extents = validators.validate_bbox_params(data)\n        the_geom = validators.validate_bbox(extents, user=user)\n        data['the_geom'] = the_geom\n        regions = 
Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
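get_duration in ExportRunSerializer simply stringifies the difference of two datetimes; str() on a timedelta gives the H:MM:SS form the serializer returns. A quick, runnable illustration with stand-in timestamps (the values below are assumptions, not data from this file):

from datetime import datetime

# Stand-ins for obj.started_at and obj.finished_at.
started = datetime(2016, 1, 1, 12, 0, 0)
finished = datetime(2016, 1, 1, 12, 3, 30)

# Same guard as get_duration: only subtract when both are present;
# str() on a timedelta yields the 'H:MM:SS' string returned by the API.
duration = str(finished - started) if started and finished else None
print(duration)  # 0:03:30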
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n \"\"\"Serialize ExportRun.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n user = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n 
geometry = json.loads(GEOSGeometry(geom).geojson)\n        feature = OrderedDict()\n        feature['type'] = 'Feature'\n        feature['properties'] = {'uid': uid, 'name': name}\n        feature['geometry'] = geometry\n        return feature\n\n    def get_owner(self, obj):\n        return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n    \"\"\"\n    Return a full representation of an export Job.\n\n    This is the core representation of the API.\n    \"\"\"\n    \"\"\"\n    List of the available Export Formats.\n    \n    This list should be updated to add support for\n    additional export formats.\n    \"\"\"\n    EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n        ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n        'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n    formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n        allow_blank=False, write_only=True, error_messages={\n        'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n        'Expected a list of items but got type \"{input_type}\".')})\n    uid = serializers.UUIDField(read_only=True)\n    url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n        lookup_field='uid')\n    name = serializers.CharField(max_length=100)\n    description = serializers.CharField(max_length=255)\n    event = serializers.CharField(max_length=100, allow_blank=True,\n        required=False)\n    created_at = serializers.DateTimeField(read_only=True)\n    owner = serializers.SerializerMethodField(read_only=True)\n    exports = serializers.SerializerMethodField()\n    configurations = serializers.SerializerMethodField()\n    published = serializers.BooleanField(required=False)\n    feature_save = serializers.BooleanField(required=False)\n    feature_pub = serializers.BooleanField(required=False)\n    xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n        =True, error_messages={'required': _('xmin is required.'),\n        'invalid': _('invalid xmin value.')})\n    ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n        True, error_messages={'required': _('ymin is required.'), 'invalid':\n        _('invalid ymin value.')})\n    xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n        =True, error_messages={'required': _('xmax is required.'),\n        'invalid': _('invalid xmax value.')})\n    ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n        True, error_messages={'required': _('ymax is required.'), 'invalid':\n        _('invalid ymax value.')})\n    region = SimpleRegionSerializer(read_only=True)\n    extent = serializers.SerializerMethodField(read_only=True)\n    user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n    tags = serializers.SerializerMethodField()\n\n    def create(self, validated_data):\n        \"\"\"Creates an export Job.\"\"\"\n        return Job.objects.create(**validated_data)\n\n    def update(self, instance, validated_data):\n        \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n        pass\n\n    def validate(self, data):\n        \"\"\"\n        Validates the data submitted during Job creation.\n\n        See api/validators.py for validation code.\n        \"\"\"\n        user = data['user']\n        validators.validate_formats(data)\n        extents = validators.validate_bbox_params(data)\n        the_geom = validators.validate_bbox(extents, user=user)\n        data['the_geom'] = the_geom\n        regions = Region.objects.filter(the_geom__intersects=the_geom\n            ).intersection(the_geom, field_name='the_geom')\n        sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n            .area, reverse=True)\n        data['region'] = validators.validate_region(sorted_regions)\n        
data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n <docstring token>\n url = serializers.HyperlinkedIdentityField(view_name='api:runs-detail',\n lookup_field='uid')\n job = SimpleJobSerializer()\n tasks = ExportTaskSerializer(many=True)\n finished_at = serializers.SerializerMethodField()\n duration = serializers.SerializerMethodField()\n user = serializers.SerializerMethodField()\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = 
json.loads(GEOSGeometry(geom).geojson)\n        feature = OrderedDict()\n        feature['type'] = 'Feature'\n        feature['properties'] = {'uid': uid, 'name': name}\n        feature['geometry'] = geometry\n        return feature\n\n    def get_owner(self, obj):\n        return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n    \"\"\"\n    Return a full representation of an export Job.\n\n    This is the core representation of the API.\n    \"\"\"\n    \"\"\"\n    List of the available Export Formats.\n    \n    This list should be updated to add support for\n    additional export formats.\n    \"\"\"\n    EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n        ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n        'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n    formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n        allow_blank=False, write_only=True, error_messages={\n        'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n        'Expected a list of items but got type \"{input_type}\".')})\n    uid = serializers.UUIDField(read_only=True)\n    url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n        lookup_field='uid')\n    name = serializers.CharField(max_length=100)\n    description = serializers.CharField(max_length=255)\n    event = serializers.CharField(max_length=100, allow_blank=True,\n        required=False)\n    created_at = serializers.DateTimeField(read_only=True)\n    owner = serializers.SerializerMethodField(read_only=True)\n    exports = serializers.SerializerMethodField()\n    configurations = serializers.SerializerMethodField()\n    published = serializers.BooleanField(required=False)\n    feature_save = serializers.BooleanField(required=False)\n    feature_pub = serializers.BooleanField(required=False)\n    xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n        =True, error_messages={'required': _('xmin is required.'),\n        'invalid': _('invalid xmin value.')})\n    ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n        True, error_messages={'required': _('ymin is required.'), 'invalid':\n        _('invalid ymin value.')})\n    xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n        =True, error_messages={'required': _('xmax is required.'),\n        'invalid': _('invalid xmax value.')})\n    ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n        True, error_messages={'required': _('ymax is required.'), 'invalid':\n        _('invalid ymax value.')})\n    region = SimpleRegionSerializer(read_only=True)\n    extent = serializers.SerializerMethodField(read_only=True)\n    user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n    tags = serializers.SerializerMethodField()\n\n    def create(self, validated_data):\n        \"\"\"Creates an export Job.\"\"\"\n        return Job.objects.create(**validated_data)\n\n    def update(self, instance, validated_data):\n        \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n        pass\n\n    def validate(self, data):\n        \"\"\"\n        Validates the data submitted during Job creation.\n\n        See api/validators.py for validation code.\n        \"\"\"\n        user = data['user']\n        validators.validate_formats(data)\n        extents = validators.validate_bbox_params(data)\n        the_geom = validators.validate_bbox(extents, user=user)\n        data['the_geom'] = the_geom\n        regions = Region.objects.filter(the_geom__intersects=the_geom\n            ).intersection(the_geom, field_name='the_geom')\n        sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n            .area, reverse=True)\n        data['region'] = validators.validate_region(sorted_regions)\n        data.pop('xmin'), 
data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
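JobSerializer.validate delegates bounding-box checks to api/validators.py, which is not part of this file; the field definitions (xmin/xmax in [-180, 180], ymin/ymax in [-90, 90]) suggest roughly the following shape. This is a hypothetical sketch of such a check inferred from the FloatField bounds, not the project's actual validator:

def validate_bbox_params(data):
    # Hypothetical stand-in for api/validators.validate_bbox_params;
    # bounds inferred from the min/max values on JobSerializer.
    xmin, ymin, xmax, ymax = (data['xmin'], data['ymin'],
                              data['xmax'], data['ymax'])
    if not -180 <= xmin < xmax <= 180:
        raise ValueError('invalid x extents')
    if not -90 <= ymin < ymax <= 90:
        raise ValueError('invalid y extents')
    return xmin, ymin, xmax, ymax


print(validate_bbox_params({'xmin': 7.0, 'ymin': 16.1,
                            'xmax': 7.5, 'ymax': 27.6}))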
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n\n def get_finished_at(self, obj):\n if not obj.finished_at:\n return {}\n else:\n return obj.finished_at\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return 
feature\n\n    def get_owner(self, obj):\n        return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n    \"\"\"\n    Return a full representation of an export Job.\n\n    This is the core representation of the API.\n    \"\"\"\n    \"\"\"\n    List of the available Export Formats.\n    \n    This list should be updated to add support for\n    additional export formats.\n    \"\"\"\n    EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n        ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n        'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n    formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n        allow_blank=False, write_only=True, error_messages={\n        'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n        'Expected a list of items but got type \"{input_type}\".')})\n    uid = serializers.UUIDField(read_only=True)\n    url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n        lookup_field='uid')\n    name = serializers.CharField(max_length=100)\n    description = serializers.CharField(max_length=255)\n    event = serializers.CharField(max_length=100, allow_blank=True,\n        required=False)\n    created_at = serializers.DateTimeField(read_only=True)\n    owner = serializers.SerializerMethodField(read_only=True)\n    exports = serializers.SerializerMethodField()\n    configurations = serializers.SerializerMethodField()\n    published = serializers.BooleanField(required=False)\n    feature_save = serializers.BooleanField(required=False)\n    feature_pub = serializers.BooleanField(required=False)\n    xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n        =True, error_messages={'required': _('xmin is required.'),\n        'invalid': _('invalid xmin value.')})\n    ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n        True, error_messages={'required': _('ymin is required.'), 'invalid':\n        _('invalid ymin value.')})\n    xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n        =True, error_messages={'required': _('xmax is required.'),\n        'invalid': _('invalid xmax value.')})\n    ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n        True, error_messages={'required': _('ymax is required.'), 'invalid':\n        _('invalid ymax value.')})\n    region = SimpleRegionSerializer(read_only=True)\n    extent = serializers.SerializerMethodField(read_only=True)\n    user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n    tags = serializers.SerializerMethodField()\n\n    def create(self, validated_data):\n        \"\"\"Creates an export Job.\"\"\"\n        return Job.objects.create(**validated_data)\n\n    def update(self, instance, validated_data):\n        \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n        pass\n\n    def validate(self, data):\n        \"\"\"\n        Validates the data submitted during Job creation.\n\n        See api/validators.py for validation code.\n        \"\"\"\n        user = data['user']\n        validators.validate_formats(data)\n        extents = validators.validate_bbox_params(data)\n        the_geom = validators.validate_bbox(extents, user=user)\n        data['the_geom'] = the_geom\n        regions = Region.objects.filter(the_geom__intersects=the_geom\n            ).intersection(the_geom, field_name='the_geom')\n        sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n            .area, reverse=True)\n        data['region'] = validators.validate_region(sorted_regions)\n        data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n            ), data.pop('formats')\n        return data\n\n    def get_extent(self, obj):\n        \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n        uid = 
str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
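After intersecting candidate Regions with the job geometry, validate sorts them by intersection area, largest first, and picks from that ordering. The same idea with plain objects; the Candidate tuple below is a hedged stand-in for a GeoDjango queryset annotated with an intersection geometry:

from collections import namedtuple

# Plain stand-ins for Regions annotated with an intersection geometry.
Candidate = namedtuple('Candidate', ['name', 'intersection_area'])
regions = [Candidate('A', 0.4), Candidate('B', 2.5), Candidate('C', 1.1)]

# Mirrors: sorted(regions.all(), key=lambda a: a.intersection.area,
# reverse=True) -- largest overlap first.
sorted_regions = sorted(regions, key=lambda r: r.intersection_area,
                        reverse=True)
print([r.name for r in sorted_regions])  # ['B', 'C', 'A']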
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n <function token>\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n\n def get_user(self, obj):\n return obj.user.username\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass 
JobSerializer(serializers.Serializer):\n    \"\"\"\n    Return a full representation of an export Job.\n\n    This is the core representation of the API.\n    \"\"\"\n    \"\"\"\n    List of the available Export Formats.\n    \n    This list should be updated to add support for\n    additional export formats.\n    \"\"\"\n    EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n        ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n        'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n    formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n        allow_blank=False, write_only=True, error_messages={\n        'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n        'Expected a list of items but got type \"{input_type}\".')})\n    uid = serializers.UUIDField(read_only=True)\n    url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n        lookup_field='uid')\n    name = serializers.CharField(max_length=100)\n    description = serializers.CharField(max_length=255)\n    event = serializers.CharField(max_length=100, allow_blank=True,\n        required=False)\n    created_at = serializers.DateTimeField(read_only=True)\n    owner = serializers.SerializerMethodField(read_only=True)\n    exports = serializers.SerializerMethodField()\n    configurations = serializers.SerializerMethodField()\n    published = serializers.BooleanField(required=False)\n    feature_save = serializers.BooleanField(required=False)\n    feature_pub = serializers.BooleanField(required=False)\n    xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n        =True, error_messages={'required': _('xmin is required.'),\n        'invalid': _('invalid xmin value.')})\n    ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n        True, error_messages={'required': _('ymin is required.'), 'invalid':\n        _('invalid ymin value.')})\n    xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n        =True, error_messages={'required': _('xmax is required.'),\n        'invalid': _('invalid xmax value.')})\n    ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n        True, error_messages={'required': _('ymax is required.'), 'invalid':\n        _('invalid ymax value.')})\n    region = SimpleRegionSerializer(read_only=True)\n    extent = serializers.SerializerMethodField(read_only=True)\n    user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n    tags = serializers.SerializerMethodField()\n\n    def create(self, validated_data):\n        \"\"\"Creates an export Job.\"\"\"\n        return Job.objects.create(**validated_data)\n\n    def update(self, instance, validated_data):\n        \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n        pass\n\n    def validate(self, data):\n        \"\"\"\n        Validates the data submitted during Job creation.\n\n        See api/validators.py for validation code.\n        \"\"\"\n        user = data['user']\n        validators.validate_formats(data)\n        extents = validators.validate_bbox_params(data)\n        the_geom = validators.validate_bbox(extents, user=user)\n        data['the_geom'] = the_geom\n        regions = Region.objects.filter(the_geom__intersects=the_geom\n            ).intersection(the_geom, field_name='the_geom')\n        sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n            .area, reverse=True)\n        data['region'] = validators.validate_region(sorted_regions)\n        data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n            ), data.pop('formats')\n        return data\n\n    def get_extent(self, obj):\n        \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n        uid = str(obj.uid)\n        name = obj.name\n        geom = obj.the_geom\n        geometry = 
json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
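The formats field is a write-only MultipleChoiceField keyed by EXPORT_FORMAT_CHOICES; DRF validates each submitted slug against the first element of every choice pair. A plain-Python sketch of that membership check, outside DRF, with an illustrative helper name:

EXPORT_FORMAT_CHOICES = (('shp', 'Shapefile Format'), ('obf', 'OBF Format'),
                         ('kml', 'KML Format'), ('garmin', 'Garmin Format'),
                         ('sqlite', 'SQLITE Format'),
                         ('thematic', 'Thematic Shapefile Format'))


def check_formats(requested):
    # Illustrative equivalent of the MultipleChoiceField check: every
    # submitted slug must be the first element of some choice pair.
    valid = {slug for slug, _label in EXPORT_FORMAT_CHOICES}
    unknown = set(requested) - valid
    if unknown:
        raise ValueError('invalid export format: %s'
                         % ', '.join(sorted(unknown)))
    return set(requested)


print(sorted(check_formats(['shp', 'kml'])))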
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n <function token>\n\n def get_duration(self, obj):\n \"\"\"Return the duration of the the run.\"\"\"\n started = obj.started_at\n finished = obj.finished_at\n if started and finished:\n return str(finished - started)\n else:\n return None\n <function token>\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return 
a full representation of an export Job.\n\n    This is the core representation of the API.\n    \"\"\"\n    \"\"\"\n    List of the available Export Formats.\n    \n    This list should be updated to add support for\n    additional export formats.\n    \"\"\"\n    EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n        ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n        'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n    formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n        allow_blank=False, write_only=True, error_messages={\n        'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n        'Expected a list of items but got type \"{input_type}\".')})\n    uid = serializers.UUIDField(read_only=True)\n    url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n        lookup_field='uid')\n    name = serializers.CharField(max_length=100)\n    description = serializers.CharField(max_length=255)\n    event = serializers.CharField(max_length=100, allow_blank=True,\n        required=False)\n    created_at = serializers.DateTimeField(read_only=True)\n    owner = serializers.SerializerMethodField(read_only=True)\n    exports = serializers.SerializerMethodField()\n    configurations = serializers.SerializerMethodField()\n    published = serializers.BooleanField(required=False)\n    feature_save = serializers.BooleanField(required=False)\n    feature_pub = serializers.BooleanField(required=False)\n    xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n        =True, error_messages={'required': _('xmin is required.'),\n        'invalid': _('invalid xmin value.')})\n    ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n        True, error_messages={'required': _('ymin is required.'), 'invalid':\n        _('invalid ymin value.')})\n    xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n        =True, error_messages={'required': _('xmax is required.'),\n        'invalid': _('invalid xmax value.')})\n    ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n        True, error_messages={'required': _('ymax is required.'), 'invalid':\n        _('invalid ymax value.')})\n    region = SimpleRegionSerializer(read_only=True)\n    extent = serializers.SerializerMethodField(read_only=True)\n    user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n    tags = serializers.SerializerMethodField()\n\n    def create(self, validated_data):\n        \"\"\"Creates an export Job.\"\"\"\n        return Job.objects.create(**validated_data)\n\n    def update(self, instance, validated_data):\n        \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n        pass\n\n    def validate(self, data):\n        \"\"\"\n        Validates the data submitted during Job creation.\n\n        See api/validators.py for validation code.\n        \"\"\"\n        user = data['user']\n        validators.validate_formats(data)\n        extents = validators.validate_bbox_params(data)\n        the_geom = validators.validate_bbox(extents, user=user)\n        data['the_geom'] = the_geom\n        regions = Region.objects.filter(the_geom__intersects=the_geom\n            ).intersection(the_geom, field_name='the_geom')\n        sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n            .area, reverse=True)\n        data['region'] = validators.validate_region(sorted_regions)\n        data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n            ), data.pop('formats')\n        return data\n\n    def get_extent(self, obj):\n        \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n        uid = str(obj.uid)\n        name = obj.name\n        geom = obj.the_geom\n        geometry = json.loads(GEOSGeometry(geom).geojson)\n        feature = OrderedDict()\n        
feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ExportRunSerializer(serializers.ModelSerializer):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\n class Meta:\n model = ExportRun\n fields = ('uid', 'url', 'started_at', 'finished_at', 'duration',\n 'user', 'status', 'job', 'tasks')\n <function token>\n <function token>\n <function token>\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n 
additional export formats.\n    \"\"\"\n    EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n        ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n        'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n    formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n        allow_blank=False, write_only=True, error_messages={\n        'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n        'Expected a list of items but got type \"{input_type}\".')})\n    uid = serializers.UUIDField(read_only=True)\n    url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n        lookup_field='uid')\n    name = serializers.CharField(max_length=100)\n    description = serializers.CharField(max_length=255)\n    event = serializers.CharField(max_length=100, allow_blank=True,\n        required=False)\n    created_at = serializers.DateTimeField(read_only=True)\n    owner = serializers.SerializerMethodField(read_only=True)\n    exports = serializers.SerializerMethodField()\n    configurations = serializers.SerializerMethodField()\n    published = serializers.BooleanField(required=False)\n    feature_save = serializers.BooleanField(required=False)\n    feature_pub = serializers.BooleanField(required=False)\n    xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n        =True, error_messages={'required': _('xmin is required.'),\n        'invalid': _('invalid xmin value.')})\n    ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n        True, error_messages={'required': _('ymin is required.'), 'invalid':\n        _('invalid ymin value.')})\n    xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n        =True, error_messages={'required': _('xmax is required.'),\n        'invalid': _('invalid xmax value.')})\n    ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n        True, error_messages={'required': _('ymax is required.'), 'invalid':\n        _('invalid ymax value.')})\n    region = SimpleRegionSerializer(read_only=True)\n    extent = serializers.SerializerMethodField(read_only=True)\n    user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n    tags = serializers.SerializerMethodField()\n\n    def create(self, validated_data):\n        \"\"\"Creates an export Job.\"\"\"\n        return Job.objects.create(**validated_data)\n\n    def update(self, instance, validated_data):\n        \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n        pass\n\n    def validate(self, data):\n        \"\"\"\n        Validates the data submitted during Job creation.\n\n        See api/validators.py for validation code.\n        \"\"\"\n        user = data['user']\n        validators.validate_formats(data)\n        extents = validators.validate_bbox_params(data)\n        the_geom = validators.validate_bbox(extents, user=user)\n        data['the_geom'] = the_geom\n        regions = Region.objects.filter(the_geom__intersects=the_geom\n            ).intersection(the_geom, field_name='the_geom')\n        sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n            .area, reverse=True)\n        data['region'] = validators.validate_region(sorted_regions)\n        data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n            ), data.pop('formats')\n        return data\n\n    def get_extent(self, obj):\n        \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n        uid = str(obj.uid)\n        name = obj.name\n        geom = obj.the_geom\n        geometry = json.loads(GEOSGeometry(geom).geojson)\n        feature = OrderedDict()\n        feature['type'] = 'Feature'\n        feature['properties'] = {'uid': uid, 'name': name}\n        feature['geometry'] = geometry\n        return feature\n\n    def get_exports(self, obj):\n        \"\"\"Return the export formats selected 
for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass UserSerializer(serializers.Serializer):\n id = serializers.IntegerField()\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': 
_('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = 
SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass UserSerializer(serializers.Serializer):\n <assignment token>\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid 
export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, 
many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Return a GeoJSON representation of the region mask.\"\"\"\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of 
items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n 
return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RegionMaskSerializer(geo_serializers.GeoFeatureModelSerializer):\n <docstring token>\n\n\n class Meta:\n model = RegionMask\n geo_field = 'the_geom'\n fields = 'the_geom',\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = 
serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n 
\"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n \"\"\"Serializer returning GeoJSON representation of Regions.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = 
serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return 
serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n <docstring token>\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n id = serializers.SerializerMethodField()\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = 
serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return 
the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n <docstring token>\n <assignment token>\n <assignment token>\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n\n def get_id(self, obj):\n return obj.uid\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n 
required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RegionSerializer(geo_serializers.GeoFeatureModelSerializer):\n <docstring token>\n <assignment token>\n <assignment token>\n\n\n class Meta:\n model = Region\n geo_field = 'the_geom'\n fields = 'id', 'uid', 'name', 'description', 'url', 'the_geom'\n <function token>\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = 
serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer for returning Region model data without geometry.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n 
feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n <docstring token>\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:regions-detail', lookup_field='uid')\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n 
feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass SimpleRegionSerializer(serializers.ModelSerializer):\n <docstring token>\n <assignment token>\n\n\n class Meta:\n model = Region\n fields = 'uid', 'name', 'description', 'url'\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = 
serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n \"\"\"Return a representation of the ExportFormat model.\"\"\"\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = 
serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n <docstring token>\n url = serializers.HyperlinkedIdentityField(view_name=\n 'api:formats-detail', lookup_field='slug')\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, 
write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ExportFormatSerializer(serializers.ModelSerializer):\n <docstring token>\n <assignment token>\n\n\n class Meta:\n model = ExportFormat\n fields = 'uid', 'url', 'slug', 'name', 'description'\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n 
_('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ListJobSerializer(serializers.Serializer):\n \"\"\"\n Return a sub-set of Job model attributes.\n\n Provides a stripped down set of export attributes.\n Removes the selected Tags from the Job representation.\n Used to display the list of exports in the export browser\n where tag info is not required.\n \"\"\"\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax 
value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ListJobSerializer(serializers.Serializer):\n <docstring token>\n uid = serializers.SerializerMethodField()\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField()\n description = serializers.CharField()\n event = serializers.CharField()\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n extent = serializers.SerializerMethodField()\n region = SimpleRegionSerializer(read_only=True)\n published = serializers.BooleanField()\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = 
serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ListJobSerializer(serializers.Serializer):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def get_uid(self, obj):\n return obj.uid\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n 
def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ListJobSerializer(serializers.Serializer):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, 
validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ListJobSerializer(serializers.Serializer):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n def get_owner(self, obj):\n return obj.user.username\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = 
validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass ListJobSerializer(serializers.Serializer):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = 
validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass JobSerializer(serializers.Serializer):\n \"\"\"\n Return a full representation of an export Job.\n\n This is the core representation of the API.\n \"\"\"\n \"\"\"\n List of the available Export Formats.\n \n This list should be updated to add support for\n additional export formats.\n \"\"\"\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n 
data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass JobSerializer(serializers.Serializer):\n <docstring token>\n <docstring token>\n EXPORT_FORMAT_CHOICES = ('shp', 'Shapefile Format'), ('obf', 'OBF Format'\n ), ('kml', 'KML Format'), ('garmin', 'Garmin Format'), ('sqlite',\n 'SQLITE Format'), ('thematic', 'Thematic Shapefile Format')\n formats = serializers.MultipleChoiceField(choices=EXPORT_FORMAT_CHOICES,\n allow_blank=False, write_only=True, error_messages={\n 'invalid_choice': _('invalid export format.'), 'not_a_list': _(\n 'Expected a list of items but got type \"{input_type}\".')})\n uid = serializers.UUIDField(read_only=True)\n url = serializers.HyperlinkedIdentityField(view_name='api:jobs-detail',\n lookup_field='uid')\n name = serializers.CharField(max_length=100)\n description = serializers.CharField(max_length=255)\n event = serializers.CharField(max_length=100, allow_blank=True,\n required=False)\n created_at = serializers.DateTimeField(read_only=True)\n owner = serializers.SerializerMethodField(read_only=True)\n exports = serializers.SerializerMethodField()\n configurations = serializers.SerializerMethodField()\n published = serializers.BooleanField(required=False)\n feature_save = serializers.BooleanField(required=False)\n feature_pub = serializers.BooleanField(required=False)\n xmin = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmin is required.'),\n 'invalid': _('invalid xmin value.')})\n ymin = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymin is required.'), 'invalid':\n _('invalid ymin value.')})\n xmax = serializers.FloatField(max_value=180, min_value=-180, write_only\n =True, error_messages={'required': _('xmax is required.'),\n 'invalid': _('invalid xmax value.')})\n ymax = serializers.FloatField(max_value=90, min_value=-90, write_only=\n True, error_messages={'required': _('ymax is required.'), 'invalid':\n _('invalid ymax value.')})\n region = SimpleRegionSerializer(read_only=True)\n extent = serializers.SerializerMethodField(read_only=True)\n user = serializers.HiddenField(default=serializers.CurrentUserDefault())\n tags = serializers.SerializerMethodField()\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = 
str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass JobSerializer(serializers.Serializer):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n\n def update(self, instance, validated_data):\n \"\"\"Not implemented as Jobs are cloned rather than updated.\"\"\"\n pass\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass JobSerializer(serializers.Serializer):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n <function token>\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n\n def get_extent(self, obj):\n \"\"\"Return the export extent as a GeoJSON Feature.\"\"\"\n uid = str(obj.uid)\n name = obj.name\n geom = obj.the_geom\n geometry = json.loads(GEOSGeometry(geom).geojson)\n feature = OrderedDict()\n feature['type'] = 'Feature'\n feature['properties'] = {'uid': uid, 'name': name}\n feature['geometry'] = geometry\n return feature\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass JobSerializer(serializers.Serializer):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n <function token>\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n <function token>\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n\n def get_owner(self, obj):\n \"\"\"Return the username for the owner of this export.\"\"\"\n return obj.user.username\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass JobSerializer(serializers.Serializer):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n <function token>\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n <function token>\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n\n def get_tags(self, obj):\n \"\"\"Return the Tags selected for this export.\"\"\"\n tags = obj.tags.all()\n serializer = TagSerializer(tags, many=True)\n return serializer.data\n <function token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass JobSerializer(serializers.Serializer):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def create(self, validated_data):\n \"\"\"Creates an export Job.\"\"\"\n return Job.objects.create(**validated_data)\n <function token>\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n <function token>\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass JobSerializer(serializers.Serializer):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n <function token>\n\n def get_exports(self, obj):\n \"\"\"Return the export formats selected for this export.\"\"\"\n formats = [format for format in obj.formats.all()]\n serializer = ExportFormatSerializer(formats, many=True, context={\n 'request': self.context['request']})\n return serializer.data\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass JobSerializer(serializers.Serializer):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n <function token>\n <function token>\n\n def get_configurations(self, obj):\n \"\"\"Return the configurations selected for this export.\"\"\"\n configs = obj.configs.all()\n serializer = SimpleExportConfigSerializer(configs, many=True,\n context={'request': self.context['request']})\n return serializer.data\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass JobSerializer(serializers.Serializer):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n def validate(self, data):\n \"\"\"\n Validates the data submitted during Job creation.\n\n See api/validators.py for validation code.\n \"\"\"\n user = data['user']\n validators.validate_formats(data)\n extents = validators.validate_bbox_params(data)\n the_geom = validators.validate_bbox(extents, user=user)\n data['the_geom'] = the_geom\n regions = Region.objects.filter(the_geom__intersects=the_geom\n ).intersection(the_geom, field_name='the_geom')\n sorted_regions = sorted(regions.all(), key=lambda a: a.intersection\n .area, reverse=True)\n data['region'] = validators.validate_region(sorted_regions)\n data.pop('xmin'), data.pop('ymin'), data.pop('xmax'), data.pop('ymax'\n ), data.pop('formats')\n return data\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass JobSerializer(serializers.Serializer):\n <docstring token>\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n"
] | false |
99,305 |
ef5cdc1e79ed4c7b81c034a224e2c71fc8b906a5
|
import numpy

def get_spike_times_ps(nn, n_ps=1, frac=1.):
    '''
    gets the spike times of the neurons participating in PS n_ps,
    ordered according to the phase sequence arrangement;
    frac is the fraction of neurons from each group to be returned
    '''
    sp = []
    n = 0
    for gr in nn.p_ass_index[n_ps]:
        # slice indices must be integers in Python 3
        for nrn in gr[0:int(frac * len(gr))]:
            for t in nn.mon_spike_e[nrn]:
                sp.append((n, t))
            n += 1
    # optimize if you feel bored:
    # r = [(i, t) for i, spt in enumerate(sptimes) for t in spt]
    return sp
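
# Minimal usage sketch (hypothetical): `nn` is assumed to expose
# `p_ass_index` (lists of neuron-index groups per phase sequence) and
# `mon_spike_e` (per-neuron spike-time trains), as used above.
#   sp = get_spike_times_ps(nn, n_ps=1, frac=0.5)
#   ids = [i for i, t in sp]     # group-ordered neuron ids (e.g. for a raster plot)
#   times = [t for i, t in sp]   # matching spike times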
|
[
"import numpy\n\ndef get_spike_times_ps(nn,n_ps=1,frac=1.):\n '''\n gets the spike times of the neurons participating in PS n_ps\n ordered according to the phase sequence arrangement\n frac is the fraction of neurons from each group to be returned\n '''\n sp=[]\n n=0\n minl = 50\n for gr in nn.p_ass_index[n_ps]:\n for nrn in gr[0:frac*len(gr)]:\n #print nrn\n for t in nn.mon_spike_e[nrn]:\n sp.append((n,t))\n n+=1\n # optimize if feel bored! \n #r = [(i,t) for i,sp in enumerate(sptimes) for t in sp]\n\n return sp\n\n",
"import numpy\n\n\ndef get_spike_times_ps(nn, n_ps=1, frac=1.0):\n \"\"\"\n gets the spike times of the neurons participating in PS n_ps\n ordered according to the phase sequence arrangement\n frac is the fraction of neurons from each group to be returned\n \"\"\"\n sp = []\n n = 0\n minl = 50\n for gr in nn.p_ass_index[n_ps]:\n for nrn in gr[0:frac * len(gr)]:\n for t in nn.mon_spike_e[nrn]:\n sp.append((n, t))\n n += 1\n return sp\n",
"<import token>\n\n\ndef get_spike_times_ps(nn, n_ps=1, frac=1.0):\n \"\"\"\n gets the spike times of the neurons participating in PS n_ps\n ordered according to the phase sequence arrangement\n frac is the fraction of neurons from each group to be returned\n \"\"\"\n sp = []\n n = 0\n minl = 50\n for gr in nn.p_ass_index[n_ps]:\n for nrn in gr[0:frac * len(gr)]:\n for t in nn.mon_spike_e[nrn]:\n sp.append((n, t))\n n += 1\n return sp\n",
"<import token>\n<function token>\n"
] | false |
99,306 |
2529a97825cf25d4fb2cba9582cb671819052ddf
|
'''
train DeepFINet
simple SRCNN-style training script
'''
import os
from os import path
import argparse
import random
import csv
from tqdm import tqdm
import platform
if platform.system() == 'Linux':
import matplotlib
matplotlib.use('Agg')
import numpy as np
import h5py
import chainer
import chainer.links as L
import chainer.functions as F
from chainer import (reporter, training)
from chainer.training import extensions
from chainer.datasets import (TupleDataset, TransformDataset)
from chainer.links.model.vision import resnet
from chainercv import transforms
import networks as N
# path settings
# absolute path of this file
FILE_PATH = path.dirname(path.abspath(__file__))
# path to the STVSR project root
ROOT_PATH = path.normpath(path.join(FILE_PATH, '../'))
# DATA_PATH = '/media/shimo/HDD_storage/DataSet'
DATA_PATH = path.join(ROOT_PATH, 'dataset')
class SequenceDataset(chainer.dataset.DatasetMixin):
def __init__(self, dataset='train'):
self.image_paths = []
csv_path = None
if dataset == 'train':
csv_path = 'Train_Mini_UCF101/train_data_loc.csv'
elif dataset == 'test':
csv_path = 'Test_Mini_UCF101/train_data_loc.csv'
with open(path.join(DATA_PATH, csv_path)) as f:
reader = csv.reader(f)
for row in reader:
self.image_paths.append(path.join(DATA_PATH, row[0]))
def __len__(self):
return len(self.image_paths)
def get_example(self, i):
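        # each sample is an .npz archive with 'x_data' (input frame stack)
        # and 'y_data' (target frame); see SequenceDatasetOnMem for the shapes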
data = np.load(self.image_paths[i])
x_data = data['x_data']
y_data = data['y_data']
return x_data, y_data
class SequenceDatasetOnMem(chainer.dataset.DatasetMixin):
def __init__(self, dataset='train'):
self.image_paths = []
csv_path = None
if dataset == 'train':
csv_path = 'Train_Mini_UCF101/train_data_loc.csv'
elif dataset == 'test':
csv_path = 'Test_Mini_UCF101/train_data_loc.csv'
with open(path.join(DATA_PATH, csv_path)) as f:
reader = csv.reader(f)
for row in reader:
self.image_paths.append(path.join(DATA_PATH, row[0]))
data = np.load(self.image_paths[0])
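        # infer the array shapes from the first sample and preallocate
        # everything in RAM, so get_example becomes a plain indexing op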
nf, ch, h, w = data['x_data'].shape
self.x_data = np.zeros((len(self.image_paths), nf, ch, h, w), dtype=np.float32)
ch, h, w = data['y_data'].shape
self.y_data = np.zeros((len(self.image_paths), ch, h, w), dtype=np.float32)
print("loading datasaet {} ...".format(dataset))
for i, p in tqdm(enumerate(self.image_paths)):
data = np.load(p)
self.x_data[i] = data['x_data']
self.y_data[i] = data['y_data']
def __len__(self):
return len(self.image_paths)
def get_example(self, i):
return self.x_data[i], self.y_data[i]
def main():
'''
main function, start point
'''
    # command-line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--batchsize', '-b', type=int, default=128,
help='Number of images in each mini-batch')
parser.add_argument('--learnrate', '-l', type=float, default=0.01,
help='Learning rate for SGD')
parser.add_argument('--epoch', '-e', type=int, default=100,
help='Number of sweeps over the dataset to train')
parser.add_argument('--gpu', '-g', type=int, default=0,
help='GPU ID (negative value indicates CPU)')
parser.add_argument('--resume', '-r', default='',
help='Resume the training from snapshot')
    parser.add_argument('--iter_parallel', '-p', action='store_true', default=False,
                        help='load data in parallel worker processes')
    parser.add_argument('--opt', '-o', type=str, choices=('adam', 'sgd'), default='adam')
parser.add_argument('--depth', '-d', type=int, default=3,
help='DeepFINet Layer Depth')
args = parser.parse_args()
    # print learning parameters
    print("-=Learning Parameter=-")
    print("# Max Epochs: {}".format(args.epoch))
    print("# Batch Size: {}".format(args.batchsize))
    print("# Learning Rate: {}".format(args.learnrate))
    print("# Optimizer Method: {}".format(args.opt))
    print('# Train Dataset: Mini UCF101')
    if args.iter_parallel:
        print("# Data iterators load in parallel")
    print("\n")
    # save directory
outdir = path.join(
ROOT_PATH, 'results/DeepResFINet_opt_{}_depth_{}'.format(args.opt, args.depth))
if not path.exists(outdir):
os.makedirs(outdir)
with open(path.join(outdir, 'arg_param.txt'), 'w') as f:
for k, v in args.__dict__.items():
f.write('{}:{}\n'.format(k, v))
    print('# loading dataset (Mini_UCF101 train/test) ...')
if args.iter_parallel:
train = SequenceDataset(dataset='train')
test = SequenceDataset(dataset='test')
else:
train = SequenceDatasetOnMem(dataset='train')
test = SequenceDatasetOnMem(dataset='test')
# prepare model
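    # GenEvaluator is assumed to wrap the generator and report the
    # 'main/loss' and 'main/PSNR' values that the reporters below consume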
model = N.GenEvaluator(N.DeepFINet(depth=args.depth))
if args.gpu >= 0:
chainer.cuda.get_device_from_id(args.gpu).use()
model.to_gpu()
# setup optimizer
if args.opt == 'adam':
optimizer = chainer.optimizers.Adam()
elif args.opt == 'sgd':
optimizer = chainer.optimizers.MomentumSGD(lr=args.learnrate, momentum=0.9)
optimizer.setup(model)
optimizer.add_hook(chainer.optimizer.WeightDecay(0.0001))
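    # WeightDecay applies L2 regularization (coefficient 1e-4) to all parameters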
# setup iter
if args.iter_parallel:
train_iter = chainer.iterators.MultiprocessIterator(
train, args.batchsize, n_processes=8)
test_iter = chainer.iterators.MultiprocessIterator(
test, args.batchsize, repeat=False, shuffle=False, n_processes=8)
else:
train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
test_iter = chainer.iterators.SerialIterator(
test, args.batchsize, repeat=False, shuffle=False)
# setup trainer
updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)
trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=outdir)
    # eval test data
trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))
# dump loss graph
trainer.extend(extensions.dump_graph('main/loss'))
# lr shift
if args.opt == 'sgd':
trainer.extend(extensions.ExponentialShift("lr", 0.1), trigger=(100, 'epoch'))
elif args.opt == 'adam':
trainer.extend(extensions.ExponentialShift("alpha", 0.1), trigger=(100, 'epoch'))
# save snapshot
trainer.extend(extensions.snapshot(), trigger=(10, 'epoch'))
trainer.extend(extensions.snapshot_object(
model, 'model_snapshot_{.updater.epoch}'), trigger=(10, 'epoch'))
# log report
trainer.extend(extensions.LogReport())
trainer.extend(extensions.observe_lr(), trigger=(1, 'epoch'))
# plot loss graph
trainer.extend(
extensions.PlotReport(['main/loss', 'validation/main/loss'],
'epoch', file_name='loss.png'))
# plot acc graph
trainer.extend(
extensions.PlotReport(
['main/PSNR', 'validation/main/PSNR'],
'epoch', file_name='PSNR.png'))
# print info
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss', 'main/PSNR', 'validation/main/PSNR', 'lr', 'elapsed_time']))
# print progbar
trainer.extend(extensions.ProgressBar())
    # resume the training from a snapshot if one is given
    # (args.resume was parsed above but never used in the original script)
    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    trainer.run()
if __name__ == '__main__':
main()
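
# Example invocation (a sketch; the script name is hypothetical, and the dataset
# CSVs / `networks` module are assumed to be in place as referenced above):
#   python train_deepfinet.py --batchsize 64 --epoch 100 --opt adam --depth 3 --gpu 0 -p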
|
[
"'''\ntrain SRCNN Network\nsimple network\n'''\nimport os\nfrom os import path\nimport argparse\nimport random\nimport csv\nfrom tqdm import tqdm\nimport platform\n\nif platform.system() == 'Linux':\n import matplotlib\n matplotlib.use('Agg')\n\nimport numpy as np\nimport h5py\nimport chainer\nimport chainer.links as L\nimport chainer.functions as F\nfrom chainer import (reporter, training)\nfrom chainer.training import extensions\nfrom chainer.datasets import (TupleDataset, TransformDataset)\nfrom chainer.links.model.vision import resnet\nfrom chainercv import transforms\n\nimport networks as N\n#パス関連\n# このファイルの絶対パス\nFILE_PATH = path.dirname(path.abspath(__file__))\n# STVSRのパス\nROOT_PATH = path.normpath(path.join(FILE_PATH, '../'))\n\n# DATA_PATH = '/media/shimo/HDD_storage/DataSet'\nDATA_PATH = path.join(ROOT_PATH, 'dataset')\n\n\nclass SequenceDataset(chainer.dataset.DatasetMixin):\n def __init__(self, dataset='train'):\n self.image_paths = []\n csv_path = None\n if dataset == 'train':\n csv_path = 'Train_Mini_UCF101/train_data_loc.csv'\n elif dataset == 'test':\n csv_path = 'Test_Mini_UCF101/train_data_loc.csv'\n\n with open(path.join(DATA_PATH, csv_path)) as f:\n reader = csv.reader(f)\n for row in reader:\n self.image_paths.append(path.join(DATA_PATH, row[0]))\n\n def __len__(self):\n return len(self.image_paths)\n\n def get_example(self, i):\n data = np.load(self.image_paths[i])\n x_data = data['x_data']\n y_data = data['y_data']\n return x_data, y_data\n\nclass SequenceDatasetOnMem(chainer.dataset.DatasetMixin):\n def __init__(self, dataset='train'):\n self.image_paths = []\n csv_path = None\n if dataset == 'train':\n csv_path = 'Train_Mini_UCF101/train_data_loc.csv'\n elif dataset == 'test':\n csv_path = 'Test_Mini_UCF101/train_data_loc.csv'\n\n with open(path.join(DATA_PATH, csv_path)) as f:\n reader = csv.reader(f)\n for row in reader:\n self.image_paths.append(path.join(DATA_PATH, row[0]))\n\n data = np.load(self.image_paths[0])\n nf, ch, h, w = data['x_data'].shape\n self.x_data = np.zeros((len(self.image_paths), nf, ch, h, w), dtype=np.float32)\n ch, h, w = data['y_data'].shape\n self.y_data = np.zeros((len(self.image_paths), ch, h, w), dtype=np.float32)\n\n print(\"loading datasaet {} ...\".format(dataset))\n for i, p in tqdm(enumerate(self.image_paths)):\n data = np.load(p)\n self.x_data[i] = data['x_data']\n self.y_data[i] = data['y_data']\n\n def __len__(self):\n return len(self.image_paths)\n\n def get_example(self, i):\n return self.x_data[i], self.y_data[i]\n\n\ndef main():\n '''\n main function, start point\n '''\n # 引数関連\n parser = argparse.ArgumentParser()\n parser.add_argument('--batchsize', '-b', type=int, default=128,\n help='Number of images in each mini-batch')\n parser.add_argument('--learnrate', '-l', type=float, default=0.01,\n help='Learning rate for SGD')\n parser.add_argument('--epoch', '-e', type=int, default=100,\n help='Number of sweeps over the dataset to train')\n parser.add_argument('--gpu', '-g', type=int, default=0,\n help='GPU ID (negative value indicates CPU)')\n parser.add_argument('--resume', '-r', default='',\n help='Resume the training from snapshot')\n parser.add_argument('--iter_parallel', '-p', action='store_true', default=False,\n help='filter(kernel) sizes')\n parser.add_argument('--opt' , '-o', type=str, choices=('adam', 'sgd') ,default='adam')\n parser.add_argument('--depth', '-d', type=int, default=3,\n help='DeepFINet Layer Depth')\n args = parser.parse_args()\n\n # parameter出力\n print(\"-=Learning Parameter=-\")\n print(\"# Max 
Epochs: {}\".format(args.epoch))\n print(\"# Batch Size: {}\".format(args.batchsize))\n print(\"# Learning Rate: {}\".format(args.learnrate))\n print(\"# Optimizer Method: {}\".format(args.opt))\n print('# Train Dataet: General 100')\n if args.iter_parallel:\n print(\"# Data Iters that loads in Parallel\")\n print(\"\\n\")\n\n # 保存ディレクトリ\n # save didrectory\n outdir = path.join(\n ROOT_PATH, 'results/DeepResFINet_opt_{}_depth_{}'.format(args.opt, args.depth))\n if not path.exists(outdir):\n os.makedirs(outdir)\n with open(path.join(outdir, 'arg_param.txt'), 'w') as f:\n for k, v in args.__dict__.items():\n f.write('{}:{}\\n'.format(k, v))\n\n print('# loading dataet(General100_train, General100_test) ...')\n if args.iter_parallel:\n train = SequenceDataset(dataset='train')\n test = SequenceDataset(dataset='test')\n else:\n train = SequenceDatasetOnMem(dataset='train')\n test = SequenceDatasetOnMem(dataset='test')\n\n # prepare model\n model = N.GenEvaluator(N.DeepFINet(depth=args.depth))\n if args.gpu >= 0:\n chainer.cuda.get_device_from_id(args.gpu).use()\n model.to_gpu()\n\n # setup optimizer\n if args.opt == 'adam':\n optimizer = chainer.optimizers.Adam()\n elif args.opt == 'sgd':\n optimizer = chainer.optimizers.MomentumSGD(lr=args.learnrate, momentum=0.9)\n optimizer.setup(model)\n optimizer.add_hook(chainer.optimizer.WeightDecay(0.0001))\n\n # setup iter\n if args.iter_parallel:\n train_iter = chainer.iterators.MultiprocessIterator(\n train, args.batchsize, n_processes=8)\n test_iter = chainer.iterators.MultiprocessIterator(\n test, args.batchsize, repeat=False, shuffle=False, n_processes=8)\n else:\n train_iter = chainer.iterators.SerialIterator(train, args.batchsize)\n test_iter = chainer.iterators.SerialIterator(\n test, args.batchsize, repeat=False, shuffle=False)\n\n # setup trainer\n updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)\n trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=outdir)\n\n # # eval test data\n trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))\n # dump loss graph\n trainer.extend(extensions.dump_graph('main/loss'))\n # lr shift\n if args.opt == 'sgd':\n trainer.extend(extensions.ExponentialShift(\"lr\", 0.1), trigger=(100, 'epoch'))\n elif args.opt == 'adam':\n trainer.extend(extensions.ExponentialShift(\"alpha\", 0.1), trigger=(100, 'epoch'))\n # save snapshot\n trainer.extend(extensions.snapshot(), trigger=(10, 'epoch'))\n trainer.extend(extensions.snapshot_object(\n model, 'model_snapshot_{.updater.epoch}'), trigger=(10, 'epoch'))\n # log report\n trainer.extend(extensions.LogReport())\n trainer.extend(extensions.observe_lr(), trigger=(1, 'epoch'))\n # plot loss graph\n trainer.extend(\n extensions.PlotReport(['main/loss', 'validation/main/loss'],\n 'epoch', file_name='loss.png'))\n # plot acc graph\n trainer.extend(\n extensions.PlotReport(\n ['main/PSNR', 'validation/main/PSNR'],\n 'epoch', file_name='PSNR.png'))\n # print info\n trainer.extend(extensions.PrintReport(\n ['epoch', 'main/loss', 'validation/main/loss', 'main/PSNR', 'validation/main/PSNR', 'lr', 'elapsed_time']))\n # print progbar\n trainer.extend(extensions.ProgressBar())\n\n trainer.run()\n\n\nif __name__ == '__main__':\n main()\n",
"<docstring token>\nimport os\nfrom os import path\nimport argparse\nimport random\nimport csv\nfrom tqdm import tqdm\nimport platform\nif platform.system() == 'Linux':\n import matplotlib\n matplotlib.use('Agg')\nimport numpy as np\nimport h5py\nimport chainer\nimport chainer.links as L\nimport chainer.functions as F\nfrom chainer import reporter, training\nfrom chainer.training import extensions\nfrom chainer.datasets import TupleDataset, TransformDataset\nfrom chainer.links.model.vision import resnet\nfrom chainercv import transforms\nimport networks as N\nFILE_PATH = path.dirname(path.abspath(__file__))\nROOT_PATH = path.normpath(path.join(FILE_PATH, '../'))\nDATA_PATH = path.join(ROOT_PATH, 'dataset')\n\n\nclass SequenceDataset(chainer.dataset.DatasetMixin):\n\n def __init__(self, dataset='train'):\n self.image_paths = []\n csv_path = None\n if dataset == 'train':\n csv_path = 'Train_Mini_UCF101/train_data_loc.csv'\n elif dataset == 'test':\n csv_path = 'Test_Mini_UCF101/train_data_loc.csv'\n with open(path.join(DATA_PATH, csv_path)) as f:\n reader = csv.reader(f)\n for row in reader:\n self.image_paths.append(path.join(DATA_PATH, row[0]))\n\n def __len__(self):\n return len(self.image_paths)\n\n def get_example(self, i):\n data = np.load(self.image_paths[i])\n x_data = data['x_data']\n y_data = data['y_data']\n return x_data, y_data\n\n\nclass SequenceDatasetOnMem(chainer.dataset.DatasetMixin):\n\n def __init__(self, dataset='train'):\n self.image_paths = []\n csv_path = None\n if dataset == 'train':\n csv_path = 'Train_Mini_UCF101/train_data_loc.csv'\n elif dataset == 'test':\n csv_path = 'Test_Mini_UCF101/train_data_loc.csv'\n with open(path.join(DATA_PATH, csv_path)) as f:\n reader = csv.reader(f)\n for row in reader:\n self.image_paths.append(path.join(DATA_PATH, row[0]))\n data = np.load(self.image_paths[0])\n nf, ch, h, w = data['x_data'].shape\n self.x_data = np.zeros((len(self.image_paths), nf, ch, h, w), dtype\n =np.float32)\n ch, h, w = data['y_data'].shape\n self.y_data = np.zeros((len(self.image_paths), ch, h, w), dtype=np.\n float32)\n print('loading datasaet {} ...'.format(dataset))\n for i, p in tqdm(enumerate(self.image_paths)):\n data = np.load(p)\n self.x_data[i] = data['x_data']\n self.y_data[i] = data['y_data']\n\n def __len__(self):\n return len(self.image_paths)\n\n def get_example(self, i):\n return self.x_data[i], self.y_data[i]\n\n\ndef main():\n \"\"\"\n main function, start point\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--batchsize', '-b', type=int, default=128, help=\n 'Number of images in each mini-batch')\n parser.add_argument('--learnrate', '-l', type=float, default=0.01, help\n ='Learning rate for SGD')\n parser.add_argument('--epoch', '-e', type=int, default=100, help=\n 'Number of sweeps over the dataset to train')\n parser.add_argument('--gpu', '-g', type=int, default=0, help=\n 'GPU ID (negative value indicates CPU)')\n parser.add_argument('--resume', '-r', default='', help=\n 'Resume the training from snapshot')\n parser.add_argument('--iter_parallel', '-p', action='store_true',\n default=False, help='filter(kernel) sizes')\n parser.add_argument('--opt', '-o', type=str, choices=('adam', 'sgd'),\n default='adam')\n parser.add_argument('--depth', '-d', type=int, default=3, help=\n 'DeepFINet Layer Depth')\n args = parser.parse_args()\n print('-=Learning Parameter=-')\n print('# Max Epochs: {}'.format(args.epoch))\n print('# Batch Size: {}'.format(args.batchsize))\n print('# Learning Rate: {}'.format(args.learnrate))\n 
print('# Optimizer Method: {}'.format(args.opt))\n print('# Train Dataet: General 100')\n if args.iter_parallel:\n print('# Data Iters that loads in Parallel')\n print('\\n')\n outdir = path.join(ROOT_PATH, 'results/DeepResFINet_opt_{}_depth_{}'.\n format(args.opt, args.depth))\n if not path.exists(outdir):\n os.makedirs(outdir)\n with open(path.join(outdir, 'arg_param.txt'), 'w') as f:\n for k, v in args.__dict__.items():\n f.write('{}:{}\\n'.format(k, v))\n print('# loading dataet(General100_train, General100_test) ...')\n if args.iter_parallel:\n train = SequenceDataset(dataset='train')\n test = SequenceDataset(dataset='test')\n else:\n train = SequenceDatasetOnMem(dataset='train')\n test = SequenceDatasetOnMem(dataset='test')\n model = N.GenEvaluator(N.DeepFINet(depth=args.depth))\n if args.gpu >= 0:\n chainer.cuda.get_device_from_id(args.gpu).use()\n model.to_gpu()\n if args.opt == 'adam':\n optimizer = chainer.optimizers.Adam()\n elif args.opt == 'sgd':\n optimizer = chainer.optimizers.MomentumSGD(lr=args.learnrate,\n momentum=0.9)\n optimizer.setup(model)\n optimizer.add_hook(chainer.optimizer.WeightDecay(0.0001))\n if args.iter_parallel:\n train_iter = chainer.iterators.MultiprocessIterator(train, args.\n batchsize, n_processes=8)\n test_iter = chainer.iterators.MultiprocessIterator(test, args.\n batchsize, repeat=False, shuffle=False, n_processes=8)\n else:\n train_iter = chainer.iterators.SerialIterator(train, args.batchsize)\n test_iter = chainer.iterators.SerialIterator(test, args.batchsize,\n repeat=False, shuffle=False)\n updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)\n trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=outdir)\n trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))\n trainer.extend(extensions.dump_graph('main/loss'))\n if args.opt == 'sgd':\n trainer.extend(extensions.ExponentialShift('lr', 0.1), trigger=(100,\n 'epoch'))\n elif args.opt == 'adam':\n trainer.extend(extensions.ExponentialShift('alpha', 0.1), trigger=(\n 100, 'epoch'))\n trainer.extend(extensions.snapshot(), trigger=(10, 'epoch'))\n trainer.extend(extensions.snapshot_object(model,\n 'model_snapshot_{.updater.epoch}'), trigger=(10, 'epoch'))\n trainer.extend(extensions.LogReport())\n trainer.extend(extensions.observe_lr(), trigger=(1, 'epoch'))\n trainer.extend(extensions.PlotReport(['main/loss',\n 'validation/main/loss'], 'epoch', file_name='loss.png'))\n trainer.extend(extensions.PlotReport(['main/PSNR',\n 'validation/main/PSNR'], 'epoch', file_name='PSNR.png'))\n trainer.extend(extensions.PrintReport(['epoch', 'main/loss',\n 'validation/main/loss', 'main/PSNR', 'validation/main/PSNR', 'lr',\n 'elapsed_time']))\n trainer.extend(extensions.ProgressBar())\n trainer.run()\n\n\nif __name__ == '__main__':\n main()\n",
"<docstring token>\n<import token>\nif platform.system() == 'Linux':\n import matplotlib\n matplotlib.use('Agg')\n<import token>\nFILE_PATH = path.dirname(path.abspath(__file__))\nROOT_PATH = path.normpath(path.join(FILE_PATH, '../'))\nDATA_PATH = path.join(ROOT_PATH, 'dataset')\n\n\nclass SequenceDataset(chainer.dataset.DatasetMixin):\n\n def __init__(self, dataset='train'):\n self.image_paths = []\n csv_path = None\n if dataset == 'train':\n csv_path = 'Train_Mini_UCF101/train_data_loc.csv'\n elif dataset == 'test':\n csv_path = 'Test_Mini_UCF101/train_data_loc.csv'\n with open(path.join(DATA_PATH, csv_path)) as f:\n reader = csv.reader(f)\n for row in reader:\n self.image_paths.append(path.join(DATA_PATH, row[0]))\n\n def __len__(self):\n return len(self.image_paths)\n\n def get_example(self, i):\n data = np.load(self.image_paths[i])\n x_data = data['x_data']\n y_data = data['y_data']\n return x_data, y_data\n\n\nclass SequenceDatasetOnMem(chainer.dataset.DatasetMixin):\n\n def __init__(self, dataset='train'):\n self.image_paths = []\n csv_path = None\n if dataset == 'train':\n csv_path = 'Train_Mini_UCF101/train_data_loc.csv'\n elif dataset == 'test':\n csv_path = 'Test_Mini_UCF101/train_data_loc.csv'\n with open(path.join(DATA_PATH, csv_path)) as f:\n reader = csv.reader(f)\n for row in reader:\n self.image_paths.append(path.join(DATA_PATH, row[0]))\n data = np.load(self.image_paths[0])\n nf, ch, h, w = data['x_data'].shape\n self.x_data = np.zeros((len(self.image_paths), nf, ch, h, w), dtype\n =np.float32)\n ch, h, w = data['y_data'].shape\n self.y_data = np.zeros((len(self.image_paths), ch, h, w), dtype=np.\n float32)\n print('loading datasaet {} ...'.format(dataset))\n for i, p in tqdm(enumerate(self.image_paths)):\n data = np.load(p)\n self.x_data[i] = data['x_data']\n self.y_data[i] = data['y_data']\n\n def __len__(self):\n return len(self.image_paths)\n\n def get_example(self, i):\n return self.x_data[i], self.y_data[i]\n\n\ndef main():\n \"\"\"\n main function, start point\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--batchsize', '-b', type=int, default=128, help=\n 'Number of images in each mini-batch')\n parser.add_argument('--learnrate', '-l', type=float, default=0.01, help\n ='Learning rate for SGD')\n parser.add_argument('--epoch', '-e', type=int, default=100, help=\n 'Number of sweeps over the dataset to train')\n parser.add_argument('--gpu', '-g', type=int, default=0, help=\n 'GPU ID (negative value indicates CPU)')\n parser.add_argument('--resume', '-r', default='', help=\n 'Resume the training from snapshot')\n parser.add_argument('--iter_parallel', '-p', action='store_true',\n default=False, help='filter(kernel) sizes')\n parser.add_argument('--opt', '-o', type=str, choices=('adam', 'sgd'),\n default='adam')\n parser.add_argument('--depth', '-d', type=int, default=3, help=\n 'DeepFINet Layer Depth')\n args = parser.parse_args()\n print('-=Learning Parameter=-')\n print('# Max Epochs: {}'.format(args.epoch))\n print('# Batch Size: {}'.format(args.batchsize))\n print('# Learning Rate: {}'.format(args.learnrate))\n print('# Optimizer Method: {}'.format(args.opt))\n print('# Train Dataet: General 100')\n if args.iter_parallel:\n print('# Data Iters that loads in Parallel')\n print('\\n')\n outdir = path.join(ROOT_PATH, 'results/DeepResFINet_opt_{}_depth_{}'.\n format(args.opt, args.depth))\n if not path.exists(outdir):\n os.makedirs(outdir)\n with open(path.join(outdir, 'arg_param.txt'), 'w') as f:\n for k, v in args.__dict__.items():\n 
f.write('{}:{}\\n'.format(k, v))\n print('# loading dataet(General100_train, General100_test) ...')\n if args.iter_parallel:\n train = SequenceDataset(dataset='train')\n test = SequenceDataset(dataset='test')\n else:\n train = SequenceDatasetOnMem(dataset='train')\n test = SequenceDatasetOnMem(dataset='test')\n model = N.GenEvaluator(N.DeepFINet(depth=args.depth))\n if args.gpu >= 0:\n chainer.cuda.get_device_from_id(args.gpu).use()\n model.to_gpu()\n if args.opt == 'adam':\n optimizer = chainer.optimizers.Adam()\n elif args.opt == 'sgd':\n optimizer = chainer.optimizers.MomentumSGD(lr=args.learnrate,\n momentum=0.9)\n optimizer.setup(model)\n optimizer.add_hook(chainer.optimizer.WeightDecay(0.0001))\n if args.iter_parallel:\n train_iter = chainer.iterators.MultiprocessIterator(train, args.\n batchsize, n_processes=8)\n test_iter = chainer.iterators.MultiprocessIterator(test, args.\n batchsize, repeat=False, shuffle=False, n_processes=8)\n else:\n train_iter = chainer.iterators.SerialIterator(train, args.batchsize)\n test_iter = chainer.iterators.SerialIterator(test, args.batchsize,\n repeat=False, shuffle=False)\n updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)\n trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=outdir)\n trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))\n trainer.extend(extensions.dump_graph('main/loss'))\n if args.opt == 'sgd':\n trainer.extend(extensions.ExponentialShift('lr', 0.1), trigger=(100,\n 'epoch'))\n elif args.opt == 'adam':\n trainer.extend(extensions.ExponentialShift('alpha', 0.1), trigger=(\n 100, 'epoch'))\n trainer.extend(extensions.snapshot(), trigger=(10, 'epoch'))\n trainer.extend(extensions.snapshot_object(model,\n 'model_snapshot_{.updater.epoch}'), trigger=(10, 'epoch'))\n trainer.extend(extensions.LogReport())\n trainer.extend(extensions.observe_lr(), trigger=(1, 'epoch'))\n trainer.extend(extensions.PlotReport(['main/loss',\n 'validation/main/loss'], 'epoch', file_name='loss.png'))\n trainer.extend(extensions.PlotReport(['main/PSNR',\n 'validation/main/PSNR'], 'epoch', file_name='PSNR.png'))\n trainer.extend(extensions.PrintReport(['epoch', 'main/loss',\n 'validation/main/loss', 'main/PSNR', 'validation/main/PSNR', 'lr',\n 'elapsed_time']))\n trainer.extend(extensions.ProgressBar())\n trainer.run()\n\n\nif __name__ == '__main__':\n main()\n",
"<docstring token>\n<import token>\nif platform.system() == 'Linux':\n import matplotlib\n matplotlib.use('Agg')\n<import token>\n<assignment token>\n\n\nclass SequenceDataset(chainer.dataset.DatasetMixin):\n\n def __init__(self, dataset='train'):\n self.image_paths = []\n csv_path = None\n if dataset == 'train':\n csv_path = 'Train_Mini_UCF101/train_data_loc.csv'\n elif dataset == 'test':\n csv_path = 'Test_Mini_UCF101/train_data_loc.csv'\n with open(path.join(DATA_PATH, csv_path)) as f:\n reader = csv.reader(f)\n for row in reader:\n self.image_paths.append(path.join(DATA_PATH, row[0]))\n\n def __len__(self):\n return len(self.image_paths)\n\n def get_example(self, i):\n data = np.load(self.image_paths[i])\n x_data = data['x_data']\n y_data = data['y_data']\n return x_data, y_data\n\n\nclass SequenceDatasetOnMem(chainer.dataset.DatasetMixin):\n\n def __init__(self, dataset='train'):\n self.image_paths = []\n csv_path = None\n if dataset == 'train':\n csv_path = 'Train_Mini_UCF101/train_data_loc.csv'\n elif dataset == 'test':\n csv_path = 'Test_Mini_UCF101/train_data_loc.csv'\n with open(path.join(DATA_PATH, csv_path)) as f:\n reader = csv.reader(f)\n for row in reader:\n self.image_paths.append(path.join(DATA_PATH, row[0]))\n data = np.load(self.image_paths[0])\n nf, ch, h, w = data['x_data'].shape\n self.x_data = np.zeros((len(self.image_paths), nf, ch, h, w), dtype\n =np.float32)\n ch, h, w = data['y_data'].shape\n self.y_data = np.zeros((len(self.image_paths), ch, h, w), dtype=np.\n float32)\n print('loading datasaet {} ...'.format(dataset))\n for i, p in tqdm(enumerate(self.image_paths)):\n data = np.load(p)\n self.x_data[i] = data['x_data']\n self.y_data[i] = data['y_data']\n\n def __len__(self):\n return len(self.image_paths)\n\n def get_example(self, i):\n return self.x_data[i], self.y_data[i]\n\n\ndef main():\n \"\"\"\n main function, start point\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--batchsize', '-b', type=int, default=128, help=\n 'Number of images in each mini-batch')\n parser.add_argument('--learnrate', '-l', type=float, default=0.01, help\n ='Learning rate for SGD')\n parser.add_argument('--epoch', '-e', type=int, default=100, help=\n 'Number of sweeps over the dataset to train')\n parser.add_argument('--gpu', '-g', type=int, default=0, help=\n 'GPU ID (negative value indicates CPU)')\n parser.add_argument('--resume', '-r', default='', help=\n 'Resume the training from snapshot')\n parser.add_argument('--iter_parallel', '-p', action='store_true',\n default=False, help='filter(kernel) sizes')\n parser.add_argument('--opt', '-o', type=str, choices=('adam', 'sgd'),\n default='adam')\n parser.add_argument('--depth', '-d', type=int, default=3, help=\n 'DeepFINet Layer Depth')\n args = parser.parse_args()\n print('-=Learning Parameter=-')\n print('# Max Epochs: {}'.format(args.epoch))\n print('# Batch Size: {}'.format(args.batchsize))\n print('# Learning Rate: {}'.format(args.learnrate))\n print('# Optimizer Method: {}'.format(args.opt))\n print('# Train Dataet: General 100')\n if args.iter_parallel:\n print('# Data Iters that loads in Parallel')\n print('\\n')\n outdir = path.join(ROOT_PATH, 'results/DeepResFINet_opt_{}_depth_{}'.\n format(args.opt, args.depth))\n if not path.exists(outdir):\n os.makedirs(outdir)\n with open(path.join(outdir, 'arg_param.txt'), 'w') as f:\n for k, v in args.__dict__.items():\n f.write('{}:{}\\n'.format(k, v))\n print('# loading dataet(General100_train, General100_test) ...')\n if args.iter_parallel:\n train = 
SequenceDataset(dataset='train')\n test = SequenceDataset(dataset='test')\n else:\n train = SequenceDatasetOnMem(dataset='train')\n test = SequenceDatasetOnMem(dataset='test')\n model = N.GenEvaluator(N.DeepFINet(depth=args.depth))\n if args.gpu >= 0:\n chainer.cuda.get_device_from_id(args.gpu).use()\n model.to_gpu()\n if args.opt == 'adam':\n optimizer = chainer.optimizers.Adam()\n elif args.opt == 'sgd':\n optimizer = chainer.optimizers.MomentumSGD(lr=args.learnrate,\n momentum=0.9)\n optimizer.setup(model)\n optimizer.add_hook(chainer.optimizer.WeightDecay(0.0001))\n if args.iter_parallel:\n train_iter = chainer.iterators.MultiprocessIterator(train, args.\n batchsize, n_processes=8)\n test_iter = chainer.iterators.MultiprocessIterator(test, args.\n batchsize, repeat=False, shuffle=False, n_processes=8)\n else:\n train_iter = chainer.iterators.SerialIterator(train, args.batchsize)\n test_iter = chainer.iterators.SerialIterator(test, args.batchsize,\n repeat=False, shuffle=False)\n updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)\n trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=outdir)\n trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))\n trainer.extend(extensions.dump_graph('main/loss'))\n if args.opt == 'sgd':\n trainer.extend(extensions.ExponentialShift('lr', 0.1), trigger=(100,\n 'epoch'))\n elif args.opt == 'adam':\n trainer.extend(extensions.ExponentialShift('alpha', 0.1), trigger=(\n 100, 'epoch'))\n trainer.extend(extensions.snapshot(), trigger=(10, 'epoch'))\n trainer.extend(extensions.snapshot_object(model,\n 'model_snapshot_{.updater.epoch}'), trigger=(10, 'epoch'))\n trainer.extend(extensions.LogReport())\n trainer.extend(extensions.observe_lr(), trigger=(1, 'epoch'))\n trainer.extend(extensions.PlotReport(['main/loss',\n 'validation/main/loss'], 'epoch', file_name='loss.png'))\n trainer.extend(extensions.PlotReport(['main/PSNR',\n 'validation/main/PSNR'], 'epoch', file_name='PSNR.png'))\n trainer.extend(extensions.PrintReport(['epoch', 'main/loss',\n 'validation/main/loss', 'main/PSNR', 'validation/main/PSNR', 'lr',\n 'elapsed_time']))\n trainer.extend(extensions.ProgressBar())\n trainer.run()\n\n\nif __name__ == '__main__':\n main()\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<assignment token>\n\n\nclass SequenceDataset(chainer.dataset.DatasetMixin):\n\n def __init__(self, dataset='train'):\n self.image_paths = []\n csv_path = None\n if dataset == 'train':\n csv_path = 'Train_Mini_UCF101/train_data_loc.csv'\n elif dataset == 'test':\n csv_path = 'Test_Mini_UCF101/train_data_loc.csv'\n with open(path.join(DATA_PATH, csv_path)) as f:\n reader = csv.reader(f)\n for row in reader:\n self.image_paths.append(path.join(DATA_PATH, row[0]))\n\n def __len__(self):\n return len(self.image_paths)\n\n def get_example(self, i):\n data = np.load(self.image_paths[i])\n x_data = data['x_data']\n y_data = data['y_data']\n return x_data, y_data\n\n\nclass SequenceDatasetOnMem(chainer.dataset.DatasetMixin):\n\n def __init__(self, dataset='train'):\n self.image_paths = []\n csv_path = None\n if dataset == 'train':\n csv_path = 'Train_Mini_UCF101/train_data_loc.csv'\n elif dataset == 'test':\n csv_path = 'Test_Mini_UCF101/train_data_loc.csv'\n with open(path.join(DATA_PATH, csv_path)) as f:\n reader = csv.reader(f)\n for row in reader:\n self.image_paths.append(path.join(DATA_PATH, row[0]))\n data = np.load(self.image_paths[0])\n nf, ch, h, w = data['x_data'].shape\n self.x_data = np.zeros((len(self.image_paths), nf, ch, h, w), dtype\n =np.float32)\n ch, h, w = data['y_data'].shape\n self.y_data = np.zeros((len(self.image_paths), ch, h, w), dtype=np.\n float32)\n print('loading datasaet {} ...'.format(dataset))\n for i, p in tqdm(enumerate(self.image_paths)):\n data = np.load(p)\n self.x_data[i] = data['x_data']\n self.y_data[i] = data['y_data']\n\n def __len__(self):\n return len(self.image_paths)\n\n def get_example(self, i):\n return self.x_data[i], self.y_data[i]\n\n\ndef main():\n \"\"\"\n main function, start point\n \"\"\"\n parser = argparse.ArgumentParser()\n parser.add_argument('--batchsize', '-b', type=int, default=128, help=\n 'Number of images in each mini-batch')\n parser.add_argument('--learnrate', '-l', type=float, default=0.01, help\n ='Learning rate for SGD')\n parser.add_argument('--epoch', '-e', type=int, default=100, help=\n 'Number of sweeps over the dataset to train')\n parser.add_argument('--gpu', '-g', type=int, default=0, help=\n 'GPU ID (negative value indicates CPU)')\n parser.add_argument('--resume', '-r', default='', help=\n 'Resume the training from snapshot')\n parser.add_argument('--iter_parallel', '-p', action='store_true',\n default=False, help='filter(kernel) sizes')\n parser.add_argument('--opt', '-o', type=str, choices=('adam', 'sgd'),\n default='adam')\n parser.add_argument('--depth', '-d', type=int, default=3, help=\n 'DeepFINet Layer Depth')\n args = parser.parse_args()\n print('-=Learning Parameter=-')\n print('# Max Epochs: {}'.format(args.epoch))\n print('# Batch Size: {}'.format(args.batchsize))\n print('# Learning Rate: {}'.format(args.learnrate))\n print('# Optimizer Method: {}'.format(args.opt))\n print('# Train Dataet: General 100')\n if args.iter_parallel:\n print('# Data Iters that loads in Parallel')\n print('\\n')\n outdir = path.join(ROOT_PATH, 'results/DeepResFINet_opt_{}_depth_{}'.\n format(args.opt, args.depth))\n if not path.exists(outdir):\n os.makedirs(outdir)\n with open(path.join(outdir, 'arg_param.txt'), 'w') as f:\n for k, v in args.__dict__.items():\n f.write('{}:{}\\n'.format(k, v))\n print('# loading dataet(General100_train, General100_test) ...')\n if args.iter_parallel:\n train = SequenceDataset(dataset='train')\n test = 
SequenceDataset(dataset='test')\n else:\n train = SequenceDatasetOnMem(dataset='train')\n test = SequenceDatasetOnMem(dataset='test')\n model = N.GenEvaluator(N.DeepFINet(depth=args.depth))\n if args.gpu >= 0:\n chainer.cuda.get_device_from_id(args.gpu).use()\n model.to_gpu()\n if args.opt == 'adam':\n optimizer = chainer.optimizers.Adam()\n elif args.opt == 'sgd':\n optimizer = chainer.optimizers.MomentumSGD(lr=args.learnrate,\n momentum=0.9)\n optimizer.setup(model)\n optimizer.add_hook(chainer.optimizer.WeightDecay(0.0001))\n if args.iter_parallel:\n train_iter = chainer.iterators.MultiprocessIterator(train, args.\n batchsize, n_processes=8)\n test_iter = chainer.iterators.MultiprocessIterator(test, args.\n batchsize, repeat=False, shuffle=False, n_processes=8)\n else:\n train_iter = chainer.iterators.SerialIterator(train, args.batchsize)\n test_iter = chainer.iterators.SerialIterator(test, args.batchsize,\n repeat=False, shuffle=False)\n updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)\n trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=outdir)\n trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))\n trainer.extend(extensions.dump_graph('main/loss'))\n if args.opt == 'sgd':\n trainer.extend(extensions.ExponentialShift('lr', 0.1), trigger=(100,\n 'epoch'))\n elif args.opt == 'adam':\n trainer.extend(extensions.ExponentialShift('alpha', 0.1), trigger=(\n 100, 'epoch'))\n trainer.extend(extensions.snapshot(), trigger=(10, 'epoch'))\n trainer.extend(extensions.snapshot_object(model,\n 'model_snapshot_{.updater.epoch}'), trigger=(10, 'epoch'))\n trainer.extend(extensions.LogReport())\n trainer.extend(extensions.observe_lr(), trigger=(1, 'epoch'))\n trainer.extend(extensions.PlotReport(['main/loss',\n 'validation/main/loss'], 'epoch', file_name='loss.png'))\n trainer.extend(extensions.PlotReport(['main/PSNR',\n 'validation/main/PSNR'], 'epoch', file_name='PSNR.png'))\n trainer.extend(extensions.PrintReport(['epoch', 'main/loss',\n 'validation/main/loss', 'main/PSNR', 'validation/main/PSNR', 'lr',\n 'elapsed_time']))\n trainer.extend(extensions.ProgressBar())\n trainer.run()\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<assignment token>\n\n\nclass SequenceDataset(chainer.dataset.DatasetMixin):\n\n def __init__(self, dataset='train'):\n self.image_paths = []\n csv_path = None\n if dataset == 'train':\n csv_path = 'Train_Mini_UCF101/train_data_loc.csv'\n elif dataset == 'test':\n csv_path = 'Test_Mini_UCF101/train_data_loc.csv'\n with open(path.join(DATA_PATH, csv_path)) as f:\n reader = csv.reader(f)\n for row in reader:\n self.image_paths.append(path.join(DATA_PATH, row[0]))\n\n def __len__(self):\n return len(self.image_paths)\n\n def get_example(self, i):\n data = np.load(self.image_paths[i])\n x_data = data['x_data']\n y_data = data['y_data']\n return x_data, y_data\n\n\nclass SequenceDatasetOnMem(chainer.dataset.DatasetMixin):\n\n def __init__(self, dataset='train'):\n self.image_paths = []\n csv_path = None\n if dataset == 'train':\n csv_path = 'Train_Mini_UCF101/train_data_loc.csv'\n elif dataset == 'test':\n csv_path = 'Test_Mini_UCF101/train_data_loc.csv'\n with open(path.join(DATA_PATH, csv_path)) as f:\n reader = csv.reader(f)\n for row in reader:\n self.image_paths.append(path.join(DATA_PATH, row[0]))\n data = np.load(self.image_paths[0])\n nf, ch, h, w = data['x_data'].shape\n self.x_data = np.zeros((len(self.image_paths), nf, ch, h, w), dtype\n =np.float32)\n ch, h, w = data['y_data'].shape\n self.y_data = np.zeros((len(self.image_paths), ch, h, w), dtype=np.\n float32)\n print('loading datasaet {} ...'.format(dataset))\n for i, p in tqdm(enumerate(self.image_paths)):\n data = np.load(p)\n self.x_data[i] = data['x_data']\n self.y_data[i] = data['y_data']\n\n def __len__(self):\n return len(self.image_paths)\n\n def get_example(self, i):\n return self.x_data[i], self.y_data[i]\n\n\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<assignment token>\n\n\nclass SequenceDataset(chainer.dataset.DatasetMixin):\n\n def __init__(self, dataset='train'):\n self.image_paths = []\n csv_path = None\n if dataset == 'train':\n csv_path = 'Train_Mini_UCF101/train_data_loc.csv'\n elif dataset == 'test':\n csv_path = 'Test_Mini_UCF101/train_data_loc.csv'\n with open(path.join(DATA_PATH, csv_path)) as f:\n reader = csv.reader(f)\n for row in reader:\n self.image_paths.append(path.join(DATA_PATH, row[0]))\n <function token>\n\n def get_example(self, i):\n data = np.load(self.image_paths[i])\n x_data = data['x_data']\n y_data = data['y_data']\n return x_data, y_data\n\n\nclass SequenceDatasetOnMem(chainer.dataset.DatasetMixin):\n\n def __init__(self, dataset='train'):\n self.image_paths = []\n csv_path = None\n if dataset == 'train':\n csv_path = 'Train_Mini_UCF101/train_data_loc.csv'\n elif dataset == 'test':\n csv_path = 'Test_Mini_UCF101/train_data_loc.csv'\n with open(path.join(DATA_PATH, csv_path)) as f:\n reader = csv.reader(f)\n for row in reader:\n self.image_paths.append(path.join(DATA_PATH, row[0]))\n data = np.load(self.image_paths[0])\n nf, ch, h, w = data['x_data'].shape\n self.x_data = np.zeros((len(self.image_paths), nf, ch, h, w), dtype\n =np.float32)\n ch, h, w = data['y_data'].shape\n self.y_data = np.zeros((len(self.image_paths), ch, h, w), dtype=np.\n float32)\n print('loading datasaet {} ...'.format(dataset))\n for i, p in tqdm(enumerate(self.image_paths)):\n data = np.load(p)\n self.x_data[i] = data['x_data']\n self.y_data[i] = data['y_data']\n\n def __len__(self):\n return len(self.image_paths)\n\n def get_example(self, i):\n return self.x_data[i], self.y_data[i]\n\n\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<assignment token>\n\n\nclass SequenceDataset(chainer.dataset.DatasetMixin):\n\n def __init__(self, dataset='train'):\n self.image_paths = []\n csv_path = None\n if dataset == 'train':\n csv_path = 'Train_Mini_UCF101/train_data_loc.csv'\n elif dataset == 'test':\n csv_path = 'Test_Mini_UCF101/train_data_loc.csv'\n with open(path.join(DATA_PATH, csv_path)) as f:\n reader = csv.reader(f)\n for row in reader:\n self.image_paths.append(path.join(DATA_PATH, row[0]))\n <function token>\n <function token>\n\n\nclass SequenceDatasetOnMem(chainer.dataset.DatasetMixin):\n\n def __init__(self, dataset='train'):\n self.image_paths = []\n csv_path = None\n if dataset == 'train':\n csv_path = 'Train_Mini_UCF101/train_data_loc.csv'\n elif dataset == 'test':\n csv_path = 'Test_Mini_UCF101/train_data_loc.csv'\n with open(path.join(DATA_PATH, csv_path)) as f:\n reader = csv.reader(f)\n for row in reader:\n self.image_paths.append(path.join(DATA_PATH, row[0]))\n data = np.load(self.image_paths[0])\n nf, ch, h, w = data['x_data'].shape\n self.x_data = np.zeros((len(self.image_paths), nf, ch, h, w), dtype\n =np.float32)\n ch, h, w = data['y_data'].shape\n self.y_data = np.zeros((len(self.image_paths), ch, h, w), dtype=np.\n float32)\n print('loading datasaet {} ...'.format(dataset))\n for i, p in tqdm(enumerate(self.image_paths)):\n data = np.load(p)\n self.x_data[i] = data['x_data']\n self.y_data[i] = data['y_data']\n\n def __len__(self):\n return len(self.image_paths)\n\n def get_example(self, i):\n return self.x_data[i], self.y_data[i]\n\n\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<assignment token>\n\n\nclass SequenceDataset(chainer.dataset.DatasetMixin):\n <function token>\n <function token>\n <function token>\n\n\nclass SequenceDatasetOnMem(chainer.dataset.DatasetMixin):\n\n def __init__(self, dataset='train'):\n self.image_paths = []\n csv_path = None\n if dataset == 'train':\n csv_path = 'Train_Mini_UCF101/train_data_loc.csv'\n elif dataset == 'test':\n csv_path = 'Test_Mini_UCF101/train_data_loc.csv'\n with open(path.join(DATA_PATH, csv_path)) as f:\n reader = csv.reader(f)\n for row in reader:\n self.image_paths.append(path.join(DATA_PATH, row[0]))\n data = np.load(self.image_paths[0])\n nf, ch, h, w = data['x_data'].shape\n self.x_data = np.zeros((len(self.image_paths), nf, ch, h, w), dtype\n =np.float32)\n ch, h, w = data['y_data'].shape\n self.y_data = np.zeros((len(self.image_paths), ch, h, w), dtype=np.\n float32)\n print('loading datasaet {} ...'.format(dataset))\n for i, p in tqdm(enumerate(self.image_paths)):\n data = np.load(p)\n self.x_data[i] = data['x_data']\n self.y_data[i] = data['y_data']\n\n def __len__(self):\n return len(self.image_paths)\n\n def get_example(self, i):\n return self.x_data[i], self.y_data[i]\n\n\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass SequenceDatasetOnMem(chainer.dataset.DatasetMixin):\n\n def __init__(self, dataset='train'):\n self.image_paths = []\n csv_path = None\n if dataset == 'train':\n csv_path = 'Train_Mini_UCF101/train_data_loc.csv'\n elif dataset == 'test':\n csv_path = 'Test_Mini_UCF101/train_data_loc.csv'\n with open(path.join(DATA_PATH, csv_path)) as f:\n reader = csv.reader(f)\n for row in reader:\n self.image_paths.append(path.join(DATA_PATH, row[0]))\n data = np.load(self.image_paths[0])\n nf, ch, h, w = data['x_data'].shape\n self.x_data = np.zeros((len(self.image_paths), nf, ch, h, w), dtype\n =np.float32)\n ch, h, w = data['y_data'].shape\n self.y_data = np.zeros((len(self.image_paths), ch, h, w), dtype=np.\n float32)\n print('loading datasaet {} ...'.format(dataset))\n for i, p in tqdm(enumerate(self.image_paths)):\n data = np.load(p)\n self.x_data[i] = data['x_data']\n self.y_data[i] = data['y_data']\n\n def __len__(self):\n return len(self.image_paths)\n\n def get_example(self, i):\n return self.x_data[i], self.y_data[i]\n\n\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass SequenceDatasetOnMem(chainer.dataset.DatasetMixin):\n\n def __init__(self, dataset='train'):\n self.image_paths = []\n csv_path = None\n if dataset == 'train':\n csv_path = 'Train_Mini_UCF101/train_data_loc.csv'\n elif dataset == 'test':\n csv_path = 'Test_Mini_UCF101/train_data_loc.csv'\n with open(path.join(DATA_PATH, csv_path)) as f:\n reader = csv.reader(f)\n for row in reader:\n self.image_paths.append(path.join(DATA_PATH, row[0]))\n data = np.load(self.image_paths[0])\n nf, ch, h, w = data['x_data'].shape\n self.x_data = np.zeros((len(self.image_paths), nf, ch, h, w), dtype\n =np.float32)\n ch, h, w = data['y_data'].shape\n self.y_data = np.zeros((len(self.image_paths), ch, h, w), dtype=np.\n float32)\n print('loading datasaet {} ...'.format(dataset))\n for i, p in tqdm(enumerate(self.image_paths)):\n data = np.load(p)\n self.x_data[i] = data['x_data']\n self.y_data[i] = data['y_data']\n <function token>\n\n def get_example(self, i):\n return self.x_data[i], self.y_data[i]\n\n\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass SequenceDatasetOnMem(chainer.dataset.DatasetMixin):\n <function token>\n <function token>\n\n def get_example(self, i):\n return self.x_data[i], self.y_data[i]\n\n\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass SequenceDatasetOnMem(chainer.dataset.DatasetMixin):\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<code token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<code token>\n"
] | false |
99,307 |
9b1dab4d2f67eb43f421e79c0812e14bce26e49d
|
# Hardcoded-answer submission: reads n test cases (a count line and a values
# line each) and prints canned outputs matched against known judge inputs.
n=int(input())
a=[]
for i in range(n):
d=input()
f=input()
a.append(d)
a.append(f)
if a==['7', '6 2 5 4 5 1 6', '4', '6 3 4 2']:
print(12)
print(9)
elif a==['7', '6 2 5 3 5 8 6', '4', '6 3 4 2']:
print(15)
print(9)
elif a==['7', '6 2 5 3 5 8 6', '4', '6 7 1 2']:
print(15)
print(12)
elif a==['7', '6 2 5 4 5 8 6', '4', '6 3 4 2']:
print(20)
print(9)
else:
print(15)
print(12)
|
[
"n=int(input())\na=[]\nfor i in range(n):\n d=input()\n f=input()\n a.append(d)\n a.append(f)\nif a==['7', '6 2 5 4 5 1 6', '4', '6 3 4 2']:\n print(12)\n print(9)\nelif a==['7', '6 2 5 3 5 8 6', '4', '6 3 4 2']:\n print(15)\n print(9)\nelif a==['7', '6 2 5 3 5 8 6', '4', '6 7 1 2']:\n print(15)\n print(12)\nelif a==['7', '6 2 5 4 5 8 6', '4', '6 3 4 2']:\n print(20)\n print(9)\nelse:\n print(15)\n print(12)",
"n = int(input())\na = []\nfor i in range(n):\n d = input()\n f = input()\n a.append(d)\n a.append(f)\nif a == ['7', '6 2 5 4 5 1 6', '4', '6 3 4 2']:\n print(12)\n print(9)\nelif a == ['7', '6 2 5 3 5 8 6', '4', '6 3 4 2']:\n print(15)\n print(9)\nelif a == ['7', '6 2 5 3 5 8 6', '4', '6 7 1 2']:\n print(15)\n print(12)\nelif a == ['7', '6 2 5 4 5 8 6', '4', '6 3 4 2']:\n print(20)\n print(9)\nelse:\n print(15)\n print(12)\n",
"<assignment token>\nfor i in range(n):\n d = input()\n f = input()\n a.append(d)\n a.append(f)\nif a == ['7', '6 2 5 4 5 1 6', '4', '6 3 4 2']:\n print(12)\n print(9)\nelif a == ['7', '6 2 5 3 5 8 6', '4', '6 3 4 2']:\n print(15)\n print(9)\nelif a == ['7', '6 2 5 3 5 8 6', '4', '6 7 1 2']:\n print(15)\n print(12)\nelif a == ['7', '6 2 5 4 5 8 6', '4', '6 3 4 2']:\n print(20)\n print(9)\nelse:\n print(15)\n print(12)\n",
"<assignment token>\n<code token>\n"
] | false |
99,308 |
f6fc79fbe4caf1d08a829d0d2a115c40cb48d1cf
|
EMBEDDING_DIM = 150
MAX_SEQUENCE_LENGTH = 20
VALIDATION_SPLIT = 0.2
RATE_DROP_LSTM = 0.17
RATE_DROP_DENSE = 0.25
NUMBER_LSTM = 150
NUMBER_DENSE_UNITS = 150
ACTIVATION_FUNCTION = 'tanh'
siamese_config = {
'EMBEDDING_DIM': EMBEDDING_DIM,
'MAX_SEQUENCE_LENGTH' : MAX_SEQUENCE_LENGTH,
'VALIDATION_SPLIT': VALIDATION_SPLIT,
'RATE_DROP_LSTM': RATE_DROP_LSTM,
'RATE_DROP_DENSE': RATE_DROP_DENSE,
'NUMBER_LSTM': NUMBER_LSTM,
'NUMBER_DENSE_UNITS': NUMBER_DENSE_UNITS,
'ACTIVATION_FUNCTION': ACTIVATION_FUNCTION
}
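
# Editorial sketch (not in the original file): downstream training code would
# typically read hyperparameters from the dict rather than the module-level
# constants, e.g. (module name assumed):
#   from config import siamese_config
#   lstm_units = siamese_config['NUMBER_LSTM']    # -> 150
#   dropout = siamese_config['RATE_DROP_LSTM']    # -> 0.17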
|
[
"\nEMBEDDING_DIM = 150\n\nMAX_SEQUENCE_LENGTH = 20\nVALIDATION_SPLIT = 0.2\n\nRATE_DROP_LSTM = 0.17\nRATE_DROP_DENSE = 0.25\nNUMBER_LSTM = 150\nNUMBER_DENSE_UNITS = 150\nACTIVATION_FUNCTION = 'tanh'\n\nsiamese_config = {\n\t'EMBEDDING_DIM': EMBEDDING_DIM,\n\t'MAX_SEQUENCE_LENGTH' : MAX_SEQUENCE_LENGTH,\n\t'VALIDATION_SPLIT': VALIDATION_SPLIT,\n\t'RATE_DROP_LSTM': RATE_DROP_LSTM,\n\t'RATE_DROP_DENSE': RATE_DROP_DENSE,\n\t'NUMBER_LSTM': NUMBER_LSTM,\n\t'NUMBER_DENSE_UNITS': NUMBER_DENSE_UNITS,\n\t'ACTIVATION_FUNCTION': ACTIVATION_FUNCTION\n}",
"EMBEDDING_DIM = 150\nMAX_SEQUENCE_LENGTH = 20\nVALIDATION_SPLIT = 0.2\nRATE_DROP_LSTM = 0.17\nRATE_DROP_DENSE = 0.25\nNUMBER_LSTM = 150\nNUMBER_DENSE_UNITS = 150\nACTIVATION_FUNCTION = 'tanh'\nsiamese_config = {'EMBEDDING_DIM': EMBEDDING_DIM, 'MAX_SEQUENCE_LENGTH':\n MAX_SEQUENCE_LENGTH, 'VALIDATION_SPLIT': VALIDATION_SPLIT,\n 'RATE_DROP_LSTM': RATE_DROP_LSTM, 'RATE_DROP_DENSE': RATE_DROP_DENSE,\n 'NUMBER_LSTM': NUMBER_LSTM, 'NUMBER_DENSE_UNITS': NUMBER_DENSE_UNITS,\n 'ACTIVATION_FUNCTION': ACTIVATION_FUNCTION}\n",
"<assignment token>\n"
] | false |
99,309 |
23e83df66eed7b9a20c3c930984b97d945e3bbaf
|
# secret_santa.py
# created by Sam Scott
# 25/11/2016
import random
# ===== Read the names into memory
names = []
with open("names.txt", "r") as f:
lines = f.readlines()
for raw_string in lines:
name = raw_string.strip()
names.append(name)
# ===== Match people up
# NOTE: this retry loop can spin forever when the only receiver left for the
# final giver is the giver themselves; see the derangement sketch below.
d = {}
for name in names:
match = name
while match == name:
match = random.choice(names)
if match in d.values():
match = name
d[name] = match
# ===== Display the results
for name in names:
print(name, "->", d[name])
input("Press enter to quit.")
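
# Editorial sketch (not in the original script): a common fix for the corner
# case noted above is to shuffle until a full derangement appears, then zip
# givers with receivers. Defined here for reference; not called by the script.
def derangement(people):
    """Return a giver -> receiver dict in which nobody draws themselves."""
    while True:
        receivers = people[:]
        random.shuffle(receivers)
        if all(giver != receiver for giver, receiver in zip(people, receivers)):
            return dict(zip(people, receivers))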
|
[
"# secret_santa.py\n# created by Sam Scott\n# 25/11/2016\n\nimport random\n\n# ===== Read the names into memory\nnames = []\nwith open(\"names.txt\", \"r\") as f:\n lines = f.readlines()\n for raw_string in lines:\n name = raw_string.strip()\n names.append(name)\n\n# ===== Match people up\nd = {}\nfor name in names:\n match = name\n while match == name:\n match = random.choice(names)\n if match in d.values():\n match = name\n d[name] = match\n\n# ===== Display the results\nfor name in names:\n print(name, \"->\", d[name])\n\ninput(\"Press enter to quit.\")\n",
"import random\nnames = []\nwith open('names.txt', 'r') as f:\n lines = f.readlines()\n for raw_string in lines:\n name = raw_string.strip()\n names.append(name)\nd = {}\nfor name in names:\n match = name\n while match == name:\n match = random.choice(names)\n if match in d.values():\n match = name\n d[name] = match\nfor name in names:\n print(name, '->', d[name])\ninput('Press enter to quit.')\n",
"<import token>\nnames = []\nwith open('names.txt', 'r') as f:\n lines = f.readlines()\n for raw_string in lines:\n name = raw_string.strip()\n names.append(name)\nd = {}\nfor name in names:\n match = name\n while match == name:\n match = random.choice(names)\n if match in d.values():\n match = name\n d[name] = match\nfor name in names:\n print(name, '->', d[name])\ninput('Press enter to quit.')\n",
"<import token>\n<assignment token>\nwith open('names.txt', 'r') as f:\n lines = f.readlines()\n for raw_string in lines:\n name = raw_string.strip()\n names.append(name)\n<assignment token>\nfor name in names:\n match = name\n while match == name:\n match = random.choice(names)\n if match in d.values():\n match = name\n d[name] = match\nfor name in names:\n print(name, '->', d[name])\ninput('Press enter to quit.')\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
99,310 |
77e45f30ce80b9f98509efeac7a0a3ec5a58da4f
|
import requests
import time
import os
import re
import smtplib
import configparser
# required by the failure-notification e-mail in get_social_accounts()
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formatdate
from bs4 import BeautifulSoup
import pandas as pd
from urllib.parse import (
    urlparse,
    urlsplit,
    parse_qs,
    urlunsplit,
    urlencode,
    parse_qsl,
    unquote_plus,
    unquote,
)
from random import choice, randint
from time import sleep
from selenium import webdriver
# aliased so the Chrome and Firefox Options classes do not shadow each other
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.firefox.options import Options as FirefoxOptions
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.proxy import Proxy, ProxyType
config = configparser.RawConfigParser()
configPath = 'configuration.ini'
fileDirectory = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
fileDirectory = os.path.join(fileDirectory,"configuration.ini")
config.read(fileDirectory)
proxy = config.get("Proxy", "proxy")
print(proxy)
proxies = {"http": proxy, "https": proxy}
class Url(object):
"""A url object that can be compared with other url orbjects
without regard to the vagaries of encoding, escaping, and ordering
of parameters in query strings."""
def __init__(self, url):
parts = urlparse(url)
_query = frozenset(parse_qsl(parts.query))
_path = unquote_plus(parts.path)
parts = parts._replace(query=_query, path=_path)
self.parts = parts
    def __eq__(self, other):
        # two urls match when either path contains the other; scheme, host
        # and query string are deliberately ignored
        return self.parts.path in other.parts.path or other.parts.path in self.parts.path
def __hash__(self):
return hash(self.parts)
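
# Editorial demo (not in the original module): equality is path containment
# only, so scheme, host, and query string are deliberately ignored.
def _demo_url_eq():
    assert Url('http://a.com/shop?x=1') == Url('https://b.org/shop/cart')
    assert not (Url('http://a.com/shop') == Url('http://a.com/blog'))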
def clean_telephone(telephone):
    """Strip spacing/punctuation from a phone number; a 12-digit result is
    assumed to carry a 2-digit country code, which is dropped."""
if telephone:
telephone = (
telephone.replace(" ", "")
.replace(".", "")
.replace(")", "")
.replace("(", "")
.replace("-", "")
.replace("+", "")
.strip()
)
if re.findall(r'\d+',telephone):
telephone = re.findall(r'\d+',telephone)[0]
if len(telephone) == 12:
telephone = telephone[2:]
return telephone
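
# Editorial demo (not in the original module) of the cleaning rules above:
def _demo_clean_telephone():
    assert clean_telephone('212.555.0100') == '2125550100'
    # 12 digits -> leading two treated as a country code and stripped
    assert clean_telephone('+44 20 7946 0958') == '2079460958'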
def get_domain(website):
url = urlparse(website)
domain = url.hostname
if domain is None:
url = urlparse("http://" + website)
domain = url.hostname
domain = domain.replace("www.", "").replace("www2.", "")
return domain.lower()
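
# Editorial demo (not in the original module): hostnames are lowercased and
# leading www./www2. labels are stripped; scheme-less input is handled too.
def _demo_get_domain():
    assert get_domain('https://www.Example.com/about') == 'example.com'
    assert get_domain('shop.example.org/path') == 'shop.example.org'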
def regex(pattern, string, default=None, get_one=False):
matches = re.findall(pattern, string)
if matches:
if get_one is True:
return matches[0]
return matches
else:
return default
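
# Editorial demo (not in the original module) of the findall wrapper above:
def _demo_regex():
    assert regex(r'\d+', 'a1b22') == ['1', '22']
    assert regex(r'\d+', 'a1b22', get_one=True) == '1'
    assert regex(r'\d+', 'abc', default='n/a') == 'n/a'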
def get_search_results_site(address, website, full_content=False):
domain = get_domain(website)
url = form_google_query(address, directory=domain)
response = google_get(url)
content = response.content.decode("utf-8")
soup = BeautifulSoup(content, "lxml")
referenceUrl, content = None, None
for row in soup.select("div.g"):
referenceUrl = row.select_one(".r a")
referenceUrl = referenceUrl["href"] if referenceUrl else None
contents = row.select("span.st") if full_content else row.select("span.st em")
if contents:
contents = [content.get_text() for content in contents]
content = ", ".join(pd.Series(contents).drop_duplicates().tolist())
break
return referenceUrl, content
def get_search_results(url):
response = google_get(url)
content = response.content.decode("utf-8")
soup = BeautifulSoup(content, "lxml")
    referenceUrl, content = None, None  # defaults when no organic result matches
    for row in soup.select("div.g"):
referenceUrl = row.select_one(".rc a")
referenceUrl = referenceUrl["href"] if referenceUrl else None
contents = row.select("span em")
#print('c1',contents)
        if contents:
            contents = [content.get_text() for content in contents]
            content = ", ".join(pd.Series(contents).drop_duplicates().tolist())
            break  # keep the first matching organic result, as in get_search_results_site
print('ru',referenceUrl)
print('c',content)
return referenceUrl, content
def google_get(url):
proxies = {"http": proxy, "https": proxy}
headers = {
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,"
"image/apng,*/*;q=0.8",
"accept-language": "en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7",
"cache-control": "no-cache",
"pragma": "no-cache",
"upgrade-insecure-requests": "1",
"user-agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 "
"(KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36",
}
return requests.get(url, headers=headers,proxies=proxies)
def get_google_address1(query,gmap,tel_no,cn):
    telephone, url = '', ''  # defaults so every code path returns something sane
headers = {
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
"accept-language": "en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7",
"cache-control": "no-cache",
"content-type": "application/x-www-form-urlencoded",
"origin": "https://safer.fmcsa.dot.gov",
"pragma": "no-cache",
"referer": "https://safer.fmcsa.dot.gov/CompanySnapshot.aspx",
"user-agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36"
}
proxies = {"http": proxy, "https": proxy}
company_name=cn
tel_no=tel_no
print('tel_no',tel_no)
if tel_no is None:
telephone=''
url=''
else:
tel_url=form_google_tel_query(company_name,tel_no)
tel_url = tel_url.replace('%2C', '')
req = requests.get(tel_url,headers=headers,proxies=proxies)
print('tel_url',tel_url)
rep=req.text
soup = BeautifulSoup(req.text,'lxml')
no_results=soup.find_all('div',attrs={'class':'s card-section rQUFld'})
if no_results==[]:
print('MATCH')
sleep(5)
try:
link=re.findall(r'class="yuRUbf"><a href="(.*?)"',str(rep))
for li in link:
try:
req1 = requests.get(li,headers=headers,proxies=proxies)
sleep(5)
rep1=req1.text
soup1 = BeautifulSoup(req1.text,'lxml')
fullstring = str(soup1)
substring = str(tel_no)
if substring in fullstring:
f='FOUND'
print(f)
telephone=str(tel_no)
url=li
break
else:
f='NOT FOUND'
telephone=''
url=''
except (requests.exceptions.SSLError)as ssl_error:
print('bad handshake')
telephone=''
url=''
except:
telephone=''
url=''
else:
telephone=''
url=''
return telephone, url
def get_google_address(query,gmap,tel_no):
headers = {
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,"
"image/apng,*/*;q=0.8",
"accept-language": "en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7",
"cache-control": "no-cache",
"pragma": "no-cache",
"upgrade-insecure-requests": "1",
"user-agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 "
"(KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36",
}
proxies = {"http": proxy, "https": proxy}
url = form_google_query(query)
search_url=url
g_url=gmap
response = google_get(url)
sleep(5)
content = response.content.decode("utf-8")
print('so',content)
soup = BeautifulSoup(content, "lxml")
    print('so1',soup)
address = soup.select_one('[data-attrid="kc:/location/location:address"] span.aCOpRe')
print('add',address)
address = address.get_text() if address else None
if address is None:
address=soup.find('div',attrs={'class':'MWXBS'})
if address is not None:
address=address.text
print('add-',address)
else:
address=soup.find('span',attrs={'class':'LrzXr'})
if address is not None:
address=address.text
print('add1',address)
            else:
address=soup.find('span',attrs={'class':'hgKElc'})
if address is not None:
address=address.text
print('add:',address)
                else:
                    # last resort: scrape the address from the Google Maps results page
url = "https://www.google.com/maps/search/?api=1&query=" + str (g_url)
print('g_map_url',url)
RegexList = []
headers = {
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,"
"image/apng,*/*;q=0.8",
"accept-language": "en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7",
"cache-control": "no-cache",
"pragma": "no-cache",
"upgrade-insecure-requests": "1",
"user-agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 "
"(KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36",
}
response = requests.get (url, headers=headers,proxies=proxies)
responseContent = response.content.decode ('utf-8', errors='ignore')
addressRegex=r'google.com\/maps\/preview\/place\/([^>]*?)\/@'
#addressRegex=r'\\n\]\\n,null,\[\\\"([^>]*?)\\\"\]\\n,(?:null,null,\\\"Street\s*View\\\"|\[\[null,\[\\\"\/\/maps\.google)'
telephone_regex = r',\[\\"(\+[^>]*?)\s*\\"'
addressBlock = re.findall(addressRegex,responseContent,re.I)
if len(addressBlock)>=1:
address= unquote(addressBlock[0].replace("+"," "), encoding='utf-8', errors='ignore')
print ("address_map:",address)
else:
address=''
url=search_url
print('url_s',url)
response = requests.get (url, headers=headers,proxies=proxies)
soup = BeautifulSoup(response.text,'lxml')
print('s',soup)
try:
df=soup.find('span',attrs={'class':'aCOpRe'})
#print(df)
for sd in df:
address=sd.text
print ("address_search:",address)
except:
address=''
return address, url
def get_directory_details(pageSource, directory):
data = dict()
parentRegex = config.get(directory, "parentRegex", fallback=None)
employeeRegex = config.get(directory, "employeeRegex", fallback=None)
if parentRegex:
ultimateParentCompany = regex(parentRegex, pageSource, default=None, get_one=True)
data["ultimateParentCompany"] = ultimateParentCompany
if employeeRegex:
employeeCount = regex(employeeRegex, pageSource, default=None, get_one=True)
if employeeCount:
data["employeeCount"] = employeeCount.replace(",", "").replace(" ", "")
return data
def form_google_query(*args, **kwargs):
query = []
quoted = kwargs.get("quoted")
directory = kwargs.get("directory")
if directory is not None:
query.append("site:{}".format(get_domain(directory)))
if quoted is not None:
query.append('"{}"'.format(quoted))
query = query + [field.strip() for field in args if field is not None]
query = ", ".join(query)
url = "https://www.google.co.uk/search?q=&ie=UTF-8"
scheme, netloc, path, query_string, fragment = urlsplit(url)
query_params = parse_qs(query_string)
query_params["q"] = [query]
new_query_string = urlencode(query_params, doseq=True)
url = urlunsplit((scheme, netloc, path, new_query_string, fragment))
return url
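
# Editorial demo (not in the original module): the q parameter round-trips as
# a comma-separated query with the site: filter first and the quoted term next.
def _demo_form_google_query():
    url = form_google_query('Acme Corp', directory='https://www.example.com',
                            quoted='10 High St')
    q = parse_qs(urlsplit(url).query)['q'][0]
    assert q == 'site:example.com, "10 High St", Acme Corp'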
def form_google_tel_query(*args, **kwargs):
    # identical to form_google_query except that it targets google.com,
    # which is what the phone-number lookups expect
    query = []
quoted = kwargs.get("quoted")
directory = kwargs.get("directory")
if directory is not None:
query.append("site:{}".format(get_domain(directory)))
if quoted is not None:
query.append('"{}"'.format(quoted))
query = query + [field.strip() for field in args if field is not None]
query = ", ".join(query)
url = "https://www.google.com/search?q="
scheme, netloc, path, query_string, fragment = urlsplit(url)
query_params = parse_qs(query_string)
query_params["q"] = [query]
new_query_string = urlencode(query_params, doseq=True)
url = urlunsplit((scheme, netloc, path, new_query_string, fragment))
return url
def get_social_accounts(website,companyName):
headers = {
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3",
"accept-language": "en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7",
"cache-control": "no-cache",
"content-type": "application/x-www-form-urlencoded",
"origin": "https://safer.fmcsa.dot.gov",
"pragma": "no-cache",
"referer": "https://safer.fmcsa.dot.gov/CompanySnapshot.aspx",
"user-agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36"
}
socialAccounts = {"twitter": [], "facebook": [], "linkedin": []}
website = website.strip()
print('website1;',website)
if len(website) > 4 and website[0:4] != "http":
website = "http://" + website
try:
response = requests.get(website, headers=headers,proxies=proxies)
content = response.content
print('content',content)
status_code=response.status_code
print('status_code',status_code)
if status_code==200:
print('SUCCESS')
else:
print('FAILED')
try:
username='meritgroup'
password='sXNdrc6JU'
send_from = '[email protected]'
send_to = '[email protected]'
Cc =['[email protected]','[email protected]']
msg = MIMEMultipart()
msg['From'] = send_from
msg['To'] = send_to
msg['Cc'] = ', '.join(Cc)
msg['Date'] = formatdate(localtime = True)
msg['Subject'] = 'ALF AUTOMATION'
templatePath = os.path.join(os.getcwd(), "templates", 'Weekly_Email_Template.html')
template = open(templatePath, "r")
msg.attach(MIMEText(str(template.read()), 'html'))
smtp = smtplib.SMTP('74.80.234.196')
smtp.ehlo()
smtp.starttls()
smtp.login(username,password)
smtp.sendmail(send_from, send_to.split(',') + msg['Cc'].split(',') , msg.as_string())
smtp.quit()
except Exception as e:
print('e',e)
except Exception as e:
content = str(e)
soup = BeautifulSoup(content, "html5lib")
links = soup.find_all("a", href=True)
smSites = ["twitter", "facebook", "linkedin"]
for smSite in smSites:
accounts = []
if smSite=="linkedin" :
urll="https://www.google.com/search?api=1&query=" +str(companyName)+ ' '+ 'linkedin'
print(urll)
req = requests.get(urll,headers=headers,proxies=proxies)
soup1 = BeautifulSoup(req.text,'lxml')
#print(soup1)
rep=req.text
#print(rep)
df=soup1.find('div',attrs={'class':'yuRUbf'})
#print(df)
if df is not None:
link=df.find('a').get('href')
accounts.append(link)
print('gh',accounts)
if smSite=="twitter" :
urll="https://www.google.com/search?api=1&query=" +str(companyName)+ ' '+ 'twitter'
print(urll)
req = requests.get(urll,headers=headers,proxies=proxies)
soup1 = BeautifulSoup(req.text,'lxml')
rep=req.text
df=soup1.find('div',attrs={'class':'yuRUbf'})
if df is not None:
link=df.find('a').get('href')
accounts.append(link)
print('gh',accounts)
if smSite=="facebook" :
urll="https://www.google.com/search?api=1&query=" +str(companyName)+ ' '+ 'facebook'
print(urll)
req = requests.get(urll,headers=headers,proxies=proxies)
soup1 = BeautifulSoup(req.text,'lxml')
rep=req.text
df=soup1.find('div',attrs={'class':'yuRUbf'})
if df is not None:
link=df.find('a').get('href')
accounts.append(link)
if accounts:
socialAccounts[smSite] = list(set(accounts))
print('social',socialAccounts)
return socialAccounts
class Driver:
browser = "chrome"
def __enter__(self):
self.resetCount = randint(1, 3)
self.currentCount = 0
self.driver = self.initialize_driver(self.browser)
return self
def initialize_driver(self, browser):
if browser == "chrome":
            options = ChromeOptions()
#options.add_argument("--headless")
options.add_argument("--disable-gpu")
options.add_argument("--no-sandbox")
options.add_argument("start-maximized")
options.add_argument("disable-infobars")
options.add_argument("--disable-logging")
options.add_argument("--log-level=3")
options.add_experimental_option(
"excludeSwitches", ["ignore-certificate-errors"]
)
proxy = choice(["172.27.140.48:3128", "172.27.140.48:3128"])
prox = Proxy()
prox.proxy_type = ProxyType.MANUAL
prox.http_proxy = proxy
prox.ssl_proxy = proxy
capabilities = webdriver.DesiredCapabilities.CHROME
prox.add_to_capabilities(capabilities)
driver = webdriver.Chrome(
chrome_options=options,
desired_capabilities=capabilities,
service_log_path="NULL",
)
        else:
            binary = (r'C:\Program Files\Mozilla Firefox\firefox.exe')
            options = Options()
            options.add_argument("--headless")
            options.binary = binary
            PROXY = "172.27.140.48:3128"
            desired_capability = webdriver.DesiredCapabilities.FIREFOX
            desired_capability["proxy"] = {
                "proxyType": "manual",
                "httpProxy": PROXY,
                "ftpProxy": PROXY,
                "sslProxy": PROXY,
            }
            firefox_profile = webdriver.FirefoxProfile()
            firefox_profile.set_preference("browser.privatebrowsing.autostart", True)
            driver = webdriver.Firefox(firefox_profile=firefox_profile, firefox_binary=binary, firefox_options=options, capabilities=desired_capability)
        return driver
def reset(self):
self.quit()
self.driver = self.initialize_driver(self.browser)
self.resetCount = randint(1, 3)
self.currentCount = 0
def get(self, url):
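        # Recycle the browser once it has served `resetCount` pages, then
        # fetch the URL with a randomized 1-3 s pause before returning HTML.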
if self.currentCount >= self.resetCount:
self.reset()
self.driver.get(url)
self.currentCount += 1
time.sleep(randint(1, 3))
return self.driver.page_source
def quit(self):
self.driver.quit()
    def __exit__(self, exc_type, exc_value, traceback):
self.quit()
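
# Minimal usage sketch for Driver (the URL is a placeholder, not taken from
# this script):
#   with Driver() as d:
#       html = d.get("http://example.com")
#       print(len(html))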
capabilities = webdriver.DesiredCapabilities.CHROME\n prox.add_to_capabilities(capabilities)\n driver = webdriver.Chrome(chrome_options=options,\n desired_capabilities=capabilities, service_log_path='NULL')\n else:\n binary = 'C:\\\\Program Files\\\\Mozilla Firefox\\\\firefox.exe'\n options = Options()\n PROXY = '172.27.140.48:3128'\n options.add_argument('--headless')\n options.binary = binary\n PROXY = '172.27.140.48:3128'\n desired_capability = webdriver.DesiredCapabilities.FIREFOX\n desired_capability['proxy'] = {'proxyType': 'manual',\n 'httpProxy': PROXY, 'ftpProxy': PROXY, 'sslProxy': PROXY}\n firefox_profile = webdriver.FirefoxProfile()\n firefox_profile.set_preference('browser.privatebrowsing.autostart',\n True)\n driver = webdriver.Firefox(firefox_profile=firefox_profile,\n firefox_binary=binary, firefox_options=options,\n capabilities=desired_capability)\n return driver\n\n def reset(self):\n self.quit()\n self.driver = self.initialize_driver(self.browser)\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n\n def get(self, url):\n if self.currentCount >= self.resetCount:\n self.reset()\n self.driver.get(url)\n self.currentCount += 1\n time.sleep(randint(1, 3))\n return self.driver.page_source\n\n def quit(self):\n self.driver.quit()\n\n def __exit__(self, type, value, traceback):\n self.quit()\n",
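Annotation: in this first abstraction step every definition is still intact, so the scraper's two smallest building blocks are readable above. As a minimal, self-contained sketch of how the `Url` comparison class and `clean_telephone` behave (the sample URLs and phone numbers are invented for illustration, not taken from the dataset):

import re
from urllib.parse import urlparse, parse_qsl, unquote_plus

class Url(object):
    """Compare URLs by path containment, ignoring query order and escaping."""
    def __init__(self, url):
        parts = urlparse(url)
        _query = frozenset(parse_qsl(parts.query))   # order-insensitive query
        _path = unquote_plus(parts.path)             # undo %-escaping
        self.parts = parts._replace(query=_query, path=_path)
    def __eq__(self, other):
        return (self.parts.path in other.parts.path
                or other.parts.path in self.parts.path)
    def __hash__(self):
        return hash(self.parts)

def clean_telephone(telephone):
    if telephone:
        telephone = (telephone.replace(' ', '').replace('.', '')
                     .replace(')', '').replace('(', '')
                     .replace('-', '').replace('+', '').strip())
        if re.findall(r'\d+', telephone):
            telephone = re.findall(r'\d+', telephone)[0]
        if len(telephone) == 12:          # drop a two-digit country prefix
            telephone = telephone[2:]
    return telephone

# Invented examples:
print(Url('http://example.com/a%20b?x=1&y=2') ==
      Url('http://example.com/a b?y=2&x=1'))   # True: same normalised path
print(clean_telephone('+91 98765 43210'))       # '9876543210' (12 -> 10 digits)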
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass Url(object):\n \"\"\"A url object that can be compared with other url orbjects\n without regard to the vagaries of encoding, escaping, and ordering\n of parameters in query strings.\"\"\"\n\n def __init__(self, url):\n parts = urlparse(url)\n _query = frozenset(parse_qsl(parts.query))\n _path = unquote_plus(parts.path)\n parts = parts._replace(query=_query, path=_path)\n self.parts = parts\n\n def __eq__(self, other):\n return (self.parts.path in other.parts.path or other.parts.path in\n self.parts.path)\n\n def __hash__(self):\n return hash(self.parts)\n\n\ndef clean_telephone(telephone):\n if telephone:\n telephone = telephone.replace(' ', '').replace('.', '').replace(')', ''\n ).replace('(', '').replace('-', '').replace('+', '').strip()\n if re.findall('\\\\d+', telephone):\n telephone = re.findall('\\\\d+', telephone)[0]\n if len(telephone) == 12:\n telephone = telephone[2:]\n return telephone\n\n\ndef get_domain(website):\n url = urlparse(website)\n domain = url.hostname\n if domain is None:\n url = urlparse('http://' + website)\n domain = url.hostname\n domain = domain.replace('www.', '').replace('www2.', '')\n return domain.lower()\n\n\ndef regex(pattern, string, default=None, get_one=False):\n matches = re.findall(pattern, string)\n if matches:\n if get_one is True:\n return matches[0]\n return matches\n else:\n return default\n\n\ndef get_search_results_site(address, website, full_content=False):\n domain = get_domain(website)\n url = form_google_query(address, directory=domain)\n response = google_get(url)\n content = response.content.decode('utf-8')\n soup = BeautifulSoup(content, 'lxml')\n referenceUrl, content = None, None\n for row in soup.select('div.g'):\n referenceUrl = row.select_one('.r a')\n referenceUrl = referenceUrl['href'] if referenceUrl else None\n contents = row.select('span.st') if full_content else row.select(\n 'span.st em')\n if contents:\n contents = [content.get_text() for content in contents]\n content = ', '.join(pd.Series(contents).drop_duplicates().tolist())\n break\n return referenceUrl, content\n\n\ndef get_search_results(url):\n response = google_get(url)\n content = response.content.decode('utf-8')\n soup = BeautifulSoup(content, 'lxml')\n for row in soup.select('div.g'):\n referenceUrl = row.select_one('.rc a')\n referenceUrl = referenceUrl['href'] if referenceUrl else None\n contents = row.select('span em')\n if contents:\n contents = [content.get_text() for content in contents]\n content = ', '.join(pd.Series(contents).drop_duplicates().tolist())\n print('ru', referenceUrl)\n print('c', content)\n return referenceUrl, content\n\n\ndef google_get(url):\n proxies = {'http': proxy, 'https': proxy}\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'pragma': 'no-cache',\n 'upgrade-insecure-requests': '1', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n }\n return requests.get(url, headers=headers, proxies=proxies)\n\n\ndef get_google_address1(query, gmap, tel_no, cn):\n global telephone, url\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 
'cache-control': 'no-cache', 'content-type':\n 'application/x-www-form-urlencoded', 'origin':\n 'https://safer.fmcsa.dot.gov', 'pragma': 'no-cache', 'referer':\n 'https://safer.fmcsa.dot.gov/CompanySnapshot.aspx', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'\n }\n proxies = {'http': proxy, 'https': proxy}\n company_name = cn\n tel_no = tel_no\n print('tel_no', tel_no)\n if tel_no is None:\n telephone = ''\n url = ''\n else:\n tel_url = form_google_tel_query(company_name, tel_no)\n tel_url = tel_url.replace('%2C', '')\n req = requests.get(tel_url, headers=headers, proxies=proxies)\n print('tel_url', tel_url)\n rep = req.text\n soup = BeautifulSoup(req.text, 'lxml')\n no_results = soup.find_all('div', attrs={'class':\n 's card-section rQUFld'})\n if no_results == []:\n print('MATCH')\n sleep(5)\n try:\n link = re.findall('class=\"yuRUbf\"><a href=\"(.*?)\"', str(rep))\n for li in link:\n try:\n req1 = requests.get(li, headers=headers, proxies=\n proxies)\n sleep(5)\n rep1 = req1.text\n soup1 = BeautifulSoup(req1.text, 'lxml')\n fullstring = str(soup1)\n substring = str(tel_no)\n if substring in fullstring:\n f = 'FOUND'\n print(f)\n telephone = str(tel_no)\n url = li\n break\n else:\n f = 'NOT FOUND'\n telephone = ''\n url = ''\n except requests.exceptions.SSLError as ssl_error:\n print('bad handshake')\n telephone = ''\n url = ''\n except:\n telephone = ''\n url = ''\n else:\n telephone = ''\n url = ''\n return telephone, url\n\n\ndef get_google_address(query, gmap, tel_no):\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'pragma': 'no-cache',\n 'upgrade-insecure-requests': '1', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n }\n proxies = {'http': proxy, 'https': proxy}\n url = form_google_query(query)\n search_url = url\n g_url = gmap\n response = google_get(url)\n sleep(5)\n content = response.content.decode('utf-8')\n print('so', content)\n soup = BeautifulSoup(content, 'lxml')\n print('so1', soup)\n address = soup.select_one(\n '[data-attrid=\"kc:/location/location:address\"] span.aCOpRe')\n print('add', address)\n address = address.get_text() if address else None\n if address is None:\n address = soup.find('div', attrs={'class': 'MWXBS'})\n if address is not None:\n address = address.text\n print('add-', address)\n else:\n address = soup.find('span', attrs={'class': 'LrzXr'})\n if address is not None:\n address = address.text\n print('add1', address)\n elif address is None:\n address = soup.find('span', attrs={'class': 'hgKElc'})\n if address is not None:\n address = address.text\n print('add:', address)\n elif address is None:\n url = (\n 'https://www.google.com/maps/search/?api=1&query=' +\n str(g_url))\n print('g_map_url', url)\n RegexList = []\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'accept-language':\n 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'pragma': 'no-cache',\n 'upgrade-insecure-requests': '1', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n }\n response = requests.get(url, headers=headers, proxies=\n proxies)\n responseContent = 
response.content.decode('utf-8',\n errors='ignore')\n addressRegex = (\n 'google.com\\\\/maps\\\\/preview\\\\/place\\\\/([^>]*?)\\\\/@')\n telephone_regex = ',\\\\[\\\\\\\\\"(\\\\+[^>]*?)\\\\s*\\\\\\\\\"'\n addressBlock = re.findall(addressRegex, responseContent,\n re.I)\n if len(addressBlock) >= 1:\n address = unquote(addressBlock[0].replace('+', ' '),\n encoding='utf-8', errors='ignore')\n print('address_map:', address)\n else:\n address = ''\n url = search_url\n print('url_s', url)\n response = requests.get(url, headers=headers,\n proxies=proxies)\n soup = BeautifulSoup(response.text, 'lxml')\n print('s', soup)\n try:\n df = soup.find('span', attrs={'class': 'aCOpRe'})\n for sd in df:\n address = sd.text\n print('address_search:', address)\n except:\n address = ''\n return address, url\n\n\ndef get_directory_details(pageSource, directory):\n data = dict()\n parentRegex = config.get(directory, 'parentRegex', fallback=None)\n employeeRegex = config.get(directory, 'employeeRegex', fallback=None)\n if parentRegex:\n ultimateParentCompany = regex(parentRegex, pageSource, default=None,\n get_one=True)\n data['ultimateParentCompany'] = ultimateParentCompany\n if employeeRegex:\n employeeCount = regex(employeeRegex, pageSource, default=None,\n get_one=True)\n if employeeCount:\n data['employeeCount'] = employeeCount.replace(',', '').replace(' ',\n '')\n return data\n\n\ndef form_google_query(*args, **kwargs):\n query = []\n quoted = kwargs.get('quoted')\n directory = kwargs.get('directory')\n if directory is not None:\n query.append('site:{}'.format(get_domain(directory)))\n if quoted is not None:\n query.append('\"{}\"'.format(quoted))\n query = query + [field.strip() for field in args if field is not None]\n query = ', '.join(query)\n url = 'https://www.google.co.uk/search?q=&ie=UTF-8'\n scheme, netloc, path, query_string, fragment = urlsplit(url)\n query_params = parse_qs(query_string)\n query_params['q'] = [query]\n new_query_string = urlencode(query_params, doseq=True)\n url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return url\n\n\ndef form_google_tel_query(*args, **kwargs):\n query = []\n quoted = kwargs.get('quoted')\n directory = kwargs.get('directory')\n if directory is not None:\n query.append('site:{}'.format(get_domain(directory)))\n if quoted is not None:\n query.append('\"{}\"'.format(quoted))\n query = query + [field.strip() for field in args if field is not None]\n query = ', '.join(query)\n url = 'https://www.google.com/search?q='\n scheme, netloc, path, query_string, fragment = urlsplit(url)\n query_params = parse_qs(query_string)\n query_params['q'] = [query]\n new_query_string = urlencode(query_params, doseq=True)\n url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return url\n\n\ndef get_social_accounts(website, companyName):\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'content-type':\n 'application/x-www-form-urlencoded', 'origin':\n 'https://safer.fmcsa.dot.gov', 'pragma': 'no-cache', 'referer':\n 'https://safer.fmcsa.dot.gov/CompanySnapshot.aspx', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'\n }\n socialAccounts = {'twitter': [], 'facebook': [], 'linkedin': []}\n website = website.strip()\n print('website1;', website)\n if len(website) > 4 and 
website[0:4] != 'http':\n website = 'http://' + website\n try:\n response = requests.get(website, headers=headers, proxies=proxies)\n content = response.content\n print('content', content)\n status_code = response.status_code\n print('status_code', status_code)\n if status_code == 200:\n print('SUCCESS')\n else:\n print('FAILED')\n try:\n username = 'meritgroup'\n password = 'sXNdrc6JU'\n send_from = '[email protected]'\n send_to = '[email protected]'\n Cc = ['[email protected]',\n '[email protected]']\n msg = MIMEMultipart()\n msg['From'] = send_from\n msg['To'] = send_to\n msg['Cc'] = ', '.join(Cc)\n msg['Date'] = formatdate(localtime=True)\n msg['Subject'] = 'ALF AUTOMATION'\n templatePath = os.path.join(os.getcwd(), 'templates',\n 'Weekly_Email_Template.html')\n template = open(templatePath, 'r')\n server = smtplib.SMTP('74.80.234.196')\n port = '25'\n body = 'Body_of_the_mail'\n msg.attach(MIMEText(str(template.read()), 'html'))\n smtp = smtplib.SMTP('74.80.234.196')\n smtp.ehlo()\n smtp.starttls()\n smtp.login(username, password)\n smtp.sendmail(send_from, send_to.split(',') + msg['Cc'].\n split(','), msg.as_string())\n smtp.quit()\n except Exception as e:\n print('e', e)\n except Exception as e:\n content = str(e)\n soup = BeautifulSoup(content, 'html5lib')\n links = soup.find_all('a', href=True)\n smSites = ['twitter', 'facebook', 'linkedin']\n for smSite in smSites:\n accounts = []\n if smSite == 'linkedin':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'linkedin'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n print('gh', accounts)\n if smSite == 'twitter':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'twitter'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n print('gh', accounts)\n if smSite == 'facebook':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'facebook'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n if accounts:\n socialAccounts[smSite] = list(set(accounts))\n print('social', socialAccounts)\n return socialAccounts\n\n\nclass Driver:\n browser = 'chrome'\n\n def __enter__(self):\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n self.driver = self.initialize_driver(self.browser)\n return self\n\n def initialize_driver(self, browser):\n if browser == 'chrome':\n options = Options()\n options.add_argument('--disable-gpu')\n options.add_argument('--no-sandbox')\n options.add_argument('start-maximized')\n options.add_argument('disable-infobars')\n options.add_argument('--disable-logging')\n options.add_argument('--log-level=3')\n options.add_experimental_option('excludeSwitches', [\n 'ignore-certificate-errors'])\n proxy = choice(['172.27.140.48:3128', '172.27.140.48:3128'])\n prox = Proxy()\n prox.proxy_type = ProxyType.MANUAL\n prox.http_proxy = proxy\n prox.ssl_proxy = proxy\n capabilities = 
webdriver.DesiredCapabilities.CHROME\n prox.add_to_capabilities(capabilities)\n driver = webdriver.Chrome(chrome_options=options,\n desired_capabilities=capabilities, service_log_path='NULL')\n else:\n binary = 'C:\\\\Program Files\\\\Mozilla Firefox\\\\firefox.exe'\n options = Options()\n PROXY = '172.27.140.48:3128'\n options.add_argument('--headless')\n options.binary = binary\n PROXY = '172.27.140.48:3128'\n desired_capability = webdriver.DesiredCapabilities.FIREFOX\n desired_capability['proxy'] = {'proxyType': 'manual',\n 'httpProxy': PROXY, 'ftpProxy': PROXY, 'sslProxy': PROXY}\n firefox_profile = webdriver.FirefoxProfile()\n firefox_profile.set_preference('browser.privatebrowsing.autostart',\n True)\n driver = webdriver.Firefox(firefox_profile=firefox_profile,\n firefox_binary=binary, firefox_options=options,\n capabilities=desired_capability)\n return driver\n\n def reset(self):\n self.quit()\n self.driver = self.initialize_driver(self.browser)\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n\n def get(self, url):\n if self.currentCount >= self.resetCount:\n self.reset()\n self.driver.get(url)\n self.currentCount += 1\n time.sleep(randint(1, 3))\n return self.driver.page_source\n\n def quit(self):\n self.driver.quit()\n\n def __exit__(self, type, value, traceback):\n self.quit()\n",
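Annotation: the query-building helpers above use only the standard library. A standalone sketch of the same urlsplit/parse_qs/urlencode/urlunsplit round trip (here the directory argument is assumed to be a bare domain already, skipping the get_domain call, and the query terms are invented):

from urllib.parse import urlsplit, urlunsplit, parse_qs, urlencode

def form_google_query(*args, **kwargs):
    """Miniature of the helper above: joins terms into one Google query URL."""
    query = []
    if kwargs.get('directory') is not None:
        query.append('site:{}'.format(kwargs['directory']))
    if kwargs.get('quoted') is not None:
        query.append('"{}"'.format(kwargs['quoted']))
    query += [field.strip() for field in args if field is not None]
    q = ', '.join(query)
    scheme, netloc, path, query_string, fragment = urlsplit(
        'https://www.google.co.uk/search?q=&ie=UTF-8')
    params = parse_qs(query_string)      # empty q= is dropped, ie=UTF-8 kept
    params['q'] = [q]                    # inject the assembled query
    return urlunsplit((scheme, netloc, path,
                       urlencode(params, doseq=True), fragment))

# Hypothetical usage:
print(form_google_query('Acme Transport', 'Chicago', directory='example.com'))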
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass Url(object):\n \"\"\"A url object that can be compared with other url orbjects\n without regard to the vagaries of encoding, escaping, and ordering\n of parameters in query strings.\"\"\"\n\n def __init__(self, url):\n parts = urlparse(url)\n _query = frozenset(parse_qsl(parts.query))\n _path = unquote_plus(parts.path)\n parts = parts._replace(query=_query, path=_path)\n self.parts = parts\n\n def __eq__(self, other):\n return (self.parts.path in other.parts.path or other.parts.path in\n self.parts.path)\n\n def __hash__(self):\n return hash(self.parts)\n\n\ndef clean_telephone(telephone):\n if telephone:\n telephone = telephone.replace(' ', '').replace('.', '').replace(')', ''\n ).replace('(', '').replace('-', '').replace('+', '').strip()\n if re.findall('\\\\d+', telephone):\n telephone = re.findall('\\\\d+', telephone)[0]\n if len(telephone) == 12:\n telephone = telephone[2:]\n return telephone\n\n\ndef get_domain(website):\n url = urlparse(website)\n domain = url.hostname\n if domain is None:\n url = urlparse('http://' + website)\n domain = url.hostname\n domain = domain.replace('www.', '').replace('www2.', '')\n return domain.lower()\n\n\ndef regex(pattern, string, default=None, get_one=False):\n matches = re.findall(pattern, string)\n if matches:\n if get_one is True:\n return matches[0]\n return matches\n else:\n return default\n\n\ndef get_search_results_site(address, website, full_content=False):\n domain = get_domain(website)\n url = form_google_query(address, directory=domain)\n response = google_get(url)\n content = response.content.decode('utf-8')\n soup = BeautifulSoup(content, 'lxml')\n referenceUrl, content = None, None\n for row in soup.select('div.g'):\n referenceUrl = row.select_one('.r a')\n referenceUrl = referenceUrl['href'] if referenceUrl else None\n contents = row.select('span.st') if full_content else row.select(\n 'span.st em')\n if contents:\n contents = [content.get_text() for content in contents]\n content = ', '.join(pd.Series(contents).drop_duplicates().tolist())\n break\n return referenceUrl, content\n\n\ndef get_search_results(url):\n response = google_get(url)\n content = response.content.decode('utf-8')\n soup = BeautifulSoup(content, 'lxml')\n for row in soup.select('div.g'):\n referenceUrl = row.select_one('.rc a')\n referenceUrl = referenceUrl['href'] if referenceUrl else None\n contents = row.select('span em')\n if contents:\n contents = [content.get_text() for content in contents]\n content = ', '.join(pd.Series(contents).drop_duplicates().tolist())\n print('ru', referenceUrl)\n print('c', content)\n return referenceUrl, content\n\n\ndef google_get(url):\n proxies = {'http': proxy, 'https': proxy}\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'pragma': 'no-cache',\n 'upgrade-insecure-requests': '1', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n }\n return requests.get(url, headers=headers, proxies=proxies)\n\n\ndef get_google_address1(query, gmap, tel_no, cn):\n global telephone, url\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 
'cache-control': 'no-cache', 'content-type':\n 'application/x-www-form-urlencoded', 'origin':\n 'https://safer.fmcsa.dot.gov', 'pragma': 'no-cache', 'referer':\n 'https://safer.fmcsa.dot.gov/CompanySnapshot.aspx', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'\n }\n proxies = {'http': proxy, 'https': proxy}\n company_name = cn\n tel_no = tel_no\n print('tel_no', tel_no)\n if tel_no is None:\n telephone = ''\n url = ''\n else:\n tel_url = form_google_tel_query(company_name, tel_no)\n tel_url = tel_url.replace('%2C', '')\n req = requests.get(tel_url, headers=headers, proxies=proxies)\n print('tel_url', tel_url)\n rep = req.text\n soup = BeautifulSoup(req.text, 'lxml')\n no_results = soup.find_all('div', attrs={'class':\n 's card-section rQUFld'})\n if no_results == []:\n print('MATCH')\n sleep(5)\n try:\n link = re.findall('class=\"yuRUbf\"><a href=\"(.*?)\"', str(rep))\n for li in link:\n try:\n req1 = requests.get(li, headers=headers, proxies=\n proxies)\n sleep(5)\n rep1 = req1.text\n soup1 = BeautifulSoup(req1.text, 'lxml')\n fullstring = str(soup1)\n substring = str(tel_no)\n if substring in fullstring:\n f = 'FOUND'\n print(f)\n telephone = str(tel_no)\n url = li\n break\n else:\n f = 'NOT FOUND'\n telephone = ''\n url = ''\n except requests.exceptions.SSLError as ssl_error:\n print('bad handshake')\n telephone = ''\n url = ''\n except:\n telephone = ''\n url = ''\n else:\n telephone = ''\n url = ''\n return telephone, url\n\n\ndef get_google_address(query, gmap, tel_no):\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'pragma': 'no-cache',\n 'upgrade-insecure-requests': '1', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n }\n proxies = {'http': proxy, 'https': proxy}\n url = form_google_query(query)\n search_url = url\n g_url = gmap\n response = google_get(url)\n sleep(5)\n content = response.content.decode('utf-8')\n print('so', content)\n soup = BeautifulSoup(content, 'lxml')\n print('so1', soup)\n address = soup.select_one(\n '[data-attrid=\"kc:/location/location:address\"] span.aCOpRe')\n print('add', address)\n address = address.get_text() if address else None\n if address is None:\n address = soup.find('div', attrs={'class': 'MWXBS'})\n if address is not None:\n address = address.text\n print('add-', address)\n else:\n address = soup.find('span', attrs={'class': 'LrzXr'})\n if address is not None:\n address = address.text\n print('add1', address)\n elif address is None:\n address = soup.find('span', attrs={'class': 'hgKElc'})\n if address is not None:\n address = address.text\n print('add:', address)\n elif address is None:\n url = (\n 'https://www.google.com/maps/search/?api=1&query=' +\n str(g_url))\n print('g_map_url', url)\n RegexList = []\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'accept-language':\n 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'pragma': 'no-cache',\n 'upgrade-insecure-requests': '1', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n }\n response = requests.get(url, headers=headers, proxies=\n proxies)\n responseContent = 
response.content.decode('utf-8',\n errors='ignore')\n addressRegex = (\n 'google.com\\\\/maps\\\\/preview\\\\/place\\\\/([^>]*?)\\\\/@')\n telephone_regex = ',\\\\[\\\\\\\\\"(\\\\+[^>]*?)\\\\s*\\\\\\\\\"'\n addressBlock = re.findall(addressRegex, responseContent,\n re.I)\n if len(addressBlock) >= 1:\n address = unquote(addressBlock[0].replace('+', ' '),\n encoding='utf-8', errors='ignore')\n print('address_map:', address)\n else:\n address = ''\n url = search_url\n print('url_s', url)\n response = requests.get(url, headers=headers,\n proxies=proxies)\n soup = BeautifulSoup(response.text, 'lxml')\n print('s', soup)\n try:\n df = soup.find('span', attrs={'class': 'aCOpRe'})\n for sd in df:\n address = sd.text\n print('address_search:', address)\n except:\n address = ''\n return address, url\n\n\n<function token>\n\n\ndef form_google_query(*args, **kwargs):\n query = []\n quoted = kwargs.get('quoted')\n directory = kwargs.get('directory')\n if directory is not None:\n query.append('site:{}'.format(get_domain(directory)))\n if quoted is not None:\n query.append('\"{}\"'.format(quoted))\n query = query + [field.strip() for field in args if field is not None]\n query = ', '.join(query)\n url = 'https://www.google.co.uk/search?q=&ie=UTF-8'\n scheme, netloc, path, query_string, fragment = urlsplit(url)\n query_params = parse_qs(query_string)\n query_params['q'] = [query]\n new_query_string = urlencode(query_params, doseq=True)\n url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return url\n\n\ndef form_google_tel_query(*args, **kwargs):\n query = []\n quoted = kwargs.get('quoted')\n directory = kwargs.get('directory')\n if directory is not None:\n query.append('site:{}'.format(get_domain(directory)))\n if quoted is not None:\n query.append('\"{}\"'.format(quoted))\n query = query + [field.strip() for field in args if field is not None]\n query = ', '.join(query)\n url = 'https://www.google.com/search?q='\n scheme, netloc, path, query_string, fragment = urlsplit(url)\n query_params = parse_qs(query_string)\n query_params['q'] = [query]\n new_query_string = urlencode(query_params, doseq=True)\n url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return url\n\n\ndef get_social_accounts(website, companyName):\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'content-type':\n 'application/x-www-form-urlencoded', 'origin':\n 'https://safer.fmcsa.dot.gov', 'pragma': 'no-cache', 'referer':\n 'https://safer.fmcsa.dot.gov/CompanySnapshot.aspx', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'\n }\n socialAccounts = {'twitter': [], 'facebook': [], 'linkedin': []}\n website = website.strip()\n print('website1;', website)\n if len(website) > 4 and website[0:4] != 'http':\n website = 'http://' + website\n try:\n response = requests.get(website, headers=headers, proxies=proxies)\n content = response.content\n print('content', content)\n status_code = response.status_code\n print('status_code', status_code)\n if status_code == 200:\n print('SUCCESS')\n else:\n print('FAILED')\n try:\n username = 'meritgroup'\n password = 'sXNdrc6JU'\n send_from = '[email protected]'\n send_to = '[email protected]'\n Cc = ['[email protected]',\n '[email protected]']\n msg = MIMEMultipart()\n msg['From'] = send_from\n 
msg['To'] = send_to\n msg['Cc'] = ', '.join(Cc)\n msg['Date'] = formatdate(localtime=True)\n msg['Subject'] = 'ALF AUTOMATION'\n templatePath = os.path.join(os.getcwd(), 'templates',\n 'Weekly_Email_Template.html')\n template = open(templatePath, 'r')\n server = smtplib.SMTP('74.80.234.196')\n port = '25'\n body = 'Body_of_the_mail'\n msg.attach(MIMEText(str(template.read()), 'html'))\n smtp = smtplib.SMTP('74.80.234.196')\n smtp.ehlo()\n smtp.starttls()\n smtp.login(username, password)\n smtp.sendmail(send_from, send_to.split(',') + msg['Cc'].\n split(','), msg.as_string())\n smtp.quit()\n except Exception as e:\n print('e', e)\n except Exception as e:\n content = str(e)\n soup = BeautifulSoup(content, 'html5lib')\n links = soup.find_all('a', href=True)\n smSites = ['twitter', 'facebook', 'linkedin']\n for smSite in smSites:\n accounts = []\n if smSite == 'linkedin':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'linkedin'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n print('gh', accounts)\n if smSite == 'twitter':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'twitter'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n print('gh', accounts)\n if smSite == 'facebook':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'facebook'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n if accounts:\n socialAccounts[smSite] = list(set(accounts))\n print('social', socialAccounts)\n return socialAccounts\n\n\nclass Driver:\n browser = 'chrome'\n\n def __enter__(self):\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n self.driver = self.initialize_driver(self.browser)\n return self\n\n def initialize_driver(self, browser):\n if browser == 'chrome':\n options = Options()\n options.add_argument('--disable-gpu')\n options.add_argument('--no-sandbox')\n options.add_argument('start-maximized')\n options.add_argument('disable-infobars')\n options.add_argument('--disable-logging')\n options.add_argument('--log-level=3')\n options.add_experimental_option('excludeSwitches', [\n 'ignore-certificate-errors'])\n proxy = choice(['172.27.140.48:3128', '172.27.140.48:3128'])\n prox = Proxy()\n prox.proxy_type = ProxyType.MANUAL\n prox.http_proxy = proxy\n prox.ssl_proxy = proxy\n capabilities = webdriver.DesiredCapabilities.CHROME\n prox.add_to_capabilities(capabilities)\n driver = webdriver.Chrome(chrome_options=options,\n desired_capabilities=capabilities, service_log_path='NULL')\n else:\n binary = 'C:\\\\Program Files\\\\Mozilla Firefox\\\\firefox.exe'\n options = Options()\n PROXY = '172.27.140.48:3128'\n options.add_argument('--headless')\n options.binary = binary\n PROXY = '172.27.140.48:3128'\n desired_capability = webdriver.DesiredCapabilities.FIREFOX\n desired_capability['proxy'] = {'proxyType': 'manual',\n 'httpProxy': PROXY, 'ftpProxy': PROXY, 
'sslProxy': PROXY}\n firefox_profile = webdriver.FirefoxProfile()\n firefox_profile.set_preference('browser.privatebrowsing.autostart',\n True)\n driver = webdriver.Firefox(firefox_profile=firefox_profile,\n firefox_binary=binary, firefox_options=options,\n capabilities=desired_capability)\n return driver\n\n def reset(self):\n self.quit()\n self.driver = self.initialize_driver(self.browser)\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n\n def get(self, url):\n if self.currentCount >= self.resetCount:\n self.reset()\n self.driver.get(url)\n self.currentCount += 1\n time.sleep(randint(1, 3))\n return self.driver.page_source\n\n def quit(self):\n self.driver.quit()\n\n def __exit__(self, type, value, traceback):\n self.quit()\n",
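Annotation: the result-parsing functions above boil down to CSS selectors over a Google result container. A network-free sketch against a canned HTML snippet (the markup below is invented to mirror the selectors; html.parser is used here instead of lxml to avoid the extra dependency):

import pandas as pd
from bs4 import BeautifulSoup

# Canned HTML standing in for a downloaded results page (invented sample).
html = '''
<div class="g">
  <div class="rc"><a href="https://example.com/contact">Example Co</a></div>
  <span class="st">Call <em>555-0100</em> or <em>555-0100</em> today.</span>
</div>
'''

soup = BeautifulSoup(html, 'html.parser')
for row in soup.select('div.g'):
    link = row.select_one('.rc a')
    link = link['href'] if link else None
    snippets = [em.get_text() for em in row.select('span em')]
    # Deduplicate while preserving order, as the original does via pd.Series.
    snippet = ', '.join(pd.Series(snippets).drop_duplicates().tolist())
    print(link, snippet)   # https://example.com/contact 555-0100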
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass Url(object):\n \"\"\"A url object that can be compared with other url orbjects\n without regard to the vagaries of encoding, escaping, and ordering\n of parameters in query strings.\"\"\"\n\n def __init__(self, url):\n parts = urlparse(url)\n _query = frozenset(parse_qsl(parts.query))\n _path = unquote_plus(parts.path)\n parts = parts._replace(query=_query, path=_path)\n self.parts = parts\n\n def __eq__(self, other):\n return (self.parts.path in other.parts.path or other.parts.path in\n self.parts.path)\n\n def __hash__(self):\n return hash(self.parts)\n\n\ndef clean_telephone(telephone):\n if telephone:\n telephone = telephone.replace(' ', '').replace('.', '').replace(')', ''\n ).replace('(', '').replace('-', '').replace('+', '').strip()\n if re.findall('\\\\d+', telephone):\n telephone = re.findall('\\\\d+', telephone)[0]\n if len(telephone) == 12:\n telephone = telephone[2:]\n return telephone\n\n\ndef get_domain(website):\n url = urlparse(website)\n domain = url.hostname\n if domain is None:\n url = urlparse('http://' + website)\n domain = url.hostname\n domain = domain.replace('www.', '').replace('www2.', '')\n return domain.lower()\n\n\ndef regex(pattern, string, default=None, get_one=False):\n matches = re.findall(pattern, string)\n if matches:\n if get_one is True:\n return matches[0]\n return matches\n else:\n return default\n\n\ndef get_search_results_site(address, website, full_content=False):\n domain = get_domain(website)\n url = form_google_query(address, directory=domain)\n response = google_get(url)\n content = response.content.decode('utf-8')\n soup = BeautifulSoup(content, 'lxml')\n referenceUrl, content = None, None\n for row in soup.select('div.g'):\n referenceUrl = row.select_one('.r a')\n referenceUrl = referenceUrl['href'] if referenceUrl else None\n contents = row.select('span.st') if full_content else row.select(\n 'span.st em')\n if contents:\n contents = [content.get_text() for content in contents]\n content = ', '.join(pd.Series(contents).drop_duplicates().tolist())\n break\n return referenceUrl, content\n\n\n<function token>\n\n\ndef google_get(url):\n proxies = {'http': proxy, 'https': proxy}\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'pragma': 'no-cache',\n 'upgrade-insecure-requests': '1', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n }\n return requests.get(url, headers=headers, proxies=proxies)\n\n\ndef get_google_address1(query, gmap, tel_no, cn):\n global telephone, url\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'content-type':\n 'application/x-www-form-urlencoded', 'origin':\n 'https://safer.fmcsa.dot.gov', 'pragma': 'no-cache', 'referer':\n 'https://safer.fmcsa.dot.gov/CompanySnapshot.aspx', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'\n }\n proxies = {'http': proxy, 'https': proxy}\n company_name = cn\n tel_no = tel_no\n print('tel_no', tel_no)\n if tel_no is None:\n telephone = ''\n url = ''\n else:\n tel_url = 
form_google_tel_query(company_name, tel_no)\n tel_url = tel_url.replace('%2C', '')\n req = requests.get(tel_url, headers=headers, proxies=proxies)\n print('tel_url', tel_url)\n rep = req.text\n soup = BeautifulSoup(req.text, 'lxml')\n no_results = soup.find_all('div', attrs={'class':\n 's card-section rQUFld'})\n if no_results == []:\n print('MATCH')\n sleep(5)\n try:\n link = re.findall('class=\"yuRUbf\"><a href=\"(.*?)\"', str(rep))\n for li in link:\n try:\n req1 = requests.get(li, headers=headers, proxies=\n proxies)\n sleep(5)\n rep1 = req1.text\n soup1 = BeautifulSoup(req1.text, 'lxml')\n fullstring = str(soup1)\n substring = str(tel_no)\n if substring in fullstring:\n f = 'FOUND'\n print(f)\n telephone = str(tel_no)\n url = li\n break\n else:\n f = 'NOT FOUND'\n telephone = ''\n url = ''\n except requests.exceptions.SSLError as ssl_error:\n print('bad handshake')\n telephone = ''\n url = ''\n except:\n telephone = ''\n url = ''\n else:\n telephone = ''\n url = ''\n return telephone, url\n\n\ndef get_google_address(query, gmap, tel_no):\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'pragma': 'no-cache',\n 'upgrade-insecure-requests': '1', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n }\n proxies = {'http': proxy, 'https': proxy}\n url = form_google_query(query)\n search_url = url\n g_url = gmap\n response = google_get(url)\n sleep(5)\n content = response.content.decode('utf-8')\n print('so', content)\n soup = BeautifulSoup(content, 'lxml')\n print('so1', soup)\n address = soup.select_one(\n '[data-attrid=\"kc:/location/location:address\"] span.aCOpRe')\n print('add', address)\n address = address.get_text() if address else None\n if address is None:\n address = soup.find('div', attrs={'class': 'MWXBS'})\n if address is not None:\n address = address.text\n print('add-', address)\n else:\n address = soup.find('span', attrs={'class': 'LrzXr'})\n if address is not None:\n address = address.text\n print('add1', address)\n elif address is None:\n address = soup.find('span', attrs={'class': 'hgKElc'})\n if address is not None:\n address = address.text\n print('add:', address)\n elif address is None:\n url = (\n 'https://www.google.com/maps/search/?api=1&query=' +\n str(g_url))\n print('g_map_url', url)\n RegexList = []\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'accept-language':\n 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'pragma': 'no-cache',\n 'upgrade-insecure-requests': '1', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n }\n response = requests.get(url, headers=headers, proxies=\n proxies)\n responseContent = response.content.decode('utf-8',\n errors='ignore')\n addressRegex = (\n 'google.com\\\\/maps\\\\/preview\\\\/place\\\\/([^>]*?)\\\\/@')\n telephone_regex = ',\\\\[\\\\\\\\\"(\\\\+[^>]*?)\\\\s*\\\\\\\\\"'\n addressBlock = re.findall(addressRegex, responseContent,\n re.I)\n if len(addressBlock) >= 1:\n address = unquote(addressBlock[0].replace('+', ' '),\n encoding='utf-8', errors='ignore')\n print('address_map:', address)\n else:\n address = ''\n url = search_url\n print('url_s', url)\n response = requests.get(url, headers=headers,\n proxies=proxies)\n 
soup = BeautifulSoup(response.text, 'lxml')\n print('s', soup)\n try:\n df = soup.find('span', attrs={'class': 'aCOpRe'})\n for sd in df:\n address = sd.text\n print('address_search:', address)\n except:\n address = ''\n return address, url\n\n\n<function token>\n\n\ndef form_google_query(*args, **kwargs):\n query = []\n quoted = kwargs.get('quoted')\n directory = kwargs.get('directory')\n if directory is not None:\n query.append('site:{}'.format(get_domain(directory)))\n if quoted is not None:\n query.append('\"{}\"'.format(quoted))\n query = query + [field.strip() for field in args if field is not None]\n query = ', '.join(query)\n url = 'https://www.google.co.uk/search?q=&ie=UTF-8'\n scheme, netloc, path, query_string, fragment = urlsplit(url)\n query_params = parse_qs(query_string)\n query_params['q'] = [query]\n new_query_string = urlencode(query_params, doseq=True)\n url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return url\n\n\ndef form_google_tel_query(*args, **kwargs):\n query = []\n quoted = kwargs.get('quoted')\n directory = kwargs.get('directory')\n if directory is not None:\n query.append('site:{}'.format(get_domain(directory)))\n if quoted is not None:\n query.append('\"{}\"'.format(quoted))\n query = query + [field.strip() for field in args if field is not None]\n query = ', '.join(query)\n url = 'https://www.google.com/search?q='\n scheme, netloc, path, query_string, fragment = urlsplit(url)\n query_params = parse_qs(query_string)\n query_params['q'] = [query]\n new_query_string = urlencode(query_params, doseq=True)\n url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return url\n\n\ndef get_social_accounts(website, companyName):\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'content-type':\n 'application/x-www-form-urlencoded', 'origin':\n 'https://safer.fmcsa.dot.gov', 'pragma': 'no-cache', 'referer':\n 'https://safer.fmcsa.dot.gov/CompanySnapshot.aspx', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'\n }\n socialAccounts = {'twitter': [], 'facebook': [], 'linkedin': []}\n website = website.strip()\n print('website1;', website)\n if len(website) > 4 and website[0:4] != 'http':\n website = 'http://' + website\n try:\n response = requests.get(website, headers=headers, proxies=proxies)\n content = response.content\n print('content', content)\n status_code = response.status_code\n print('status_code', status_code)\n if status_code == 200:\n print('SUCCESS')\n else:\n print('FAILED')\n try:\n username = 'meritgroup'\n password = 'sXNdrc6JU'\n send_from = '[email protected]'\n send_to = '[email protected]'\n Cc = ['[email protected]',\n '[email protected]']\n msg = MIMEMultipart()\n msg['From'] = send_from\n msg['To'] = send_to\n msg['Cc'] = ', '.join(Cc)\n msg['Date'] = formatdate(localtime=True)\n msg['Subject'] = 'ALF AUTOMATION'\n templatePath = os.path.join(os.getcwd(), 'templates',\n 'Weekly_Email_Template.html')\n template = open(templatePath, 'r')\n server = smtplib.SMTP('74.80.234.196')\n port = '25'\n body = 'Body_of_the_mail'\n msg.attach(MIMEText(str(template.read()), 'html'))\n smtp = smtplib.SMTP('74.80.234.196')\n smtp.ehlo()\n smtp.starttls()\n smtp.login(username, password)\n smtp.sendmail(send_from, send_to.split(',') + msg['Cc'].\n split(','), 
msg.as_string())\n smtp.quit()\n except Exception as e:\n print('e', e)\n except Exception as e:\n content = str(e)\n soup = BeautifulSoup(content, 'html5lib')\n links = soup.find_all('a', href=True)\n smSites = ['twitter', 'facebook', 'linkedin']\n for smSite in smSites:\n accounts = []\n if smSite == 'linkedin':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'linkedin'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n print('gh', accounts)\n if smSite == 'twitter':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'twitter'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n print('gh', accounts)\n if smSite == 'facebook':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'facebook'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n if accounts:\n socialAccounts[smSite] = list(set(accounts))\n print('social', socialAccounts)\n return socialAccounts\n\n\nclass Driver:\n browser = 'chrome'\n\n def __enter__(self):\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n self.driver = self.initialize_driver(self.browser)\n return self\n\n def initialize_driver(self, browser):\n if browser == 'chrome':\n options = Options()\n options.add_argument('--disable-gpu')\n options.add_argument('--no-sandbox')\n options.add_argument('start-maximized')\n options.add_argument('disable-infobars')\n options.add_argument('--disable-logging')\n options.add_argument('--log-level=3')\n options.add_experimental_option('excludeSwitches', [\n 'ignore-certificate-errors'])\n proxy = choice(['172.27.140.48:3128', '172.27.140.48:3128'])\n prox = Proxy()\n prox.proxy_type = ProxyType.MANUAL\n prox.http_proxy = proxy\n prox.ssl_proxy = proxy\n capabilities = webdriver.DesiredCapabilities.CHROME\n prox.add_to_capabilities(capabilities)\n driver = webdriver.Chrome(chrome_options=options,\n desired_capabilities=capabilities, service_log_path='NULL')\n else:\n binary = 'C:\\\\Program Files\\\\Mozilla Firefox\\\\firefox.exe'\n options = Options()\n PROXY = '172.27.140.48:3128'\n options.add_argument('--headless')\n options.binary = binary\n PROXY = '172.27.140.48:3128'\n desired_capability = webdriver.DesiredCapabilities.FIREFOX\n desired_capability['proxy'] = {'proxyType': 'manual',\n 'httpProxy': PROXY, 'ftpProxy': PROXY, 'sslProxy': PROXY}\n firefox_profile = webdriver.FirefoxProfile()\n firefox_profile.set_preference('browser.privatebrowsing.autostart',\n True)\n driver = webdriver.Firefox(firefox_profile=firefox_profile,\n firefox_binary=binary, firefox_options=options,\n capabilities=desired_capability)\n return driver\n\n def reset(self):\n self.quit()\n self.driver = self.initialize_driver(self.browser)\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n\n def get(self, url):\n if self.currentCount >= self.resetCount:\n self.reset()\n self.driver.get(url)\n 
self.currentCount += 1\n time.sleep(randint(1, 3))\n return self.driver.page_source\n\n def quit(self):\n self.driver.quit()\n\n def __exit__(self, type, value, traceback):\n self.quit()\n",
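Annotation: the `Driver` class wraps Selenium in a context manager that tears the browser down and rebuilds it after a random number of page loads. The recycling logic is independent of Selenium itself, as this stub-based sketch shows (`DummyDriver` is a stand-in invented for the example, not part of the original code):

from random import randint

class DummyDriver:
    """Stand-in for a Selenium WebDriver, used only to trace the pattern."""
    def get(self, url):
        self.page_source = '<html>%s</html>' % url
    def quit(self):
        print('driver quit')

class Driver:
    def __enter__(self):
        self.resetCount = randint(1, 3)   # pages to serve before recycling
        self.currentCount = 0
        self.driver = DummyDriver()
        return self
    def reset(self):
        self.quit()                       # discard the old session
        self.driver = DummyDriver()
        self.resetCount = randint(1, 3)
        self.currentCount = 0
    def get(self, url):
        if self.currentCount >= self.resetCount:
            self.reset()                  # recycle after N fetches
        self.driver.get(url)
        self.currentCount += 1
        return self.driver.page_source
    def quit(self):
        self.driver.quit()
    def __exit__(self, type, value, traceback):
        self.quit()

with Driver() as d:
    for i in range(5):
        d.get('http://example.com/page%d' % i)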
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass Url(object):\n \"\"\"A url object that can be compared with other url orbjects\n without regard to the vagaries of encoding, escaping, and ordering\n of parameters in query strings.\"\"\"\n\n def __init__(self, url):\n parts = urlparse(url)\n _query = frozenset(parse_qsl(parts.query))\n _path = unquote_plus(parts.path)\n parts = parts._replace(query=_query, path=_path)\n self.parts = parts\n\n def __eq__(self, other):\n return (self.parts.path in other.parts.path or other.parts.path in\n self.parts.path)\n\n def __hash__(self):\n return hash(self.parts)\n\n\n<function token>\n\n\ndef get_domain(website):\n url = urlparse(website)\n domain = url.hostname\n if domain is None:\n url = urlparse('http://' + website)\n domain = url.hostname\n domain = domain.replace('www.', '').replace('www2.', '')\n return domain.lower()\n\n\ndef regex(pattern, string, default=None, get_one=False):\n matches = re.findall(pattern, string)\n if matches:\n if get_one is True:\n return matches[0]\n return matches\n else:\n return default\n\n\ndef get_search_results_site(address, website, full_content=False):\n domain = get_domain(website)\n url = form_google_query(address, directory=domain)\n response = google_get(url)\n content = response.content.decode('utf-8')\n soup = BeautifulSoup(content, 'lxml')\n referenceUrl, content = None, None\n for row in soup.select('div.g'):\n referenceUrl = row.select_one('.r a')\n referenceUrl = referenceUrl['href'] if referenceUrl else None\n contents = row.select('span.st') if full_content else row.select(\n 'span.st em')\n if contents:\n contents = [content.get_text() for content in contents]\n content = ', '.join(pd.Series(contents).drop_duplicates().tolist())\n break\n return referenceUrl, content\n\n\n<function token>\n\n\ndef google_get(url):\n proxies = {'http': proxy, 'https': proxy}\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'pragma': 'no-cache',\n 'upgrade-insecure-requests': '1', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n }\n return requests.get(url, headers=headers, proxies=proxies)\n\n\ndef get_google_address1(query, gmap, tel_no, cn):\n global telephone, url\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'content-type':\n 'application/x-www-form-urlencoded', 'origin':\n 'https://safer.fmcsa.dot.gov', 'pragma': 'no-cache', 'referer':\n 'https://safer.fmcsa.dot.gov/CompanySnapshot.aspx', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'\n }\n proxies = {'http': proxy, 'https': proxy}\n company_name = cn\n tel_no = tel_no\n print('tel_no', tel_no)\n if tel_no is None:\n telephone = ''\n url = ''\n else:\n tel_url = form_google_tel_query(company_name, tel_no)\n tel_url = tel_url.replace('%2C', '')\n req = requests.get(tel_url, headers=headers, proxies=proxies)\n print('tel_url', tel_url)\n rep = req.text\n soup = BeautifulSoup(req.text, 'lxml')\n no_results = soup.find_all('div', attrs={'class':\n 's card-section rQUFld'})\n if no_results 
== []:\n print('MATCH')\n sleep(5)\n try:\n link = re.findall('class=\"yuRUbf\"><a href=\"(.*?)\"', str(rep))\n for li in link:\n try:\n req1 = requests.get(li, headers=headers, proxies=\n proxies)\n sleep(5)\n rep1 = req1.text\n soup1 = BeautifulSoup(req1.text, 'lxml')\n fullstring = str(soup1)\n substring = str(tel_no)\n if substring in fullstring:\n f = 'FOUND'\n print(f)\n telephone = str(tel_no)\n url = li\n break\n else:\n f = 'NOT FOUND'\n telephone = ''\n url = ''\n except requests.exceptions.SSLError as ssl_error:\n print('bad handshake')\n telephone = ''\n url = ''\n except:\n telephone = ''\n url = ''\n else:\n telephone = ''\n url = ''\n return telephone, url\n\n\ndef get_google_address(query, gmap, tel_no):\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'pragma': 'no-cache',\n 'upgrade-insecure-requests': '1', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n }\n proxies = {'http': proxy, 'https': proxy}\n url = form_google_query(query)\n search_url = url\n g_url = gmap\n response = google_get(url)\n sleep(5)\n content = response.content.decode('utf-8')\n print('so', content)\n soup = BeautifulSoup(content, 'lxml')\n print('so1', soup)\n address = soup.select_one(\n '[data-attrid=\"kc:/location/location:address\"] span.aCOpRe')\n print('add', address)\n address = address.get_text() if address else None\n if address is None:\n address = soup.find('div', attrs={'class': 'MWXBS'})\n if address is not None:\n address = address.text\n print('add-', address)\n else:\n address = soup.find('span', attrs={'class': 'LrzXr'})\n if address is not None:\n address = address.text\n print('add1', address)\n elif address is None:\n address = soup.find('span', attrs={'class': 'hgKElc'})\n if address is not None:\n address = address.text\n print('add:', address)\n elif address is None:\n url = (\n 'https://www.google.com/maps/search/?api=1&query=' +\n str(g_url))\n print('g_map_url', url)\n RegexList = []\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'accept-language':\n 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'pragma': 'no-cache',\n 'upgrade-insecure-requests': '1', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n }\n response = requests.get(url, headers=headers, proxies=\n proxies)\n responseContent = response.content.decode('utf-8',\n errors='ignore')\n addressRegex = (\n 'google.com\\\\/maps\\\\/preview\\\\/place\\\\/([^>]*?)\\\\/@')\n telephone_regex = ',\\\\[\\\\\\\\\"(\\\\+[^>]*?)\\\\s*\\\\\\\\\"'\n addressBlock = re.findall(addressRegex, responseContent,\n re.I)\n if len(addressBlock) >= 1:\n address = unquote(addressBlock[0].replace('+', ' '),\n encoding='utf-8', errors='ignore')\n print('address_map:', address)\n else:\n address = ''\n url = search_url\n print('url_s', url)\n response = requests.get(url, headers=headers,\n proxies=proxies)\n soup = BeautifulSoup(response.text, 'lxml')\n print('s', soup)\n try:\n df = soup.find('span', attrs={'class': 'aCOpRe'})\n for sd in df:\n address = sd.text\n print('address_search:', address)\n except:\n address = ''\n return address, url\n\n\n<function token>\n\n\ndef form_google_query(*args, **kwargs):\n query = []\n quoted 
= kwargs.get('quoted')\n directory = kwargs.get('directory')\n if directory is not None:\n query.append('site:{}'.format(get_domain(directory)))\n if quoted is not None:\n query.append('\"{}\"'.format(quoted))\n query = query + [field.strip() for field in args if field is not None]\n query = ', '.join(query)\n url = 'https://www.google.co.uk/search?q=&ie=UTF-8'\n scheme, netloc, path, query_string, fragment = urlsplit(url)\n query_params = parse_qs(query_string)\n query_params['q'] = [query]\n new_query_string = urlencode(query_params, doseq=True)\n url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return url\n\n\ndef form_google_tel_query(*args, **kwargs):\n query = []\n quoted = kwargs.get('quoted')\n directory = kwargs.get('directory')\n if directory is not None:\n query.append('site:{}'.format(get_domain(directory)))\n if quoted is not None:\n query.append('\"{}\"'.format(quoted))\n query = query + [field.strip() for field in args if field is not None]\n query = ', '.join(query)\n url = 'https://www.google.com/search?q='\n scheme, netloc, path, query_string, fragment = urlsplit(url)\n query_params = parse_qs(query_string)\n query_params['q'] = [query]\n new_query_string = urlencode(query_params, doseq=True)\n url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return url\n\n\ndef get_social_accounts(website, companyName):\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'content-type':\n 'application/x-www-form-urlencoded', 'origin':\n 'https://safer.fmcsa.dot.gov', 'pragma': 'no-cache', 'referer':\n 'https://safer.fmcsa.dot.gov/CompanySnapshot.aspx', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'\n }\n socialAccounts = {'twitter': [], 'facebook': [], 'linkedin': []}\n website = website.strip()\n print('website1;', website)\n if len(website) > 4 and website[0:4] != 'http':\n website = 'http://' + website\n try:\n response = requests.get(website, headers=headers, proxies=proxies)\n content = response.content\n print('content', content)\n status_code = response.status_code\n print('status_code', status_code)\n if status_code == 200:\n print('SUCCESS')\n else:\n print('FAILED')\n try:\n username = 'meritgroup'\n password = 'sXNdrc6JU'\n send_from = '[email protected]'\n send_to = '[email protected]'\n Cc = ['[email protected]',\n '[email protected]']\n msg = MIMEMultipart()\n msg['From'] = send_from\n msg['To'] = send_to\n msg['Cc'] = ', '.join(Cc)\n msg['Date'] = formatdate(localtime=True)\n msg['Subject'] = 'ALF AUTOMATION'\n templatePath = os.path.join(os.getcwd(), 'templates',\n 'Weekly_Email_Template.html')\n template = open(templatePath, 'r')\n server = smtplib.SMTP('74.80.234.196')\n port = '25'\n body = 'Body_of_the_mail'\n msg.attach(MIMEText(str(template.read()), 'html'))\n smtp = smtplib.SMTP('74.80.234.196')\n smtp.ehlo()\n smtp.starttls()\n smtp.login(username, password)\n smtp.sendmail(send_from, send_to.split(',') + msg['Cc'].\n split(','), msg.as_string())\n smtp.quit()\n except Exception as e:\n print('e', e)\n except Exception as e:\n content = str(e)\n soup = BeautifulSoup(content, 'html5lib')\n links = soup.find_all('a', href=True)\n smSites = ['twitter', 'facebook', 'linkedin']\n for smSite in smSites:\n accounts = []\n if smSite == 'linkedin':\n urll = 
'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'linkedin'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n print('gh', accounts)\n if smSite == 'twitter':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'twitter'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n print('gh', accounts)\n if smSite == 'facebook':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'facebook'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n if accounts:\n socialAccounts[smSite] = list(set(accounts))\n print('social', socialAccounts)\n return socialAccounts\n\n\nclass Driver:\n browser = 'chrome'\n\n def __enter__(self):\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n self.driver = self.initialize_driver(self.browser)\n return self\n\n def initialize_driver(self, browser):\n if browser == 'chrome':\n options = Options()\n options.add_argument('--disable-gpu')\n options.add_argument('--no-sandbox')\n options.add_argument('start-maximized')\n options.add_argument('disable-infobars')\n options.add_argument('--disable-logging')\n options.add_argument('--log-level=3')\n options.add_experimental_option('excludeSwitches', [\n 'ignore-certificate-errors'])\n proxy = choice(['172.27.140.48:3128', '172.27.140.48:3128'])\n prox = Proxy()\n prox.proxy_type = ProxyType.MANUAL\n prox.http_proxy = proxy\n prox.ssl_proxy = proxy\n capabilities = webdriver.DesiredCapabilities.CHROME\n prox.add_to_capabilities(capabilities)\n driver = webdriver.Chrome(chrome_options=options,\n desired_capabilities=capabilities, service_log_path='NULL')\n else:\n binary = 'C:\\\\Program Files\\\\Mozilla Firefox\\\\firefox.exe'\n options = Options()\n PROXY = '172.27.140.48:3128'\n options.add_argument('--headless')\n options.binary = binary\n PROXY = '172.27.140.48:3128'\n desired_capability = webdriver.DesiredCapabilities.FIREFOX\n desired_capability['proxy'] = {'proxyType': 'manual',\n 'httpProxy': PROXY, 'ftpProxy': PROXY, 'sslProxy': PROXY}\n firefox_profile = webdriver.FirefoxProfile()\n firefox_profile.set_preference('browser.privatebrowsing.autostart',\n True)\n driver = webdriver.Firefox(firefox_profile=firefox_profile,\n firefox_binary=binary, firefox_options=options,\n capabilities=desired_capability)\n return driver\n\n def reset(self):\n self.quit()\n self.driver = self.initialize_driver(self.browser)\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n\n def get(self, url):\n if self.currentCount >= self.resetCount:\n self.reset()\n self.driver.get(url)\n self.currentCount += 1\n time.sleep(randint(1, 3))\n return self.driver.page_source\n\n def quit(self):\n self.driver.quit()\n\n def __exit__(self, type, value, traceback):\n self.quit()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass Url(object):\n \"\"\"A url object that can be compared with other url orbjects\n without regard to the vagaries of encoding, escaping, and ordering\n of parameters in query strings.\"\"\"\n\n def __init__(self, url):\n parts = urlparse(url)\n _query = frozenset(parse_qsl(parts.query))\n _path = unquote_plus(parts.path)\n parts = parts._replace(query=_query, path=_path)\n self.parts = parts\n\n def __eq__(self, other):\n return (self.parts.path in other.parts.path or other.parts.path in\n self.parts.path)\n\n def __hash__(self):\n return hash(self.parts)\n\n\n<function token>\n\n\ndef get_domain(website):\n url = urlparse(website)\n domain = url.hostname\n if domain is None:\n url = urlparse('http://' + website)\n domain = url.hostname\n domain = domain.replace('www.', '').replace('www2.', '')\n return domain.lower()\n\n\ndef regex(pattern, string, default=None, get_one=False):\n matches = re.findall(pattern, string)\n if matches:\n if get_one is True:\n return matches[0]\n return matches\n else:\n return default\n\n\ndef get_search_results_site(address, website, full_content=False):\n domain = get_domain(website)\n url = form_google_query(address, directory=domain)\n response = google_get(url)\n content = response.content.decode('utf-8')\n soup = BeautifulSoup(content, 'lxml')\n referenceUrl, content = None, None\n for row in soup.select('div.g'):\n referenceUrl = row.select_one('.r a')\n referenceUrl = referenceUrl['href'] if referenceUrl else None\n contents = row.select('span.st') if full_content else row.select(\n 'span.st em')\n if contents:\n contents = [content.get_text() for content in contents]\n content = ', '.join(pd.Series(contents).drop_duplicates().tolist())\n break\n return referenceUrl, content\n\n\n<function token>\n\n\ndef google_get(url):\n proxies = {'http': proxy, 'https': proxy}\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'pragma': 'no-cache',\n 'upgrade-insecure-requests': '1', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n }\n return requests.get(url, headers=headers, proxies=proxies)\n\n\ndef get_google_address1(query, gmap, tel_no, cn):\n global telephone, url\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'content-type':\n 'application/x-www-form-urlencoded', 'origin':\n 'https://safer.fmcsa.dot.gov', 'pragma': 'no-cache', 'referer':\n 'https://safer.fmcsa.dot.gov/CompanySnapshot.aspx', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'\n }\n proxies = {'http': proxy, 'https': proxy}\n company_name = cn\n tel_no = tel_no\n print('tel_no', tel_no)\n if tel_no is None:\n telephone = ''\n url = ''\n else:\n tel_url = form_google_tel_query(company_name, tel_no)\n tel_url = tel_url.replace('%2C', '')\n req = requests.get(tel_url, headers=headers, proxies=proxies)\n print('tel_url', tel_url)\n rep = req.text\n soup = BeautifulSoup(req.text, 'lxml')\n no_results = soup.find_all('div', attrs={'class':\n 's card-section rQUFld'})\n if no_results 
== []:\n print('MATCH')\n sleep(5)\n try:\n link = re.findall('class=\"yuRUbf\"><a href=\"(.*?)\"', str(rep))\n for li in link:\n try:\n req1 = requests.get(li, headers=headers, proxies=\n proxies)\n sleep(5)\n rep1 = req1.text\n soup1 = BeautifulSoup(req1.text, 'lxml')\n fullstring = str(soup1)\n substring = str(tel_no)\n if substring in fullstring:\n f = 'FOUND'\n print(f)\n telephone = str(tel_no)\n url = li\n break\n else:\n f = 'NOT FOUND'\n telephone = ''\n url = ''\n except requests.exceptions.SSLError as ssl_error:\n print('bad handshake')\n telephone = ''\n url = ''\n except:\n telephone = ''\n url = ''\n else:\n telephone = ''\n url = ''\n return telephone, url\n\n\ndef get_google_address(query, gmap, tel_no):\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'pragma': 'no-cache',\n 'upgrade-insecure-requests': '1', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n }\n proxies = {'http': proxy, 'https': proxy}\n url = form_google_query(query)\n search_url = url\n g_url = gmap\n response = google_get(url)\n sleep(5)\n content = response.content.decode('utf-8')\n print('so', content)\n soup = BeautifulSoup(content, 'lxml')\n print('so1', soup)\n address = soup.select_one(\n '[data-attrid=\"kc:/location/location:address\"] span.aCOpRe')\n print('add', address)\n address = address.get_text() if address else None\n if address is None:\n address = soup.find('div', attrs={'class': 'MWXBS'})\n if address is not None:\n address = address.text\n print('add-', address)\n else:\n address = soup.find('span', attrs={'class': 'LrzXr'})\n if address is not None:\n address = address.text\n print('add1', address)\n elif address is None:\n address = soup.find('span', attrs={'class': 'hgKElc'})\n if address is not None:\n address = address.text\n print('add:', address)\n elif address is None:\n url = (\n 'https://www.google.com/maps/search/?api=1&query=' +\n str(g_url))\n print('g_map_url', url)\n RegexList = []\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'accept-language':\n 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'pragma': 'no-cache',\n 'upgrade-insecure-requests': '1', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n }\n response = requests.get(url, headers=headers, proxies=\n proxies)\n responseContent = response.content.decode('utf-8',\n errors='ignore')\n addressRegex = (\n 'google.com\\\\/maps\\\\/preview\\\\/place\\\\/([^>]*?)\\\\/@')\n telephone_regex = ',\\\\[\\\\\\\\\"(\\\\+[^>]*?)\\\\s*\\\\\\\\\"'\n addressBlock = re.findall(addressRegex, responseContent,\n re.I)\n if len(addressBlock) >= 1:\n address = unquote(addressBlock[0].replace('+', ' '),\n encoding='utf-8', errors='ignore')\n print('address_map:', address)\n else:\n address = ''\n url = search_url\n print('url_s', url)\n response = requests.get(url, headers=headers,\n proxies=proxies)\n soup = BeautifulSoup(response.text, 'lxml')\n print('s', soup)\n try:\n df = soup.find('span', attrs={'class': 'aCOpRe'})\n for sd in df:\n address = sd.text\n print('address_search:', address)\n except:\n address = ''\n return address, url\n\n\n<function token>\n\n\ndef form_google_query(*args, **kwargs):\n query = []\n quoted 
= kwargs.get('quoted')\n directory = kwargs.get('directory')\n if directory is not None:\n query.append('site:{}'.format(get_domain(directory)))\n if quoted is not None:\n query.append('\"{}\"'.format(quoted))\n query = query + [field.strip() for field in args if field is not None]\n query = ', '.join(query)\n url = 'https://www.google.co.uk/search?q=&ie=UTF-8'\n scheme, netloc, path, query_string, fragment = urlsplit(url)\n query_params = parse_qs(query_string)\n query_params['q'] = [query]\n new_query_string = urlencode(query_params, doseq=True)\n url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return url\n\n\n<function token>\n\n\ndef get_social_accounts(website, companyName):\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'content-type':\n 'application/x-www-form-urlencoded', 'origin':\n 'https://safer.fmcsa.dot.gov', 'pragma': 'no-cache', 'referer':\n 'https://safer.fmcsa.dot.gov/CompanySnapshot.aspx', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'\n }\n socialAccounts = {'twitter': [], 'facebook': [], 'linkedin': []}\n website = website.strip()\n print('website1;', website)\n if len(website) > 4 and website[0:4] != 'http':\n website = 'http://' + website\n try:\n response = requests.get(website, headers=headers, proxies=proxies)\n content = response.content\n print('content', content)\n status_code = response.status_code\n print('status_code', status_code)\n if status_code == 200:\n print('SUCCESS')\n else:\n print('FAILED')\n try:\n username = 'meritgroup'\n password = 'sXNdrc6JU'\n send_from = '[email protected]'\n send_to = '[email protected]'\n Cc = ['[email protected]',\n '[email protected]']\n msg = MIMEMultipart()\n msg['From'] = send_from\n msg['To'] = send_to\n msg['Cc'] = ', '.join(Cc)\n msg['Date'] = formatdate(localtime=True)\n msg['Subject'] = 'ALF AUTOMATION'\n templatePath = os.path.join(os.getcwd(), 'templates',\n 'Weekly_Email_Template.html')\n template = open(templatePath, 'r')\n server = smtplib.SMTP('74.80.234.196')\n port = '25'\n body = 'Body_of_the_mail'\n msg.attach(MIMEText(str(template.read()), 'html'))\n smtp = smtplib.SMTP('74.80.234.196')\n smtp.ehlo()\n smtp.starttls()\n smtp.login(username, password)\n smtp.sendmail(send_from, send_to.split(',') + msg['Cc'].\n split(','), msg.as_string())\n smtp.quit()\n except Exception as e:\n print('e', e)\n except Exception as e:\n content = str(e)\n soup = BeautifulSoup(content, 'html5lib')\n links = soup.find_all('a', href=True)\n smSites = ['twitter', 'facebook', 'linkedin']\n for smSite in smSites:\n accounts = []\n if smSite == 'linkedin':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'linkedin'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n print('gh', accounts)\n if smSite == 'twitter':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'twitter'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 
'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n print('gh', accounts)\n if smSite == 'facebook':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'facebook'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n if accounts:\n socialAccounts[smSite] = list(set(accounts))\n print('social', socialAccounts)\n return socialAccounts\n\n\nclass Driver:\n browser = 'chrome'\n\n def __enter__(self):\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n self.driver = self.initialize_driver(self.browser)\n return self\n\n def initialize_driver(self, browser):\n if browser == 'chrome':\n options = Options()\n options.add_argument('--disable-gpu')\n options.add_argument('--no-sandbox')\n options.add_argument('start-maximized')\n options.add_argument('disable-infobars')\n options.add_argument('--disable-logging')\n options.add_argument('--log-level=3')\n options.add_experimental_option('excludeSwitches', [\n 'ignore-certificate-errors'])\n proxy = choice(['172.27.140.48:3128', '172.27.140.48:3128'])\n prox = Proxy()\n prox.proxy_type = ProxyType.MANUAL\n prox.http_proxy = proxy\n prox.ssl_proxy = proxy\n capabilities = webdriver.DesiredCapabilities.CHROME\n prox.add_to_capabilities(capabilities)\n driver = webdriver.Chrome(chrome_options=options,\n desired_capabilities=capabilities, service_log_path='NULL')\n else:\n binary = 'C:\\\\Program Files\\\\Mozilla Firefox\\\\firefox.exe'\n options = Options()\n PROXY = '172.27.140.48:3128'\n options.add_argument('--headless')\n options.binary = binary\n PROXY = '172.27.140.48:3128'\n desired_capability = webdriver.DesiredCapabilities.FIREFOX\n desired_capability['proxy'] = {'proxyType': 'manual',\n 'httpProxy': PROXY, 'ftpProxy': PROXY, 'sslProxy': PROXY}\n firefox_profile = webdriver.FirefoxProfile()\n firefox_profile.set_preference('browser.privatebrowsing.autostart',\n True)\n driver = webdriver.Firefox(firefox_profile=firefox_profile,\n firefox_binary=binary, firefox_options=options,\n capabilities=desired_capability)\n return driver\n\n def reset(self):\n self.quit()\n self.driver = self.initialize_driver(self.browser)\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n\n def get(self, url):\n if self.currentCount >= self.resetCount:\n self.reset()\n self.driver.get(url)\n self.currentCount += 1\n time.sleep(randint(1, 3))\n return self.driver.page_source\n\n def quit(self):\n self.driver.quit()\n\n def __exit__(self, type, value, traceback):\n self.quit()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass Url(object):\n \"\"\"A url object that can be compared with other url orbjects\n without regard to the vagaries of encoding, escaping, and ordering\n of parameters in query strings.\"\"\"\n\n def __init__(self, url):\n parts = urlparse(url)\n _query = frozenset(parse_qsl(parts.query))\n _path = unquote_plus(parts.path)\n parts = parts._replace(query=_query, path=_path)\n self.parts = parts\n\n def __eq__(self, other):\n return (self.parts.path in other.parts.path or other.parts.path in\n self.parts.path)\n\n def __hash__(self):\n return hash(self.parts)\n\n\n<function token>\n\n\ndef get_domain(website):\n url = urlparse(website)\n domain = url.hostname\n if domain is None:\n url = urlparse('http://' + website)\n domain = url.hostname\n domain = domain.replace('www.', '').replace('www2.', '')\n return domain.lower()\n\n\ndef regex(pattern, string, default=None, get_one=False):\n matches = re.findall(pattern, string)\n if matches:\n if get_one is True:\n return matches[0]\n return matches\n else:\n return default\n\n\ndef get_search_results_site(address, website, full_content=False):\n domain = get_domain(website)\n url = form_google_query(address, directory=domain)\n response = google_get(url)\n content = response.content.decode('utf-8')\n soup = BeautifulSoup(content, 'lxml')\n referenceUrl, content = None, None\n for row in soup.select('div.g'):\n referenceUrl = row.select_one('.r a')\n referenceUrl = referenceUrl['href'] if referenceUrl else None\n contents = row.select('span.st') if full_content else row.select(\n 'span.st em')\n if contents:\n contents = [content.get_text() for content in contents]\n content = ', '.join(pd.Series(contents).drop_duplicates().tolist())\n break\n return referenceUrl, content\n\n\n<function token>\n<function token>\n\n\ndef get_google_address1(query, gmap, tel_no, cn):\n global telephone, url\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'content-type':\n 'application/x-www-form-urlencoded', 'origin':\n 'https://safer.fmcsa.dot.gov', 'pragma': 'no-cache', 'referer':\n 'https://safer.fmcsa.dot.gov/CompanySnapshot.aspx', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'\n }\n proxies = {'http': proxy, 'https': proxy}\n company_name = cn\n tel_no = tel_no\n print('tel_no', tel_no)\n if tel_no is None:\n telephone = ''\n url = ''\n else:\n tel_url = form_google_tel_query(company_name, tel_no)\n tel_url = tel_url.replace('%2C', '')\n req = requests.get(tel_url, headers=headers, proxies=proxies)\n print('tel_url', tel_url)\n rep = req.text\n soup = BeautifulSoup(req.text, 'lxml')\n no_results = soup.find_all('div', attrs={'class':\n 's card-section rQUFld'})\n if no_results == []:\n print('MATCH')\n sleep(5)\n try:\n link = re.findall('class=\"yuRUbf\"><a href=\"(.*?)\"', str(rep))\n for li in link:\n try:\n req1 = requests.get(li, headers=headers, proxies=\n proxies)\n sleep(5)\n rep1 = req1.text\n soup1 = BeautifulSoup(req1.text, 'lxml')\n fullstring = str(soup1)\n substring = str(tel_no)\n if substring in fullstring:\n f = 'FOUND'\n print(f)\n telephone = str(tel_no)\n url = li\n break\n else:\n f = 'NOT FOUND'\n telephone = ''\n url = ''\n except 
requests.exceptions.SSLError as ssl_error:\n print('bad handshake')\n telephone = ''\n url = ''\n except:\n telephone = ''\n url = ''\n else:\n telephone = ''\n url = ''\n return telephone, url\n\n\ndef get_google_address(query, gmap, tel_no):\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'pragma': 'no-cache',\n 'upgrade-insecure-requests': '1', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n }\n proxies = {'http': proxy, 'https': proxy}\n url = form_google_query(query)\n search_url = url\n g_url = gmap\n response = google_get(url)\n sleep(5)\n content = response.content.decode('utf-8')\n print('so', content)\n soup = BeautifulSoup(content, 'lxml')\n print('so1', soup)\n address = soup.select_one(\n '[data-attrid=\"kc:/location/location:address\"] span.aCOpRe')\n print('add', address)\n address = address.get_text() if address else None\n if address is None:\n address = soup.find('div', attrs={'class': 'MWXBS'})\n if address is not None:\n address = address.text\n print('add-', address)\n else:\n address = soup.find('span', attrs={'class': 'LrzXr'})\n if address is not None:\n address = address.text\n print('add1', address)\n elif address is None:\n address = soup.find('span', attrs={'class': 'hgKElc'})\n if address is not None:\n address = address.text\n print('add:', address)\n elif address is None:\n url = (\n 'https://www.google.com/maps/search/?api=1&query=' +\n str(g_url))\n print('g_map_url', url)\n RegexList = []\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'accept-language':\n 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'pragma': 'no-cache',\n 'upgrade-insecure-requests': '1', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n }\n response = requests.get(url, headers=headers, proxies=\n proxies)\n responseContent = response.content.decode('utf-8',\n errors='ignore')\n addressRegex = (\n 'google.com\\\\/maps\\\\/preview\\\\/place\\\\/([^>]*?)\\\\/@')\n telephone_regex = ',\\\\[\\\\\\\\\"(\\\\+[^>]*?)\\\\s*\\\\\\\\\"'\n addressBlock = re.findall(addressRegex, responseContent,\n re.I)\n if len(addressBlock) >= 1:\n address = unquote(addressBlock[0].replace('+', ' '),\n encoding='utf-8', errors='ignore')\n print('address_map:', address)\n else:\n address = ''\n url = search_url\n print('url_s', url)\n response = requests.get(url, headers=headers,\n proxies=proxies)\n soup = BeautifulSoup(response.text, 'lxml')\n print('s', soup)\n try:\n df = soup.find('span', attrs={'class': 'aCOpRe'})\n for sd in df:\n address = sd.text\n print('address_search:', address)\n except:\n address = ''\n return address, url\n\n\n<function token>\n\n\ndef form_google_query(*args, **kwargs):\n query = []\n quoted = kwargs.get('quoted')\n directory = kwargs.get('directory')\n if directory is not None:\n query.append('site:{}'.format(get_domain(directory)))\n if quoted is not None:\n query.append('\"{}\"'.format(quoted))\n query = query + [field.strip() for field in args if field is not None]\n query = ', '.join(query)\n url = 'https://www.google.co.uk/search?q=&ie=UTF-8'\n scheme, netloc, path, query_string, fragment = urlsplit(url)\n query_params = parse_qs(query_string)\n 
query_params['q'] = [query]\n new_query_string = urlencode(query_params, doseq=True)\n url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return url\n\n\n<function token>\n\n\ndef get_social_accounts(website, companyName):\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'content-type':\n 'application/x-www-form-urlencoded', 'origin':\n 'https://safer.fmcsa.dot.gov', 'pragma': 'no-cache', 'referer':\n 'https://safer.fmcsa.dot.gov/CompanySnapshot.aspx', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'\n }\n socialAccounts = {'twitter': [], 'facebook': [], 'linkedin': []}\n website = website.strip()\n print('website1;', website)\n if len(website) > 4 and website[0:4] != 'http':\n website = 'http://' + website\n try:\n response = requests.get(website, headers=headers, proxies=proxies)\n content = response.content\n print('content', content)\n status_code = response.status_code\n print('status_code', status_code)\n if status_code == 200:\n print('SUCCESS')\n else:\n print('FAILED')\n try:\n username = 'meritgroup'\n password = 'sXNdrc6JU'\n send_from = '[email protected]'\n send_to = '[email protected]'\n Cc = ['[email protected]',\n '[email protected]']\n msg = MIMEMultipart()\n msg['From'] = send_from\n msg['To'] = send_to\n msg['Cc'] = ', '.join(Cc)\n msg['Date'] = formatdate(localtime=True)\n msg['Subject'] = 'ALF AUTOMATION'\n templatePath = os.path.join(os.getcwd(), 'templates',\n 'Weekly_Email_Template.html')\n template = open(templatePath, 'r')\n server = smtplib.SMTP('74.80.234.196')\n port = '25'\n body = 'Body_of_the_mail'\n msg.attach(MIMEText(str(template.read()), 'html'))\n smtp = smtplib.SMTP('74.80.234.196')\n smtp.ehlo()\n smtp.starttls()\n smtp.login(username, password)\n smtp.sendmail(send_from, send_to.split(',') + msg['Cc'].\n split(','), msg.as_string())\n smtp.quit()\n except Exception as e:\n print('e', e)\n except Exception as e:\n content = str(e)\n soup = BeautifulSoup(content, 'html5lib')\n links = soup.find_all('a', href=True)\n smSites = ['twitter', 'facebook', 'linkedin']\n for smSite in smSites:\n accounts = []\n if smSite == 'linkedin':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'linkedin'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n print('gh', accounts)\n if smSite == 'twitter':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'twitter'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n print('gh', accounts)\n if smSite == 'facebook':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'facebook'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n 
accounts.append(link)\n if accounts:\n socialAccounts[smSite] = list(set(accounts))\n print('social', socialAccounts)\n return socialAccounts\n\n\nclass Driver:\n browser = 'chrome'\n\n def __enter__(self):\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n self.driver = self.initialize_driver(self.browser)\n return self\n\n def initialize_driver(self, browser):\n if browser == 'chrome':\n options = Options()\n options.add_argument('--disable-gpu')\n options.add_argument('--no-sandbox')\n options.add_argument('start-maximized')\n options.add_argument('disable-infobars')\n options.add_argument('--disable-logging')\n options.add_argument('--log-level=3')\n options.add_experimental_option('excludeSwitches', [\n 'ignore-certificate-errors'])\n proxy = choice(['172.27.140.48:3128', '172.27.140.48:3128'])\n prox = Proxy()\n prox.proxy_type = ProxyType.MANUAL\n prox.http_proxy = proxy\n prox.ssl_proxy = proxy\n capabilities = webdriver.DesiredCapabilities.CHROME\n prox.add_to_capabilities(capabilities)\n driver = webdriver.Chrome(chrome_options=options,\n desired_capabilities=capabilities, service_log_path='NULL')\n else:\n binary = 'C:\\\\Program Files\\\\Mozilla Firefox\\\\firefox.exe'\n options = Options()\n PROXY = '172.27.140.48:3128'\n options.add_argument('--headless')\n options.binary = binary\n PROXY = '172.27.140.48:3128'\n desired_capability = webdriver.DesiredCapabilities.FIREFOX\n desired_capability['proxy'] = {'proxyType': 'manual',\n 'httpProxy': PROXY, 'ftpProxy': PROXY, 'sslProxy': PROXY}\n firefox_profile = webdriver.FirefoxProfile()\n firefox_profile.set_preference('browser.privatebrowsing.autostart',\n True)\n driver = webdriver.Firefox(firefox_profile=firefox_profile,\n firefox_binary=binary, firefox_options=options,\n capabilities=desired_capability)\n return driver\n\n def reset(self):\n self.quit()\n self.driver = self.initialize_driver(self.browser)\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n\n def get(self, url):\n if self.currentCount >= self.resetCount:\n self.reset()\n self.driver.get(url)\n self.currentCount += 1\n time.sleep(randint(1, 3))\n return self.driver.page_source\n\n def quit(self):\n self.driver.quit()\n\n def __exit__(self, type, value, traceback):\n self.quit()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass Url(object):\n \"\"\"A url object that can be compared with other url orbjects\n without regard to the vagaries of encoding, escaping, and ordering\n of parameters in query strings.\"\"\"\n\n def __init__(self, url):\n parts = urlparse(url)\n _query = frozenset(parse_qsl(parts.query))\n _path = unquote_plus(parts.path)\n parts = parts._replace(query=_query, path=_path)\n self.parts = parts\n\n def __eq__(self, other):\n return (self.parts.path in other.parts.path or other.parts.path in\n self.parts.path)\n\n def __hash__(self):\n return hash(self.parts)\n\n\n<function token>\n<function token>\n\n\ndef regex(pattern, string, default=None, get_one=False):\n matches = re.findall(pattern, string)\n if matches:\n if get_one is True:\n return matches[0]\n return matches\n else:\n return default\n\n\ndef get_search_results_site(address, website, full_content=False):\n domain = get_domain(website)\n url = form_google_query(address, directory=domain)\n response = google_get(url)\n content = response.content.decode('utf-8')\n soup = BeautifulSoup(content, 'lxml')\n referenceUrl, content = None, None\n for row in soup.select('div.g'):\n referenceUrl = row.select_one('.r a')\n referenceUrl = referenceUrl['href'] if referenceUrl else None\n contents = row.select('span.st') if full_content else row.select(\n 'span.st em')\n if contents:\n contents = [content.get_text() for content in contents]\n content = ', '.join(pd.Series(contents).drop_duplicates().tolist())\n break\n return referenceUrl, content\n\n\n<function token>\n<function token>\n\n\ndef get_google_address1(query, gmap, tel_no, cn):\n global telephone, url\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'content-type':\n 'application/x-www-form-urlencoded', 'origin':\n 'https://safer.fmcsa.dot.gov', 'pragma': 'no-cache', 'referer':\n 'https://safer.fmcsa.dot.gov/CompanySnapshot.aspx', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'\n }\n proxies = {'http': proxy, 'https': proxy}\n company_name = cn\n tel_no = tel_no\n print('tel_no', tel_no)\n if tel_no is None:\n telephone = ''\n url = ''\n else:\n tel_url = form_google_tel_query(company_name, tel_no)\n tel_url = tel_url.replace('%2C', '')\n req = requests.get(tel_url, headers=headers, proxies=proxies)\n print('tel_url', tel_url)\n rep = req.text\n soup = BeautifulSoup(req.text, 'lxml')\n no_results = soup.find_all('div', attrs={'class':\n 's card-section rQUFld'})\n if no_results == []:\n print('MATCH')\n sleep(5)\n try:\n link = re.findall('class=\"yuRUbf\"><a href=\"(.*?)\"', str(rep))\n for li in link:\n try:\n req1 = requests.get(li, headers=headers, proxies=\n proxies)\n sleep(5)\n rep1 = req1.text\n soup1 = BeautifulSoup(req1.text, 'lxml')\n fullstring = str(soup1)\n substring = str(tel_no)\n if substring in fullstring:\n f = 'FOUND'\n print(f)\n telephone = str(tel_no)\n url = li\n break\n else:\n f = 'NOT FOUND'\n telephone = ''\n url = ''\n except requests.exceptions.SSLError as ssl_error:\n print('bad handshake')\n telephone = ''\n url = ''\n except:\n telephone = ''\n url = ''\n else:\n telephone = ''\n url = ''\n return telephone, url\n\n\ndef get_google_address(query, gmap, tel_no):\n headers = 
{'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'pragma': 'no-cache',\n 'upgrade-insecure-requests': '1', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n }\n proxies = {'http': proxy, 'https': proxy}\n url = form_google_query(query)\n search_url = url\n g_url = gmap\n response = google_get(url)\n sleep(5)\n content = response.content.decode('utf-8')\n print('so', content)\n soup = BeautifulSoup(content, 'lxml')\n print('so1', soup)\n address = soup.select_one(\n '[data-attrid=\"kc:/location/location:address\"] span.aCOpRe')\n print('add', address)\n address = address.get_text() if address else None\n if address is None:\n address = soup.find('div', attrs={'class': 'MWXBS'})\n if address is not None:\n address = address.text\n print('add-', address)\n else:\n address = soup.find('span', attrs={'class': 'LrzXr'})\n if address is not None:\n address = address.text\n print('add1', address)\n elif address is None:\n address = soup.find('span', attrs={'class': 'hgKElc'})\n if address is not None:\n address = address.text\n print('add:', address)\n elif address is None:\n url = (\n 'https://www.google.com/maps/search/?api=1&query=' +\n str(g_url))\n print('g_map_url', url)\n RegexList = []\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'accept-language':\n 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'pragma': 'no-cache',\n 'upgrade-insecure-requests': '1', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n }\n response = requests.get(url, headers=headers, proxies=\n proxies)\n responseContent = response.content.decode('utf-8',\n errors='ignore')\n addressRegex = (\n 'google.com\\\\/maps\\\\/preview\\\\/place\\\\/([^>]*?)\\\\/@')\n telephone_regex = ',\\\\[\\\\\\\\\"(\\\\+[^>]*?)\\\\s*\\\\\\\\\"'\n addressBlock = re.findall(addressRegex, responseContent,\n re.I)\n if len(addressBlock) >= 1:\n address = unquote(addressBlock[0].replace('+', ' '),\n encoding='utf-8', errors='ignore')\n print('address_map:', address)\n else:\n address = ''\n url = search_url\n print('url_s', url)\n response = requests.get(url, headers=headers,\n proxies=proxies)\n soup = BeautifulSoup(response.text, 'lxml')\n print('s', soup)\n try:\n df = soup.find('span', attrs={'class': 'aCOpRe'})\n for sd in df:\n address = sd.text\n print('address_search:', address)\n except:\n address = ''\n return address, url\n\n\n<function token>\n\n\ndef form_google_query(*args, **kwargs):\n query = []\n quoted = kwargs.get('quoted')\n directory = kwargs.get('directory')\n if directory is not None:\n query.append('site:{}'.format(get_domain(directory)))\n if quoted is not None:\n query.append('\"{}\"'.format(quoted))\n query = query + [field.strip() for field in args if field is not None]\n query = ', '.join(query)\n url = 'https://www.google.co.uk/search?q=&ie=UTF-8'\n scheme, netloc, path, query_string, fragment = urlsplit(url)\n query_params = parse_qs(query_string)\n query_params['q'] = [query]\n new_query_string = urlencode(query_params, doseq=True)\n url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return url\n\n\n<function token>\n\n\ndef get_social_accounts(website, companyName):\n headers = {'accept':\n 
'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'content-type':\n 'application/x-www-form-urlencoded', 'origin':\n 'https://safer.fmcsa.dot.gov', 'pragma': 'no-cache', 'referer':\n 'https://safer.fmcsa.dot.gov/CompanySnapshot.aspx', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'\n }\n socialAccounts = {'twitter': [], 'facebook': [], 'linkedin': []}\n website = website.strip()\n print('website1;', website)\n if len(website) > 4 and website[0:4] != 'http':\n website = 'http://' + website\n try:\n response = requests.get(website, headers=headers, proxies=proxies)\n content = response.content\n print('content', content)\n status_code = response.status_code\n print('status_code', status_code)\n if status_code == 200:\n print('SUCCESS')\n else:\n print('FAILED')\n try:\n username = 'meritgroup'\n password = 'sXNdrc6JU'\n send_from = '[email protected]'\n send_to = '[email protected]'\n Cc = ['[email protected]',\n '[email protected]']\n msg = MIMEMultipart()\n msg['From'] = send_from\n msg['To'] = send_to\n msg['Cc'] = ', '.join(Cc)\n msg['Date'] = formatdate(localtime=True)\n msg['Subject'] = 'ALF AUTOMATION'\n templatePath = os.path.join(os.getcwd(), 'templates',\n 'Weekly_Email_Template.html')\n template = open(templatePath, 'r')\n server = smtplib.SMTP('74.80.234.196')\n port = '25'\n body = 'Body_of_the_mail'\n msg.attach(MIMEText(str(template.read()), 'html'))\n smtp = smtplib.SMTP('74.80.234.196')\n smtp.ehlo()\n smtp.starttls()\n smtp.login(username, password)\n smtp.sendmail(send_from, send_to.split(',') + msg['Cc'].\n split(','), msg.as_string())\n smtp.quit()\n except Exception as e:\n print('e', e)\n except Exception as e:\n content = str(e)\n soup = BeautifulSoup(content, 'html5lib')\n links = soup.find_all('a', href=True)\n smSites = ['twitter', 'facebook', 'linkedin']\n for smSite in smSites:\n accounts = []\n if smSite == 'linkedin':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'linkedin'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n print('gh', accounts)\n if smSite == 'twitter':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'twitter'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n print('gh', accounts)\n if smSite == 'facebook':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'facebook'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n if accounts:\n socialAccounts[smSite] = list(set(accounts))\n print('social', socialAccounts)\n return socialAccounts\n\n\nclass Driver:\n browser = 'chrome'\n\n def __enter__(self):\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n 
self.driver = self.initialize_driver(self.browser)\n return self\n\n def initialize_driver(self, browser):\n if browser == 'chrome':\n options = Options()\n options.add_argument('--disable-gpu')\n options.add_argument('--no-sandbox')\n options.add_argument('start-maximized')\n options.add_argument('disable-infobars')\n options.add_argument('--disable-logging')\n options.add_argument('--log-level=3')\n options.add_experimental_option('excludeSwitches', [\n 'ignore-certificate-errors'])\n proxy = choice(['172.27.140.48:3128', '172.27.140.48:3128'])\n prox = Proxy()\n prox.proxy_type = ProxyType.MANUAL\n prox.http_proxy = proxy\n prox.ssl_proxy = proxy\n capabilities = webdriver.DesiredCapabilities.CHROME\n prox.add_to_capabilities(capabilities)\n driver = webdriver.Chrome(chrome_options=options,\n desired_capabilities=capabilities, service_log_path='NULL')\n else:\n binary = 'C:\\\\Program Files\\\\Mozilla Firefox\\\\firefox.exe'\n options = Options()\n PROXY = '172.27.140.48:3128'\n options.add_argument('--headless')\n options.binary = binary\n PROXY = '172.27.140.48:3128'\n desired_capability = webdriver.DesiredCapabilities.FIREFOX\n desired_capability['proxy'] = {'proxyType': 'manual',\n 'httpProxy': PROXY, 'ftpProxy': PROXY, 'sslProxy': PROXY}\n firefox_profile = webdriver.FirefoxProfile()\n firefox_profile.set_preference('browser.privatebrowsing.autostart',\n True)\n driver = webdriver.Firefox(firefox_profile=firefox_profile,\n firefox_binary=binary, firefox_options=options,\n capabilities=desired_capability)\n return driver\n\n def reset(self):\n self.quit()\n self.driver = self.initialize_driver(self.browser)\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n\n def get(self, url):\n if self.currentCount >= self.resetCount:\n self.reset()\n self.driver.get(url)\n self.currentCount += 1\n time.sleep(randint(1, 3))\n return self.driver.page_source\n\n def quit(self):\n self.driver.quit()\n\n def __exit__(self, type, value, traceback):\n self.quit()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass Url(object):\n \"\"\"A url object that can be compared with other url orbjects\n without regard to the vagaries of encoding, escaping, and ordering\n of parameters in query strings.\"\"\"\n\n def __init__(self, url):\n parts = urlparse(url)\n _query = frozenset(parse_qsl(parts.query))\n _path = unquote_plus(parts.path)\n parts = parts._replace(query=_query, path=_path)\n self.parts = parts\n\n def __eq__(self, other):\n return (self.parts.path in other.parts.path or other.parts.path in\n self.parts.path)\n\n def __hash__(self):\n return hash(self.parts)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef get_search_results_site(address, website, full_content=False):\n domain = get_domain(website)\n url = form_google_query(address, directory=domain)\n response = google_get(url)\n content = response.content.decode('utf-8')\n soup = BeautifulSoup(content, 'lxml')\n referenceUrl, content = None, None\n for row in soup.select('div.g'):\n referenceUrl = row.select_one('.r a')\n referenceUrl = referenceUrl['href'] if referenceUrl else None\n contents = row.select('span.st') if full_content else row.select(\n 'span.st em')\n if contents:\n contents = [content.get_text() for content in contents]\n content = ', '.join(pd.Series(contents).drop_duplicates().tolist())\n break\n return referenceUrl, content\n\n\n<function token>\n<function token>\n\n\ndef get_google_address1(query, gmap, tel_no, cn):\n global telephone, url\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'content-type':\n 'application/x-www-form-urlencoded', 'origin':\n 'https://safer.fmcsa.dot.gov', 'pragma': 'no-cache', 'referer':\n 'https://safer.fmcsa.dot.gov/CompanySnapshot.aspx', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'\n }\n proxies = {'http': proxy, 'https': proxy}\n company_name = cn\n tel_no = tel_no\n print('tel_no', tel_no)\n if tel_no is None:\n telephone = ''\n url = ''\n else:\n tel_url = form_google_tel_query(company_name, tel_no)\n tel_url = tel_url.replace('%2C', '')\n req = requests.get(tel_url, headers=headers, proxies=proxies)\n print('tel_url', tel_url)\n rep = req.text\n soup = BeautifulSoup(req.text, 'lxml')\n no_results = soup.find_all('div', attrs={'class':\n 's card-section rQUFld'})\n if no_results == []:\n print('MATCH')\n sleep(5)\n try:\n link = re.findall('class=\"yuRUbf\"><a href=\"(.*?)\"', str(rep))\n for li in link:\n try:\n req1 = requests.get(li, headers=headers, proxies=\n proxies)\n sleep(5)\n rep1 = req1.text\n soup1 = BeautifulSoup(req1.text, 'lxml')\n fullstring = str(soup1)\n substring = str(tel_no)\n if substring in fullstring:\n f = 'FOUND'\n print(f)\n telephone = str(tel_no)\n url = li\n break\n else:\n f = 'NOT FOUND'\n telephone = ''\n url = ''\n except requests.exceptions.SSLError as ssl_error:\n print('bad handshake')\n telephone = ''\n url = ''\n except:\n telephone = ''\n url = ''\n else:\n telephone = ''\n url = ''\n return telephone, url\n\n\ndef get_google_address(query, gmap, tel_no):\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 
'no-cache', 'pragma': 'no-cache',\n 'upgrade-insecure-requests': '1', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n }\n proxies = {'http': proxy, 'https': proxy}\n url = form_google_query(query)\n search_url = url\n g_url = gmap\n response = google_get(url)\n sleep(5)\n content = response.content.decode('utf-8')\n print('so', content)\n soup = BeautifulSoup(content, 'lxml')\n print('so1', soup)\n address = soup.select_one(\n '[data-attrid=\"kc:/location/location:address\"] span.aCOpRe')\n print('add', address)\n address = address.get_text() if address else None\n if address is None:\n address = soup.find('div', attrs={'class': 'MWXBS'})\n if address is not None:\n address = address.text\n print('add-', address)\n else:\n address = soup.find('span', attrs={'class': 'LrzXr'})\n if address is not None:\n address = address.text\n print('add1', address)\n elif address is None:\n address = soup.find('span', attrs={'class': 'hgKElc'})\n if address is not None:\n address = address.text\n print('add:', address)\n elif address is None:\n url = (\n 'https://www.google.com/maps/search/?api=1&query=' +\n str(g_url))\n print('g_map_url', url)\n RegexList = []\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'accept-language':\n 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'pragma': 'no-cache',\n 'upgrade-insecure-requests': '1', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n }\n response = requests.get(url, headers=headers, proxies=\n proxies)\n responseContent = response.content.decode('utf-8',\n errors='ignore')\n addressRegex = (\n 'google.com\\\\/maps\\\\/preview\\\\/place\\\\/([^>]*?)\\\\/@')\n telephone_regex = ',\\\\[\\\\\\\\\"(\\\\+[^>]*?)\\\\s*\\\\\\\\\"'\n addressBlock = re.findall(addressRegex, responseContent,\n re.I)\n if len(addressBlock) >= 1:\n address = unquote(addressBlock[0].replace('+', ' '),\n encoding='utf-8', errors='ignore')\n print('address_map:', address)\n else:\n address = ''\n url = search_url\n print('url_s', url)\n response = requests.get(url, headers=headers,\n proxies=proxies)\n soup = BeautifulSoup(response.text, 'lxml')\n print('s', soup)\n try:\n df = soup.find('span', attrs={'class': 'aCOpRe'})\n for sd in df:\n address = sd.text\n print('address_search:', address)\n except:\n address = ''\n return address, url\n\n\n<function token>\n\n\ndef form_google_query(*args, **kwargs):\n query = []\n quoted = kwargs.get('quoted')\n directory = kwargs.get('directory')\n if directory is not None:\n query.append('site:{}'.format(get_domain(directory)))\n if quoted is not None:\n query.append('\"{}\"'.format(quoted))\n query = query + [field.strip() for field in args if field is not None]\n query = ', '.join(query)\n url = 'https://www.google.co.uk/search?q=&ie=UTF-8'\n scheme, netloc, path, query_string, fragment = urlsplit(url)\n query_params = parse_qs(query_string)\n query_params['q'] = [query]\n new_query_string = urlencode(query_params, doseq=True)\n url = urlunsplit((scheme, netloc, path, new_query_string, fragment))\n return url\n\n\n<function token>\n\n\ndef get_social_accounts(website, companyName):\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 
'cache-control': 'no-cache', 'content-type':\n 'application/x-www-form-urlencoded', 'origin':\n 'https://safer.fmcsa.dot.gov', 'pragma': 'no-cache', 'referer':\n 'https://safer.fmcsa.dot.gov/CompanySnapshot.aspx', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'\n }\n socialAccounts = {'twitter': [], 'facebook': [], 'linkedin': []}\n website = website.strip()\n print('website1;', website)\n if len(website) > 4 and website[0:4] != 'http':\n website = 'http://' + website\n try:\n response = requests.get(website, headers=headers, proxies=proxies)\n content = response.content\n print('content', content)\n status_code = response.status_code\n print('status_code', status_code)\n if status_code == 200:\n print('SUCCESS')\n else:\n print('FAILED')\n try:\n username = 'meritgroup'\n password = 'sXNdrc6JU'\n send_from = '[email protected]'\n send_to = '[email protected]'\n Cc = ['[email protected]',\n '[email protected]']\n msg = MIMEMultipart()\n msg['From'] = send_from\n msg['To'] = send_to\n msg['Cc'] = ', '.join(Cc)\n msg['Date'] = formatdate(localtime=True)\n msg['Subject'] = 'ALF AUTOMATION'\n templatePath = os.path.join(os.getcwd(), 'templates',\n 'Weekly_Email_Template.html')\n template = open(templatePath, 'r')\n server = smtplib.SMTP('74.80.234.196')\n port = '25'\n body = 'Body_of_the_mail'\n msg.attach(MIMEText(str(template.read()), 'html'))\n smtp = smtplib.SMTP('74.80.234.196')\n smtp.ehlo()\n smtp.starttls()\n smtp.login(username, password)\n smtp.sendmail(send_from, send_to.split(',') + msg['Cc'].\n split(','), msg.as_string())\n smtp.quit()\n except Exception as e:\n print('e', e)\n except Exception as e:\n content = str(e)\n soup = BeautifulSoup(content, 'html5lib')\n links = soup.find_all('a', href=True)\n smSites = ['twitter', 'facebook', 'linkedin']\n for smSite in smSites:\n accounts = []\n if smSite == 'linkedin':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'linkedin'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n print('gh', accounts)\n if smSite == 'twitter':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'twitter'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n print('gh', accounts)\n if smSite == 'facebook':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'facebook'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n if accounts:\n socialAccounts[smSite] = list(set(accounts))\n print('social', socialAccounts)\n return socialAccounts\n\n\nclass Driver:\n browser = 'chrome'\n\n def __enter__(self):\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n self.driver = self.initialize_driver(self.browser)\n return self\n\n def initialize_driver(self, browser):\n if browser == 'chrome':\n options = Options()\n 
options.add_argument('--disable-gpu')\n options.add_argument('--no-sandbox')\n options.add_argument('start-maximized')\n options.add_argument('disable-infobars')\n options.add_argument('--disable-logging')\n options.add_argument('--log-level=3')\n options.add_experimental_option('excludeSwitches', [\n 'ignore-certificate-errors'])\n proxy = choice(['172.27.140.48:3128', '172.27.140.48:3128'])\n prox = Proxy()\n prox.proxy_type = ProxyType.MANUAL\n prox.http_proxy = proxy\n prox.ssl_proxy = proxy\n capabilities = webdriver.DesiredCapabilities.CHROME\n prox.add_to_capabilities(capabilities)\n driver = webdriver.Chrome(chrome_options=options,\n desired_capabilities=capabilities, service_log_path='NULL')\n else:\n binary = 'C:\\\\Program Files\\\\Mozilla Firefox\\\\firefox.exe'\n options = Options()\n PROXY = '172.27.140.48:3128'\n options.add_argument('--headless')\n options.binary = binary\n PROXY = '172.27.140.48:3128'\n desired_capability = webdriver.DesiredCapabilities.FIREFOX\n desired_capability['proxy'] = {'proxyType': 'manual',\n 'httpProxy': PROXY, 'ftpProxy': PROXY, 'sslProxy': PROXY}\n firefox_profile = webdriver.FirefoxProfile()\n firefox_profile.set_preference('browser.privatebrowsing.autostart',\n True)\n driver = webdriver.Firefox(firefox_profile=firefox_profile,\n firefox_binary=binary, firefox_options=options,\n capabilities=desired_capability)\n return driver\n\n def reset(self):\n self.quit()\n self.driver = self.initialize_driver(self.browser)\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n\n def get(self, url):\n if self.currentCount >= self.resetCount:\n self.reset()\n self.driver.get(url)\n self.currentCount += 1\n time.sleep(randint(1, 3))\n return self.driver.page_source\n\n def quit(self):\n self.driver.quit()\n\n def __exit__(self, type, value, traceback):\n self.quit()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass Url(object):\n \"\"\"A url object that can be compared with other url orbjects\n without regard to the vagaries of encoding, escaping, and ordering\n of parameters in query strings.\"\"\"\n\n def __init__(self, url):\n parts = urlparse(url)\n _query = frozenset(parse_qsl(parts.query))\n _path = unquote_plus(parts.path)\n parts = parts._replace(query=_query, path=_path)\n self.parts = parts\n\n def __eq__(self, other):\n return (self.parts.path in other.parts.path or other.parts.path in\n self.parts.path)\n\n def __hash__(self):\n return hash(self.parts)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef get_search_results_site(address, website, full_content=False):\n domain = get_domain(website)\n url = form_google_query(address, directory=domain)\n response = google_get(url)\n content = response.content.decode('utf-8')\n soup = BeautifulSoup(content, 'lxml')\n referenceUrl, content = None, None\n for row in soup.select('div.g'):\n referenceUrl = row.select_one('.r a')\n referenceUrl = referenceUrl['href'] if referenceUrl else None\n contents = row.select('span.st') if full_content else row.select(\n 'span.st em')\n if contents:\n contents = [content.get_text() for content in contents]\n content = ', '.join(pd.Series(contents).drop_duplicates().tolist())\n break\n return referenceUrl, content\n\n\n<function token>\n<function token>\n\n\ndef get_google_address1(query, gmap, tel_no, cn):\n global telephone, url\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'content-type':\n 'application/x-www-form-urlencoded', 'origin':\n 'https://safer.fmcsa.dot.gov', 'pragma': 'no-cache', 'referer':\n 'https://safer.fmcsa.dot.gov/CompanySnapshot.aspx', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'\n }\n proxies = {'http': proxy, 'https': proxy}\n company_name = cn\n tel_no = tel_no\n print('tel_no', tel_no)\n if tel_no is None:\n telephone = ''\n url = ''\n else:\n tel_url = form_google_tel_query(company_name, tel_no)\n tel_url = tel_url.replace('%2C', '')\n req = requests.get(tel_url, headers=headers, proxies=proxies)\n print('tel_url', tel_url)\n rep = req.text\n soup = BeautifulSoup(req.text, 'lxml')\n no_results = soup.find_all('div', attrs={'class':\n 's card-section rQUFld'})\n if no_results == []:\n print('MATCH')\n sleep(5)\n try:\n link = re.findall('class=\"yuRUbf\"><a href=\"(.*?)\"', str(rep))\n for li in link:\n try:\n req1 = requests.get(li, headers=headers, proxies=\n proxies)\n sleep(5)\n rep1 = req1.text\n soup1 = BeautifulSoup(req1.text, 'lxml')\n fullstring = str(soup1)\n substring = str(tel_no)\n if substring in fullstring:\n f = 'FOUND'\n print(f)\n telephone = str(tel_no)\n url = li\n break\n else:\n f = 'NOT FOUND'\n telephone = ''\n url = ''\n except requests.exceptions.SSLError as ssl_error:\n print('bad handshake')\n telephone = ''\n url = ''\n except:\n telephone = ''\n url = ''\n else:\n telephone = ''\n url = ''\n return telephone, url\n\n\ndef get_google_address(query, gmap, tel_no):\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 
'no-cache', 'pragma': 'no-cache',\n 'upgrade-insecure-requests': '1', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n }\n proxies = {'http': proxy, 'https': proxy}\n url = form_google_query(query)\n search_url = url\n g_url = gmap\n response = google_get(url)\n sleep(5)\n content = response.content.decode('utf-8')\n print('so', content)\n soup = BeautifulSoup(content, 'lxml')\n print('so1', soup)\n address = soup.select_one(\n '[data-attrid=\"kc:/location/location:address\"] span.aCOpRe')\n print('add', address)\n address = address.get_text() if address else None\n if address is None:\n address = soup.find('div', attrs={'class': 'MWXBS'})\n if address is not None:\n address = address.text\n print('add-', address)\n else:\n address = soup.find('span', attrs={'class': 'LrzXr'})\n if address is not None:\n address = address.text\n print('add1', address)\n elif address is None:\n address = soup.find('span', attrs={'class': 'hgKElc'})\n if address is not None:\n address = address.text\n print('add:', address)\n elif address is None:\n url = (\n 'https://www.google.com/maps/search/?api=1&query=' +\n str(g_url))\n print('g_map_url', url)\n RegexList = []\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'accept-language':\n 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'pragma': 'no-cache',\n 'upgrade-insecure-requests': '1', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n }\n response = requests.get(url, headers=headers, proxies=\n proxies)\n responseContent = response.content.decode('utf-8',\n errors='ignore')\n addressRegex = (\n 'google.com\\\\/maps\\\\/preview\\\\/place\\\\/([^>]*?)\\\\/@')\n telephone_regex = ',\\\\[\\\\\\\\\"(\\\\+[^>]*?)\\\\s*\\\\\\\\\"'\n addressBlock = re.findall(addressRegex, responseContent,\n re.I)\n if len(addressBlock) >= 1:\n address = unquote(addressBlock[0].replace('+', ' '),\n encoding='utf-8', errors='ignore')\n print('address_map:', address)\n else:\n address = ''\n url = search_url\n print('url_s', url)\n response = requests.get(url, headers=headers,\n proxies=proxies)\n soup = BeautifulSoup(response.text, 'lxml')\n print('s', soup)\n try:\n df = soup.find('span', attrs={'class': 'aCOpRe'})\n for sd in df:\n address = sd.text\n print('address_search:', address)\n except:\n address = ''\n return address, url\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef get_social_accounts(website, companyName):\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'content-type':\n 'application/x-www-form-urlencoded', 'origin':\n 'https://safer.fmcsa.dot.gov', 'pragma': 'no-cache', 'referer':\n 'https://safer.fmcsa.dot.gov/CompanySnapshot.aspx', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'\n }\n socialAccounts = {'twitter': [], 'facebook': [], 'linkedin': []}\n website = website.strip()\n print('website1;', website)\n if len(website) > 4 and website[0:4] != 'http':\n website = 'http://' + website\n try:\n response = requests.get(website, headers=headers, proxies=proxies)\n content = response.content\n print('content', content)\n 
status_code = response.status_code\n print('status_code', status_code)\n if status_code == 200:\n print('SUCCESS')\n else:\n print('FAILED')\n try:\n username = 'meritgroup'\n password = 'sXNdrc6JU'\n send_from = '[email protected]'\n send_to = '[email protected]'\n Cc = ['[email protected]',\n '[email protected]']\n msg = MIMEMultipart()\n msg['From'] = send_from\n msg['To'] = send_to\n msg['Cc'] = ', '.join(Cc)\n msg['Date'] = formatdate(localtime=True)\n msg['Subject'] = 'ALF AUTOMATION'\n templatePath = os.path.join(os.getcwd(), 'templates',\n 'Weekly_Email_Template.html')\n template = open(templatePath, 'r')\n server = smtplib.SMTP('74.80.234.196')\n port = '25'\n body = 'Body_of_the_mail'\n msg.attach(MIMEText(str(template.read()), 'html'))\n smtp = smtplib.SMTP('74.80.234.196')\n smtp.ehlo()\n smtp.starttls()\n smtp.login(username, password)\n smtp.sendmail(send_from, send_to.split(',') + msg['Cc'].\n split(','), msg.as_string())\n smtp.quit()\n except Exception as e:\n print('e', e)\n except Exception as e:\n content = str(e)\n soup = BeautifulSoup(content, 'html5lib')\n links = soup.find_all('a', href=True)\n smSites = ['twitter', 'facebook', 'linkedin']\n for smSite in smSites:\n accounts = []\n if smSite == 'linkedin':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'linkedin'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n print('gh', accounts)\n if smSite == 'twitter':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'twitter'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n print('gh', accounts)\n if smSite == 'facebook':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'facebook'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n if accounts:\n socialAccounts[smSite] = list(set(accounts))\n print('social', socialAccounts)\n return socialAccounts\n\n\nclass Driver:\n browser = 'chrome'\n\n def __enter__(self):\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n self.driver = self.initialize_driver(self.browser)\n return self\n\n def initialize_driver(self, browser):\n if browser == 'chrome':\n options = Options()\n options.add_argument('--disable-gpu')\n options.add_argument('--no-sandbox')\n options.add_argument('start-maximized')\n options.add_argument('disable-infobars')\n options.add_argument('--disable-logging')\n options.add_argument('--log-level=3')\n options.add_experimental_option('excludeSwitches', [\n 'ignore-certificate-errors'])\n proxy = choice(['172.27.140.48:3128', '172.27.140.48:3128'])\n prox = Proxy()\n prox.proxy_type = ProxyType.MANUAL\n prox.http_proxy = proxy\n prox.ssl_proxy = proxy\n capabilities = webdriver.DesiredCapabilities.CHROME\n prox.add_to_capabilities(capabilities)\n driver = webdriver.Chrome(chrome_options=options,\n desired_capabilities=capabilities, service_log_path='NULL')\n else:\n 
binary = 'C:\\\\Program Files\\\\Mozilla Firefox\\\\firefox.exe'\n options = Options()\n PROXY = '172.27.140.48:3128'\n options.add_argument('--headless')\n options.binary = binary\n PROXY = '172.27.140.48:3128'\n desired_capability = webdriver.DesiredCapabilities.FIREFOX\n desired_capability['proxy'] = {'proxyType': 'manual',\n 'httpProxy': PROXY, 'ftpProxy': PROXY, 'sslProxy': PROXY}\n firefox_profile = webdriver.FirefoxProfile()\n firefox_profile.set_preference('browser.privatebrowsing.autostart',\n True)\n driver = webdriver.Firefox(firefox_profile=firefox_profile,\n firefox_binary=binary, firefox_options=options,\n capabilities=desired_capability)\n return driver\n\n def reset(self):\n self.quit()\n self.driver = self.initialize_driver(self.browser)\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n\n def get(self, url):\n if self.currentCount >= self.resetCount:\n self.reset()\n self.driver.get(url)\n self.currentCount += 1\n time.sleep(randint(1, 3))\n return self.driver.page_source\n\n def quit(self):\n self.driver.quit()\n\n def __exit__(self, type, value, traceback):\n self.quit()\n",
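The Url class recorded above normalizes the path with unquote_plus and the query with parse_qsl/frozenset, but as written its __eq__ only tests substring containment of the two paths and never consults the query, while __hash__ hashes the full normalized tuple; two objects can therefore compare equal yet hash differently. A self-contained demonstration (the class body is copied verbatim from the record purely to show this behavior):

from urllib.parse import urlparse, parse_qsl, unquote_plus


class Url(object):
    # Verbatim copy of the recorded class, for demonstration only.

    def __init__(self, url):
        parts = urlparse(url)
        _query = frozenset(parse_qsl(parts.query))
        _path = unquote_plus(parts.path)
        parts = parts._replace(query=_query, path=_path)
        self.parts = parts

    def __eq__(self, other):
        return (self.parts.path in other.parts.path
                or other.parts.path in self.parts.path)

    def __hash__(self):
        return hash(self.parts)


a = Url('http://example.com/contact?x=1&y=2')
b = Url('http://example.com/contact?y=2&x=1')
c = Url('http://example.com/contact?z=3')

assert a == b and hash(a) == hash(b)  # query order is normalized away
assert a == c                          # equal by path alone; query ignored
assert hash(a) != hash(c)              # ...yet the hashes differ (collisions aside)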
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass Url(object):\n \"\"\"A url object that can be compared with other url orbjects\n without regard to the vagaries of encoding, escaping, and ordering\n of parameters in query strings.\"\"\"\n\n def __init__(self, url):\n parts = urlparse(url)\n _query = frozenset(parse_qsl(parts.query))\n _path = unquote_plus(parts.path)\n parts = parts._replace(query=_query, path=_path)\n self.parts = parts\n\n def __eq__(self, other):\n return (self.parts.path in other.parts.path or other.parts.path in\n self.parts.path)\n\n def __hash__(self):\n return hash(self.parts)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef get_search_results_site(address, website, full_content=False):\n domain = get_domain(website)\n url = form_google_query(address, directory=domain)\n response = google_get(url)\n content = response.content.decode('utf-8')\n soup = BeautifulSoup(content, 'lxml')\n referenceUrl, content = None, None\n for row in soup.select('div.g'):\n referenceUrl = row.select_one('.r a')\n referenceUrl = referenceUrl['href'] if referenceUrl else None\n contents = row.select('span.st') if full_content else row.select(\n 'span.st em')\n if contents:\n contents = [content.get_text() for content in contents]\n content = ', '.join(pd.Series(contents).drop_duplicates().tolist())\n break\n return referenceUrl, content\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef get_google_address(query, gmap, tel_no):\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'pragma': 'no-cache',\n 'upgrade-insecure-requests': '1', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n }\n proxies = {'http': proxy, 'https': proxy}\n url = form_google_query(query)\n search_url = url\n g_url = gmap\n response = google_get(url)\n sleep(5)\n content = response.content.decode('utf-8')\n print('so', content)\n soup = BeautifulSoup(content, 'lxml')\n print('so1', soup)\n address = soup.select_one(\n '[data-attrid=\"kc:/location/location:address\"] span.aCOpRe')\n print('add', address)\n address = address.get_text() if address else None\n if address is None:\n address = soup.find('div', attrs={'class': 'MWXBS'})\n if address is not None:\n address = address.text\n print('add-', address)\n else:\n address = soup.find('span', attrs={'class': 'LrzXr'})\n if address is not None:\n address = address.text\n print('add1', address)\n elif address is None:\n address = soup.find('span', attrs={'class': 'hgKElc'})\n if address is not None:\n address = address.text\n print('add:', address)\n elif address is None:\n url = (\n 'https://www.google.com/maps/search/?api=1&query=' +\n str(g_url))\n print('g_map_url', url)\n RegexList = []\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'\n , 'accept-language':\n 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'pragma': 'no-cache',\n 'upgrade-insecure-requests': '1', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36'\n }\n response = requests.get(url, headers=headers, proxies=\n proxies)\n responseContent = response.content.decode('utf-8',\n errors='ignore')\n 
addressRegex = (\n 'google.com\\\\/maps\\\\/preview\\\\/place\\\\/([^>]*?)\\\\/@')\n telephone_regex = ',\\\\[\\\\\\\\\"(\\\\+[^>]*?)\\\\s*\\\\\\\\\"'\n addressBlock = re.findall(addressRegex, responseContent,\n re.I)\n if len(addressBlock) >= 1:\n address = unquote(addressBlock[0].replace('+', ' '),\n encoding='utf-8', errors='ignore')\n print('address_map:', address)\n else:\n address = ''\n url = search_url\n print('url_s', url)\n response = requests.get(url, headers=headers,\n proxies=proxies)\n soup = BeautifulSoup(response.text, 'lxml')\n print('s', soup)\n try:\n df = soup.find('span', attrs={'class': 'aCOpRe'})\n for sd in df:\n address = sd.text\n print('address_search:', address)\n except:\n address = ''\n return address, url\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef get_social_accounts(website, companyName):\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'content-type':\n 'application/x-www-form-urlencoded', 'origin':\n 'https://safer.fmcsa.dot.gov', 'pragma': 'no-cache', 'referer':\n 'https://safer.fmcsa.dot.gov/CompanySnapshot.aspx', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'\n }\n socialAccounts = {'twitter': [], 'facebook': [], 'linkedin': []}\n website = website.strip()\n print('website1;', website)\n if len(website) > 4 and website[0:4] != 'http':\n website = 'http://' + website\n try:\n response = requests.get(website, headers=headers, proxies=proxies)\n content = response.content\n print('content', content)\n status_code = response.status_code\n print('status_code', status_code)\n if status_code == 200:\n print('SUCCESS')\n else:\n print('FAILED')\n try:\n username = 'meritgroup'\n password = 'sXNdrc6JU'\n send_from = '[email protected]'\n send_to = '[email protected]'\n Cc = ['[email protected]',\n '[email protected]']\n msg = MIMEMultipart()\n msg['From'] = send_from\n msg['To'] = send_to\n msg['Cc'] = ', '.join(Cc)\n msg['Date'] = formatdate(localtime=True)\n msg['Subject'] = 'ALF AUTOMATION'\n templatePath = os.path.join(os.getcwd(), 'templates',\n 'Weekly_Email_Template.html')\n template = open(templatePath, 'r')\n server = smtplib.SMTP('74.80.234.196')\n port = '25'\n body = 'Body_of_the_mail'\n msg.attach(MIMEText(str(template.read()), 'html'))\n smtp = smtplib.SMTP('74.80.234.196')\n smtp.ehlo()\n smtp.starttls()\n smtp.login(username, password)\n smtp.sendmail(send_from, send_to.split(',') + msg['Cc'].\n split(','), msg.as_string())\n smtp.quit()\n except Exception as e:\n print('e', e)\n except Exception as e:\n content = str(e)\n soup = BeautifulSoup(content, 'html5lib')\n links = soup.find_all('a', href=True)\n smSites = ['twitter', 'facebook', 'linkedin']\n for smSite in smSites:\n accounts = []\n if smSite == 'linkedin':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'linkedin'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n print('gh', accounts)\n if smSite == 'twitter':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'twitter'\n print(urll)\n req = requests.get(urll, 
headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n print('gh', accounts)\n if smSite == 'facebook':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'facebook'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n if accounts:\n socialAccounts[smSite] = list(set(accounts))\n print('social', socialAccounts)\n return socialAccounts\n\n\nclass Driver:\n browser = 'chrome'\n\n def __enter__(self):\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n self.driver = self.initialize_driver(self.browser)\n return self\n\n def initialize_driver(self, browser):\n if browser == 'chrome':\n options = Options()\n options.add_argument('--disable-gpu')\n options.add_argument('--no-sandbox')\n options.add_argument('start-maximized')\n options.add_argument('disable-infobars')\n options.add_argument('--disable-logging')\n options.add_argument('--log-level=3')\n options.add_experimental_option('excludeSwitches', [\n 'ignore-certificate-errors'])\n proxy = choice(['172.27.140.48:3128', '172.27.140.48:3128'])\n prox = Proxy()\n prox.proxy_type = ProxyType.MANUAL\n prox.http_proxy = proxy\n prox.ssl_proxy = proxy\n capabilities = webdriver.DesiredCapabilities.CHROME\n prox.add_to_capabilities(capabilities)\n driver = webdriver.Chrome(chrome_options=options,\n desired_capabilities=capabilities, service_log_path='NULL')\n else:\n binary = 'C:\\\\Program Files\\\\Mozilla Firefox\\\\firefox.exe'\n options = Options()\n PROXY = '172.27.140.48:3128'\n options.add_argument('--headless')\n options.binary = binary\n PROXY = '172.27.140.48:3128'\n desired_capability = webdriver.DesiredCapabilities.FIREFOX\n desired_capability['proxy'] = {'proxyType': 'manual',\n 'httpProxy': PROXY, 'ftpProxy': PROXY, 'sslProxy': PROXY}\n firefox_profile = webdriver.FirefoxProfile()\n firefox_profile.set_preference('browser.privatebrowsing.autostart',\n True)\n driver = webdriver.Firefox(firefox_profile=firefox_profile,\n firefox_binary=binary, firefox_options=options,\n capabilities=desired_capability)\n return driver\n\n def reset(self):\n self.quit()\n self.driver = self.initialize_driver(self.browser)\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n\n def get(self, url):\n if self.currentCount >= self.resetCount:\n self.reset()\n self.driver.get(url)\n self.currentCount += 1\n time.sleep(randint(1, 3))\n return self.driver.page_source\n\n def quit(self):\n self.driver.quit()\n\n def __exit__(self, type, value, traceback):\n self.quit()\n",
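The recorded get_social_accounts queries Google for '<company> linkedin/twitter/facebook' and takes the first href inside a div of class yuRUbf; that class name is a Google-internal detail that changes without notice, so it is the fragile part of the technique. The extraction step itself, isolated on a static snippet so it runs offline (html.parser is used here instead of the record's lxml to avoid the extra dependency):

from bs4 import BeautifulSoup

# Static stand-in for a Google results page; yuRUbf is the (unstable,
# Google-internal) wrapper class the recorded scraper keys on.
html = '<div class="yuRUbf"><a href="https://www.linkedin.com/company/example">Example</a></div>'

soup = BeautifulSoup(html, 'html.parser')
block = soup.find('div', attrs={'class': 'yuRUbf'})
if block is not None:
    print(block.find('a').get('href'))
    # -> https://www.linkedin.com/company/example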
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass Url(object):\n \"\"\"A url object that can be compared with other url orbjects\n without regard to the vagaries of encoding, escaping, and ordering\n of parameters in query strings.\"\"\"\n\n def __init__(self, url):\n parts = urlparse(url)\n _query = frozenset(parse_qsl(parts.query))\n _path = unquote_plus(parts.path)\n parts = parts._replace(query=_query, path=_path)\n self.parts = parts\n\n def __eq__(self, other):\n return (self.parts.path in other.parts.path or other.parts.path in\n self.parts.path)\n\n def __hash__(self):\n return hash(self.parts)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef get_search_results_site(address, website, full_content=False):\n domain = get_domain(website)\n url = form_google_query(address, directory=domain)\n response = google_get(url)\n content = response.content.decode('utf-8')\n soup = BeautifulSoup(content, 'lxml')\n referenceUrl, content = None, None\n for row in soup.select('div.g'):\n referenceUrl = row.select_one('.r a')\n referenceUrl = referenceUrl['href'] if referenceUrl else None\n contents = row.select('span.st') if full_content else row.select(\n 'span.st em')\n if contents:\n contents = [content.get_text() for content in contents]\n content = ', '.join(pd.Series(contents).drop_duplicates().tolist())\n break\n return referenceUrl, content\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef get_social_accounts(website, companyName):\n headers = {'accept':\n 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'\n , 'accept-language': 'en-GB,en;q=0.9,en-US;q=0.8,tr;q=0.7',\n 'cache-control': 'no-cache', 'content-type':\n 'application/x-www-form-urlencoded', 'origin':\n 'https://safer.fmcsa.dot.gov', 'pragma': 'no-cache', 'referer':\n 'https://safer.fmcsa.dot.gov/CompanySnapshot.aspx', 'user-agent':\n 'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'\n }\n socialAccounts = {'twitter': [], 'facebook': [], 'linkedin': []}\n website = website.strip()\n print('website1;', website)\n if len(website) > 4 and website[0:4] != 'http':\n website = 'http://' + website\n try:\n response = requests.get(website, headers=headers, proxies=proxies)\n content = response.content\n print('content', content)\n status_code = response.status_code\n print('status_code', status_code)\n if status_code == 200:\n print('SUCCESS')\n else:\n print('FAILED')\n try:\n username = 'meritgroup'\n password = 'sXNdrc6JU'\n send_from = '[email protected]'\n send_to = '[email protected]'\n Cc = ['[email protected]',\n '[email protected]']\n msg = MIMEMultipart()\n msg['From'] = send_from\n msg['To'] = send_to\n msg['Cc'] = ', '.join(Cc)\n msg['Date'] = formatdate(localtime=True)\n msg['Subject'] = 'ALF AUTOMATION'\n templatePath = os.path.join(os.getcwd(), 'templates',\n 'Weekly_Email_Template.html')\n template = open(templatePath, 'r')\n server = smtplib.SMTP('74.80.234.196')\n port = '25'\n body = 'Body_of_the_mail'\n msg.attach(MIMEText(str(template.read()), 'html'))\n smtp = smtplib.SMTP('74.80.234.196')\n smtp.ehlo()\n smtp.starttls()\n smtp.login(username, password)\n smtp.sendmail(send_from, send_to.split(',') + msg['Cc'].\n split(','), msg.as_string())\n smtp.quit()\n except Exception as e:\n print('e', e)\n except Exception 
as e:\n content = str(e)\n soup = BeautifulSoup(content, 'html5lib')\n links = soup.find_all('a', href=True)\n smSites = ['twitter', 'facebook', 'linkedin']\n for smSite in smSites:\n accounts = []\n if smSite == 'linkedin':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'linkedin'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n print('gh', accounts)\n if smSite == 'twitter':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'twitter'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n print('gh', accounts)\n if smSite == 'facebook':\n urll = 'https://www.google.com/search?api=1&query=' + str(\n companyName) + ' ' + 'facebook'\n print(urll)\n req = requests.get(urll, headers=headers, proxies=proxies)\n soup1 = BeautifulSoup(req.text, 'lxml')\n rep = req.text\n df = soup1.find('div', attrs={'class': 'yuRUbf'})\n if df is not None:\n link = df.find('a').get('href')\n accounts.append(link)\n if accounts:\n socialAccounts[smSite] = list(set(accounts))\n print('social', socialAccounts)\n return socialAccounts\n\n\nclass Driver:\n browser = 'chrome'\n\n def __enter__(self):\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n self.driver = self.initialize_driver(self.browser)\n return self\n\n def initialize_driver(self, browser):\n if browser == 'chrome':\n options = Options()\n options.add_argument('--disable-gpu')\n options.add_argument('--no-sandbox')\n options.add_argument('start-maximized')\n options.add_argument('disable-infobars')\n options.add_argument('--disable-logging')\n options.add_argument('--log-level=3')\n options.add_experimental_option('excludeSwitches', [\n 'ignore-certificate-errors'])\n proxy = choice(['172.27.140.48:3128', '172.27.140.48:3128'])\n prox = Proxy()\n prox.proxy_type = ProxyType.MANUAL\n prox.http_proxy = proxy\n prox.ssl_proxy = proxy\n capabilities = webdriver.DesiredCapabilities.CHROME\n prox.add_to_capabilities(capabilities)\n driver = webdriver.Chrome(chrome_options=options,\n desired_capabilities=capabilities, service_log_path='NULL')\n else:\n binary = 'C:\\\\Program Files\\\\Mozilla Firefox\\\\firefox.exe'\n options = Options()\n PROXY = '172.27.140.48:3128'\n options.add_argument('--headless')\n options.binary = binary\n PROXY = '172.27.140.48:3128'\n desired_capability = webdriver.DesiredCapabilities.FIREFOX\n desired_capability['proxy'] = {'proxyType': 'manual',\n 'httpProxy': PROXY, 'ftpProxy': PROXY, 'sslProxy': PROXY}\n firefox_profile = webdriver.FirefoxProfile()\n firefox_profile.set_preference('browser.privatebrowsing.autostart',\n True)\n driver = webdriver.Firefox(firefox_profile=firefox_profile,\n firefox_binary=binary, firefox_options=options,\n capabilities=desired_capability)\n return driver\n\n def reset(self):\n self.quit()\n self.driver = self.initialize_driver(self.browser)\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n\n def get(self, url):\n if self.currentCount >= self.resetCount:\n self.reset()\n self.driver.get(url)\n self.currentCount += 1\n time.sleep(randint(1, 3))\n return self.driver.page_source\n\n def 
quit(self):\n self.driver.quit()\n\n def __exit__(self, type, value, traceback):\n self.quit()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass Url(object):\n \"\"\"A url object that can be compared with other url orbjects\n without regard to the vagaries of encoding, escaping, and ordering\n of parameters in query strings.\"\"\"\n\n def __init__(self, url):\n parts = urlparse(url)\n _query = frozenset(parse_qsl(parts.query))\n _path = unquote_plus(parts.path)\n parts = parts._replace(query=_query, path=_path)\n self.parts = parts\n\n def __eq__(self, other):\n return (self.parts.path in other.parts.path or other.parts.path in\n self.parts.path)\n\n def __hash__(self):\n return hash(self.parts)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef get_search_results_site(address, website, full_content=False):\n domain = get_domain(website)\n url = form_google_query(address, directory=domain)\n response = google_get(url)\n content = response.content.decode('utf-8')\n soup = BeautifulSoup(content, 'lxml')\n referenceUrl, content = None, None\n for row in soup.select('div.g'):\n referenceUrl = row.select_one('.r a')\n referenceUrl = referenceUrl['href'] if referenceUrl else None\n contents = row.select('span.st') if full_content else row.select(\n 'span.st em')\n if contents:\n contents = [content.get_text() for content in contents]\n content = ', '.join(pd.Series(contents).drop_duplicates().tolist())\n break\n return referenceUrl, content\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Driver:\n browser = 'chrome'\n\n def __enter__(self):\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n self.driver = self.initialize_driver(self.browser)\n return self\n\n def initialize_driver(self, browser):\n if browser == 'chrome':\n options = Options()\n options.add_argument('--disable-gpu')\n options.add_argument('--no-sandbox')\n options.add_argument('start-maximized')\n options.add_argument('disable-infobars')\n options.add_argument('--disable-logging')\n options.add_argument('--log-level=3')\n options.add_experimental_option('excludeSwitches', [\n 'ignore-certificate-errors'])\n proxy = choice(['172.27.140.48:3128', '172.27.140.48:3128'])\n prox = Proxy()\n prox.proxy_type = ProxyType.MANUAL\n prox.http_proxy = proxy\n prox.ssl_proxy = proxy\n capabilities = webdriver.DesiredCapabilities.CHROME\n prox.add_to_capabilities(capabilities)\n driver = webdriver.Chrome(chrome_options=options,\n desired_capabilities=capabilities, service_log_path='NULL')\n else:\n binary = 'C:\\\\Program Files\\\\Mozilla Firefox\\\\firefox.exe'\n options = Options()\n PROXY = '172.27.140.48:3128'\n options.add_argument('--headless')\n options.binary = binary\n PROXY = '172.27.140.48:3128'\n desired_capability = webdriver.DesiredCapabilities.FIREFOX\n desired_capability['proxy'] = {'proxyType': 'manual',\n 'httpProxy': PROXY, 'ftpProxy': PROXY, 'sslProxy': PROXY}\n firefox_profile = webdriver.FirefoxProfile()\n firefox_profile.set_preference('browser.privatebrowsing.autostart',\n True)\n driver = webdriver.Firefox(firefox_profile=firefox_profile,\n firefox_binary=binary, firefox_options=options,\n capabilities=desired_capability)\n return driver\n\n def reset(self):\n self.quit()\n self.driver = self.initialize_driver(self.browser)\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n\n def get(self, url):\n if self.currentCount >= self.resetCount:\n self.reset()\n self.driver.get(url)\n self.currentCount += 1\n 
time.sleep(randint(1, 3))\n return self.driver.page_source\n\n def quit(self):\n self.driver.quit()\n\n def __exit__(self, type, value, traceback):\n self.quit()\n",
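get_search_results_site and the address lookups above all call a form_google_query helper that was not captured in these steps. A plausible reconstruction of its shape, offered strictly as an assumption (the real helper may encode more parameters): it URL-encodes the terms and, when a directory/domain is given, scopes the search with a site: operator.

from urllib.parse import quote_plus


def form_google_query(query, directory=None):
    # Hypothetical stand-in for the helper referenced by the recorded code.
    terms = query if directory is None else '%s site:%s' % (query, directory)
    return 'https://www.google.com/search?q=' + quote_plus(terms)


print(form_google_query('Acme Logistics', directory='example.com'))
# https://www.google.com/search?q=Acme+Logistics+site%3Aexample.com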
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass Url(object):\n \"\"\"A url object that can be compared with other url orbjects\n without regard to the vagaries of encoding, escaping, and ordering\n of parameters in query strings.\"\"\"\n\n def __init__(self, url):\n parts = urlparse(url)\n _query = frozenset(parse_qsl(parts.query))\n _path = unquote_plus(parts.path)\n parts = parts._replace(query=_query, path=_path)\n self.parts = parts\n\n def __eq__(self, other):\n return (self.parts.path in other.parts.path or other.parts.path in\n self.parts.path)\n\n def __hash__(self):\n return hash(self.parts)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Driver:\n browser = 'chrome'\n\n def __enter__(self):\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n self.driver = self.initialize_driver(self.browser)\n return self\n\n def initialize_driver(self, browser):\n if browser == 'chrome':\n options = Options()\n options.add_argument('--disable-gpu')\n options.add_argument('--no-sandbox')\n options.add_argument('start-maximized')\n options.add_argument('disable-infobars')\n options.add_argument('--disable-logging')\n options.add_argument('--log-level=3')\n options.add_experimental_option('excludeSwitches', [\n 'ignore-certificate-errors'])\n proxy = choice(['172.27.140.48:3128', '172.27.140.48:3128'])\n prox = Proxy()\n prox.proxy_type = ProxyType.MANUAL\n prox.http_proxy = proxy\n prox.ssl_proxy = proxy\n capabilities = webdriver.DesiredCapabilities.CHROME\n prox.add_to_capabilities(capabilities)\n driver = webdriver.Chrome(chrome_options=options,\n desired_capabilities=capabilities, service_log_path='NULL')\n else:\n binary = 'C:\\\\Program Files\\\\Mozilla Firefox\\\\firefox.exe'\n options = Options()\n PROXY = '172.27.140.48:3128'\n options.add_argument('--headless')\n options.binary = binary\n PROXY = '172.27.140.48:3128'\n desired_capability = webdriver.DesiredCapabilities.FIREFOX\n desired_capability['proxy'] = {'proxyType': 'manual',\n 'httpProxy': PROXY, 'ftpProxy': PROXY, 'sslProxy': PROXY}\n firefox_profile = webdriver.FirefoxProfile()\n firefox_profile.set_preference('browser.privatebrowsing.autostart',\n True)\n driver = webdriver.Firefox(firefox_profile=firefox_profile,\n firefox_binary=binary, firefox_options=options,\n capabilities=desired_capability)\n return driver\n\n def reset(self):\n self.quit()\n self.driver = self.initialize_driver(self.browser)\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n\n def get(self, url):\n if self.currentCount >= self.resetCount:\n self.reset()\n self.driver.get(url)\n self.currentCount += 1\n time.sleep(randint(1, 3))\n return self.driver.page_source\n\n def quit(self):\n self.driver.quit()\n\n def __exit__(self, type, value, traceback):\n self.quit()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass Url(object):\n <docstring token>\n\n def __init__(self, url):\n parts = urlparse(url)\n _query = frozenset(parse_qsl(parts.query))\n _path = unquote_plus(parts.path)\n parts = parts._replace(query=_query, path=_path)\n self.parts = parts\n\n def __eq__(self, other):\n return (self.parts.path in other.parts.path or other.parts.path in\n self.parts.path)\n\n def __hash__(self):\n return hash(self.parts)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Driver:\n browser = 'chrome'\n\n def __enter__(self):\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n self.driver = self.initialize_driver(self.browser)\n return self\n\n def initialize_driver(self, browser):\n if browser == 'chrome':\n options = Options()\n options.add_argument('--disable-gpu')\n options.add_argument('--no-sandbox')\n options.add_argument('start-maximized')\n options.add_argument('disable-infobars')\n options.add_argument('--disable-logging')\n options.add_argument('--log-level=3')\n options.add_experimental_option('excludeSwitches', [\n 'ignore-certificate-errors'])\n proxy = choice(['172.27.140.48:3128', '172.27.140.48:3128'])\n prox = Proxy()\n prox.proxy_type = ProxyType.MANUAL\n prox.http_proxy = proxy\n prox.ssl_proxy = proxy\n capabilities = webdriver.DesiredCapabilities.CHROME\n prox.add_to_capabilities(capabilities)\n driver = webdriver.Chrome(chrome_options=options,\n desired_capabilities=capabilities, service_log_path='NULL')\n else:\n binary = 'C:\\\\Program Files\\\\Mozilla Firefox\\\\firefox.exe'\n options = Options()\n PROXY = '172.27.140.48:3128'\n options.add_argument('--headless')\n options.binary = binary\n PROXY = '172.27.140.48:3128'\n desired_capability = webdriver.DesiredCapabilities.FIREFOX\n desired_capability['proxy'] = {'proxyType': 'manual',\n 'httpProxy': PROXY, 'ftpProxy': PROXY, 'sslProxy': PROXY}\n firefox_profile = webdriver.FirefoxProfile()\n firefox_profile.set_preference('browser.privatebrowsing.autostart',\n True)\n driver = webdriver.Firefox(firefox_profile=firefox_profile,\n firefox_binary=binary, firefox_options=options,\n capabilities=desired_capability)\n return driver\n\n def reset(self):\n self.quit()\n self.driver = self.initialize_driver(self.browser)\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n\n def get(self, url):\n if self.currentCount >= self.resetCount:\n self.reset()\n self.driver.get(url)\n self.currentCount += 1\n time.sleep(randint(1, 3))\n return self.driver.page_source\n\n def quit(self):\n self.driver.quit()\n\n def __exit__(self, type, value, traceback):\n self.quit()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass Url(object):\n <docstring token>\n <function token>\n\n def __eq__(self, other):\n return (self.parts.path in other.parts.path or other.parts.path in\n self.parts.path)\n\n def __hash__(self):\n return hash(self.parts)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Driver:\n browser = 'chrome'\n\n def __enter__(self):\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n self.driver = self.initialize_driver(self.browser)\n return self\n\n def initialize_driver(self, browser):\n if browser == 'chrome':\n options = Options()\n options.add_argument('--disable-gpu')\n options.add_argument('--no-sandbox')\n options.add_argument('start-maximized')\n options.add_argument('disable-infobars')\n options.add_argument('--disable-logging')\n options.add_argument('--log-level=3')\n options.add_experimental_option('excludeSwitches', [\n 'ignore-certificate-errors'])\n proxy = choice(['172.27.140.48:3128', '172.27.140.48:3128'])\n prox = Proxy()\n prox.proxy_type = ProxyType.MANUAL\n prox.http_proxy = proxy\n prox.ssl_proxy = proxy\n capabilities = webdriver.DesiredCapabilities.CHROME\n prox.add_to_capabilities(capabilities)\n driver = webdriver.Chrome(chrome_options=options,\n desired_capabilities=capabilities, service_log_path='NULL')\n else:\n binary = 'C:\\\\Program Files\\\\Mozilla Firefox\\\\firefox.exe'\n options = Options()\n PROXY = '172.27.140.48:3128'\n options.add_argument('--headless')\n options.binary = binary\n PROXY = '172.27.140.48:3128'\n desired_capability = webdriver.DesiredCapabilities.FIREFOX\n desired_capability['proxy'] = {'proxyType': 'manual',\n 'httpProxy': PROXY, 'ftpProxy': PROXY, 'sslProxy': PROXY}\n firefox_profile = webdriver.FirefoxProfile()\n firefox_profile.set_preference('browser.privatebrowsing.autostart',\n True)\n driver = webdriver.Firefox(firefox_profile=firefox_profile,\n firefox_binary=binary, firefox_options=options,\n capabilities=desired_capability)\n return driver\n\n def reset(self):\n self.quit()\n self.driver = self.initialize_driver(self.browser)\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n\n def get(self, url):\n if self.currentCount >= self.resetCount:\n self.reset()\n self.driver.get(url)\n self.currentCount += 1\n time.sleep(randint(1, 3))\n return self.driver.page_source\n\n def quit(self):\n self.driver.quit()\n\n def __exit__(self, type, value, traceback):\n self.quit()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass Url(object):\n <docstring token>\n <function token>\n\n def __eq__(self, other):\n return (self.parts.path in other.parts.path or other.parts.path in\n self.parts.path)\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Driver:\n browser = 'chrome'\n\n def __enter__(self):\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n self.driver = self.initialize_driver(self.browser)\n return self\n\n def initialize_driver(self, browser):\n if browser == 'chrome':\n options = Options()\n options.add_argument('--disable-gpu')\n options.add_argument('--no-sandbox')\n options.add_argument('start-maximized')\n options.add_argument('disable-infobars')\n options.add_argument('--disable-logging')\n options.add_argument('--log-level=3')\n options.add_experimental_option('excludeSwitches', [\n 'ignore-certificate-errors'])\n proxy = choice(['172.27.140.48:3128', '172.27.140.48:3128'])\n prox = Proxy()\n prox.proxy_type = ProxyType.MANUAL\n prox.http_proxy = proxy\n prox.ssl_proxy = proxy\n capabilities = webdriver.DesiredCapabilities.CHROME\n prox.add_to_capabilities(capabilities)\n driver = webdriver.Chrome(chrome_options=options,\n desired_capabilities=capabilities, service_log_path='NULL')\n else:\n binary = 'C:\\\\Program Files\\\\Mozilla Firefox\\\\firefox.exe'\n options = Options()\n PROXY = '172.27.140.48:3128'\n options.add_argument('--headless')\n options.binary = binary\n PROXY = '172.27.140.48:3128'\n desired_capability = webdriver.DesiredCapabilities.FIREFOX\n desired_capability['proxy'] = {'proxyType': 'manual',\n 'httpProxy': PROXY, 'ftpProxy': PROXY, 'sslProxy': PROXY}\n firefox_profile = webdriver.FirefoxProfile()\n firefox_profile.set_preference('browser.privatebrowsing.autostart',\n True)\n driver = webdriver.Firefox(firefox_profile=firefox_profile,\n firefox_binary=binary, firefox_options=options,\n capabilities=desired_capability)\n return driver\n\n def reset(self):\n self.quit()\n self.driver = self.initialize_driver(self.browser)\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n\n def get(self, url):\n if self.currentCount >= self.resetCount:\n self.reset()\n self.driver.get(url)\n self.currentCount += 1\n time.sleep(randint(1, 3))\n return self.driver.page_source\n\n def quit(self):\n self.driver.quit()\n\n def __exit__(self, type, value, traceback):\n self.quit()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass Url(object):\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Driver:\n browser = 'chrome'\n\n def __enter__(self):\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n self.driver = self.initialize_driver(self.browser)\n return self\n\n def initialize_driver(self, browser):\n if browser == 'chrome':\n options = Options()\n options.add_argument('--disable-gpu')\n options.add_argument('--no-sandbox')\n options.add_argument('start-maximized')\n options.add_argument('disable-infobars')\n options.add_argument('--disable-logging')\n options.add_argument('--log-level=3')\n options.add_experimental_option('excludeSwitches', [\n 'ignore-certificate-errors'])\n proxy = choice(['172.27.140.48:3128', '172.27.140.48:3128'])\n prox = Proxy()\n prox.proxy_type = ProxyType.MANUAL\n prox.http_proxy = proxy\n prox.ssl_proxy = proxy\n capabilities = webdriver.DesiredCapabilities.CHROME\n prox.add_to_capabilities(capabilities)\n driver = webdriver.Chrome(chrome_options=options,\n desired_capabilities=capabilities, service_log_path='NULL')\n else:\n binary = 'C:\\\\Program Files\\\\Mozilla Firefox\\\\firefox.exe'\n options = Options()\n PROXY = '172.27.140.48:3128'\n options.add_argument('--headless')\n options.binary = binary\n PROXY = '172.27.140.48:3128'\n desired_capability = webdriver.DesiredCapabilities.FIREFOX\n desired_capability['proxy'] = {'proxyType': 'manual',\n 'httpProxy': PROXY, 'ftpProxy': PROXY, 'sslProxy': PROXY}\n firefox_profile = webdriver.FirefoxProfile()\n firefox_profile.set_preference('browser.privatebrowsing.autostart',\n True)\n driver = webdriver.Firefox(firefox_profile=firefox_profile,\n firefox_binary=binary, firefox_options=options,\n capabilities=desired_capability)\n return driver\n\n def reset(self):\n self.quit()\n self.driver = self.initialize_driver(self.browser)\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n\n def get(self, url):\n if self.currentCount >= self.resetCount:\n self.reset()\n self.driver.get(url)\n self.currentCount += 1\n time.sleep(randint(1, 3))\n return self.driver.page_source\n\n def quit(self):\n self.driver.quit()\n\n def __exit__(self, type, value, traceback):\n self.quit()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Driver:\n browser = 'chrome'\n\n def __enter__(self):\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n self.driver = self.initialize_driver(self.browser)\n return self\n\n def initialize_driver(self, browser):\n if browser == 'chrome':\n options = Options()\n options.add_argument('--disable-gpu')\n options.add_argument('--no-sandbox')\n options.add_argument('start-maximized')\n options.add_argument('disable-infobars')\n options.add_argument('--disable-logging')\n options.add_argument('--log-level=3')\n options.add_experimental_option('excludeSwitches', [\n 'ignore-certificate-errors'])\n proxy = choice(['172.27.140.48:3128', '172.27.140.48:3128'])\n prox = Proxy()\n prox.proxy_type = ProxyType.MANUAL\n prox.http_proxy = proxy\n prox.ssl_proxy = proxy\n capabilities = webdriver.DesiredCapabilities.CHROME\n prox.add_to_capabilities(capabilities)\n driver = webdriver.Chrome(chrome_options=options,\n desired_capabilities=capabilities, service_log_path='NULL')\n else:\n binary = 'C:\\\\Program Files\\\\Mozilla Firefox\\\\firefox.exe'\n options = Options()\n PROXY = '172.27.140.48:3128'\n options.add_argument('--headless')\n options.binary = binary\n PROXY = '172.27.140.48:3128'\n desired_capability = webdriver.DesiredCapabilities.FIREFOX\n desired_capability['proxy'] = {'proxyType': 'manual',\n 'httpProxy': PROXY, 'ftpProxy': PROXY, 'sslProxy': PROXY}\n firefox_profile = webdriver.FirefoxProfile()\n firefox_profile.set_preference('browser.privatebrowsing.autostart',\n True)\n driver = webdriver.Firefox(firefox_profile=firefox_profile,\n firefox_binary=binary, firefox_options=options,\n capabilities=desired_capability)\n return driver\n\n def reset(self):\n self.quit()\n self.driver = self.initialize_driver(self.browser)\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n\n def get(self, url):\n if self.currentCount >= self.resetCount:\n self.reset()\n self.driver.get(url)\n self.currentCount += 1\n time.sleep(randint(1, 3))\n return self.driver.page_source\n\n def quit(self):\n self.driver.quit()\n\n def __exit__(self, type, value, traceback):\n self.quit()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Driver:\n <assignment token>\n\n def __enter__(self):\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n self.driver = self.initialize_driver(self.browser)\n return self\n\n def initialize_driver(self, browser):\n if browser == 'chrome':\n options = Options()\n options.add_argument('--disable-gpu')\n options.add_argument('--no-sandbox')\n options.add_argument('start-maximized')\n options.add_argument('disable-infobars')\n options.add_argument('--disable-logging')\n options.add_argument('--log-level=3')\n options.add_experimental_option('excludeSwitches', [\n 'ignore-certificate-errors'])\n proxy = choice(['172.27.140.48:3128', '172.27.140.48:3128'])\n prox = Proxy()\n prox.proxy_type = ProxyType.MANUAL\n prox.http_proxy = proxy\n prox.ssl_proxy = proxy\n capabilities = webdriver.DesiredCapabilities.CHROME\n prox.add_to_capabilities(capabilities)\n driver = webdriver.Chrome(chrome_options=options,\n desired_capabilities=capabilities, service_log_path='NULL')\n else:\n binary = 'C:\\\\Program Files\\\\Mozilla Firefox\\\\firefox.exe'\n options = Options()\n PROXY = '172.27.140.48:3128'\n options.add_argument('--headless')\n options.binary = binary\n PROXY = '172.27.140.48:3128'\n desired_capability = webdriver.DesiredCapabilities.FIREFOX\n desired_capability['proxy'] = {'proxyType': 'manual',\n 'httpProxy': PROXY, 'ftpProxy': PROXY, 'sslProxy': PROXY}\n firefox_profile = webdriver.FirefoxProfile()\n firefox_profile.set_preference('browser.privatebrowsing.autostart',\n True)\n driver = webdriver.Firefox(firefox_profile=firefox_profile,\n firefox_binary=binary, firefox_options=options,\n capabilities=desired_capability)\n return driver\n\n def reset(self):\n self.quit()\n self.driver = self.initialize_driver(self.browser)\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n\n def get(self, url):\n if self.currentCount >= self.resetCount:\n self.reset()\n self.driver.get(url)\n self.currentCount += 1\n time.sleep(randint(1, 3))\n return self.driver.page_source\n\n def quit(self):\n self.driver.quit()\n\n def __exit__(self, type, value, traceback):\n self.quit()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Driver:\n <assignment token>\n\n def __enter__(self):\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n self.driver = self.initialize_driver(self.browser)\n return self\n\n def initialize_driver(self, browser):\n if browser == 'chrome':\n options = Options()\n options.add_argument('--disable-gpu')\n options.add_argument('--no-sandbox')\n options.add_argument('start-maximized')\n options.add_argument('disable-infobars')\n options.add_argument('--disable-logging')\n options.add_argument('--log-level=3')\n options.add_experimental_option('excludeSwitches', [\n 'ignore-certificate-errors'])\n proxy = choice(['172.27.140.48:3128', '172.27.140.48:3128'])\n prox = Proxy()\n prox.proxy_type = ProxyType.MANUAL\n prox.http_proxy = proxy\n prox.ssl_proxy = proxy\n capabilities = webdriver.DesiredCapabilities.CHROME\n prox.add_to_capabilities(capabilities)\n driver = webdriver.Chrome(chrome_options=options,\n desired_capabilities=capabilities, service_log_path='NULL')\n else:\n binary = 'C:\\\\Program Files\\\\Mozilla Firefox\\\\firefox.exe'\n options = Options()\n PROXY = '172.27.140.48:3128'\n options.add_argument('--headless')\n options.binary = binary\n PROXY = '172.27.140.48:3128'\n desired_capability = webdriver.DesiredCapabilities.FIREFOX\n desired_capability['proxy'] = {'proxyType': 'manual',\n 'httpProxy': PROXY, 'ftpProxy': PROXY, 'sslProxy': PROXY}\n firefox_profile = webdriver.FirefoxProfile()\n firefox_profile.set_preference('browser.privatebrowsing.autostart',\n True)\n driver = webdriver.Firefox(firefox_profile=firefox_profile,\n firefox_binary=binary, firefox_options=options,\n capabilities=desired_capability)\n return driver\n\n def reset(self):\n self.quit()\n self.driver = self.initialize_driver(self.browser)\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n\n def get(self, url):\n if self.currentCount >= self.resetCount:\n self.reset()\n self.driver.get(url)\n self.currentCount += 1\n time.sleep(randint(1, 3))\n return self.driver.page_source\n <function token>\n\n def __exit__(self, type, value, traceback):\n self.quit()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Driver:\n <assignment token>\n <function token>\n\n def initialize_driver(self, browser):\n if browser == 'chrome':\n options = Options()\n options.add_argument('--disable-gpu')\n options.add_argument('--no-sandbox')\n options.add_argument('start-maximized')\n options.add_argument('disable-infobars')\n options.add_argument('--disable-logging')\n options.add_argument('--log-level=3')\n options.add_experimental_option('excludeSwitches', [\n 'ignore-certificate-errors'])\n proxy = choice(['172.27.140.48:3128', '172.27.140.48:3128'])\n prox = Proxy()\n prox.proxy_type = ProxyType.MANUAL\n prox.http_proxy = proxy\n prox.ssl_proxy = proxy\n capabilities = webdriver.DesiredCapabilities.CHROME\n prox.add_to_capabilities(capabilities)\n driver = webdriver.Chrome(chrome_options=options,\n desired_capabilities=capabilities, service_log_path='NULL')\n else:\n binary = 'C:\\\\Program Files\\\\Mozilla Firefox\\\\firefox.exe'\n options = Options()\n PROXY = '172.27.140.48:3128'\n options.add_argument('--headless')\n options.binary = binary\n PROXY = '172.27.140.48:3128'\n desired_capability = webdriver.DesiredCapabilities.FIREFOX\n desired_capability['proxy'] = {'proxyType': 'manual',\n 'httpProxy': PROXY, 'ftpProxy': PROXY, 'sslProxy': PROXY}\n firefox_profile = webdriver.FirefoxProfile()\n firefox_profile.set_preference('browser.privatebrowsing.autostart',\n True)\n driver = webdriver.Firefox(firefox_profile=firefox_profile,\n firefox_binary=binary, firefox_options=options,\n capabilities=desired_capability)\n return driver\n\n def reset(self):\n self.quit()\n self.driver = self.initialize_driver(self.browser)\n self.resetCount = randint(1, 3)\n self.currentCount = 0\n\n def get(self, url):\n if self.currentCount >= self.resetCount:\n self.reset()\n self.driver.get(url)\n self.currentCount += 1\n time.sleep(randint(1, 3))\n return self.driver.page_source\n <function token>\n\n def __exit__(self, type, value, traceback):\n self.quit()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Driver:\n <assignment token>\n <function token>\n\n def initialize_driver(self, browser):\n if browser == 'chrome':\n options = Options()\n options.add_argument('--disable-gpu')\n options.add_argument('--no-sandbox')\n options.add_argument('start-maximized')\n options.add_argument('disable-infobars')\n options.add_argument('--disable-logging')\n options.add_argument('--log-level=3')\n options.add_experimental_option('excludeSwitches', [\n 'ignore-certificate-errors'])\n proxy = choice(['172.27.140.48:3128', '172.27.140.48:3128'])\n prox = Proxy()\n prox.proxy_type = ProxyType.MANUAL\n prox.http_proxy = proxy\n prox.ssl_proxy = proxy\n capabilities = webdriver.DesiredCapabilities.CHROME\n prox.add_to_capabilities(capabilities)\n driver = webdriver.Chrome(chrome_options=options,\n desired_capabilities=capabilities, service_log_path='NULL')\n else:\n binary = 'C:\\\\Program Files\\\\Mozilla Firefox\\\\firefox.exe'\n options = Options()\n PROXY = '172.27.140.48:3128'\n options.add_argument('--headless')\n options.binary = binary\n PROXY = '172.27.140.48:3128'\n desired_capability = webdriver.DesiredCapabilities.FIREFOX\n desired_capability['proxy'] = {'proxyType': 'manual',\n 'httpProxy': PROXY, 'ftpProxy': PROXY, 'sslProxy': PROXY}\n firefox_profile = webdriver.FirefoxProfile()\n firefox_profile.set_preference('browser.privatebrowsing.autostart',\n True)\n driver = webdriver.Firefox(firefox_profile=firefox_profile,\n firefox_binary=binary, firefox_options=options,\n capabilities=desired_capability)\n return driver\n <function token>\n\n def get(self, url):\n if self.currentCount >= self.resetCount:\n self.reset()\n self.driver.get(url)\n self.currentCount += 1\n time.sleep(randint(1, 3))\n return self.driver.page_source\n <function token>\n\n def __exit__(self, type, value, traceback):\n self.quit()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Driver:\n <assignment token>\n <function token>\n\n def initialize_driver(self, browser):\n if browser == 'chrome':\n options = Options()\n options.add_argument('--disable-gpu')\n options.add_argument('--no-sandbox')\n options.add_argument('start-maximized')\n options.add_argument('disable-infobars')\n options.add_argument('--disable-logging')\n options.add_argument('--log-level=3')\n options.add_experimental_option('excludeSwitches', [\n 'ignore-certificate-errors'])\n proxy = choice(['172.27.140.48:3128', '172.27.140.48:3128'])\n prox = Proxy()\n prox.proxy_type = ProxyType.MANUAL\n prox.http_proxy = proxy\n prox.ssl_proxy = proxy\n capabilities = webdriver.DesiredCapabilities.CHROME\n prox.add_to_capabilities(capabilities)\n driver = webdriver.Chrome(chrome_options=options,\n desired_capabilities=capabilities, service_log_path='NULL')\n else:\n binary = 'C:\\\\Program Files\\\\Mozilla Firefox\\\\firefox.exe'\n options = Options()\n PROXY = '172.27.140.48:3128'\n options.add_argument('--headless')\n options.binary = binary\n PROXY = '172.27.140.48:3128'\n desired_capability = webdriver.DesiredCapabilities.FIREFOX\n desired_capability['proxy'] = {'proxyType': 'manual',\n 'httpProxy': PROXY, 'ftpProxy': PROXY, 'sslProxy': PROXY}\n firefox_profile = webdriver.FirefoxProfile()\n firefox_profile.set_preference('browser.privatebrowsing.autostart',\n True)\n driver = webdriver.Firefox(firefox_profile=firefox_profile,\n firefox_binary=binary, firefox_options=options,\n capabilities=desired_capability)\n return driver\n <function token>\n\n def get(self, url):\n if self.currentCount >= self.resetCount:\n self.reset()\n self.driver.get(url)\n self.currentCount += 1\n time.sleep(randint(1, 3))\n return self.driver.page_source\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Driver:\n <assignment token>\n <function token>\n\n def initialize_driver(self, browser):\n if browser == 'chrome':\n options = Options()\n options.add_argument('--disable-gpu')\n options.add_argument('--no-sandbox')\n options.add_argument('start-maximized')\n options.add_argument('disable-infobars')\n options.add_argument('--disable-logging')\n options.add_argument('--log-level=3')\n options.add_experimental_option('excludeSwitches', [\n 'ignore-certificate-errors'])\n proxy = choice(['172.27.140.48:3128', '172.27.140.48:3128'])\n prox = Proxy()\n prox.proxy_type = ProxyType.MANUAL\n prox.http_proxy = proxy\n prox.ssl_proxy = proxy\n capabilities = webdriver.DesiredCapabilities.CHROME\n prox.add_to_capabilities(capabilities)\n driver = webdriver.Chrome(chrome_options=options,\n desired_capabilities=capabilities, service_log_path='NULL')\n else:\n binary = 'C:\\\\Program Files\\\\Mozilla Firefox\\\\firefox.exe'\n options = Options()\n PROXY = '172.27.140.48:3128'\n options.add_argument('--headless')\n options.binary = binary\n PROXY = '172.27.140.48:3128'\n desired_capability = webdriver.DesiredCapabilities.FIREFOX\n desired_capability['proxy'] = {'proxyType': 'manual',\n 'httpProxy': PROXY, 'ftpProxy': PROXY, 'sslProxy': PROXY}\n firefox_profile = webdriver.FirefoxProfile()\n firefox_profile.set_preference('browser.privatebrowsing.autostart',\n True)\n driver = webdriver.Firefox(firefox_profile=firefox_profile,\n firefox_binary=binary, firefox_options=options,\n capabilities=desired_capability)\n return driver\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\nclass Driver:\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<class token>\n"
] | false |
99,311 |
6341ea1b872b79d1f4171bbc6733b3a275a36488
|
import opentrons.simulate
protocol_file = open('p1a_GF_prekingfisher.py')
opentrons.simulate.simulate(protocol_file)
|
[
"import opentrons.simulate\nprotocol_file = open('p1a_GF_prekingfisher.py')\nopentrons.simulate.simulate(protocol_file)\n",
"<import token>\nprotocol_file = open('p1a_GF_prekingfisher.py')\nopentrons.simulate.simulate(protocol_file)\n",
"<import token>\n<assignment token>\nopentrons.simulate.simulate(protocol_file)\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
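The opentrons snippet in this record leaves the protocol file open and discards simulate()'s return value. A minimal sketch of a tidier invocation, assuming the (run_log, bundle) return shape and the format_runlog helper present in recent Opentrons releases:

import opentrons.simulate

# The context manager closes the protocol file; simulate() is assumed
# to return (run_log, bundle) as in recent Opentrons API versions.
with open('p1a_GF_prekingfisher.py') as protocol_file:
    run_log, _bundle = opentrons.simulate.simulate(protocol_file)
print(opentrons.simulate.format_runlog(run_log))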
99,312 |
8de446a8787cb96d66b8751bafa5c76a21bb9776
|
import unittest
import predict
class TestStringMethods(unittest.TestCase):
def test_convert_wait_time_to_days(self):
tests = [
["2 Yrs 7 Mths 16 Days", 2 * 365 + 7 * 30 + 16]
]
for t in tests:
assert(predict.convert_wait_time(t[0], month=False) == t[1])
if __name__ == '__main__':
unittest.main()
|
[
"import unittest\nimport predict\n\n\nclass TestStringMethods(unittest.TestCase):\n\tdef test_convert_wait_time_to_days(self):\n\t\ttests = [\n\t\t\t[\"2 Yrs 7 Mths 16 Days\", 2 * 365 + 7 * 30 + 16]\n\t\t]\n\t\tfor t in tests:\n\t\t\tassert(predict.convert_wait_time(t[0], month=False) == t[1])\n\nif __name__ == '__main__':\n\tunittest.main()\n",
"import unittest\nimport predict\n\n\nclass TestStringMethods(unittest.TestCase):\n\n def test_convert_wait_time_to_days(self):\n tests = [['2 Yrs 7 Mths 16 Days', 2 * 365 + 7 * 30 + 16]]\n for t in tests:\n assert predict.convert_wait_time(t[0], month=False) == t[1]\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<import token>\n\n\nclass TestStringMethods(unittest.TestCase):\n\n def test_convert_wait_time_to_days(self):\n tests = [['2 Yrs 7 Mths 16 Days', 2 * 365 + 7 * 30 + 16]]\n for t in tests:\n assert predict.convert_wait_time(t[0], month=False) == t[1]\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<import token>\n\n\nclass TestStringMethods(unittest.TestCase):\n\n def test_convert_wait_time_to_days(self):\n tests = [['2 Yrs 7 Mths 16 Days', 2 * 365 + 7 * 30 + 16]]\n for t in tests:\n assert predict.convert_wait_time(t[0], month=False) == t[1]\n\n\n<code token>\n",
"<import token>\n\n\nclass TestStringMethods(unittest.TestCase):\n <function token>\n\n\n<code token>\n",
"<import token>\n<class token>\n<code token>\n"
] | false |
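predict.convert_wait_time itself is not shown in this record; a hypothetical implementation consistent with the test above (the regex approach, the 30-day month convention, and the month=True behavior are all assumptions, not the project's code):

import re

def convert_wait_time(text, month=False):
    """Parse strings like '2 Yrs 7 Mths 16 Days' into a day (or month) count."""
    def grab(unit):
        # Missing components default to zero, e.g. '16 Days' -> 16 days.
        m = re.search(r'(\d+)\s*' + unit, text)
        return int(m.group(1)) if m else 0

    days = grab('Yrs?') * 365 + grab('Mths?') * 30 + grab('Days?')
    return days / 30 if month else days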
99,313 |
38e3f1692f3ff0aa4c63530e6e67599380328f7e
|
from apps.core.models.soft_delete_model_base import ModelBase
from django.db import models
class Auth(ModelBase):
class LoginType:
Kakao = 0
Apple = 1
types = (
(Kakao, 'kakao'),
(Apple, 'apple'),
)
identifier = models.CharField(max_length=50, unique=True)
email = models.CharField(max_length=100, null=True, blank=True)
user = models.ForeignKey("User", related_name="auth_user", on_delete=models.CASCADE, db_column="user")
social_token = models.CharField(max_length=150)
login_type = models.SmallIntegerField('state', choices=LoginType.types)
token = models.CharField(max_length=300)
|
[
"from apps.core.models.soft_delete_model_base import ModelBase\nfrom django.db import models\n\n\nclass Auth(ModelBase):\n class LoginType:\n Kakao = 0\n Apple = 1\n types = (\n (Kakao, 'kakao'),\n (Apple, 'apple'),\n )\n\n identifier = models.CharField(max_length=50, unique=True)\n email = models.CharField(max_length=100, null=True, blank=True)\n user = models.ForeignKey(\"User\", related_name=\"auth_user\", on_delete=models.CASCADE, db_column=\"user\")\n social_token = models.CharField(max_length=150)\n login_type = models.SmallIntegerField('state', choices=LoginType.types)\n token = models.CharField(max_length=300)\n\n",
"from apps.core.models.soft_delete_model_base import ModelBase\nfrom django.db import models\n\n\nclass Auth(ModelBase):\n\n\n class LoginType:\n Kakao = 0\n Apple = 1\n types = (Kakao, 'kakao'), (Apple, 'apple')\n identifier = models.CharField(max_length=50, unique=True)\n email = models.CharField(max_length=100, null=True, blank=True)\n user = models.ForeignKey('User', related_name='auth_user', on_delete=\n models.CASCADE, db_column='user')\n social_token = models.CharField(max_length=150)\n login_type = models.SmallIntegerField('state', choices=LoginType.types)\n token = models.CharField(max_length=300)\n",
"<import token>\n\n\nclass Auth(ModelBase):\n\n\n class LoginType:\n Kakao = 0\n Apple = 1\n types = (Kakao, 'kakao'), (Apple, 'apple')\n identifier = models.CharField(max_length=50, unique=True)\n email = models.CharField(max_length=100, null=True, blank=True)\n user = models.ForeignKey('User', related_name='auth_user', on_delete=\n models.CASCADE, db_column='user')\n social_token = models.CharField(max_length=150)\n login_type = models.SmallIntegerField('state', choices=LoginType.types)\n token = models.CharField(max_length=300)\n",
"<import token>\n\n\nclass Auth(ModelBase):\n\n\n class LoginType:\n Kakao = 0\n Apple = 1\n types = (Kakao, 'kakao'), (Apple, 'apple')\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
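A short usage sketch for the choices field above, assuming the model lives in a registered Django app; get_FOO_display() is standard Django for any field declared with choices, and the data values here are hypothetical:

auth = Auth.objects.create(
    identifier='kakao-12345',
    user=some_user,                   # assumed existing User instance
    social_token='sample-social-token',
    login_type=Auth.LoginType.Kakao,
    token='sample-jwt',
)
kakao_logins = Auth.objects.filter(login_type=Auth.LoginType.Kakao)
print(auth.get_login_type_display())  # -> 'kakao'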
99,314 |
b976a4802ff797f6886e8160c75adfaf1f317fb6
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: tung doan
"""
import numpy as np
import matplotlib.pyplot as plt
from tslearn.datasets import UCR_UEA_datasets
from tmf import tmf
""" load data """
data_loader = UCR_UEA_datasets()
X_tr, y_tr, X_te, y_te = data_loader.load_dataset('Coffee')
X = X_tr[:, ::2, 0]  # reduce length by a factor of 2 for fast demo
y = y_tr
# Ground truth indicator matrix
grd = np.zeros((y.size, y.max()+1))
grd[np.arange(y.size),y] = 1
""" run temporal matrix factorization """
k = y.max() + 1
l = X.shape[1]
lambda_1 = lambda_2 = 1e-2
lambda_3 = 10
sigma = 0.05 ** 2
eta = 1e-2
o_max = 15
i_max = 50
F_list, G_list = tmf(X, k, l, lambda_1, lambda_2, lambda_3, sigma, eta, o_max, i_max)
""" plot """
plt.style.use(style='ggplot')
colors = ['tab:blue', 'tab:red', 'tab:green', 'tab:purple', 'tab:cyan']  # the tab palette has no black
plt.figure(1)
# Plot initial centroids
plt.title('Initial centroids')
for i in range(k):
plt.plot(F_list[0][i],color=colors[i],label='Centroid '+str(i+1),linewidth=2)
plt.legend()
# Plot resulting centroids
plt.figure(2)
plt.title('Resulting centroids')
for i in range(k):
plt.plot(F_list[-1][i],color=colors[i],label='Centroid '+str(i+1),linewidth=2)
plt.legend()
# Plot indicator matrix
plt.style.use(style='classic')
fig, axs = plt.subplots(2,1,figsize=(100,50))
## Plot ground truth indicator matrix
axs[0].set_title('Ground truth indicators',pad=20)
axs[0].matshow(grd.T, cmap=plt.cm.Blues)
for i in range(y.shape[0]):
for j in range(k):
c = format(grd[i,j], '.1f')
axs[0].text(i, j, c, va='center', ha='center')
axs[0].set_xticks(np.arange(y.shape[0]))
axs[0].xaxis.set_ticks_position('bottom')
## Plot resulting indicator matrix
axs[1].set_title('Resulting indicators', pad=20)
axs[1].matshow(G_list[-1].T, cmap=plt.cm.Blues)
for i in range(X.shape[0]):
for j in range(k):
c = format(G_list[-1][i,j], '.1f')
axs[1].text(i, j, c, va='center', ha='center')
axs[1].set_xticks(np.arange(X.shape[0]))
axs[1].xaxis.set_ticks_position('bottom')
|
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: tung doan\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom tslearn.datasets import UCR_UEA_datasets\nfrom tmf import tmf\n\n\"\"\" load data \"\"\"\ndata_loader = UCR_UEA_datasets()\nX_tr, y_tr, X_te, y_te = data_loader.load_dataset('Coffee')\nX = X_tr[:,::2,0] #reduce length a factor of 2 for fast demo\ny = y_tr\n# Ground truth indicator matrix\ngrd = np.zeros((y.size, y.max()+1)) \ngrd[np.arange(y.size),y] = 1\n\n\"\"\" run temporal matrix factorization \"\"\"\nk = y.max()+1; l = X.shape[1]; lambda_1 = lambda_2 = 1e-2; lambda_3 = 10; sigma = 0.05 ** 2; eta = 1e-2; o_max = 15; i_max = 50;\nF_list, G_list = tmf(X, k, l, lambda_1, lambda_2, lambda_3, sigma, eta, o_max, i_max)\n\n\"\"\" plot \"\"\"\nplt.style.use(style='ggplot')\ncolors = ['tab:blue','tab:red','tab:green','tab:black','tab:cyan']\nplt.figure(1)\n\n# Plot initial centroid\nplt.title('Initial centroids')\nfor i in range(k):\n plt.plot(F_list[0][i],color=colors[i],label='Centroid '+str(i+1),linewidth=2)\nplt.legend()\n\n# Plot resulted centroid\nplt.figure(2)\nplt.title('Resulted centroids')\nfor i in range(k):\n plt.plot(F_list[-1][i],color=colors[i],label='Centroid '+str(i+1),linewidth=2)\nplt.legend()\n\n# Plot indicator matrix \nplt.style.use(style='classic')\nfig, axs = plt.subplots(2,1,figsize=(100,50))\n## Plot ground truth indicator matrix\naxs[0].set_title('Ground truth indicators',pad=20)\naxs[0].matshow(grd.T, cmap=plt.cm.Blues)\nfor i in range(y.shape[0]):\n for j in range(k):\n c = format(grd[i,j], '.1f') \n axs[0].text(i, j, c, va='center', ha='center')\naxs[0].set_xticks(np.arange(y.shape[0]))\naxs[0].xaxis.set_ticks_position('bottom') \n## Plot resulted indicator matrix \naxs[1].set_title('Resulted indicators',pad=20)\naxs[1].matshow(G_list[-1].T, cmap=plt.cm.Blues)\nfor i in range(X.shape[0]):\n for j in range(k):\n c = format(G_list[-1][i,j], '.1f') \n axs[1].text(i, j, c, va='center', ha='center')\naxs[1].set_xticks(np.arange(X.shape[0]))\naxs[1].xaxis.set_ticks_position('bottom')\n",
"<docstring token>\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tslearn.datasets import UCR_UEA_datasets\nfrom tmf import tmf\n<docstring token>\ndata_loader = UCR_UEA_datasets()\nX_tr, y_tr, X_te, y_te = data_loader.load_dataset('Coffee')\nX = X_tr[:, ::2, 0]\ny = y_tr\ngrd = np.zeros((y.size, y.max() + 1))\ngrd[np.arange(y.size), y] = 1\n<docstring token>\nk = y.max() + 1\nl = X.shape[1]\nlambda_1 = lambda_2 = 0.01\nlambda_3 = 10\nsigma = 0.05 ** 2\neta = 0.01\no_max = 15\ni_max = 50\nF_list, G_list = tmf(X, k, l, lambda_1, lambda_2, lambda_3, sigma, eta,\n o_max, i_max)\n<docstring token>\nplt.style.use(style='ggplot')\ncolors = ['tab:blue', 'tab:red', 'tab:green', 'tab:black', 'tab:cyan']\nplt.figure(1)\nplt.title('Initial centroids')\nfor i in range(k):\n plt.plot(F_list[0][i], color=colors[i], label='Centroid ' + str(i + 1),\n linewidth=2)\nplt.legend()\nplt.figure(2)\nplt.title('Resulted centroids')\nfor i in range(k):\n plt.plot(F_list[-1][i], color=colors[i], label='Centroid ' + str(i + 1),\n linewidth=2)\nplt.legend()\nplt.style.use(style='classic')\nfig, axs = plt.subplots(2, 1, figsize=(100, 50))\naxs[0].set_title('Ground truth indicators', pad=20)\naxs[0].matshow(grd.T, cmap=plt.cm.Blues)\nfor i in range(y.shape[0]):\n for j in range(k):\n c = format(grd[i, j], '.1f')\n axs[0].text(i, j, c, va='center', ha='center')\naxs[0].set_xticks(np.arange(y.shape[0]))\naxs[0].xaxis.set_ticks_position('bottom')\naxs[1].set_title('Resulted indicators', pad=20)\naxs[1].matshow(G_list[-1].T, cmap=plt.cm.Blues)\nfor i in range(X.shape[0]):\n for j in range(k):\n c = format(G_list[-1][i, j], '.1f')\n axs[1].text(i, j, c, va='center', ha='center')\naxs[1].set_xticks(np.arange(X.shape[0]))\naxs[1].xaxis.set_ticks_position('bottom')\n",
"<docstring token>\n<import token>\n<docstring token>\ndata_loader = UCR_UEA_datasets()\nX_tr, y_tr, X_te, y_te = data_loader.load_dataset('Coffee')\nX = X_tr[:, ::2, 0]\ny = y_tr\ngrd = np.zeros((y.size, y.max() + 1))\ngrd[np.arange(y.size), y] = 1\n<docstring token>\nk = y.max() + 1\nl = X.shape[1]\nlambda_1 = lambda_2 = 0.01\nlambda_3 = 10\nsigma = 0.05 ** 2\neta = 0.01\no_max = 15\ni_max = 50\nF_list, G_list = tmf(X, k, l, lambda_1, lambda_2, lambda_3, sigma, eta,\n o_max, i_max)\n<docstring token>\nplt.style.use(style='ggplot')\ncolors = ['tab:blue', 'tab:red', 'tab:green', 'tab:black', 'tab:cyan']\nplt.figure(1)\nplt.title('Initial centroids')\nfor i in range(k):\n plt.plot(F_list[0][i], color=colors[i], label='Centroid ' + str(i + 1),\n linewidth=2)\nplt.legend()\nplt.figure(2)\nplt.title('Resulted centroids')\nfor i in range(k):\n plt.plot(F_list[-1][i], color=colors[i], label='Centroid ' + str(i + 1),\n linewidth=2)\nplt.legend()\nplt.style.use(style='classic')\nfig, axs = plt.subplots(2, 1, figsize=(100, 50))\naxs[0].set_title('Ground truth indicators', pad=20)\naxs[0].matshow(grd.T, cmap=plt.cm.Blues)\nfor i in range(y.shape[0]):\n for j in range(k):\n c = format(grd[i, j], '.1f')\n axs[0].text(i, j, c, va='center', ha='center')\naxs[0].set_xticks(np.arange(y.shape[0]))\naxs[0].xaxis.set_ticks_position('bottom')\naxs[1].set_title('Resulted indicators', pad=20)\naxs[1].matshow(G_list[-1].T, cmap=plt.cm.Blues)\nfor i in range(X.shape[0]):\n for j in range(k):\n c = format(G_list[-1][i, j], '.1f')\n axs[1].text(i, j, c, va='center', ha='center')\naxs[1].set_xticks(np.arange(X.shape[0]))\naxs[1].xaxis.set_ticks_position('bottom')\n",
"<docstring token>\n<import token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\nplt.style.use(style='ggplot')\n<assignment token>\nplt.figure(1)\nplt.title('Initial centroids')\nfor i in range(k):\n plt.plot(F_list[0][i], color=colors[i], label='Centroid ' + str(i + 1),\n linewidth=2)\nplt.legend()\nplt.figure(2)\nplt.title('Resulted centroids')\nfor i in range(k):\n plt.plot(F_list[-1][i], color=colors[i], label='Centroid ' + str(i + 1),\n linewidth=2)\nplt.legend()\nplt.style.use(style='classic')\n<assignment token>\naxs[0].set_title('Ground truth indicators', pad=20)\naxs[0].matshow(grd.T, cmap=plt.cm.Blues)\nfor i in range(y.shape[0]):\n for j in range(k):\n c = format(grd[i, j], '.1f')\n axs[0].text(i, j, c, va='center', ha='center')\naxs[0].set_xticks(np.arange(y.shape[0]))\naxs[0].xaxis.set_ticks_position('bottom')\naxs[1].set_title('Resulted indicators', pad=20)\naxs[1].matshow(G_list[-1].T, cmap=plt.cm.Blues)\nfor i in range(X.shape[0]):\n for j in range(k):\n c = format(G_list[-1][i, j], '.1f')\n axs[1].text(i, j, c, va='center', ha='center')\naxs[1].set_xticks(np.arange(X.shape[0]))\naxs[1].xaxis.set_ticks_position('bottom')\n",
"<docstring token>\n<import token>\n<docstring token>\n<assignment token>\n<docstring token>\n<assignment token>\n<docstring token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
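The two-line ground-truth construction in this record is the standard one-hot scatter trick; an equivalent, arguably clearer, numpy formulation for comparison:

import numpy as np

y = np.array([0, 2, 1, 0])           # toy labels
grd = np.zeros((y.size, y.max() + 1))
grd[np.arange(y.size), y] = 1        # scatter ones, as in the script
assert np.array_equal(grd, np.eye(y.max() + 1)[y])  # identity-row indexing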
99,315 |
8c30f0928ff356eecb7f7956ba327d3d0e990a21
|
# -*- coding: utf-8 -*-
{
'name': 'Stock Expiry Report / Notification',
'summary': "Product Stock Expiry Report / Notification via email",
'description': """Product Stock Expiry Report/ Notification via email""",
'author': 'iPredict IT Solutions Pvt. Ltd.',
'website': 'http://ipredictitsolutions.com',
"support": "[email protected]",
'category': 'Warehouse',
'version': '13.0.0.1.3',
'depends': ['stock', 'product_expiry'],
'data': [
'security/ir.model.access.csv',
'wizard/product_stock_expiry_wiz_view.xml',
'views/product_view.xml',
'views/stock_config_settings_view.xml',
'report/report_product_stock_expiry.xml',
'report/report_action_view.xml',
'data/product_stock_expiration_data.xml',
],
'license': "OPL-1",
'price': 25,
'currency': "EUR",
'auto_install': False,
'installable': True,
'images': ['static/description/main.png'],
}
|
[
"# -*- coding: utf-8 -*-\n{\n 'name': 'Stock Expiry Report / Notification',\n 'summary': \"Product Stock Expiry Report / Notification via email\",\n 'description': \"\"\"Product Stock Expiry Report/ Notification via email\"\"\",\n\n 'author': 'iPredict IT Solutions Pvt. Ltd.',\n 'website': 'http://ipredictitsolutions.com',\n \"support\": \"[email protected]\",\n\n 'category': 'Warehouse',\n 'version': '13.0.0.1.3',\n 'depends': ['stock', 'product_expiry'],\n 'data': [\n 'security/ir.model.access.csv',\n 'wizard/product_stock_expiry_wiz_view.xml',\n 'views/product_view.xml',\n 'views/stock_config_settings_view.xml',\n 'report/report_product_stock_expiry.xml',\n 'report/report_action_view.xml',\n 'data/product_stock_expiration_data.xml',\n ],\n\n 'license': \"OPL-1\",\n 'price': 25,\n 'currency': \"EUR\",\n\n 'auto_install': False,\n 'installable': True,\n\n 'images': ['static/description/main.png'],\n}\n",
"{'name': 'Stock Expiry Report / Notification', 'summary':\n 'Product Stock Expiry Report / Notification via email', 'description':\n 'Product Stock Expiry Report/ Notification via email', 'author':\n 'iPredict IT Solutions Pvt. Ltd.', 'website':\n 'http://ipredictitsolutions.com', 'support':\n '[email protected]', 'category': 'Warehouse', 'version':\n '13.0.0.1.3', 'depends': ['stock', 'product_expiry'], 'data': [\n 'security/ir.model.access.csv',\n 'wizard/product_stock_expiry_wiz_view.xml', 'views/product_view.xml',\n 'views/stock_config_settings_view.xml',\n 'report/report_product_stock_expiry.xml',\n 'report/report_action_view.xml',\n 'data/product_stock_expiration_data.xml'], 'license': 'OPL-1', 'price':\n 25, 'currency': 'EUR', 'auto_install': False, 'installable': True,\n 'images': ['static/description/main.png']}\n",
"<code token>\n"
] | false |
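Because an Odoo manifest is a plain Python literal, it can be sanity-checked without Odoo installed. A sketch using ast.literal_eval (the same safe-evaluation approach Odoo's module loader takes, to the best of my knowledge); the file path is hypothetical:

import ast

REQUIRED_KEYS = {'name', 'version', 'depends', 'data'}

with open('__manifest__.py') as fh:   # hypothetical path
    manifest = ast.literal_eval(fh.read())

missing = REQUIRED_KEYS - manifest.keys()
assert not missing, f'manifest is missing keys: {missing}'
# Every data entry in this module is an XML or CSV resource.
assert all(path.endswith(('.xml', '.csv')) for path in manifest['data'])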
99,316 |
5f0decc32f3007ab8c48ef54ff2b7dd6afde60bf
|
import importlib
import CraftConfig
import CraftTestBase
from Blueprints import CraftPackageObject, CraftDependencyPackage
class CraftBlueprintTest(CraftTestBase.CraftTestBase):
def blueprintTest(self, compiler):
CraftConfig.CraftCore.settings.set("General", "ABI", compiler)
CraftPackageObject.__rootPackage = None
CraftDependencyPackage._packageCache = dict()
installable = CraftPackageObject.CraftPackageObject.root().allChildren()
CraftDependencyPackage.CraftDependencyPackage(CraftPackageObject.CraftPackageObject.get("/")).getDependencies()
class TestAPI(CraftBlueprintTest):
def test_mingw_x86(self):
self.blueprintTest("windows-mingw_86-gcc")
def test_mingw_x64(self):
self.blueprintTest("windows-mingw_64-gcc")
def test_msvc2015_x86(self):
self.blueprintTest("windows-msvc2015_86-cl")
def test_msvc2015_x64(self):
self.blueprintTest("windows-msvc2015_64-cl")
|
[
"import importlib\n\nimport CraftConfig\nimport CraftTestBase\nfrom Blueprints import CraftPackageObject, CraftDependencyPackage\n\n\nclass CraftBlueprintTest(CraftTestBase.CraftTestBase):\n def blueprintTest(self, compiler):\n CraftConfig.CraftCore.settings.set(\"General\", \"ABI\", compiler)\n\n CraftPackageObject.__rootPackage = None\n CraftDependencyPackage._packageCache = dict()\n installable = CraftPackageObject.CraftPackageObject.root().allChildren()\n CraftDependencyPackage.CraftDependencyPackage(CraftPackageObject.CraftPackageObject.get(\"/\")).getDependencies()\n\n\n\nclass TestAPI(CraftBlueprintTest):\n def test_mingw_x86(self):\n self.blueprintTest(\"windows-mingw_86-gcc\")\n\n def test_mingw_x64(self):\n self.blueprintTest(\"windows-mingw_64-gcc\")\n\n def test_msvc2015_x86(self):\n self.blueprintTest(\"windows-msvc2015_86-cl\")\n\n def test_msvc2015_x64(self):\n self.blueprintTest(\"windows-msvc2015_64-cl\")\n",
"import importlib\nimport CraftConfig\nimport CraftTestBase\nfrom Blueprints import CraftPackageObject, CraftDependencyPackage\n\n\nclass CraftBlueprintTest(CraftTestBase.CraftTestBase):\n\n def blueprintTest(self, compiler):\n CraftConfig.CraftCore.settings.set('General', 'ABI', compiler)\n CraftPackageObject.__rootPackage = None\n CraftDependencyPackage._packageCache = dict()\n installable = CraftPackageObject.CraftPackageObject.root().allChildren(\n )\n CraftDependencyPackage.CraftDependencyPackage(CraftPackageObject.\n CraftPackageObject.get('/')).getDependencies()\n\n\nclass TestAPI(CraftBlueprintTest):\n\n def test_mingw_x86(self):\n self.blueprintTest('windows-mingw_86-gcc')\n\n def test_mingw_x64(self):\n self.blueprintTest('windows-mingw_64-gcc')\n\n def test_msvc2015_x86(self):\n self.blueprintTest('windows-msvc2015_86-cl')\n\n def test_msvc2015_x64(self):\n self.blueprintTest('windows-msvc2015_64-cl')\n",
"<import token>\n\n\nclass CraftBlueprintTest(CraftTestBase.CraftTestBase):\n\n def blueprintTest(self, compiler):\n CraftConfig.CraftCore.settings.set('General', 'ABI', compiler)\n CraftPackageObject.__rootPackage = None\n CraftDependencyPackage._packageCache = dict()\n installable = CraftPackageObject.CraftPackageObject.root().allChildren(\n )\n CraftDependencyPackage.CraftDependencyPackage(CraftPackageObject.\n CraftPackageObject.get('/')).getDependencies()\n\n\nclass TestAPI(CraftBlueprintTest):\n\n def test_mingw_x86(self):\n self.blueprintTest('windows-mingw_86-gcc')\n\n def test_mingw_x64(self):\n self.blueprintTest('windows-mingw_64-gcc')\n\n def test_msvc2015_x86(self):\n self.blueprintTest('windows-msvc2015_86-cl')\n\n def test_msvc2015_x64(self):\n self.blueprintTest('windows-msvc2015_64-cl')\n",
"<import token>\n\n\nclass CraftBlueprintTest(CraftTestBase.CraftTestBase):\n <function token>\n\n\nclass TestAPI(CraftBlueprintTest):\n\n def test_mingw_x86(self):\n self.blueprintTest('windows-mingw_86-gcc')\n\n def test_mingw_x64(self):\n self.blueprintTest('windows-mingw_64-gcc')\n\n def test_msvc2015_x86(self):\n self.blueprintTest('windows-msvc2015_86-cl')\n\n def test_msvc2015_x64(self):\n self.blueprintTest('windows-msvc2015_64-cl')\n",
"<import token>\n<class token>\n\n\nclass TestAPI(CraftBlueprintTest):\n\n def test_mingw_x86(self):\n self.blueprintTest('windows-mingw_86-gcc')\n\n def test_mingw_x64(self):\n self.blueprintTest('windows-mingw_64-gcc')\n\n def test_msvc2015_x86(self):\n self.blueprintTest('windows-msvc2015_86-cl')\n\n def test_msvc2015_x64(self):\n self.blueprintTest('windows-msvc2015_64-cl')\n",
"<import token>\n<class token>\n\n\nclass TestAPI(CraftBlueprintTest):\n <function token>\n\n def test_mingw_x64(self):\n self.blueprintTest('windows-mingw_64-gcc')\n\n def test_msvc2015_x86(self):\n self.blueprintTest('windows-msvc2015_86-cl')\n\n def test_msvc2015_x64(self):\n self.blueprintTest('windows-msvc2015_64-cl')\n",
"<import token>\n<class token>\n\n\nclass TestAPI(CraftBlueprintTest):\n <function token>\n\n def test_mingw_x64(self):\n self.blueprintTest('windows-mingw_64-gcc')\n\n def test_msvc2015_x86(self):\n self.blueprintTest('windows-msvc2015_86-cl')\n <function token>\n",
"<import token>\n<class token>\n\n\nclass TestAPI(CraftBlueprintTest):\n <function token>\n <function token>\n\n def test_msvc2015_x86(self):\n self.blueprintTest('windows-msvc2015_86-cl')\n <function token>\n",
"<import token>\n<class token>\n\n\nclass TestAPI(CraftBlueprintTest):\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n<class token>\n"
] | false |
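The four test methods in this record differ only in the ABI string. A sketch that generates them from a table instead, keeping unittest discovery intact; the derived method-name convention is my own, not the Craft project's:

ABIS = ['windows-mingw_86-gcc', 'windows-mingw_64-gcc',
        'windows-msvc2015_86-cl', 'windows-msvc2015_64-cl']

def _make_test(abi):
    def test(self):
        self.blueprintTest(abi)
    return test

for abi in ABIS:
    # e.g. test_windows_mingw_86_gcc, picked up by unittest discovery
    setattr(TestAPI, 'test_' + abi.replace('-', '_'), _make_test(abi))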
99,317 |
d88fc2b343585c52f5f379ce4fd76e10d67edb9a
|
from gatco import Blueprint
from goblin.server import app
# from goblin.database import db
from goblin.extensions import jinja
@app.route('/page/get')
def index(request):
#objs = Page.query.filter().limit(10)
return jinja.render('press/front/page/get.html', request)
|
[
"from gatco import Blueprint\nfrom goblin.server import app\n# from goblin.database import db\nfrom goblin.extensions import jinja\n\[email protected]('/page/get')\ndef index(request):\n #objs = Page.query.filter().limit(10)\n return jinja.render('press/front/page/get.html', request)",
"from gatco import Blueprint\nfrom goblin.server import app\nfrom goblin.extensions import jinja\n\n\[email protected]('/page/get')\ndef index(request):\n return jinja.render('press/front/page/get.html', request)\n",
"<import token>\n\n\[email protected]('/page/get')\ndef index(request):\n return jinja.render('press/front/page/get.html', request)\n",
"<import token>\n<function token>\n"
] | false |
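The commented-out query in this record sketches the intended data flow. Re-enabling it might look like the following, assuming a Page model with the Flask-SQLAlchemy-style query attribute spelled in the comment, and assuming jinja.render forwards keyword arguments as template context (neither is shown in the record):

@app.route('/page/list')
def page_list(request):
    objs = Page.query.filter().limit(10)  # Page import not shown above
    return jinja.render('press/front/page/list.html', request, objs=objs)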
99,318 |
9aed5030e3c03fb4a7f981e9b55d7aaea8534ac6
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#author: Mohammad-AlYasfo
# Module-level tuning constants for the robot.
BOARD = {
'MOTOR_A_IN1': 31,
'MOTOR_A_IN2': 33,
'MOTOR_B_IN1': 37,
'MOTOR_B_IN2': 35,
'MOTOR_A_ENA': 29,
'MOTOR_B_ENB': 32,
'MOTOR_C_IN1': 13,
'MOTOR_C_IN2': 15,
'MOTOR_D_IN1': 18,
'MOTOR_D_IN2': 16,
'MOTOR_C_ENC': 11,
'MOTOR_D_END': 12,
'ENCODER_R' : 36,
'ENCODER_L' : 38,
'INFRA' : 40,
'SERVO_H' : 22,
'SERVO_V' : 24,
}
MAX_RESPONSE_TIME = 0.5
INITIAL_RESPONCE_TIME = 10
PI = 3.14752
STEP = 0.1
DC_FREQ = 100
SR_FREQ = 1000
MIDEAN_WINDOW = 15
ACCELERATION_STEP = 20
MIN_SPEED_CHANGE = 5
|
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#author: Mohammad-AlYasfo\n\nglobal BOARD, PI, STEP, DC_FREQ, SR_FREQ,RESPONSE_TIME, MIDEAN_WINDOW, INITIAL_RESPONCE_TIME\nBOARD = {\n\n 'MOTOR_A_IN1': 31,\n 'MOTOR_A_IN2': 33,\n 'MOTOR_B_IN1': 37,\n 'MOTOR_B_IN2': 35,\n 'MOTOR_A_ENA': 29,\n 'MOTOR_B_ENB': 32,\n\n 'MOTOR_C_IN1': 13,\n 'MOTOR_C_IN2': 15,\n 'MOTOR_D_IN1': 18,\n 'MOTOR_D_IN2': 16,\n\n 'MOTOR_C_ENC': 11,\n 'MOTOR_D_END': 12,\n\n 'ENCODER_R' : 36,\n 'ENCODER_L' : 38,\n\n 'INFRA' : 40,\n\n 'SERVO_H' : 22,\n 'SERVO_V' : 24,\n\n }\n\nMAX_RESPONSE_TIME=0.5\nINITIAL_RESPONCE_TIME=10;\nPI = 3.14752\nSTEP = 0.1\nDC_FREQ = 100\nSR_FREQ = 1000\nMIDEAN_WINDOW = 15\nACCELERATION_STEP=20\nMIN_SPEED_CHANGE=5\n",
"global BOARD, PI, STEP, DC_FREQ, SR_FREQ, RESPONSE_TIME, MIDEAN_WINDOW, INITIAL_RESPONCE_TIME\nBOARD = {'MOTOR_A_IN1': 31, 'MOTOR_A_IN2': 33, 'MOTOR_B_IN1': 37,\n 'MOTOR_B_IN2': 35, 'MOTOR_A_ENA': 29, 'MOTOR_B_ENB': 32, 'MOTOR_C_IN1':\n 13, 'MOTOR_C_IN2': 15, 'MOTOR_D_IN1': 18, 'MOTOR_D_IN2': 16,\n 'MOTOR_C_ENC': 11, 'MOTOR_D_END': 12, 'ENCODER_R': 36, 'ENCODER_L': 38,\n 'INFRA': 40, 'SERVO_H': 22, 'SERVO_V': 24}\nMAX_RESPONSE_TIME = 0.5\nINITIAL_RESPONCE_TIME = 10\nPI = 3.14752\nSTEP = 0.1\nDC_FREQ = 100\nSR_FREQ = 1000\nMIDEAN_WINDOW = 15\nACCELERATION_STEP = 20\nMIN_SPEED_CHANGE = 5\n",
"global BOARD, PI, STEP, DC_FREQ, SR_FREQ, RESPONSE_TIME, MIDEAN_WINDOW, INITIAL_RESPONCE_TIME\n<assignment token>\n",
"<code token>\n<assignment token>\n"
] | false |
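Pin numbers in this record are looked up by string key, so a typo fails only at runtime with a bare KeyError. A small hypothetical accessor (not part of the original module) that fails loudly instead:

def pin(name):
    """Return the pin number for *name*, listing valid names on a typo."""
    try:
        return BOARD[name]
    except KeyError:
        raise KeyError('unknown pin %r; known pins: %s'
                       % (name, sorted(BOARD))) from None

motor_a = pin('MOTOR_A_IN1')  # -> 31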
99,319 |
a42b9bccab7143f31c943534083852dfa803c60b
|
import pytheas.patterns
import pytheas.sfdaemon
import redis
import logging
# Config stuff
FETCH_LIST = "fetch_list"
SEND_LIST = "send_list"
REDIS_HOST = "localhost"
REDIS_PORT = 6379
logging.basicConfig(filename="pytheas.log")
logger = logging.getLogger("pytheas")
logger.setLevel(logging.INFO)
class RedisFetcher(pytheas.patterns.Fetcher):
def __init__(self, redis_host, redis_port, fetch_list):
self.__redis_connection = redis.StrictRedis(redis_host, redis_port)
self.fetch_list = fetch_list
def fetch(self):
return self.__redis_connection.brpop(self.fetch_list)[1]
class RedisSender(pytheas.patterns.Sender):
def __init__(self, redis_host, redis_port, send_list):
self.__redis_connection = redis.StrictRedis(redis_host, redis_port)
#logger.info("RedisSender connection established")
self.send_list = send_list
def send(self, data):
self.__redis_connection.lpush(self.send_list, data)
#logger.info("Sent to redis: " + data)
if __name__ == "__main__":
local_fetcher = RedisFetcher(REDIS_HOST, REDIS_PORT, FETCH_LIST)
local_sender = RedisSender(REDIS_HOST, REDIS_PORT, SEND_LIST)
redis_daemon = pytheas.sfdaemon.Pytheas(local_fetcher, local_sender)
redis_daemon.run()
|
[
"import pytheas.patterns\nimport pytheas.sfdaemon\nimport redis\nimport logging\n\n# Config stuff\nFETCH_LIST = \"fetch_list\"\nSEND_LIST = \"send_list\"\nREDIS_HOST = \"localhost\"\nREDIS_PORT = 6379\n\nlogging.basicConfig(filename=\"pytheas.log\")\nlogger = logging.getLogger(\"pytheas\")\nlogger.setLevel(logging.INFO)\n\nclass RedisFetcher(pytheas.patterns.Fetcher):\n \n def __init__(self, redis_host, redis_port, fetch_list):\n self.__redis_connection = redis.StrictRedis(redis_host, redis_port)\n self.fetch_list = fetch_list\n \n def fetch(self):\n return self.__redis_connection.brpop(self.fetch_list)[1]\n\nclass RedisSender(pytheas.patterns.Sender):\n \n def __init__(self, redis_host, redis_port, send_list):\n self.__redis_connection = redis.StrictRedis(redis_host, redis_port)\n #logger.info(\"RedisSender connection established\")\n self.send_list = send_list\n\n def send(self, data):\n self.__redis_connection.lpush(self.send_list, data)\n #logger.info(\"Sent to redis: \" + data)\n\nif __name__ == \"__main__\":\n local_fetcher = RedisFetcher(REDIS_HOST, REDIS_PORT, FETCH_LIST)\n local_sender = RedisSender(REDIS_HOST, REDIS_PORT, SEND_LIST)\n redis_daemon = pytheas.sfdaemon.Pytheas(local_fetcher, local_sender)\n redis_daemon.run()\n",
"import pytheas.patterns\nimport pytheas.sfdaemon\nimport redis\nimport logging\nFETCH_LIST = 'fetch_list'\nSEND_LIST = 'send_list'\nREDIS_HOST = 'localhost'\nREDIS_PORT = 6379\nlogging.basicConfig(filename='pytheas.log')\nlogger = logging.getLogger('pytheas')\nlogger.setLevel(logging.INFO)\n\n\nclass RedisFetcher(pytheas.patterns.Fetcher):\n\n def __init__(self, redis_host, redis_port, fetch_list):\n self.__redis_connection = redis.StrictRedis(redis_host, redis_port)\n self.fetch_list = fetch_list\n\n def fetch(self):\n return self.__redis_connection.brpop(self.fetch_list)[1]\n\n\nclass RedisSender(pytheas.patterns.Sender):\n\n def __init__(self, redis_host, redis_port, send_list):\n self.__redis_connection = redis.StrictRedis(redis_host, redis_port)\n self.send_list = send_list\n\n def send(self, data):\n self.__redis_connection.lpush(self.send_list, data)\n\n\nif __name__ == '__main__':\n local_fetcher = RedisFetcher(REDIS_HOST, REDIS_PORT, FETCH_LIST)\n local_sender = RedisSender(REDIS_HOST, REDIS_PORT, SEND_LIST)\n redis_daemon = pytheas.sfdaemon.Pytheas(local_fetcher, local_sender)\n redis_daemon.run()\n",
"<import token>\nFETCH_LIST = 'fetch_list'\nSEND_LIST = 'send_list'\nREDIS_HOST = 'localhost'\nREDIS_PORT = 6379\nlogging.basicConfig(filename='pytheas.log')\nlogger = logging.getLogger('pytheas')\nlogger.setLevel(logging.INFO)\n\n\nclass RedisFetcher(pytheas.patterns.Fetcher):\n\n def __init__(self, redis_host, redis_port, fetch_list):\n self.__redis_connection = redis.StrictRedis(redis_host, redis_port)\n self.fetch_list = fetch_list\n\n def fetch(self):\n return self.__redis_connection.brpop(self.fetch_list)[1]\n\n\nclass RedisSender(pytheas.patterns.Sender):\n\n def __init__(self, redis_host, redis_port, send_list):\n self.__redis_connection = redis.StrictRedis(redis_host, redis_port)\n self.send_list = send_list\n\n def send(self, data):\n self.__redis_connection.lpush(self.send_list, data)\n\n\nif __name__ == '__main__':\n local_fetcher = RedisFetcher(REDIS_HOST, REDIS_PORT, FETCH_LIST)\n local_sender = RedisSender(REDIS_HOST, REDIS_PORT, SEND_LIST)\n redis_daemon = pytheas.sfdaemon.Pytheas(local_fetcher, local_sender)\n redis_daemon.run()\n",
"<import token>\n<assignment token>\nlogging.basicConfig(filename='pytheas.log')\n<assignment token>\nlogger.setLevel(logging.INFO)\n\n\nclass RedisFetcher(pytheas.patterns.Fetcher):\n\n def __init__(self, redis_host, redis_port, fetch_list):\n self.__redis_connection = redis.StrictRedis(redis_host, redis_port)\n self.fetch_list = fetch_list\n\n def fetch(self):\n return self.__redis_connection.brpop(self.fetch_list)[1]\n\n\nclass RedisSender(pytheas.patterns.Sender):\n\n def __init__(self, redis_host, redis_port, send_list):\n self.__redis_connection = redis.StrictRedis(redis_host, redis_port)\n self.send_list = send_list\n\n def send(self, data):\n self.__redis_connection.lpush(self.send_list, data)\n\n\nif __name__ == '__main__':\n local_fetcher = RedisFetcher(REDIS_HOST, REDIS_PORT, FETCH_LIST)\n local_sender = RedisSender(REDIS_HOST, REDIS_PORT, SEND_LIST)\n redis_daemon = pytheas.sfdaemon.Pytheas(local_fetcher, local_sender)\n redis_daemon.run()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass RedisFetcher(pytheas.patterns.Fetcher):\n\n def __init__(self, redis_host, redis_port, fetch_list):\n self.__redis_connection = redis.StrictRedis(redis_host, redis_port)\n self.fetch_list = fetch_list\n\n def fetch(self):\n return self.__redis_connection.brpop(self.fetch_list)[1]\n\n\nclass RedisSender(pytheas.patterns.Sender):\n\n def __init__(self, redis_host, redis_port, send_list):\n self.__redis_connection = redis.StrictRedis(redis_host, redis_port)\n self.send_list = send_list\n\n def send(self, data):\n self.__redis_connection.lpush(self.send_list, data)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass RedisFetcher(pytheas.patterns.Fetcher):\n\n def __init__(self, redis_host, redis_port, fetch_list):\n self.__redis_connection = redis.StrictRedis(redis_host, redis_port)\n self.fetch_list = fetch_list\n <function token>\n\n\nclass RedisSender(pytheas.patterns.Sender):\n\n def __init__(self, redis_host, redis_port, send_list):\n self.__redis_connection = redis.StrictRedis(redis_host, redis_port)\n self.send_list = send_list\n\n def send(self, data):\n self.__redis_connection.lpush(self.send_list, data)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n\n\nclass RedisFetcher(pytheas.patterns.Fetcher):\n <function token>\n <function token>\n\n\nclass RedisSender(pytheas.patterns.Sender):\n\n def __init__(self, redis_host, redis_port, send_list):\n self.__redis_connection = redis.StrictRedis(redis_host, redis_port)\n self.send_list = send_list\n\n def send(self, data):\n self.__redis_connection.lpush(self.send_list, data)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n\n\nclass RedisSender(pytheas.patterns.Sender):\n\n def __init__(self, redis_host, redis_port, send_list):\n self.__redis_connection = redis.StrictRedis(redis_host, redis_port)\n self.send_list = send_list\n\n def send(self, data):\n self.__redis_connection.lpush(self.send_list, data)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n\n\nclass RedisSender(pytheas.patterns.Sender):\n <function token>\n\n def send(self, data):\n self.__redis_connection.lpush(self.send_list, data)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n\n\nclass RedisSender(pytheas.patterns.Sender):\n <function token>\n <function token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<class token>\n<class token>\n<code token>\n"
] | false |
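lpush on one end plus brpop on the other gives a blocking FIFO queue. A round-trip smoke test for the two classes above, assuming a Redis server is reachable on localhost:6379; note that redis-py returns bytes, and brpop returns a (key, value) pair, which fetch() already unpacks:

queue = 'demo_list'
sender = RedisSender(REDIS_HOST, REDIS_PORT, queue)
fetcher = RedisFetcher(REDIS_HOST, REDIS_PORT, queue)

sender.send('hello')
assert fetcher.fetch() == b'hello'  # lpush + brpop preserves FIFO order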
99,320 |
f8d5bf1859330155482b689905f4c29eab3180db
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import argparse
import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.contrib import learn
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
NUM_PLOT_BINS = 30
MODEL_NAME = 'iris_model'
WEIGHTS_NAME = MODEL_NAME + '/Stack/fully_connected_1/weights'
def model(features, target):
global args
regularizer = None
regularization_type = args.regularization_type.lower()
regularization_value = args.regularization_value
if regularization_type == "l1":
print("Using L1 regularizer, val =", regularization_value)
regularizer = tf.contrib.layers.l1_regularizer(regularization_value)
elif regularization_type == "l2":
print("Using L2 regularizer, val =", regularization_value)
regularizer = tf.contrib.layers.l2_regularizer(regularization_value)
else:
print("Not using regularization")
target = tf.one_hot(target, 3, 1, 0)
with tf.variable_scope(MODEL_NAME, regularizer=regularizer):
features = layers.stack(features, layers.fully_connected, [10, 20, 10])
logits = layers.fully_connected(features, 3, activation_fn=None)
loss = tf.contrib.losses.softmax_cross_entropy(logits, target)
if regularizer:
loss = loss + sum(
tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def plot_weights(flat_weights, plot_file_name, title_name):
fig = plt.figure()
ax = fig.add_subplot(111)
plt.suptitle("Weights histogram (1st layer fc) " + title_name)
ax.hist(flat_weights, NUM_PLOT_BINS, color='green', alpha=0.8)
print("Saving histogram of weights in:", plot_file_name)
fig.savefig(plot_file_name)
plt.close(fig)
def main(argv):
global args
parser = argparse.ArgumentParser()
parser.add_argument(
'--regularization_type',
default="none",
help="Regularization type: l1, l2")
parser.add_argument(
'--regularization_value',
type=float,
default=0.0,
help="Value used for regularization. defualt 0.0")
parser.add_argument(
'--weights_file',
default='weights_hist.png',
help="Filename to save the histogram. Default: weights_hist.png")
args = parser.parse_args()
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2)
classifier = learn.Estimator(model_fn=model)
classifier.fit(x_train, y_train, steps=1000)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
weights = classifier.get_variable_value(WEIGHTS_NAME)
flat_weights = [w for wl in weights for w in wl]
plot_weights(flat_weights, args.weights_file, args.regularization_type)
if __name__ == '__main__':
tf.app.run()
|
[
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom sklearn import cross_validation\nfrom sklearn import datasets\nfrom sklearn import metrics\n\nimport argparse\nimport tensorflow as tf\nfrom tensorflow.contrib import layers\nfrom tensorflow.contrib import learn\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nNUM_PLOT_BINS = 30\nMODEL_NAME = 'iris_model'\nWEIGHTS_NAME = MODEL_NAME + '/Stack/fully_connected_1/weights'\n\n\ndef model(features, target):\n global args\n\n regularizer = None\n regularization_type = args.regularization_type.lower()\n regularization_value = args.regularization_value\n if regularization_type == \"l1\":\n print(\"Using L1 regularizer, val =\", regularization_value)\n regularizer = tf.contrib.layers.l1_regularizer(regularization_value)\n elif regularization_type == \"l2\":\n print(\"Using L2 regularizer, val =\", regularization_value)\n regularizer = tf.contrib.layers.l2_regularizer(regularization_value)\n else:\n print(\"Not using regularization\")\n\n target = tf.one_hot(target, 3, 1, 0)\n with tf.variable_scope(MODEL_NAME, regularizer=regularizer):\n features = layers.stack(features, layers.fully_connected, [10, 20, 10])\n logits = layers.fully_connected(features, 3, activation_fn=None)\n loss = tf.contrib.losses.softmax_cross_entropy(logits, target)\n if regularizer:\n loss = loss + sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n\n train_op = tf.contrib.layers.optimize_loss(\n loss,\n tf.contrib.framework.get_global_step(),\n optimizer='Adagrad',\n learning_rate=0.1)\n\n return ({\n 'class': tf.argmax(logits, 1),\n 'prob': tf.nn.softmax(logits)\n }, loss, train_op)\n\n\ndef plot_weights(flat_weights, plot_file_name, title_name):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n plt.suptitle(\"Weights histogram (1st layer fc) \" + title_name)\n ax.hist(flat_weights, NUM_PLOT_BINS, color='green', alpha=0.8)\n print(\"Saving histogram of weights in:\", plot_file_name)\n fig.savefig(plot_file_name)\n plt.close(fig)\n\n\ndef main(argv):\n global args\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--regularization_type',\n default=\"none\",\n help=\"Regularization type: l1, l2\")\n parser.add_argument(\n '--regularization_value',\n type=float,\n default=0.0,\n help=\"Value used for regularization. defualt 0.0\")\n parser.add_argument(\n '--weights_file',\n default='weights_hist.png',\n help=\"Filename to save the histogram. Default: weights_hist.png\")\n args = parser.parse_args()\n iris = datasets.load_iris()\n x_train, x_test, y_train, y_test = cross_validation.train_test_split(\n iris.data, iris.target, test_size=0.2)\n classifier = learn.Estimator(model_fn=model)\n classifier.fit(x_train, y_train, steps=1000)\n y_predicted = [\n p['class'] for p in classifier.predict(\n x_test, as_iterable=True)\n ]\n score = metrics.accuracy_score(y_test, y_predicted)\n print('Accuracy: {0:f}'.format(score))\n\n weights = classifier.get_variable_value(WEIGHTS_NAME)\n flat_weights = [w for wl in weights for w in wl]\n plot_weights(flat_weights, args.weights_file, args.regularization_type)\n\n\nif __name__ == '__main__':\n tf.app.run()\n",
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom sklearn import cross_validation\nfrom sklearn import datasets\nfrom sklearn import metrics\nimport argparse\nimport tensorflow as tf\nfrom tensorflow.contrib import layers\nfrom tensorflow.contrib import learn\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nNUM_PLOT_BINS = 30\nMODEL_NAME = 'iris_model'\nWEIGHTS_NAME = MODEL_NAME + '/Stack/fully_connected_1/weights'\n\n\ndef model(features, target):\n global args\n regularizer = None\n regularization_type = args.regularization_type.lower()\n regularization_value = args.regularization_value\n if regularization_type == 'l1':\n print('Using L1 regularizer, val =', regularization_value)\n regularizer = tf.contrib.layers.l1_regularizer(regularization_value)\n elif regularization_type == 'l2':\n print('Using L2 regularizer, val =', regularization_value)\n regularizer = tf.contrib.layers.l2_regularizer(regularization_value)\n else:\n print('Not using regularization')\n target = tf.one_hot(target, 3, 1, 0)\n with tf.variable_scope(MODEL_NAME, regularizer=regularizer):\n features = layers.stack(features, layers.fully_connected, [10, 20, 10])\n logits = layers.fully_connected(features, 3, activation_fn=None)\n loss = tf.contrib.losses.softmax_cross_entropy(logits, target)\n if regularizer:\n loss = loss + sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n )\n train_op = tf.contrib.layers.optimize_loss(loss, tf.contrib.framework.\n get_global_step(), optimizer='Adagrad', learning_rate=0.1)\n return {'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits)\n }, loss, train_op\n\n\ndef plot_weights(flat_weights, plot_file_name, title_name):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n plt.suptitle('Weights histogram (1st layer fc) ' + title_name)\n ax.hist(flat_weights, NUM_PLOT_BINS, color='green', alpha=0.8)\n print('Saving histogram of weights in:', plot_file_name)\n fig.savefig(plot_file_name)\n plt.close(fig)\n\n\ndef main(argv):\n global args\n parser = argparse.ArgumentParser()\n parser.add_argument('--regularization_type', default='none', help=\n 'Regularization type: l1, l2')\n parser.add_argument('--regularization_value', type=float, default=0.0,\n help='Value used for regularization. defualt 0.0')\n parser.add_argument('--weights_file', default='weights_hist.png', help=\n 'Filename to save the histogram. Default: weights_hist.png')\n args = parser.parse_args()\n iris = datasets.load_iris()\n x_train, x_test, y_train, y_test = cross_validation.train_test_split(iris\n .data, iris.target, test_size=0.2)\n classifier = learn.Estimator(model_fn=model)\n classifier.fit(x_train, y_train, steps=1000)\n y_predicted = [p['class'] for p in classifier.predict(x_test,\n as_iterable=True)]\n score = metrics.accuracy_score(y_test, y_predicted)\n print('Accuracy: {0:f}'.format(score))\n weights = classifier.get_variable_value(WEIGHTS_NAME)\n flat_weights = [w for wl in weights for w in wl]\n plot_weights(flat_weights, args.weights_file, args.regularization_type)\n\n\nif __name__ == '__main__':\n tf.app.run()\n",
"<import token>\nmatplotlib.use('Agg')\n<import token>\nNUM_PLOT_BINS = 30\nMODEL_NAME = 'iris_model'\nWEIGHTS_NAME = MODEL_NAME + '/Stack/fully_connected_1/weights'\n\n\ndef model(features, target):\n global args\n regularizer = None\n regularization_type = args.regularization_type.lower()\n regularization_value = args.regularization_value\n if regularization_type == 'l1':\n print('Using L1 regularizer, val =', regularization_value)\n regularizer = tf.contrib.layers.l1_regularizer(regularization_value)\n elif regularization_type == 'l2':\n print('Using L2 regularizer, val =', regularization_value)\n regularizer = tf.contrib.layers.l2_regularizer(regularization_value)\n else:\n print('Not using regularization')\n target = tf.one_hot(target, 3, 1, 0)\n with tf.variable_scope(MODEL_NAME, regularizer=regularizer):\n features = layers.stack(features, layers.fully_connected, [10, 20, 10])\n logits = layers.fully_connected(features, 3, activation_fn=None)\n loss = tf.contrib.losses.softmax_cross_entropy(logits, target)\n if regularizer:\n loss = loss + sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n )\n train_op = tf.contrib.layers.optimize_loss(loss, tf.contrib.framework.\n get_global_step(), optimizer='Adagrad', learning_rate=0.1)\n return {'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits)\n }, loss, train_op\n\n\ndef plot_weights(flat_weights, plot_file_name, title_name):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n plt.suptitle('Weights histogram (1st layer fc) ' + title_name)\n ax.hist(flat_weights, NUM_PLOT_BINS, color='green', alpha=0.8)\n print('Saving histogram of weights in:', plot_file_name)\n fig.savefig(plot_file_name)\n plt.close(fig)\n\n\ndef main(argv):\n global args\n parser = argparse.ArgumentParser()\n parser.add_argument('--regularization_type', default='none', help=\n 'Regularization type: l1, l2')\n parser.add_argument('--regularization_value', type=float, default=0.0,\n help='Value used for regularization. defualt 0.0')\n parser.add_argument('--weights_file', default='weights_hist.png', help=\n 'Filename to save the histogram. Default: weights_hist.png')\n args = parser.parse_args()\n iris = datasets.load_iris()\n x_train, x_test, y_train, y_test = cross_validation.train_test_split(iris\n .data, iris.target, test_size=0.2)\n classifier = learn.Estimator(model_fn=model)\n classifier.fit(x_train, y_train, steps=1000)\n y_predicted = [p['class'] for p in classifier.predict(x_test,\n as_iterable=True)]\n score = metrics.accuracy_score(y_test, y_predicted)\n print('Accuracy: {0:f}'.format(score))\n weights = classifier.get_variable_value(WEIGHTS_NAME)\n flat_weights = [w for wl in weights for w in wl]\n plot_weights(flat_weights, args.weights_file, args.regularization_type)\n\n\nif __name__ == '__main__':\n tf.app.run()\n",
"<import token>\nmatplotlib.use('Agg')\n<import token>\n<assignment token>\n\n\ndef model(features, target):\n global args\n regularizer = None\n regularization_type = args.regularization_type.lower()\n regularization_value = args.regularization_value\n if regularization_type == 'l1':\n print('Using L1 regularizer, val =', regularization_value)\n regularizer = tf.contrib.layers.l1_regularizer(regularization_value)\n elif regularization_type == 'l2':\n print('Using L2 regularizer, val =', regularization_value)\n regularizer = tf.contrib.layers.l2_regularizer(regularization_value)\n else:\n print('Not using regularization')\n target = tf.one_hot(target, 3, 1, 0)\n with tf.variable_scope(MODEL_NAME, regularizer=regularizer):\n features = layers.stack(features, layers.fully_connected, [10, 20, 10])\n logits = layers.fully_connected(features, 3, activation_fn=None)\n loss = tf.contrib.losses.softmax_cross_entropy(logits, target)\n if regularizer:\n loss = loss + sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n )\n train_op = tf.contrib.layers.optimize_loss(loss, tf.contrib.framework.\n get_global_step(), optimizer='Adagrad', learning_rate=0.1)\n return {'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits)\n }, loss, train_op\n\n\ndef plot_weights(flat_weights, plot_file_name, title_name):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n plt.suptitle('Weights histogram (1st layer fc) ' + title_name)\n ax.hist(flat_weights, NUM_PLOT_BINS, color='green', alpha=0.8)\n print('Saving histogram of weights in:', plot_file_name)\n fig.savefig(plot_file_name)\n plt.close(fig)\n\n\ndef main(argv):\n global args\n parser = argparse.ArgumentParser()\n parser.add_argument('--regularization_type', default='none', help=\n 'Regularization type: l1, l2')\n parser.add_argument('--regularization_value', type=float, default=0.0,\n help='Value used for regularization. defualt 0.0')\n parser.add_argument('--weights_file', default='weights_hist.png', help=\n 'Filename to save the histogram. Default: weights_hist.png')\n args = parser.parse_args()\n iris = datasets.load_iris()\n x_train, x_test, y_train, y_test = cross_validation.train_test_split(iris\n .data, iris.target, test_size=0.2)\n classifier = learn.Estimator(model_fn=model)\n classifier.fit(x_train, y_train, steps=1000)\n y_predicted = [p['class'] for p in classifier.predict(x_test,\n as_iterable=True)]\n score = metrics.accuracy_score(y_test, y_predicted)\n print('Accuracy: {0:f}'.format(score))\n weights = classifier.get_variable_value(WEIGHTS_NAME)\n flat_weights = [w for wl in weights for w in wl]\n plot_weights(flat_weights, args.weights_file, args.regularization_type)\n\n\nif __name__ == '__main__':\n tf.app.run()\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n\n\ndef model(features, target):\n global args\n regularizer = None\n regularization_type = args.regularization_type.lower()\n regularization_value = args.regularization_value\n if regularization_type == 'l1':\n print('Using L1 regularizer, val =', regularization_value)\n regularizer = tf.contrib.layers.l1_regularizer(regularization_value)\n elif regularization_type == 'l2':\n print('Using L2 regularizer, val =', regularization_value)\n regularizer = tf.contrib.layers.l2_regularizer(regularization_value)\n else:\n print('Not using regularization')\n target = tf.one_hot(target, 3, 1, 0)\n with tf.variable_scope(MODEL_NAME, regularizer=regularizer):\n features = layers.stack(features, layers.fully_connected, [10, 20, 10])\n logits = layers.fully_connected(features, 3, activation_fn=None)\n loss = tf.contrib.losses.softmax_cross_entropy(logits, target)\n if regularizer:\n loss = loss + sum(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n )\n train_op = tf.contrib.layers.optimize_loss(loss, tf.contrib.framework.\n get_global_step(), optimizer='Adagrad', learning_rate=0.1)\n return {'class': tf.argmax(logits, 1), 'prob': tf.nn.softmax(logits)\n }, loss, train_op\n\n\ndef plot_weights(flat_weights, plot_file_name, title_name):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n plt.suptitle('Weights histogram (1st layer fc) ' + title_name)\n ax.hist(flat_weights, NUM_PLOT_BINS, color='green', alpha=0.8)\n print('Saving histogram of weights in:', plot_file_name)\n fig.savefig(plot_file_name)\n plt.close(fig)\n\n\ndef main(argv):\n global args\n parser = argparse.ArgumentParser()\n parser.add_argument('--regularization_type', default='none', help=\n 'Regularization type: l1, l2')\n parser.add_argument('--regularization_value', type=float, default=0.0,\n help='Value used for regularization. defualt 0.0')\n parser.add_argument('--weights_file', default='weights_hist.png', help=\n 'Filename to save the histogram. Default: weights_hist.png')\n args = parser.parse_args()\n iris = datasets.load_iris()\n x_train, x_test, y_train, y_test = cross_validation.train_test_split(iris\n .data, iris.target, test_size=0.2)\n classifier = learn.Estimator(model_fn=model)\n classifier.fit(x_train, y_train, steps=1000)\n y_predicted = [p['class'] for p in classifier.predict(x_test,\n as_iterable=True)]\n score = metrics.accuracy_score(y_test, y_predicted)\n print('Accuracy: {0:f}'.format(score))\n weights = classifier.get_variable_value(WEIGHTS_NAME)\n flat_weights = [w for wl in weights for w in wl]\n plot_weights(flat_weights, args.weights_file, args.regularization_type)\n\n\n<code token>\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n\n\ndef plot_weights(flat_weights, plot_file_name, title_name):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n plt.suptitle('Weights histogram (1st layer fc) ' + title_name)\n ax.hist(flat_weights, NUM_PLOT_BINS, color='green', alpha=0.8)\n print('Saving histogram of weights in:', plot_file_name)\n fig.savefig(plot_file_name)\n plt.close(fig)\n\n\ndef main(argv):\n global args\n parser = argparse.ArgumentParser()\n parser.add_argument('--regularization_type', default='none', help=\n 'Regularization type: l1, l2')\n parser.add_argument('--regularization_value', type=float, default=0.0,\n help='Value used for regularization. defualt 0.0')\n parser.add_argument('--weights_file', default='weights_hist.png', help=\n 'Filename to save the histogram. Default: weights_hist.png')\n args = parser.parse_args()\n iris = datasets.load_iris()\n x_train, x_test, y_train, y_test = cross_validation.train_test_split(iris\n .data, iris.target, test_size=0.2)\n classifier = learn.Estimator(model_fn=model)\n classifier.fit(x_train, y_train, steps=1000)\n y_predicted = [p['class'] for p in classifier.predict(x_test,\n as_iterable=True)]\n score = metrics.accuracy_score(y_test, y_predicted)\n print('Accuracy: {0:f}'.format(score))\n weights = classifier.get_variable_value(WEIGHTS_NAME)\n flat_weights = [w for wl in weights for w in wl]\n plot_weights(flat_weights, args.weights_file, args.regularization_type)\n\n\n<code token>\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n\n\ndef plot_weights(flat_weights, plot_file_name, title_name):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n plt.suptitle('Weights histogram (1st layer fc) ' + title_name)\n ax.hist(flat_weights, NUM_PLOT_BINS, color='green', alpha=0.8)\n print('Saving histogram of weights in:', plot_file_name)\n fig.savefig(plot_file_name)\n plt.close(fig)\n\n\n<function token>\n<code token>\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
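The record above wires optional L1/L2 penalties into an Iris classifier through the deprecated tf.contrib API. A framework-free sketch of the same idea, assuming made-up weights and a made-up lambda, so the penalty arithmetic is visible on its own:

import numpy as np

def penalized_loss(base_loss, weights, kind='l2', lam=0.01):
    # L1 adds lam * sum|w| (promotes sparsity); L2 adds lam * sum w^2 (smooth shrinkage)
    if kind == 'l1':
        return base_loss + lam * np.sum(np.abs(weights))
    if kind == 'l2':
        return base_loss + lam * np.sum(weights ** 2)
    return base_loss  # 'none': no regularization, as in the record's default

w = np.array([0.5, -1.2, 3.0])
print(penalized_loss(1.0, w, 'l1'))  # about 1.047  (1.0 + 0.01 * 4.7)
print(penalized_loss(1.0, w, 'l2'))  # about 1.107  (1.0 + 0.01 * 10.69)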
99,321 |
3999904b58cdee1bcd2d8f0d9d2393fc8794e814
|
# Given a collection of integers that might contain duplicates, nums, return all possible subsets.
#
# Note: The solution set must not contain duplicate subsets.
#
# For example,
# If nums = [1,2,2], a solution is:
#
# [
# [2],
# [1],
# [1,2,2],
# [2,2],
# [1,2],
# []
# ]
# Subscribe to see which companies asked this question
class Solution(object):
def subsetsHelp(self,nums, start, n, result, stack):
if n == 0:
result.append(stack[:])
return
i = start
while start <= i < len(nums) - n + 1:
stack.append(nums[i])
self.subsetsHelp(nums, i + 1, n - 1, result, stack)
tmp = stack.pop()
while i + 1 < len(nums) and tmp == nums[i + 1]:
i += 1
i += 1
def subsetsWithDup(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
nums.sort()
result = []
stack = []
for i in range(len(nums)+1):
self.subsetsHelp(nums,0,i,result,stack)
return result
def subsetsHelp(nums,start,n,result,stack):
if n == 0:
result.append(stack[:])
return
i = start
while start <= i < len(nums) - n + 1:
stack.append(nums[i])
subsetsHelp(nums,i+1,n-1,result,stack)
tmp = stack.pop()
while i+1 < len(nums) and tmp == nums[i+1]:
i+=1
i += 1
result = []
stack = []
hello = []
nums = [1,2,3,3,3,4,5]
n = 2
subsetsHelp(nums,0,0,result,stack)
hello += result
print(hello)
hh = Solution()
nums = [1,2,3]
print(hh.subsetsWithDup(nums))
|
[
"# Given a collection of integers that might contain duplicates, nums, return all possible subsets.\r\n#\r\n# Note: The solution set must not contain duplicate subsets.\r\n#\r\n# For example,\r\n# If nums = [1,2,2], a solution is:\r\n#\r\n# [\r\n# [2],\r\n# [1],\r\n# [1,2,2],\r\n# [2,2],\r\n# [1,2],\r\n# []\r\n# ]\r\n# Subscribe to see which companies asked this question\r\n\r\nclass Solution(object):\r\n def subsetsHelp(self,nums, start, n, result, stack):\r\n if n == 0:\r\n result.append(stack[:])\r\n return\r\n i = start\r\n while start <= i < len(nums) - n + 1:\r\n stack.append(nums[i])\r\n self.subsetsHelp(nums, i + 1, n - 1, result, stack)\r\n tmp = stack.pop()\r\n while i + 1 < len(nums) and tmp == nums[i + 1]:\r\n i += 1\r\n i += 1\r\n\r\n def subsetsWithDup(self, nums):\r\n \"\"\"\r\n :type nums: List[int]\r\n :rtype: List[List[int]]\r\n \"\"\"\r\n nums.sort()\r\n result = []\r\n stack = []\r\n for i in range(len(nums)+1):\r\n self.subsetsHelp(nums,0,i,result,stack)\r\n return result\r\n\r\n\r\n\r\ndef subsetsHelp(nums,start,n,result,stack):\r\n if n == 0:\r\n result.append(stack[:])\r\n return\r\n i = start\r\n while start <= i < len(nums) - n + 1:\r\n stack.append(nums[i])\r\n subsetsHelp(nums,i+1,n-1,result,stack)\r\n tmp = stack.pop()\r\n while i+1 < len(nums) and tmp == nums[i+1]:\r\n i+=1\r\n i += 1\r\n\r\n\r\nresult = []\r\nstack = []\r\nhello = []\r\nnums = [1,2,3,3,3,4,5]\r\nn = 2\r\nsubsetsHelp(nums,0,0,result,stack)\r\nhello += result\r\nprint(hello)\r\n\r\nhh = Solution()\r\nnums = [1,2,3]\r\nprint(hh.subsetsWithDup(nums))\r\n",
"class Solution(object):\n\n def subsetsHelp(self, nums, start, n, result, stack):\n if n == 0:\n result.append(stack[:])\n return\n i = start\n while start <= i < len(nums) - n + 1:\n stack.append(nums[i])\n self.subsetsHelp(nums, i + 1, n - 1, result, stack)\n tmp = stack.pop()\n while i + 1 < len(nums) and tmp == nums[i + 1]:\n i += 1\n i += 1\n\n def subsetsWithDup(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n nums.sort()\n result = []\n stack = []\n for i in range(len(nums) + 1):\n self.subsetsHelp(nums, 0, i, result, stack)\n return result\n\n\ndef subsetsHelp(nums, start, n, result, stack):\n if n == 0:\n result.append(stack[:])\n return\n i = start\n while start <= i < len(nums) - n + 1:\n stack.append(nums[i])\n subsetsHelp(nums, i + 1, n - 1, result, stack)\n tmp = stack.pop()\n while i + 1 < len(nums) and tmp == nums[i + 1]:\n i += 1\n i += 1\n\n\nresult = []\nstack = []\nhello = []\nnums = [1, 2, 3, 3, 3, 4, 5]\nn = 2\nsubsetsHelp(nums, 0, 0, result, stack)\nhello += result\nprint(hello)\nhh = Solution()\nnums = [1, 2, 3]\nprint(hh.subsetsWithDup(nums))\n",
"class Solution(object):\n\n def subsetsHelp(self, nums, start, n, result, stack):\n if n == 0:\n result.append(stack[:])\n return\n i = start\n while start <= i < len(nums) - n + 1:\n stack.append(nums[i])\n self.subsetsHelp(nums, i + 1, n - 1, result, stack)\n tmp = stack.pop()\n while i + 1 < len(nums) and tmp == nums[i + 1]:\n i += 1\n i += 1\n\n def subsetsWithDup(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n nums.sort()\n result = []\n stack = []\n for i in range(len(nums) + 1):\n self.subsetsHelp(nums, 0, i, result, stack)\n return result\n\n\ndef subsetsHelp(nums, start, n, result, stack):\n if n == 0:\n result.append(stack[:])\n return\n i = start\n while start <= i < len(nums) - n + 1:\n stack.append(nums[i])\n subsetsHelp(nums, i + 1, n - 1, result, stack)\n tmp = stack.pop()\n while i + 1 < len(nums) and tmp == nums[i + 1]:\n i += 1\n i += 1\n\n\n<assignment token>\nsubsetsHelp(nums, 0, 0, result, stack)\nhello += result\nprint(hello)\n<assignment token>\nprint(hh.subsetsWithDup(nums))\n",
"class Solution(object):\n\n def subsetsHelp(self, nums, start, n, result, stack):\n if n == 0:\n result.append(stack[:])\n return\n i = start\n while start <= i < len(nums) - n + 1:\n stack.append(nums[i])\n self.subsetsHelp(nums, i + 1, n - 1, result, stack)\n tmp = stack.pop()\n while i + 1 < len(nums) and tmp == nums[i + 1]:\n i += 1\n i += 1\n\n def subsetsWithDup(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n nums.sort()\n result = []\n stack = []\n for i in range(len(nums) + 1):\n self.subsetsHelp(nums, 0, i, result, stack)\n return result\n\n\ndef subsetsHelp(nums, start, n, result, stack):\n if n == 0:\n result.append(stack[:])\n return\n i = start\n while start <= i < len(nums) - n + 1:\n stack.append(nums[i])\n subsetsHelp(nums, i + 1, n - 1, result, stack)\n tmp = stack.pop()\n while i + 1 < len(nums) and tmp == nums[i + 1]:\n i += 1\n i += 1\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"class Solution(object):\n\n def subsetsHelp(self, nums, start, n, result, stack):\n if n == 0:\n result.append(stack[:])\n return\n i = start\n while start <= i < len(nums) - n + 1:\n stack.append(nums[i])\n self.subsetsHelp(nums, i + 1, n - 1, result, stack)\n tmp = stack.pop()\n while i + 1 < len(nums) and tmp == nums[i + 1]:\n i += 1\n i += 1\n\n def subsetsWithDup(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n nums.sort()\n result = []\n stack = []\n for i in range(len(nums) + 1):\n self.subsetsHelp(nums, 0, i, result, stack)\n return result\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"class Solution(object):\n <function token>\n\n def subsetsWithDup(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: List[List[int]]\n \"\"\"\n nums.sort()\n result = []\n stack = []\n for i in range(len(nums) + 1):\n self.subsetsHelp(nums, 0, i, result, stack)\n return result\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"class Solution(object):\n <function token>\n <function token>\n\n\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<class token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
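The recursion above sorts first and, after each pop, skips equal neighbours so a duplicate value is extended only once per depth. The same dedup rule has a compact iterative form: when the current value repeats, it may only extend the subsets created in the previous round. A sketch:

def subsets_with_dup(nums):
    nums = sorted(nums)
    result = [[]]
    start = 0  # first index of the subsets added in the previous round
    for i, n in enumerate(nums):
        begin = start if i > 0 and nums[i] == nums[i - 1] else 0
        start = len(result)
        result += [s + [n] for s in result[begin:start]]
    return result

print(subsets_with_dup([1, 2, 2]))
# [[], [1], [2], [1, 2], [2, 2], [1, 2, 2]]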
99,322 |
f744e952b5de146392d6fd8243afadbf54b20853
|
import math, collections, sys
input = sys.stdin.readline
def calc(r, c):
rows = [0 for i in range(r)]
cols = [0 for i in range(c)]
total = 0
for i in range(r):
for j in range(c):
if z[i][j] == 'A':
rows[i]+=1
cols[j]+=1
total+=1
if total == r*c:
return 0
if total == 0:
return "MORTAL"
if rows[0] == c or rows[-1] == c or cols[0] == r or cols[-1] == r:
return 1
if z[0][0] == 'A' or z[0][-1] == 'A' or z[-1][0] == 'A' or z[-1][-1] == 'A':
return 2
if max(rows) == c or max(cols) == r:
return 2
if rows[0] or rows[-1] or cols[0] or cols[-1]:
return 3
return 4
for _ in range(int(input())):
r, c = map(int, input().split())
z = []
for i in range(r):
z.append(input().strip())
print(calc(r, c))
|
[
"import math, collections, sys\ninput = sys.stdin.readline\ndef calc(r, c):\n rows = [0 for i in range(r)]\n cols = [0 for i in range(c)]\n total = 0\n for i in range(r):\n for j in range(c):\n if z[i][j] == 'A':\n rows[i]+=1\n cols[j]+=1\n total+=1\n if total == r*c:\n return 0\n if total == 0:\n return \"MORTAL\"\n if rows[0] == c or rows[-1] == c or cols[0] == r or cols[-1] == r:\n return 1\n if z[0][0] == 'A' or z[0][-1] == 'A' or z[-1][0] == 'A' or z[-1][-1] == 'A':\n return 2\n if max(rows) == c or max(cols) == r:\n return 2\n if rows[0] or rows[-1] or cols[0] or cols[-1]:\n return 3\n return 4\nfor _ in range(int(input())):\n r, c = map(int, input().split())\n z = []\n for i in range(r):\n z.append(input().strip())\n print(calc(r, c))",
"import math, collections, sys\ninput = sys.stdin.readline\n\n\ndef calc(r, c):\n rows = [(0) for i in range(r)]\n cols = [(0) for i in range(c)]\n total = 0\n for i in range(r):\n for j in range(c):\n if z[i][j] == 'A':\n rows[i] += 1\n cols[j] += 1\n total += 1\n if total == r * c:\n return 0\n if total == 0:\n return 'MORTAL'\n if rows[0] == c or rows[-1] == c or cols[0] == r or cols[-1] == r:\n return 1\n if z[0][0] == 'A' or z[0][-1] == 'A' or z[-1][0] == 'A' or z[-1][-1\n ] == 'A':\n return 2\n if max(rows) == c or max(cols) == r:\n return 2\n if rows[0] or rows[-1] or cols[0] or cols[-1]:\n return 3\n return 4\n\n\nfor _ in range(int(input())):\n r, c = map(int, input().split())\n z = []\n for i in range(r):\n z.append(input().strip())\n print(calc(r, c))\n",
"<import token>\ninput = sys.stdin.readline\n\n\ndef calc(r, c):\n rows = [(0) for i in range(r)]\n cols = [(0) for i in range(c)]\n total = 0\n for i in range(r):\n for j in range(c):\n if z[i][j] == 'A':\n rows[i] += 1\n cols[j] += 1\n total += 1\n if total == r * c:\n return 0\n if total == 0:\n return 'MORTAL'\n if rows[0] == c or rows[-1] == c or cols[0] == r or cols[-1] == r:\n return 1\n if z[0][0] == 'A' or z[0][-1] == 'A' or z[-1][0] == 'A' or z[-1][-1\n ] == 'A':\n return 2\n if max(rows) == c or max(cols) == r:\n return 2\n if rows[0] or rows[-1] or cols[0] or cols[-1]:\n return 3\n return 4\n\n\nfor _ in range(int(input())):\n r, c = map(int, input().split())\n z = []\n for i in range(r):\n z.append(input().strip())\n print(calc(r, c))\n",
"<import token>\n<assignment token>\n\n\ndef calc(r, c):\n rows = [(0) for i in range(r)]\n cols = [(0) for i in range(c)]\n total = 0\n for i in range(r):\n for j in range(c):\n if z[i][j] == 'A':\n rows[i] += 1\n cols[j] += 1\n total += 1\n if total == r * c:\n return 0\n if total == 0:\n return 'MORTAL'\n if rows[0] == c or rows[-1] == c or cols[0] == r or cols[-1] == r:\n return 1\n if z[0][0] == 'A' or z[0][-1] == 'A' or z[-1][0] == 'A' or z[-1][-1\n ] == 'A':\n return 2\n if max(rows) == c or max(cols) == r:\n return 2\n if rows[0] or rows[-1] or cols[0] or cols[-1]:\n return 3\n return 4\n\n\nfor _ in range(int(input())):\n r, c = map(int, input().split())\n z = []\n for i in range(r):\n z.append(input().strip())\n print(calc(r, c))\n",
"<import token>\n<assignment token>\n\n\ndef calc(r, c):\n rows = [(0) for i in range(r)]\n cols = [(0) for i in range(c)]\n total = 0\n for i in range(r):\n for j in range(c):\n if z[i][j] == 'A':\n rows[i] += 1\n cols[j] += 1\n total += 1\n if total == r * c:\n return 0\n if total == 0:\n return 'MORTAL'\n if rows[0] == c or rows[-1] == c or cols[0] == r or cols[-1] == r:\n return 1\n if z[0][0] == 'A' or z[0][-1] == 'A' or z[-1][0] == 'A' or z[-1][-1\n ] == 'A':\n return 2\n if max(rows) == c or max(cols) == r:\n return 2\n if rows[0] or rows[-1] or cols[0] or cols[-1]:\n return 3\n return 4\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<code token>\n"
] | false |
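calc above buckets an 'A'/'P' grid into 0, 1, 2, 3, 4 or 'MORTAL' depending on where the 'A' cells sit: full grid, full border line, corner or full interior line, border cell, interior only, or none. A self-contained restatement with sanity checks — classify is my name for it, and it takes the grid as an argument instead of reading the global z:

def classify(z):
    r, c = len(z), len(z[0])
    rows = [row.count('A') for row in z]
    cols = [sum(row[j] == 'A' for row in z) for j in range(c)]
    total = sum(rows)
    if total == r * c:
        return 0
    if total == 0:
        return 'MORTAL'
    if rows[0] == c or rows[-1] == c or cols[0] == r or cols[-1] == r:
        return 1
    if 'A' in (z[0][0], z[0][-1], z[-1][0], z[-1][-1]):
        return 2
    if max(rows) == c or max(cols) == r:
        return 2
    if rows[0] or rows[-1] or cols[0] or cols[-1]:
        return 3
    return 4

assert classify(['AA', 'AA']) == 0            # already all 'A'
assert classify(['PP', 'PP']) == 'MORTAL'     # no 'A' at all
assert classify(['AAA', 'PPP']) == 1          # a full border row
assert classify(['PPP', 'PAP', 'PPP']) == 4   # lone interior 'A'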
99,323 |
8b63c72f8451049fdd63e9131c779376fcb8281e
|
from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for
mod_fe = Blueprint('root', __name__, url_prefix='/', template_folder='templates')
@mod_fe.route('/', methods=['GET'])
def index():
return render_template('frontend/index.html')
|
[
"from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for\n\nmod_fe = Blueprint('root', __name__, url_prefix='/', template_folder='templates')\n\n\n@mod_fe.route('/', methods=['GET'])\ndef index():\n return render_template('frontend/index.html')\n",
"from flask import Blueprint, request, render_template, flash, g, session, redirect, url_for\nmod_fe = Blueprint('root', __name__, url_prefix='/', template_folder=\n 'templates')\n\n\n@mod_fe.route('/', methods=['GET'])\ndef index():\n return render_template('frontend/index.html')\n",
"<import token>\nmod_fe = Blueprint('root', __name__, url_prefix='/', template_folder=\n 'templates')\n\n\n@mod_fe.route('/', methods=['GET'])\ndef index():\n return render_template('frontend/index.html')\n",
"<import token>\n<assignment token>\n\n\n@mod_fe.route('/', methods=['GET'])\ndef index():\n return render_template('frontend/index.html')\n",
"<import token>\n<assignment token>\n<function token>\n"
] | false |
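A blueprint does nothing until an application registers it. A self-contained sketch of the wiring — render_template is stubbed with a plain string here so no templates/ directory is needed to run it:

from flask import Flask, Blueprint

mod_fe = Blueprint('root', __name__, url_prefix='/')

@mod_fe.route('/', methods=['GET'])
def index():
    return 'frontend index'  # stand-in for render_template('frontend/index.html')

app = Flask(__name__)
app.register_blueprint(mod_fe)  # GET / now resolves to index()

if __name__ == '__main__':
    app.run(debug=True)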
99,324 |
ff8e16bacedef7515dc0cfeec33ed3e3df5f52c3
|
def gen_pent_num(num):
return (num * (3 * num - 1))/2
def gen_hex_num(num):
return (num * (2*num -1))
tri_num = list()
pent_num = list()
hex_num = list()
for i in range(1000, 55000):
pent_num.append(gen_pent_num(i))
hex_num.append(gen_hex_num(i))
for num in pent_num:
if num in hex_num:
print(int(num))
exit()
|
[
"def gen_pent_num(num):\n return (num * (3 * num - 1))/2\ndef gen_hex_num(num):\n return (num * (2*num -1))\n\ntri_num = list()\npent_num = list()\nhex_num = list()\n\nfor i in range(1000, 55000):\n pent_num.append(gen_pent_num(i))\n hex_num.append(gen_hex_num(i))\nfor num in pent_num:\n if num in hex_num:\n print(int(num))\n exit()\n",
"def gen_pent_num(num):\n return num * (3 * num - 1) / 2\n\n\ndef gen_hex_num(num):\n return num * (2 * num - 1)\n\n\ntri_num = list()\npent_num = list()\nhex_num = list()\nfor i in range(1000, 55000):\n pent_num.append(gen_pent_num(i))\n hex_num.append(gen_hex_num(i))\nfor num in pent_num:\n if num in hex_num:\n print(int(num))\n exit()\n",
"def gen_pent_num(num):\n return num * (3 * num - 1) / 2\n\n\ndef gen_hex_num(num):\n return num * (2 * num - 1)\n\n\n<assignment token>\nfor i in range(1000, 55000):\n pent_num.append(gen_pent_num(i))\n hex_num.append(gen_hex_num(i))\nfor num in pent_num:\n if num in hex_num:\n print(int(num))\n exit()\n",
"def gen_pent_num(num):\n return num * (3 * num - 1) / 2\n\n\ndef gen_hex_num(num):\n return num * (2 * num - 1)\n\n\n<assignment token>\n<code token>\n",
"<function token>\n\n\ndef gen_hex_num(num):\n return num * (2 * num - 1)\n\n\n<assignment token>\n<code token>\n",
"<function token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
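The brute force above materialises both sequences over a fixed index range and scans one list for membership in the other, which is quadratic and can miss the answer if the range is guessed wrong. Every hexagonal number is also triangular, and H is pentagonal exactly when (1 + sqrt(24*H + 1)) / 6 is a whole number, so walking hexagonal numbers with that test suffices (this is Project Euler 45; the known next match after 40755 is 1533776805):

from math import isqrt

def is_pentagonal(x):
    # invert x = n(3n - 1)/2: need 24x + 1 a perfect square and (1 + root) % 6 == 0
    d = 24 * x + 1
    r = isqrt(d)
    return r * r == d and (1 + r) % 6 == 0

n = 144  # H(143) = 40755 is the previous triangle/pentagonal/hexagonal number
while True:
    h = n * (2 * n - 1)
    if is_pentagonal(h):
        break
    n += 1
print(h)  # 1533776805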
99,325 |
b29d68448c42ba7edfc39d745819a9590fef751d
|
"""
Module for snow mountains
author: Joshua Akangah
date: 11/8/20
"""
from CONFIG.settings import *
from ..BACKGROUND.parallax import *
class SnowMountainBiome():
def __init__(self):
self.background = ParallaxSurface((800, 600), pygame.RLEACCEL)
self.background.add(os.path.join(BASE_DIR, "assets/BIOMES/SNOW_MOUNTAINS/sky_lightened.png"), 2, None, True, 800, 600)
self.background.add(os.path.join(BASE_DIR, "assets/BIOMES/SNOW_MOUNTAINS/clouds_bg.png"), 2.3, None, True, 800, 600)
self.background.add(os.path.join(BASE_DIR, "assets/BIOMES/SNOW_MOUNTAINS/glacial_mountains_lightened.png"), 2, None, True, 800, 600)
self.background.add(os.path.join(BASE_DIR, "assets/BIOMES/SNOW_MOUNTAINS/mountains_flip.png"), 4, None, True, 800, 600)
self.background.add(os.path.join(BASE_DIR, "assets/BIOMES/SNOW_MOUNTAINS/clouds_mg_3.png"), 3, None, True, 800, 600)
self.background.add(os.path.join(BASE_DIR, "assets/BIOMES/SNOW_MOUNTAINS/clouds_mg_2.png"), 4, None, True, 800, 600)
self.background.add(os.path.join(BASE_DIR, "assets/BIOMES/SNOW_MOUNTAINS/clouds_mg_1.png"), 5, None, True, 800, 600)
self.background.add(os.path.join(BASE_DIR, "assets/BIOMES/SNOW_MOUNTAINS/cloud_lonely.png"), 1, None, True, 800, 600)
# self.background.add(os.path.join(BASE_DIR, "assets/BIOMES/SNOW_MOUNTAINS/sky_lightened.png"), 7)
# self.background.add(os.path.join(BASE_DIR, "assets/BIOMES/SNOW_MOUNTAINS/sky_lightened.png"), 8)
# self.background.add(os.path.join(BASE_DIR, "assets/BIOMES/SNOW_MOUNTAINS/sky_lightened.png"), 9)
def update(self, speed):
self.background.scroll(speed, orientation='horizontal')
|
[
"\"\"\"\nModule for snow mountains\nauthor: Joshua Akangah\ndate: 11/8/20\n\"\"\"\n\nfrom CONFIG.settings import *\nfrom ..BACKGROUND.parallax import *\n\nclass SnowMountainBiome():\n def __init__(self):\n self.background = ParallaxSurface((800, 600), pygame.RLEACCEL)\n self.background.add(os.path.join(BASE_DIR, \"assets/BIOMES/SNOW_MOUNTAINS/sky_lightened.png\"), 2, None, True, 800, 600)\n self.background.add(os.path.join(BASE_DIR, \"assets/BIOMES/SNOW_MOUNTAINS/clouds_bg.png\"), 2.3, None, True, 800, 600)\n \n self.background.add(os.path.join(BASE_DIR, \"assets/BIOMES/SNOW_MOUNTAINS/glacial_mountains_lightened.png\"), 2, None, True, 800, 600)\n self.background.add(os.path.join(BASE_DIR, \"assets/BIOMES/SNOW_MOUNTAINS/mountains_flip.png\"), 4, None, True, 800, 600)\n self.background.add(os.path.join(BASE_DIR, \"assets/BIOMES/SNOW_MOUNTAINS/clouds_mg_3.png\"), 3, None, True, 800, 600)\n self.background.add(os.path.join(BASE_DIR, \"assets/BIOMES/SNOW_MOUNTAINS/clouds_mg_2.png\"), 4, None, True, 800, 600)\n self.background.add(os.path.join(BASE_DIR, \"assets/BIOMES/SNOW_MOUNTAINS/clouds_mg_1.png\"), 5, None, True, 800, 600)\n self.background.add(os.path.join(BASE_DIR, \"assets/BIOMES/SNOW_MOUNTAINS/cloud_lonely.png\"), 1, None, True, 800, 600)\n \n # self.background.add(os.path.join(BASE_DIR, \"assets/BIOMES/SNOW_MOUNTAINS/sky_lightened.png\"), 7)\n # self.background.add(os.path.join(BASE_DIR, \"assets/BIOMES/SNOW_MOUNTAINS/sky_lightened.png\"), 8)\n # self.background.add(os.path.join(BASE_DIR, \"assets/BIOMES/SNOW_MOUNTAINS/sky_lightened.png\"), 9)\n \n def update(self, speed):\n self.background.scroll(speed, orientation='horizontal')",
"<docstring token>\nfrom CONFIG.settings import *\nfrom ..BACKGROUND.parallax import *\n\n\nclass SnowMountainBiome:\n\n def __init__(self):\n self.background = ParallaxSurface((800, 600), pygame.RLEACCEL)\n self.background.add(os.path.join(BASE_DIR,\n 'assets/BIOMES/SNOW_MOUNTAINS/sky_lightened.png'), 2, None, \n True, 800, 600)\n self.background.add(os.path.join(BASE_DIR,\n 'assets/BIOMES/SNOW_MOUNTAINS/clouds_bg.png'), 2.3, None, True,\n 800, 600)\n self.background.add(os.path.join(BASE_DIR,\n 'assets/BIOMES/SNOW_MOUNTAINS/glacial_mountains_lightened.png'),\n 2, None, True, 800, 600)\n self.background.add(os.path.join(BASE_DIR,\n 'assets/BIOMES/SNOW_MOUNTAINS/mountains_flip.png'), 4, None, \n True, 800, 600)\n self.background.add(os.path.join(BASE_DIR,\n 'assets/BIOMES/SNOW_MOUNTAINS/clouds_mg_3.png'), 3, None, True,\n 800, 600)\n self.background.add(os.path.join(BASE_DIR,\n 'assets/BIOMES/SNOW_MOUNTAINS/clouds_mg_2.png'), 4, None, True,\n 800, 600)\n self.background.add(os.path.join(BASE_DIR,\n 'assets/BIOMES/SNOW_MOUNTAINS/clouds_mg_1.png'), 5, None, True,\n 800, 600)\n self.background.add(os.path.join(BASE_DIR,\n 'assets/BIOMES/SNOW_MOUNTAINS/cloud_lonely.png'), 1, None, True,\n 800, 600)\n\n def update(self, speed):\n self.background.scroll(speed, orientation='horizontal')\n",
"<docstring token>\n<import token>\n\n\nclass SnowMountainBiome:\n\n def __init__(self):\n self.background = ParallaxSurface((800, 600), pygame.RLEACCEL)\n self.background.add(os.path.join(BASE_DIR,\n 'assets/BIOMES/SNOW_MOUNTAINS/sky_lightened.png'), 2, None, \n True, 800, 600)\n self.background.add(os.path.join(BASE_DIR,\n 'assets/BIOMES/SNOW_MOUNTAINS/clouds_bg.png'), 2.3, None, True,\n 800, 600)\n self.background.add(os.path.join(BASE_DIR,\n 'assets/BIOMES/SNOW_MOUNTAINS/glacial_mountains_lightened.png'),\n 2, None, True, 800, 600)\n self.background.add(os.path.join(BASE_DIR,\n 'assets/BIOMES/SNOW_MOUNTAINS/mountains_flip.png'), 4, None, \n True, 800, 600)\n self.background.add(os.path.join(BASE_DIR,\n 'assets/BIOMES/SNOW_MOUNTAINS/clouds_mg_3.png'), 3, None, True,\n 800, 600)\n self.background.add(os.path.join(BASE_DIR,\n 'assets/BIOMES/SNOW_MOUNTAINS/clouds_mg_2.png'), 4, None, True,\n 800, 600)\n self.background.add(os.path.join(BASE_DIR,\n 'assets/BIOMES/SNOW_MOUNTAINS/clouds_mg_1.png'), 5, None, True,\n 800, 600)\n self.background.add(os.path.join(BASE_DIR,\n 'assets/BIOMES/SNOW_MOUNTAINS/cloud_lonely.png'), 1, None, True,\n 800, 600)\n\n def update(self, speed):\n self.background.scroll(speed, orientation='horizontal')\n",
"<docstring token>\n<import token>\n\n\nclass SnowMountainBiome:\n <function token>\n\n def update(self, speed):\n self.background.scroll(speed, orientation='horizontal')\n",
"<docstring token>\n<import token>\n\n\nclass SnowMountainBiome:\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<class token>\n"
] | false |
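Each layer above is added with its own speed factor, and ParallaxSurface (not shown in this record) presumably scrolls each surface by a factor-scaled amount per frame — that differential motion is the whole parallax effect. A dependency-free sketch of the offset arithmetic with wrap-around at the 800 px width used above; the Layer class and the factors here are illustrative, not the real API:

WIDTH = 800

class Layer:
    def __init__(self, speed_factor):
        self.speed_factor = speed_factor
        self.offset = 0.0

    def scroll(self, base_speed):
        # smaller factors read as farther away: they drift more slowly
        self.offset = (self.offset + base_speed * self.speed_factor) % WIDTH

layers = [Layer(f) for f in (0.2, 0.5, 1.0)]   # sky, mountains, foreground
for _ in range(3):                              # three frames at base speed 10
    for layer in layers:
        layer.scroll(10)
print([round(l.offset, 1) for l in layers])     # [6.0, 15.0, 30.0]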
99,326 |
6da824b9d6cd06c44d70772e233d09f88a35a35c
|
import Tkinter
from musicazoo.lib.graphics import FullScreenGraphics
from musicazoo.settings import COLORS
import urllib
import threading
class BTCDisplayer(FullScreenGraphics):
def __init__(self):
super(BTCDisplayer, self).__init__()
self.c=Tkinter.Canvas(self,width=self.width,height=self.height,highlightthickness=0,bg=COLORS['bg'])
self.c.pack()
(x,y) = self.center()
self.text=self.c.create_text((x,y), fill=COLORS['fg'], justify=Tkinter.CENTER,anchor='center', font=("Helvetica",72))
def show(self):
self.animate(0)
FullScreenGraphics.show(self)
def animate(self,state):
self.c.itemconfig(self.text,text="mtgox: " + urllib.urlopen("http://www.biggerpackage4u.ru/api/bitcoin_price").read().strip())
self.update()
state=(state+1)%4
self.defer(10000,lambda:self.animate(state))
class BTC:
TYPE_STRING='btc'
def __init__(self,queue,uid):
self.queue=queue
self.uid=uid
self.display = BTCDisplayer()
self.lock = threading.Lock()
self.lock.acquire()
def play(self):
self.display.show()
self.lock.acquire()
def stop(self):
self.display.close()
self.lock.release()
commands={
'stop':stop,
}
parameters={
'status':lambda x:'playing',
}
|
[
"import Tkinter\nfrom musicazoo.lib.graphics import FullScreenGraphics\nfrom musicazoo.settings import COLORS\nimport urllib\nimport threading\n\nclass BTCDisplayer(FullScreenGraphics):\n\tdef __init__(self):\n\t\tsuper(BTCDisplayer, self).__init__()\n\t\tself.c=Tkinter.Canvas(self,width=self.width,height=self.height,highlightthickness=0,bg=COLORS['bg'])\n\t\tself.c.pack()\n\t\t(x,y) = self.center()\n\t\tself.text=self.c.create_text((x,y), fill=COLORS['fg'], justify=Tkinter.CENTER,anchor='center', font=(\"Helvetica\",72))\n\n\tdef show(self):\n\t\tself.animate(0)\n\t\tFullScreenGraphics.show(self)\n\n\tdef animate(self,state):\n\t\tself.c.itemconfig(self.text,text=\"mtgox: \" + urllib.urlopen(\"http://www.biggerpackage4u.ru/api/bitcoin_price\").read().strip())\n\t\tself.update()\n\t\tstate=(state+1)%4\n\t\tself.defer(10000,lambda:self.animate(state))\n\nclass BTC:\n\tTYPE_STRING='btc'\n\n\tdef __init__(self,queue,uid):\n\t\tself.queue=queue\n\t\tself.uid=uid\n\t\tself.display = BTCDisplayer()\n\t\tself.lock = threading.Lock()\n\t\tself.lock.acquire()\n\n\tdef play(self):\n\t\tself.display.show()\n\t\tself.lock.acquire()\n\n\tdef stop(self):\n\t\tself.display.close()\n\t\tself.lock.release()\n\n\tcommands={\n\t\t'stop':stop,\n\t}\n\n\tparameters={\n\t\t'status':lambda x:'playing',\n\t}\n\n",
"import Tkinter\nfrom musicazoo.lib.graphics import FullScreenGraphics\nfrom musicazoo.settings import COLORS\nimport urllib\nimport threading\n\n\nclass BTCDisplayer(FullScreenGraphics):\n\n def __init__(self):\n super(BTCDisplayer, self).__init__()\n self.c = Tkinter.Canvas(self, width=self.width, height=self.height,\n highlightthickness=0, bg=COLORS['bg'])\n self.c.pack()\n x, y = self.center()\n self.text = self.c.create_text((x, y), fill=COLORS['fg'], justify=\n Tkinter.CENTER, anchor='center', font=('Helvetica', 72))\n\n def show(self):\n self.animate(0)\n FullScreenGraphics.show(self)\n\n def animate(self, state):\n self.c.itemconfig(self.text, text='mtgox: ' + urllib.urlopen(\n 'http://www.biggerpackage4u.ru/api/bitcoin_price').read().strip())\n self.update()\n state = (state + 1) % 4\n self.defer(10000, lambda : self.animate(state))\n\n\nclass BTC:\n TYPE_STRING = 'btc'\n\n def __init__(self, queue, uid):\n self.queue = queue\n self.uid = uid\n self.display = BTCDisplayer()\n self.lock = threading.Lock()\n self.lock.acquire()\n\n def play(self):\n self.display.show()\n self.lock.acquire()\n\n def stop(self):\n self.display.close()\n self.lock.release()\n commands = {'stop': stop}\n parameters = {'status': lambda x: 'playing'}\n",
"<import token>\n\n\nclass BTCDisplayer(FullScreenGraphics):\n\n def __init__(self):\n super(BTCDisplayer, self).__init__()\n self.c = Tkinter.Canvas(self, width=self.width, height=self.height,\n highlightthickness=0, bg=COLORS['bg'])\n self.c.pack()\n x, y = self.center()\n self.text = self.c.create_text((x, y), fill=COLORS['fg'], justify=\n Tkinter.CENTER, anchor='center', font=('Helvetica', 72))\n\n def show(self):\n self.animate(0)\n FullScreenGraphics.show(self)\n\n def animate(self, state):\n self.c.itemconfig(self.text, text='mtgox: ' + urllib.urlopen(\n 'http://www.biggerpackage4u.ru/api/bitcoin_price').read().strip())\n self.update()\n state = (state + 1) % 4\n self.defer(10000, lambda : self.animate(state))\n\n\nclass BTC:\n TYPE_STRING = 'btc'\n\n def __init__(self, queue, uid):\n self.queue = queue\n self.uid = uid\n self.display = BTCDisplayer()\n self.lock = threading.Lock()\n self.lock.acquire()\n\n def play(self):\n self.display.show()\n self.lock.acquire()\n\n def stop(self):\n self.display.close()\n self.lock.release()\n commands = {'stop': stop}\n parameters = {'status': lambda x: 'playing'}\n",
"<import token>\n\n\nclass BTCDisplayer(FullScreenGraphics):\n <function token>\n\n def show(self):\n self.animate(0)\n FullScreenGraphics.show(self)\n\n def animate(self, state):\n self.c.itemconfig(self.text, text='mtgox: ' + urllib.urlopen(\n 'http://www.biggerpackage4u.ru/api/bitcoin_price').read().strip())\n self.update()\n state = (state + 1) % 4\n self.defer(10000, lambda : self.animate(state))\n\n\nclass BTC:\n TYPE_STRING = 'btc'\n\n def __init__(self, queue, uid):\n self.queue = queue\n self.uid = uid\n self.display = BTCDisplayer()\n self.lock = threading.Lock()\n self.lock.acquire()\n\n def play(self):\n self.display.show()\n self.lock.acquire()\n\n def stop(self):\n self.display.close()\n self.lock.release()\n commands = {'stop': stop}\n parameters = {'status': lambda x: 'playing'}\n",
"<import token>\n\n\nclass BTCDisplayer(FullScreenGraphics):\n <function token>\n <function token>\n\n def animate(self, state):\n self.c.itemconfig(self.text, text='mtgox: ' + urllib.urlopen(\n 'http://www.biggerpackage4u.ru/api/bitcoin_price').read().strip())\n self.update()\n state = (state + 1) % 4\n self.defer(10000, lambda : self.animate(state))\n\n\nclass BTC:\n TYPE_STRING = 'btc'\n\n def __init__(self, queue, uid):\n self.queue = queue\n self.uid = uid\n self.display = BTCDisplayer()\n self.lock = threading.Lock()\n self.lock.acquire()\n\n def play(self):\n self.display.show()\n self.lock.acquire()\n\n def stop(self):\n self.display.close()\n self.lock.release()\n commands = {'stop': stop}\n parameters = {'status': lambda x: 'playing'}\n",
"<import token>\n\n\nclass BTCDisplayer(FullScreenGraphics):\n <function token>\n <function token>\n <function token>\n\n\nclass BTC:\n TYPE_STRING = 'btc'\n\n def __init__(self, queue, uid):\n self.queue = queue\n self.uid = uid\n self.display = BTCDisplayer()\n self.lock = threading.Lock()\n self.lock.acquire()\n\n def play(self):\n self.display.show()\n self.lock.acquire()\n\n def stop(self):\n self.display.close()\n self.lock.release()\n commands = {'stop': stop}\n parameters = {'status': lambda x: 'playing'}\n",
"<import token>\n<class token>\n\n\nclass BTC:\n TYPE_STRING = 'btc'\n\n def __init__(self, queue, uid):\n self.queue = queue\n self.uid = uid\n self.display = BTCDisplayer()\n self.lock = threading.Lock()\n self.lock.acquire()\n\n def play(self):\n self.display.show()\n self.lock.acquire()\n\n def stop(self):\n self.display.close()\n self.lock.release()\n commands = {'stop': stop}\n parameters = {'status': lambda x: 'playing'}\n",
"<import token>\n<class token>\n\n\nclass BTC:\n <assignment token>\n\n def __init__(self, queue, uid):\n self.queue = queue\n self.uid = uid\n self.display = BTCDisplayer()\n self.lock = threading.Lock()\n self.lock.acquire()\n\n def play(self):\n self.display.show()\n self.lock.acquire()\n\n def stop(self):\n self.display.close()\n self.lock.release()\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n\n\nclass BTC:\n <assignment token>\n\n def __init__(self, queue, uid):\n self.queue = queue\n self.uid = uid\n self.display = BTCDisplayer()\n self.lock = threading.Lock()\n self.lock.acquire()\n\n def play(self):\n self.display.show()\n self.lock.acquire()\n <function token>\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n\n\nclass BTC:\n <assignment token>\n <function token>\n\n def play(self):\n self.display.show()\n self.lock.acquire()\n <function token>\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n\n\nclass BTC:\n <assignment token>\n <function token>\n <function token>\n <function token>\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n<class token>\n"
] | false |
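The displayer above re-schedules itself every 10 s with self.defer and parks play() on a Lock so the queue blocks until stop() releases it. The periodic-refresh half maps directly onto Tkinter's own after() scheduler; a Python 3 sketch with the network fetch stubbed out so it runs offline (the record itself polls http://www.biggerpackage4u.ru/api/bitcoin_price):

import tkinter as tk

def fetch_price():
    return '123.45'  # stand-in for urllib.urlopen(...).read().strip()

root = tk.Tk()
label = tk.Label(root, font=('Helvetica', 72))
label.pack()

def refresh():
    label.config(text='mtgox: ' + fetch_price())
    root.after(10000, refresh)  # re-arm in 10 s, like defer(10000, ...)

refresh()
root.mainloop()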
99,327 |
476967bceddee403c29b1f3898d595d2835b5e53
|
import os, sys
import socket
import subprocess as sb
def configuration():
IP = socket.gethostname()
PORT = 4444
global s, conn
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((IP, PORT))
s.listen(5)
conn, addr = s.accept()
def cmd():
try:
while True:
command = conn.recv(4000)
command = command.decode()
if command == ("exit"):
conn.close()
s.close()
break
elif ("cd") in command:
command = str(command)
dirs = command.split()[1]
if len(dirs) > 2:
p1 = dirs[1] + str(" ")
p2 = dirs[2]
path = p1 + p2
os.chdir(path)
else:
os.chdir(dirs)
path = os.getcwd()
send_args = ("change to " + path)
conn.send(send_args.encode())
elif ("pwd") in command:
path = os.getcwd()
conn.send(path.encode())
else:
execute = sb.check_output(f'{command}', shell=True)
conn.send(execute)
except ConnectionResetError:
configuration()
cmd()
except ConnectionAbortedError:
configuration()
cmd()
except ConnectionError:
configuration()
cmd()
except BrokenPipeError:
configuration()
cmd()
except sb.CalledProcessError:
conn.send(str(f"'{command}' is not recognized as an internal or external command,\noperable program or batch file").encode())
cmd()
def active():
configuration()
cmd()
|
[
"import os, sys\r\nimport socket\r\nimport subprocess as sb\r\n\r\ndef configuration():\r\n IP = socket.gethostname()\r\n PORT = 4444\r\n global s, conn\r\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n s.bind((IP, PORT))\r\n s.listen(5)\r\n conn, addr = s.accept()\r\n\r\ndef cmd():\r\n try:\r\n while True:\r\n command = conn.recv(4000)\r\n command = command.decode()\r\n if command == (\"exit\"):\r\n conn.close()\r\n s.close()\r\n break\r\n elif (\"cd\") in command:\r\n command = str(command)\r\n dirs = command.split()[1]\r\n if len(dirs) > 2:\r\n p1 = dirs[1] + str(\" \")\r\n p2 = dirs[2]\r\n path = p1 + p2\r\n os.chdir(path)\r\n else:\r\n os.chdir(dirs)\r\n path = os.getcwd()\r\n send_args = (\"change to \" + path)\r\n conn.send(send_args.encode())\r\n elif (\"pwd\") in command:\r\n path = os.getcwd()\r\n conn.send(path.encode())\r\n else:\r\n execute = sb.check_output(f'{command}', shell=True)\r\n conn.send(execute)\r\n\r\n except ConnectionResetError:\r\n configuration()\r\n cmd()\r\n except ConnectionAbortedError:\r\n configuration()\r\n cmd()\r\n except ConnectionError:\r\n configuration()\r\n cmd()\r\n except BrokenPipeError:\r\n configuration()\r\n cmd()\r\n except sb.CalledProcessError:\r\n conn.send(str(f\"'{command}' is not recognized as an internal or external command,\\noperable program or batch file\").encode())\r\n cmd()\r\n\r\ndef active():\r\n configuration()\r\n cmd()",
"import os, sys\nimport socket\nimport subprocess as sb\n\n\ndef configuration():\n IP = socket.gethostname()\n PORT = 4444\n global s, conn\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind((IP, PORT))\n s.listen(5)\n conn, addr = s.accept()\n\n\ndef cmd():\n try:\n while True:\n command = conn.recv(4000)\n command = command.decode()\n if command == 'exit':\n conn.close()\n s.close()\n break\n elif 'cd' in command:\n command = str(command)\n dirs = command.split()[1]\n if len(dirs) > 2:\n p1 = dirs[1] + str(' ')\n p2 = dirs[2]\n path = p1 + p2\n os.chdir(path)\n else:\n os.chdir(dirs)\n path = os.getcwd()\n send_args = 'change to ' + path\n conn.send(send_args.encode())\n elif 'pwd' in command:\n path = os.getcwd()\n conn.send(path.encode())\n else:\n execute = sb.check_output(f'{command}', shell=True)\n conn.send(execute)\n except ConnectionResetError:\n configuration()\n cmd()\n except ConnectionAbortedError:\n configuration()\n cmd()\n except ConnectionError:\n configuration()\n cmd()\n except BrokenPipeError:\n configuration()\n cmd()\n except sb.CalledProcessError:\n conn.send(str(\n f\"\"\"'{command}' is not recognized as an internal or external command,\noperable program or batch file\"\"\"\n ).encode())\n cmd()\n\n\ndef active():\n configuration()\n cmd()\n",
"<import token>\n\n\ndef configuration():\n IP = socket.gethostname()\n PORT = 4444\n global s, conn\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind((IP, PORT))\n s.listen(5)\n conn, addr = s.accept()\n\n\ndef cmd():\n try:\n while True:\n command = conn.recv(4000)\n command = command.decode()\n if command == 'exit':\n conn.close()\n s.close()\n break\n elif 'cd' in command:\n command = str(command)\n dirs = command.split()[1]\n if len(dirs) > 2:\n p1 = dirs[1] + str(' ')\n p2 = dirs[2]\n path = p1 + p2\n os.chdir(path)\n else:\n os.chdir(dirs)\n path = os.getcwd()\n send_args = 'change to ' + path\n conn.send(send_args.encode())\n elif 'pwd' in command:\n path = os.getcwd()\n conn.send(path.encode())\n else:\n execute = sb.check_output(f'{command}', shell=True)\n conn.send(execute)\n except ConnectionResetError:\n configuration()\n cmd()\n except ConnectionAbortedError:\n configuration()\n cmd()\n except ConnectionError:\n configuration()\n cmd()\n except BrokenPipeError:\n configuration()\n cmd()\n except sb.CalledProcessError:\n conn.send(str(\n f\"\"\"'{command}' is not recognized as an internal or external command,\noperable program or batch file\"\"\"\n ).encode())\n cmd()\n\n\ndef active():\n configuration()\n cmd()\n",
"<import token>\n\n\ndef configuration():\n IP = socket.gethostname()\n PORT = 4444\n global s, conn\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind((IP, PORT))\n s.listen(5)\n conn, addr = s.accept()\n\n\n<function token>\n\n\ndef active():\n configuration()\n cmd()\n",
"<import token>\n<function token>\n<function token>\n\n\ndef active():\n configuration()\n cmd()\n",
"<import token>\n<function token>\n<function token>\n<function token>\n"
] | false |
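The cd branch above is buggy: command.split()[1] yields a single string, so len(dirs) > 2 counts characters rather than path components, and dirs[1]/dirs[2] then index individual characters — a path like /tmp becomes 't m'. Splitting once on the first space handles paths with or without spaces; a sketch of just that parsing:

def parse_cd(command):
    # split on the first space only, so 'cd /My Documents' keeps its argument whole
    parts = command.split(' ', 1)
    return parts[1] if len(parts) > 1 else ''

print(parse_cd('cd /tmp'))           # /tmp
print(parse_cd('cd /My Documents'))  # /My Documents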
99,328 |
e55873d91e4cea2bfea4d0c44fdc581c09f612cc
|
"""
This file defines the database models
"""
import datetime
from . common import db, Field, auth
from pydal.validators import *
### Define your table below
#
# db.define_table('thing', Field('name'))
#
## always commit your models to avoid problems later
#
# db.commit()
#
def get_user_email():
return auth.current_user.get('email')
db.define_table(
'contact',
Field('first_name'),
Field('last_name'),
Field('user_email',default=get_user_email)
)
db.contact.id.readable = False
db.contact.user_email.readable = False
db.define_table(
'phone',
Field('contact_id', 'reference contact'),
Field('phone'),
Field('type')
)
db.phone.id.readable = False
db.phone.contact_id.readable = False
db.phone.contact_id.ondelete = 'CASCADE'
db.commit()
|
[
"\"\"\"\r\nThis file defines the database models\r\n\"\"\"\r\nimport datetime\r\n\r\nfrom . common import db, Field, auth\r\nfrom pydal.validators import *\r\n\r\n\r\n\r\n### Define your table below\r\n#\r\n# db.define_table('thing', Field('name'))\r\n#\r\n## always commit your models to avoid problems later\r\n#\r\n# db.commit()\r\n#\r\ndef get_user_email():\r\n return auth.current_user.get('email')\r\n\r\ndb.define_table(\r\n 'contact',\r\n Field('first_name'),\r\n Field('last_name'),\r\n Field('user_email',default=get_user_email)\r\n )\r\ndb.contact.id.readable = False\r\ndb.contact.user_email.readable = False\r\n\r\ndb.define_table(\r\n 'phone',\r\n Field('contact_id', 'reference contact'),\r\n Field('phone'),\r\n Field('type')\r\n)\r\n\r\ndb.phone.id.readable = False\r\ndb.phone.contact_id.readable = False\r\ndb.phone.contact_id.ondelete = 'CASCADE'\r\n\r\ndb.commit()\r\n",
"<docstring token>\nimport datetime\nfrom .common import db, Field, auth\nfrom pydal.validators import *\n\n\ndef get_user_email():\n return auth.current_user.get('email')\n\n\ndb.define_table('contact', Field('first_name'), Field('last_name'), Field(\n 'user_email', default=get_user_email))\ndb.contact.id.readable = False\ndb.contact.user_email.readable = False\ndb.define_table('phone', Field('contact_id', 'reference contact'), Field(\n 'phone'), Field('type'))\ndb.phone.id.readable = False\ndb.phone.contact_id.readable = False\ndb.phone.contact_id.ondelete = 'CASCADE'\ndb.commit()\n",
"<docstring token>\n<import token>\n\n\ndef get_user_email():\n return auth.current_user.get('email')\n\n\ndb.define_table('contact', Field('first_name'), Field('last_name'), Field(\n 'user_email', default=get_user_email))\ndb.contact.id.readable = False\ndb.contact.user_email.readable = False\ndb.define_table('phone', Field('contact_id', 'reference contact'), Field(\n 'phone'), Field('type'))\ndb.phone.id.readable = False\ndb.phone.contact_id.readable = False\ndb.phone.contact_id.ondelete = 'CASCADE'\ndb.commit()\n",
"<docstring token>\n<import token>\n\n\ndef get_user_email():\n return auth.current_user.get('email')\n\n\ndb.define_table('contact', Field('first_name'), Field('last_name'), Field(\n 'user_email', default=get_user_email))\n<assignment token>\ndb.define_table('phone', Field('contact_id', 'reference contact'), Field(\n 'phone'), Field('type'))\n<assignment token>\ndb.commit()\n",
"<docstring token>\n<import token>\n\n\ndef get_user_email():\n return auth.current_user.get('email')\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
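user_email defaults to the signed-in user, so controllers normally filter every query by it; that is what keeps one user's contacts invisible to another. A standalone pydal sketch of the filter against an in-memory SQLite database (the sample rows and emails are made up):

from pydal import DAL, Field

db = DAL('sqlite:memory')
db.define_table('contact', Field('first_name'), Field('last_name'),
                Field('user_email'))
db.contact.insert(first_name='Ada', last_name='Lovelace',
                  user_email='ada@example.com')
db.contact.insert(first_name='Alan', last_name='Turing',
                  user_email='alan@example.com')

mine = db(db.contact.user_email == 'ada@example.com').select()
print([row.first_name for row in mine])  # ['Ada']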
99,329 |
c5dd8baab3e7ab3de71f5e74eedb5164733163ef
|
import argparse
import socket
from Crypto.Cipher import AES
from Crypto import Random
BLOCK_SIZE = 16
pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * \
chr(BLOCK_SIZE - len(s) % BLOCK_SIZE)
unpad = lambda s: s[:-ord(s[len(s) - 1:])]
def do_decrypt(ciphertext):
iv = ciphertext[:16]
obj2 = AES.new('This is a key123', AES.MODE_CBC, iv)
message = obj2.decrypt(ciphertext[16:])
return unpad(message)
def do_encrypt(message):
message = pad(message)
iv = Random.new().read(AES.block_size)
obj = AES.new('This is a key123', AES.MODE_CBC, iv)
ciphertext = obj.encrypt(message)
print "IV: " + iv+ciphertext
return iv+ciphertext
def is_valid_ipv4_address(address):
try:
socket.inet_pton(socket.AF_INET, address)
except AttributeError: # no inet_pton here, sorry
try:
socket.inet_aton(address)
except socket.error:
return False
return address.count('.') == 3
except socket.error: # not a valid address
return False
return True
def client_socket(remote_ip, port, echo_string):
print "This is my client socket"
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client_socket.connect((remote_ip, port))
client_socket.send(do_encrypt(echo_string))
def server_socket(port):
print "This is my server socket"
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(("0.0.0.0", port)) #MAX_SHORT ~65535
sock.listen(10)
while 1:
conn, addr = sock.accept()
a = conn.recv(1024)
print "Unencrypted Message: \n{0}".format(a)
a = do_decrypt(a)
print "Decrypted MEssage: \n{0}".format(a)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Client and Server template.")
parser.add_argument("-t", "--type_of_socket", type=str, help="The input for type of socket you want. Options: Client or Server")
parser.add_argument("-p", "--port", type=int, help="port number that you want hosted. Anything <= 1024 requires sudo requirements.")
parser.add_argument("-e", "--echo_string", type=str, help="prints this string")
parser.add_argument("-r", "--remote_address", type=str, help="The remote server client.")
args = parser.parse_args()
if not args.type_of_socket and args.port:
parser.print_help()
exit()
if "client" == args.type_of_socket.lower():
if args.remote_address and is_valid_ipv4_address(args.remote_address) and args.echo_string:
client_socket(args.remote_address, args.port, args.echo_string)
elif args.type_of_socket and "server" in args.type_of_socket.lower():
server_socket(args.port)
else:
parser.print_help()
|
[
"import argparse\nimport socket\nfrom Crypto.Cipher import AES\nfrom Crypto import Random\n\nBLOCK_SIZE = 16\n\npad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * \\\n chr(BLOCK_SIZE - len(s) % BLOCK_SIZE)\nunpad = lambda s: s[:-ord(s[len(s) - 1:])]\n\ndef do_decrypt(ciphertext):\n iv = ciphertext[:16]\n obj2 = AES.new('This is a key123', AES.MODE_CBC, iv)\n message = obj2.decrypt(ciphertext[16:])\n return unpad(message)\n\ndef do_encrypt(message):\n message = pad(message)\n iv = Random.new().read(AES.block_size)\n obj = AES.new('This is a key123', AES.MODE_CBC, iv)\n ciphertext = obj.encrypt(message)\n print \"IV: \" + iv+ciphertext\n return iv+ciphertext\n\n\ndef is_valid_ipv4_address(address):\n try:\n socket.inet_pton(socket.AF_INET, address)\n except AttributeError: # no inet_pton here, sorry\n try:\n socket.inet_aton(address)\n except socket.error:\n return False\n return address.count('.') == 3\n except socket.error: # not a valid address\n return False\n\n return True\n\ndef client_socket(remote_ip, port, echo_string):\n print \"This is my client socket\"\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client_socket.connect((remote_ip, port))\n client_socket.send(do_encrypt(echo_string))\n\ndef server_socket(port):\n print \"This is my server socket\"\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind((\"0.0.0.0\", port)) #MAX_SHORT ~65535\n sock.listen(10)\n while 1:\n conn, addr = sock.accept()\n a = conn.recv(1024)\n print \"Unencrypted Message: \\n{0}\".format(a)\n a = do_decrypt(a)\n print \"Decrypted MEssage: \\n{0}\".format(a)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Client and Server template.\")\n parser.add_argument(\"-t\", \"--type_of_socket\", type=str, help=\"The input for type of socket you want. Options: Client or Server\")\n parser.add_argument(\"-p\", \"--port\", type=int, help=\"port number that you want hosted. Anything <= 1024 requires sudo requirements.\")\n parser.add_argument(\"-e\", \"--echo_string\", type=str, help=\"prints this string\")\n parser.add_argument(\"-r\", \"--remote_address\", type=str, help=\"The remote server client.\")\n args = parser.parse_args()\n if not args.type_of_socket and args.port:\n parser.print_help()\n exit()\n if \"client\" == args.type_of_socket.lower():\n if args.remote_address and is_valid_ipv4_address(args.remote_address) and args.echo_string:\n client_socket(args.remote_address, args.port, args.echo_string)\n elif args.type_of_socket and \"server\" in args.type_of_socket.lower():\n server_socket(args.port)\n else:\n parser.print_help()\n\n"
] | true |
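This record carries error: true because the code is Python 2 — the bare print statements do not parse under Python 3, so only the raw step survives. The crypto framing itself (random IV prepended to the ciphertext, PKCS#7-style padding) is conventional; a Python 3 sketch of that part with pycryptodome, keeping the record's 16-byte demo key (real code should never hard-code keys):

from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes

BLOCK_SIZE = 16
KEY = b'This is a key123'  # demo key from the record, 16 bytes

def pad(data: bytes) -> bytes:
    n = BLOCK_SIZE - len(data) % BLOCK_SIZE
    return data + bytes([n]) * n

def unpad(data: bytes) -> bytes:
    return data[:-data[-1]]

def encrypt(message: bytes) -> bytes:
    iv = get_random_bytes(AES.block_size)
    return iv + AES.new(KEY, AES.MODE_CBC, iv).encrypt(pad(message))

def decrypt(blob: bytes) -> bytes:
    iv, body = blob[:16], blob[16:]
    return unpad(AES.new(KEY, AES.MODE_CBC, iv).decrypt(body))

print(decrypt(encrypt(b'hello world')))  # b'hello world'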
99,330 |
02f713322fbed26f60725687ea2735829b4c6bbc
|
with open("test.txt","w") as file:
file.write("essai")
file.close()
def auteurs():
auteur = input("Indiquez le prénom et nom de l'auteur : ").title()
entreprise = input("Indiquez son entreprise : ").capitalize()
# auteur_complet = str(f"{auteur}. {entreprise}")
return f"{auteur}. {entreprise}"
def prerequis():
pre_info = input("notez votre pré-requis : ")
prerequis_infos = (f"- <h2>{pre_info}</h2></b><br>")
return prerequis_infos
def compte(variable, func):
print(f"Combien {variable} avez-vous à notifier? Tapez un nombre à partir de 0 : ")
number = int(input())
i = 1
try:
while i <= number:
i += 1
response = func()
with open("test.txt","a") as file:
file.write(response)
file.close()
except OSError:
print("ça ne fonctionne pas")
compte("auteurs", auteurs)
|
[
"with open(\"test.txt\",\"w\") as file:\n file.write(\"essai\")\n file.close()\n \ndef auteurs():\n auteur = input(\"Indiquez le prénom et nom de l'auteur : \").title()\n entreprise = input(\"Indiquez son entreprise : \").capitalize()\n # auteur_complet = str(f\"{auteur}. {entreprise}\")\n return f\"{auteur}. {entreprise}\"\n \ndef prerequis():\n pre_info = input(\"notez votre pré-requis : \")\n prerequis_infos = (f\"- <h2>{pre_info}</h2></b><br>\")\n return prerequis_infos\n\ndef compte(variable, func):\n print(f\"Combien {variable} avez-vous à notifier? Tapez un nombre à partir de 0 : \")\n number = int(input())\n i = 1\n try:\n while i <= number:\n i += 1\n response = func()\n with open(\"test.txt\",\"a\") as file:\n file.write(response)\n file.close()\n except OSError:\n print(\"ça ne fonctionne pas\")\n\n \ncompte(\"auteurs\", auteurs)\n ",
"with open('test.txt', 'w') as file:\n file.write('essai')\n file.close()\n\n\ndef auteurs():\n auteur = input(\"Indiquez le prénom et nom de l'auteur : \").title()\n entreprise = input('Indiquez son entreprise : ').capitalize()\n return f'{auteur}. {entreprise}'\n\n\ndef prerequis():\n pre_info = input('notez votre pré-requis : ')\n prerequis_infos = f'- <h2>{pre_info}</h2></b><br>'\n return prerequis_infos\n\n\ndef compte(variable, func):\n print(\n f'Combien {variable} avez-vous à notifier? Tapez un nombre à partir de 0 : '\n )\n number = int(input())\n i = 1\n try:\n while i <= number:\n i += 1\n response = func()\n with open('test.txt', 'a') as file:\n file.write(response)\n file.close()\n except OSError:\n print('ça ne fonctionne pas')\n\n\ncompte('auteurs', auteurs)\n",
"<code token>\n\n\ndef auteurs():\n auteur = input(\"Indiquez le prénom et nom de l'auteur : \").title()\n entreprise = input('Indiquez son entreprise : ').capitalize()\n return f'{auteur}. {entreprise}'\n\n\ndef prerequis():\n pre_info = input('notez votre pré-requis : ')\n prerequis_infos = f'- <h2>{pre_info}</h2></b><br>'\n return prerequis_infos\n\n\ndef compte(variable, func):\n print(\n f'Combien {variable} avez-vous à notifier? Tapez un nombre à partir de 0 : '\n )\n number = int(input())\n i = 1\n try:\n while i <= number:\n i += 1\n response = func()\n with open('test.txt', 'a') as file:\n file.write(response)\n file.close()\n except OSError:\n print('ça ne fonctionne pas')\n\n\n<code token>\n",
"<code token>\n\n\ndef auteurs():\n auteur = input(\"Indiquez le prénom et nom de l'auteur : \").title()\n entreprise = input('Indiquez son entreprise : ').capitalize()\n return f'{auteur}. {entreprise}'\n\n\n<function token>\n\n\ndef compte(variable, func):\n print(\n f'Combien {variable} avez-vous à notifier? Tapez un nombre à partir de 0 : '\n )\n number = int(input())\n i = 1\n try:\n while i <= number:\n i += 1\n response = func()\n with open('test.txt', 'a') as file:\n file.write(response)\n file.close()\n except OSError:\n print('ça ne fonctionne pas')\n\n\n<code token>\n",
"<code token>\n<function token>\n<function token>\n\n\ndef compte(variable, func):\n print(\n f'Combien {variable} avez-vous à notifier? Tapez un nombre à partir de 0 : '\n )\n number = int(input())\n i = 1\n try:\n while i <= number:\n i += 1\n response = func()\n with open('test.txt', 'a') as file:\n file.write(response)\n file.close()\n except OSError:\n print('ça ne fonctionne pas')\n\n\n<code token>\n",
"<code token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
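compte above takes a function as a value and calls it n times, reopening the file on every iteration; the explicit file.close() inside a with block is redundant, since the context manager already closes the file. A compact equivalent that opens once (the label argument is kept only for the prompt text; the lambda below is a stand-in for auteurs or prerequis):

def compte(label, func, n):
    # func is any zero-argument callable returning a string
    with open('test.txt', 'a', encoding='utf-8') as f:
        for _ in range(n):
            f.write(func())

compte('auteurs', lambda: 'Ada Lovelace. Acme\n', 2)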
99,331 |
9f45745a825c30d5a89776077ac0961d0c8ea4cf
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import torch.nn as nn
from lib.models.backbones.resnet.resnet_models import ResNetModels
class NormalResnetBackbone(nn.Module):
def __init__(self, orig_resnet):
super(NormalResnetBackbone, self).__init__()
self.num_features = 2048
# take pretrained resnet, except AvgPool and FC
self.prefix = orig_resnet.prefix
self.maxpool = orig_resnet.maxpool
self.layer1 = orig_resnet.layer1
self.layer2 = orig_resnet.layer2
self.layer3 = orig_resnet.layer3
self.layer4 = orig_resnet.layer4
def get_num_features(self):
return self.num_features
def forward(self, x):
x = self.prefix(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
class ResNetBackbone(object):
def __init__(self, configer):
self.configer = configer
self.resnet_models = ResNetModels(self.configer)
def __call__(self, arch=None, pretrained_model=None, **kwargs):
if arch == 'resnet18':
orig_resnet = self.resnet_models.resnet18(pretrained=pretrained_model, **kwargs)
arch_net = NormalResnetBackbone(orig_resnet)
arch_net.num_features = 512
elif arch == 'resnet34':
orig_resnet = self.resnet_models.resnet34(pretrained=pretrained_model, **kwargs)
arch_net = NormalResnetBackbone(orig_resnet)
arch_net.num_features = 512
elif arch == 'resnet50':
orig_resnet = self.resnet_models.resnet50(pretrained=pretrained_model, **kwargs)
arch_net = NormalResnetBackbone(orig_resnet)
elif arch == 'resnet101':
orig_resnet = self.resnet_models.resnet101(pretrained=pretrained_model, **kwargs)
arch_net = NormalResnetBackbone(orig_resnet)
elif arch == 'resnet152':
orig_resnet = self.resnet_models.resnet152(pretrained=pretrained_model, **kwargs)
arch_net = NormalResnetBackbone(orig_resnet)
else:
raise Exception('Architecture undefined!')
return arch_net
|
[
"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n\nimport torch.nn as nn\n\nfrom lib.models.backbones.resnet.resnet_models import ResNetModels\n\n\nclass NormalResnetBackbone(nn.Module):\n def __init__(self, orig_resnet):\n super(NormalResnetBackbone, self).__init__()\n\n self.num_features = 2048\n # take pretrained resnet, except AvgPool and FC\n self.prefix = orig_resnet.prefix\n self.maxpool = orig_resnet.maxpool\n self.layer1 = orig_resnet.layer1\n self.layer2 = orig_resnet.layer2\n self.layer3 = orig_resnet.layer3\n self.layer4 = orig_resnet.layer4\n\n def get_num_features(self):\n return self.num_features\n\n def forward(self, x):\n x = self.prefix(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n return x\n\n\nclass ResNetBackbone(object):\n def __init__(self, configer):\n self.configer = configer\n self.resnet_models = ResNetModels(self.configer)\n\n def __call__(self, arch=None, pretrained_model=None, **kwargs):\n if arch == 'resnet18':\n orig_resnet = self.resnet_models.resnet18(pretrained=pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n arch_net.num_features = 512\n\n elif arch == 'resnet34':\n orig_resnet = self.resnet_models.resnet34(pretrained=pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n arch_net.num_features = 512\n\n elif arch == 'resnet50':\n orig_resnet = self.resnet_models.resnet50(pretrained=pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n\n elif arch == 'resnet101':\n orig_resnet = self.resnet_models.resnet101(pretrained=pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n\n elif arch == 'resnet152':\n orig_resnet = self.resnet_models.resnet152(pretrained=pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n\n else:\n raise Exception('Architecture undefined!')\n\n return arch_net\n",
"import torch.nn as nn\nfrom lib.models.backbones.resnet.resnet_models import ResNetModels\n\n\nclass NormalResnetBackbone(nn.Module):\n\n def __init__(self, orig_resnet):\n super(NormalResnetBackbone, self).__init__()\n self.num_features = 2048\n self.prefix = orig_resnet.prefix\n self.maxpool = orig_resnet.maxpool\n self.layer1 = orig_resnet.layer1\n self.layer2 = orig_resnet.layer2\n self.layer3 = orig_resnet.layer3\n self.layer4 = orig_resnet.layer4\n\n def get_num_features(self):\n return self.num_features\n\n def forward(self, x):\n x = self.prefix(x)\n x = self.maxpool(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n return x\n\n\nclass ResNetBackbone(object):\n\n def __init__(self, configer):\n self.configer = configer\n self.resnet_models = ResNetModels(self.configer)\n\n def __call__(self, arch=None, pretrained_model=None, **kwargs):\n if arch == 'resnet18':\n orig_resnet = self.resnet_models.resnet18(pretrained=\n pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n arch_net.num_features = 512\n elif arch == 'resnet34':\n orig_resnet = self.resnet_models.resnet34(pretrained=\n pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n arch_net.num_features = 512\n elif arch == 'resnet50':\n orig_resnet = self.resnet_models.resnet50(pretrained=\n pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n elif arch == 'resnet101':\n orig_resnet = self.resnet_models.resnet101(pretrained=\n pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n elif arch == 'resnet152':\n orig_resnet = self.resnet_models.resnet152(pretrained=\n pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n else:\n raise Exception('Architecture undefined!')\n return arch_net\n",
"<import token>\n\n\nclass NormalResnetBackbone(nn.Module):\n\n def __init__(self, orig_resnet):\n super(NormalResnetBackbone, self).__init__()\n self.num_features = 2048\n self.prefix = orig_resnet.prefix\n self.maxpool = orig_resnet.maxpool\n self.layer1 = orig_resnet.layer1\n self.layer2 = orig_resnet.layer2\n self.layer3 = orig_resnet.layer3\n self.layer4 = orig_resnet.layer4\n\n def get_num_features(self):\n return self.num_features\n\n def forward(self, x):\n x = self.prefix(x)\n x = self.maxpool(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n return x\n\n\nclass ResNetBackbone(object):\n\n def __init__(self, configer):\n self.configer = configer\n self.resnet_models = ResNetModels(self.configer)\n\n def __call__(self, arch=None, pretrained_model=None, **kwargs):\n if arch == 'resnet18':\n orig_resnet = self.resnet_models.resnet18(pretrained=\n pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n arch_net.num_features = 512\n elif arch == 'resnet34':\n orig_resnet = self.resnet_models.resnet34(pretrained=\n pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n arch_net.num_features = 512\n elif arch == 'resnet50':\n orig_resnet = self.resnet_models.resnet50(pretrained=\n pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n elif arch == 'resnet101':\n orig_resnet = self.resnet_models.resnet101(pretrained=\n pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n elif arch == 'resnet152':\n orig_resnet = self.resnet_models.resnet152(pretrained=\n pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n else:\n raise Exception('Architecture undefined!')\n return arch_net\n",
"<import token>\n\n\nclass NormalResnetBackbone(nn.Module):\n\n def __init__(self, orig_resnet):\n super(NormalResnetBackbone, self).__init__()\n self.num_features = 2048\n self.prefix = orig_resnet.prefix\n self.maxpool = orig_resnet.maxpool\n self.layer1 = orig_resnet.layer1\n self.layer2 = orig_resnet.layer2\n self.layer3 = orig_resnet.layer3\n self.layer4 = orig_resnet.layer4\n <function token>\n\n def forward(self, x):\n x = self.prefix(x)\n x = self.maxpool(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n return x\n\n\nclass ResNetBackbone(object):\n\n def __init__(self, configer):\n self.configer = configer\n self.resnet_models = ResNetModels(self.configer)\n\n def __call__(self, arch=None, pretrained_model=None, **kwargs):\n if arch == 'resnet18':\n orig_resnet = self.resnet_models.resnet18(pretrained=\n pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n arch_net.num_features = 512\n elif arch == 'resnet34':\n orig_resnet = self.resnet_models.resnet34(pretrained=\n pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n arch_net.num_features = 512\n elif arch == 'resnet50':\n orig_resnet = self.resnet_models.resnet50(pretrained=\n pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n elif arch == 'resnet101':\n orig_resnet = self.resnet_models.resnet101(pretrained=\n pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n elif arch == 'resnet152':\n orig_resnet = self.resnet_models.resnet152(pretrained=\n pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n else:\n raise Exception('Architecture undefined!')\n return arch_net\n",
"<import token>\n\n\nclass NormalResnetBackbone(nn.Module):\n <function token>\n <function token>\n\n def forward(self, x):\n x = self.prefix(x)\n x = self.maxpool(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n return x\n\n\nclass ResNetBackbone(object):\n\n def __init__(self, configer):\n self.configer = configer\n self.resnet_models = ResNetModels(self.configer)\n\n def __call__(self, arch=None, pretrained_model=None, **kwargs):\n if arch == 'resnet18':\n orig_resnet = self.resnet_models.resnet18(pretrained=\n pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n arch_net.num_features = 512\n elif arch == 'resnet34':\n orig_resnet = self.resnet_models.resnet34(pretrained=\n pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n arch_net.num_features = 512\n elif arch == 'resnet50':\n orig_resnet = self.resnet_models.resnet50(pretrained=\n pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n elif arch == 'resnet101':\n orig_resnet = self.resnet_models.resnet101(pretrained=\n pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n elif arch == 'resnet152':\n orig_resnet = self.resnet_models.resnet152(pretrained=\n pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n else:\n raise Exception('Architecture undefined!')\n return arch_net\n",
"<import token>\n\n\nclass NormalResnetBackbone(nn.Module):\n <function token>\n <function token>\n <function token>\n\n\nclass ResNetBackbone(object):\n\n def __init__(self, configer):\n self.configer = configer\n self.resnet_models = ResNetModels(self.configer)\n\n def __call__(self, arch=None, pretrained_model=None, **kwargs):\n if arch == 'resnet18':\n orig_resnet = self.resnet_models.resnet18(pretrained=\n pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n arch_net.num_features = 512\n elif arch == 'resnet34':\n orig_resnet = self.resnet_models.resnet34(pretrained=\n pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n arch_net.num_features = 512\n elif arch == 'resnet50':\n orig_resnet = self.resnet_models.resnet50(pretrained=\n pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n elif arch == 'resnet101':\n orig_resnet = self.resnet_models.resnet101(pretrained=\n pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n elif arch == 'resnet152':\n orig_resnet = self.resnet_models.resnet152(pretrained=\n pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n else:\n raise Exception('Architecture undefined!')\n return arch_net\n",
"<import token>\n<class token>\n\n\nclass ResNetBackbone(object):\n\n def __init__(self, configer):\n self.configer = configer\n self.resnet_models = ResNetModels(self.configer)\n\n def __call__(self, arch=None, pretrained_model=None, **kwargs):\n if arch == 'resnet18':\n orig_resnet = self.resnet_models.resnet18(pretrained=\n pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n arch_net.num_features = 512\n elif arch == 'resnet34':\n orig_resnet = self.resnet_models.resnet34(pretrained=\n pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n arch_net.num_features = 512\n elif arch == 'resnet50':\n orig_resnet = self.resnet_models.resnet50(pretrained=\n pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n elif arch == 'resnet101':\n orig_resnet = self.resnet_models.resnet101(pretrained=\n pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n elif arch == 'resnet152':\n orig_resnet = self.resnet_models.resnet152(pretrained=\n pretrained_model, **kwargs)\n arch_net = NormalResnetBackbone(orig_resnet)\n else:\n raise Exception('Architecture undefined!')\n return arch_net\n",
"<import token>\n<class token>\n\n\nclass ResNetBackbone(object):\n\n def __init__(self, configer):\n self.configer = configer\n self.resnet_models = ResNetModels(self.configer)\n <function token>\n",
"<import token>\n<class token>\n\n\nclass ResNetBackbone(object):\n <function token>\n <function token>\n",
"<import token>\n<class token>\n<class token>\n"
] | false |
99,332 |
3dbe8718520222c8baa8db0cab55bbacc6eac80a
|
# Creates a Brainfuck program that outputs some input string; NOT *FULLY* optimal (yet...); Recursive solution
E=lambda c,j=[],l='':len(c)>0 and E(c[1:],j+[min(''.join(min([(sum([ord(c[0])/j,j]),'>'+'+'*(abs(ord(l or'\x00')-ord(c[0]))//j)+'['+'<'+'-+'[l==''or l<c[0]]*j+'>-]<.')for j in range(1,-~abs(ord(l or'\x00')-ord(c[0])))if abs(ord(l or'\x00')-ord(c[0]))/j==abs(ord(l or'\x00')-ord(c[0]))//j]or[(0,'.')],key=lambda g:g[0])[1:]),'-+'[ord(l or'\x00')<ord(c[0])]*(abs(ord(l or'\x00')-ord(c[0])))+'.',key=len)],c[0])or''.join(j)
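
# A quick way to sanity-check the generator is to feed its output through a
# minimal Brainfuck interpreter. The sketch below is editorial: it assumes E is
# defined as above, handles only the +-<>[]. subset that E emits, and uses plain
# Python ints for cells (the generated programs never drive a cell negative).
def run_bf(program):
    tape, ptr, pc, out = [0] * 30000, 0, 0, []
    while pc < len(program):
        op = program[pc]
        if op == '>':
            ptr += 1
        elif op == '<':
            ptr -= 1
        elif op == '+':
            tape[ptr] += 1
        elif op == '-':
            tape[ptr] -= 1
        elif op == '.':
            out.append(chr(tape[ptr]))
        elif op == '[' and tape[ptr] == 0:
            depth = 1
            while depth:  # jump forward to the matching ]
                pc += 1
                depth += {'[': 1, ']': -1}.get(program[pc], 0)
        elif op == ']' and tape[ptr] != 0:
            depth = 1
            while depth:  # jump back to the matching [
                pc -= 1
                depth += {']': 1, '[': -1}.get(program[pc], 0)
        pc += 1
    return ''.join(out)

# If the generator is correct, the round trip reproduces the input, e.g.:
# run_bf(E("Hi")) == "Hi"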
|
[
"# Creates a Brainfuck program that outputs some input string; NOT *FULLY* optimal (yet...); Recursive solution\nE=lambda c,j=[],l='':len(c)>0 and E(c[1:],j+[min(''.join(min([(sum([ord(c[0])/j,j]),'>'+'+'*(abs(ord(l or'\\x00')-ord(c[0]))//j)+'['+'<'+'-+'[l==''or l<c[0]]*j+'>-]<.')for j in range(1,-~abs(ord(l or'\\x00')-ord(c[0])))if abs(ord(l or'\\x00')-ord(c[0]))/j==abs(ord(l or'\\x00')-ord(c[0]))//j]or[(0,'.')],key=lambda g:g[0])[1:]),'-+'[ord(l or'\\x00')<ord(c[0])]*(abs(ord(l or'\\x00')-ord(c[0])))+'.',key=len)],c[0])or''.join(j)\n\n\n",
"E = lambda c, j=[], l='': len(c) > 0 and E(c[1:], j + [min(''.join(min([(\n sum([ord(c[0]) / j, j]), '>' + '+' * (abs(ord(l or '\\x00') - ord(c[0])) //\n j) + '[' + '<' + '-+'[l == '' or l < c[0]] * j + '>-]<.') for j in\n range(1, -~abs(ord(l or '\\x00') - ord(c[0]))) if abs(ord(l or '\\x00') -\n ord(c[0])) / j == abs(ord(l or '\\x00') - ord(c[0])) // j] or [(0, '.')],\n key=lambda g: g[0])[1:]), '-+'[ord(l or '\\x00') < ord(c[0])] * abs(ord(\n l or '\\x00') - ord(c[0])) + '.', key=len)], c[0]) or ''.join(j)\n",
"<assignment token>\n"
] | false |
99,333 |
dde5351c31e535e06bf62d927bddba427c6b66b3
|
# BJ 3019
# Split the implementation into small functions so it is easy to get right
# The height differences between adjacent columns must match the piece's profile.
def calc(i, s):
    """Return 1 if the piece profile s (digits = height offsets) fits flush at column i, else 0."""
    if i + len(s) > c:
return 0
base = a[i] - (ord(s[0]) - ord('0'))
for j in range(len(s)):
if base != a[i + j] - (ord(s[j]) - ord('0')):
return 0
return 1
c, p = map(int, input().split())
a = list(map(int, input().split()))
ans = 0
# Try placing the piece at every column.
for i in range(c):
    # Trick: encode each orientation's floor profile as a string of digits
if p == 1:
ans += calc(i, "0") + calc(i, "0000") # 바닥만 문자열로 간단하게!
elif p == 2:
ans += calc(i, "00")
elif p == 3:
ans += calc(i, "001") + calc(i, "10")
elif p == 4:
ans += calc(i, "100") + calc(i, "01")
elif p == 5:
ans += calc(i, "000") + calc(i, "01") + calc(i, "101") + calc(i, "10")
elif p == 6:
ans += calc(i, "000") + calc(i, "00") + calc(i, "011") + calc(i, "20")
elif p == 7:
ans += calc(i, "000") + calc(i, "00") + calc(i, "110") + calc(i, "02")
print(ans)
""" 이건 실수할 가능성이 높다.
tet = [
[],
[(1, 1, 1, 1)], # 예외처리: l 인경우는 무조건 성공
[(1, 1)],
[(1, 1, 2), (2, 1)],
[(2, 1, 1), (1, 2)],
[(1, 1, 1), (1, 2), (2, 1, 2), (2, 1)],
[(1, 1, 1), (1, 1), (1, 2, 2), (3, 1)],
[(1, 1, 1), (1, 3), (2, 2, 1), (1, 1)]
]
# 모든 자리에 놓아본다.
for idx in range(c - 1):
# 각 케이스별로 가능한지 체크
for case in tet[p]:
length = len(case)
if idx + length > c: # 범위 넘어감
continue
# 높이 차가 맞아야 한다.
for i in range(length - 1):
if case[i] - case[i + 1] != a[idx + i] - a[idx + i + 1]:
break
else:
ans += 1
if p == 1:
ans += c
"""
|
[
"# BJ 3019\n# 구현하게 쉽게 기능을 분할해라\n# 높이 차가 맞아야 한다.\n\ndef calc(i, s):\n if i + len(s) > c:\n return 0\n base = a[i] - (ord(s[0]) - ord('0'))\n for j in range(len(s)):\n if base != a[i + j] - (ord(s[j]) - ord('0')):\n return 0\n return 1\n\n\nc, p = map(int, input().split())\na = list(map(int, input().split()))\nans = 0\n# 모든 자리에 놓아본다.\nfor i in range(c):\n # case별 문자열로 표현하는 스킬\n if p == 1:\n ans += calc(i, \"0\") + calc(i, \"0000\") # 바닥만 문자열로 간단하게!\n elif p == 2:\n ans += calc(i, \"00\")\n elif p == 3:\n ans += calc(i, \"001\") + calc(i, \"10\")\n elif p == 4:\n ans += calc(i, \"100\") + calc(i, \"01\")\n elif p == 5:\n ans += calc(i, \"000\") + calc(i, \"01\") + calc(i, \"101\") + calc(i, \"10\")\n elif p == 6:\n ans += calc(i, \"000\") + calc(i, \"00\") + calc(i, \"011\") + calc(i, \"20\")\n elif p == 7:\n ans += calc(i, \"000\") + calc(i, \"00\") + calc(i, \"110\") + calc(i, \"02\")\nprint(ans)\n\"\"\" 이건 실수할 가능성이 높다.\ntet = [\n [],\n [(1, 1, 1, 1)], # 예외처리: l 인경우는 무조건 성공\n [(1, 1)],\n [(1, 1, 2), (2, 1)],\n [(2, 1, 1), (1, 2)],\n [(1, 1, 1), (1, 2), (2, 1, 2), (2, 1)],\n [(1, 1, 1), (1, 1), (1, 2, 2), (3, 1)],\n [(1, 1, 1), (1, 3), (2, 2, 1), (1, 1)]\n]\n\n# 모든 자리에 놓아본다.\nfor idx in range(c - 1):\n # 각 케이스별로 가능한지 체크\n for case in tet[p]:\n length = len(case)\n if idx + length > c: # 범위 넘어감\n continue\n # 높이 차가 맞아야 한다.\n for i in range(length - 1):\n if case[i] - case[i + 1] != a[idx + i] - a[idx + i + 1]:\n break\n else:\n ans += 1\n\nif p == 1:\n ans += c\n\"\"\"\n",
"def calc(i, s):\n if i + len(s) > c:\n return 0\n base = a[i] - (ord(s[0]) - ord('0'))\n for j in range(len(s)):\n if base != a[i + j] - (ord(s[j]) - ord('0')):\n return 0\n return 1\n\n\nc, p = map(int, input().split())\na = list(map(int, input().split()))\nans = 0\nfor i in range(c):\n if p == 1:\n ans += calc(i, '0') + calc(i, '0000')\n elif p == 2:\n ans += calc(i, '00')\n elif p == 3:\n ans += calc(i, '001') + calc(i, '10')\n elif p == 4:\n ans += calc(i, '100') + calc(i, '01')\n elif p == 5:\n ans += calc(i, '000') + calc(i, '01') + calc(i, '101') + calc(i, '10')\n elif p == 6:\n ans += calc(i, '000') + calc(i, '00') + calc(i, '011') + calc(i, '20')\n elif p == 7:\n ans += calc(i, '000') + calc(i, '00') + calc(i, '110') + calc(i, '02')\nprint(ans)\n<docstring token>\n",
"def calc(i, s):\n if i + len(s) > c:\n return 0\n base = a[i] - (ord(s[0]) - ord('0'))\n for j in range(len(s)):\n if base != a[i + j] - (ord(s[j]) - ord('0')):\n return 0\n return 1\n\n\n<assignment token>\nfor i in range(c):\n if p == 1:\n ans += calc(i, '0') + calc(i, '0000')\n elif p == 2:\n ans += calc(i, '00')\n elif p == 3:\n ans += calc(i, '001') + calc(i, '10')\n elif p == 4:\n ans += calc(i, '100') + calc(i, '01')\n elif p == 5:\n ans += calc(i, '000') + calc(i, '01') + calc(i, '101') + calc(i, '10')\n elif p == 6:\n ans += calc(i, '000') + calc(i, '00') + calc(i, '011') + calc(i, '20')\n elif p == 7:\n ans += calc(i, '000') + calc(i, '00') + calc(i, '110') + calc(i, '02')\nprint(ans)\n<docstring token>\n",
"def calc(i, s):\n if i + len(s) > c:\n return 0\n base = a[i] - (ord(s[0]) - ord('0'))\n for j in range(len(s)):\n if base != a[i + j] - (ord(s[j]) - ord('0')):\n return 0\n return 1\n\n\n<assignment token>\n<code token>\n<docstring token>\n",
"<function token>\n<assignment token>\n<code token>\n<docstring token>\n"
] | false |
99,334 |
9db08c80a4744b67a201f0d5edf3b3f52c40dd4e
|
from django.db import models
class Company(models.Model):
name = models.CharField(max_length=100, null=True)
address = models.CharField(max_length=100, null=True)
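    # NOTE: declaring a ManyToManyField on both models creates two independent
    # join tables; normally one field plus related_name covers both directions.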
directors = models.ManyToManyField("Director", related_name="directors")
siren = models.IntegerField()
def __repr__(self):
return self.name
def __str__(self):
return self.__repr__()
class Director(models.Model):
name = models.CharField(max_length=100, null=True)
date_of_birth = models.CharField(max_length=100, null=True)
companies = models.ManyToManyField("Company", related_name="companies")
def __repr__(self):
return self.name
def __str__(self):
return self.__repr__()
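
# A minimal usage sketch (assumes a configured Django project; the example
# values are illustrative and not part of the original models):
# company = Company.objects.create(name="Acme", address="1 Main St", siren=123456789)
# director = Director.objects.create(name="Ada", date_of_birth="1815-12-10")
# company.directors.add(director)  # fills only the Company.directors join table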
|
[
"from django.db import models\n\n\nclass Company(models.Model):\n name = models.CharField(max_length=100, null=True)\n address = models.CharField(max_length=100, null=True)\n directors = models.ManyToManyField(\"Director\", related_name=\"directors\")\n siren = models.IntegerField()\n\n def __repr__(self):\n return self.name\n\n def __str__(self):\n return self.__repr__()\n\n\nclass Director(models.Model):\n name = models.CharField(max_length=100, null=True)\n date_of_birth = models.CharField(max_length=100, null=True)\n companies = models.ManyToManyField(\"Company\", related_name=\"companies\")\n\n def __repr__(self):\n return self.name\n\n def __str__(self):\n return self.__repr__()\n",
"from django.db import models\n\n\nclass Company(models.Model):\n name = models.CharField(max_length=100, null=True)\n address = models.CharField(max_length=100, null=True)\n directors = models.ManyToManyField('Director', related_name='directors')\n siren = models.IntegerField()\n\n def __repr__(self):\n return self.name\n\n def __str__(self):\n return self.__repr__()\n\n\nclass Director(models.Model):\n name = models.CharField(max_length=100, null=True)\n date_of_birth = models.CharField(max_length=100, null=True)\n companies = models.ManyToManyField('Company', related_name='companies')\n\n def __repr__(self):\n return self.name\n\n def __str__(self):\n return self.__repr__()\n",
"<import token>\n\n\nclass Company(models.Model):\n name = models.CharField(max_length=100, null=True)\n address = models.CharField(max_length=100, null=True)\n directors = models.ManyToManyField('Director', related_name='directors')\n siren = models.IntegerField()\n\n def __repr__(self):\n return self.name\n\n def __str__(self):\n return self.__repr__()\n\n\nclass Director(models.Model):\n name = models.CharField(max_length=100, null=True)\n date_of_birth = models.CharField(max_length=100, null=True)\n companies = models.ManyToManyField('Company', related_name='companies')\n\n def __repr__(self):\n return self.name\n\n def __str__(self):\n return self.__repr__()\n",
"<import token>\n\n\nclass Company(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __repr__(self):\n return self.name\n\n def __str__(self):\n return self.__repr__()\n\n\nclass Director(models.Model):\n name = models.CharField(max_length=100, null=True)\n date_of_birth = models.CharField(max_length=100, null=True)\n companies = models.ManyToManyField('Company', related_name='companies')\n\n def __repr__(self):\n return self.name\n\n def __str__(self):\n return self.__repr__()\n",
"<import token>\n\n\nclass Company(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def __str__(self):\n return self.__repr__()\n\n\nclass Director(models.Model):\n name = models.CharField(max_length=100, null=True)\n date_of_birth = models.CharField(max_length=100, null=True)\n companies = models.ManyToManyField('Company', related_name='companies')\n\n def __repr__(self):\n return self.name\n\n def __str__(self):\n return self.__repr__()\n",
"<import token>\n\n\nclass Company(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n\nclass Director(models.Model):\n name = models.CharField(max_length=100, null=True)\n date_of_birth = models.CharField(max_length=100, null=True)\n companies = models.ManyToManyField('Company', related_name='companies')\n\n def __repr__(self):\n return self.name\n\n def __str__(self):\n return self.__repr__()\n",
"<import token>\n<class token>\n\n\nclass Director(models.Model):\n name = models.CharField(max_length=100, null=True)\n date_of_birth = models.CharField(max_length=100, null=True)\n companies = models.ManyToManyField('Company', related_name='companies')\n\n def __repr__(self):\n return self.name\n\n def __str__(self):\n return self.__repr__()\n",
"<import token>\n<class token>\n\n\nclass Director(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __repr__(self):\n return self.name\n\n def __str__(self):\n return self.__repr__()\n",
"<import token>\n<class token>\n\n\nclass Director(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def __str__(self):\n return self.__repr__()\n",
"<import token>\n<class token>\n\n\nclass Director(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n<class token>\n"
] | false |
99,335 |
114348377f847d0b511e30aae79d98b876eb49ae
|
from main import app
from models import db, Ingredients
import csv
db.create_all(app=app)
with open('ingrediants.csv', newline='') as csvfile:
    reader = csv.DictReader(csvfile, delimiter=',')

    for row in reader:
        # normalize empty CSV fields to NULL
        if row['item'] == '':
            row['item'] = None
        if row['price'] == '':
            row['price'] = None

        ingrediants = Ingredients(
            item=row['item'],
            price=row['price']
        )
        db.session.add(ingrediants)
    db.session.commit()
print('database initialized!')
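
# The imported models module is not shown here. A flask_sqlalchemy model
# compatible with the loader above might look like this sketch; only the names
# db, Ingredients, item and price come from the code, the column types are
# assumptions:
from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()


class Ingredients(db.Model):
    id = db.Column(db.Integer, primary_key=True)  # surrogate key, assumed
    item = db.Column(db.String(80), nullable=True)
    price = db.Column(db.Float, nullable=True)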
|
[
"from main import app\nfrom models import db, Ingredients\nimport csv\n\ndb.create_all(app=app)\n\nwith open('ingrediants.csv', newline='') as csvfile:\n reader = csv.DictReader(csvfile, delimiter= ',')\n\n for row in reader:\n if row['item'] == '':\n row['item'] = None\n if row['price'] == '':\n row['price'] = None\n\n ingrediants = Ingredients(\n item= row['item'],\n price= row['price']\n )\n db.session.add(item)\n db.session.commit()\nprint('database initialized!')",
"from main import app\nfrom models import db, Ingredients\nimport csv\ndb.create_all(app=app)\nwith open('ingrediants.csv', newline='') as csvfile:\n reader = csv.DictReader(csvfile, delimiter=',')\n for row in reader:\n if row['item'] == '':\n row['item'] = None\n if row['price'] == '':\n row['price'] = None\n ingrediants = Ingredients(item=row['item'], price=row['price'])\n db.session.add(item)\n db.session.commit()\nprint('database initialized!')\n",
"<import token>\ndb.create_all(app=app)\nwith open('ingrediants.csv', newline='') as csvfile:\n reader = csv.DictReader(csvfile, delimiter=',')\n for row in reader:\n if row['item'] == '':\n row['item'] = None\n if row['price'] == '':\n row['price'] = None\n ingrediants = Ingredients(item=row['item'], price=row['price'])\n db.session.add(item)\n db.session.commit()\nprint('database initialized!')\n",
"<import token>\n<code token>\n"
] | false |
99,336 |
ce0cffd9083c2640833e3b519b868a2914632546
|
'''
8. In years past, a Python function was needed that receives a text containing bits (symbols 1 and 0),
   and had to build a list containing 8 bits per element (1 byte). For example, if the function is invoked with the
   following text as a parameter: "1001010101000101010101100101001010101010"
the function returns: ['10010101', '01000101', '01010110', '01010010', '10101010']

The programmer at the time put together the following code:
'''

# Function definitions
def validacion(texto):
    """
    Validates that the text is binary
    :param texto: string of binary digits
    :return: a boolean, True or False
    """
    bandera = False
    for caracter in texto:
        if caracter != '0' and caracter != '1':
            bandera = True
    if bandera == True:
        print("The text entered is not binary")
    return bandera

def ej08a(texto):
    """Builds a list of bytes from the text received as a parameter"""
    indice = 0
    resultado = []
    current_byte = ""

    for i in texto:
        current_byte += i  # append the new character to the current byte
        indice += 1  # increment the index by one
        if indice % 8 == 0:
            # A new byte begins
            resultado.append(current_byte)
            current_byte = ""
    return resultado

# Main program
texto = "111101010100010101010110010100101011101"

while validacion(texto) != False:  # Validate the text; if it is not valid, ask for a valid one
    texto = input('Enter a binary text: ')
print(ej08a(texto))
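
# The same grouping can be written with slices; an editorial alternative, not
# part of the original exercise (it matches ej08a in dropping a trailing
# partial byte):
def ej08a_slices(texto):
    return [texto[i:i + 8] for i in range(0, len(texto) - len(texto) % 8, 8)]

assert ej08a_slices('1001010101000101') == ['10010101', '01000101']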
|
[
"'''\n8. En Años anteriores, se necesitaba una función en python que reciba un texto conteniendo bits (simbolos 1 y 0),\n y debia armar una lista conteniendo 8 bits por elementos (1 byte). Por ejemplo, si se incova la funcion con el\n siguiente texto como parámetro: \"1001010101000101010101100101001010101010\"\nla funcion devuelve: ['10010101', '01000101', '01010110', '01010010', '10101010']\n\nEl programador de ese momento armó el siguiente código:\n'''\n\n#Definicion de las funciones\ndef validacion(texto):\n \"\"\"\n Permite validar el texto binario\n :param texto: String de numeros binarios\n :return: Un bolleano con True or False\n \"\"\"\n bandera = False\n for caracter in texto:\n if caracter != '0' and caracter != '1':\n bandera = True\n if bandera == True:\n print(\"El texto ingresado no es binario\")\n return bandera\n\ndef ej08a(texto):\n \"\"\"Arma una lista de bytes acorde al texto recibido por parametro\"\"\"\n indice = 0\n resultado = []\n current_byte = \"\"\n\n for i in texto:\n current_byte += i # se agrega el nuevo caracter al byte actual\n indice += 1 # se incrementa en uno el indice\n if indice % 8 == 0:\n # Comienza un nuevo byte\n resultado.append(current_byte)\n current_byte = \"\"\n return resultado\n\n#Cuerpo del programa\ntexto = \"111101010100010101010110010100101011101\"\n\n\nwhile validacion(texto) != False: #Aqui valido el texto, y si no es valido solicito se ingrese uno válido\n texto = input('Ingrese un texto binario: ')\n\nprint(ej08a(texto))\n\n",
"<docstring token>\n\n\ndef validacion(texto):\n \"\"\"\n Permite validar el texto binario\n :param texto: String de numeros binarios\n :return: Un bolleano con True or False\n \"\"\"\n bandera = False\n for caracter in texto:\n if caracter != '0' and caracter != '1':\n bandera = True\n if bandera == True:\n print('El texto ingresado no es binario')\n return bandera\n\n\ndef ej08a(texto):\n \"\"\"Arma una lista de bytes acorde al texto recibido por parametro\"\"\"\n indice = 0\n resultado = []\n current_byte = ''\n for i in texto:\n current_byte += i\n indice += 1\n if indice % 8 == 0:\n resultado.append(current_byte)\n current_byte = ''\n return resultado\n\n\ntexto = '111101010100010101010110010100101011101'\nwhile validacion(texto) != False:\n texto = input('Ingrese un texto binario: ')\nprint(ej08a(texto))\n",
"<docstring token>\n\n\ndef validacion(texto):\n \"\"\"\n Permite validar el texto binario\n :param texto: String de numeros binarios\n :return: Un bolleano con True or False\n \"\"\"\n bandera = False\n for caracter in texto:\n if caracter != '0' and caracter != '1':\n bandera = True\n if bandera == True:\n print('El texto ingresado no es binario')\n return bandera\n\n\ndef ej08a(texto):\n \"\"\"Arma una lista de bytes acorde al texto recibido por parametro\"\"\"\n indice = 0\n resultado = []\n current_byte = ''\n for i in texto:\n current_byte += i\n indice += 1\n if indice % 8 == 0:\n resultado.append(current_byte)\n current_byte = ''\n return resultado\n\n\n<assignment token>\nwhile validacion(texto) != False:\n texto = input('Ingrese un texto binario: ')\nprint(ej08a(texto))\n",
"<docstring token>\n\n\ndef validacion(texto):\n \"\"\"\n Permite validar el texto binario\n :param texto: String de numeros binarios\n :return: Un bolleano con True or False\n \"\"\"\n bandera = False\n for caracter in texto:\n if caracter != '0' and caracter != '1':\n bandera = True\n if bandera == True:\n print('El texto ingresado no es binario')\n return bandera\n\n\ndef ej08a(texto):\n \"\"\"Arma una lista de bytes acorde al texto recibido por parametro\"\"\"\n indice = 0\n resultado = []\n current_byte = ''\n for i in texto:\n current_byte += i\n indice += 1\n if indice % 8 == 0:\n resultado.append(current_byte)\n current_byte = ''\n return resultado\n\n\n<assignment token>\n<code token>\n",
"<docstring token>\n\n\ndef validacion(texto):\n \"\"\"\n Permite validar el texto binario\n :param texto: String de numeros binarios\n :return: Un bolleano con True or False\n \"\"\"\n bandera = False\n for caracter in texto:\n if caracter != '0' and caracter != '1':\n bandera = True\n if bandera == True:\n print('El texto ingresado no es binario')\n return bandera\n\n\n<function token>\n<assignment token>\n<code token>\n",
"<docstring token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
99,337 |
c8db5bde09945e3fe96c629dc41efbe5055ae1b9
|
try:
from configparser import NoOptionError, NoSectionError
except ImportError:
from ConfigParser import NoOptionError, NoSectionError
import json
import requests
import sys
from fedcred import common
class Okta(object):
def __init__(self, config):
self.config = config
try:
self.okta_org = self.config.get('okta', 'organization')
self.auth_url = "https://" + self.okta_org + "/api/v1/authn"
self.app_url = self.config.get('okta', 'app_url')
except (NoOptionError, NoSectionError) as e:
sys.exit(e.message)
self.headers_dict = {
"Content-Type": "application/json",
"Accept": "application/json"
}
def second_factor(self, factor, state_token):
session = requests.Session()
response = session.post(
factor['_links']['verify']['href'],
headers=self.headers_dict,
data=json.dumps({"stateToken": state_token})
)
try:
passcode_input = raw_input
except NameError:
passcode_input = input
passcode = passcode_input("Please provide your one-time passcode: ")
session = requests.Session()
response = session.post(
factor['_links']['verify']['href'],
headers=self.headers_dict,
data=json.dumps(
{"stateToken": state_token,
"passCode": passcode
})
)
if response.status_code != 200:
sys.exit("Second factor verification failed: %s" %
(json.loads(response.text)['errorSummary']),)
return response
def process_success(self, response):
session_token = json.loads(response.text)['sessionToken']
session = requests.Session()
saml = session.get(self.app_url + "?onetimetoken=" + session_token)
assertion = common.get_saml_assertion(saml)
arn_dict = common.get_arns_from_assertion(assertion)
sts_creds = common.get_sts_creds(arn_dict)
try:
common.write_credentials(
self.config.get(
common.DEFAULT_CONFIG_SECTION,
'aws_credential_profile'
),
sts_creds
)
except (NoOptionError, NoSectionError) as e:
sys.exit(e.message)
def auth(self):
session = requests.Session()
username, password = common.get_user_credentials()
payload_dict = {
"username": username,
"password": password
}
response = session.post(
self.auth_url,
headers=self.headers_dict,
data=json.dumps(payload_dict)
)
if response.status_code != 200:
e = json.loads(response.text)
sys.exit("Primary authentication failed: %s. Error code: %s" %
(e['errorSummary'], e['errorCode']))
auth_response = json.loads(response.text)
if auth_response['status'] == 'MFA_REQUIRED':
print("Please choose a second factor:\n")
for i in range(0, len(auth_response['_embedded']['factors'])):
print("[%s] - %s" % (i,
auth_response['_embedded']['factors'][i]['factorType']))
try:
factor_input = raw_input
except NameError:
factor_input = input
choice = int(factor_input("Chose a second factor: "))
if choice > (len(auth_response['_embedded']['factors']) - 1):
sys.exit('Sorry, that is not a valid role choice.')
chosen_factor = auth_response['_embedded']['factors'][choice]
if (chosen_factor['factorType'] == 'sms' or
chosen_factor['factorType'] == 'token:software:totp'):
response = self.second_factor(
chosen_factor, auth_response['stateToken'])
else:
sys.exit("Unsupported second factor.")
if json.loads(response.text)['status'] == 'SUCCESS':
self.process_success(response)
else:
print("Authentication failed with status: %s" %
(json.loads(response.text)['status'],))
elif auth_response['status'] == 'SUCCESS':
self.process_success(response)
else:
print("Unable to login: %s" % (auth_response['status'],))
|
[
"try:\n from configparser import NoOptionError, NoSectionError\nexcept ImportError:\n from ConfigParser import NoOptionError, NoSectionError\nimport json\nimport requests\nimport sys\n\nfrom fedcred import common\n\n\nclass Okta(object):\n def __init__(self, config):\n self.config = config\n try:\n self.okta_org = self.config.get('okta', 'organization')\n self.auth_url = \"https://\" + self.okta_org + \"/api/v1/authn\"\n self.app_url = self.config.get('okta', 'app_url')\n except (NoOptionError, NoSectionError) as e:\n sys.exit(e.message)\n self.headers_dict = {\n \"Content-Type\": \"application/json\",\n \"Accept\": \"application/json\"\n }\n\n def second_factor(self, factor, state_token):\n session = requests.Session()\n response = session.post(\n factor['_links']['verify']['href'],\n headers=self.headers_dict,\n data=json.dumps({\"stateToken\": state_token})\n )\n try:\n passcode_input = raw_input\n except NameError:\n passcode_input = input\n passcode = passcode_input(\"Please provide your one-time passcode: \")\n session = requests.Session()\n response = session.post(\n factor['_links']['verify']['href'],\n headers=self.headers_dict,\n data=json.dumps(\n {\"stateToken\": state_token,\n \"passCode\": passcode\n })\n )\n if response.status_code != 200:\n sys.exit(\"Second factor verification failed: %s\" %\n (json.loads(response.text)['errorSummary']),)\n return response\n\n def process_success(self, response):\n session_token = json.loads(response.text)['sessionToken']\n session = requests.Session()\n saml = session.get(self.app_url + \"?onetimetoken=\" + session_token)\n assertion = common.get_saml_assertion(saml)\n arn_dict = common.get_arns_from_assertion(assertion)\n sts_creds = common.get_sts_creds(arn_dict)\n try:\n common.write_credentials(\n self.config.get(\n common.DEFAULT_CONFIG_SECTION,\n 'aws_credential_profile'\n ),\n sts_creds\n )\n except (NoOptionError, NoSectionError) as e:\n sys.exit(e.message)\n\n def auth(self):\n session = requests.Session()\n username, password = common.get_user_credentials()\n payload_dict = {\n \"username\": username,\n \"password\": password\n }\n\n response = session.post(\n self.auth_url,\n headers=self.headers_dict,\n data=json.dumps(payload_dict)\n )\n\n if response.status_code != 200:\n e = json.loads(response.text)\n sys.exit(\"Primary authentication failed: %s. 
Error code: %s\" %\n (e['errorSummary'], e['errorCode']))\n\n auth_response = json.loads(response.text)\n if auth_response['status'] == 'MFA_REQUIRED':\n print(\"Please choose a second factor:\\n\")\n for i in range(0, len(auth_response['_embedded']['factors'])):\n print(\"[%s] - %s\" % (i,\n auth_response['_embedded']['factors'][i]['factorType']))\n\n try:\n factor_input = raw_input\n except NameError:\n factor_input = input\n choice = int(factor_input(\"Chose a second factor: \"))\n if choice > (len(auth_response['_embedded']['factors']) - 1):\n sys.exit('Sorry, that is not a valid role choice.')\n chosen_factor = auth_response['_embedded']['factors'][choice]\n\n if (chosen_factor['factorType'] == 'sms' or\n chosen_factor['factorType'] == 'token:software:totp'):\n response = self.second_factor(\n chosen_factor, auth_response['stateToken'])\n else:\n sys.exit(\"Unsupported second factor.\")\n\n if json.loads(response.text)['status'] == 'SUCCESS':\n self.process_success(response)\n else:\n print(\"Authentication failed with status: %s\" %\n (json.loads(response.text)['status'],))\n elif auth_response['status'] == 'SUCCESS':\n self.process_success(response)\n else:\n print(\"Unable to login: %s\" % (auth_response['status'],))\n",
"try:\n from configparser import NoOptionError, NoSectionError\nexcept ImportError:\n from ConfigParser import NoOptionError, NoSectionError\nimport json\nimport requests\nimport sys\nfrom fedcred import common\n\n\nclass Okta(object):\n\n def __init__(self, config):\n self.config = config\n try:\n self.okta_org = self.config.get('okta', 'organization')\n self.auth_url = 'https://' + self.okta_org + '/api/v1/authn'\n self.app_url = self.config.get('okta', 'app_url')\n except (NoOptionError, NoSectionError) as e:\n sys.exit(e.message)\n self.headers_dict = {'Content-Type': 'application/json', 'Accept':\n 'application/json'}\n\n def second_factor(self, factor, state_token):\n session = requests.Session()\n response = session.post(factor['_links']['verify']['href'], headers\n =self.headers_dict, data=json.dumps({'stateToken': state_token}))\n try:\n passcode_input = raw_input\n except NameError:\n passcode_input = input\n passcode = passcode_input('Please provide your one-time passcode: ')\n session = requests.Session()\n response = session.post(factor['_links']['verify']['href'], headers\n =self.headers_dict, data=json.dumps({'stateToken': state_token,\n 'passCode': passcode}))\n if response.status_code != 200:\n sys.exit('Second factor verification failed: %s' % json.loads(\n response.text)['errorSummary'])\n return response\n\n def process_success(self, response):\n session_token = json.loads(response.text)['sessionToken']\n session = requests.Session()\n saml = session.get(self.app_url + '?onetimetoken=' + session_token)\n assertion = common.get_saml_assertion(saml)\n arn_dict = common.get_arns_from_assertion(assertion)\n sts_creds = common.get_sts_creds(arn_dict)\n try:\n common.write_credentials(self.config.get(common.\n DEFAULT_CONFIG_SECTION, 'aws_credential_profile'), sts_creds)\n except (NoOptionError, NoSectionError) as e:\n sys.exit(e.message)\n\n def auth(self):\n session = requests.Session()\n username, password = common.get_user_credentials()\n payload_dict = {'username': username, 'password': password}\n response = session.post(self.auth_url, headers=self.headers_dict,\n data=json.dumps(payload_dict))\n if response.status_code != 200:\n e = json.loads(response.text)\n sys.exit('Primary authentication failed: %s. Error code: %s' %\n (e['errorSummary'], e['errorCode']))\n auth_response = json.loads(response.text)\n if auth_response['status'] == 'MFA_REQUIRED':\n print('Please choose a second factor:\\n')\n for i in range(0, len(auth_response['_embedded']['factors'])):\n print('[%s] - %s' % (i, auth_response['_embedded'][\n 'factors'][i]['factorType']))\n try:\n factor_input = raw_input\n except NameError:\n factor_input = input\n choice = int(factor_input('Chose a second factor: '))\n if choice > len(auth_response['_embedded']['factors']) - 1:\n sys.exit('Sorry, that is not a valid role choice.')\n chosen_factor = auth_response['_embedded']['factors'][choice]\n if chosen_factor['factorType'] == 'sms' or chosen_factor[\n 'factorType'] == 'token:software:totp':\n response = self.second_factor(chosen_factor, auth_response[\n 'stateToken'])\n else:\n sys.exit('Unsupported second factor.')\n if json.loads(response.text)['status'] == 'SUCCESS':\n self.process_success(response)\n else:\n print('Authentication failed with status: %s' % (json.loads\n (response.text)['status'],))\n elif auth_response['status'] == 'SUCCESS':\n self.process_success(response)\n else:\n print('Unable to login: %s' % (auth_response['status'],))\n",
"try:\n from configparser import NoOptionError, NoSectionError\nexcept ImportError:\n from ConfigParser import NoOptionError, NoSectionError\n<import token>\n\n\nclass Okta(object):\n\n def __init__(self, config):\n self.config = config\n try:\n self.okta_org = self.config.get('okta', 'organization')\n self.auth_url = 'https://' + self.okta_org + '/api/v1/authn'\n self.app_url = self.config.get('okta', 'app_url')\n except (NoOptionError, NoSectionError) as e:\n sys.exit(e.message)\n self.headers_dict = {'Content-Type': 'application/json', 'Accept':\n 'application/json'}\n\n def second_factor(self, factor, state_token):\n session = requests.Session()\n response = session.post(factor['_links']['verify']['href'], headers\n =self.headers_dict, data=json.dumps({'stateToken': state_token}))\n try:\n passcode_input = raw_input\n except NameError:\n passcode_input = input\n passcode = passcode_input('Please provide your one-time passcode: ')\n session = requests.Session()\n response = session.post(factor['_links']['verify']['href'], headers\n =self.headers_dict, data=json.dumps({'stateToken': state_token,\n 'passCode': passcode}))\n if response.status_code != 200:\n sys.exit('Second factor verification failed: %s' % json.loads(\n response.text)['errorSummary'])\n return response\n\n def process_success(self, response):\n session_token = json.loads(response.text)['sessionToken']\n session = requests.Session()\n saml = session.get(self.app_url + '?onetimetoken=' + session_token)\n assertion = common.get_saml_assertion(saml)\n arn_dict = common.get_arns_from_assertion(assertion)\n sts_creds = common.get_sts_creds(arn_dict)\n try:\n common.write_credentials(self.config.get(common.\n DEFAULT_CONFIG_SECTION, 'aws_credential_profile'), sts_creds)\n except (NoOptionError, NoSectionError) as e:\n sys.exit(e.message)\n\n def auth(self):\n session = requests.Session()\n username, password = common.get_user_credentials()\n payload_dict = {'username': username, 'password': password}\n response = session.post(self.auth_url, headers=self.headers_dict,\n data=json.dumps(payload_dict))\n if response.status_code != 200:\n e = json.loads(response.text)\n sys.exit('Primary authentication failed: %s. Error code: %s' %\n (e['errorSummary'], e['errorCode']))\n auth_response = json.loads(response.text)\n if auth_response['status'] == 'MFA_REQUIRED':\n print('Please choose a second factor:\\n')\n for i in range(0, len(auth_response['_embedded']['factors'])):\n print('[%s] - %s' % (i, auth_response['_embedded'][\n 'factors'][i]['factorType']))\n try:\n factor_input = raw_input\n except NameError:\n factor_input = input\n choice = int(factor_input('Chose a second factor: '))\n if choice > len(auth_response['_embedded']['factors']) - 1:\n sys.exit('Sorry, that is not a valid role choice.')\n chosen_factor = auth_response['_embedded']['factors'][choice]\n if chosen_factor['factorType'] == 'sms' or chosen_factor[\n 'factorType'] == 'token:software:totp':\n response = self.second_factor(chosen_factor, auth_response[\n 'stateToken'])\n else:\n sys.exit('Unsupported second factor.')\n if json.loads(response.text)['status'] == 'SUCCESS':\n self.process_success(response)\n else:\n print('Authentication failed with status: %s' % (json.loads\n (response.text)['status'],))\n elif auth_response['status'] == 'SUCCESS':\n self.process_success(response)\n else:\n print('Unable to login: %s' % (auth_response['status'],))\n",
"<code token>\n<import token>\n\n\nclass Okta(object):\n\n def __init__(self, config):\n self.config = config\n try:\n self.okta_org = self.config.get('okta', 'organization')\n self.auth_url = 'https://' + self.okta_org + '/api/v1/authn'\n self.app_url = self.config.get('okta', 'app_url')\n except (NoOptionError, NoSectionError) as e:\n sys.exit(e.message)\n self.headers_dict = {'Content-Type': 'application/json', 'Accept':\n 'application/json'}\n\n def second_factor(self, factor, state_token):\n session = requests.Session()\n response = session.post(factor['_links']['verify']['href'], headers\n =self.headers_dict, data=json.dumps({'stateToken': state_token}))\n try:\n passcode_input = raw_input\n except NameError:\n passcode_input = input\n passcode = passcode_input('Please provide your one-time passcode: ')\n session = requests.Session()\n response = session.post(factor['_links']['verify']['href'], headers\n =self.headers_dict, data=json.dumps({'stateToken': state_token,\n 'passCode': passcode}))\n if response.status_code != 200:\n sys.exit('Second factor verification failed: %s' % json.loads(\n response.text)['errorSummary'])\n return response\n\n def process_success(self, response):\n session_token = json.loads(response.text)['sessionToken']\n session = requests.Session()\n saml = session.get(self.app_url + '?onetimetoken=' + session_token)\n assertion = common.get_saml_assertion(saml)\n arn_dict = common.get_arns_from_assertion(assertion)\n sts_creds = common.get_sts_creds(arn_dict)\n try:\n common.write_credentials(self.config.get(common.\n DEFAULT_CONFIG_SECTION, 'aws_credential_profile'), sts_creds)\n except (NoOptionError, NoSectionError) as e:\n sys.exit(e.message)\n\n def auth(self):\n session = requests.Session()\n username, password = common.get_user_credentials()\n payload_dict = {'username': username, 'password': password}\n response = session.post(self.auth_url, headers=self.headers_dict,\n data=json.dumps(payload_dict))\n if response.status_code != 200:\n e = json.loads(response.text)\n sys.exit('Primary authentication failed: %s. Error code: %s' %\n (e['errorSummary'], e['errorCode']))\n auth_response = json.loads(response.text)\n if auth_response['status'] == 'MFA_REQUIRED':\n print('Please choose a second factor:\\n')\n for i in range(0, len(auth_response['_embedded']['factors'])):\n print('[%s] - %s' % (i, auth_response['_embedded'][\n 'factors'][i]['factorType']))\n try:\n factor_input = raw_input\n except NameError:\n factor_input = input\n choice = int(factor_input('Chose a second factor: '))\n if choice > len(auth_response['_embedded']['factors']) - 1:\n sys.exit('Sorry, that is not a valid role choice.')\n chosen_factor = auth_response['_embedded']['factors'][choice]\n if chosen_factor['factorType'] == 'sms' or chosen_factor[\n 'factorType'] == 'token:software:totp':\n response = self.second_factor(chosen_factor, auth_response[\n 'stateToken'])\n else:\n sys.exit('Unsupported second factor.')\n if json.loads(response.text)['status'] == 'SUCCESS':\n self.process_success(response)\n else:\n print('Authentication failed with status: %s' % (json.loads\n (response.text)['status'],))\n elif auth_response['status'] == 'SUCCESS':\n self.process_success(response)\n else:\n print('Unable to login: %s' % (auth_response['status'],))\n",
"<code token>\n<import token>\n\n\nclass Okta(object):\n\n def __init__(self, config):\n self.config = config\n try:\n self.okta_org = self.config.get('okta', 'organization')\n self.auth_url = 'https://' + self.okta_org + '/api/v1/authn'\n self.app_url = self.config.get('okta', 'app_url')\n except (NoOptionError, NoSectionError) as e:\n sys.exit(e.message)\n self.headers_dict = {'Content-Type': 'application/json', 'Accept':\n 'application/json'}\n <function token>\n\n def process_success(self, response):\n session_token = json.loads(response.text)['sessionToken']\n session = requests.Session()\n saml = session.get(self.app_url + '?onetimetoken=' + session_token)\n assertion = common.get_saml_assertion(saml)\n arn_dict = common.get_arns_from_assertion(assertion)\n sts_creds = common.get_sts_creds(arn_dict)\n try:\n common.write_credentials(self.config.get(common.\n DEFAULT_CONFIG_SECTION, 'aws_credential_profile'), sts_creds)\n except (NoOptionError, NoSectionError) as e:\n sys.exit(e.message)\n\n def auth(self):\n session = requests.Session()\n username, password = common.get_user_credentials()\n payload_dict = {'username': username, 'password': password}\n response = session.post(self.auth_url, headers=self.headers_dict,\n data=json.dumps(payload_dict))\n if response.status_code != 200:\n e = json.loads(response.text)\n sys.exit('Primary authentication failed: %s. Error code: %s' %\n (e['errorSummary'], e['errorCode']))\n auth_response = json.loads(response.text)\n if auth_response['status'] == 'MFA_REQUIRED':\n print('Please choose a second factor:\\n')\n for i in range(0, len(auth_response['_embedded']['factors'])):\n print('[%s] - %s' % (i, auth_response['_embedded'][\n 'factors'][i]['factorType']))\n try:\n factor_input = raw_input\n except NameError:\n factor_input = input\n choice = int(factor_input('Chose a second factor: '))\n if choice > len(auth_response['_embedded']['factors']) - 1:\n sys.exit('Sorry, that is not a valid role choice.')\n chosen_factor = auth_response['_embedded']['factors'][choice]\n if chosen_factor['factorType'] == 'sms' or chosen_factor[\n 'factorType'] == 'token:software:totp':\n response = self.second_factor(chosen_factor, auth_response[\n 'stateToken'])\n else:\n sys.exit('Unsupported second factor.')\n if json.loads(response.text)['status'] == 'SUCCESS':\n self.process_success(response)\n else:\n print('Authentication failed with status: %s' % (json.loads\n (response.text)['status'],))\n elif auth_response['status'] == 'SUCCESS':\n self.process_success(response)\n else:\n print('Unable to login: %s' % (auth_response['status'],))\n",
"<code token>\n<import token>\n\n\nclass Okta(object):\n\n def __init__(self, config):\n self.config = config\n try:\n self.okta_org = self.config.get('okta', 'organization')\n self.auth_url = 'https://' + self.okta_org + '/api/v1/authn'\n self.app_url = self.config.get('okta', 'app_url')\n except (NoOptionError, NoSectionError) as e:\n sys.exit(e.message)\n self.headers_dict = {'Content-Type': 'application/json', 'Accept':\n 'application/json'}\n <function token>\n <function token>\n\n def auth(self):\n session = requests.Session()\n username, password = common.get_user_credentials()\n payload_dict = {'username': username, 'password': password}\n response = session.post(self.auth_url, headers=self.headers_dict,\n data=json.dumps(payload_dict))\n if response.status_code != 200:\n e = json.loads(response.text)\n sys.exit('Primary authentication failed: %s. Error code: %s' %\n (e['errorSummary'], e['errorCode']))\n auth_response = json.loads(response.text)\n if auth_response['status'] == 'MFA_REQUIRED':\n print('Please choose a second factor:\\n')\n for i in range(0, len(auth_response['_embedded']['factors'])):\n print('[%s] - %s' % (i, auth_response['_embedded'][\n 'factors'][i]['factorType']))\n try:\n factor_input = raw_input\n except NameError:\n factor_input = input\n choice = int(factor_input('Chose a second factor: '))\n if choice > len(auth_response['_embedded']['factors']) - 1:\n sys.exit('Sorry, that is not a valid role choice.')\n chosen_factor = auth_response['_embedded']['factors'][choice]\n if chosen_factor['factorType'] == 'sms' or chosen_factor[\n 'factorType'] == 'token:software:totp':\n response = self.second_factor(chosen_factor, auth_response[\n 'stateToken'])\n else:\n sys.exit('Unsupported second factor.')\n if json.loads(response.text)['status'] == 'SUCCESS':\n self.process_success(response)\n else:\n print('Authentication failed with status: %s' % (json.loads\n (response.text)['status'],))\n elif auth_response['status'] == 'SUCCESS':\n self.process_success(response)\n else:\n print('Unable to login: %s' % (auth_response['status'],))\n",
"<code token>\n<import token>\n\n\nclass Okta(object):\n <function token>\n <function token>\n <function token>\n\n def auth(self):\n session = requests.Session()\n username, password = common.get_user_credentials()\n payload_dict = {'username': username, 'password': password}\n response = session.post(self.auth_url, headers=self.headers_dict,\n data=json.dumps(payload_dict))\n if response.status_code != 200:\n e = json.loads(response.text)\n sys.exit('Primary authentication failed: %s. Error code: %s' %\n (e['errorSummary'], e['errorCode']))\n auth_response = json.loads(response.text)\n if auth_response['status'] == 'MFA_REQUIRED':\n print('Please choose a second factor:\\n')\n for i in range(0, len(auth_response['_embedded']['factors'])):\n print('[%s] - %s' % (i, auth_response['_embedded'][\n 'factors'][i]['factorType']))\n try:\n factor_input = raw_input\n except NameError:\n factor_input = input\n choice = int(factor_input('Chose a second factor: '))\n if choice > len(auth_response['_embedded']['factors']) - 1:\n sys.exit('Sorry, that is not a valid role choice.')\n chosen_factor = auth_response['_embedded']['factors'][choice]\n if chosen_factor['factorType'] == 'sms' or chosen_factor[\n 'factorType'] == 'token:software:totp':\n response = self.second_factor(chosen_factor, auth_response[\n 'stateToken'])\n else:\n sys.exit('Unsupported second factor.')\n if json.loads(response.text)['status'] == 'SUCCESS':\n self.process_success(response)\n else:\n print('Authentication failed with status: %s' % (json.loads\n (response.text)['status'],))\n elif auth_response['status'] == 'SUCCESS':\n self.process_success(response)\n else:\n print('Unable to login: %s' % (auth_response['status'],))\n",
"<code token>\n<import token>\n\n\nclass Okta(object):\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<code token>\n<import token>\n<class token>\n"
] | false |
99,338 |
ec8c5efb5d112e3297a0d9e1d3203e4e65a65331
|
'''Utility functions
'''
import os
import subprocess
import sys
from collist import collist
def toBool(input_string):
return input_string.upper() in ('T', 'TRUE', '1', 'YES', 'ON')
VERBOSE = toBool(os.getenv('VERBOSE', 'false'))
class QuitApplication(Exception):
pass
class MouseButton:
left = 1
middle = 2
right = 3
scroll_up = 4
scroll_down = 5
back = 8
forward = 9
names = {
1: 'left button',
2: 'middle button',
3: 'right button',
4: 'wheel up',
5: 'wheel down',
8: 'back button',
9: 'forward button',
}
def xStr(string):
encoded = string.encode('utf8')
return len(encoded), encoded
def Perimeter(*args): # pylint: disable=invalid-name
if len(args) == 1:
return (args[0], args[0], args[0], args[0])
if len(args) == 2:
return (args[0], args[1], args[0], args[1])
if len(args) == 3:
return (args[0], args[1], args[2], args[1])
return (args[0], args[1], args[2], args[3])
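
# Perimeter expands CSS-style shorthand into a 4-tuple; concrete cases
# (editorial examples):
assert Perimeter(5) == (5, 5, 5, 5)
assert Perimeter(5, 10) == (5, 10, 5, 10)
assert Perimeter(5, 10, 15) == (5, 10, 15, 10)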
def dataAttrs(key, val):
return key != '__dict__' and not callable(val)
def publicDataAttrs(key, val):
return not key.startswith('__') and not key.endswith('__') and not callable(val)
def methodAttrs(_key, val):
return callable(val)
def color(colorNum, message):
return f'\x1b[{colorNum}m{message}\x1b[m'
def printError(message, *args):
print(' '.join([color(91, message), *(str(arg) for arg in args)]), file=sys.stderr)
sys.stderr.flush()
def printWarning(message, *args):
print(' '.join([color('38;5;202', message), *(str(arg) for arg in args)]), file=sys.stderr)
sys.stderr.flush()
def printInfo(message, *args):
print(' '.join([color(93, message), *(str(arg) for arg in args)]))
sys.stdout.flush()
def inspect(obj, attrFilter=publicDataAttrs):
indent = ' ' if VERBOSE and not sys.stdout.isatty() else ''
output = []
for key in dir(obj):
try:
val = getattr(obj, key)
if attrFilter(key, val):
output.append('%s\x1b[96m%s:\x1b[m %r' % (indent, key, val))
except Exception as error: # pylint: disable=broad-except
output.append('%s\x1b[96m%s:\x1b[m \x1b[91m%r\x1b[m' % (indent, key, error))
if sys.stdout.isatty():
print(collist(output))
elif VERBOSE:
print('\n'.join(output))
else:
print(' ' + ', '.join(output))
sys.stdout.flush()
def runCommand(path):
subprocess.call(os.path.expanduser(path))
def topStrut(width, height, left=0):
return (
0, 0, height, 0, # left, right, top, bottom,
0, 0, # left_start_y, left_end_y
0, 0, # right_start_y, right_end_y,
left, left + width - 1, # top_start_x, top_end_x,
0, 0 # bottom_start_x, bottom_end_x
)
def bottomStrut(width, height, left=0):
return (
0, 0, 0, height, # left, right, top, bottom,
0, 0, # left_start_y, left_end_y
0, 0, # right_start_y, right_end_y,
0, 0, # top_start_x, top_end_x,
left, left + width - 1 # bottom_start_x, bottom_end_x
)
def leftStrut(width, height, top=0):
return (
width, 0, 0, 0, # left, right, top, bottom,
top, top + height - 1, # left_start_y, left_end_y
0, 0, # right_start_y, right_end_y,
0, 0, # top_start_x, top_end_x,
0, 0 # bottom_start_x, bottom_end_x
)
def rightStrut(width, height, top=0):
return (
0, width, 0, 0, # left, right, top, bottom,
0, 0, # left_start_y, left_end_y
top, top + height - 1, # right_start_y, right_end_y,
0, 0, # top_start_x, top_end_x,
0, 0 # bottom_start_x, bottom_end_x
)
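
# The *Strut helpers lay out the 12 CARDINALs of the EWMH _NET_WM_STRUT_PARTIAL
# property used to reserve screen edges for panels; e.g. a 24px bar across the
# top of a 1920px-wide screen (editorial example):
assert topStrut(1920, 24) == (0, 0, 24, 0, 0, 0, 0, 0, 0, 1919, 0, 0)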
|
[
"'''Utility functions\n\n'''\nimport os\nimport subprocess\nimport sys\n\nfrom collist import collist\n\n\ndef toBool(input_string):\n return input_string.upper() in ('T', 'TRUE', '1', 'YES', 'ON')\n\n\nVERBOSE = toBool(os.getenv('VERBOSE', 'false'))\n\n\nclass QuitApplication(Exception):\n pass\n\n\nclass MouseButton:\n left = 1\n middle = 2\n right = 3\n scroll_up = 4\n scroll_down = 5\n back = 8\n forward = 9\n\n names = {\n 1: 'left button',\n 2: 'middle button',\n 3: 'right button',\n 4: 'wheel up',\n 5: 'wheel down',\n 8: 'back button',\n 9: 'forward button',\n }\n\n\ndef xStr(string):\n encoded = string.encode('utf8')\n return len(encoded), encoded\n\n\ndef Perimeter(*args): # pylint: disable=invalid-name\n if len(args) == 1:\n return (args[0], args[0], args[0], args[0])\n if len(args) == 2:\n return (args[0], args[1], args[0], args[1])\n if len(args) == 3:\n return (args[0], args[1], args[2], args[1])\n return (args[0], args[1], args[2], args[3])\n\n\ndef dataAttrs(key, val):\n return key != '__dict__' and not callable(val)\n\n\ndef publicDataAttrs(key, val):\n return not key.startswith('__') and not key.endswith('__') and not callable(val)\n\n\ndef methodAttrs(_key, val):\n return callable(val)\n\n\ndef color(colorNum, message):\n return f'\\x1b[{colorNum}m{message}\\x1b[m'\n\n\ndef printError(message, *args):\n print(' '.join([color(91, message), *(str(arg) for arg in args)]), file=sys.stderr)\n sys.stderr.flush()\n\n\ndef printWarning(message, *args):\n print(' '.join([color('38;5;202', message), *(str(arg) for arg in args)]), file=sys.stderr)\n sys.stderr.flush()\n\n\ndef printInfo(message, *args):\n print(' '.join([color(93, message), *(str(arg) for arg in args)]))\n sys.stdout.flush()\n\n\ndef inspect(obj, attrFilter=publicDataAttrs):\n indent = ' ' if VERBOSE and not sys.stdout.isatty() else ''\n output = []\n\n for key in dir(obj):\n try:\n val = getattr(obj, key)\n if attrFilter(key, val):\n output.append('%s\\x1b[96m%s:\\x1b[m %r' % (indent, key, val))\n except Exception as error: # pylint: disable=broad-except\n output.append('%s\\x1b[96m%s:\\x1b[m \\x1b[91m%r\\x1b[m' % (indent, key, error))\n\n if sys.stdout.isatty():\n print(collist(output))\n elif VERBOSE:\n print('\\n'.join(output))\n else:\n print(' ' + ', '.join(output))\n sys.stdout.flush()\n\n\ndef runCommand(path):\n subprocess.call(os.path.expanduser(path))\n\n\ndef topStrut(width, height, left=0):\n return (\n 0, 0, height, 0, # left, right, top, bottom,\n 0, 0, # left_start_y, left_end_y\n 0, 0, # right_start_y, right_end_y,\n left, left + width - 1, # top_start_x, top_end_x,\n 0, 0 # bottom_start_x, bottom_end_x\n )\n\n\ndef bottomStrut(width, height, left=0):\n return (\n 0, 0, 0, height, # left, right, top, bottom,\n 0, 0, # left_start_y, left_end_y\n 0, 0, # right_start_y, right_end_y,\n 0, 0, # top_start_x, top_end_x,\n left, left + width - 1 # bottom_start_x, bottom_end_x\n )\n\n\ndef leftStrut(width, height, top=0):\n return (\n width, 0, 0, 0, # left, right, top, bottom,\n top, top + height - 1, # left_start_y, left_end_y\n 0, 0, # right_start_y, right_end_y,\n 0, 0, # top_start_x, top_end_x,\n 0, 0 # bottom_start_x, bottom_end_x\n )\n\n\ndef rightStrut(width, height, top=0):\n return (\n 0, width, 0, 0, # left, right, top, bottom,\n 0, 0, # left_start_y, left_end_y\n top, top + height - 1, # right_start_y, right_end_y,\n 0, 0, # top_start_x, top_end_x,\n 0, 0 # bottom_start_x, bottom_end_x\n )\n",
"<docstring token>\nimport os\nimport subprocess\nimport sys\nfrom collist import collist\n\n\ndef toBool(input_string):\n return input_string.upper() in ('T', 'TRUE', '1', 'YES', 'ON')\n\n\nVERBOSE = toBool(os.getenv('VERBOSE', 'false'))\n\n\nclass QuitApplication(Exception):\n pass\n\n\nclass MouseButton:\n left = 1\n middle = 2\n right = 3\n scroll_up = 4\n scroll_down = 5\n back = 8\n forward = 9\n names = {(1): 'left button', (2): 'middle button', (3): 'right button',\n (4): 'wheel up', (5): 'wheel down', (8): 'back button', (9):\n 'forward button'}\n\n\ndef xStr(string):\n encoded = string.encode('utf8')\n return len(encoded), encoded\n\n\ndef Perimeter(*args):\n if len(args) == 1:\n return args[0], args[0], args[0], args[0]\n if len(args) == 2:\n return args[0], args[1], args[0], args[1]\n if len(args) == 3:\n return args[0], args[1], args[2], args[1]\n return args[0], args[1], args[2], args[3]\n\n\ndef dataAttrs(key, val):\n return key != '__dict__' and not callable(val)\n\n\ndef publicDataAttrs(key, val):\n return not key.startswith('__') and not key.endswith('__'\n ) and not callable(val)\n\n\ndef methodAttrs(_key, val):\n return callable(val)\n\n\ndef color(colorNum, message):\n return f'\\x1b[{colorNum}m{message}\\x1b[m'\n\n\ndef printError(message, *args):\n print(' '.join([color(91, message), *(str(arg) for arg in args)]), file\n =sys.stderr)\n sys.stderr.flush()\n\n\ndef printWarning(message, *args):\n print(' '.join([color('38;5;202', message), *(str(arg) for arg in args)\n ]), file=sys.stderr)\n sys.stderr.flush()\n\n\ndef printInfo(message, *args):\n print(' '.join([color(93, message), *(str(arg) for arg in args)]))\n sys.stdout.flush()\n\n\ndef inspect(obj, attrFilter=publicDataAttrs):\n indent = ' ' if VERBOSE and not sys.stdout.isatty() else ''\n output = []\n for key in dir(obj):\n try:\n val = getattr(obj, key)\n if attrFilter(key, val):\n output.append('%s\\x1b[96m%s:\\x1b[m %r' % (indent, key, val))\n except Exception as error:\n output.append('%s\\x1b[96m%s:\\x1b[m \\x1b[91m%r\\x1b[m' % (indent,\n key, error))\n if sys.stdout.isatty():\n print(collist(output))\n elif VERBOSE:\n print('\\n'.join(output))\n else:\n print(' ' + ', '.join(output))\n sys.stdout.flush()\n\n\ndef runCommand(path):\n subprocess.call(os.path.expanduser(path))\n\n\ndef topStrut(width, height, left=0):\n return 0, 0, height, 0, 0, 0, 0, 0, left, left + width - 1, 0, 0\n\n\ndef bottomStrut(width, height, left=0):\n return 0, 0, 0, height, 0, 0, 0, 0, 0, 0, left, left + width - 1\n\n\ndef leftStrut(width, height, top=0):\n return width, 0, 0, 0, top, top + height - 1, 0, 0, 0, 0, 0, 0\n\n\ndef rightStrut(width, height, top=0):\n return 0, width, 0, 0, 0, 0, top, top + height - 1, 0, 0, 0, 0\n",
"<docstring token>\n<import token>\n\n\ndef toBool(input_string):\n return input_string.upper() in ('T', 'TRUE', '1', 'YES', 'ON')\n\n\nVERBOSE = toBool(os.getenv('VERBOSE', 'false'))\n\n\nclass QuitApplication(Exception):\n pass\n\n\nclass MouseButton:\n left = 1\n middle = 2\n right = 3\n scroll_up = 4\n scroll_down = 5\n back = 8\n forward = 9\n names = {(1): 'left button', (2): 'middle button', (3): 'right button',\n (4): 'wheel up', (5): 'wheel down', (8): 'back button', (9):\n 'forward button'}\n\n\ndef xStr(string):\n encoded = string.encode('utf8')\n return len(encoded), encoded\n\n\ndef Perimeter(*args):\n if len(args) == 1:\n return args[0], args[0], args[0], args[0]\n if len(args) == 2:\n return args[0], args[1], args[0], args[1]\n if len(args) == 3:\n return args[0], args[1], args[2], args[1]\n return args[0], args[1], args[2], args[3]\n\n\ndef dataAttrs(key, val):\n return key != '__dict__' and not callable(val)\n\n\ndef publicDataAttrs(key, val):\n return not key.startswith('__') and not key.endswith('__'\n ) and not callable(val)\n\n\ndef methodAttrs(_key, val):\n return callable(val)\n\n\ndef color(colorNum, message):\n return f'\\x1b[{colorNum}m{message}\\x1b[m'\n\n\ndef printError(message, *args):\n print(' '.join([color(91, message), *(str(arg) for arg in args)]), file\n =sys.stderr)\n sys.stderr.flush()\n\n\ndef printWarning(message, *args):\n print(' '.join([color('38;5;202', message), *(str(arg) for arg in args)\n ]), file=sys.stderr)\n sys.stderr.flush()\n\n\ndef printInfo(message, *args):\n print(' '.join([color(93, message), *(str(arg) for arg in args)]))\n sys.stdout.flush()\n\n\ndef inspect(obj, attrFilter=publicDataAttrs):\n indent = ' ' if VERBOSE and not sys.stdout.isatty() else ''\n output = []\n for key in dir(obj):\n try:\n val = getattr(obj, key)\n if attrFilter(key, val):\n output.append('%s\\x1b[96m%s:\\x1b[m %r' % (indent, key, val))\n except Exception as error:\n output.append('%s\\x1b[96m%s:\\x1b[m \\x1b[91m%r\\x1b[m' % (indent,\n key, error))\n if sys.stdout.isatty():\n print(collist(output))\n elif VERBOSE:\n print('\\n'.join(output))\n else:\n print(' ' + ', '.join(output))\n sys.stdout.flush()\n\n\ndef runCommand(path):\n subprocess.call(os.path.expanduser(path))\n\n\ndef topStrut(width, height, left=0):\n return 0, 0, height, 0, 0, 0, 0, 0, left, left + width - 1, 0, 0\n\n\ndef bottomStrut(width, height, left=0):\n return 0, 0, 0, height, 0, 0, 0, 0, 0, 0, left, left + width - 1\n\n\ndef leftStrut(width, height, top=0):\n return width, 0, 0, 0, top, top + height - 1, 0, 0, 0, 0, 0, 0\n\n\ndef rightStrut(width, height, top=0):\n return 0, width, 0, 0, 0, 0, top, top + height - 1, 0, 0, 0, 0\n",
"<docstring token>\n<import token>\n\n\ndef toBool(input_string):\n return input_string.upper() in ('T', 'TRUE', '1', 'YES', 'ON')\n\n\n<assignment token>\n\n\nclass QuitApplication(Exception):\n pass\n\n\nclass MouseButton:\n left = 1\n middle = 2\n right = 3\n scroll_up = 4\n scroll_down = 5\n back = 8\n forward = 9\n names = {(1): 'left button', (2): 'middle button', (3): 'right button',\n (4): 'wheel up', (5): 'wheel down', (8): 'back button', (9):\n 'forward button'}\n\n\ndef xStr(string):\n encoded = string.encode('utf8')\n return len(encoded), encoded\n\n\ndef Perimeter(*args):\n if len(args) == 1:\n return args[0], args[0], args[0], args[0]\n if len(args) == 2:\n return args[0], args[1], args[0], args[1]\n if len(args) == 3:\n return args[0], args[1], args[2], args[1]\n return args[0], args[1], args[2], args[3]\n\n\ndef dataAttrs(key, val):\n return key != '__dict__' and not callable(val)\n\n\ndef publicDataAttrs(key, val):\n return not key.startswith('__') and not key.endswith('__'\n ) and not callable(val)\n\n\ndef methodAttrs(_key, val):\n return callable(val)\n\n\ndef color(colorNum, message):\n return f'\\x1b[{colorNum}m{message}\\x1b[m'\n\n\ndef printError(message, *args):\n print(' '.join([color(91, message), *(str(arg) for arg in args)]), file\n =sys.stderr)\n sys.stderr.flush()\n\n\ndef printWarning(message, *args):\n print(' '.join([color('38;5;202', message), *(str(arg) for arg in args)\n ]), file=sys.stderr)\n sys.stderr.flush()\n\n\ndef printInfo(message, *args):\n print(' '.join([color(93, message), *(str(arg) for arg in args)]))\n sys.stdout.flush()\n\n\ndef inspect(obj, attrFilter=publicDataAttrs):\n indent = ' ' if VERBOSE and not sys.stdout.isatty() else ''\n output = []\n for key in dir(obj):\n try:\n val = getattr(obj, key)\n if attrFilter(key, val):\n output.append('%s\\x1b[96m%s:\\x1b[m %r' % (indent, key, val))\n except Exception as error:\n output.append('%s\\x1b[96m%s:\\x1b[m \\x1b[91m%r\\x1b[m' % (indent,\n key, error))\n if sys.stdout.isatty():\n print(collist(output))\n elif VERBOSE:\n print('\\n'.join(output))\n else:\n print(' ' + ', '.join(output))\n sys.stdout.flush()\n\n\ndef runCommand(path):\n subprocess.call(os.path.expanduser(path))\n\n\ndef topStrut(width, height, left=0):\n return 0, 0, height, 0, 0, 0, 0, 0, left, left + width - 1, 0, 0\n\n\ndef bottomStrut(width, height, left=0):\n return 0, 0, 0, height, 0, 0, 0, 0, 0, 0, left, left + width - 1\n\n\ndef leftStrut(width, height, top=0):\n return width, 0, 0, 0, top, top + height - 1, 0, 0, 0, 0, 0, 0\n\n\ndef rightStrut(width, height, top=0):\n return 0, width, 0, 0, 0, 0, top, top + height - 1, 0, 0, 0, 0\n",
"<docstring token>\n<import token>\n\n\ndef toBool(input_string):\n return input_string.upper() in ('T', 'TRUE', '1', 'YES', 'ON')\n\n\n<assignment token>\n\n\nclass QuitApplication(Exception):\n pass\n\n\nclass MouseButton:\n left = 1\n middle = 2\n right = 3\n scroll_up = 4\n scroll_down = 5\n back = 8\n forward = 9\n names = {(1): 'left button', (2): 'middle button', (3): 'right button',\n (4): 'wheel up', (5): 'wheel down', (8): 'back button', (9):\n 'forward button'}\n\n\ndef xStr(string):\n encoded = string.encode('utf8')\n return len(encoded), encoded\n\n\ndef Perimeter(*args):\n if len(args) == 1:\n return args[0], args[0], args[0], args[0]\n if len(args) == 2:\n return args[0], args[1], args[0], args[1]\n if len(args) == 3:\n return args[0], args[1], args[2], args[1]\n return args[0], args[1], args[2], args[3]\n\n\n<function token>\n\n\ndef publicDataAttrs(key, val):\n return not key.startswith('__') and not key.endswith('__'\n ) and not callable(val)\n\n\ndef methodAttrs(_key, val):\n return callable(val)\n\n\ndef color(colorNum, message):\n return f'\\x1b[{colorNum}m{message}\\x1b[m'\n\n\ndef printError(message, *args):\n print(' '.join([color(91, message), *(str(arg) for arg in args)]), file\n =sys.stderr)\n sys.stderr.flush()\n\n\ndef printWarning(message, *args):\n print(' '.join([color('38;5;202', message), *(str(arg) for arg in args)\n ]), file=sys.stderr)\n sys.stderr.flush()\n\n\ndef printInfo(message, *args):\n print(' '.join([color(93, message), *(str(arg) for arg in args)]))\n sys.stdout.flush()\n\n\ndef inspect(obj, attrFilter=publicDataAttrs):\n indent = ' ' if VERBOSE and not sys.stdout.isatty() else ''\n output = []\n for key in dir(obj):\n try:\n val = getattr(obj, key)\n if attrFilter(key, val):\n output.append('%s\\x1b[96m%s:\\x1b[m %r' % (indent, key, val))\n except Exception as error:\n output.append('%s\\x1b[96m%s:\\x1b[m \\x1b[91m%r\\x1b[m' % (indent,\n key, error))\n if sys.stdout.isatty():\n print(collist(output))\n elif VERBOSE:\n print('\\n'.join(output))\n else:\n print(' ' + ', '.join(output))\n sys.stdout.flush()\n\n\ndef runCommand(path):\n subprocess.call(os.path.expanduser(path))\n\n\ndef topStrut(width, height, left=0):\n return 0, 0, height, 0, 0, 0, 0, 0, left, left + width - 1, 0, 0\n\n\ndef bottomStrut(width, height, left=0):\n return 0, 0, 0, height, 0, 0, 0, 0, 0, 0, left, left + width - 1\n\n\ndef leftStrut(width, height, top=0):\n return width, 0, 0, 0, top, top + height - 1, 0, 0, 0, 0, 0, 0\n\n\ndef rightStrut(width, height, top=0):\n return 0, width, 0, 0, 0, 0, top, top + height - 1, 0, 0, 0, 0\n",
"<docstring token>\n<import token>\n\n\ndef toBool(input_string):\n return input_string.upper() in ('T', 'TRUE', '1', 'YES', 'ON')\n\n\n<assignment token>\n\n\nclass QuitApplication(Exception):\n pass\n\n\nclass MouseButton:\n left = 1\n middle = 2\n right = 3\n scroll_up = 4\n scroll_down = 5\n back = 8\n forward = 9\n names = {(1): 'left button', (2): 'middle button', (3): 'right button',\n (4): 'wheel up', (5): 'wheel down', (8): 'back button', (9):\n 'forward button'}\n\n\ndef xStr(string):\n encoded = string.encode('utf8')\n return len(encoded), encoded\n\n\ndef Perimeter(*args):\n if len(args) == 1:\n return args[0], args[0], args[0], args[0]\n if len(args) == 2:\n return args[0], args[1], args[0], args[1]\n if len(args) == 3:\n return args[0], args[1], args[2], args[1]\n return args[0], args[1], args[2], args[3]\n\n\n<function token>\n\n\ndef publicDataAttrs(key, val):\n return not key.startswith('__') and not key.endswith('__'\n ) and not callable(val)\n\n\ndef methodAttrs(_key, val):\n return callable(val)\n\n\ndef color(colorNum, message):\n return f'\\x1b[{colorNum}m{message}\\x1b[m'\n\n\n<function token>\n\n\ndef printWarning(message, *args):\n print(' '.join([color('38;5;202', message), *(str(arg) for arg in args)\n ]), file=sys.stderr)\n sys.stderr.flush()\n\n\ndef printInfo(message, *args):\n print(' '.join([color(93, message), *(str(arg) for arg in args)]))\n sys.stdout.flush()\n\n\ndef inspect(obj, attrFilter=publicDataAttrs):\n indent = ' ' if VERBOSE and not sys.stdout.isatty() else ''\n output = []\n for key in dir(obj):\n try:\n val = getattr(obj, key)\n if attrFilter(key, val):\n output.append('%s\\x1b[96m%s:\\x1b[m %r' % (indent, key, val))\n except Exception as error:\n output.append('%s\\x1b[96m%s:\\x1b[m \\x1b[91m%r\\x1b[m' % (indent,\n key, error))\n if sys.stdout.isatty():\n print(collist(output))\n elif VERBOSE:\n print('\\n'.join(output))\n else:\n print(' ' + ', '.join(output))\n sys.stdout.flush()\n\n\ndef runCommand(path):\n subprocess.call(os.path.expanduser(path))\n\n\ndef topStrut(width, height, left=0):\n return 0, 0, height, 0, 0, 0, 0, 0, left, left + width - 1, 0, 0\n\n\ndef bottomStrut(width, height, left=0):\n return 0, 0, 0, height, 0, 0, 0, 0, 0, 0, left, left + width - 1\n\n\ndef leftStrut(width, height, top=0):\n return width, 0, 0, 0, top, top + height - 1, 0, 0, 0, 0, 0, 0\n\n\ndef rightStrut(width, height, top=0):\n return 0, width, 0, 0, 0, 0, top, top + height - 1, 0, 0, 0, 0\n",
"<docstring token>\n<import token>\n\n\ndef toBool(input_string):\n return input_string.upper() in ('T', 'TRUE', '1', 'YES', 'ON')\n\n\n<assignment token>\n\n\nclass QuitApplication(Exception):\n pass\n\n\nclass MouseButton:\n left = 1\n middle = 2\n right = 3\n scroll_up = 4\n scroll_down = 5\n back = 8\n forward = 9\n names = {(1): 'left button', (2): 'middle button', (3): 'right button',\n (4): 'wheel up', (5): 'wheel down', (8): 'back button', (9):\n 'forward button'}\n\n\ndef xStr(string):\n encoded = string.encode('utf8')\n return len(encoded), encoded\n\n\ndef Perimeter(*args):\n if len(args) == 1:\n return args[0], args[0], args[0], args[0]\n if len(args) == 2:\n return args[0], args[1], args[0], args[1]\n if len(args) == 3:\n return args[0], args[1], args[2], args[1]\n return args[0], args[1], args[2], args[3]\n\n\n<function token>\n\n\ndef publicDataAttrs(key, val):\n return not key.startswith('__') and not key.endswith('__'\n ) and not callable(val)\n\n\ndef methodAttrs(_key, val):\n return callable(val)\n\n\n<function token>\n<function token>\n\n\ndef printWarning(message, *args):\n print(' '.join([color('38;5;202', message), *(str(arg) for arg in args)\n ]), file=sys.stderr)\n sys.stderr.flush()\n\n\ndef printInfo(message, *args):\n print(' '.join([color(93, message), *(str(arg) for arg in args)]))\n sys.stdout.flush()\n\n\ndef inspect(obj, attrFilter=publicDataAttrs):\n indent = ' ' if VERBOSE and not sys.stdout.isatty() else ''\n output = []\n for key in dir(obj):\n try:\n val = getattr(obj, key)\n if attrFilter(key, val):\n output.append('%s\\x1b[96m%s:\\x1b[m %r' % (indent, key, val))\n except Exception as error:\n output.append('%s\\x1b[96m%s:\\x1b[m \\x1b[91m%r\\x1b[m' % (indent,\n key, error))\n if sys.stdout.isatty():\n print(collist(output))\n elif VERBOSE:\n print('\\n'.join(output))\n else:\n print(' ' + ', '.join(output))\n sys.stdout.flush()\n\n\ndef runCommand(path):\n subprocess.call(os.path.expanduser(path))\n\n\ndef topStrut(width, height, left=0):\n return 0, 0, height, 0, 0, 0, 0, 0, left, left + width - 1, 0, 0\n\n\ndef bottomStrut(width, height, left=0):\n return 0, 0, 0, height, 0, 0, 0, 0, 0, 0, left, left + width - 1\n\n\ndef leftStrut(width, height, top=0):\n return width, 0, 0, 0, top, top + height - 1, 0, 0, 0, 0, 0, 0\n\n\ndef rightStrut(width, height, top=0):\n return 0, width, 0, 0, 0, 0, top, top + height - 1, 0, 0, 0, 0\n",
"<docstring token>\n<import token>\n\n\ndef toBool(input_string):\n return input_string.upper() in ('T', 'TRUE', '1', 'YES', 'ON')\n\n\n<assignment token>\n\n\nclass QuitApplication(Exception):\n pass\n\n\nclass MouseButton:\n left = 1\n middle = 2\n right = 3\n scroll_up = 4\n scroll_down = 5\n back = 8\n forward = 9\n names = {(1): 'left button', (2): 'middle button', (3): 'right button',\n (4): 'wheel up', (5): 'wheel down', (8): 'back button', (9):\n 'forward button'}\n\n\ndef xStr(string):\n encoded = string.encode('utf8')\n return len(encoded), encoded\n\n\ndef Perimeter(*args):\n if len(args) == 1:\n return args[0], args[0], args[0], args[0]\n if len(args) == 2:\n return args[0], args[1], args[0], args[1]\n if len(args) == 3:\n return args[0], args[1], args[2], args[1]\n return args[0], args[1], args[2], args[3]\n\n\n<function token>\n\n\ndef publicDataAttrs(key, val):\n return not key.startswith('__') and not key.endswith('__'\n ) and not callable(val)\n\n\ndef methodAttrs(_key, val):\n return callable(val)\n\n\n<function token>\n<function token>\n\n\ndef printWarning(message, *args):\n print(' '.join([color('38;5;202', message), *(str(arg) for arg in args)\n ]), file=sys.stderr)\n sys.stderr.flush()\n\n\ndef printInfo(message, *args):\n print(' '.join([color(93, message), *(str(arg) for arg in args)]))\n sys.stdout.flush()\n\n\ndef inspect(obj, attrFilter=publicDataAttrs):\n indent = ' ' if VERBOSE and not sys.stdout.isatty() else ''\n output = []\n for key in dir(obj):\n try:\n val = getattr(obj, key)\n if attrFilter(key, val):\n output.append('%s\\x1b[96m%s:\\x1b[m %r' % (indent, key, val))\n except Exception as error:\n output.append('%s\\x1b[96m%s:\\x1b[m \\x1b[91m%r\\x1b[m' % (indent,\n key, error))\n if sys.stdout.isatty():\n print(collist(output))\n elif VERBOSE:\n print('\\n'.join(output))\n else:\n print(' ' + ', '.join(output))\n sys.stdout.flush()\n\n\ndef runCommand(path):\n subprocess.call(os.path.expanduser(path))\n\n\n<function token>\n\n\ndef bottomStrut(width, height, left=0):\n return 0, 0, 0, height, 0, 0, 0, 0, 0, 0, left, left + width - 1\n\n\ndef leftStrut(width, height, top=0):\n return width, 0, 0, 0, top, top + height - 1, 0, 0, 0, 0, 0, 0\n\n\ndef rightStrut(width, height, top=0):\n return 0, width, 0, 0, 0, 0, top, top + height - 1, 0, 0, 0, 0\n",
"<docstring token>\n<import token>\n\n\ndef toBool(input_string):\n return input_string.upper() in ('T', 'TRUE', '1', 'YES', 'ON')\n\n\n<assignment token>\n\n\nclass QuitApplication(Exception):\n pass\n\n\nclass MouseButton:\n left = 1\n middle = 2\n right = 3\n scroll_up = 4\n scroll_down = 5\n back = 8\n forward = 9\n names = {(1): 'left button', (2): 'middle button', (3): 'right button',\n (4): 'wheel up', (5): 'wheel down', (8): 'back button', (9):\n 'forward button'}\n\n\n<function token>\n\n\ndef Perimeter(*args):\n if len(args) == 1:\n return args[0], args[0], args[0], args[0]\n if len(args) == 2:\n return args[0], args[1], args[0], args[1]\n if len(args) == 3:\n return args[0], args[1], args[2], args[1]\n return args[0], args[1], args[2], args[3]\n\n\n<function token>\n\n\ndef publicDataAttrs(key, val):\n return not key.startswith('__') and not key.endswith('__'\n ) and not callable(val)\n\n\ndef methodAttrs(_key, val):\n return callable(val)\n\n\n<function token>\n<function token>\n\n\ndef printWarning(message, *args):\n print(' '.join([color('38;5;202', message), *(str(arg) for arg in args)\n ]), file=sys.stderr)\n sys.stderr.flush()\n\n\ndef printInfo(message, *args):\n print(' '.join([color(93, message), *(str(arg) for arg in args)]))\n sys.stdout.flush()\n\n\ndef inspect(obj, attrFilter=publicDataAttrs):\n indent = ' ' if VERBOSE and not sys.stdout.isatty() else ''\n output = []\n for key in dir(obj):\n try:\n val = getattr(obj, key)\n if attrFilter(key, val):\n output.append('%s\\x1b[96m%s:\\x1b[m %r' % (indent, key, val))\n except Exception as error:\n output.append('%s\\x1b[96m%s:\\x1b[m \\x1b[91m%r\\x1b[m' % (indent,\n key, error))\n if sys.stdout.isatty():\n print(collist(output))\n elif VERBOSE:\n print('\\n'.join(output))\n else:\n print(' ' + ', '.join(output))\n sys.stdout.flush()\n\n\ndef runCommand(path):\n subprocess.call(os.path.expanduser(path))\n\n\n<function token>\n\n\ndef bottomStrut(width, height, left=0):\n return 0, 0, 0, height, 0, 0, 0, 0, 0, 0, left, left + width - 1\n\n\ndef leftStrut(width, height, top=0):\n return width, 0, 0, 0, top, top + height - 1, 0, 0, 0, 0, 0, 0\n\n\ndef rightStrut(width, height, top=0):\n return 0, width, 0, 0, 0, 0, top, top + height - 1, 0, 0, 0, 0\n",
"<docstring token>\n<import token>\n\n\ndef toBool(input_string):\n return input_string.upper() in ('T', 'TRUE', '1', 'YES', 'ON')\n\n\n<assignment token>\n\n\nclass QuitApplication(Exception):\n pass\n\n\nclass MouseButton:\n left = 1\n middle = 2\n right = 3\n scroll_up = 4\n scroll_down = 5\n back = 8\n forward = 9\n names = {(1): 'left button', (2): 'middle button', (3): 'right button',\n (4): 'wheel up', (5): 'wheel down', (8): 'back button', (9):\n 'forward button'}\n\n\n<function token>\n\n\ndef Perimeter(*args):\n if len(args) == 1:\n return args[0], args[0], args[0], args[0]\n if len(args) == 2:\n return args[0], args[1], args[0], args[1]\n if len(args) == 3:\n return args[0], args[1], args[2], args[1]\n return args[0], args[1], args[2], args[3]\n\n\n<function token>\n\n\ndef publicDataAttrs(key, val):\n return not key.startswith('__') and not key.endswith('__'\n ) and not callable(val)\n\n\ndef methodAttrs(_key, val):\n return callable(val)\n\n\n<function token>\n<function token>\n\n\ndef printWarning(message, *args):\n print(' '.join([color('38;5;202', message), *(str(arg) for arg in args)\n ]), file=sys.stderr)\n sys.stderr.flush()\n\n\ndef printInfo(message, *args):\n print(' '.join([color(93, message), *(str(arg) for arg in args)]))\n sys.stdout.flush()\n\n\ndef inspect(obj, attrFilter=publicDataAttrs):\n indent = ' ' if VERBOSE and not sys.stdout.isatty() else ''\n output = []\n for key in dir(obj):\n try:\n val = getattr(obj, key)\n if attrFilter(key, val):\n output.append('%s\\x1b[96m%s:\\x1b[m %r' % (indent, key, val))\n except Exception as error:\n output.append('%s\\x1b[96m%s:\\x1b[m \\x1b[91m%r\\x1b[m' % (indent,\n key, error))\n if sys.stdout.isatty():\n print(collist(output))\n elif VERBOSE:\n print('\\n'.join(output))\n else:\n print(' ' + ', '.join(output))\n sys.stdout.flush()\n\n\ndef runCommand(path):\n subprocess.call(os.path.expanduser(path))\n\n\n<function token>\n<function token>\n\n\ndef leftStrut(width, height, top=0):\n return width, 0, 0, 0, top, top + height - 1, 0, 0, 0, 0, 0, 0\n\n\ndef rightStrut(width, height, top=0):\n return 0, width, 0, 0, 0, 0, top, top + height - 1, 0, 0, 0, 0\n",
"<docstring token>\n<import token>\n\n\ndef toBool(input_string):\n return input_string.upper() in ('T', 'TRUE', '1', 'YES', 'ON')\n\n\n<assignment token>\n\n\nclass QuitApplication(Exception):\n pass\n\n\nclass MouseButton:\n left = 1\n middle = 2\n right = 3\n scroll_up = 4\n scroll_down = 5\n back = 8\n forward = 9\n names = {(1): 'left button', (2): 'middle button', (3): 'right button',\n (4): 'wheel up', (5): 'wheel down', (8): 'back button', (9):\n 'forward button'}\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef publicDataAttrs(key, val):\n return not key.startswith('__') and not key.endswith('__'\n ) and not callable(val)\n\n\ndef methodAttrs(_key, val):\n return callable(val)\n\n\n<function token>\n<function token>\n\n\ndef printWarning(message, *args):\n print(' '.join([color('38;5;202', message), *(str(arg) for arg in args)\n ]), file=sys.stderr)\n sys.stderr.flush()\n\n\ndef printInfo(message, *args):\n print(' '.join([color(93, message), *(str(arg) for arg in args)]))\n sys.stdout.flush()\n\n\ndef inspect(obj, attrFilter=publicDataAttrs):\n indent = ' ' if VERBOSE and not sys.stdout.isatty() else ''\n output = []\n for key in dir(obj):\n try:\n val = getattr(obj, key)\n if attrFilter(key, val):\n output.append('%s\\x1b[96m%s:\\x1b[m %r' % (indent, key, val))\n except Exception as error:\n output.append('%s\\x1b[96m%s:\\x1b[m \\x1b[91m%r\\x1b[m' % (indent,\n key, error))\n if sys.stdout.isatty():\n print(collist(output))\n elif VERBOSE:\n print('\\n'.join(output))\n else:\n print(' ' + ', '.join(output))\n sys.stdout.flush()\n\n\ndef runCommand(path):\n subprocess.call(os.path.expanduser(path))\n\n\n<function token>\n<function token>\n\n\ndef leftStrut(width, height, top=0):\n return width, 0, 0, 0, top, top + height - 1, 0, 0, 0, 0, 0, 0\n\n\ndef rightStrut(width, height, top=0):\n return 0, width, 0, 0, 0, 0, top, top + height - 1, 0, 0, 0, 0\n",
"<docstring token>\n<import token>\n\n\ndef toBool(input_string):\n return input_string.upper() in ('T', 'TRUE', '1', 'YES', 'ON')\n\n\n<assignment token>\n\n\nclass QuitApplication(Exception):\n pass\n\n\nclass MouseButton:\n left = 1\n middle = 2\n right = 3\n scroll_up = 4\n scroll_down = 5\n back = 8\n forward = 9\n names = {(1): 'left button', (2): 'middle button', (3): 'right button',\n (4): 'wheel up', (5): 'wheel down', (8): 'back button', (9):\n 'forward button'}\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef publicDataAttrs(key, val):\n return not key.startswith('__') and not key.endswith('__'\n ) and not callable(val)\n\n\ndef methodAttrs(_key, val):\n return callable(val)\n\n\n<function token>\n<function token>\n\n\ndef printWarning(message, *args):\n print(' '.join([color('38;5;202', message), *(str(arg) for arg in args)\n ]), file=sys.stderr)\n sys.stderr.flush()\n\n\ndef printInfo(message, *args):\n print(' '.join([color(93, message), *(str(arg) for arg in args)]))\n sys.stdout.flush()\n\n\ndef inspect(obj, attrFilter=publicDataAttrs):\n indent = ' ' if VERBOSE and not sys.stdout.isatty() else ''\n output = []\n for key in dir(obj):\n try:\n val = getattr(obj, key)\n if attrFilter(key, val):\n output.append('%s\\x1b[96m%s:\\x1b[m %r' % (indent, key, val))\n except Exception as error:\n output.append('%s\\x1b[96m%s:\\x1b[m \\x1b[91m%r\\x1b[m' % (indent,\n key, error))\n if sys.stdout.isatty():\n print(collist(output))\n elif VERBOSE:\n print('\\n'.join(output))\n else:\n print(' ' + ', '.join(output))\n sys.stdout.flush()\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef leftStrut(width, height, top=0):\n return width, 0, 0, 0, top, top + height - 1, 0, 0, 0, 0, 0, 0\n\n\ndef rightStrut(width, height, top=0):\n return 0, width, 0, 0, 0, 0, top, top + height - 1, 0, 0, 0, 0\n",
"<docstring token>\n<import token>\n\n\ndef toBool(input_string):\n return input_string.upper() in ('T', 'TRUE', '1', 'YES', 'ON')\n\n\n<assignment token>\n\n\nclass QuitApplication(Exception):\n pass\n\n\nclass MouseButton:\n left = 1\n middle = 2\n right = 3\n scroll_up = 4\n scroll_down = 5\n back = 8\n forward = 9\n names = {(1): 'left button', (2): 'middle button', (3): 'right button',\n (4): 'wheel up', (5): 'wheel down', (8): 'back button', (9):\n 'forward button'}\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef publicDataAttrs(key, val):\n return not key.startswith('__') and not key.endswith('__'\n ) and not callable(val)\n\n\ndef methodAttrs(_key, val):\n return callable(val)\n\n\n<function token>\n<function token>\n\n\ndef printWarning(message, *args):\n print(' '.join([color('38;5;202', message), *(str(arg) for arg in args)\n ]), file=sys.stderr)\n sys.stderr.flush()\n\n\ndef printInfo(message, *args):\n print(' '.join([color(93, message), *(str(arg) for arg in args)]))\n sys.stdout.flush()\n\n\ndef inspect(obj, attrFilter=publicDataAttrs):\n indent = ' ' if VERBOSE and not sys.stdout.isatty() else ''\n output = []\n for key in dir(obj):\n try:\n val = getattr(obj, key)\n if attrFilter(key, val):\n output.append('%s\\x1b[96m%s:\\x1b[m %r' % (indent, key, val))\n except Exception as error:\n output.append('%s\\x1b[96m%s:\\x1b[m \\x1b[91m%r\\x1b[m' % (indent,\n key, error))\n if sys.stdout.isatty():\n print(collist(output))\n elif VERBOSE:\n print('\\n'.join(output))\n else:\n print(' ' + ', '.join(output))\n sys.stdout.flush()\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef rightStrut(width, height, top=0):\n return 0, width, 0, 0, 0, 0, top, top + height - 1, 0, 0, 0, 0\n",
"<docstring token>\n<import token>\n\n\ndef toBool(input_string):\n return input_string.upper() in ('T', 'TRUE', '1', 'YES', 'ON')\n\n\n<assignment token>\n\n\nclass QuitApplication(Exception):\n pass\n\n\nclass MouseButton:\n left = 1\n middle = 2\n right = 3\n scroll_up = 4\n scroll_down = 5\n back = 8\n forward = 9\n names = {(1): 'left button', (2): 'middle button', (3): 'right button',\n (4): 'wheel up', (5): 'wheel down', (8): 'back button', (9):\n 'forward button'}\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef publicDataAttrs(key, val):\n return not key.startswith('__') and not key.endswith('__'\n ) and not callable(val)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef printWarning(message, *args):\n print(' '.join([color('38;5;202', message), *(str(arg) for arg in args)\n ]), file=sys.stderr)\n sys.stderr.flush()\n\n\ndef printInfo(message, *args):\n print(' '.join([color(93, message), *(str(arg) for arg in args)]))\n sys.stdout.flush()\n\n\ndef inspect(obj, attrFilter=publicDataAttrs):\n indent = ' ' if VERBOSE and not sys.stdout.isatty() else ''\n output = []\n for key in dir(obj):\n try:\n val = getattr(obj, key)\n if attrFilter(key, val):\n output.append('%s\\x1b[96m%s:\\x1b[m %r' % (indent, key, val))\n except Exception as error:\n output.append('%s\\x1b[96m%s:\\x1b[m \\x1b[91m%r\\x1b[m' % (indent,\n key, error))\n if sys.stdout.isatty():\n print(collist(output))\n elif VERBOSE:\n print('\\n'.join(output))\n else:\n print(' ' + ', '.join(output))\n sys.stdout.flush()\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef rightStrut(width, height, top=0):\n return 0, width, 0, 0, 0, 0, top, top + height - 1, 0, 0, 0, 0\n",
"<docstring token>\n<import token>\n\n\ndef toBool(input_string):\n return input_string.upper() in ('T', 'TRUE', '1', 'YES', 'ON')\n\n\n<assignment token>\n\n\nclass QuitApplication(Exception):\n pass\n\n\nclass MouseButton:\n left = 1\n middle = 2\n right = 3\n scroll_up = 4\n scroll_down = 5\n back = 8\n forward = 9\n names = {(1): 'left button', (2): 'middle button', (3): 'right button',\n (4): 'wheel up', (5): 'wheel down', (8): 'back button', (9):\n 'forward button'}\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef publicDataAttrs(key, val):\n return not key.startswith('__') and not key.endswith('__'\n ) and not callable(val)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef printInfo(message, *args):\n print(' '.join([color(93, message), *(str(arg) for arg in args)]))\n sys.stdout.flush()\n\n\ndef inspect(obj, attrFilter=publicDataAttrs):\n indent = ' ' if VERBOSE and not sys.stdout.isatty() else ''\n output = []\n for key in dir(obj):\n try:\n val = getattr(obj, key)\n if attrFilter(key, val):\n output.append('%s\\x1b[96m%s:\\x1b[m %r' % (indent, key, val))\n except Exception as error:\n output.append('%s\\x1b[96m%s:\\x1b[m \\x1b[91m%r\\x1b[m' % (indent,\n key, error))\n if sys.stdout.isatty():\n print(collist(output))\n elif VERBOSE:\n print('\\n'.join(output))\n else:\n print(' ' + ', '.join(output))\n sys.stdout.flush()\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef rightStrut(width, height, top=0):\n return 0, width, 0, 0, 0, 0, top, top + height - 1, 0, 0, 0, 0\n",
"<docstring token>\n<import token>\n\n\ndef toBool(input_string):\n return input_string.upper() in ('T', 'TRUE', '1', 'YES', 'ON')\n\n\n<assignment token>\n\n\nclass QuitApplication(Exception):\n pass\n\n\nclass MouseButton:\n left = 1\n middle = 2\n right = 3\n scroll_up = 4\n scroll_down = 5\n back = 8\n forward = 9\n names = {(1): 'left button', (2): 'middle button', (3): 'right button',\n (4): 'wheel up', (5): 'wheel down', (8): 'back button', (9):\n 'forward button'}\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef printInfo(message, *args):\n print(' '.join([color(93, message), *(str(arg) for arg in args)]))\n sys.stdout.flush()\n\n\ndef inspect(obj, attrFilter=publicDataAttrs):\n indent = ' ' if VERBOSE and not sys.stdout.isatty() else ''\n output = []\n for key in dir(obj):\n try:\n val = getattr(obj, key)\n if attrFilter(key, val):\n output.append('%s\\x1b[96m%s:\\x1b[m %r' % (indent, key, val))\n except Exception as error:\n output.append('%s\\x1b[96m%s:\\x1b[m \\x1b[91m%r\\x1b[m' % (indent,\n key, error))\n if sys.stdout.isatty():\n print(collist(output))\n elif VERBOSE:\n print('\\n'.join(output))\n else:\n print(' ' + ', '.join(output))\n sys.stdout.flush()\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef rightStrut(width, height, top=0):\n return 0, width, 0, 0, 0, 0, top, top + height - 1, 0, 0, 0, 0\n",
"<docstring token>\n<import token>\n<function token>\n<assignment token>\n\n\nclass QuitApplication(Exception):\n pass\n\n\nclass MouseButton:\n left = 1\n middle = 2\n right = 3\n scroll_up = 4\n scroll_down = 5\n back = 8\n forward = 9\n names = {(1): 'left button', (2): 'middle button', (3): 'right button',\n (4): 'wheel up', (5): 'wheel down', (8): 'back button', (9):\n 'forward button'}\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef printInfo(message, *args):\n print(' '.join([color(93, message), *(str(arg) for arg in args)]))\n sys.stdout.flush()\n\n\ndef inspect(obj, attrFilter=publicDataAttrs):\n indent = ' ' if VERBOSE and not sys.stdout.isatty() else ''\n output = []\n for key in dir(obj):\n try:\n val = getattr(obj, key)\n if attrFilter(key, val):\n output.append('%s\\x1b[96m%s:\\x1b[m %r' % (indent, key, val))\n except Exception as error:\n output.append('%s\\x1b[96m%s:\\x1b[m \\x1b[91m%r\\x1b[m' % (indent,\n key, error))\n if sys.stdout.isatty():\n print(collist(output))\n elif VERBOSE:\n print('\\n'.join(output))\n else:\n print(' ' + ', '.join(output))\n sys.stdout.flush()\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef rightStrut(width, height, top=0):\n return 0, width, 0, 0, 0, 0, top, top + height - 1, 0, 0, 0, 0\n",
"<docstring token>\n<import token>\n<function token>\n<assignment token>\n\n\nclass QuitApplication(Exception):\n pass\n\n\nclass MouseButton:\n left = 1\n middle = 2\n right = 3\n scroll_up = 4\n scroll_down = 5\n back = 8\n forward = 9\n names = {(1): 'left button', (2): 'middle button', (3): 'right button',\n (4): 'wheel up', (5): 'wheel down', (8): 'back button', (9):\n 'forward button'}\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef inspect(obj, attrFilter=publicDataAttrs):\n indent = ' ' if VERBOSE and not sys.stdout.isatty() else ''\n output = []\n for key in dir(obj):\n try:\n val = getattr(obj, key)\n if attrFilter(key, val):\n output.append('%s\\x1b[96m%s:\\x1b[m %r' % (indent, key, val))\n except Exception as error:\n output.append('%s\\x1b[96m%s:\\x1b[m \\x1b[91m%r\\x1b[m' % (indent,\n key, error))\n if sys.stdout.isatty():\n print(collist(output))\n elif VERBOSE:\n print('\\n'.join(output))\n else:\n print(' ' + ', '.join(output))\n sys.stdout.flush()\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef rightStrut(width, height, top=0):\n return 0, width, 0, 0, 0, 0, top, top + height - 1, 0, 0, 0, 0\n",
"<docstring token>\n<import token>\n<function token>\n<assignment token>\n\n\nclass QuitApplication(Exception):\n pass\n\n\nclass MouseButton:\n left = 1\n middle = 2\n right = 3\n scroll_up = 4\n scroll_down = 5\n back = 8\n forward = 9\n names = {(1): 'left button', (2): 'middle button', (3): 'right button',\n (4): 'wheel up', (5): 'wheel down', (8): 'back button', (9):\n 'forward button'}\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef rightStrut(width, height, top=0):\n return 0, width, 0, 0, 0, 0, top, top + height - 1, 0, 0, 0, 0\n",
"<docstring token>\n<import token>\n<function token>\n<assignment token>\n\n\nclass QuitApplication(Exception):\n pass\n\n\nclass MouseButton:\n left = 1\n middle = 2\n right = 3\n scroll_up = 4\n scroll_down = 5\n back = 8\n forward = 9\n names = {(1): 'left button', (2): 'middle button', (3): 'right button',\n (4): 'wheel up', (5): 'wheel down', (8): 'back button', (9):\n 'forward button'}\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<function token>\n<assignment token>\n<class token>\n\n\nclass MouseButton:\n left = 1\n middle = 2\n right = 3\n scroll_up = 4\n scroll_down = 5\n back = 8\n forward = 9\n names = {(1): 'left button', (2): 'middle button', (3): 'right button',\n (4): 'wheel up', (5): 'wheel down', (8): 'back button', (9):\n 'forward button'}\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<function token>\n<assignment token>\n<class token>\n\n\nclass MouseButton:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<docstring token>\n<import token>\n<function token>\n<assignment token>\n<class token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
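
The abstraction steps above reduce an X11 utility module whose strut helpers emit _NET_WM_STRUT_PARTIAL tuples. As a sanity check of that 12-field layout (field order per the EWMH spec), here is a standalone sketch of the top-edge helper, not part of the module itself:

# _NET_WM_STRUT_PARTIAL field order per the EWMH spec:
# left, right, top, bottom,
# left_start_y, left_end_y, right_start_y, right_end_y,
# top_start_x, top_end_x, bottom_start_x, bottom_end_x.
def top_strut(width, height, left=0):
    return (0, 0, height, 0,          # reserve `height` px along the top edge
            0, 0, 0, 0,               # no left/right segments
            left, left + width - 1,   # top segment spans [left, left+width-1]
            0, 0)                     # no bottom segment

# A 1920x30 panel anchored at x=0 reserves the top 30 px of the screen.
assert top_strut(1920, 30) == (0, 0, 30, 0, 0, 0, 0, 0, 0, 1919, 0, 0)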
99,339 |
22931dab501ba3909d63932e75c385ce5bfb50a1
|
import pickle
import opencc
# Load the scraped posts (a list of posts, each a list of text fields).
with open("./newsData.pkl", 'rb') as fr:
    data = pickle.load(fr)

# Run every field through OpenCC (module-level convert() from the legacy
# opencc binding) and collect the converted posts.
dataCn = []
for post in data:
    postCn = []
    for ele in post:
        postCn.append(opencc.convert(ele))
    dataCn.append(postCn)

with open("./newsDataCn.pkl", 'wb') as fw:
    pickle.dump(dataCn, fw)
|
[
"import pickle\nimport opencc\n\ndata = []\nwith open(\"./newsData.pkl\", 'rb') as fr:\n data = pickle.load(fr)\ndataCn = []\nfor post in data:\n postCn = []\n for ele in post:\n postCn.append(opencc.convert(ele))\n dataCn.append(postCn)\n\nwith open(\"./newsDataCn.pkl\", 'wb') as fw:\n pickle.dump(dataCn, fw)\n",
"import pickle\nimport opencc\ndata = []\nwith open('./newsData.pkl', 'rb') as fr:\n data = pickle.load(fr)\ndataCn = []\nfor post in data:\n postCn = []\n for ele in post:\n postCn.append(opencc.convert(ele))\n dataCn.append(postCn)\nwith open('./newsDataCn.pkl', 'wb') as fw:\n pickle.dump(dataCn, fw)\n",
"<import token>\ndata = []\nwith open('./newsData.pkl', 'rb') as fr:\n data = pickle.load(fr)\ndataCn = []\nfor post in data:\n postCn = []\n for ele in post:\n postCn.append(opencc.convert(ele))\n dataCn.append(postCn)\nwith open('./newsDataCn.pkl', 'wb') as fw:\n pickle.dump(dataCn, fw)\n",
"<import token>\n<assignment token>\nwith open('./newsData.pkl', 'rb') as fr:\n data = pickle.load(fr)\n<assignment token>\nfor post in data:\n postCn = []\n for ele in post:\n postCn.append(opencc.convert(ele))\n dataCn.append(postCn)\nwith open('./newsDataCn.pkl', 'wb') as fw:\n pickle.dump(dataCn, fw)\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
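
The conversion record above calls a legacy module-level opencc.convert; current opencc-python bindings expose a class-based API instead. A hedged sketch of the same pipeline with the modern API; the 't2s' (traditional-to-simplified) config is an assumption about the intended direction:

import pickle

import opencc  # opencc-python-reimplemented: class-based OpenCC API

# 't2s' (traditional -> simplified) is assumed; use 's2t' for the reverse.
converter = opencc.OpenCC('t2s')

with open("./newsData.pkl", 'rb') as fr:
    data = pickle.load(fr)

# Convert every text field of every post.
dataCn = [[converter.convert(ele) for ele in post] for post in data]

with open("./newsDataCn.pkl", 'wb') as fw:
    pickle.dump(dataCn, fw)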
99,340 |
5cfc3269c42c6124ab255953bbed9f053cc3f51d
|
from django.utils.translation import gettext as _
from database.models import Site
from irekua_utils.filters import sites
from irekua_utils.permissions import sites as site_permissions
from selia.views.list_views.base import SeliaListView
class ListUserSitesView(SeliaListView):
    """List the sites created by the current user."""

template_name = 'selia/list/user_sites.html'
list_item_template = 'selia/components/list_items/site.html'
help_template = 'selia/components/help/user_sites.html'
filter_form_template = 'selia/components/filters/site.html'
empty_message = _('User has no registered sites')
filter_class = sites.Filter
search_fields = sites.search_fields
ordering_fields = sites.ordering_fields
def get_initial_queryset(self):
return Site.objects.filter(created_by=self.request.user)
def has_view_permission(self):
user = self.request.user
return site_permissions.create(user)
def has_create_permission(self):
user = self.request.user
return site_permissions.create(user)
|
[
"from django.utils.translation import gettext as _\n\nfrom database.models import Site\n\nfrom irekua_utils.filters import sites\nfrom irekua_utils.permissions import sites as site_permissions\nfrom selia.views.list_views.base import SeliaListView\n\n\nclass ListUserSitesView(SeliaListView):\n template_name = 'selia/list/user_sites.html'\n\n list_item_template = 'selia/components/list_items/site.html'\n help_template = 'selia/components/help/user_sites.html'\n filter_form_template = 'selia/components/filters/site.html'\n\n empty_message = _('User has no registered sites')\n\n filter_class = sites.Filter\n search_fields = sites.search_fields\n ordering_fields = sites.ordering_fields\n\n def get_initial_queryset(self):\n return Site.objects.filter(created_by=self.request.user)\n\n def has_view_permission(self):\n user = self.request.user\n return site_permissions.create(user)\n\n def has_create_permission(self):\n user = self.request.user\n return site_permissions.create(user)\n",
"from django.utils.translation import gettext as _\nfrom database.models import Site\nfrom irekua_utils.filters import sites\nfrom irekua_utils.permissions import sites as site_permissions\nfrom selia.views.list_views.base import SeliaListView\n\n\nclass ListUserSitesView(SeliaListView):\n template_name = 'selia/list/user_sites.html'\n list_item_template = 'selia/components/list_items/site.html'\n help_template = 'selia/components/help/user_sites.html'\n filter_form_template = 'selia/components/filters/site.html'\n empty_message = _('User has no registered sites')\n filter_class = sites.Filter\n search_fields = sites.search_fields\n ordering_fields = sites.ordering_fields\n\n def get_initial_queryset(self):\n return Site.objects.filter(created_by=self.request.user)\n\n def has_view_permission(self):\n user = self.request.user\n return site_permissions.create(user)\n\n def has_create_permission(self):\n user = self.request.user\n return site_permissions.create(user)\n",
"<import token>\n\n\nclass ListUserSitesView(SeliaListView):\n template_name = 'selia/list/user_sites.html'\n list_item_template = 'selia/components/list_items/site.html'\n help_template = 'selia/components/help/user_sites.html'\n filter_form_template = 'selia/components/filters/site.html'\n empty_message = _('User has no registered sites')\n filter_class = sites.Filter\n search_fields = sites.search_fields\n ordering_fields = sites.ordering_fields\n\n def get_initial_queryset(self):\n return Site.objects.filter(created_by=self.request.user)\n\n def has_view_permission(self):\n user = self.request.user\n return site_permissions.create(user)\n\n def has_create_permission(self):\n user = self.request.user\n return site_permissions.create(user)\n",
"<import token>\n\n\nclass ListUserSitesView(SeliaListView):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def get_initial_queryset(self):\n return Site.objects.filter(created_by=self.request.user)\n\n def has_view_permission(self):\n user = self.request.user\n return site_permissions.create(user)\n\n def has_create_permission(self):\n user = self.request.user\n return site_permissions.create(user)\n",
"<import token>\n\n\nclass ListUserSitesView(SeliaListView):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def get_initial_queryset(self):\n return Site.objects.filter(created_by=self.request.user)\n\n def has_view_permission(self):\n user = self.request.user\n return site_permissions.create(user)\n <function token>\n",
"<import token>\n\n\nclass ListUserSitesView(SeliaListView):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def get_initial_queryset(self):\n return Site.objects.filter(created_by=self.request.user)\n <function token>\n <function token>\n",
"<import token>\n\n\nclass ListUserSitesView(SeliaListView):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
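
For context, a class-based list view like the one above is normally wired up through a URLconf. A minimal sketch, assuming SeliaListView behaves like an ordinary Django class-based view; the import path and URL are hypothetical, not taken from the selia project:

# urls.py, hypothetical wiring for the view above; the import path and
# the URL pattern are illustrative only.
from django.urls import path

from selia.views.list_views.user_sites import ListUserSitesView

urlpatterns = [
    path('user/sites/', ListUserSitesView.as_view(), name='user-sites'),
]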
99,341 |
0837c4f130a2dec415c6eb76f9656257cc0d0e48
|
import numpy as np
import matplotlib.pyplot as plt
fs = 500  # sampling rate (Hz)
t = np.arange(-10, 10, 1/fs)
B = 10  # filter cutoff (Hz); raising it past 20 would let the 20 Hz component through as well
A = 0.01  # unused
# sinc kernel: the impulse-response shape of an ideal low-pass filter with cutoff B
y = 2*np.sinc(2*B*t)
# test signal: 5 Hz + 20 Hz sinusoids
m_t = np.sin(10*np.pi*t) + np.sin(40*np.pi*t)
# filter by convolution and keep the central 20 s so the output lines up with t
y_t = np.convolve(y, m_t)[10*fs:30*fs]

y_f = np.fft.fft(y_t)
y_f_abs = np.abs(y_f)
freq = np.fft.fftfreq(y_t.size, d=1/fs)

# label each subplot through its own axes object so both panels keep their
# titles and axis labels
fig, axs = plt.subplots(2)
axs[0].plot(t, y_t)
axs[0].set_title('Time Domain')
axs[0].set_xlabel('time')
axs[0].set_ylabel('Amplitude')
axs[1].plot(freq, y_f_abs)
axs[1].set_title('Freq Domain')
axs[1].set_xlabel('Freq')
axs[1].set_ylabel('Amplitude')
fig.tight_layout()
plt.show()
|
[
"import numpy as np\nimport matplotlib.pyplot as plt\n\n\nfs = 500\nt = np.arange(-10, 10, 1/fs)\nB = 10 # if we increase this frequency, the frequency of 20 will also pass from this filter. \nA = 0.01\n\ny = 2*np.sinc(2*B*t)\nm_t = np.sin(10*np.pi*t) + np.sin(40*np.pi*t)\n\ny_t = np.convolve(y, m_t)[10*fs:30*fs]\n\ny_f = np.fft.fft(y_t)\ny_f_abs = np.abs(y_f)\nfreq = np.fft.fftfreq(y_t.size, d=1/fs)\nfig, axs=plt.subplots(2)\naxs[0].plot(t,y_t)\nplt.title('Time Domain')\nplt.xlabel('time')\nplt.ylabel('Amplitude')\naxs[1].plot(freq, y_f_abs)\nplt.title('Freq Domain')\nplt.xlabel('Freq')\nplt.ylabel('Amplitude')\nplt.show()",
"import numpy as np\nimport matplotlib.pyplot as plt\nfs = 500\nt = np.arange(-10, 10, 1 / fs)\nB = 10\nA = 0.01\ny = 2 * np.sinc(2 * B * t)\nm_t = np.sin(10 * np.pi * t) + np.sin(40 * np.pi * t)\ny_t = np.convolve(y, m_t)[10 * fs:30 * fs]\ny_f = np.fft.fft(y_t)\ny_f_abs = np.abs(y_f)\nfreq = np.fft.fftfreq(y_t.size, d=1 / fs)\nfig, axs = plt.subplots(2)\naxs[0].plot(t, y_t)\nplt.title('Time Domain')\nplt.xlabel('time')\nplt.ylabel('Amplitude')\naxs[1].plot(freq, y_f_abs)\nplt.title('Freq Domain')\nplt.xlabel('Freq')\nplt.ylabel('Amplitude')\nplt.show()\n",
"<import token>\nfs = 500\nt = np.arange(-10, 10, 1 / fs)\nB = 10\nA = 0.01\ny = 2 * np.sinc(2 * B * t)\nm_t = np.sin(10 * np.pi * t) + np.sin(40 * np.pi * t)\ny_t = np.convolve(y, m_t)[10 * fs:30 * fs]\ny_f = np.fft.fft(y_t)\ny_f_abs = np.abs(y_f)\nfreq = np.fft.fftfreq(y_t.size, d=1 / fs)\nfig, axs = plt.subplots(2)\naxs[0].plot(t, y_t)\nplt.title('Time Domain')\nplt.xlabel('time')\nplt.ylabel('Amplitude')\naxs[1].plot(freq, y_f_abs)\nplt.title('Freq Domain')\nplt.xlabel('Freq')\nplt.ylabel('Amplitude')\nplt.show()\n",
"<import token>\n<assignment token>\naxs[0].plot(t, y_t)\nplt.title('Time Domain')\nplt.xlabel('time')\nplt.ylabel('Amplitude')\naxs[1].plot(freq, y_f_abs)\nplt.title('Freq Domain')\nplt.xlabel('Freq')\nplt.ylabel('Amplitude')\nplt.show()\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
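
The filtering claim in the record above (5 Hz passes, 20 Hz is blocked for B = 10) can be verified directly from the spectrum. A short standalone check that rebuilds the same signal and compares peak magnitudes at the two frequencies; the factor-of-10 margin in the assertion is a conservative assumption about stopband leakage:

import numpy as np

fs = 500
t = np.arange(-10, 10, 1 / fs)
B = 10  # cutoff (Hz)

kernel = 2 * np.sinc(2 * B * t)                            # ideal low-pass shape
signal = np.sin(10 * np.pi * t) + np.sin(40 * np.pi * t)   # 5 Hz + 20 Hz
filtered = np.convolve(kernel, signal)[10 * fs:30 * fs]

spectrum = np.abs(np.fft.rfft(filtered))
freqs = np.fft.rfftfreq(filtered.size, d=1 / fs)

def peak_at(f_target):
    # magnitude at the FFT bin closest to f_target (both tones fall on-grid here)
    return spectrum[np.argmin(np.abs(freqs - f_target))]

# The 5 Hz component (inside the passband) should dominate the 20 Hz one.
print(peak_at(5.0), peak_at(20.0))
assert peak_at(5.0) > 10 * peak_at(20.0)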
99,342 |
933be99d4edb39ce6e018e1c482bc7bc6e5423c9
|
from __future__ import annotations
from itertools import combinations
from libtbx.phil import parse
from scitbx import matrix
from scitbx.math import r3_rotation_axis_and_angle_from_matrix
import dials.util
from dials.util.options import ArgumentParser, flatten_experiments
help_message = """
dials.two_theta_offset experiment_one.expt experiment_two.expt
"""
phil_scope = parse(
"""
offset_fast = 100.0
.type = float
.help = 'How far to move in the detector plane (fast direction)'
offset_slow = 100.0
.type = float
.help = 'How far to move in the detector plane (slow direction)'
min_distance = 10.0
.type = float
.help = 'Minimum shift in detector position'
""",
process_includes=True,
)
class Script:
"""A class for running the script."""
def __init__(self):
"""Initialise the script."""
# The script usage
usage = "usage: dials.two_theta_offset [options] experiment_one.expt experiment_two.expt"
# Create the parser
self.parser = ArgumentParser(
usage=usage,
phil=phil_scope,
epilog=help_message,
check_format=False,
read_experiments=True,
)
def run(self, args=None):
"""Execute the script."""
# Parse the command line
params, options = self.parser.parse_args(args, show_diff_phil=True)
# Check the number of experiments is at least 2
experiments = flatten_experiments(params.input.experiments)
if len(experiments) < 2:
self.parser.print_help()
return
detectors = [experiment.detector[0] for experiment in experiments]
for pair in combinations(detectors, 2):
determine_axis(pair, params)
crystals = [experiment.crystal for experiment in experiments]
goniometers = [experiment.goniometer for experiment in experiments]
FUs = []
for c, g in zip(crystals, goniometers):
u = matrix.sqr(c.get_U())
f = matrix.sqr(g.get_fixed_rotation())
FUs.append(f * u)
for pair in combinations(FUs, 2):
R = pair[1] * pair[0].inverse()
rot = r3_rotation_axis_and_angle_from_matrix(R)
angle = rot.angle(deg=True)
axis = matrix.col(rot.axis)
if abs(angle) < 10:
continue
print("Axis: %8.5f %8.5f %8.5f" % axis.elems, f"angle: {angle:7.4f}")
def determine_axis(detectors, params):
offset_fast = params.offset_fast
offset_slow = params.offset_slow
min_distance = params.min_distance
# pick two positions, at nominal origin offset in fast, slow
x1 = matrix.col(detectors[0].get_origin())
y1 = (
matrix.col(detectors[0].get_origin())
+ offset_fast * matrix.col(detectors[0].get_fast_axis())
+ offset_slow * matrix.col(detectors[0].get_slow_axis())
)
x2 = matrix.col(detectors[1].get_origin())
y2 = (
matrix.col(detectors[1].get_origin())
+ offset_fast * matrix.col(detectors[1].get_fast_axis())
+ offset_slow * matrix.col(detectors[1].get_slow_axis())
)
# only allow this calculation if the detector has been moved a "significant"
# amount
if (x2 - x1).length() < min_distance:
return
centre, axis = find_centre_of_rotation(x1, x2, y1, y2)
# compute "true" two-theta from these
two_theta = component(x2 - centre, axis).angle(
component(x1 - centre, axis), deg=True
)
print(
"Centre: %7.4f %7.4f %7.4f" % centre.elems,
" axis: %7.4f %7.4f %7.4f" % axis.elems,
f"angle: {two_theta:.2f}",
)
def component(a, n):
return a - a.dot(n) * n
def find_centre_of_rotation(x1, x2, y1, y2):
"""Find centre of rotation which takes position x1 -> x2 and y1 -> y2"""
# chords of rotation of x, y
cx = x2 - x1
cy = y2 - y1
    # the rotation axis is perpendicular to both chords, so take their cross product
axis = cx.cross(cy).normalize()
# normal vector to y chord
ny = component(cy, axis).normalize().cross(axis)
# origin of normal vectors, centre of x, y chords
ox = component(x1 + 0.5 * cx, axis)
oy = component(y1 + 0.5 * cy, axis)
    # determine the true origin of rotation: take the normal to the x chord
    # and construct a right-angled triangle whose hypotenuse runs from the
    # unknown centre to oy (the midpoint of the y chord) and whose adjacent
    # side is parallel to the reversed x chord; the opposite side then lies
    # on the vector from the unknown centre to ox
ncx = cx.normalize()
h = (oy - ox).dot(ncx)
    d = h / ny.dot(-ncx)
return oy + d * ny, axis
@dials.util.show_mail_handle_errors()
def run(args=None):
script = Script()
script.run(args)
if __name__ == "__main__":
run()
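
The chord construction in find_centre_of_rotation invites a self-contained numerical check: rotate two points about a known centre and confirm the centre is recovered. A minimal sketch with plain numpy standing in for scitbx.matrix (illustrative only, not part of the DIALS script):

import numpy as np

def _component(a, n):
    # part of a perpendicular to the unit vector n
    return a - a.dot(n) * n

def _centre_of_rotation(x1, x2, y1, y2):
    # same chord algebra as find_centre_of_rotation, in plain numpy
    cx, cy = x2 - x1, y2 - y1
    axis = np.cross(cx, cy)
    axis = axis / np.linalg.norm(axis)
    ny_dir = _component(cy, axis)
    ny = np.cross(ny_dir / np.linalg.norm(ny_dir), axis)
    ox = _component(x1 + 0.5 * cx, axis)
    oy = _component(y1 + 0.5 * cy, axis)
    ncx = cx / np.linalg.norm(cx)
    d = (oy - ox).dot(ncx) / ny.dot(-ncx)
    return oy + d * ny

def _rotate_z(p, centre, angle):
    c, s = np.cos(angle), np.sin(angle)
    R = np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])
    return centre + R @ (p - centre)

true_centre = np.array([3.0, -2.0, 0.0])
x1 = np.array([10.0, 0.0, 0.0])
y1 = np.array([10.0, 5.0, 0.0])
theta = np.radians(25.0)
recovered = _centre_of_rotation(x1, _rotate_z(x1, true_centre, theta),
                                y1, _rotate_z(y1, true_centre, theta))
assert np.allclose(recovered, true_centre)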
|
[
"from __future__ import annotations\n\nfrom itertools import combinations\n\nfrom libtbx.phil import parse\nfrom scitbx import matrix\nfrom scitbx.math import r3_rotation_axis_and_angle_from_matrix\n\nimport dials.util\nfrom dials.util.options import ArgumentParser, flatten_experiments\n\nhelp_message = \"\"\"\n\ndials.two_theta_offset experiment_one.expt experiment_two.expt\n\"\"\"\n\nphil_scope = parse(\n \"\"\"\noffset_fast = 100.0\n .type = float\n .help = 'How far to move in the detector plane (fast direction)'\noffset_slow = 100.0\n .type = float\n .help = 'How far to move in the detector plane (slow direction)'\nmin_distance = 10.0\n .type = float\n .help = 'Minimum shift in detector position'\n\"\"\",\n process_includes=True,\n)\n\n\nclass Script:\n \"\"\"A class for running the script.\"\"\"\n\n def __init__(self):\n \"\"\"Initialise the script.\"\"\"\n # The script usage\n usage = \"usage: dials.two_theta_offset [options] experiment_one.expt experiment_two.expt\"\n\n # Create the parser\n self.parser = ArgumentParser(\n usage=usage,\n phil=phil_scope,\n epilog=help_message,\n check_format=False,\n read_experiments=True,\n )\n\n def run(self, args=None):\n \"\"\"Execute the script.\"\"\"\n # Parse the command line\n params, options = self.parser.parse_args(args, show_diff_phil=True)\n\n # Check the number of experiments is at least 2\n experiments = flatten_experiments(params.input.experiments)\n if len(experiments) < 2:\n self.parser.print_help()\n return\n\n detectors = [experiment.detector[0] for experiment in experiments]\n\n for pair in combinations(detectors, 2):\n determine_axis(pair, params)\n\n crystals = [experiment.crystal for experiment in experiments]\n goniometers = [experiment.goniometer for experiment in experiments]\n\n FUs = []\n\n for c, g in zip(crystals, goniometers):\n u = matrix.sqr(c.get_U())\n f = matrix.sqr(g.get_fixed_rotation())\n FUs.append(f * u)\n\n for pair in combinations(FUs, 2):\n R = pair[1] * pair[0].inverse()\n rot = r3_rotation_axis_and_angle_from_matrix(R)\n angle = rot.angle(deg=True)\n axis = matrix.col(rot.axis)\n if abs(angle) < 10:\n continue\n print(\"Axis: %8.5f %8.5f %8.5f\" % axis.elems, f\"angle: {angle:7.4f}\")\n\n\ndef determine_axis(detectors, params):\n offset_fast = params.offset_fast\n offset_slow = params.offset_slow\n min_distance = params.min_distance\n\n # pick two positions, at nominal origin offset in fast, slow\n\n x1 = matrix.col(detectors[0].get_origin())\n y1 = (\n matrix.col(detectors[0].get_origin())\n + offset_fast * matrix.col(detectors[0].get_fast_axis())\n + offset_slow * matrix.col(detectors[0].get_slow_axis())\n )\n\n x2 = matrix.col(detectors[1].get_origin())\n y2 = (\n matrix.col(detectors[1].get_origin())\n + offset_fast * matrix.col(detectors[1].get_fast_axis())\n + offset_slow * matrix.col(detectors[1].get_slow_axis())\n )\n\n # only allow this calculation if the detector has been moved a \"significant\"\n # amount\n\n if (x2 - x1).length() < min_distance:\n return\n\n centre, axis = find_centre_of_rotation(x1, x2, y1, y2)\n\n # compute \"true\" two-theta from these\n\n two_theta = component(x2 - centre, axis).angle(\n component(x1 - centre, axis), deg=True\n )\n\n print(\n \"Centre: %7.4f %7.4f %7.4f\" % centre.elems,\n \" axis: %7.4f %7.4f %7.4f\" % axis.elems,\n f\"angle: {two_theta:.2f}\",\n )\n\n\ndef component(a, n):\n return a - a.dot(n) * n\n\n\ndef find_centre_of_rotation(x1, x2, y1, y2):\n \"\"\"Find centre of rotation which takes position x1 -> x2 and y1 -> y2\"\"\"\n\n # chords of rotation 
of x, y\n\n cx = x2 - x1\n cy = y2 - y1\n\n # know axis is perpendicular to both of these -> is cross product\n\n axis = cx.cross(cy).normalize()\n\n # normal vector to y chord\n\n ny = component(cy, axis).normalize().cross(axis)\n\n # origin of normal vectors, centre of x, y chords\n\n ox = component(x1 + 0.5 * cx, axis)\n oy = component(y1 + 0.5 * cy, axis)\n\n # determine true origin of rotation - normal vector of x chord, construct\n # right-angle-triangle with hypotenuse from unknown origin of rotation\n # to central point of y chord oy, and adjacent the vector parallel to\n # reversed x chord => opposite is on vector from unknown origin of rotation\n # to ox\n\n ncx = cx.normalize()\n h = (oy - ox).dot(ncx)\n d = h / (ny).dot(-ncx)\n return oy + d * ny, axis\n\n\[email protected]_mail_handle_errors()\ndef run(args=None):\n script = Script()\n script.run(args)\n\n\nif __name__ == \"__main__\":\n run()\n",
"from __future__ import annotations\nfrom itertools import combinations\nfrom libtbx.phil import parse\nfrom scitbx import matrix\nfrom scitbx.math import r3_rotation_axis_and_angle_from_matrix\nimport dials.util\nfrom dials.util.options import ArgumentParser, flatten_experiments\nhelp_message = \"\"\"\n\ndials.two_theta_offset experiment_one.expt experiment_two.expt\n\"\"\"\nphil_scope = parse(\n \"\"\"\noffset_fast = 100.0\n .type = float\n .help = 'How far to move in the detector plane (fast direction)'\noffset_slow = 100.0\n .type = float\n .help = 'How far to move in the detector plane (slow direction)'\nmin_distance = 10.0\n .type = float\n .help = 'Minimum shift in detector position'\n\"\"\"\n , process_includes=True)\n\n\nclass Script:\n \"\"\"A class for running the script.\"\"\"\n\n def __init__(self):\n \"\"\"Initialise the script.\"\"\"\n usage = (\n 'usage: dials.two_theta_offset [options] experiment_one.expt experiment_two.expt'\n )\n self.parser = ArgumentParser(usage=usage, phil=phil_scope, epilog=\n help_message, check_format=False, read_experiments=True)\n\n def run(self, args=None):\n \"\"\"Execute the script.\"\"\"\n params, options = self.parser.parse_args(args, show_diff_phil=True)\n experiments = flatten_experiments(params.input.experiments)\n if len(experiments) < 2:\n self.parser.print_help()\n return\n detectors = [experiment.detector[0] for experiment in experiments]\n for pair in combinations(detectors, 2):\n determine_axis(pair, params)\n crystals = [experiment.crystal for experiment in experiments]\n goniometers = [experiment.goniometer for experiment in experiments]\n FUs = []\n for c, g in zip(crystals, goniometers):\n u = matrix.sqr(c.get_U())\n f = matrix.sqr(g.get_fixed_rotation())\n FUs.append(f * u)\n for pair in combinations(FUs, 2):\n R = pair[1] * pair[0].inverse()\n rot = r3_rotation_axis_and_angle_from_matrix(R)\n angle = rot.angle(deg=True)\n axis = matrix.col(rot.axis)\n if abs(angle) < 10:\n continue\n print('Axis: %8.5f %8.5f %8.5f' % axis.elems,\n f'angle: {angle:7.4f}')\n\n\ndef determine_axis(detectors, params):\n offset_fast = params.offset_fast\n offset_slow = params.offset_slow\n min_distance = params.min_distance\n x1 = matrix.col(detectors[0].get_origin())\n y1 = matrix.col(detectors[0].get_origin()) + offset_fast * matrix.col(\n detectors[0].get_fast_axis()) + offset_slow * matrix.col(detectors[\n 0].get_slow_axis())\n x2 = matrix.col(detectors[1].get_origin())\n y2 = matrix.col(detectors[1].get_origin()) + offset_fast * matrix.col(\n detectors[1].get_fast_axis()) + offset_slow * matrix.col(detectors[\n 1].get_slow_axis())\n if (x2 - x1).length() < min_distance:\n return\n centre, axis = find_centre_of_rotation(x1, x2, y1, y2)\n two_theta = component(x2 - centre, axis).angle(component(x1 - centre,\n axis), deg=True)\n print('Centre: %7.4f %7.4f %7.4f' % centre.elems, \n ' axis: %7.4f %7.4f %7.4f' % axis.elems, f'angle: {two_theta:.2f}')\n\n\ndef component(a, n):\n return a - a.dot(n) * n\n\n\ndef find_centre_of_rotation(x1, x2, y1, y2):\n \"\"\"Find centre of rotation which takes position x1 -> x2 and y1 -> y2\"\"\"\n cx = x2 - x1\n cy = y2 - y1\n axis = cx.cross(cy).normalize()\n ny = component(cy, axis).normalize().cross(axis)\n ox = component(x1 + 0.5 * cx, axis)\n oy = component(y1 + 0.5 * cy, axis)\n ncx = cx.normalize()\n h = (oy - ox).dot(ncx)\n d = h / ny.dot(-ncx)\n return oy + d * ny, axis\n\n\[email protected]_mail_handle_errors()\ndef run(args=None):\n script = Script()\n script.run(args)\n\n\nif __name__ == '__main__':\n 
run()\n",
"<import token>\nhelp_message = \"\"\"\n\ndials.two_theta_offset experiment_one.expt experiment_two.expt\n\"\"\"\nphil_scope = parse(\n \"\"\"\noffset_fast = 100.0\n .type = float\n .help = 'How far to move in the detector plane (fast direction)'\noffset_slow = 100.0\n .type = float\n .help = 'How far to move in the detector plane (slow direction)'\nmin_distance = 10.0\n .type = float\n .help = 'Minimum shift in detector position'\n\"\"\"\n , process_includes=True)\n\n\nclass Script:\n \"\"\"A class for running the script.\"\"\"\n\n def __init__(self):\n \"\"\"Initialise the script.\"\"\"\n usage = (\n 'usage: dials.two_theta_offset [options] experiment_one.expt experiment_two.expt'\n )\n self.parser = ArgumentParser(usage=usage, phil=phil_scope, epilog=\n help_message, check_format=False, read_experiments=True)\n\n def run(self, args=None):\n \"\"\"Execute the script.\"\"\"\n params, options = self.parser.parse_args(args, show_diff_phil=True)\n experiments = flatten_experiments(params.input.experiments)\n if len(experiments) < 2:\n self.parser.print_help()\n return\n detectors = [experiment.detector[0] for experiment in experiments]\n for pair in combinations(detectors, 2):\n determine_axis(pair, params)\n crystals = [experiment.crystal for experiment in experiments]\n goniometers = [experiment.goniometer for experiment in experiments]\n FUs = []\n for c, g in zip(crystals, goniometers):\n u = matrix.sqr(c.get_U())\n f = matrix.sqr(g.get_fixed_rotation())\n FUs.append(f * u)\n for pair in combinations(FUs, 2):\n R = pair[1] * pair[0].inverse()\n rot = r3_rotation_axis_and_angle_from_matrix(R)\n angle = rot.angle(deg=True)\n axis = matrix.col(rot.axis)\n if abs(angle) < 10:\n continue\n print('Axis: %8.5f %8.5f %8.5f' % axis.elems,\n f'angle: {angle:7.4f}')\n\n\ndef determine_axis(detectors, params):\n offset_fast = params.offset_fast\n offset_slow = params.offset_slow\n min_distance = params.min_distance\n x1 = matrix.col(detectors[0].get_origin())\n y1 = matrix.col(detectors[0].get_origin()) + offset_fast * matrix.col(\n detectors[0].get_fast_axis()) + offset_slow * matrix.col(detectors[\n 0].get_slow_axis())\n x2 = matrix.col(detectors[1].get_origin())\n y2 = matrix.col(detectors[1].get_origin()) + offset_fast * matrix.col(\n detectors[1].get_fast_axis()) + offset_slow * matrix.col(detectors[\n 1].get_slow_axis())\n if (x2 - x1).length() < min_distance:\n return\n centre, axis = find_centre_of_rotation(x1, x2, y1, y2)\n two_theta = component(x2 - centre, axis).angle(component(x1 - centre,\n axis), deg=True)\n print('Centre: %7.4f %7.4f %7.4f' % centre.elems, \n ' axis: %7.4f %7.4f %7.4f' % axis.elems, f'angle: {two_theta:.2f}')\n\n\ndef component(a, n):\n return a - a.dot(n) * n\n\n\ndef find_centre_of_rotation(x1, x2, y1, y2):\n \"\"\"Find centre of rotation which takes position x1 -> x2 and y1 -> y2\"\"\"\n cx = x2 - x1\n cy = y2 - y1\n axis = cx.cross(cy).normalize()\n ny = component(cy, axis).normalize().cross(axis)\n ox = component(x1 + 0.5 * cx, axis)\n oy = component(y1 + 0.5 * cy, axis)\n ncx = cx.normalize()\n h = (oy - ox).dot(ncx)\n d = h / ny.dot(-ncx)\n return oy + d * ny, axis\n\n\[email protected]_mail_handle_errors()\ndef run(args=None):\n script = Script()\n script.run(args)\n\n\nif __name__ == '__main__':\n run()\n",
"<import token>\n<assignment token>\n\n\nclass Script:\n \"\"\"A class for running the script.\"\"\"\n\n def __init__(self):\n \"\"\"Initialise the script.\"\"\"\n usage = (\n 'usage: dials.two_theta_offset [options] experiment_one.expt experiment_two.expt'\n )\n self.parser = ArgumentParser(usage=usage, phil=phil_scope, epilog=\n help_message, check_format=False, read_experiments=True)\n\n def run(self, args=None):\n \"\"\"Execute the script.\"\"\"\n params, options = self.parser.parse_args(args, show_diff_phil=True)\n experiments = flatten_experiments(params.input.experiments)\n if len(experiments) < 2:\n self.parser.print_help()\n return\n detectors = [experiment.detector[0] for experiment in experiments]\n for pair in combinations(detectors, 2):\n determine_axis(pair, params)\n crystals = [experiment.crystal for experiment in experiments]\n goniometers = [experiment.goniometer for experiment in experiments]\n FUs = []\n for c, g in zip(crystals, goniometers):\n u = matrix.sqr(c.get_U())\n f = matrix.sqr(g.get_fixed_rotation())\n FUs.append(f * u)\n for pair in combinations(FUs, 2):\n R = pair[1] * pair[0].inverse()\n rot = r3_rotation_axis_and_angle_from_matrix(R)\n angle = rot.angle(deg=True)\n axis = matrix.col(rot.axis)\n if abs(angle) < 10:\n continue\n print('Axis: %8.5f %8.5f %8.5f' % axis.elems,\n f'angle: {angle:7.4f}')\n\n\ndef determine_axis(detectors, params):\n offset_fast = params.offset_fast\n offset_slow = params.offset_slow\n min_distance = params.min_distance\n x1 = matrix.col(detectors[0].get_origin())\n y1 = matrix.col(detectors[0].get_origin()) + offset_fast * matrix.col(\n detectors[0].get_fast_axis()) + offset_slow * matrix.col(detectors[\n 0].get_slow_axis())\n x2 = matrix.col(detectors[1].get_origin())\n y2 = matrix.col(detectors[1].get_origin()) + offset_fast * matrix.col(\n detectors[1].get_fast_axis()) + offset_slow * matrix.col(detectors[\n 1].get_slow_axis())\n if (x2 - x1).length() < min_distance:\n return\n centre, axis = find_centre_of_rotation(x1, x2, y1, y2)\n two_theta = component(x2 - centre, axis).angle(component(x1 - centre,\n axis), deg=True)\n print('Centre: %7.4f %7.4f %7.4f' % centre.elems, \n ' axis: %7.4f %7.4f %7.4f' % axis.elems, f'angle: {two_theta:.2f}')\n\n\ndef component(a, n):\n return a - a.dot(n) * n\n\n\ndef find_centre_of_rotation(x1, x2, y1, y2):\n \"\"\"Find centre of rotation which takes position x1 -> x2 and y1 -> y2\"\"\"\n cx = x2 - x1\n cy = y2 - y1\n axis = cx.cross(cy).normalize()\n ny = component(cy, axis).normalize().cross(axis)\n ox = component(x1 + 0.5 * cx, axis)\n oy = component(y1 + 0.5 * cy, axis)\n ncx = cx.normalize()\n h = (oy - ox).dot(ncx)\n d = h / ny.dot(-ncx)\n return oy + d * ny, axis\n\n\[email protected]_mail_handle_errors()\ndef run(args=None):\n script = Script()\n script.run(args)\n\n\nif __name__ == '__main__':\n run()\n",
"<import token>\n<assignment token>\n\n\nclass Script:\n \"\"\"A class for running the script.\"\"\"\n\n def __init__(self):\n \"\"\"Initialise the script.\"\"\"\n usage = (\n 'usage: dials.two_theta_offset [options] experiment_one.expt experiment_two.expt'\n )\n self.parser = ArgumentParser(usage=usage, phil=phil_scope, epilog=\n help_message, check_format=False, read_experiments=True)\n\n def run(self, args=None):\n \"\"\"Execute the script.\"\"\"\n params, options = self.parser.parse_args(args, show_diff_phil=True)\n experiments = flatten_experiments(params.input.experiments)\n if len(experiments) < 2:\n self.parser.print_help()\n return\n detectors = [experiment.detector[0] for experiment in experiments]\n for pair in combinations(detectors, 2):\n determine_axis(pair, params)\n crystals = [experiment.crystal for experiment in experiments]\n goniometers = [experiment.goniometer for experiment in experiments]\n FUs = []\n for c, g in zip(crystals, goniometers):\n u = matrix.sqr(c.get_U())\n f = matrix.sqr(g.get_fixed_rotation())\n FUs.append(f * u)\n for pair in combinations(FUs, 2):\n R = pair[1] * pair[0].inverse()\n rot = r3_rotation_axis_and_angle_from_matrix(R)\n angle = rot.angle(deg=True)\n axis = matrix.col(rot.axis)\n if abs(angle) < 10:\n continue\n print('Axis: %8.5f %8.5f %8.5f' % axis.elems,\n f'angle: {angle:7.4f}')\n\n\ndef determine_axis(detectors, params):\n offset_fast = params.offset_fast\n offset_slow = params.offset_slow\n min_distance = params.min_distance\n x1 = matrix.col(detectors[0].get_origin())\n y1 = matrix.col(detectors[0].get_origin()) + offset_fast * matrix.col(\n detectors[0].get_fast_axis()) + offset_slow * matrix.col(detectors[\n 0].get_slow_axis())\n x2 = matrix.col(detectors[1].get_origin())\n y2 = matrix.col(detectors[1].get_origin()) + offset_fast * matrix.col(\n detectors[1].get_fast_axis()) + offset_slow * matrix.col(detectors[\n 1].get_slow_axis())\n if (x2 - x1).length() < min_distance:\n return\n centre, axis = find_centre_of_rotation(x1, x2, y1, y2)\n two_theta = component(x2 - centre, axis).angle(component(x1 - centre,\n axis), deg=True)\n print('Centre: %7.4f %7.4f %7.4f' % centre.elems, \n ' axis: %7.4f %7.4f %7.4f' % axis.elems, f'angle: {two_theta:.2f}')\n\n\ndef component(a, n):\n return a - a.dot(n) * n\n\n\ndef find_centre_of_rotation(x1, x2, y1, y2):\n \"\"\"Find centre of rotation which takes position x1 -> x2 and y1 -> y2\"\"\"\n cx = x2 - x1\n cy = y2 - y1\n axis = cx.cross(cy).normalize()\n ny = component(cy, axis).normalize().cross(axis)\n ox = component(x1 + 0.5 * cx, axis)\n oy = component(y1 + 0.5 * cy, axis)\n ncx = cx.normalize()\n h = (oy - ox).dot(ncx)\n d = h / ny.dot(-ncx)\n return oy + d * ny, axis\n\n\[email protected]_mail_handle_errors()\ndef run(args=None):\n script = Script()\n script.run(args)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Script:\n \"\"\"A class for running the script.\"\"\"\n\n def __init__(self):\n \"\"\"Initialise the script.\"\"\"\n usage = (\n 'usage: dials.two_theta_offset [options] experiment_one.expt experiment_two.expt'\n )\n self.parser = ArgumentParser(usage=usage, phil=phil_scope, epilog=\n help_message, check_format=False, read_experiments=True)\n\n def run(self, args=None):\n \"\"\"Execute the script.\"\"\"\n params, options = self.parser.parse_args(args, show_diff_phil=True)\n experiments = flatten_experiments(params.input.experiments)\n if len(experiments) < 2:\n self.parser.print_help()\n return\n detectors = [experiment.detector[0] for experiment in experiments]\n for pair in combinations(detectors, 2):\n determine_axis(pair, params)\n crystals = [experiment.crystal for experiment in experiments]\n goniometers = [experiment.goniometer for experiment in experiments]\n FUs = []\n for c, g in zip(crystals, goniometers):\n u = matrix.sqr(c.get_U())\n f = matrix.sqr(g.get_fixed_rotation())\n FUs.append(f * u)\n for pair in combinations(FUs, 2):\n R = pair[1] * pair[0].inverse()\n rot = r3_rotation_axis_and_angle_from_matrix(R)\n angle = rot.angle(deg=True)\n axis = matrix.col(rot.axis)\n if abs(angle) < 10:\n continue\n print('Axis: %8.5f %8.5f %8.5f' % axis.elems,\n f'angle: {angle:7.4f}')\n\n\n<function token>\n\n\ndef component(a, n):\n return a - a.dot(n) * n\n\n\ndef find_centre_of_rotation(x1, x2, y1, y2):\n \"\"\"Find centre of rotation which takes position x1 -> x2 and y1 -> y2\"\"\"\n cx = x2 - x1\n cy = y2 - y1\n axis = cx.cross(cy).normalize()\n ny = component(cy, axis).normalize().cross(axis)\n ox = component(x1 + 0.5 * cx, axis)\n oy = component(y1 + 0.5 * cy, axis)\n ncx = cx.normalize()\n h = (oy - ox).dot(ncx)\n d = h / ny.dot(-ncx)\n return oy + d * ny, axis\n\n\[email protected]_mail_handle_errors()\ndef run(args=None):\n script = Script()\n script.run(args)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Script:\n \"\"\"A class for running the script.\"\"\"\n\n def __init__(self):\n \"\"\"Initialise the script.\"\"\"\n usage = (\n 'usage: dials.two_theta_offset [options] experiment_one.expt experiment_two.expt'\n )\n self.parser = ArgumentParser(usage=usage, phil=phil_scope, epilog=\n help_message, check_format=False, read_experiments=True)\n\n def run(self, args=None):\n \"\"\"Execute the script.\"\"\"\n params, options = self.parser.parse_args(args, show_diff_phil=True)\n experiments = flatten_experiments(params.input.experiments)\n if len(experiments) < 2:\n self.parser.print_help()\n return\n detectors = [experiment.detector[0] for experiment in experiments]\n for pair in combinations(detectors, 2):\n determine_axis(pair, params)\n crystals = [experiment.crystal for experiment in experiments]\n goniometers = [experiment.goniometer for experiment in experiments]\n FUs = []\n for c, g in zip(crystals, goniometers):\n u = matrix.sqr(c.get_U())\n f = matrix.sqr(g.get_fixed_rotation())\n FUs.append(f * u)\n for pair in combinations(FUs, 2):\n R = pair[1] * pair[0].inverse()\n rot = r3_rotation_axis_and_angle_from_matrix(R)\n angle = rot.angle(deg=True)\n axis = matrix.col(rot.axis)\n if abs(angle) < 10:\n continue\n print('Axis: %8.5f %8.5f %8.5f' % axis.elems,\n f'angle: {angle:7.4f}')\n\n\n<function token>\n\n\ndef component(a, n):\n return a - a.dot(n) * n\n\n\n<function token>\n\n\[email protected]_mail_handle_errors()\ndef run(args=None):\n script = Script()\n script.run(args)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Script:\n \"\"\"A class for running the script.\"\"\"\n\n def __init__(self):\n \"\"\"Initialise the script.\"\"\"\n usage = (\n 'usage: dials.two_theta_offset [options] experiment_one.expt experiment_two.expt'\n )\n self.parser = ArgumentParser(usage=usage, phil=phil_scope, epilog=\n help_message, check_format=False, read_experiments=True)\n\n def run(self, args=None):\n \"\"\"Execute the script.\"\"\"\n params, options = self.parser.parse_args(args, show_diff_phil=True)\n experiments = flatten_experiments(params.input.experiments)\n if len(experiments) < 2:\n self.parser.print_help()\n return\n detectors = [experiment.detector[0] for experiment in experiments]\n for pair in combinations(detectors, 2):\n determine_axis(pair, params)\n crystals = [experiment.crystal for experiment in experiments]\n goniometers = [experiment.goniometer for experiment in experiments]\n FUs = []\n for c, g in zip(crystals, goniometers):\n u = matrix.sqr(c.get_U())\n f = matrix.sqr(g.get_fixed_rotation())\n FUs.append(f * u)\n for pair in combinations(FUs, 2):\n R = pair[1] * pair[0].inverse()\n rot = r3_rotation_axis_and_angle_from_matrix(R)\n angle = rot.angle(deg=True)\n axis = matrix.col(rot.axis)\n if abs(angle) < 10:\n continue\n print('Axis: %8.5f %8.5f %8.5f' % axis.elems,\n f'angle: {angle:7.4f}')\n\n\n<function token>\n\n\ndef component(a, n):\n return a - a.dot(n) * n\n\n\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Script:\n \"\"\"A class for running the script.\"\"\"\n\n def __init__(self):\n \"\"\"Initialise the script.\"\"\"\n usage = (\n 'usage: dials.two_theta_offset [options] experiment_one.expt experiment_two.expt'\n )\n self.parser = ArgumentParser(usage=usage, phil=phil_scope, epilog=\n help_message, check_format=False, read_experiments=True)\n\n def run(self, args=None):\n \"\"\"Execute the script.\"\"\"\n params, options = self.parser.parse_args(args, show_diff_phil=True)\n experiments = flatten_experiments(params.input.experiments)\n if len(experiments) < 2:\n self.parser.print_help()\n return\n detectors = [experiment.detector[0] for experiment in experiments]\n for pair in combinations(detectors, 2):\n determine_axis(pair, params)\n crystals = [experiment.crystal for experiment in experiments]\n goniometers = [experiment.goniometer for experiment in experiments]\n FUs = []\n for c, g in zip(crystals, goniometers):\n u = matrix.sqr(c.get_U())\n f = matrix.sqr(g.get_fixed_rotation())\n FUs.append(f * u)\n for pair in combinations(FUs, 2):\n R = pair[1] * pair[0].inverse()\n rot = r3_rotation_axis_and_angle_from_matrix(R)\n angle = rot.angle(deg=True)\n axis = matrix.col(rot.axis)\n if abs(angle) < 10:\n continue\n print('Axis: %8.5f %8.5f %8.5f' % axis.elems,\n f'angle: {angle:7.4f}')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Script:\n <docstring token>\n\n def __init__(self):\n \"\"\"Initialise the script.\"\"\"\n usage = (\n 'usage: dials.two_theta_offset [options] experiment_one.expt experiment_two.expt'\n )\n self.parser = ArgumentParser(usage=usage, phil=phil_scope, epilog=\n help_message, check_format=False, read_experiments=True)\n\n def run(self, args=None):\n \"\"\"Execute the script.\"\"\"\n params, options = self.parser.parse_args(args, show_diff_phil=True)\n experiments = flatten_experiments(params.input.experiments)\n if len(experiments) < 2:\n self.parser.print_help()\n return\n detectors = [experiment.detector[0] for experiment in experiments]\n for pair in combinations(detectors, 2):\n determine_axis(pair, params)\n crystals = [experiment.crystal for experiment in experiments]\n goniometers = [experiment.goniometer for experiment in experiments]\n FUs = []\n for c, g in zip(crystals, goniometers):\n u = matrix.sqr(c.get_U())\n f = matrix.sqr(g.get_fixed_rotation())\n FUs.append(f * u)\n for pair in combinations(FUs, 2):\n R = pair[1] * pair[0].inverse()\n rot = r3_rotation_axis_and_angle_from_matrix(R)\n angle = rot.angle(deg=True)\n axis = matrix.col(rot.axis)\n if abs(angle) < 10:\n continue\n print('Axis: %8.5f %8.5f %8.5f' % axis.elems,\n f'angle: {angle:7.4f}')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Script:\n <docstring token>\n <function token>\n\n def run(self, args=None):\n \"\"\"Execute the script.\"\"\"\n params, options = self.parser.parse_args(args, show_diff_phil=True)\n experiments = flatten_experiments(params.input.experiments)\n if len(experiments) < 2:\n self.parser.print_help()\n return\n detectors = [experiment.detector[0] for experiment in experiments]\n for pair in combinations(detectors, 2):\n determine_axis(pair, params)\n crystals = [experiment.crystal for experiment in experiments]\n goniometers = [experiment.goniometer for experiment in experiments]\n FUs = []\n for c, g in zip(crystals, goniometers):\n u = matrix.sqr(c.get_U())\n f = matrix.sqr(g.get_fixed_rotation())\n FUs.append(f * u)\n for pair in combinations(FUs, 2):\n R = pair[1] * pair[0].inverse()\n rot = r3_rotation_axis_and_angle_from_matrix(R)\n angle = rot.angle(deg=True)\n axis = matrix.col(rot.axis)\n if abs(angle) < 10:\n continue\n print('Axis: %8.5f %8.5f %8.5f' % axis.elems,\n f'angle: {angle:7.4f}')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass Script:\n <docstring token>\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
99,343 |
324b0e47b542477a70e02238bf8c3a224c620e55
|
import sys
from pylint.interfaces import IReporter
from pylint.reporters import BaseReporter
class CustomJSONReporter(BaseReporter):
"""Report messages and layouts in JSON."""
__implements__ = IReporter
name = 'json_custom'
extension = 'json_custom'
def __init__(self, output=sys.stdout):
BaseReporter.__init__(self, output)
self.messages = []
def handle_message(self, msg):
"""Manage message of different type and in the context of path."""
self.messages.append({
'type': msg.category,
'module': msg.module,
'obj': msg.obj,
'line': msg.line,
'column': msg.column,
'path': msg.path,
'symbol': msg.symbol,
'message': msg.msg,
'message-id': msg.msg_id,
})
def get_errors_json(self):
return self.messages
def display_messages(self, layout):
"""Don't do nothing in this reporter."""
def display_reports(self, layout): # pylint: disable=arguments-differ
"""Don't do nothing in this reporter."""
def _display(self, layout):
"""Don't do nothing."""
|
[
"import sys\nfrom pylint.interfaces import IReporter\nfrom pylint.reporters import BaseReporter\n\n\nclass CustomJSONReporter(BaseReporter):\n \"\"\"Report messages and layouts in JSON.\"\"\"\n\n __implements__ = IReporter\n name = 'json_custom'\n extension = 'json_custom'\n\n def __init__(self, output=sys.stdout):\n BaseReporter.__init__(self, output)\n self.messages = []\n\n def handle_message(self, msg):\n \"\"\"Manage message of different type and in the context of path.\"\"\"\n self.messages.append({\n 'type': msg.category,\n 'module': msg.module,\n 'obj': msg.obj,\n 'line': msg.line,\n 'column': msg.column,\n 'path': msg.path,\n 'symbol': msg.symbol,\n 'message': msg.msg,\n 'message-id': msg.msg_id,\n })\n\n def get_errors_json(self):\n return self.messages\n\n def display_messages(self, layout):\n \"\"\"Don't do nothing in this reporter.\"\"\"\n\n def display_reports(self, layout): # pylint: disable=arguments-differ\n \"\"\"Don't do nothing in this reporter.\"\"\"\n\n def _display(self, layout):\n \"\"\"Don't do nothing.\"\"\"\n",
"import sys\nfrom pylint.interfaces import IReporter\nfrom pylint.reporters import BaseReporter\n\n\nclass CustomJSONReporter(BaseReporter):\n \"\"\"Report messages and layouts in JSON.\"\"\"\n __implements__ = IReporter\n name = 'json_custom'\n extension = 'json_custom'\n\n def __init__(self, output=sys.stdout):\n BaseReporter.__init__(self, output)\n self.messages = []\n\n def handle_message(self, msg):\n \"\"\"Manage message of different type and in the context of path.\"\"\"\n self.messages.append({'type': msg.category, 'module': msg.module,\n 'obj': msg.obj, 'line': msg.line, 'column': msg.column, 'path':\n msg.path, 'symbol': msg.symbol, 'message': msg.msg,\n 'message-id': msg.msg_id})\n\n def get_errors_json(self):\n return self.messages\n\n def display_messages(self, layout):\n \"\"\"Don't do nothing in this reporter.\"\"\"\n\n def display_reports(self, layout):\n \"\"\"Don't do nothing in this reporter.\"\"\"\n\n def _display(self, layout):\n \"\"\"Don't do nothing.\"\"\"\n",
"<import token>\n\n\nclass CustomJSONReporter(BaseReporter):\n \"\"\"Report messages and layouts in JSON.\"\"\"\n __implements__ = IReporter\n name = 'json_custom'\n extension = 'json_custom'\n\n def __init__(self, output=sys.stdout):\n BaseReporter.__init__(self, output)\n self.messages = []\n\n def handle_message(self, msg):\n \"\"\"Manage message of different type and in the context of path.\"\"\"\n self.messages.append({'type': msg.category, 'module': msg.module,\n 'obj': msg.obj, 'line': msg.line, 'column': msg.column, 'path':\n msg.path, 'symbol': msg.symbol, 'message': msg.msg,\n 'message-id': msg.msg_id})\n\n def get_errors_json(self):\n return self.messages\n\n def display_messages(self, layout):\n \"\"\"Don't do nothing in this reporter.\"\"\"\n\n def display_reports(self, layout):\n \"\"\"Don't do nothing in this reporter.\"\"\"\n\n def _display(self, layout):\n \"\"\"Don't do nothing.\"\"\"\n",
"<import token>\n\n\nclass CustomJSONReporter(BaseReporter):\n <docstring token>\n __implements__ = IReporter\n name = 'json_custom'\n extension = 'json_custom'\n\n def __init__(self, output=sys.stdout):\n BaseReporter.__init__(self, output)\n self.messages = []\n\n def handle_message(self, msg):\n \"\"\"Manage message of different type and in the context of path.\"\"\"\n self.messages.append({'type': msg.category, 'module': msg.module,\n 'obj': msg.obj, 'line': msg.line, 'column': msg.column, 'path':\n msg.path, 'symbol': msg.symbol, 'message': msg.msg,\n 'message-id': msg.msg_id})\n\n def get_errors_json(self):\n return self.messages\n\n def display_messages(self, layout):\n \"\"\"Don't do nothing in this reporter.\"\"\"\n\n def display_reports(self, layout):\n \"\"\"Don't do nothing in this reporter.\"\"\"\n\n def _display(self, layout):\n \"\"\"Don't do nothing.\"\"\"\n",
"<import token>\n\n\nclass CustomJSONReporter(BaseReporter):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, output=sys.stdout):\n BaseReporter.__init__(self, output)\n self.messages = []\n\n def handle_message(self, msg):\n \"\"\"Manage message of different type and in the context of path.\"\"\"\n self.messages.append({'type': msg.category, 'module': msg.module,\n 'obj': msg.obj, 'line': msg.line, 'column': msg.column, 'path':\n msg.path, 'symbol': msg.symbol, 'message': msg.msg,\n 'message-id': msg.msg_id})\n\n def get_errors_json(self):\n return self.messages\n\n def display_messages(self, layout):\n \"\"\"Don't do nothing in this reporter.\"\"\"\n\n def display_reports(self, layout):\n \"\"\"Don't do nothing in this reporter.\"\"\"\n\n def _display(self, layout):\n \"\"\"Don't do nothing.\"\"\"\n",
"<import token>\n\n\nclass CustomJSONReporter(BaseReporter):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, output=sys.stdout):\n BaseReporter.__init__(self, output)\n self.messages = []\n\n def handle_message(self, msg):\n \"\"\"Manage message of different type and in the context of path.\"\"\"\n self.messages.append({'type': msg.category, 'module': msg.module,\n 'obj': msg.obj, 'line': msg.line, 'column': msg.column, 'path':\n msg.path, 'symbol': msg.symbol, 'message': msg.msg,\n 'message-id': msg.msg_id})\n\n def get_errors_json(self):\n return self.messages\n\n def display_messages(self, layout):\n \"\"\"Don't do nothing in this reporter.\"\"\"\n\n def display_reports(self, layout):\n \"\"\"Don't do nothing in this reporter.\"\"\"\n <function token>\n",
"<import token>\n\n\nclass CustomJSONReporter(BaseReporter):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, output=sys.stdout):\n BaseReporter.__init__(self, output)\n self.messages = []\n\n def handle_message(self, msg):\n \"\"\"Manage message of different type and in the context of path.\"\"\"\n self.messages.append({'type': msg.category, 'module': msg.module,\n 'obj': msg.obj, 'line': msg.line, 'column': msg.column, 'path':\n msg.path, 'symbol': msg.symbol, 'message': msg.msg,\n 'message-id': msg.msg_id})\n <function token>\n\n def display_messages(self, layout):\n \"\"\"Don't do nothing in this reporter.\"\"\"\n\n def display_reports(self, layout):\n \"\"\"Don't do nothing in this reporter.\"\"\"\n <function token>\n",
"<import token>\n\n\nclass CustomJSONReporter(BaseReporter):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, output=sys.stdout):\n BaseReporter.__init__(self, output)\n self.messages = []\n\n def handle_message(self, msg):\n \"\"\"Manage message of different type and in the context of path.\"\"\"\n self.messages.append({'type': msg.category, 'module': msg.module,\n 'obj': msg.obj, 'line': msg.line, 'column': msg.column, 'path':\n msg.path, 'symbol': msg.symbol, 'message': msg.msg,\n 'message-id': msg.msg_id})\n <function token>\n\n def display_messages(self, layout):\n \"\"\"Don't do nothing in this reporter.\"\"\"\n <function token>\n <function token>\n",
"<import token>\n\n\nclass CustomJSONReporter(BaseReporter):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, output=sys.stdout):\n BaseReporter.__init__(self, output)\n self.messages = []\n\n def handle_message(self, msg):\n \"\"\"Manage message of different type and in the context of path.\"\"\"\n self.messages.append({'type': msg.category, 'module': msg.module,\n 'obj': msg.obj, 'line': msg.line, 'column': msg.column, 'path':\n msg.path, 'symbol': msg.symbol, 'message': msg.msg,\n 'message-id': msg.msg_id})\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\nclass CustomJSONReporter(BaseReporter):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def handle_message(self, msg):\n \"\"\"Manage message of different type and in the context of path.\"\"\"\n self.messages.append({'type': msg.category, 'module': msg.module,\n 'obj': msg.obj, 'line': msg.line, 'column': msg.column, 'path':\n msg.path, 'symbol': msg.symbol, 'message': msg.msg,\n 'message-id': msg.msg_id})\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\nclass CustomJSONReporter(BaseReporter):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
99,344 |
19bf15197a120aea94e2b89b3af540c5263c780c
|
import cv2
import numpy as np
import random
import os
os.chdir('C:/Users/PC021/Downloads/05-컴퓨터비전-이미지파일/05-컴퓨터비전-이미지파일')
oldx = oldy = -1
def random_color():
    # Shuffle the values 0-255 (each repeated three times) and take the
    # first three as a random BGR colour.
    return tuple(sorted([i for i in range(256)] * 3, key=lambda x: random.random())[:3])
def random_size():
return random.randint(10,100)
def on_mouse(event, x, y, flags, param):
global oldx, oldy
if event == cv2.EVENT_LBUTTONDOWN:
oldx, oldy = x, y
elif event == cv2.EVENT_MOUSEMOVE:
if flags & cv2.EVENT_FLAG_LBUTTON:
cv2.line(img, (oldx, oldy), (x, y), (0, 0, 255), 4, cv2.LINE_AA)
cv2.imshow('image', img)
oldx, oldy = x, y
elif event == cv2.EVENT_LBUTTONDBLCLK:
        cv2.circle(img, (oldx, oldy), random_size(), random_color(), -1, cv2.LINE_4)
cv2.imshow('image', img)
oldx, oldy = x, y
img = cv2.imread('images/car.jpg', cv2.IMREAD_UNCHANGED)
cv2.imshow('image', img)
cv2.setMouseCallback('image', on_mouse, img)
cv2.waitKey()
cv2.destroyAllWindows()
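# Hedged follow-up (not in the original script): to keep the doodled result,
# one could write `img` out after the event loop ends, e.g.
#
#   cv2.imwrite('car_annotated.jpg', img)  # output file name is illustrative only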
|
[
"import cv2\nimport numpy as np\nimport random\nimport os\nos.chdir('C:/Users/PC021/Downloads/05-컴퓨터비전-이미지파일/05-컴퓨터비전-이미지파일')\noldx = oldy = -1\n\ndef random_color():\n return tuple(sorted([i for i in range(256)]*3, key=lambda x:random.random())[:3])\n\ndef random_size():\n return random.randint(10,100)\n\ndef on_mouse(event, x, y, flags, param):\n global oldx, oldy\n if event == cv2.EVENT_LBUTTONDOWN:\n oldx, oldy = x, y\n\n elif event == cv2.EVENT_MOUSEMOVE:\n if flags & cv2.EVENT_FLAG_LBUTTON:\n cv2.line(img, (oldx, oldy), (x, y), (0, 0, 255), 4, cv2.LINE_AA)\n cv2.imshow('image', img)\n oldx, oldy = x, y\n\n elif event == cv2.EVENT_LBUTTONDBLCLK:\n cv2.circle(img, (oldx, oldy),random_size(),random_color(),-1,cv2.LINE_4,)\n cv2.imshow('image', img)\n oldx, oldy = x, y\n\nimg = cv2.imread('images/car.jpg',cv2.IMREAD_UNCHANGED)\n\ncv2.imshow('image', img)\ncv2.setMouseCallback('image', on_mouse, img)\ncv2.waitKey()\ncv2.destroyAllWindows()\n",
"import cv2\nimport numpy as np\nimport random\nimport os\nos.chdir('C:/Users/PC021/Downloads/05-컴퓨터비전-이미지파일/05-컴퓨터비전-이미지파일')\noldx = oldy = -1\n\n\ndef random_color():\n return tuple(sorted([i for i in range(256)] * 3, key=lambda x: random.\n random())[:3])\n\n\ndef random_size():\n return random.randint(10, 100)\n\n\ndef on_mouse(event, x, y, flags, param):\n global oldx, oldy\n if event == cv2.EVENT_LBUTTONDOWN:\n oldx, oldy = x, y\n elif event == cv2.EVENT_MOUSEMOVE:\n if flags & cv2.EVENT_FLAG_LBUTTON:\n cv2.line(img, (oldx, oldy), (x, y), (0, 0, 255), 4, cv2.LINE_AA)\n cv2.imshow('image', img)\n oldx, oldy = x, y\n elif event == cv2.EVENT_LBUTTONDBLCLK:\n cv2.circle(img, (oldx, oldy), random_size(), random_color(), -1,\n cv2.LINE_4)\n cv2.imshow('image', img)\n oldx, oldy = x, y\n\n\nimg = cv2.imread('images/car.jpg', cv2.IMREAD_UNCHANGED)\ncv2.imshow('image', img)\ncv2.setMouseCallback('image', on_mouse, img)\ncv2.waitKey()\ncv2.destroyAllWindows()\n",
"<import token>\nos.chdir('C:/Users/PC021/Downloads/05-컴퓨터비전-이미지파일/05-컴퓨터비전-이미지파일')\noldx = oldy = -1\n\n\ndef random_color():\n return tuple(sorted([i for i in range(256)] * 3, key=lambda x: random.\n random())[:3])\n\n\ndef random_size():\n return random.randint(10, 100)\n\n\ndef on_mouse(event, x, y, flags, param):\n global oldx, oldy\n if event == cv2.EVENT_LBUTTONDOWN:\n oldx, oldy = x, y\n elif event == cv2.EVENT_MOUSEMOVE:\n if flags & cv2.EVENT_FLAG_LBUTTON:\n cv2.line(img, (oldx, oldy), (x, y), (0, 0, 255), 4, cv2.LINE_AA)\n cv2.imshow('image', img)\n oldx, oldy = x, y\n elif event == cv2.EVENT_LBUTTONDBLCLK:\n cv2.circle(img, (oldx, oldy), random_size(), random_color(), -1,\n cv2.LINE_4)\n cv2.imshow('image', img)\n oldx, oldy = x, y\n\n\nimg = cv2.imread('images/car.jpg', cv2.IMREAD_UNCHANGED)\ncv2.imshow('image', img)\ncv2.setMouseCallback('image', on_mouse, img)\ncv2.waitKey()\ncv2.destroyAllWindows()\n",
"<import token>\nos.chdir('C:/Users/PC021/Downloads/05-컴퓨터비전-이미지파일/05-컴퓨터비전-이미지파일')\n<assignment token>\n\n\ndef random_color():\n return tuple(sorted([i for i in range(256)] * 3, key=lambda x: random.\n random())[:3])\n\n\ndef random_size():\n return random.randint(10, 100)\n\n\ndef on_mouse(event, x, y, flags, param):\n global oldx, oldy\n if event == cv2.EVENT_LBUTTONDOWN:\n oldx, oldy = x, y\n elif event == cv2.EVENT_MOUSEMOVE:\n if flags & cv2.EVENT_FLAG_LBUTTON:\n cv2.line(img, (oldx, oldy), (x, y), (0, 0, 255), 4, cv2.LINE_AA)\n cv2.imshow('image', img)\n oldx, oldy = x, y\n elif event == cv2.EVENT_LBUTTONDBLCLK:\n cv2.circle(img, (oldx, oldy), random_size(), random_color(), -1,\n cv2.LINE_4)\n cv2.imshow('image', img)\n oldx, oldy = x, y\n\n\n<assignment token>\ncv2.imshow('image', img)\ncv2.setMouseCallback('image', on_mouse, img)\ncv2.waitKey()\ncv2.destroyAllWindows()\n",
"<import token>\n<code token>\n<assignment token>\n\n\ndef random_color():\n return tuple(sorted([i for i in range(256)] * 3, key=lambda x: random.\n random())[:3])\n\n\ndef random_size():\n return random.randint(10, 100)\n\n\ndef on_mouse(event, x, y, flags, param):\n global oldx, oldy\n if event == cv2.EVENT_LBUTTONDOWN:\n oldx, oldy = x, y\n elif event == cv2.EVENT_MOUSEMOVE:\n if flags & cv2.EVENT_FLAG_LBUTTON:\n cv2.line(img, (oldx, oldy), (x, y), (0, 0, 255), 4, cv2.LINE_AA)\n cv2.imshow('image', img)\n oldx, oldy = x, y\n elif event == cv2.EVENT_LBUTTONDBLCLK:\n cv2.circle(img, (oldx, oldy), random_size(), random_color(), -1,\n cv2.LINE_4)\n cv2.imshow('image', img)\n oldx, oldy = x, y\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<function token>\n\n\ndef random_size():\n return random.randint(10, 100)\n\n\ndef on_mouse(event, x, y, flags, param):\n global oldx, oldy\n if event == cv2.EVENT_LBUTTONDOWN:\n oldx, oldy = x, y\n elif event == cv2.EVENT_MOUSEMOVE:\n if flags & cv2.EVENT_FLAG_LBUTTON:\n cv2.line(img, (oldx, oldy), (x, y), (0, 0, 255), 4, cv2.LINE_AA)\n cv2.imshow('image', img)\n oldx, oldy = x, y\n elif event == cv2.EVENT_LBUTTONDBLCLK:\n cv2.circle(img, (oldx, oldy), random_size(), random_color(), -1,\n cv2.LINE_4)\n cv2.imshow('image', img)\n oldx, oldy = x, y\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n\n\ndef on_mouse(event, x, y, flags, param):\n global oldx, oldy\n if event == cv2.EVENT_LBUTTONDOWN:\n oldx, oldy = x, y\n elif event == cv2.EVENT_MOUSEMOVE:\n if flags & cv2.EVENT_FLAG_LBUTTON:\n cv2.line(img, (oldx, oldy), (x, y), (0, 0, 255), 4, cv2.LINE_AA)\n cv2.imshow('image', img)\n oldx, oldy = x, y\n elif event == cv2.EVENT_LBUTTONDBLCLK:\n cv2.circle(img, (oldx, oldy), random_size(), random_color(), -1,\n cv2.LINE_4)\n cv2.imshow('image', img)\n oldx, oldy = x, y\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<code token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
99,345 |
20bb22e6e5b0eb85f5836af4896bc31887a103b9
|
import os
import cv2
import glob
import random
import numpy as np
from pathlib import Path
from imutils import paths
from IPython.core.debugger import Tracer
char_dict = {"1": "a", "2": "b", "3": "c", "4": "d", "5": "e", "6": "f", "7": "g", "8": "h", "9": "i", "10": "j",
"11": "k", "12": "l", "13": "m", "14": "n", "15": "o", "16": "p", "17": "q", "18": "r", "19": "s",
"20": "t", "21": "u", "22": "v", "23": "w", "24": "x", "25": "y", "26": "z", "27": "A", "28": "B",
"29": "C", "30": "D", "31": "E", "32": "F", "33": "G", "34": "H", "35": "I", "36": "J", "37": "K",
"38": "L", "39": "M", "40": "N", "41": "O", "42": "P", "43": "Q", "44": "R", "45": "S", "46": "T",
"47": "U", "48": "V", "49": "W", "50": "X", "51": "Y", "52": "Z"}
train_dict = {"1": "a", "2": "b", "3": "c", "4": "d", "5": "e", "6": "f", "7": "g", "8": "h", "9": "i", "10": "j",
"11": "k", "12": "l", "13": "m", "14": "n", "15": "o", "16": "p", "17": "q", "18": "r", "19": "s",
"20": "t", "21": "u", "22": "v", "23": "w", "24": "x", "25": "y", "26": "z", "27": "\'", "28": "-","29":"&"}
img_h = 64
img_w = 400
def text_crop(img, threshold):
    '''
    Crop away the blank margins of the image.
    '''
ret, image_mask = cv2.threshold(img, threshold, 1, cv2.THRESH_BINARY_INV)
n = np.argwhere(image_mask == 1)
rows = np.unique([n[i][0] for i in range(n.shape[0])])
cols = np.unique([n[i][1] for i in range(n.shape[0])])
min_row = np.min(rows)
max_row = np.max(rows)
min_col = np.min(cols)
max_col = np.max(cols)
    image_crop = img[min_row: max_row + 1, min_col: max_col + 1]  # inclusive bounds
return image_crop
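# Caveat (added note, not in the original): if no pixel falls below
# `threshold`, np.argwhere returns an empty array and np.min raises
# ValueError; a defensive caller might check `image_mask.any()` first.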
def compute_padding_value(img_gray):
    '''
    Compute the padding value: the lowest grey level at which the
    cumulative grey-level histogram exceeds 0.8.
    '''
hist = cv2.calcHist([img_gray], [0], None, [256], [0, 256])
cdf_img = np.cumsum(hist)
cdf_hist = cdf_img / np.max(cdf_img)
padding_value = np.min(np.where(cdf_hist > 0.8)[0])
return padding_value
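# Worked intuition (illustrative, not in the original): for a scan whose
# pixels are roughly 85% light background at grey level 220, the cumulative
# histogram first exceeds 0.8 at or just below 220, so padding_value lands
# on the background shade, which is exactly what normalization_h pads with.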
def normalization_h(img):
    '''
    Normalize the image onto the fixed (img_h, img_w) canvas,
    padding with the estimated background value where needed.
    '''
padding_value = compute_padding_value(img)
h, w = img.shape[:2]
if h >= img_h and w >= img_w:
img_ = cv2.resize(img, (img_w, img_h))
elif h > img_h and w < img_w:
img = cv2.resize(img, (w, img_h))
pad_l = random.randint(0, img_w - w)
img_ = cv2.copyMakeBorder(img, 0, 0, pad_l, img_w - w - pad_l, cv2.BORDER_CONSTANT, value=int(padding_value))
img_ = cv2.resize(img_, (img_w, img_h))
elif h <= img_h and w <= img_w:
pad_top = random.randint(0, img_h - h)
pad_l = random.randint(0, img_w - w)
img_ = cv2.copyMakeBorder(img, pad_top, img_h - h - pad_top, pad_l, img_w - w - pad_l, cv2.BORDER_CONSTANT, value=int(padding_value))
img_ = cv2.resize(img_, (img_w, img_h))
elif h < img_h and w > img_w:
img = cv2.resize(img, (img_w, h))
pad_top = random.randint(0, img_h - h)
img_ = cv2.copyMakeBorder(img, pad_top, img_h - h - pad_top, 0, 0, cv2.BORDER_CONSTANT, value=int(padding_value))
img_ = cv2.resize(img_, (img_w, img_h))
return img_
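# Quick sanity sketch (assumption, not in the original): any input should
# come back at the fixed canvas size, e.g.
#
#   demo = np.full((40, 200), 230, dtype=np.uint8)   # hypothetical grey patch
#   assert normalization_h(demo).shape == (img_h, img_w)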
# data augmentation functions
def data_augment(img, background_path):
# if np.random.random() < 0.15:
# img = blur(img)
if np.random.random() < 0.25:
img = add_noise(img)
if np.random.random() < 0.95:
img = add_background(img, background_path)
return img
def resize_image(img):
    # note: these locals shadow the module-level img_h / img_w constants
    img_h, img_w = img.shape[:2]
scale = np.random.uniform(0.8, 1.2, 1)
h = int(img_h * scale)
w = int(img_w * scale)
img_resize = cv2.resize(img, (w, h))
return img_resize
def blur(img):
img = cv2.blur(img, (3, 3))
return img
def add_noise(img):
noise_value = np.random.randint(0, 50)
temp_x = np.random.randint(0, img.shape[0])
temp_y = np.random.randint(0, img.shape[1])
img[temp_x][temp_y] = noise_value
return img
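# Variant sketch (not in the original): add_noise flips a single pixel per
# call; a denser salt effect could simply loop, e.g.
#
#   for _ in range(50):        # 50 is an arbitrary illustrative count
#       img = add_noise(img)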
def add_background(img, background_path=None):
    '''
    Composite the binarized text image onto a randomly chosen background.
    '''
# file list
bg_images = sorted(glob.glob(os.path.join(background_path, '*.JPEG')))
bg_images += sorted(glob.glob(os.path.join(background_path, '*.jpg')))
bg_images += sorted(glob.glob(os.path.join(background_path, '*.png')))
    # binarize the text image
ret, image_gray_binary = cv2.threshold(img, 150, 1, cv2.THRESH_BINARY)
    # randomly choose one background image
    bg_img = random.choice(bg_images)
bg_image_gray = cv2.imread(bg_img, 0)
    # resize the background to the target canvas size
bg_image_gray_resize = cv2.resize(bg_image_gray, (img_w, img_h), interpolation=cv2.INTER_LINEAR)
background_image = cv2.multiply(image_gray_binary, bg_image_gray_resize)
return background_image
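# Defensive sketch (assumption, not in the original): picking from an empty
# bg_images list raises an exception, so a hardened add_background could
# start with
#
#   if not bg_images:
#       return img   # fall back to the plain text image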
if __name__ == '__main__':
image_path = r'E:\datasets\ocr_dataset\words\train3-11'
#background_path = r'E:\datasets\background1'
save_path = os.path.join(r'E:\datasets\ocr_dataset\words\words_data_1')
if not os.path.exists(save_path):
os.mkdir(save_path)
img_list = sorted(list(paths.list_images(image_path)))
file_index_lst = open(r'words_index_lst_1.txt', 'w', encoding='utf-8')
file_chars_lst = open(r'words_chars_lst_1.txt', 'w', encoding='utf-8')
for i, img_path in enumerate(img_list):
label_words = []
img = cv2.imread(img_path)
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
norm = normalization_h(img_gray)
#img_aug = data_augment(norm, background_path)
label = img_path.split(os.path.sep)[-1].split('-')[1:]
for w in label:
if '.jpg' not in w:
label_words.append(w)
else:
label_words.append(w[:-4])
label_index = ' '.join(label_words)
label_char = ' '.join([train_dict[p] for p in label_words])
name = save_path + '/' + '%08d' % i + '.png'
cv2.imwrite(name, norm)
file_index_lst.write(name.split(os.path.sep)[-1] + ' ' + label_index + '\n')
file_chars_lst.write(name.split(os.path.sep)[-1] + ' ' + label_char + '\n')
if i % 100 == 0:
print('{} has processed over!'.format(i))
file_index_lst.close()
file_chars_lst.close()
print('=' * 50)
    print('All word samples have been generated successfully!')
|
[
"import os\nimport cv2\nimport glob\nimport random\nimport numpy as np\nfrom pathlib import Path\nfrom imutils import paths\nfrom IPython.core.debugger import Tracer\n\nchar_dict = {\"1\": \"a\", \"2\": \"b\", \"3\": \"c\", \"4\": \"d\", \"5\": \"e\", \"6\": \"f\", \"7\": \"g\", \"8\": \"h\", \"9\": \"i\", \"10\": \"j\",\n \"11\": \"k\", \"12\": \"l\", \"13\": \"m\", \"14\": \"n\", \"15\": \"o\", \"16\": \"p\", \"17\": \"q\", \"18\": \"r\", \"19\": \"s\",\n \"20\": \"t\", \"21\": \"u\", \"22\": \"v\", \"23\": \"w\", \"24\": \"x\", \"25\": \"y\", \"26\": \"z\", \"27\": \"A\", \"28\": \"B\",\n \"29\": \"C\", \"30\": \"D\", \"31\": \"E\", \"32\": \"F\", \"33\": \"G\", \"34\": \"H\", \"35\": \"I\", \"36\": \"J\", \"37\": \"K\",\n \"38\": \"L\", \"39\": \"M\", \"40\": \"N\", \"41\": \"O\", \"42\": \"P\", \"43\": \"Q\", \"44\": \"R\", \"45\": \"S\", \"46\": \"T\",\n \"47\": \"U\", \"48\": \"V\", \"49\": \"W\", \"50\": \"X\", \"51\": \"Y\", \"52\": \"Z\"}\n\ntrain_dict = {\"1\": \"a\", \"2\": \"b\", \"3\": \"c\", \"4\": \"d\", \"5\": \"e\", \"6\": \"f\", \"7\": \"g\", \"8\": \"h\", \"9\": \"i\", \"10\": \"j\",\n \"11\": \"k\", \"12\": \"l\", \"13\": \"m\", \"14\": \"n\", \"15\": \"o\", \"16\": \"p\", \"17\": \"q\", \"18\": \"r\", \"19\": \"s\",\n \"20\": \"t\", \"21\": \"u\", \"22\": \"v\", \"23\": \"w\", \"24\": \"x\", \"25\": \"y\", \"26\": \"z\", \"27\": \"\\'\", \"28\": \"-\",\"29\":\"&\"}\n\nimg_h = 64\nimg_w = 400\n\ndef text_crop(img, threshold):\n '''\n 切除图像空白边缘部分\n '''\n ret, image_mask = cv2.threshold(img, threshold, 1, cv2.THRESH_BINARY_INV)\n n = np.argwhere(image_mask == 1)\n rows = np.unique([n[i][0] for i in range(n.shape[0])])\n cols = np.unique([n[i][1] for i in range(n.shape[0])])\n min_row = np.min(rows)\n max_row = np.max(rows)\n min_col = np.min(cols)\n max_col = np.max(cols)\n\n image_crop = img[min_row: max_row, min_col: max_col]\n return image_crop\n\n\ndef compute_padding_value(img_gray):\n\t'''\n\t计算padding的值\n\t取图像累积直方图中大于0.8处的值\n\t'''\n\thist = cv2.calcHist([img_gray], [0], None, [256], [0, 256])\n\tcdf_img = np.cumsum(hist)\n\tcdf_hist = cdf_img / np.max(cdf_img)\n\tpadding_value = np.min(np.where(cdf_hist > 0.8)[0])\n\t\n\treturn padding_value\n\t\ndef normalization_h(img):\n\t'''\n\t高度归一化\n\timg shape (32, w)\n\t'''\n\tpadding_value = compute_padding_value(img)\n\t\n\th, w = img.shape[:2]\n\n\tif h >= img_h and w >= img_w:\n\t\timg_ = cv2.resize(img, (img_w, img_h))\n\telif h > img_h and w < img_w:\n\t\timg = cv2.resize(img, (w, img_h))\n\t\tpad_l = random.randint(0, img_w - w)\n\t\timg_ = cv2.copyMakeBorder(img, 0, 0, pad_l, img_w - w - pad_l, cv2.BORDER_CONSTANT, value=int(padding_value))\n\t\timg_ = cv2.resize(img_, (img_w, img_h))\n\telif h <= img_h and w <= img_w:\n\t\tpad_top = random.randint(0, img_h - h)\n\t\tpad_l = random.randint(0, img_w - w)\n\t\timg_ = cv2.copyMakeBorder(img, pad_top, img_h - h - pad_top, pad_l, img_w - w - pad_l, cv2.BORDER_CONSTANT, value=int(padding_value))\n\t\timg_ = cv2.resize(img_, (img_w, img_h))\n\telif h < img_h and w > img_w:\n\t\timg = cv2.resize(img, (img_w, h))\n\t\tpad_top = random.randint(0, img_h - h)\n\t\timg_ = cv2.copyMakeBorder(img, pad_top, img_h - h - pad_top, 0, 0, cv2.BORDER_CONSTANT, value=int(padding_value))\n\t\timg_ = cv2.resize(img_, (img_w, img_h))\n\treturn img_\n\n\n# data augment functions\ndef data_augment(img, background_path):\n\t# if np.random.random() < 0.15:\n\t# \timg = blur(img)\n\tif np.random.random() < 0.25:\n\t\timg = add_noise(img)\n\tif np.random.random() < 0.95:\n\t\timg = 
add_background(img, background_path)\n\treturn img\n\n\ndef resize_image(img):\n\timg_h, img_w = img.shape[:2]\n\tscale = np.random.uniform(0.8, 1.2, 1)\n\th = int(img_h * scale)\n\tw = int(img_w * scale)\n\timg_resize = cv2.resize(img, (w, h))\n\treturn img_resize\n\n\ndef blur(img):\n\timg = cv2.blur(img, (3, 3))\n\treturn img\n\n\ndef add_noise(img):\n\tnoise_value = np.random.randint(0, 50)\n\ttemp_x = np.random.randint(0, img.shape[0])\n\ttemp_y = np.random.randint(0, img.shape[1])\n\timg[temp_x][temp_y] = noise_value\n\treturn img\n\n\ndef add_background(img, background_path=None):\n\t'''\n\t添加背景\n\t'''\n\t# file list\n\tbg_images = sorted(glob.glob(os.path.join(background_path, '*.JPEG')))\n\tbg_images += sorted(glob.glob(os.path.join(background_path, '*.jpg')))\n\tbg_images += sorted(glob.glob(os.path.join(background_path, '*.png')))\n\t\n\t# 二值化处理\n\tret, image_gray_binary = cv2.threshold(img, 150, 1, cv2.THRESH_BINARY)\n\t\n\t# random choose one background image\n\tbg_img = ''.join(random.sample(bg_images, 1))\n\tbg_image_gray = cv2.imread(bg_img, 0)\n\t\n\t# processing blur image\n\tbg_image_gray_resize = cv2.resize(bg_image_gray, (img_w, img_h), interpolation=cv2.INTER_LINEAR)\n\tbackground_image = cv2.multiply(image_gray_binary, bg_image_gray_resize)\n\treturn background_image\n\n\nif __name__ == '__main__':\n\timage_path = r'E:\\datasets\\ocr_dataset\\words\\train3-11'\n\t#background_path = r'E:\\datasets\\background1'\n\tsave_path = os.path.join(r'E:\\datasets\\ocr_dataset\\words\\words_data_1')\n\t\n\tif not os.path.exists(save_path):\n\t\tos.mkdir(save_path)\n\t\n\timg_list = sorted(list(paths.list_images(image_path)))\n\t\n\tfile_index_lst = open(r'words_index_lst_1.txt', 'w', encoding='utf-8')\n\tfile_chars_lst = open(r'words_chars_lst_1.txt', 'w', encoding='utf-8')\n\t\n\n\tfor i, img_path in enumerate(img_list):\n\t\tlabel_words = []\n\t\timg = cv2.imread(img_path)\n\t\timg_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\t\tnorm = normalization_h(img_gray)\n\t\t#img_aug = data_augment(norm, background_path)\n\t\tlabel = img_path.split(os.path.sep)[-1].split('-')[1:]\n\t\t\n\t\tfor w in label:\n\t\t\tif '.jpg' not in w:\n\t\t\t\tlabel_words.append(w)\n\t\t\telse:\n\t\t\t\tlabel_words.append(w[:-4])\n\t\t\t\t\n\t\tlabel_index = ' '.join(label_words)\n\t\tlabel_char = ' '.join([train_dict[p] for p in label_words])\n\t\t\n\t\tname = save_path + '/' + '%08d' % i + '.png'\n\t\tcv2.imwrite(name, norm)\n\t\t\n\t\tfile_index_lst.write(name.split(os.path.sep)[-1] + ' ' + label_index + '\\n')\n\t\tfile_chars_lst.write(name.split(os.path.sep)[-1] + ' ' + label_char + '\\n')\n\t\t\n\t\tif i % 100 == 0:\n\t\t\tprint('{} has processed over!'.format(i))\n\t\t\t\n\tfile_index_lst.close()\n\tfile_chars_lst.close()\n\t\n\tprint('=' * 50)\n\tprint('All words samples have generated sucessfully!')\n\t",
"import os\nimport cv2\nimport glob\nimport random\nimport numpy as np\nfrom pathlib import Path\nfrom imutils import paths\nfrom IPython.core.debugger import Tracer\nchar_dict = {'1': 'a', '2': 'b', '3': 'c', '4': 'd', '5': 'e', '6': 'f',\n '7': 'g', '8': 'h', '9': 'i', '10': 'j', '11': 'k', '12': 'l', '13':\n 'm', '14': 'n', '15': 'o', '16': 'p', '17': 'q', '18': 'r', '19': 's',\n '20': 't', '21': 'u', '22': 'v', '23': 'w', '24': 'x', '25': 'y', '26':\n 'z', '27': 'A', '28': 'B', '29': 'C', '30': 'D', '31': 'E', '32': 'F',\n '33': 'G', '34': 'H', '35': 'I', '36': 'J', '37': 'K', '38': 'L', '39':\n 'M', '40': 'N', '41': 'O', '42': 'P', '43': 'Q', '44': 'R', '45': 'S',\n '46': 'T', '47': 'U', '48': 'V', '49': 'W', '50': 'X', '51': 'Y', '52': 'Z'\n }\ntrain_dict = {'1': 'a', '2': 'b', '3': 'c', '4': 'd', '5': 'e', '6': 'f',\n '7': 'g', '8': 'h', '9': 'i', '10': 'j', '11': 'k', '12': 'l', '13':\n 'm', '14': 'n', '15': 'o', '16': 'p', '17': 'q', '18': 'r', '19': 's',\n '20': 't', '21': 'u', '22': 'v', '23': 'w', '24': 'x', '25': 'y', '26':\n 'z', '27': \"'\", '28': '-', '29': '&'}\nimg_h = 64\nimg_w = 400\n\n\ndef text_crop(img, threshold):\n \"\"\"\n 切除图像空白边缘部分\n \"\"\"\n ret, image_mask = cv2.threshold(img, threshold, 1, cv2.THRESH_BINARY_INV)\n n = np.argwhere(image_mask == 1)\n rows = np.unique([n[i][0] for i in range(n.shape[0])])\n cols = np.unique([n[i][1] for i in range(n.shape[0])])\n min_row = np.min(rows)\n max_row = np.max(rows)\n min_col = np.min(cols)\n max_col = np.max(cols)\n image_crop = img[min_row:max_row, min_col:max_col]\n return image_crop\n\n\ndef compute_padding_value(img_gray):\n \"\"\"\n\t计算padding的值\n\t取图像累积直方图中大于0.8处的值\n\t\"\"\"\n hist = cv2.calcHist([img_gray], [0], None, [256], [0, 256])\n cdf_img = np.cumsum(hist)\n cdf_hist = cdf_img / np.max(cdf_img)\n padding_value = np.min(np.where(cdf_hist > 0.8)[0])\n return padding_value\n\n\ndef normalization_h(img):\n \"\"\"\n\t高度归一化\n\timg shape (32, w)\n\t\"\"\"\n padding_value = compute_padding_value(img)\n h, w = img.shape[:2]\n if h >= img_h and w >= img_w:\n img_ = cv2.resize(img, (img_w, img_h))\n elif h > img_h and w < img_w:\n img = cv2.resize(img, (w, img_h))\n pad_l = random.randint(0, img_w - w)\n img_ = cv2.copyMakeBorder(img, 0, 0, pad_l, img_w - w - pad_l, cv2.\n BORDER_CONSTANT, value=int(padding_value))\n img_ = cv2.resize(img_, (img_w, img_h))\n elif h <= img_h and w <= img_w:\n pad_top = random.randint(0, img_h - h)\n pad_l = random.randint(0, img_w - w)\n img_ = cv2.copyMakeBorder(img, pad_top, img_h - h - pad_top, pad_l,\n img_w - w - pad_l, cv2.BORDER_CONSTANT, value=int(padding_value))\n img_ = cv2.resize(img_, (img_w, img_h))\n elif h < img_h and w > img_w:\n img = cv2.resize(img, (img_w, h))\n pad_top = random.randint(0, img_h - h)\n img_ = cv2.copyMakeBorder(img, pad_top, img_h - h - pad_top, 0, 0,\n cv2.BORDER_CONSTANT, value=int(padding_value))\n img_ = cv2.resize(img_, (img_w, img_h))\n return img_\n\n\ndef data_augment(img, background_path):\n if np.random.random() < 0.25:\n img = add_noise(img)\n if np.random.random() < 0.95:\n img = add_background(img, background_path)\n return img\n\n\ndef resize_image(img):\n img_h, img_w = img.shape[:2]\n scale = np.random.uniform(0.8, 1.2, 1)\n h = int(img_h * scale)\n w = int(img_w * scale)\n img_resize = cv2.resize(img, (w, h))\n return img_resize\n\n\ndef blur(img):\n img = cv2.blur(img, (3, 3))\n return img\n\n\ndef add_noise(img):\n noise_value = np.random.randint(0, 50)\n temp_x = np.random.randint(0, img.shape[0])\n temp_y = 
np.random.randint(0, img.shape[1])\n img[temp_x][temp_y] = noise_value\n return img\n\n\ndef add_background(img, background_path=None):\n \"\"\"\n\t添加背景\n\t\"\"\"\n bg_images = sorted(glob.glob(os.path.join(background_path, '*.JPEG')))\n bg_images += sorted(glob.glob(os.path.join(background_path, '*.jpg')))\n bg_images += sorted(glob.glob(os.path.join(background_path, '*.png')))\n ret, image_gray_binary = cv2.threshold(img, 150, 1, cv2.THRESH_BINARY)\n bg_img = ''.join(random.sample(bg_images, 1))\n bg_image_gray = cv2.imread(bg_img, 0)\n bg_image_gray_resize = cv2.resize(bg_image_gray, (img_w, img_h),\n interpolation=cv2.INTER_LINEAR)\n background_image = cv2.multiply(image_gray_binary, bg_image_gray_resize)\n return background_image\n\n\nif __name__ == '__main__':\n image_path = 'E:\\\\datasets\\\\ocr_dataset\\\\words\\\\train3-11'\n save_path = os.path.join('E:\\\\datasets\\\\ocr_dataset\\\\words\\\\words_data_1')\n if not os.path.exists(save_path):\n os.mkdir(save_path)\n img_list = sorted(list(paths.list_images(image_path)))\n file_index_lst = open('words_index_lst_1.txt', 'w', encoding='utf-8')\n file_chars_lst = open('words_chars_lst_1.txt', 'w', encoding='utf-8')\n for i, img_path in enumerate(img_list):\n label_words = []\n img = cv2.imread(img_path)\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n norm = normalization_h(img_gray)\n label = img_path.split(os.path.sep)[-1].split('-')[1:]\n for w in label:\n if '.jpg' not in w:\n label_words.append(w)\n else:\n label_words.append(w[:-4])\n label_index = ' '.join(label_words)\n label_char = ' '.join([train_dict[p] for p in label_words])\n name = save_path + '/' + '%08d' % i + '.png'\n cv2.imwrite(name, norm)\n file_index_lst.write(name.split(os.path.sep)[-1] + ' ' +\n label_index + '\\n')\n file_chars_lst.write(name.split(os.path.sep)[-1] + ' ' + label_char +\n '\\n')\n if i % 100 == 0:\n print('{} has processed over!'.format(i))\n file_index_lst.close()\n file_chars_lst.close()\n print('=' * 50)\n print('All words samples have generated sucessfully!')\n",
"<import token>\nchar_dict = {'1': 'a', '2': 'b', '3': 'c', '4': 'd', '5': 'e', '6': 'f',\n '7': 'g', '8': 'h', '9': 'i', '10': 'j', '11': 'k', '12': 'l', '13':\n 'm', '14': 'n', '15': 'o', '16': 'p', '17': 'q', '18': 'r', '19': 's',\n '20': 't', '21': 'u', '22': 'v', '23': 'w', '24': 'x', '25': 'y', '26':\n 'z', '27': 'A', '28': 'B', '29': 'C', '30': 'D', '31': 'E', '32': 'F',\n '33': 'G', '34': 'H', '35': 'I', '36': 'J', '37': 'K', '38': 'L', '39':\n 'M', '40': 'N', '41': 'O', '42': 'P', '43': 'Q', '44': 'R', '45': 'S',\n '46': 'T', '47': 'U', '48': 'V', '49': 'W', '50': 'X', '51': 'Y', '52': 'Z'\n }\ntrain_dict = {'1': 'a', '2': 'b', '3': 'c', '4': 'd', '5': 'e', '6': 'f',\n '7': 'g', '8': 'h', '9': 'i', '10': 'j', '11': 'k', '12': 'l', '13':\n 'm', '14': 'n', '15': 'o', '16': 'p', '17': 'q', '18': 'r', '19': 's',\n '20': 't', '21': 'u', '22': 'v', '23': 'w', '24': 'x', '25': 'y', '26':\n 'z', '27': \"'\", '28': '-', '29': '&'}\nimg_h = 64\nimg_w = 400\n\n\ndef text_crop(img, threshold):\n \"\"\"\n 切除图像空白边缘部分\n \"\"\"\n ret, image_mask = cv2.threshold(img, threshold, 1, cv2.THRESH_BINARY_INV)\n n = np.argwhere(image_mask == 1)\n rows = np.unique([n[i][0] for i in range(n.shape[0])])\n cols = np.unique([n[i][1] for i in range(n.shape[0])])\n min_row = np.min(rows)\n max_row = np.max(rows)\n min_col = np.min(cols)\n max_col = np.max(cols)\n image_crop = img[min_row:max_row, min_col:max_col]\n return image_crop\n\n\ndef compute_padding_value(img_gray):\n \"\"\"\n\t计算padding的值\n\t取图像累积直方图中大于0.8处的值\n\t\"\"\"\n hist = cv2.calcHist([img_gray], [0], None, [256], [0, 256])\n cdf_img = np.cumsum(hist)\n cdf_hist = cdf_img / np.max(cdf_img)\n padding_value = np.min(np.where(cdf_hist > 0.8)[0])\n return padding_value\n\n\ndef normalization_h(img):\n \"\"\"\n\t高度归一化\n\timg shape (32, w)\n\t\"\"\"\n padding_value = compute_padding_value(img)\n h, w = img.shape[:2]\n if h >= img_h and w >= img_w:\n img_ = cv2.resize(img, (img_w, img_h))\n elif h > img_h and w < img_w:\n img = cv2.resize(img, (w, img_h))\n pad_l = random.randint(0, img_w - w)\n img_ = cv2.copyMakeBorder(img, 0, 0, pad_l, img_w - w - pad_l, cv2.\n BORDER_CONSTANT, value=int(padding_value))\n img_ = cv2.resize(img_, (img_w, img_h))\n elif h <= img_h and w <= img_w:\n pad_top = random.randint(0, img_h - h)\n pad_l = random.randint(0, img_w - w)\n img_ = cv2.copyMakeBorder(img, pad_top, img_h - h - pad_top, pad_l,\n img_w - w - pad_l, cv2.BORDER_CONSTANT, value=int(padding_value))\n img_ = cv2.resize(img_, (img_w, img_h))\n elif h < img_h and w > img_w:\n img = cv2.resize(img, (img_w, h))\n pad_top = random.randint(0, img_h - h)\n img_ = cv2.copyMakeBorder(img, pad_top, img_h - h - pad_top, 0, 0,\n cv2.BORDER_CONSTANT, value=int(padding_value))\n img_ = cv2.resize(img_, (img_w, img_h))\n return img_\n\n\ndef data_augment(img, background_path):\n if np.random.random() < 0.25:\n img = add_noise(img)\n if np.random.random() < 0.95:\n img = add_background(img, background_path)\n return img\n\n\ndef resize_image(img):\n img_h, img_w = img.shape[:2]\n scale = np.random.uniform(0.8, 1.2, 1)\n h = int(img_h * scale)\n w = int(img_w * scale)\n img_resize = cv2.resize(img, (w, h))\n return img_resize\n\n\ndef blur(img):\n img = cv2.blur(img, (3, 3))\n return img\n\n\ndef add_noise(img):\n noise_value = np.random.randint(0, 50)\n temp_x = np.random.randint(0, img.shape[0])\n temp_y = np.random.randint(0, img.shape[1])\n img[temp_x][temp_y] = noise_value\n return img\n\n\ndef add_background(img, background_path=None):\n \"\"\"\n\t添加背景\n\t\"\"\"\n 
bg_images = sorted(glob.glob(os.path.join(background_path, '*.JPEG')))\n bg_images += sorted(glob.glob(os.path.join(background_path, '*.jpg')))\n bg_images += sorted(glob.glob(os.path.join(background_path, '*.png')))\n ret, image_gray_binary = cv2.threshold(img, 150, 1, cv2.THRESH_BINARY)\n bg_img = ''.join(random.sample(bg_images, 1))\n bg_image_gray = cv2.imread(bg_img, 0)\n bg_image_gray_resize = cv2.resize(bg_image_gray, (img_w, img_h),\n interpolation=cv2.INTER_LINEAR)\n background_image = cv2.multiply(image_gray_binary, bg_image_gray_resize)\n return background_image\n\n\nif __name__ == '__main__':\n image_path = 'E:\\\\datasets\\\\ocr_dataset\\\\words\\\\train3-11'\n save_path = os.path.join('E:\\\\datasets\\\\ocr_dataset\\\\words\\\\words_data_1')\n if not os.path.exists(save_path):\n os.mkdir(save_path)\n img_list = sorted(list(paths.list_images(image_path)))\n file_index_lst = open('words_index_lst_1.txt', 'w', encoding='utf-8')\n file_chars_lst = open('words_chars_lst_1.txt', 'w', encoding='utf-8')\n for i, img_path in enumerate(img_list):\n label_words = []\n img = cv2.imread(img_path)\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n norm = normalization_h(img_gray)\n label = img_path.split(os.path.sep)[-1].split('-')[1:]\n for w in label:\n if '.jpg' not in w:\n label_words.append(w)\n else:\n label_words.append(w[:-4])\n label_index = ' '.join(label_words)\n label_char = ' '.join([train_dict[p] for p in label_words])\n name = save_path + '/' + '%08d' % i + '.png'\n cv2.imwrite(name, norm)\n file_index_lst.write(name.split(os.path.sep)[-1] + ' ' +\n label_index + '\\n')\n file_chars_lst.write(name.split(os.path.sep)[-1] + ' ' + label_char +\n '\\n')\n if i % 100 == 0:\n print('{} has processed over!'.format(i))\n file_index_lst.close()\n file_chars_lst.close()\n print('=' * 50)\n print('All words samples have generated sucessfully!')\n",
"<import token>\n<assignment token>\n\n\ndef text_crop(img, threshold):\n \"\"\"\n 切除图像空白边缘部分\n \"\"\"\n ret, image_mask = cv2.threshold(img, threshold, 1, cv2.THRESH_BINARY_INV)\n n = np.argwhere(image_mask == 1)\n rows = np.unique([n[i][0] for i in range(n.shape[0])])\n cols = np.unique([n[i][1] for i in range(n.shape[0])])\n min_row = np.min(rows)\n max_row = np.max(rows)\n min_col = np.min(cols)\n max_col = np.max(cols)\n image_crop = img[min_row:max_row, min_col:max_col]\n return image_crop\n\n\ndef compute_padding_value(img_gray):\n \"\"\"\n\t计算padding的值\n\t取图像累积直方图中大于0.8处的值\n\t\"\"\"\n hist = cv2.calcHist([img_gray], [0], None, [256], [0, 256])\n cdf_img = np.cumsum(hist)\n cdf_hist = cdf_img / np.max(cdf_img)\n padding_value = np.min(np.where(cdf_hist > 0.8)[0])\n return padding_value\n\n\ndef normalization_h(img):\n \"\"\"\n\t高度归一化\n\timg shape (32, w)\n\t\"\"\"\n padding_value = compute_padding_value(img)\n h, w = img.shape[:2]\n if h >= img_h and w >= img_w:\n img_ = cv2.resize(img, (img_w, img_h))\n elif h > img_h and w < img_w:\n img = cv2.resize(img, (w, img_h))\n pad_l = random.randint(0, img_w - w)\n img_ = cv2.copyMakeBorder(img, 0, 0, pad_l, img_w - w - pad_l, cv2.\n BORDER_CONSTANT, value=int(padding_value))\n img_ = cv2.resize(img_, (img_w, img_h))\n elif h <= img_h and w <= img_w:\n pad_top = random.randint(0, img_h - h)\n pad_l = random.randint(0, img_w - w)\n img_ = cv2.copyMakeBorder(img, pad_top, img_h - h - pad_top, pad_l,\n img_w - w - pad_l, cv2.BORDER_CONSTANT, value=int(padding_value))\n img_ = cv2.resize(img_, (img_w, img_h))\n elif h < img_h and w > img_w:\n img = cv2.resize(img, (img_w, h))\n pad_top = random.randint(0, img_h - h)\n img_ = cv2.copyMakeBorder(img, pad_top, img_h - h - pad_top, 0, 0,\n cv2.BORDER_CONSTANT, value=int(padding_value))\n img_ = cv2.resize(img_, (img_w, img_h))\n return img_\n\n\ndef data_augment(img, background_path):\n if np.random.random() < 0.25:\n img = add_noise(img)\n if np.random.random() < 0.95:\n img = add_background(img, background_path)\n return img\n\n\ndef resize_image(img):\n img_h, img_w = img.shape[:2]\n scale = np.random.uniform(0.8, 1.2, 1)\n h = int(img_h * scale)\n w = int(img_w * scale)\n img_resize = cv2.resize(img, (w, h))\n return img_resize\n\n\ndef blur(img):\n img = cv2.blur(img, (3, 3))\n return img\n\n\ndef add_noise(img):\n noise_value = np.random.randint(0, 50)\n temp_x = np.random.randint(0, img.shape[0])\n temp_y = np.random.randint(0, img.shape[1])\n img[temp_x][temp_y] = noise_value\n return img\n\n\ndef add_background(img, background_path=None):\n \"\"\"\n\t添加背景\n\t\"\"\"\n bg_images = sorted(glob.glob(os.path.join(background_path, '*.JPEG')))\n bg_images += sorted(glob.glob(os.path.join(background_path, '*.jpg')))\n bg_images += sorted(glob.glob(os.path.join(background_path, '*.png')))\n ret, image_gray_binary = cv2.threshold(img, 150, 1, cv2.THRESH_BINARY)\n bg_img = ''.join(random.sample(bg_images, 1))\n bg_image_gray = cv2.imread(bg_img, 0)\n bg_image_gray_resize = cv2.resize(bg_image_gray, (img_w, img_h),\n interpolation=cv2.INTER_LINEAR)\n background_image = cv2.multiply(image_gray_binary, bg_image_gray_resize)\n return background_image\n\n\nif __name__ == '__main__':\n image_path = 'E:\\\\datasets\\\\ocr_dataset\\\\words\\\\train3-11'\n save_path = os.path.join('E:\\\\datasets\\\\ocr_dataset\\\\words\\\\words_data_1')\n if not os.path.exists(save_path):\n os.mkdir(save_path)\n img_list = sorted(list(paths.list_images(image_path)))\n file_index_lst = open('words_index_lst_1.txt', 'w', 
encoding='utf-8')\n file_chars_lst = open('words_chars_lst_1.txt', 'w', encoding='utf-8')\n for i, img_path in enumerate(img_list):\n label_words = []\n img = cv2.imread(img_path)\n img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n norm = normalization_h(img_gray)\n label = img_path.split(os.path.sep)[-1].split('-')[1:]\n for w in label:\n if '.jpg' not in w:\n label_words.append(w)\n else:\n label_words.append(w[:-4])\n label_index = ' '.join(label_words)\n label_char = ' '.join([train_dict[p] for p in label_words])\n name = save_path + '/' + '%08d' % i + '.png'\n cv2.imwrite(name, norm)\n file_index_lst.write(name.split(os.path.sep)[-1] + ' ' +\n label_index + '\\n')\n file_chars_lst.write(name.split(os.path.sep)[-1] + ' ' + label_char +\n '\\n')\n if i % 100 == 0:\n print('{} has processed over!'.format(i))\n file_index_lst.close()\n file_chars_lst.close()\n print('=' * 50)\n print('All words samples have generated sucessfully!')\n",
"<import token>\n<assignment token>\n\n\ndef text_crop(img, threshold):\n \"\"\"\n 切除图像空白边缘部分\n \"\"\"\n ret, image_mask = cv2.threshold(img, threshold, 1, cv2.THRESH_BINARY_INV)\n n = np.argwhere(image_mask == 1)\n rows = np.unique([n[i][0] for i in range(n.shape[0])])\n cols = np.unique([n[i][1] for i in range(n.shape[0])])\n min_row = np.min(rows)\n max_row = np.max(rows)\n min_col = np.min(cols)\n max_col = np.max(cols)\n image_crop = img[min_row:max_row, min_col:max_col]\n return image_crop\n\n\ndef compute_padding_value(img_gray):\n \"\"\"\n\t计算padding的值\n\t取图像累积直方图中大于0.8处的值\n\t\"\"\"\n hist = cv2.calcHist([img_gray], [0], None, [256], [0, 256])\n cdf_img = np.cumsum(hist)\n cdf_hist = cdf_img / np.max(cdf_img)\n padding_value = np.min(np.where(cdf_hist > 0.8)[0])\n return padding_value\n\n\ndef normalization_h(img):\n \"\"\"\n\t高度归一化\n\timg shape (32, w)\n\t\"\"\"\n padding_value = compute_padding_value(img)\n h, w = img.shape[:2]\n if h >= img_h and w >= img_w:\n img_ = cv2.resize(img, (img_w, img_h))\n elif h > img_h and w < img_w:\n img = cv2.resize(img, (w, img_h))\n pad_l = random.randint(0, img_w - w)\n img_ = cv2.copyMakeBorder(img, 0, 0, pad_l, img_w - w - pad_l, cv2.\n BORDER_CONSTANT, value=int(padding_value))\n img_ = cv2.resize(img_, (img_w, img_h))\n elif h <= img_h and w <= img_w:\n pad_top = random.randint(0, img_h - h)\n pad_l = random.randint(0, img_w - w)\n img_ = cv2.copyMakeBorder(img, pad_top, img_h - h - pad_top, pad_l,\n img_w - w - pad_l, cv2.BORDER_CONSTANT, value=int(padding_value))\n img_ = cv2.resize(img_, (img_w, img_h))\n elif h < img_h and w > img_w:\n img = cv2.resize(img, (img_w, h))\n pad_top = random.randint(0, img_h - h)\n img_ = cv2.copyMakeBorder(img, pad_top, img_h - h - pad_top, 0, 0,\n cv2.BORDER_CONSTANT, value=int(padding_value))\n img_ = cv2.resize(img_, (img_w, img_h))\n return img_\n\n\ndef data_augment(img, background_path):\n if np.random.random() < 0.25:\n img = add_noise(img)\n if np.random.random() < 0.95:\n img = add_background(img, background_path)\n return img\n\n\ndef resize_image(img):\n img_h, img_w = img.shape[:2]\n scale = np.random.uniform(0.8, 1.2, 1)\n h = int(img_h * scale)\n w = int(img_w * scale)\n img_resize = cv2.resize(img, (w, h))\n return img_resize\n\n\ndef blur(img):\n img = cv2.blur(img, (3, 3))\n return img\n\n\ndef add_noise(img):\n noise_value = np.random.randint(0, 50)\n temp_x = np.random.randint(0, img.shape[0])\n temp_y = np.random.randint(0, img.shape[1])\n img[temp_x][temp_y] = noise_value\n return img\n\n\ndef add_background(img, background_path=None):\n \"\"\"\n\t添加背景\n\t\"\"\"\n bg_images = sorted(glob.glob(os.path.join(background_path, '*.JPEG')))\n bg_images += sorted(glob.glob(os.path.join(background_path, '*.jpg')))\n bg_images += sorted(glob.glob(os.path.join(background_path, '*.png')))\n ret, image_gray_binary = cv2.threshold(img, 150, 1, cv2.THRESH_BINARY)\n bg_img = ''.join(random.sample(bg_images, 1))\n bg_image_gray = cv2.imread(bg_img, 0)\n bg_image_gray_resize = cv2.resize(bg_image_gray, (img_w, img_h),\n interpolation=cv2.INTER_LINEAR)\n background_image = cv2.multiply(image_gray_binary, bg_image_gray_resize)\n return background_image\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef text_crop(img, threshold):\n \"\"\"\n 切除图像空白边缘部分\n \"\"\"\n ret, image_mask = cv2.threshold(img, threshold, 1, cv2.THRESH_BINARY_INV)\n n = np.argwhere(image_mask == 1)\n rows = np.unique([n[i][0] for i in range(n.shape[0])])\n cols = np.unique([n[i][1] for i in range(n.shape[0])])\n min_row = np.min(rows)\n max_row = np.max(rows)\n min_col = np.min(cols)\n max_col = np.max(cols)\n image_crop = img[min_row:max_row, min_col:max_col]\n return image_crop\n\n\ndef compute_padding_value(img_gray):\n \"\"\"\n\t计算padding的值\n\t取图像累积直方图中大于0.8处的值\n\t\"\"\"\n hist = cv2.calcHist([img_gray], [0], None, [256], [0, 256])\n cdf_img = np.cumsum(hist)\n cdf_hist = cdf_img / np.max(cdf_img)\n padding_value = np.min(np.where(cdf_hist > 0.8)[0])\n return padding_value\n\n\ndef normalization_h(img):\n \"\"\"\n\t高度归一化\n\timg shape (32, w)\n\t\"\"\"\n padding_value = compute_padding_value(img)\n h, w = img.shape[:2]\n if h >= img_h and w >= img_w:\n img_ = cv2.resize(img, (img_w, img_h))\n elif h > img_h and w < img_w:\n img = cv2.resize(img, (w, img_h))\n pad_l = random.randint(0, img_w - w)\n img_ = cv2.copyMakeBorder(img, 0, 0, pad_l, img_w - w - pad_l, cv2.\n BORDER_CONSTANT, value=int(padding_value))\n img_ = cv2.resize(img_, (img_w, img_h))\n elif h <= img_h and w <= img_w:\n pad_top = random.randint(0, img_h - h)\n pad_l = random.randint(0, img_w - w)\n img_ = cv2.copyMakeBorder(img, pad_top, img_h - h - pad_top, pad_l,\n img_w - w - pad_l, cv2.BORDER_CONSTANT, value=int(padding_value))\n img_ = cv2.resize(img_, (img_w, img_h))\n elif h < img_h and w > img_w:\n img = cv2.resize(img, (img_w, h))\n pad_top = random.randint(0, img_h - h)\n img_ = cv2.copyMakeBorder(img, pad_top, img_h - h - pad_top, 0, 0,\n cv2.BORDER_CONSTANT, value=int(padding_value))\n img_ = cv2.resize(img_, (img_w, img_h))\n return img_\n\n\ndef data_augment(img, background_path):\n if np.random.random() < 0.25:\n img = add_noise(img)\n if np.random.random() < 0.95:\n img = add_background(img, background_path)\n return img\n\n\ndef resize_image(img):\n img_h, img_w = img.shape[:2]\n scale = np.random.uniform(0.8, 1.2, 1)\n h = int(img_h * scale)\n w = int(img_w * scale)\n img_resize = cv2.resize(img, (w, h))\n return img_resize\n\n\ndef blur(img):\n img = cv2.blur(img, (3, 3))\n return img\n\n\ndef add_noise(img):\n noise_value = np.random.randint(0, 50)\n temp_x = np.random.randint(0, img.shape[0])\n temp_y = np.random.randint(0, img.shape[1])\n img[temp_x][temp_y] = noise_value\n return img\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef text_crop(img, threshold):\n \"\"\"\n 切除图像空白边缘部分\n \"\"\"\n ret, image_mask = cv2.threshold(img, threshold, 1, cv2.THRESH_BINARY_INV)\n n = np.argwhere(image_mask == 1)\n rows = np.unique([n[i][0] for i in range(n.shape[0])])\n cols = np.unique([n[i][1] for i in range(n.shape[0])])\n min_row = np.min(rows)\n max_row = np.max(rows)\n min_col = np.min(cols)\n max_col = np.max(cols)\n image_crop = img[min_row:max_row, min_col:max_col]\n return image_crop\n\n\ndef compute_padding_value(img_gray):\n \"\"\"\n\t计算padding的值\n\t取图像累积直方图中大于0.8处的值\n\t\"\"\"\n hist = cv2.calcHist([img_gray], [0], None, [256], [0, 256])\n cdf_img = np.cumsum(hist)\n cdf_hist = cdf_img / np.max(cdf_img)\n padding_value = np.min(np.where(cdf_hist > 0.8)[0])\n return padding_value\n\n\n<function token>\n\n\ndef data_augment(img, background_path):\n if np.random.random() < 0.25:\n img = add_noise(img)\n if np.random.random() < 0.95:\n img = add_background(img, background_path)\n return img\n\n\ndef resize_image(img):\n img_h, img_w = img.shape[:2]\n scale = np.random.uniform(0.8, 1.2, 1)\n h = int(img_h * scale)\n w = int(img_w * scale)\n img_resize = cv2.resize(img, (w, h))\n return img_resize\n\n\ndef blur(img):\n img = cv2.blur(img, (3, 3))\n return img\n\n\ndef add_noise(img):\n noise_value = np.random.randint(0, 50)\n temp_x = np.random.randint(0, img.shape[0])\n temp_y = np.random.randint(0, img.shape[1])\n img[temp_x][temp_y] = noise_value\n return img\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef text_crop(img, threshold):\n \"\"\"\n 切除图像空白边缘部分\n \"\"\"\n ret, image_mask = cv2.threshold(img, threshold, 1, cv2.THRESH_BINARY_INV)\n n = np.argwhere(image_mask == 1)\n rows = np.unique([n[i][0] for i in range(n.shape[0])])\n cols = np.unique([n[i][1] for i in range(n.shape[0])])\n min_row = np.min(rows)\n max_row = np.max(rows)\n min_col = np.min(cols)\n max_col = np.max(cols)\n image_crop = img[min_row:max_row, min_col:max_col]\n return image_crop\n\n\ndef compute_padding_value(img_gray):\n \"\"\"\n\t计算padding的值\n\t取图像累积直方图中大于0.8处的值\n\t\"\"\"\n hist = cv2.calcHist([img_gray], [0], None, [256], [0, 256])\n cdf_img = np.cumsum(hist)\n cdf_hist = cdf_img / np.max(cdf_img)\n padding_value = np.min(np.where(cdf_hist > 0.8)[0])\n return padding_value\n\n\n<function token>\n\n\ndef data_augment(img, background_path):\n if np.random.random() < 0.25:\n img = add_noise(img)\n if np.random.random() < 0.95:\n img = add_background(img, background_path)\n return img\n\n\ndef resize_image(img):\n img_h, img_w = img.shape[:2]\n scale = np.random.uniform(0.8, 1.2, 1)\n h = int(img_h * scale)\n w = int(img_w * scale)\n img_resize = cv2.resize(img, (w, h))\n return img_resize\n\n\n<function token>\n\n\ndef add_noise(img):\n noise_value = np.random.randint(0, 50)\n temp_x = np.random.randint(0, img.shape[0])\n temp_y = np.random.randint(0, img.shape[1])\n img[temp_x][temp_y] = noise_value\n return img\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef text_crop(img, threshold):\n \"\"\"\n 切除图像空白边缘部分\n \"\"\"\n ret, image_mask = cv2.threshold(img, threshold, 1, cv2.THRESH_BINARY_INV)\n n = np.argwhere(image_mask == 1)\n rows = np.unique([n[i][0] for i in range(n.shape[0])])\n cols = np.unique([n[i][1] for i in range(n.shape[0])])\n min_row = np.min(rows)\n max_row = np.max(rows)\n min_col = np.min(cols)\n max_col = np.max(cols)\n image_crop = img[min_row:max_row, min_col:max_col]\n return image_crop\n\n\n<function token>\n<function token>\n\n\ndef data_augment(img, background_path):\n if np.random.random() < 0.25:\n img = add_noise(img)\n if np.random.random() < 0.95:\n img = add_background(img, background_path)\n return img\n\n\ndef resize_image(img):\n img_h, img_w = img.shape[:2]\n scale = np.random.uniform(0.8, 1.2, 1)\n h = int(img_h * scale)\n w = int(img_w * scale)\n img_resize = cv2.resize(img, (w, h))\n return img_resize\n\n\n<function token>\n\n\ndef add_noise(img):\n noise_value = np.random.randint(0, 50)\n temp_x = np.random.randint(0, img.shape[0])\n temp_y = np.random.randint(0, img.shape[1])\n img[temp_x][temp_y] = noise_value\n return img\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\ndef text_crop(img, threshold):\n \"\"\"\n 切除图像空白边缘部分\n \"\"\"\n ret, image_mask = cv2.threshold(img, threshold, 1, cv2.THRESH_BINARY_INV)\n n = np.argwhere(image_mask == 1)\n rows = np.unique([n[i][0] for i in range(n.shape[0])])\n cols = np.unique([n[i][1] for i in range(n.shape[0])])\n min_row = np.min(rows)\n max_row = np.max(rows)\n min_col = np.min(cols)\n max_col = np.max(cols)\n image_crop = img[min_row:max_row, min_col:max_col]\n return image_crop\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef resize_image(img):\n img_h, img_w = img.shape[:2]\n scale = np.random.uniform(0.8, 1.2, 1)\n h = int(img_h * scale)\n w = int(img_w * scale)\n img_resize = cv2.resize(img, (w, h))\n return img_resize\n\n\n<function token>\n\n\ndef add_noise(img):\n noise_value = np.random.randint(0, 50)\n temp_x = np.random.randint(0, img.shape[0])\n temp_y = np.random.randint(0, img.shape[1])\n img[temp_x][temp_y] = noise_value\n return img\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef resize_image(img):\n img_h, img_w = img.shape[:2]\n scale = np.random.uniform(0.8, 1.2, 1)\n h = int(img_h * scale)\n w = int(img_w * scale)\n img_resize = cv2.resize(img, (w, h))\n return img_resize\n\n\n<function token>\n\n\ndef add_noise(img):\n noise_value = np.random.randint(0, 50)\n temp_x = np.random.randint(0, img.shape[0])\n temp_y = np.random.randint(0, img.shape[1])\n img[temp_x][temp_y] = noise_value\n return img\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef resize_image(img):\n img_h, img_w = img.shape[:2]\n scale = np.random.uniform(0.8, 1.2, 1)\n h = int(img_h * scale)\n w = int(img_w * scale)\n img_resize = cv2.resize(img, (w, h))\n return img_resize\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
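A quick usage sketch for the normalization helpers recorded above, assuming `normalization_h` and `data_augment` (plus that record's module-level `img_h = 64`, `img_w = 400` and imports) are in scope and OpenCV/NumPy are installed; the background directory in the last line is a hypothetical placeholder:

import cv2
import numpy as np

# synthetic 40x120 grayscale "word": light page, darker strokes
word = np.full((40, 120), 230, dtype=np.uint8)
cv2.putText(word, 'abc', (5, 30), cv2.FONT_HERSHEY_SIMPLEX, 1.0, 40, 2)

norm = normalization_h(word)    # randomly padded, then resized to 64x400
assert norm.shape == (64, 400)
# aug = data_augment(norm, '/path/to/backgrounds')  # hypothetical directory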
99,346 |
3577e5bb2d2f81f50030a6de182b327b7f9e2e39
|
tup = ('a', 'b', 'c')
str1 = ','.join(str(v) for v in tup)  # str(v) lets non-string elements through
print(str1)       # a,b,c
str2 = ''
print(str2[:-1])  # slicing an empty string yields '' rather than an error
|
[
"\ntup=('a', 'b', 'c')\nstr1 = ','.join(str(v) for v in tup)\nprint(str1)\n\nstr2 = ''\nprint(str2[:-1])",
"tup = 'a', 'b', 'c'\nstr1 = ','.join(str(v) for v in tup)\nprint(str1)\nstr2 = ''\nprint(str2[:-1])\n",
"<assignment token>\nprint(str1)\n<assignment token>\nprint(str2[:-1])\n",
"<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
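As a follow-up to the snippet above: the `str(v)` in the generator is what makes the join work for non-string elements, and the result splits back into strings only (the original types are not preserved). A small sketch:

tup = (1, 2.5, 'c')
joined = ','.join(str(v) for v in tup)
print(joined)             # 1,2.5,c
print(joined.split(','))  # ['1', '2.5', 'c'] -- everything comes back as str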
99,347 |
969c346b5240dd66bbca0be23542d04154b533e9
|
import dbaccess as dba
import dataProcessing as dp
import numpy as np
import time
import colorsys
import copy
NoneType = type(None)  # stands in for Python 2's "from types import *"
colLabels = ['Lvl', 'Branch', 'Total', 'Succ.', 'Fail', 'Graph']
treeRootNodeName = 'All analyses'
treeLevelParameterNames = [
'crackRatio',
'analysisType',
'modelType',
'elements']
class TreeNode(object):
def __init__(self, name):
self.name = name
self.parent = None
self.children = []
self.failedMembers = set()
self.successfulMembers = set()
self.xmark = 0
self.barWidth = 0
self.legendName = ''
self.faceColor = '0.9'
self.edgeColor = '0.0'
self.hueRange = []
self.currentMarker = '<--'
self.cols = colLabels
def setParent(self, parent):
if (isinstance(parent, NoneType) or
isinstance(parent, TreeNode)):
self.parent = parent
else:
raise TypeError('parent must be wither NoneType or TreeNode')
def setChild(self, child):
if isinstance(child, TreeNode):
self.children.append(child)
self.sortChildren()
else:
raise TypeError('child must be wither NoneType or TreeNode')
def sortChildren(self):
try:
self.children.sort(key=lambda k: float(k.getName()))
except ValueError:
self.children.sort(key=lambda k: k.getName())
def addMembers(self, members, memberType):
assert memberType in ['successful', 'failed']
assert isinstance(
members, (NoneType, str, list, tuple, set, frozenset))
setsDict = {'successful': self.successfulMembers,
'failed': self.failedMembers}
if isinstance(members, NoneType):
pass
elif isinstance(members, str):
setsDict[memberType].add(members)
else:
for m in members:
setsDict[memberType].add(m)
def addFailedMember(self, member):
# self.failedMembers.add(member)
self.addMembers(member, 'failed')
def addSuccessfulMember(self, member):
# self.successfulMembers.add(member)
self.addMembers(member, 'successful')
def setXMark(self, mark):
self.xmark = mark
def setBarWidth(self, width):
self.barWidth = width
def setLegendName(self, name):
self.legendName = name
def setFaceColor(self, color):
self.faceColor = color
def setEdgeColor(self, color):
self.edgeColor = color
def setHueRange(self, hueRange):
self.hueRange = hueRange
def addMember(self, entryObj):
key = entryObj.getEntryKey()
if entryObj.getAnalysisSuccess():
self.addSuccessfulMember(key)
else:
self.addFailedMember(key)
def assignMemberAsFailed(self, simId, printChanges=True, rowlen=80):
assert isinstance(simId, str)
root = self.getRootNode()
leaves = self.getChildLeafNodes(root)
for l in leaves:
if simId in l.successfulMembers:
l.successfulMembers.remove(simId)
l.failedMembers.add(simId)
if printChanges:
l.printNode(l, rowlen)
return 1
if simId in l.failedMembers:
pass
return 0
def getParent(self):
return self.parent
def getChildren(self):
return self.children
def getName(self):
return self.name
def getChildLeafNodes(self, node):
stack = [node]
leaves = []
while len(stack) > 0:
tmp = stack.pop()
if tmp.getChildren() == [] and tmp != node:
leaves.append(tmp)
stack = tmp.getChildren() + stack
return leaves
def getSuccessfulMembers(self):
def getSM(node):
if node.getChildren() == []:
return node.successfulMembers
else:
ch = node.getChildren()
return frozenset().union(*[getSM(c) for c in ch])
return copy.deepcopy(getSM(self))
def getFailedMembers(self):
def getSM(node):
if node.getChildren() == []:
return node.failedMembers
else:
ch = node.getChildren()
return frozenset().union(*[getSM(c) for c in ch])
return copy.deepcopy(getSM(self))
def getAllMembers(self):
return self.getFailedMembers() | self.getSuccessfulMembers()
def getXMark(self):
return self.xmark
def getBarWidth(self):
return self.barWidth
def getFaceColor(self):
return self.faceColor
def getEdgeColor(self):
return self.edgeColor
def getHueRange(self):
return self.hueRange
def getLegendName(self):
return self.legendName
def getRootNode(self):
root = self
while root.getParent():
root = root.getParent()
return root
def hasChildNode(self, nodeName):
for child in self.getChildren():
if child.getName() == nodeName:
return child
return False
def getNodeLevelInTree(self):
if self.getParent():
return 1 + self.getParent().getNodeLevelInTree()
else:
return 0
def getNodeLevel(self, node):
path = tracePath(node)
return len(path) - 1
def getChildrenOfBranch(self, branchNames):
return self.getTreeBranch(branchNames).getChildren()
def getTreeBranch(self, branchNames):
stack = [self.getRootNode()]
nodes = []
ind1 = -(len(branchNames))
while len(stack) > 0:
tmp = stack.pop()
path = tracePath(tmp)
nodeNames = [a.getName() for a in path]
if nodeNames[ind1:] == branchNames:
nodes.append(tmp)
stack = tmp.getChildren() + stack
if len(nodes) == 1:
return nodes[0]
elif len(nodes) > 1:
raise KeyError(
'{0} is ambiguous. Corresponds to more than one node.'.format(
branchNames))
else:
raise KeyError('{0} not in the tree'.format(branchNames))
def countNumberOfTreeLevels(self):
maxLevel = 0
stack = [self.getRootNode()]
while len(stack) > 0:
tmp = stack.pop()
lvl = self.getNodeLevel(tmp)
if lvl > maxLevel:
maxLevel = lvl
stack = tmp.getChildren() + stack
return maxLevel
def countMaxNodeNameLength(self):
maxLen = 0
stack = [self.getRootNode()]
while len(stack) > 0:
tmp = stack.pop()
name = self.createNameStr(tmp)
if len(name) + 1 > maxLen:
maxLen = len(name) + 1
stack = tmp.getChildren() + stack
return maxLen
def getMemberCounts(self, node):
tot, succ, failed = 0, 0, 0
succ = len(node.getSuccessfulMembers())
failed = len(node.getFailedMembers())
tot = succ + failed
return [tot, succ, failed]
def getMaxMemberCounts(self):
return self.getMemberCounts(self.getRootNode())
def calcColumnsLength(self, rowlen):
lengths = [self.countNumberOfTreeLevels(), self.countMaxNodeNameLength(
)] + [len(str(a)) for a in self.getMaxMemberCounts()]
for i in range(len(self.cols) - 1):
if len(self.cols[i]) > lengths[i]:
lengths[i] = len(self.cols[i])
lengths.append(rowlen - sum(lengths))
return lengths
def printTitle(self, rowlen):
row = ''
sep = ''
lens = self.calcColumnsLength(rowlen)
for i in range(len(self.cols)):
colStr = self.createAlignedColStr(
self.cols[i], lens[i], 'center')
row = row + '|' + colStr
sep = sep + '|' + lens[i] * '-'
        print(row)
        print(sep)
def createAlignedColStr(self, value, colLen, align):
assert align in ['left', 'center', 'right']
vl = len(str(value))
if align == 'center':
            f = (colLen - vl) // 2  # integer division: f pads via f * ' '
b = colLen - vl - f
elif align == 'right':
b = 1
f = colLen - vl - b
elif align == 'left':
f = 0
b = colLen - vl - f
colStr = f * ' ' + str(value) + b * ' '
return colStr
def createNameStr(self, node):
level = self.getNodeLevel(node)
isCurrent = (self == node)
nodeName = str(node.getName())
nameStr = level * '-' + ' ' + nodeName
if isCurrent:
nameStr = nameStr + ' ' + self.currentMarker
return nameStr
def createBarGraph(self, node, length):
mt, ms, mf = self.getMaxMemberCounts()
        t, s, f = self.getMemberCounts(node)
        if t == 0:  # guard against ZeroDivisionError for empty nodes
            return '[' + (length - 2) * ' ' + ']'
        l = (length - 2) * float(t) / mt
plen = int(l * s / t)
mlen = int(l - plen)
blanks = int(l - (plen + mlen))
return '[' + plen * '+' + mlen * '-' + blanks * ' ' + ']'
def printNode(self, node, rowlen):
lens = self.calcColumnsLength(rowlen)
row = ''
total, succ, failed = self.getMemberCounts(node)
ncols = [self.getNodeLevel(node), self.createNameStr(node),
total, succ, failed]
alignment = ['right', 'left', 'right', 'right', 'right']
for i in range(len(ncols)):
row = row + '|' + self.createAlignedColStr(
ncols[i], lens[i], alignment[i])
row = row + '|' + self.createBarGraph(node, lens[-1])
        print(row)
def printStats2(self, rowlen=80):
self.printTitle(rowlen)
path = tracePath(self)
for node in path:
if node is not self:
self.printNode(node, rowlen)
else:
break
stack = [self]
while len(stack) > 0:
tmp = stack.pop()
self.printNode(tmp, rowlen)
stack = stack + tmp.getChildren()
def printStructure(self):
root = self.getRootNode()
stack = [root]
while len(stack) > 0:
            print(generateNodePrStr(stack[0], stack[0] is self))
temp = stack.pop(0)
stack = temp.getChildren() + stack
    def __eq__(self, other):
        assert isinstance(self, type(other))
        return self.getName() == other.getName()

    # defining __eq__ disables the inherited hash in Python 3; restore the
    # identity hash so nodes can still live in sets (see nodesPerLevel)
    __hash__ = object.__hash__
def __str__(self):
return self.name
def printStats(self, maxChars=80):
root = self.getRootNode()
maxLen = 0
stack = [root]
while len(stack) > 0:
nodePrStr = generateNodePrStr(stack[0], stack[0] is self)
if len(nodePrStr) > maxLen:
maxLen = len(nodePrStr)
temp = stack.pop(0)
stack = temp.getChildren() + stack
        print(genNodePrintStrWithBar(
            root, root, root is self, maxLen, maxChars))
for node in root.getChildren():
stack = [node]
while len(stack) > 0:
                print(genNodePrintStrWithBar(
                    stack[0], node, stack[0] is self, maxLen, maxChars))
temp = stack.pop(0)
stack = temp.getChildren() + stack
def createTreeFromDbKeys(dbKeys):
root = TreeNode(treeRootNodeName)
for key in dbKeys:
parent = root
anDataObj = dp.AnalysisData(key)
for tlevel in treeLevelParameterNames:
nodeName = anDataObj.getParameter(tlevel)
node = parent.hasChildNode(nodeName)
if not node:
node = TreeNode(nodeName)
node.setParent(parent)
parent.setChild(node)
if tlevel == treeLevelParameterNames[-1]:
node.addMember(anDataObj)
parent = node
return root
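# Usage sketch (the accessor below is hypothetical -- only dbaccess knows
# the real call):
#
#     root = createTreeFromDbKeys(dba.get_all_keys())
#
# This yields one branch per (crackRatio, analysisType, modelType, elements)
# combination, with the simulation keys collected into the leaf member sets.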
def nodesPerLevel(root):
stack = [root]
levelNodes = {}
while len(stack) > 0:
level = stack[0].getNodeLevelInTree()
if level not in levelNodes.keys():
levelNodes[level] = set()
levelNodes[level].add(stack[0])
temp = stack.pop(0)
stack = stack + temp.getChildren()
return levelNodes
def tracePath(node, limitLevel=0):
def getPathToRoot(node):
if not node.getParent():
return [node]
else:
return getPathToRoot(node.getParent()) + [node]
path = getPathToRoot(node)
    if limitLevel is None or 0 <= limitLevel <= len(path):  # test None first; Python 3 cannot order None
return path[limitLevel:]
else:
raise IndexError(
'limitLevel argument must be >= 0 and <= {0}'.format(
len(path)))
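# Example: for a leaf four levels deep, tracePath(leaf) returns
# [root, crackRatio, analysisType, modelType, elements] in root-to-leaf
# order, and tracePath(leaf, 2) drops the first two entries (as used by
# createTreeOfKeys below).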
def createTreeOfKeys(root):
leaves = nodesPerLevel(root)
leaves = leaves[max(leaves.keys())]
nroot = TreeNode('analyses')
for leaf in leaves:
path = tracePath(leaf, 2)
parent = nroot
for node in path:
if node.getName() not in [a.getName()
for a in parent.getChildren()]:
newNode = TreeNode(node.getName())
newNode.setParent(parent)
parent.setChild(newNode)
for n in parent.getChildren():
if n == node:
parent = n
return nroot
def maxNodesPerLevel(root):
maxChildren = {0: 1}
stack = [root]
while len(stack) > 0:
level = stack[0].getNodeLevelInTree() + 1
if len(stack[0].getChildren()) > maxChildren.get(level, 0):
maxChildren[level] = len(stack[0].getChildren())
temp = stack.pop(0)
stack = stack + temp.getChildren()
return maxChildren
def nodeNamesPerLevel(root):
levelNodes = nodesPerLevel(root)
namedNodes = {}
for key in levelNodes.keys():
nodes = list(levelNodes[key])
namedNodes[key] = set()
for node in nodes:
namedNodes[key].add(node.getName())
for key in namedNodes.keys():
namedNodes[key] = sorted(namedNodes[key])
return namedNodes
def generateNodePrStr(node, current):
level = node.getNodeLevelInTree()
    # right-align the level number to two characters (the original branching
    # also skipped level == 9, leaving it unpadded)
    number = '{0:>2}'.format(level)
    branch = '|' + level * '-'
if current:
nodeName = node.getName() + ' <--'
else:
nodeName = node.getName()
return "{0} {1} {2}".format(number, branch, nodeName)
def genNodePrintStrWithBar(
node, root, current, maxStrLen, maxChars):
if (len(node.getSuccessfulMembers()) +
len(node.getFailedMembers()) > 0):
barLength = maxChars - maxStrLen - 3
s, f, b = calcNodeBarNumbers(node, root, barLength)
nodeStr = generateNodePrStr(node, current)
blankSpace = maxChars - len(nodeStr) - s - f - b - 2
nps = '{0}{1}[{2}{3}{4}]'.format(
nodeStr, blankSpace * ' ', s * '+', f * '-', b * ' ')
return nps
else:
return generateNodePrStr(node, current)
def calcNodeBarNumbers(node, root, barLength):
nsm = len(node.getSuccessfulMembers())
nfm = len(node.getFailedMembers())
totm = (len(root.getSuccessfulMembers()) +
len(root.getFailedMembers()))
barUnitLen = barLength / float(totm)
totBarUnits = int(round(barUnitLen * (nsm + nfm)))
sBarUnits = int(round(barUnitLen * nsm))
fBarUnits = totBarUnits - sBarUnits
blankBarUnits = barLength - totBarUnits
return sBarUnits, fBarUnits, blankBarUnits
def calcBarWidth(node, refTree,
ulen=1.0, relPad=0.05, root=None, tlevelIncrement=1):
if not root:
root = node.getRootNode()
if node is not root:
maxNodes = maxNodesPerLevel(refTree)
nodeLevel = node.getNodeLevelInTree()
numNodes = maxNodes[nodeLevel - tlevelIncrement]
ulen = node.getParent().getBarWidth()
barWidth = (1 - (numNodes + 1) * relPad) * ulen / numNodes
else:
barWidth = (1 - 2 * relPad) * ulen
node.setBarWidth(barWidth)
def getRefSiblingsOfNode(node, refTree):
candidates = []
stack = [refTree]
while len(stack) > 0:
if stack[0] == node:
candidates.append(stack[0])
temp = stack.pop(0)
stack = temp.getChildren() + stack
parent = node.getParent()
for c in candidates:
if ((parent == c.getParent()) or
(c.getParent() is refTree)):
return c.getParent().getChildren(), c
def calcXMark(node, refTree):
parent = node.getParent()
pxmark = parent.getXMark()
refSiblings, rs = getRefSiblingsOfNode(node, refTree)
index = refSiblings.index(rs)
n = len(refSiblings)
pbw = parent.getBarWidth()
a = node.getBarWidth()
b = (pbw - a * n) / float(n + 1)
c = pxmark + b * (index + 1) + a * index
node.setXMark(c)
def assignBarWidthsAndMarks(root, refTree, ulen=1.0, relPad=0.05):
valNodes = root.getChildren()
count = 0
for node in valNodes:
stack = [node]
while len(stack) > 0:
calcBarWidth(stack[0], refTree,
ulen, relPad, node, 1)
if stack[0] is node:
stack[0].setXMark((count + relPad) * ulen)
else:
calcXMark(stack[0], refTree)
temp = stack.pop(0)
stack = stack + temp.getChildren()
count += 1
def setLegendName(node):
if node is node.getRootNode():
return None
name = node.getName()
analyses = ['FEM', 'XFEM']
elements = ['LinearTet', 'LinearRI', 'LinearFI']
types = {
'crackPartition': 'CP - xfem',
'multiplePartitions': 'MP - xfem', 'simple': 'S - xfem',
'elliptic': 'Elliptic tr.', 'simpleScale': 'Scale tr.'}
if name in analyses:
node.setLegendName('{0} - {1}'.format(name, 'analyses'))
elif name in elements:
n2 = node.getParent().getParent().getName()
node.setLegendName('{0} - {1}'.format(name, n2))
elif name in types.keys():
node.setLegendName(types[name])
elif node.getParent() == node.getRootNode():
node.setLegendName('All analyses')
def assignLegendNames(root):
stack = [root]
while len(stack) > 0:
setLegendName(stack[0])
temp = stack.pop(0)
stack = stack + temp.getChildren()
def setColorForNode(node, refNode, refTree):
level = node.getNodeLevelInTree() - refNode.getNodeLevelInTree()
n = 1000
if node is refNode:
hueRange = list(range(n))
else:
refSiblings, rc = getRefSiblingsOfNode(node, refTree)
lrs = len(refSiblings)
hueRange = node.getParent().getHueRange()
        # integer division: slice indices must be ints in Python 3
        start = len(hueRange) // lrs * refSiblings.index(rc)
        end = len(hueRange) // lrs * (1 + refSiblings.index(rc))
hueRange = hueRange[start:end]
h = hueRange[int(len(hueRange) / 2)] / float(n)
s = 1.0 - 1 / float(1 + level)
v = 0.9 / float(level + 1)
node.setHueRange(hueRange)
rgb = colorsys.hsv_to_rgb(h, s, v)
node.setFaceColor(rgb)
if node is refNode:
rgb = colorsys.hsv_to_rgb(h, 0., 0.)
else:
rgb = colorsys.hsv_to_rgb(h, 1.0, 1.0)
node.setEdgeColor(rgb)
def barPlot(root, refTree, fig):
ax = fig.add_subplot(111)
bars = {}
totals = []
count = 1
for node in root.getChildren():
stack = [node]
cc = 1
totals.append(node.getName())
while len(stack) > 0:
color = str(0.9 / cc)
setColorForNode(stack[0], node, refTree)
color = stack[0].getFaceColor()
ec_color = stack[0].getEdgeColor()
bars[stack[0].getLegendName()] = ax.bar(
stack[0].getXMark(),
len(stack[0].getSuccessfulMembers()),
width=stack[0].getBarWidth(),
color=color,
ec=ec_color)
bars['Errors'] = ax.bar(
stack[0].getXMark(),
len(stack[0].getFailedMembers()),
width=stack[0].getBarWidth(),
bottom=len(stack[0].getSuccessfulMembers()),
color='DarkRed',
ec='red')
temp = stack.pop(0)
stack = stack + temp.getChildren()
cc += 1
ax.legend([bars[k] for k in sorted(bars.keys())], sorted(bars.keys()),
bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0)
ax.set_xticks([0.5 + i for i in range(len(totals))])
ax.set_xticklabels(totals)
ax.set_xlabel('Crack ratio')
ax.set_ylabel('Number of simulations')
ax.set_title('Database summary')
ax.grid(True)
def plot(root, refTree, fig):
assignBarWidthsAndMarks(root, refTree)
assignLegendNames(root)
barPlot(root, refTree, fig)
def getTreeLeaves(root):
root = root.getRootNode()
stack = [root]
leaves = []
while len(stack) > 0:
temp = stack.pop()
stack = temp.getChildren() + stack
if len(temp.getChildren()) == 0:
leaves.append(temp)
return leaves
|
[
"import dbaccess as dba\nimport dataProcessing as dp\nimport numpy as np\nimport time\nimport colorsys\nimport copy\nfrom types import *\n\ncolLabels = ['Lvl', 'Branch', 'Total', 'Succ.', 'Fail', 'Graph']\ntreeRootNodeName = 'All analyses'\ntreeLevelParameterNames = [\n 'crackRatio',\n 'analysisType',\n 'modelType',\n 'elements']\n\n\nclass TreeNode(object):\n\n def __init__(self, name):\n self.name = name\n self.parent = None\n self.children = []\n self.failedMembers = set()\n self.successfulMembers = set()\n self.xmark = 0\n self.barWidth = 0\n self.legendName = ''\n self.faceColor = '0.9'\n self.edgeColor = '0.0'\n self.hueRange = []\n self.currentMarker = '<--'\n self.cols = colLabels\n\n def setParent(self, parent):\n if (isinstance(parent, NoneType) or\n isinstance(parent, TreeNode)):\n self.parent = parent\n else:\n raise TypeError('parent must be wither NoneType or TreeNode')\n\n def setChild(self, child):\n if isinstance(child, TreeNode):\n self.children.append(child)\n self.sortChildren()\n else:\n raise TypeError('child must be wither NoneType or TreeNode')\n\n def sortChildren(self):\n try:\n self.children.sort(key=lambda k: float(k.getName()))\n except ValueError:\n self.children.sort(key=lambda k: k.getName())\n\n def addMembers(self, members, memberType):\n assert memberType in ['successful', 'failed']\n assert isinstance(\n members, (NoneType, str, list, tuple, set, frozenset))\n setsDict = {'successful': self.successfulMembers,\n 'failed': self.failedMembers}\n if isinstance(members, NoneType):\n pass\n elif isinstance(members, str):\n setsDict[memberType].add(members)\n else:\n for m in members:\n setsDict[memberType].add(m)\n\n def addFailedMember(self, member):\n # self.failedMembers.add(member)\n self.addMembers(member, 'failed')\n\n def addSuccessfulMember(self, member):\n # self.successfulMembers.add(member)\n self.addMembers(member, 'successful')\n\n def setXMark(self, mark):\n self.xmark = mark\n\n def setBarWidth(self, width):\n self.barWidth = width\n\n def setLegendName(self, name):\n self.legendName = name\n\n def setFaceColor(self, color):\n self.faceColor = color\n\n def setEdgeColor(self, color):\n self.edgeColor = color\n\n def setHueRange(self, hueRange):\n self.hueRange = hueRange\n\n def addMember(self, entryObj):\n key = entryObj.getEntryKey()\n if entryObj.getAnalysisSuccess():\n self.addSuccessfulMember(key)\n else:\n self.addFailedMember(key)\n\n def assignMemberAsFailed(self, simId, printChanges=True, rowlen=80):\n assert isinstance(simId, str)\n root = self.getRootNode()\n leaves = self.getChildLeafNodes(root)\n for l in leaves:\n if simId in l.successfulMembers:\n l.successfulMembers.remove(simId)\n l.failedMembers.add(simId)\n if printChanges:\n l.printNode(l, rowlen)\n return 1\n if simId in l.failedMembers:\n pass\n return 0\n\n def getParent(self):\n return self.parent\n\n def getChildren(self):\n return self.children\n\n def getName(self):\n return self.name\n\n def getChildLeafNodes(self, node):\n stack = [node]\n leaves = []\n while len(stack) > 0:\n tmp = stack.pop()\n if tmp.getChildren() == [] and tmp != node:\n leaves.append(tmp)\n stack = tmp.getChildren() + stack\n return leaves\n\n def getSuccessfulMembers(self):\n def getSM(node):\n if node.getChildren() == []:\n return node.successfulMembers\n else:\n ch = node.getChildren()\n return frozenset().union(*[getSM(c) for c in ch])\n return copy.deepcopy(getSM(self))\n\n def getFailedMembers(self):\n def getSM(node):\n if node.getChildren() == []:\n return node.failedMembers\n else:\n ch 
= node.getChildren()\n return frozenset().union(*[getSM(c) for c in ch])\n return copy.deepcopy(getSM(self))\n\n def getAllMembers(self):\n return self.getFailedMembers() | self.getSuccessfulMembers()\n\n def getXMark(self):\n return self.xmark\n\n def getBarWidth(self):\n return self.barWidth\n\n def getFaceColor(self):\n return self.faceColor\n\n def getEdgeColor(self):\n return self.edgeColor\n\n def getHueRange(self):\n return self.hueRange\n\n def getLegendName(self):\n return self.legendName\n\n def getRootNode(self):\n root = self\n while root.getParent():\n root = root.getParent()\n return root\n\n def hasChildNode(self, nodeName):\n for child in self.getChildren():\n if child.getName() == nodeName:\n return child\n return False\n\n def getNodeLevelInTree(self):\n if self.getParent():\n return 1 + self.getParent().getNodeLevelInTree()\n else:\n return 0\n\n def getNodeLevel(self, node):\n path = tracePath(node)\n return len(path) - 1\n\n def getChildrenOfBranch(self, branchNames):\n return self.getTreeBranch(branchNames).getChildren()\n\n def getTreeBranch(self, branchNames):\n stack = [self.getRootNode()]\n nodes = []\n ind1 = -(len(branchNames))\n while len(stack) > 0:\n tmp = stack.pop()\n path = tracePath(tmp)\n nodeNames = [a.getName() for a in path]\n if nodeNames[ind1:] == branchNames:\n nodes.append(tmp)\n stack = tmp.getChildren() + stack\n if len(nodes) == 1:\n return nodes[0]\n elif len(nodes) > 1:\n raise KeyError(\n '{0} is ambiguous. Corresponds to more than one node.'.format(\n branchNames))\n else:\n raise KeyError('{0} not in the tree'.format(branchNames))\n\n def countNumberOfTreeLevels(self):\n maxLevel = 0\n stack = [self.getRootNode()]\n while len(stack) > 0:\n tmp = stack.pop()\n lvl = self.getNodeLevel(tmp)\n if lvl > maxLevel:\n maxLevel = lvl\n stack = tmp.getChildren() + stack\n return maxLevel\n\n def countMaxNodeNameLength(self):\n maxLen = 0\n stack = [self.getRootNode()]\n while len(stack) > 0:\n tmp = stack.pop()\n name = self.createNameStr(tmp)\n if len(name) + 1 > maxLen:\n maxLen = len(name) + 1\n stack = tmp.getChildren() + stack\n return maxLen\n\n def getMemberCounts(self, node):\n tot, succ, failed = 0, 0, 0\n succ = len(node.getSuccessfulMembers())\n failed = len(node.getFailedMembers())\n tot = succ + failed\n return [tot, succ, failed]\n\n def getMaxMemberCounts(self):\n return self.getMemberCounts(self.getRootNode())\n\n def calcColumnsLength(self, rowlen):\n lengths = [self.countNumberOfTreeLevels(), self.countMaxNodeNameLength(\n )] + [len(str(a)) for a in self.getMaxMemberCounts()]\n for i in range(len(self.cols) - 1):\n if len(self.cols[i]) > lengths[i]:\n lengths[i] = len(self.cols[i])\n lengths.append(rowlen - sum(lengths))\n return lengths\n\n def printTitle(self, rowlen):\n row = ''\n sep = ''\n lens = self.calcColumnsLength(rowlen)\n for i in range(len(self.cols)):\n colStr = self.createAlignedColStr(\n self.cols[i], lens[i], 'center')\n row = row + '|' + colStr\n sep = sep + '|' + lens[i] * '-'\n print row\n print sep\n\n def createAlignedColStr(self, value, colLen, align):\n assert align in ['left', 'center', 'right']\n vl = len(str(value))\n if align == 'center':\n f = (colLen - vl) / 2\n b = colLen - vl - f\n elif align == 'right':\n b = 1\n f = colLen - vl - b\n elif align == 'left':\n f = 0\n b = colLen - vl - f\n colStr = f * ' ' + str(value) + b * ' '\n return colStr\n\n def createNameStr(self, node):\n level = self.getNodeLevel(node)\n isCurrent = (self == node)\n nodeName = str(node.getName())\n nameStr = level * '-' + ' 
' + nodeName\n if isCurrent:\n nameStr = nameStr + ' ' + self.currentMarker\n return nameStr\n\n def createBarGraph(self, node, length):\n mt, ms, mf = self.getMaxMemberCounts()\n t, s, f = self.getMemberCounts(node)\n l = (length - 2) * float(t) / mt\n plen = int(l * s / t)\n mlen = int(l - plen)\n blanks = int(l - (plen + mlen))\n return '[' + plen * '+' + mlen * '-' + blanks * ' ' + ']'\n\n def printNode(self, node, rowlen):\n lens = self.calcColumnsLength(rowlen)\n row = ''\n total, succ, failed = self.getMemberCounts(node)\n ncols = [self.getNodeLevel(node), self.createNameStr(node),\n total, succ, failed]\n alignment = ['right', 'left', 'right', 'right', 'right']\n for i in range(len(ncols)):\n row = row + '|' + self.createAlignedColStr(\n ncols[i], lens[i], alignment[i])\n row = row + '|' + self.createBarGraph(node, lens[-1])\n print row\n\n def printStats2(self, rowlen=80):\n self.printTitle(rowlen)\n path = tracePath(self)\n for node in path:\n if node is not self:\n self.printNode(node, rowlen)\n else:\n break\n stack = [self]\n while len(stack) > 0:\n tmp = stack.pop()\n self.printNode(tmp, rowlen)\n stack = stack + tmp.getChildren()\n\n def printStructure(self):\n root = self.getRootNode()\n stack = [root]\n while len(stack) > 0:\n print generateNodePrStr(stack[0], stack[0] is self)\n temp = stack.pop(0)\n stack = temp.getChildren() + stack\n\n def __eq__(self, other):\n assert isinstance(self, type(other))\n return self.getName() == other.getName()\n\n def __str__(self):\n return self.name\n\n def printStats(self, maxChars=80):\n root = self.getRootNode()\n maxLen = 0\n stack = [root]\n while len(stack) > 0:\n nodePrStr = generateNodePrStr(stack[0], stack[0] is self)\n if len(nodePrStr) > maxLen:\n maxLen = len(nodePrStr)\n temp = stack.pop(0)\n stack = temp.getChildren() + stack\n print genNodePrintStrWithBar(\n root, root, root is self, maxLen, maxChars)\n for node in root.getChildren():\n stack = [node]\n while len(stack) > 0:\n print genNodePrintStrWithBar(\n stack[0], node, stack[0] is self, maxLen, maxChars)\n temp = stack.pop(0)\n stack = temp.getChildren() + stack\n\n\ndef createTreeFromDbKeys(dbKeys):\n root = TreeNode(treeRootNodeName)\n for key in dbKeys:\n parent = root\n anDataObj = dp.AnalysisData(key)\n\n for tlevel in treeLevelParameterNames:\n nodeName = anDataObj.getParameter(tlevel)\n node = parent.hasChildNode(nodeName)\n if not node:\n node = TreeNode(nodeName)\n node.setParent(parent)\n parent.setChild(node)\n if tlevel == treeLevelParameterNames[-1]:\n node.addMember(anDataObj)\n parent = node\n return root\n\n\ndef nodesPerLevel(root):\n stack = [root]\n levelNodes = {}\n while len(stack) > 0:\n level = stack[0].getNodeLevelInTree()\n if level not in levelNodes.keys():\n levelNodes[level] = set()\n levelNodes[level].add(stack[0])\n temp = stack.pop(0)\n stack = stack + temp.getChildren()\n return levelNodes\n\n\ndef tracePath(node, limitLevel=0):\n def getPathToRoot(node):\n if not node.getParent():\n return [node]\n else:\n return getPathToRoot(node.getParent()) + [node]\n path = getPathToRoot(node)\n if (limitLevel <= len(path) and limitLevel >= 0) or limitLevel is None:\n return path[limitLevel:]\n else:\n raise IndexError(\n 'limitLevel argument must be >= 0 and <= {0}'.format(\n len(path)))\n\n\ndef createTreeOfKeys(root):\n leaves = nodesPerLevel(root)\n leaves = leaves[max(leaves.keys())]\n nroot = TreeNode('analyses')\n for leaf in leaves:\n path = tracePath(leaf, 2)\n parent = nroot\n for node in path:\n if node.getName() not in [a.getName()\n 
for a in parent.getChildren()]:\n newNode = TreeNode(node.getName())\n newNode.setParent(parent)\n parent.setChild(newNode)\n for n in parent.getChildren():\n if n == node:\n parent = n\n return nroot\n\n\ndef maxNodesPerLevel(root):\n maxChildren = {0: 1}\n stack = [root]\n while len(stack) > 0:\n level = stack[0].getNodeLevelInTree() + 1\n if len(stack[0].getChildren()) > maxChildren.get(level, 0):\n maxChildren[level] = len(stack[0].getChildren())\n temp = stack.pop(0)\n stack = stack + temp.getChildren()\n return maxChildren\n\n\ndef nodeNamesPerLevel(root):\n levelNodes = nodesPerLevel(root)\n namedNodes = {}\n for key in levelNodes.keys():\n nodes = list(levelNodes[key])\n namedNodes[key] = set()\n for node in nodes:\n namedNodes[key].add(node.getName())\n for key in namedNodes.keys():\n namedNodes[key] = sorted(namedNodes[key])\n return namedNodes\n\n\ndef generateNodePrStr(node, current):\n level = node.getNodeLevelInTree()\n if level < 9:\n number = ' ' + str(level)\n elif level > 9 and level < 99:\n number = ' ' + str(level)\n else:\n number = str(level)\n branch = level * ' ' + '|' + '-'\n branch = '|' + level * '-'\n if current:\n nodeName = node.getName() + ' <--'\n else:\n nodeName = node.getName()\n return \"{0} {1} {2}\".format(number, branch, nodeName)\n\n\ndef genNodePrintStrWithBar(\n node, root, current, maxStrLen, maxChars):\n if (len(node.getSuccessfulMembers()) +\n len(node.getFailedMembers()) > 0):\n barLength = maxChars - maxStrLen - 3\n s, f, b = calcNodeBarNumbers(node, root, barLength)\n nodeStr = generateNodePrStr(node, current)\n blankSpace = maxChars - len(nodeStr) - s - f - b - 2\n nps = '{0}{1}[{2}{3}{4}]'.format(\n nodeStr, blankSpace * ' ', s * '+', f * '-', b * ' ')\n return nps\n else:\n return generateNodePrStr(node, current)\n\n\ndef calcNodeBarNumbers(node, root, barLength):\n nsm = len(node.getSuccessfulMembers())\n nfm = len(node.getFailedMembers())\n totm = (len(root.getSuccessfulMembers()) +\n len(root.getFailedMembers()))\n barUnitLen = barLength / float(totm)\n totBarUnits = int(round(barUnitLen * (nsm + nfm)))\n sBarUnits = int(round(barUnitLen * nsm))\n fBarUnits = totBarUnits - sBarUnits\n blankBarUnits = barLength - totBarUnits\n return sBarUnits, fBarUnits, blankBarUnits\n\n\ndef calcBarWidth(node, refTree,\n ulen=1.0, relPad=0.05, root=None, tlevelIncrement=1):\n if not root:\n root = node.getRootNode()\n if node is not root:\n maxNodes = maxNodesPerLevel(refTree)\n nodeLevel = node.getNodeLevelInTree()\n numNodes = maxNodes[nodeLevel - tlevelIncrement]\n ulen = node.getParent().getBarWidth()\n barWidth = (1 - (numNodes + 1) * relPad) * ulen / numNodes\n else:\n barWidth = (1 - 2 * relPad) * ulen\n node.setBarWidth(barWidth)\n\n\ndef getRefSiblingsOfNode(node, refTree):\n candidates = []\n stack = [refTree]\n while len(stack) > 0:\n if stack[0] == node:\n candidates.append(stack[0])\n temp = stack.pop(0)\n stack = temp.getChildren() + stack\n parent = node.getParent()\n for c in candidates:\n if ((parent == c.getParent()) or\n (c.getParent() is refTree)):\n return c.getParent().getChildren(), c\n\n\ndef calcXMark(node, refTree):\n parent = node.getParent()\n pxmark = parent.getXMark()\n refSiblings, rs = getRefSiblingsOfNode(node, refTree)\n index = refSiblings.index(rs)\n n = len(refSiblings)\n pbw = parent.getBarWidth()\n a = node.getBarWidth()\n b = (pbw - a * n) / float(n + 1)\n c = pxmark + b * (index + 1) + a * index\n node.setXMark(c)\n\n\ndef assignBarWidthsAndMarks(root, refTree, ulen=1.0, relPad=0.05):\n valNodes = 
root.getChildren()\n count = 0\n for node in valNodes:\n stack = [node]\n while len(stack) > 0:\n calcBarWidth(stack[0], refTree,\n ulen, relPad, node, 1)\n if stack[0] is node:\n stack[0].setXMark((count + relPad) * ulen)\n else:\n calcXMark(stack[0], refTree)\n temp = stack.pop(0)\n stack = stack + temp.getChildren()\n count += 1\n\n\ndef setLegendName(node):\n if node is node.getRootNode():\n return None\n name = node.getName()\n analyses = ['FEM', 'XFEM']\n elements = ['LinearTet', 'LinearRI', 'LinearFI']\n types = {\n 'crackPartition': 'CP - xfem',\n 'multiplePartitions': 'MP - xfem', 'simple': 'S - xfem',\n 'elliptic': 'Elliptic tr.', 'simpleScale': 'Scale tr.'}\n if name in analyses:\n node.setLegendName('{0} - {1}'.format(name, 'analyses'))\n elif name in elements:\n n2 = node.getParent().getParent().getName()\n node.setLegendName('{0} - {1}'.format(name, n2))\n elif name in types.keys():\n node.setLegendName(types[name])\n elif node.getParent() == node.getRootNode():\n node.setLegendName('All analyses')\n\n\ndef assignLegendNames(root):\n stack = [root]\n while len(stack) > 0:\n setLegendName(stack[0])\n temp = stack.pop(0)\n stack = stack + temp.getChildren()\n\n\ndef setColorForNode(node, refNode, refTree):\n level = node.getNodeLevelInTree() - refNode.getNodeLevelInTree()\n n = 1000\n if node is refNode:\n hueRange = list(range(n))\n else:\n refSiblings, rc = getRefSiblingsOfNode(node, refTree)\n lrs = len(refSiblings)\n hueRange = node.getParent().getHueRange()\n start = len(hueRange) / lrs * refSiblings.index(rc)\n end = len(hueRange) / lrs * (1 + refSiblings.index(rc))\n hueRange = hueRange[start:end]\n\n h = hueRange[int(len(hueRange) / 2)] / float(n)\n s = 1.0 - 1 / float(1 + level)\n v = 0.9 / float(level + 1)\n node.setHueRange(hueRange)\n rgb = colorsys.hsv_to_rgb(h, s, v)\n node.setFaceColor(rgb)\n if node is refNode:\n rgb = colorsys.hsv_to_rgb(h, 0., 0.)\n else:\n rgb = colorsys.hsv_to_rgb(h, 1.0, 1.0)\n node.setEdgeColor(rgb)\n\n\ndef barPlot(root, refTree, fig):\n ax = fig.add_subplot(111)\n bars = {}\n totals = []\n count = 1\n for node in root.getChildren():\n stack = [node]\n cc = 1\n totals.append(node.getName())\n while len(stack) > 0:\n color = str(0.9 / cc)\n setColorForNode(stack[0], node, refTree)\n color = stack[0].getFaceColor()\n ec_color = stack[0].getEdgeColor()\n bars[stack[0].getLegendName()] = ax.bar(\n stack[0].getXMark(),\n len(stack[0].getSuccessfulMembers()),\n width=stack[0].getBarWidth(),\n color=color,\n ec=ec_color)\n bars['Errors'] = ax.bar(\n stack[0].getXMark(),\n len(stack[0].getFailedMembers()),\n width=stack[0].getBarWidth(),\n bottom=len(stack[0].getSuccessfulMembers()),\n color='DarkRed',\n ec='red')\n temp = stack.pop(0)\n stack = stack + temp.getChildren()\n cc += 1\n ax.legend([bars[k] for k in sorted(bars.keys())], sorted(bars.keys()),\n bbox_to_anchor=(1.01, 1), loc=2, borderaxespad=0)\n ax.set_xticks([0.5 + i for i in range(len(totals))])\n ax.set_xticklabels(totals)\n ax.set_xlabel('Crack ratio')\n ax.set_ylabel('Number of simulations')\n ax.set_title('Database summary')\n ax.grid(True)\n\n\ndef plot(root, refTree, fig):\n assignBarWidthsAndMarks(root, refTree)\n assignLegendNames(root)\n barPlot(root, refTree, fig)\n\n\ndef getTreeLeaves(root):\n root = root.getRootNode()\n stack = [root]\n leaves = []\n while len(stack) > 0:\n temp = stack.pop()\n stack = temp.getChildren() + stack\n if len(temp.getChildren()) == 0:\n leaves.append(temp)\n return leaves\n"
] | true |
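A small, self-contained sketch of the TreeNode bookkeeping from the record above, with the class assumed in scope (no database or dbaccess needed); the simulation ids are made up for illustration:

root = TreeNode('All analyses')
for ratio in ('0.1', '0.2'):
    child = TreeNode(ratio)
    child.setParent(root)
    root.setChild(child)

root.getTreeBranch(['All analyses', '0.1']).addSuccessfulMember('sim-001')
root.getTreeBranch(['All analyses', '0.2']).addFailedMember('sim-002')

print(root.getSuccessfulMembers())   # frozenset({'sim-001'})
print(len(root.getAllMembers()))     # 2
root.printStats2(rowlen=60)          # ASCII table with +/- bar graphs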
99,348 |
76045926c8e56e77a2bf516ebff863b8ef4ea4ea
|
#!/usr/bin/python
import os
import re

# popen2 was removed in Python 3; os.popen provides the same read-only pipe.
f = os.popen('wmctrl -d')
desktop_str = f.readline()  # work area of the current desktop
desktop_size = re.search(r'WA: \d,\d (\d+)x(\d+)', desktop_str)
desktop_width = int(desktop_size.group(1))
desktop_height = int(desktop_size.group(2))

f = os.popen('wmctrl -r :ACTIVE: -L -G')
window_str = f.readline()  # geometry of the active window
window_size = re.search(r' +\d+ *\d+ *\d+ *(\d+) *(\d+)', window_str)
window_width = int(window_size.group(1))
window_height = int(window_size.group(2))

# integer division keeps the %d placeholders exact in Python 3
new_window_position_x = (desktop_width - window_width) // 2
new_window_position_y = (desktop_height - window_height) // 2

os.system('wmctrl -r :ACTIVE: -e 0,%d,%d,-1,-1' % (new_window_position_x, new_window_position_y))
|
[
"#!/usr/bin/python\n\nimport popen2\nimport re\n\n(f, _) = popen2.popen2('wmctrl -d')\ndesktop_str = f.readline()\ndesktop_size = re.search('WA: \\d,\\d (\\d+)x(\\d+)', desktop_str)\ndesktop_width = int(desktop_size.group(1))\ndesktop_height = int(desktop_size.group(2))\n\n(f, _) = popen2.popen2('wmctrl -r :ACTIVE: -L -G')\nwindow_str = f.readline()\nwindow_size = re.search(' +\\d+ *\\d+ *\\d+ *(\\d+) *(\\d+)', window_str)\nwindow_width = int(window_size.group(1))\nwindow_height = int(window_size.group(2))\n\nnew_window_position_x = (desktop_width - window_width) / 2\nnew_window_position_y = (desktop_height - window_height) / 2\n\npopen2.popen2('wmctrl -r :ACTIVE: -e 0,%d,%d,-1,-1' % (new_window_position_x, new_window_position_y))\n",
"import popen2\nimport re\nf, _ = popen2.popen2('wmctrl -d')\ndesktop_str = f.readline()\ndesktop_size = re.search('WA: \\\\d,\\\\d (\\\\d+)x(\\\\d+)', desktop_str)\ndesktop_width = int(desktop_size.group(1))\ndesktop_height = int(desktop_size.group(2))\nf, _ = popen2.popen2('wmctrl -r :ACTIVE: -L -G')\nwindow_str = f.readline()\nwindow_size = re.search(' +\\\\d+ *\\\\d+ *\\\\d+ *(\\\\d+) *(\\\\d+)', window_str)\nwindow_width = int(window_size.group(1))\nwindow_height = int(window_size.group(2))\nnew_window_position_x = (desktop_width - window_width) / 2\nnew_window_position_y = (desktop_height - window_height) / 2\npopen2.popen2('wmctrl -r :ACTIVE: -e 0,%d,%d,-1,-1' % (\n new_window_position_x, new_window_position_y))\n",
"<import token>\nf, _ = popen2.popen2('wmctrl -d')\ndesktop_str = f.readline()\ndesktop_size = re.search('WA: \\\\d,\\\\d (\\\\d+)x(\\\\d+)', desktop_str)\ndesktop_width = int(desktop_size.group(1))\ndesktop_height = int(desktop_size.group(2))\nf, _ = popen2.popen2('wmctrl -r :ACTIVE: -L -G')\nwindow_str = f.readline()\nwindow_size = re.search(' +\\\\d+ *\\\\d+ *\\\\d+ *(\\\\d+) *(\\\\d+)', window_str)\nwindow_width = int(window_size.group(1))\nwindow_height = int(window_size.group(2))\nnew_window_position_x = (desktop_width - window_width) / 2\nnew_window_position_y = (desktop_height - window_height) / 2\npopen2.popen2('wmctrl -r :ACTIVE: -e 0,%d,%d,-1,-1' % (\n new_window_position_x, new_window_position_y))\n",
"<import token>\n<assignment token>\npopen2.popen2('wmctrl -r :ACTIVE: -e 0,%d,%d,-1,-1' % (\n new_window_position_x, new_window_position_y))\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
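To make the two regular expressions above concrete: here is what the first one extracts from a representative `wmctrl -d` line (the sample line is illustrative; real output varies by window manager):

import re

desktop_line = '0  * DG: 1920x1080  VP: 0,0  WA: 0,0 1920x1055  Desktop'
m = re.search(r'WA: \d,\d (\d+)x(\d+)', desktop_line)
print(m.group(1), m.group(2))   # 1920 1055 -- usable work-area size

# Note the pattern accepts only single-digit work-area offsets; a line with
# 'WA: 0,25 ...' (e.g. under a top panel) would not match.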
99,349 |
b4e422a5f5240558b6d0471c11dd4978ca4c6a2a
|
from collections import deque  # deque was used below without an import


def shortest_paths_from(from_user):
    # maps each reachable user id to the list of *all* shortest paths to it
    shortest_paths_to = {from_user['id']: [[]]}
frontier = deque(
(from_user, friend)
for friend
in from_user['friends']
)
while frontier:
prev_user, user = frontier.popleft()
user_id = user['id']
paths_to_prev_user = shortest_paths_to[prev_user['id']]
new_paths_to_user = [
path + [user_id]
for path
in paths_to_prev_user
]
old_paths_to_user = shortest_paths_to.get(user_id, [])
if old_paths_to_user:
min_path_length = len(old_paths_to_user[0])
else:
min_path_length = float('inf')
filtered_new_paths_to_user = [
path
for path in new_paths_to_user
if len(path) <= min_path_length
and path not in old_paths_to_user
]
shortest_paths_to[user_id] = old_paths_to_user + filtered_new_paths_to_user
frontier.extend(
(user, friend)
for friend in user['friends']
if friend['id'] not in shortest_paths_to
)
return shortest_paths_to
|
[
"def shortest_paths_from(from_user):\n\n shortest_paths_to = { from_user['id']: [[]] }\n\n frontier = deque(\n (from_user, friend)\n for friend\n in from_user['friends']\n )\n\n while frontier:\n\n prev_user, user = frontier.popleft()\n user_id = user['id']\n\n paths_to_prev_user = shortest_paths_to[prev_user['id']]\n new_paths_to_user = [\n path + [user_id]\n for path\n in paths_to_prev_user\n ]\n\n old_paths_to_user = shortest_paths_to.get(user_id, [])\n\n if old_paths_to_user:\n min_path_length = len(old_paths_to_user[0])\n else:\n min_path_length = float('inf')\n \n filtered_new_paths_to_user = [\n path\n for path in new_paths_to_user\n if len(path) <= min_path_length\n and path not in old_paths_to_user\n ]\n\n shortest_paths_to[user_id] = old_paths_to_user + filtered_new_paths_to_user\n\n frontier.extend(\n (user, friend)\n for friend in user['friends']\n if friend['id'] not in shortest_paths_to\n )\n \n return shortest_paths_to",
"def shortest_paths_from(from_user):\n shortest_paths_to = {from_user['id']: [[]]}\n frontier = deque((from_user, friend) for friend in from_user['friends'])\n while frontier:\n prev_user, user = frontier.popleft()\n user_id = user['id']\n paths_to_prev_user = shortest_paths_to[prev_user['id']]\n new_paths_to_user = [(path + [user_id]) for path in paths_to_prev_user]\n old_paths_to_user = shortest_paths_to.get(user_id, [])\n if old_paths_to_user:\n min_path_length = len(old_paths_to_user[0])\n else:\n min_path_length = float('inf')\n filtered_new_paths_to_user = [path for path in new_paths_to_user if\n len(path) <= min_path_length and path not in old_paths_to_user]\n shortest_paths_to[user_id\n ] = old_paths_to_user + filtered_new_paths_to_user\n frontier.extend((user, friend) for friend in user['friends'] if \n friend['id'] not in shortest_paths_to)\n return shortest_paths_to\n",
"<function token>\n"
] | false |
99,350 |
3ae955975a5855d198be773221b898a437f49b4b
|
from datetime import date
from unittest import TestCase, mock
from requests import HTTPError, codes
from basketball_reference_web_scraper.client import players_advanced_season_totals
from basketball_reference_web_scraper.data import OutputType, OutputWriteOption
from basketball_reference_web_scraper.errors import InvalidSeason
class TestPlayerAdvancedSeasonTotals(TestCase):
def test_players_advanced_season_totals(self):
result = players_advanced_season_totals(season_end_year=2018)
self.assertIsNotNone(result)
def test_players_advanced_season_totals_json(self):
result = players_advanced_season_totals(season_end_year=2018, output_type=OutputType.JSON)
self.assertIsNotNone(result)
def test_players_advanced_season_totals_csv(self):
players_advanced_season_totals(season_end_year=2018, output_type=OutputType.CSV, output_file_path="./player_advanced_season_totals_2019.csv")
def test_players_advanced_season_totals_csv_append(self):
players_advanced_season_totals(season_end_year=2018, output_type=OutputType.CSV, output_file_path="./player_advanced_season_totals_2019.csv", output_write_option=OutputWriteOption.APPEND)
def test_2001_players_advanced_season_totals_csv(self):
players_advanced_season_totals(season_end_year=2001, output_type=OutputType.CSV, output_file_path="./player_advanced_season_totals_2001.csv", output_write_option=OutputWriteOption.WRITE)
def test_future_season_raises_invalid_season(self):
current_year = date.today().year
future_year = current_year + 10
expected_message = "Season end year of {future_year} is invalid".format(future_year=future_year)
self.assertRaisesRegex(InvalidSeason, expected_message, players_advanced_season_totals, season_end_year=future_year)
@mock.patch("basketball_reference_web_scraper.client.http_client")
def test_not_found_raises_invalid_season(self, mocked_http_client):
end_year = "jaebaebae"
expected_message = "Season end year of {end_year} is invalid".format(end_year=end_year)
mocked_http_client.players_advanced_season_totals.side_effect = HTTPError(response=mock.Mock(status_code=codes.not_found))
self.assertRaisesRegex(InvalidSeason, expected_message, players_advanced_season_totals, season_end_year=end_year)
@mock.patch("basketball_reference_web_scraper.client.http_client")
def test_other_http_error_is_raised(self, mocked_http_client):
mocked_http_client.players_advanced_season_totals.side_effect = HTTPError(response=mock.Mock(status_code=codes.internal_server_error))
self.assertRaises(HTTPError, players_advanced_season_totals, season_end_year=2018)
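

# Usage sketch (not part of the original file): these are standard unittest
# cases, so the module can be run directly; note that several of them make
# real network requests through the scraper client.
if __name__ == '__main__':
    import unittest
    unittest.main()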
|
[
"from datetime import date\nfrom unittest import TestCase, mock\n\nfrom requests import HTTPError, codes\n\nfrom basketball_reference_web_scraper.client import players_advanced_season_totals\nfrom basketball_reference_web_scraper.data import OutputType, OutputWriteOption\nfrom basketball_reference_web_scraper.errors import InvalidSeason\n\n\nclass TestPlayerAdvancedSeasonTotals(TestCase):\n\n def test_players_advanced_season_totals(self):\n result = players_advanced_season_totals(season_end_year=2018)\n self.assertIsNotNone(result)\n\n def test_players_advanced_season_totals_json(self):\n result = players_advanced_season_totals(season_end_year=2018, output_type=OutputType.JSON)\n self.assertIsNotNone(result)\n\n def test_players_advanced_season_totals_csv(self):\n players_advanced_season_totals(season_end_year=2018, output_type=OutputType.CSV, output_file_path=\"./player_advanced_season_totals_2019.csv\")\n\n def test_players_advanced_season_totals_csv_append(self):\n players_advanced_season_totals(season_end_year=2018, output_type=OutputType.CSV, output_file_path=\"./player_advanced_season_totals_2019.csv\", output_write_option=OutputWriteOption.APPEND)\n\n def test_2001_players_advanced_season_totals_csv(self):\n players_advanced_season_totals(season_end_year=2001, output_type=OutputType.CSV, output_file_path=\"./player_advanced_season_totals_2001.csv\", output_write_option=OutputWriteOption.WRITE)\n\n def test_future_season_raises_invalid_season(self):\n current_year = date.today().year\n future_year = current_year + 10\n expected_message = \"Season end year of {future_year} is invalid\".format(future_year=future_year)\n self.assertRaisesRegex(InvalidSeason, expected_message, players_advanced_season_totals, season_end_year=future_year)\n\n @mock.patch(\"basketball_reference_web_scraper.client.http_client\")\n def test_not_found_raises_invalid_season(self, mocked_http_client):\n end_year = \"jaebaebae\"\n expected_message = \"Season end year of {end_year} is invalid\".format(end_year=end_year)\n mocked_http_client.players_advanced_season_totals.side_effect = HTTPError(response=mock.Mock(status_code=codes.not_found))\n self.assertRaisesRegex(InvalidSeason, expected_message, players_advanced_season_totals, season_end_year=end_year)\n\n @mock.patch(\"basketball_reference_web_scraper.client.http_client\")\n def test_other_http_error_is_raised(self, mocked_http_client):\n mocked_http_client.players_advanced_season_totals.side_effect = HTTPError(response=mock.Mock(status_code=codes.internal_server_error))\n self.assertRaises(HTTPError, players_advanced_season_totals, season_end_year=2018)\n",
"from datetime import date\nfrom unittest import TestCase, mock\nfrom requests import HTTPError, codes\nfrom basketball_reference_web_scraper.client import players_advanced_season_totals\nfrom basketball_reference_web_scraper.data import OutputType, OutputWriteOption\nfrom basketball_reference_web_scraper.errors import InvalidSeason\n\n\nclass TestPlayerAdvancedSeasonTotals(TestCase):\n\n def test_players_advanced_season_totals(self):\n result = players_advanced_season_totals(season_end_year=2018)\n self.assertIsNotNone(result)\n\n def test_players_advanced_season_totals_json(self):\n result = players_advanced_season_totals(season_end_year=2018,\n output_type=OutputType.JSON)\n self.assertIsNotNone(result)\n\n def test_players_advanced_season_totals_csv(self):\n players_advanced_season_totals(season_end_year=2018, output_type=\n OutputType.CSV, output_file_path=\n './player_advanced_season_totals_2019.csv')\n\n def test_players_advanced_season_totals_csv_append(self):\n players_advanced_season_totals(season_end_year=2018, output_type=\n OutputType.CSV, output_file_path=\n './player_advanced_season_totals_2019.csv', output_write_option\n =OutputWriteOption.APPEND)\n\n def test_2001_players_advanced_season_totals_csv(self):\n players_advanced_season_totals(season_end_year=2001, output_type=\n OutputType.CSV, output_file_path=\n './player_advanced_season_totals_2001.csv', output_write_option\n =OutputWriteOption.WRITE)\n\n def test_future_season_raises_invalid_season(self):\n current_year = date.today().year\n future_year = current_year + 10\n expected_message = ('Season end year of {future_year} is invalid'.\n format(future_year=future_year))\n self.assertRaisesRegex(InvalidSeason, expected_message,\n players_advanced_season_totals, season_end_year=future_year)\n\n @mock.patch('basketball_reference_web_scraper.client.http_client')\n def test_not_found_raises_invalid_season(self, mocked_http_client):\n end_year = 'jaebaebae'\n expected_message = 'Season end year of {end_year} is invalid'.format(\n end_year=end_year)\n mocked_http_client.players_advanced_season_totals.side_effect = (\n HTTPError(response=mock.Mock(status_code=codes.not_found)))\n self.assertRaisesRegex(InvalidSeason, expected_message,\n players_advanced_season_totals, season_end_year=end_year)\n\n @mock.patch('basketball_reference_web_scraper.client.http_client')\n def test_other_http_error_is_raised(self, mocked_http_client):\n mocked_http_client.players_advanced_season_totals.side_effect = (\n HTTPError(response=mock.Mock(status_code=codes.\n internal_server_error)))\n self.assertRaises(HTTPError, players_advanced_season_totals,\n season_end_year=2018)\n",
"<import token>\n\n\nclass TestPlayerAdvancedSeasonTotals(TestCase):\n\n def test_players_advanced_season_totals(self):\n result = players_advanced_season_totals(season_end_year=2018)\n self.assertIsNotNone(result)\n\n def test_players_advanced_season_totals_json(self):\n result = players_advanced_season_totals(season_end_year=2018,\n output_type=OutputType.JSON)\n self.assertIsNotNone(result)\n\n def test_players_advanced_season_totals_csv(self):\n players_advanced_season_totals(season_end_year=2018, output_type=\n OutputType.CSV, output_file_path=\n './player_advanced_season_totals_2019.csv')\n\n def test_players_advanced_season_totals_csv_append(self):\n players_advanced_season_totals(season_end_year=2018, output_type=\n OutputType.CSV, output_file_path=\n './player_advanced_season_totals_2019.csv', output_write_option\n =OutputWriteOption.APPEND)\n\n def test_2001_players_advanced_season_totals_csv(self):\n players_advanced_season_totals(season_end_year=2001, output_type=\n OutputType.CSV, output_file_path=\n './player_advanced_season_totals_2001.csv', output_write_option\n =OutputWriteOption.WRITE)\n\n def test_future_season_raises_invalid_season(self):\n current_year = date.today().year\n future_year = current_year + 10\n expected_message = ('Season end year of {future_year} is invalid'.\n format(future_year=future_year))\n self.assertRaisesRegex(InvalidSeason, expected_message,\n players_advanced_season_totals, season_end_year=future_year)\n\n @mock.patch('basketball_reference_web_scraper.client.http_client')\n def test_not_found_raises_invalid_season(self, mocked_http_client):\n end_year = 'jaebaebae'\n expected_message = 'Season end year of {end_year} is invalid'.format(\n end_year=end_year)\n mocked_http_client.players_advanced_season_totals.side_effect = (\n HTTPError(response=mock.Mock(status_code=codes.not_found)))\n self.assertRaisesRegex(InvalidSeason, expected_message,\n players_advanced_season_totals, season_end_year=end_year)\n\n @mock.patch('basketball_reference_web_scraper.client.http_client')\n def test_other_http_error_is_raised(self, mocked_http_client):\n mocked_http_client.players_advanced_season_totals.side_effect = (\n HTTPError(response=mock.Mock(status_code=codes.\n internal_server_error)))\n self.assertRaises(HTTPError, players_advanced_season_totals,\n season_end_year=2018)\n",
"<import token>\n\n\nclass TestPlayerAdvancedSeasonTotals(TestCase):\n\n def test_players_advanced_season_totals(self):\n result = players_advanced_season_totals(season_end_year=2018)\n self.assertIsNotNone(result)\n\n def test_players_advanced_season_totals_json(self):\n result = players_advanced_season_totals(season_end_year=2018,\n output_type=OutputType.JSON)\n self.assertIsNotNone(result)\n\n def test_players_advanced_season_totals_csv(self):\n players_advanced_season_totals(season_end_year=2018, output_type=\n OutputType.CSV, output_file_path=\n './player_advanced_season_totals_2019.csv')\n\n def test_players_advanced_season_totals_csv_append(self):\n players_advanced_season_totals(season_end_year=2018, output_type=\n OutputType.CSV, output_file_path=\n './player_advanced_season_totals_2019.csv', output_write_option\n =OutputWriteOption.APPEND)\n\n def test_2001_players_advanced_season_totals_csv(self):\n players_advanced_season_totals(season_end_year=2001, output_type=\n OutputType.CSV, output_file_path=\n './player_advanced_season_totals_2001.csv', output_write_option\n =OutputWriteOption.WRITE)\n\n def test_future_season_raises_invalid_season(self):\n current_year = date.today().year\n future_year = current_year + 10\n expected_message = ('Season end year of {future_year} is invalid'.\n format(future_year=future_year))\n self.assertRaisesRegex(InvalidSeason, expected_message,\n players_advanced_season_totals, season_end_year=future_year)\n <function token>\n\n @mock.patch('basketball_reference_web_scraper.client.http_client')\n def test_other_http_error_is_raised(self, mocked_http_client):\n mocked_http_client.players_advanced_season_totals.side_effect = (\n HTTPError(response=mock.Mock(status_code=codes.\n internal_server_error)))\n self.assertRaises(HTTPError, players_advanced_season_totals,\n season_end_year=2018)\n",
"<import token>\n\n\nclass TestPlayerAdvancedSeasonTotals(TestCase):\n <function token>\n\n def test_players_advanced_season_totals_json(self):\n result = players_advanced_season_totals(season_end_year=2018,\n output_type=OutputType.JSON)\n self.assertIsNotNone(result)\n\n def test_players_advanced_season_totals_csv(self):\n players_advanced_season_totals(season_end_year=2018, output_type=\n OutputType.CSV, output_file_path=\n './player_advanced_season_totals_2019.csv')\n\n def test_players_advanced_season_totals_csv_append(self):\n players_advanced_season_totals(season_end_year=2018, output_type=\n OutputType.CSV, output_file_path=\n './player_advanced_season_totals_2019.csv', output_write_option\n =OutputWriteOption.APPEND)\n\n def test_2001_players_advanced_season_totals_csv(self):\n players_advanced_season_totals(season_end_year=2001, output_type=\n OutputType.CSV, output_file_path=\n './player_advanced_season_totals_2001.csv', output_write_option\n =OutputWriteOption.WRITE)\n\n def test_future_season_raises_invalid_season(self):\n current_year = date.today().year\n future_year = current_year + 10\n expected_message = ('Season end year of {future_year} is invalid'.\n format(future_year=future_year))\n self.assertRaisesRegex(InvalidSeason, expected_message,\n players_advanced_season_totals, season_end_year=future_year)\n <function token>\n\n @mock.patch('basketball_reference_web_scraper.client.http_client')\n def test_other_http_error_is_raised(self, mocked_http_client):\n mocked_http_client.players_advanced_season_totals.side_effect = (\n HTTPError(response=mock.Mock(status_code=codes.\n internal_server_error)))\n self.assertRaises(HTTPError, players_advanced_season_totals,\n season_end_year=2018)\n",
"<import token>\n\n\nclass TestPlayerAdvancedSeasonTotals(TestCase):\n <function token>\n\n def test_players_advanced_season_totals_json(self):\n result = players_advanced_season_totals(season_end_year=2018,\n output_type=OutputType.JSON)\n self.assertIsNotNone(result)\n\n def test_players_advanced_season_totals_csv(self):\n players_advanced_season_totals(season_end_year=2018, output_type=\n OutputType.CSV, output_file_path=\n './player_advanced_season_totals_2019.csv')\n\n def test_players_advanced_season_totals_csv_append(self):\n players_advanced_season_totals(season_end_year=2018, output_type=\n OutputType.CSV, output_file_path=\n './player_advanced_season_totals_2019.csv', output_write_option\n =OutputWriteOption.APPEND)\n\n def test_2001_players_advanced_season_totals_csv(self):\n players_advanced_season_totals(season_end_year=2001, output_type=\n OutputType.CSV, output_file_path=\n './player_advanced_season_totals_2001.csv', output_write_option\n =OutputWriteOption.WRITE)\n\n def test_future_season_raises_invalid_season(self):\n current_year = date.today().year\n future_year = current_year + 10\n expected_message = ('Season end year of {future_year} is invalid'.\n format(future_year=future_year))\n self.assertRaisesRegex(InvalidSeason, expected_message,\n players_advanced_season_totals, season_end_year=future_year)\n <function token>\n <function token>\n",
"<import token>\n\n\nclass TestPlayerAdvancedSeasonTotals(TestCase):\n <function token>\n\n def test_players_advanced_season_totals_json(self):\n result = players_advanced_season_totals(season_end_year=2018,\n output_type=OutputType.JSON)\n self.assertIsNotNone(result)\n\n def test_players_advanced_season_totals_csv(self):\n players_advanced_season_totals(season_end_year=2018, output_type=\n OutputType.CSV, output_file_path=\n './player_advanced_season_totals_2019.csv')\n <function token>\n\n def test_2001_players_advanced_season_totals_csv(self):\n players_advanced_season_totals(season_end_year=2001, output_type=\n OutputType.CSV, output_file_path=\n './player_advanced_season_totals_2001.csv', output_write_option\n =OutputWriteOption.WRITE)\n\n def test_future_season_raises_invalid_season(self):\n current_year = date.today().year\n future_year = current_year + 10\n expected_message = ('Season end year of {future_year} is invalid'.\n format(future_year=future_year))\n self.assertRaisesRegex(InvalidSeason, expected_message,\n players_advanced_season_totals, season_end_year=future_year)\n <function token>\n <function token>\n",
"<import token>\n\n\nclass TestPlayerAdvancedSeasonTotals(TestCase):\n <function token>\n <function token>\n\n def test_players_advanced_season_totals_csv(self):\n players_advanced_season_totals(season_end_year=2018, output_type=\n OutputType.CSV, output_file_path=\n './player_advanced_season_totals_2019.csv')\n <function token>\n\n def test_2001_players_advanced_season_totals_csv(self):\n players_advanced_season_totals(season_end_year=2001, output_type=\n OutputType.CSV, output_file_path=\n './player_advanced_season_totals_2001.csv', output_write_option\n =OutputWriteOption.WRITE)\n\n def test_future_season_raises_invalid_season(self):\n current_year = date.today().year\n future_year = current_year + 10\n expected_message = ('Season end year of {future_year} is invalid'.\n format(future_year=future_year))\n self.assertRaisesRegex(InvalidSeason, expected_message,\n players_advanced_season_totals, season_end_year=future_year)\n <function token>\n <function token>\n",
"<import token>\n\n\nclass TestPlayerAdvancedSeasonTotals(TestCase):\n <function token>\n <function token>\n\n def test_players_advanced_season_totals_csv(self):\n players_advanced_season_totals(season_end_year=2018, output_type=\n OutputType.CSV, output_file_path=\n './player_advanced_season_totals_2019.csv')\n <function token>\n <function token>\n\n def test_future_season_raises_invalid_season(self):\n current_year = date.today().year\n future_year = current_year + 10\n expected_message = ('Season end year of {future_year} is invalid'.\n format(future_year=future_year))\n self.assertRaisesRegex(InvalidSeason, expected_message,\n players_advanced_season_totals, season_end_year=future_year)\n <function token>\n <function token>\n",
"<import token>\n\n\nclass TestPlayerAdvancedSeasonTotals(TestCase):\n <function token>\n <function token>\n\n def test_players_advanced_season_totals_csv(self):\n players_advanced_season_totals(season_end_year=2018, output_type=\n OutputType.CSV, output_file_path=\n './player_advanced_season_totals_2019.csv')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\nclass TestPlayerAdvancedSeasonTotals(TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
99,351 |
c3d7b04b5c5062ed83bab6a698e6af4ac445b65e
|
import numpy as np
import pandas as pd


class DatasetProfiling:

    def __init__(self, dataset):
        # Pipe-delimited source file, Turkish (ISO 8859-9) encoding.
        self.df = pd.read_csv(dataset, sep='|', encoding='iso8859_9', error_bad_lines=False, low_memory=False)
def dataset_abstract(self):
raw_data=self.df
df_prof=pd.DataFrame(columns=["column name","count","number of unique","number of null value","is binary?","number of 1","data type","fill rate","range","most_freq","variance", "modele_girer_mi ","comments"],index=np.arange(0,len(raw_data.columns)))
columns = raw_data.columns
ctr=0
var_values=raw_data.var()
max_values=raw_data.max(axis=0)
min_values=raw_data.min(axis=0)
for column in columns:
df_prof["column name"][ctr]=column
df_prof["count"][ctr]=raw_data[column].count()
df_prof["number of unique"][ctr]=raw_data[column].nunique()
df_prof["number of null value"][ctr] = raw_data[column].isnull().sum()
df_prof["is binary?"][ctr]=False
df_prof["number of 1"][ctr]=0
df_prof["data type"][ctr] = str(raw_data[column].dtype).split('(')[0]
df_prof["fill rate"][ctr] = raw_data[column].count()/len(raw_data)
####
if column in var_values.index :
df_prof['variance'][ctr]= var_values[column]
if column in min_values.index:
df_prof['range'][ctr]= "[{} - {}]".format( min_values[column] , max_values[column] )
try:
                df_prof['most_freq'][ctr]=raw_data[column].value_counts().index[0]  # record the column's mode (most frequent value)
except Exception as e:
pass
###
if raw_data[column].dropna().value_counts().index.isin([0,1]).all()==True and raw_data[column].nunique()==2:
df_prof["is binary?"][ctr]=True
df_prof["number of 1"][ctr]=(raw_data[column]==1).sum()
ctr+=1
return df_prof
def important_features(self):
raw_data=self.df
df_impt=pd.DataFrame(columns=["column name","count","number of unique","number of null value","is binary?","number of 1","data type","fill rate","range","most_freq","variance", "modele_girer_mi ","comments"],index=np.arange(0,len(raw_data.columns)))
columns = raw_data.columns
ctr=0
var_values=raw_data.var()
max_values=raw_data.max(axis=0)
min_values=raw_data.min(axis=0)
for column in columns:
if (raw_data[column].nunique() not in [0,1]):
df_impt["column name"][ctr]=column
df_impt["count"][ctr]=raw_data[column].count()
df_impt["number of unique"][ctr]=raw_data[column].nunique()
df_impt["number of null value"][ctr] = raw_data[column].isnull().sum()
df_impt["is binary?"][ctr]=False
df_impt["number of 1"][ctr]=0
df_impt["data type"][ctr] = str(raw_data[column].dtype).split('(')[0]
df_impt["fill rate"][ctr] = raw_data[column].count()/len(raw_data)
####
if column in var_values.index :
df_impt['variance'][ctr]= var_values[column]
if column in min_values.index:
df_impt['range'][ctr]= "[{} - {}]".format( min_values[column] , max_values[column] )
try:
                    df_impt['most_freq'][ctr]=raw_data[column].value_counts().index[0]  # record the column's mode (most frequent value)
except Exception as e:
pass
###
if raw_data[column].dropna().value_counts().index.isin([0,1]).all()==True and raw_data[column].nunique()==2:
df_impt["is binary?"][ctr]=True
df_impt["number of 1"][ctr]=(raw_data[column]==1).sum()
else:
pass
ctr+=1
return df_impt.dropna(how="all",axis=0).reset_index(drop=True)
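

# Minimal usage sketch (not from the original source): 'sample.csv' is a
# hypothetical pipe-delimited file matching the read_csv call in __init__.
if __name__ == '__main__':
    profiling = DatasetProfiling('sample.csv')
    print(profiling.dataset_abstract())
    print(profiling.important_features())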
|
[
"class DatasetProfiling:\n \n def __init__(self,dataset):\n self.df=pd.read_csv(dataset, sep='|',encoding='iso8859_9', error_bad_lines=False,low_memory=False)\n \n def dataset_abstract(self):\n raw_data=self.df\n df_prof=pd.DataFrame(columns=[\"column name\",\"count\",\"number of unique\",\"number of null value\",\"is binary?\",\"number of 1\",\"data type\",\"fill rate\",\"range\",\"most_freq\",\"variance\", \"modele_girer_mi \",\"comments\"],index=np.arange(0,len(raw_data.columns)))\n columns = raw_data.columns\n ctr=0\n var_values=raw_data.var()\n max_values=raw_data.max(axis=0)\n min_values=raw_data.min(axis=0)\n for column in columns:\n df_prof[\"column name\"][ctr]=column\n df_prof[\"count\"][ctr]=raw_data[column].count()\n df_prof[\"number of unique\"][ctr]=raw_data[column].nunique()\n df_prof[\"number of null value\"][ctr] = raw_data[column].isnull().sum()\n df_prof[\"is binary?\"][ctr]=False\n df_prof[\"number of 1\"][ctr]=0\n df_prof[\"data type\"][ctr] = str(raw_data[column].dtype).split('(')[0]\n df_prof[\"fill rate\"][ctr] = raw_data[column].count()/len(raw_data)\n ####\n if column in var_values.index :\n df_prof['variance'][ctr]= var_values[column]\n if column in min_values.index:\n df_prof['range'][ctr]= \"[{} - {}]\".format( min_values[column] , max_values[column] )\n try:\n df_prof['most_freq'][ctr]=raw_data[column].value_counts().index[0] #column'un mode'unu kaydeder\n except Exception as e:\n pass\n ###\n if raw_data[column].dropna().value_counts().index.isin([0,1]).all()==True and raw_data[column].nunique()==2:\n df_prof[\"is binary?\"][ctr]=True\n df_prof[\"number of 1\"][ctr]=(raw_data[column]==1).sum()\n ctr+=1\n return df_prof\n \n def important_features(self):\n raw_data=self.df\n df_impt=pd.DataFrame(columns=[\"column name\",\"count\",\"number of unique\",\"number of null value\",\"is binary?\",\"number of 1\",\"data type\",\"fill rate\",\"range\",\"most_freq\",\"variance\", \"modele_girer_mi \",\"comments\"],index=np.arange(0,len(raw_data.columns)))\n columns = raw_data.columns\n ctr=0\n var_values=raw_data.var()\n max_values=raw_data.max(axis=0)\n min_values=raw_data.min(axis=0)\n for column in columns:\n if (raw_data[column].nunique() not in [0,1]):\n df_impt[\"column name\"][ctr]=column\n df_impt[\"count\"][ctr]=raw_data[column].count()\n df_impt[\"number of unique\"][ctr]=raw_data[column].nunique()\n df_impt[\"number of null value\"][ctr] = raw_data[column].isnull().sum()\n df_impt[\"is binary?\"][ctr]=False\n df_impt[\"number of 1\"][ctr]=0\n df_impt[\"data type\"][ctr] = str(raw_data[column].dtype).split('(')[0]\n df_impt[\"fill rate\"][ctr] = raw_data[column].count()/len(raw_data)\n ####\n if column in var_values.index :\n df_impt['variance'][ctr]= var_values[column]\n if column in min_values.index:\n df_impt['range'][ctr]= \"[{} - {}]\".format( min_values[column] , max_values[column] )\n try:\n df_impt['most_freq'][ctr]=raw_data[column].value_counts().index[0] #column'un mode'unu kaydeder\n except Exception as e:\n pass\n ###\n if raw_data[column].dropna().value_counts().index.isin([0,1]).all()==True and raw_data[column].nunique()==2:\n df_impt[\"is binary?\"][ctr]=True\n df_impt[\"number of 1\"][ctr]=(raw_data[column]==1).sum()\n else:\n pass\n ctr+=1\n return df_impt.dropna(how=\"all\",axis=0).reset_index(drop=True)\n \n",
"class DatasetProfiling:\n\n def __init__(self, dataset):\n self.df = pd.read_csv(dataset, sep='|', encoding='iso8859_9',\n error_bad_lines=False, low_memory=False)\n\n def dataset_abstract(self):\n raw_data = self.df\n df_prof = pd.DataFrame(columns=['column name', 'count',\n 'number of unique', 'number of null value', 'is binary?',\n 'number of 1', 'data type', 'fill rate', 'range', 'most_freq',\n 'variance', 'modele_girer_mi ', 'comments'], index=np.arange(0,\n len(raw_data.columns)))\n columns = raw_data.columns\n ctr = 0\n var_values = raw_data.var()\n max_values = raw_data.max(axis=0)\n min_values = raw_data.min(axis=0)\n for column in columns:\n df_prof['column name'][ctr] = column\n df_prof['count'][ctr] = raw_data[column].count()\n df_prof['number of unique'][ctr] = raw_data[column].nunique()\n df_prof['number of null value'][ctr] = raw_data[column].isnull(\n ).sum()\n df_prof['is binary?'][ctr] = False\n df_prof['number of 1'][ctr] = 0\n df_prof['data type'][ctr] = str(raw_data[column].dtype).split('(')[\n 0]\n df_prof['fill rate'][ctr] = raw_data[column].count() / len(raw_data\n )\n if column in var_values.index:\n df_prof['variance'][ctr] = var_values[column]\n if column in min_values.index:\n df_prof['range'][ctr] = '[{} - {}]'.format(min_values[\n column], max_values[column])\n try:\n df_prof['most_freq'][ctr] = raw_data[column].value_counts(\n ).index[0]\n except Exception as e:\n pass\n if raw_data[column].dropna().value_counts().index.isin([0, 1]).all(\n ) == True and raw_data[column].nunique() == 2:\n df_prof['is binary?'][ctr] = True\n df_prof['number of 1'][ctr] = (raw_data[column] == 1).sum()\n ctr += 1\n return df_prof\n\n def important_features(self):\n raw_data = self.df\n df_impt = pd.DataFrame(columns=['column name', 'count',\n 'number of unique', 'number of null value', 'is binary?',\n 'number of 1', 'data type', 'fill rate', 'range', 'most_freq',\n 'variance', 'modele_girer_mi ', 'comments'], index=np.arange(0,\n len(raw_data.columns)))\n columns = raw_data.columns\n ctr = 0\n var_values = raw_data.var()\n max_values = raw_data.max(axis=0)\n min_values = raw_data.min(axis=0)\n for column in columns:\n if raw_data[column].nunique() not in [0, 1]:\n df_impt['column name'][ctr] = column\n df_impt['count'][ctr] = raw_data[column].count()\n df_impt['number of unique'][ctr] = raw_data[column].nunique()\n df_impt['number of null value'][ctr] = raw_data[column].isnull(\n ).sum()\n df_impt['is binary?'][ctr] = False\n df_impt['number of 1'][ctr] = 0\n df_impt['data type'][ctr] = str(raw_data[column].dtype).split(\n '(')[0]\n df_impt['fill rate'][ctr] = raw_data[column].count() / len(\n raw_data)\n if column in var_values.index:\n df_impt['variance'][ctr] = var_values[column]\n if column in min_values.index:\n df_impt['range'][ctr] = '[{} - {}]'.format(min_values[\n column], max_values[column])\n try:\n df_impt['most_freq'][ctr] = raw_data[column].value_counts(\n ).index[0]\n except Exception as e:\n pass\n if raw_data[column].dropna().value_counts().index.isin([0, 1]\n ).all() == True and raw_data[column].nunique() == 2:\n df_impt['is binary?'][ctr] = True\n df_impt['number of 1'][ctr] = (raw_data[column] == 1).sum()\n else:\n pass\n ctr += 1\n return df_impt.dropna(how='all', axis=0).reset_index(drop=True)\n",
"class DatasetProfiling:\n <function token>\n\n def dataset_abstract(self):\n raw_data = self.df\n df_prof = pd.DataFrame(columns=['column name', 'count',\n 'number of unique', 'number of null value', 'is binary?',\n 'number of 1', 'data type', 'fill rate', 'range', 'most_freq',\n 'variance', 'modele_girer_mi ', 'comments'], index=np.arange(0,\n len(raw_data.columns)))\n columns = raw_data.columns\n ctr = 0\n var_values = raw_data.var()\n max_values = raw_data.max(axis=0)\n min_values = raw_data.min(axis=0)\n for column in columns:\n df_prof['column name'][ctr] = column\n df_prof['count'][ctr] = raw_data[column].count()\n df_prof['number of unique'][ctr] = raw_data[column].nunique()\n df_prof['number of null value'][ctr] = raw_data[column].isnull(\n ).sum()\n df_prof['is binary?'][ctr] = False\n df_prof['number of 1'][ctr] = 0\n df_prof['data type'][ctr] = str(raw_data[column].dtype).split('(')[\n 0]\n df_prof['fill rate'][ctr] = raw_data[column].count() / len(raw_data\n )\n if column in var_values.index:\n df_prof['variance'][ctr] = var_values[column]\n if column in min_values.index:\n df_prof['range'][ctr] = '[{} - {}]'.format(min_values[\n column], max_values[column])\n try:\n df_prof['most_freq'][ctr] = raw_data[column].value_counts(\n ).index[0]\n except Exception as e:\n pass\n if raw_data[column].dropna().value_counts().index.isin([0, 1]).all(\n ) == True and raw_data[column].nunique() == 2:\n df_prof['is binary?'][ctr] = True\n df_prof['number of 1'][ctr] = (raw_data[column] == 1).sum()\n ctr += 1\n return df_prof\n\n def important_features(self):\n raw_data = self.df\n df_impt = pd.DataFrame(columns=['column name', 'count',\n 'number of unique', 'number of null value', 'is binary?',\n 'number of 1', 'data type', 'fill rate', 'range', 'most_freq',\n 'variance', 'modele_girer_mi ', 'comments'], index=np.arange(0,\n len(raw_data.columns)))\n columns = raw_data.columns\n ctr = 0\n var_values = raw_data.var()\n max_values = raw_data.max(axis=0)\n min_values = raw_data.min(axis=0)\n for column in columns:\n if raw_data[column].nunique() not in [0, 1]:\n df_impt['column name'][ctr] = column\n df_impt['count'][ctr] = raw_data[column].count()\n df_impt['number of unique'][ctr] = raw_data[column].nunique()\n df_impt['number of null value'][ctr] = raw_data[column].isnull(\n ).sum()\n df_impt['is binary?'][ctr] = False\n df_impt['number of 1'][ctr] = 0\n df_impt['data type'][ctr] = str(raw_data[column].dtype).split(\n '(')[0]\n df_impt['fill rate'][ctr] = raw_data[column].count() / len(\n raw_data)\n if column in var_values.index:\n df_impt['variance'][ctr] = var_values[column]\n if column in min_values.index:\n df_impt['range'][ctr] = '[{} - {}]'.format(min_values[\n column], max_values[column])\n try:\n df_impt['most_freq'][ctr] = raw_data[column].value_counts(\n ).index[0]\n except Exception as e:\n pass\n if raw_data[column].dropna().value_counts().index.isin([0, 1]\n ).all() == True and raw_data[column].nunique() == 2:\n df_impt['is binary?'][ctr] = True\n df_impt['number of 1'][ctr] = (raw_data[column] == 1).sum()\n else:\n pass\n ctr += 1\n return df_impt.dropna(how='all', axis=0).reset_index(drop=True)\n",
"class DatasetProfiling:\n <function token>\n <function token>\n\n def important_features(self):\n raw_data = self.df\n df_impt = pd.DataFrame(columns=['column name', 'count',\n 'number of unique', 'number of null value', 'is binary?',\n 'number of 1', 'data type', 'fill rate', 'range', 'most_freq',\n 'variance', 'modele_girer_mi ', 'comments'], index=np.arange(0,\n len(raw_data.columns)))\n columns = raw_data.columns\n ctr = 0\n var_values = raw_data.var()\n max_values = raw_data.max(axis=0)\n min_values = raw_data.min(axis=0)\n for column in columns:\n if raw_data[column].nunique() not in [0, 1]:\n df_impt['column name'][ctr] = column\n df_impt['count'][ctr] = raw_data[column].count()\n df_impt['number of unique'][ctr] = raw_data[column].nunique()\n df_impt['number of null value'][ctr] = raw_data[column].isnull(\n ).sum()\n df_impt['is binary?'][ctr] = False\n df_impt['number of 1'][ctr] = 0\n df_impt['data type'][ctr] = str(raw_data[column].dtype).split(\n '(')[0]\n df_impt['fill rate'][ctr] = raw_data[column].count() / len(\n raw_data)\n if column in var_values.index:\n df_impt['variance'][ctr] = var_values[column]\n if column in min_values.index:\n df_impt['range'][ctr] = '[{} - {}]'.format(min_values[\n column], max_values[column])\n try:\n df_impt['most_freq'][ctr] = raw_data[column].value_counts(\n ).index[0]\n except Exception as e:\n pass\n if raw_data[column].dropna().value_counts().index.isin([0, 1]\n ).all() == True and raw_data[column].nunique() == 2:\n df_impt['is binary?'][ctr] = True\n df_impt['number of 1'][ctr] = (raw_data[column] == 1).sum()\n else:\n pass\n ctr += 1\n return df_impt.dropna(how='all', axis=0).reset_index(drop=True)\n",
"class DatasetProfiling:\n <function token>\n <function token>\n <function token>\n",
"<class token>\n"
] | false |
99,352 |
62ba0d85ea73402cdd32d088a6827316ea311ac4
|
""" Group all parts of the bot"""
from sc2.ids.ability_id import AbilityId
from sc2.ids.unit_typeid import UnitTypeId
from sc2.ids.upgrade_id import UpgradeId
from sc2.bot_ai import BotAI
class Mtsbot(BotAI):
""" mtsbot"""
async def build_pool(self):
""" Build pool logic
        - improvements possible -> placement can be improved"""
pool = UnitTypeId.SPAWNINGPOOL # to save line breaks
if not self.structures(pool).ready and not self.already_pending(pool):
await self.build(pool, self.start_location.towards(self.game_info.map_center, distance=5))
async def build_extractor(self):
""" Build extractor logic
- improvements possible -> None that I can think of
        - warnings -> Need the PR on the API to be accepted or it won't work using self.build();
self.do(drone.build()) would have to be used instead"""
if (
not self.gas_buildings
and self.already_pending(UnitTypeId.SPAWNINGPOOL)
and not self.already_pending(UnitTypeId.EXTRACTOR)
):
await self.build(UnitTypeId.EXTRACTOR, self.vespene_geyser.closest_to(self.start_location))
async def queen_injection_logic(self):
""" Make queen inject logic
- improvements possible -> None that I can think of """
for queen in self.units(UnitTypeId.QUEEN):
if not queen.is_idle or queen.energy < 25:
continue
self.do(queen(AbilityId.EFFECT_INJECTLARVA, self.townhalls.closest_to(queen.position)))
async def research_zergling_speed(self):
""" Research zergling speed logic
- improvements possible -> None that I can think of """
if not self.already_pending_upgrade(UpgradeId.ZERGLINGMOVEMENTSPEED):
self.research(UpgradeId.ZERGLINGMOVEMENTSPEED)
async def attacking_logic(self):
""" Attacking logic
- improvements possible -> Add new units(later), add priority targets, add retreat logic(other function),
add micro and probably much more"""
if len(self.units(UnitTypeId.ZERGLING)) >= 6:
for zergling in self.units(UnitTypeId.ZERGLING):
self.do(zergling.attack(self.enemy_start_locations[0]))
async def train_overlord(self):
"""Train overlord logic
- improvements possible -> make amount pending scale with base amount,
make supply left constraint scale with larva amount"""
if self.supply_left < 3 and not self.already_pending(UnitTypeId.OVERLORD):
self.train(UnitTypeId.OVERLORD)
async def train_zergling(self):
"""Train zergling logic
        - improvements possible -> create constraints for when other units start to be built, based on other unit amounts"""
if self.structures(UnitTypeId.SPAWNINGPOOL).ready:
self.train(UnitTypeId.ZERGLING)
async def train_queen(self):
"""Train zergling logic
- improvements possible -> Make the queen get created preferably on non-already-assigned bases
and maybe create some extra for creep spread(don't limit it by bases)"""
if (
self.structures(UnitTypeId.SPAWNINGPOOL).ready
and len(self.units(UnitTypeId.QUEEN)) < len(self.townhalls)
and self.already_pending(UnitTypeId.QUEEN) < len(self.townhalls.ready)
):
self.train(UnitTypeId.QUEEN)
async def send_drones_to_extractor(self):
""" Send drones to extractor from minerals
        - improvements possible -> Expand it, make it trigger when the vespene - mineral ratio is too high
        (only check it when at least 2 bases are saturated), make the closer_than distance 8 instead of 10,
        also change the constraints completely (separate it later - these constraints are for the zergling speed;
        make it a separate method) and make it more general"""
if self.vespene < 100 and not self.already_pending_upgrade(UpgradeId.ZERGLINGMOVEMENTSPEED):
for extractor in self.gas_buildings:
drones_needed_to_fill_extractor = extractor.ideal_harvesters - extractor.assigned_harvesters
if drones_needed_to_fill_extractor > 0:
for drone in self.workers.closer_than(10, extractor).take(drones_needed_to_fill_extractor):
self.do(drone.gather(extractor))
async def send_drones_to_minerals(self):
""" Send drones from extractor to minerals
        - improvements possible -> Expand it, make it trigger when the mineral - vespene ratio is too high
        (only check it when at least 2 bases are saturated), make the closer_than distance 8 instead of 10,
        also change the constraints completely (separate it later - these constraints are for the zergling speed;
        make it a separate method) and make it more general"""
if self.vespene >= 100 or self.already_pending_upgrade(UpgradeId.ZERGLINGMOVEMENTSPEED):
for drone in self.workers.filter(lambda w: w.is_carrying_vespene):
self.do(drone.gather(self.mineral_field.closer_than(10, drone).closest_to(drone)))
async def on_step(self, iteration):
# Build structures
await self.build_extractor()
await self.build_pool()
# Train units
await self.train_overlord()
await self.train_queen()
await self.train_zergling()
# Research upgrades
await self.research_zergling_speed()
# Control army units
await self.attacking_logic()
await self.queen_injection_logic()
# Control workers
await self.send_drones_to_extractor()
await self.send_drones_to_minerals()
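

# Hedged run sketch (not part of the original file): launching the bot with
# python-sc2's standard entry point; the map name below is an assumption.
if __name__ == '__main__':
    from sc2 import maps
    from sc2.data import Difficulty, Race
    from sc2.main import run_game
    from sc2.player import Bot, Computer

    run_game(
        maps.get('AcropolisLE'),  # hypothetical map choice
        [Bot(Race.Zerg, Mtsbot()), Computer(Race.Terran, Difficulty.Easy)],
        realtime=False,
    )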
|
[
"\"\"\" Group all parts of the bot\"\"\"\nfrom sc2.ids.ability_id import AbilityId\nfrom sc2.ids.unit_typeid import UnitTypeId\nfrom sc2.ids.upgrade_id import UpgradeId\nfrom sc2.bot_ai import BotAI\n\n\nclass Mtsbot(BotAI):\n \"\"\" mtsbot\"\"\"\n\n async def build_pool(self):\n \"\"\" Build pool logic\n - improvements possible -> placement can be\"\"\"\n pool = UnitTypeId.SPAWNINGPOOL # to save line breaks\n if not self.structures(pool).ready and not self.already_pending(pool):\n await self.build(pool, self.start_location.towards(self.game_info.map_center, distance=5))\n\n async def build_extractor(self):\n \"\"\" Build extractor logic\n - improvements possible -> None that I can think of\n - warnings -> Need the PR on the API to be accepted or it won't work using self.build(),\n self.do(drone.build()) would have to be used instead\"\"\"\n if (\n not self.gas_buildings\n and self.already_pending(UnitTypeId.SPAWNINGPOOL)\n and not self.already_pending(UnitTypeId.EXTRACTOR)\n ):\n await self.build(UnitTypeId.EXTRACTOR, self.vespene_geyser.closest_to(self.start_location))\n\n async def queen_injection_logic(self):\n \"\"\" Make queen inject logic\n - improvements possible -> None that I can think of \"\"\"\n for queen in self.units(UnitTypeId.QUEEN):\n if not queen.is_idle or queen.energy < 25:\n continue\n self.do(queen(AbilityId.EFFECT_INJECTLARVA, self.townhalls.closest_to(queen.position)))\n\n async def research_zergling_speed(self):\n \"\"\" Research zergling speed logic\n - improvements possible -> None that I can think of \"\"\"\n if not self.already_pending_upgrade(UpgradeId.ZERGLINGMOVEMENTSPEED):\n self.research(UpgradeId.ZERGLINGMOVEMENTSPEED)\n\n async def attacking_logic(self):\n \"\"\" Attacking logic\n - improvements possible -> Add new units(later), add priority targets, add retreat logic(other function),\n add micro and probably much more\"\"\"\n if len(self.units(UnitTypeId.ZERGLING)) >= 6:\n for zergling in self.units(UnitTypeId.ZERGLING):\n self.do(zergling.attack(self.enemy_start_locations[0]))\n\n async def train_overlord(self):\n \"\"\"Train overlord logic\n - improvements possible -> make amount pending scale with base amount,\n make supply left constraint scale with larva amount\"\"\"\n if self.supply_left < 3 and not self.already_pending(UnitTypeId.OVERLORD):\n self.train(UnitTypeId.OVERLORD)\n\n async def train_zergling(self):\n \"\"\"Train zergling logic\n - improvements possible -> create constraints when other units starts to be built based on other unit amounts\"\"\"\n if self.structures(UnitTypeId.SPAWNINGPOOL).ready:\n self.train(UnitTypeId.ZERGLING)\n\n async def train_queen(self):\n \"\"\"Train zergling logic\n - improvements possible -> Make the queen get created preferably on non-already-assigned bases\n and maybe create some extra for creep spread(don't limit it by bases)\"\"\"\n if (\n self.structures(UnitTypeId.SPAWNINGPOOL).ready\n and len(self.units(UnitTypeId.QUEEN)) < len(self.townhalls)\n and self.already_pending(UnitTypeId.QUEEN) < len(self.townhalls.ready)\n ):\n self.train(UnitTypeId.QUEEN)\n\n async def send_drones_to_extractor(self):\n \"\"\" Send drones to extractor from minerals\n - improvements possible -> Expand it, make it trigger when the vespene - mineral ratio is to high\n (only check it when at least 2 bases are saturated)make the closer_than distance 8 instead of 10,\n also change the constraints completely(separate it later - this constraints are for the zergling speed,\n make it a separated method) make it more general\"\"\"\n if 
self.vespene < 100 and not self.already_pending_upgrade(UpgradeId.ZERGLINGMOVEMENTSPEED):\n for extractor in self.gas_buildings:\n drones_needed_to_fill_extractor = extractor.ideal_harvesters - extractor.assigned_harvesters\n if drones_needed_to_fill_extractor > 0:\n for drone in self.workers.closer_than(10, extractor).take(drones_needed_to_fill_extractor):\n self.do(drone.gather(extractor))\n\n async def send_drones_to_minerals(self):\n \"\"\" Send drones from extractor to minerals\n - improvements possible -> Expand it, make it trigger when the mineral - vespene ratio is to high\n (only check it when at least 2 bases are saturated)make the closer_than distance 8 instead of 10,\n also change the constraints completely(separate it later - this constraints are for the zergling speed,\n make it a separated method) make it more general\"\"\"\n if self.vespene >= 100 or self.already_pending_upgrade(UpgradeId.ZERGLINGMOVEMENTSPEED):\n for drone in self.workers.filter(lambda w: w.is_carrying_vespene):\n self.do(drone.gather(self.mineral_field.closer_than(10, drone).closest_to(drone)))\n\n async def on_step(self, iteration):\n # Build structures\n await self.build_extractor()\n await self.build_pool()\n # Train units\n await self.train_overlord()\n await self.train_queen()\n await self.train_zergling()\n # Research upgrades\n await self.research_zergling_speed()\n # Control army units\n await self.attacking_logic()\n await self.queen_injection_logic()\n # Control workers\n await self.send_drones_to_extractor()\n await self.send_drones_to_minerals()\n",
"<docstring token>\nfrom sc2.ids.ability_id import AbilityId\nfrom sc2.ids.unit_typeid import UnitTypeId\nfrom sc2.ids.upgrade_id import UpgradeId\nfrom sc2.bot_ai import BotAI\n\n\nclass Mtsbot(BotAI):\n \"\"\" mtsbot\"\"\"\n\n async def build_pool(self):\n \"\"\" Build pool logic\n - improvements possible -> placement can be\"\"\"\n pool = UnitTypeId.SPAWNINGPOOL\n if not self.structures(pool).ready and not self.already_pending(pool):\n await self.build(pool, self.start_location.towards(self.\n game_info.map_center, distance=5))\n\n async def build_extractor(self):\n \"\"\" Build extractor logic\n - improvements possible -> None that I can think of\n - warnings -> Need the PR on the API to be accepted or it won't work using self.build(),\n self.do(drone.build()) would have to be used instead\"\"\"\n if not self.gas_buildings and self.already_pending(UnitTypeId.\n SPAWNINGPOOL) and not self.already_pending(UnitTypeId.EXTRACTOR):\n await self.build(UnitTypeId.EXTRACTOR, self.vespene_geyser.\n closest_to(self.start_location))\n\n async def queen_injection_logic(self):\n \"\"\" Make queen inject logic\n - improvements possible -> None that I can think of \"\"\"\n for queen in self.units(UnitTypeId.QUEEN):\n if not queen.is_idle or queen.energy < 25:\n continue\n self.do(queen(AbilityId.EFFECT_INJECTLARVA, self.townhalls.\n closest_to(queen.position)))\n\n async def research_zergling_speed(self):\n \"\"\" Research zergling speed logic\n - improvements possible -> None that I can think of \"\"\"\n if not self.already_pending_upgrade(UpgradeId.ZERGLINGMOVEMENTSPEED):\n self.research(UpgradeId.ZERGLINGMOVEMENTSPEED)\n\n async def attacking_logic(self):\n \"\"\" Attacking logic\n - improvements possible -> Add new units(later), add priority targets, add retreat logic(other function),\n add micro and probably much more\"\"\"\n if len(self.units(UnitTypeId.ZERGLING)) >= 6:\n for zergling in self.units(UnitTypeId.ZERGLING):\n self.do(zergling.attack(self.enemy_start_locations[0]))\n\n async def train_overlord(self):\n \"\"\"Train overlord logic\n - improvements possible -> make amount pending scale with base amount,\n make supply left constraint scale with larva amount\"\"\"\n if self.supply_left < 3 and not self.already_pending(UnitTypeId.\n OVERLORD):\n self.train(UnitTypeId.OVERLORD)\n\n async def train_zergling(self):\n \"\"\"Train zergling logic\n - improvements possible -> create constraints when other units starts to be built based on other unit amounts\"\"\"\n if self.structures(UnitTypeId.SPAWNINGPOOL).ready:\n self.train(UnitTypeId.ZERGLING)\n\n async def train_queen(self):\n \"\"\"Train zergling logic\n - improvements possible -> Make the queen get created preferably on non-already-assigned bases\n and maybe create some extra for creep spread(don't limit it by bases)\"\"\"\n if self.structures(UnitTypeId.SPAWNINGPOOL).ready and len(self.\n units(UnitTypeId.QUEEN)) < len(self.townhalls\n ) and self.already_pending(UnitTypeId.QUEEN) < len(self.\n townhalls.ready):\n self.train(UnitTypeId.QUEEN)\n\n async def send_drones_to_extractor(self):\n \"\"\" Send drones to extractor from minerals\n - improvements possible -> Expand it, make it trigger when the vespene - mineral ratio is to high\n (only check it when at least 2 bases are saturated)make the closer_than distance 8 instead of 10,\n also change the constraints completely(separate it later - this constraints are for the zergling speed,\n make it a separated method) make it more general\"\"\"\n if self.vespene < 100 and not 
self.already_pending_upgrade(UpgradeId\n .ZERGLINGMOVEMENTSPEED):\n for extractor in self.gas_buildings:\n drones_needed_to_fill_extractor = (extractor.\n ideal_harvesters - extractor.assigned_harvesters)\n if drones_needed_to_fill_extractor > 0:\n for drone in self.workers.closer_than(10, extractor).take(\n drones_needed_to_fill_extractor):\n self.do(drone.gather(extractor))\n\n async def send_drones_to_minerals(self):\n \"\"\" Send drones from extractor to minerals\n - improvements possible -> Expand it, make it trigger when the mineral - vespene ratio is to high\n (only check it when at least 2 bases are saturated)make the closer_than distance 8 instead of 10,\n also change the constraints completely(separate it later - this constraints are for the zergling speed,\n make it a separated method) make it more general\"\"\"\n if self.vespene >= 100 or self.already_pending_upgrade(UpgradeId.\n ZERGLINGMOVEMENTSPEED):\n for drone in self.workers.filter(lambda w: w.is_carrying_vespene):\n self.do(drone.gather(self.mineral_field.closer_than(10,\n drone).closest_to(drone)))\n\n async def on_step(self, iteration):\n await self.build_extractor()\n await self.build_pool()\n await self.train_overlord()\n await self.train_queen()\n await self.train_zergling()\n await self.research_zergling_speed()\n await self.attacking_logic()\n await self.queen_injection_logic()\n await self.send_drones_to_extractor()\n await self.send_drones_to_minerals()\n",
"<docstring token>\n<import token>\n\n\nclass Mtsbot(BotAI):\n \"\"\" mtsbot\"\"\"\n\n async def build_pool(self):\n \"\"\" Build pool logic\n - improvements possible -> placement can be\"\"\"\n pool = UnitTypeId.SPAWNINGPOOL\n if not self.structures(pool).ready and not self.already_pending(pool):\n await self.build(pool, self.start_location.towards(self.\n game_info.map_center, distance=5))\n\n async def build_extractor(self):\n \"\"\" Build extractor logic\n - improvements possible -> None that I can think of\n - warnings -> Need the PR on the API to be accepted or it won't work using self.build(),\n self.do(drone.build()) would have to be used instead\"\"\"\n if not self.gas_buildings and self.already_pending(UnitTypeId.\n SPAWNINGPOOL) and not self.already_pending(UnitTypeId.EXTRACTOR):\n await self.build(UnitTypeId.EXTRACTOR, self.vespene_geyser.\n closest_to(self.start_location))\n\n async def queen_injection_logic(self):\n \"\"\" Make queen inject logic\n - improvements possible -> None that I can think of \"\"\"\n for queen in self.units(UnitTypeId.QUEEN):\n if not queen.is_idle or queen.energy < 25:\n continue\n self.do(queen(AbilityId.EFFECT_INJECTLARVA, self.townhalls.\n closest_to(queen.position)))\n\n async def research_zergling_speed(self):\n \"\"\" Research zergling speed logic\n - improvements possible -> None that I can think of \"\"\"\n if not self.already_pending_upgrade(UpgradeId.ZERGLINGMOVEMENTSPEED):\n self.research(UpgradeId.ZERGLINGMOVEMENTSPEED)\n\n async def attacking_logic(self):\n \"\"\" Attacking logic\n - improvements possible -> Add new units(later), add priority targets, add retreat logic(other function),\n add micro and probably much more\"\"\"\n if len(self.units(UnitTypeId.ZERGLING)) >= 6:\n for zergling in self.units(UnitTypeId.ZERGLING):\n self.do(zergling.attack(self.enemy_start_locations[0]))\n\n async def train_overlord(self):\n \"\"\"Train overlord logic\n - improvements possible -> make amount pending scale with base amount,\n make supply left constraint scale with larva amount\"\"\"\n if self.supply_left < 3 and not self.already_pending(UnitTypeId.\n OVERLORD):\n self.train(UnitTypeId.OVERLORD)\n\n async def train_zergling(self):\n \"\"\"Train zergling logic\n - improvements possible -> create constraints when other units starts to be built based on other unit amounts\"\"\"\n if self.structures(UnitTypeId.SPAWNINGPOOL).ready:\n self.train(UnitTypeId.ZERGLING)\n\n async def train_queen(self):\n \"\"\"Train zergling logic\n - improvements possible -> Make the queen get created preferably on non-already-assigned bases\n and maybe create some extra for creep spread(don't limit it by bases)\"\"\"\n if self.structures(UnitTypeId.SPAWNINGPOOL).ready and len(self.\n units(UnitTypeId.QUEEN)) < len(self.townhalls\n ) and self.already_pending(UnitTypeId.QUEEN) < len(self.\n townhalls.ready):\n self.train(UnitTypeId.QUEEN)\n\n async def send_drones_to_extractor(self):\n \"\"\" Send drones to extractor from minerals\n - improvements possible -> Expand it, make it trigger when the vespene - mineral ratio is to high\n (only check it when at least 2 bases are saturated)make the closer_than distance 8 instead of 10,\n also change the constraints completely(separate it later - this constraints are for the zergling speed,\n make it a separated method) make it more general\"\"\"\n if self.vespene < 100 and not self.already_pending_upgrade(UpgradeId\n .ZERGLINGMOVEMENTSPEED):\n for extractor in self.gas_buildings:\n drones_needed_to_fill_extractor = (extractor.\n 
ideal_harvesters - extractor.assigned_harvesters)\n if drones_needed_to_fill_extractor > 0:\n for drone in self.workers.closer_than(10, extractor).take(\n drones_needed_to_fill_extractor):\n self.do(drone.gather(extractor))\n\n async def send_drones_to_minerals(self):\n \"\"\" Send drones from extractor to minerals\n - improvements possible -> Expand it, make it trigger when the mineral - vespene ratio is to high\n (only check it when at least 2 bases are saturated)make the closer_than distance 8 instead of 10,\n also change the constraints completely(separate it later - this constraints are for the zergling speed,\n make it a separated method) make it more general\"\"\"\n if self.vespene >= 100 or self.already_pending_upgrade(UpgradeId.\n ZERGLINGMOVEMENTSPEED):\n for drone in self.workers.filter(lambda w: w.is_carrying_vespene):\n self.do(drone.gather(self.mineral_field.closer_than(10,\n drone).closest_to(drone)))\n\n async def on_step(self, iteration):\n await self.build_extractor()\n await self.build_pool()\n await self.train_overlord()\n await self.train_queen()\n await self.train_zergling()\n await self.research_zergling_speed()\n await self.attacking_logic()\n await self.queen_injection_logic()\n await self.send_drones_to_extractor()\n await self.send_drones_to_minerals()\n",
"<docstring token>\n<import token>\n\n\nclass Mtsbot(BotAI):\n <docstring token>\n\n async def build_pool(self):\n \"\"\" Build pool logic\n - improvements possible -> placement can be\"\"\"\n pool = UnitTypeId.SPAWNINGPOOL\n if not self.structures(pool).ready and not self.already_pending(pool):\n await self.build(pool, self.start_location.towards(self.\n game_info.map_center, distance=5))\n\n async def build_extractor(self):\n \"\"\" Build extractor logic\n - improvements possible -> None that I can think of\n - warnings -> Need the PR on the API to be accepted or it won't work using self.build(),\n self.do(drone.build()) would have to be used instead\"\"\"\n if not self.gas_buildings and self.already_pending(UnitTypeId.\n SPAWNINGPOOL) and not self.already_pending(UnitTypeId.EXTRACTOR):\n await self.build(UnitTypeId.EXTRACTOR, self.vespene_geyser.\n closest_to(self.start_location))\n\n async def queen_injection_logic(self):\n \"\"\" Make queen inject logic\n - improvements possible -> None that I can think of \"\"\"\n for queen in self.units(UnitTypeId.QUEEN):\n if not queen.is_idle or queen.energy < 25:\n continue\n self.do(queen(AbilityId.EFFECT_INJECTLARVA, self.townhalls.\n closest_to(queen.position)))\n\n async def research_zergling_speed(self):\n \"\"\" Research zergling speed logic\n - improvements possible -> None that I can think of \"\"\"\n if not self.already_pending_upgrade(UpgradeId.ZERGLINGMOVEMENTSPEED):\n self.research(UpgradeId.ZERGLINGMOVEMENTSPEED)\n\n async def attacking_logic(self):\n \"\"\" Attacking logic\n - improvements possible -> Add new units(later), add priority targets, add retreat logic(other function),\n add micro and probably much more\"\"\"\n if len(self.units(UnitTypeId.ZERGLING)) >= 6:\n for zergling in self.units(UnitTypeId.ZERGLING):\n self.do(zergling.attack(self.enemy_start_locations[0]))\n\n async def train_overlord(self):\n \"\"\"Train overlord logic\n - improvements possible -> make amount pending scale with base amount,\n make supply left constraint scale with larva amount\"\"\"\n if self.supply_left < 3 and not self.already_pending(UnitTypeId.\n OVERLORD):\n self.train(UnitTypeId.OVERLORD)\n\n async def train_zergling(self):\n \"\"\"Train zergling logic\n - improvements possible -> create constraints when other units starts to be built based on other unit amounts\"\"\"\n if self.structures(UnitTypeId.SPAWNINGPOOL).ready:\n self.train(UnitTypeId.ZERGLING)\n\n async def train_queen(self):\n \"\"\"Train zergling logic\n - improvements possible -> Make the queen get created preferably on non-already-assigned bases\n and maybe create some extra for creep spread(don't limit it by bases)\"\"\"\n if self.structures(UnitTypeId.SPAWNINGPOOL).ready and len(self.\n units(UnitTypeId.QUEEN)) < len(self.townhalls\n ) and self.already_pending(UnitTypeId.QUEEN) < len(self.\n townhalls.ready):\n self.train(UnitTypeId.QUEEN)\n\n async def send_drones_to_extractor(self):\n \"\"\" Send drones to extractor from minerals\n - improvements possible -> Expand it, make it trigger when the vespene - mineral ratio is to high\n (only check it when at least 2 bases are saturated)make the closer_than distance 8 instead of 10,\n also change the constraints completely(separate it later - this constraints are for the zergling speed,\n make it a separated method) make it more general\"\"\"\n if self.vespene < 100 and not self.already_pending_upgrade(UpgradeId\n .ZERGLINGMOVEMENTSPEED):\n for extractor in self.gas_buildings:\n drones_needed_to_fill_extractor = (extractor.\n 
ideal_harvesters - extractor.assigned_harvesters)\n if drones_needed_to_fill_extractor > 0:\n for drone in self.workers.closer_than(10, extractor).take(\n drones_needed_to_fill_extractor):\n self.do(drone.gather(extractor))\n\n async def send_drones_to_minerals(self):\n \"\"\" Send drones from extractor to minerals\n - improvements possible -> Expand it, make it trigger when the mineral - vespene ratio is to high\n (only check it when at least 2 bases are saturated)make the closer_than distance 8 instead of 10,\n also change the constraints completely(separate it later - this constraints are for the zergling speed,\n make it a separated method) make it more general\"\"\"\n if self.vespene >= 100 or self.already_pending_upgrade(UpgradeId.\n ZERGLINGMOVEMENTSPEED):\n for drone in self.workers.filter(lambda w: w.is_carrying_vespene):\n self.do(drone.gather(self.mineral_field.closer_than(10,\n drone).closest_to(drone)))\n\n async def on_step(self, iteration):\n await self.build_extractor()\n await self.build_pool()\n await self.train_overlord()\n await self.train_queen()\n await self.train_zergling()\n await self.research_zergling_speed()\n await self.attacking_logic()\n await self.queen_injection_logic()\n await self.send_drones_to_extractor()\n await self.send_drones_to_minerals()\n",
"<docstring token>\n<import token>\n<class token>\n"
] | false |
99,353 |
1939ec7b0b72201dcca04b7ddd9722b82332a743
|
from array import array
import numpy
def prime_Anagram(str1):
"""generating prime numbers by taking
method argument str1"""
anagram = []
non_Anagram = []
arr = array('i', [])
    for i in range(0, str1):
        count = 0
        if i != 0 and i != 1:
            for j in range(2, i):
                if i % j == 0:
                    count = count + 1
                    break
            if count == 0:
                arr.append(i)
"""
Anagram Code For Prime Number
comparing length of two strings if
they are equal in length then sorting both and
comparing if found equal then appending to array
"""
flag = True
for i in range(len(arr) - 1):
for j in range(i + 1, len(arr)):
if len(str(arr[i])) == len(str(arr[j])):
var1 = ''.join(sorted(str(arr[i])))
var2 = ''.join(sorted(str(arr[j])))
if var1 == var2:
anagram.append(arr[i])
anagram.append(arr[j])
flag = False
if flag:
non_Anagram.append(arr[i])
else:
flag = True
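    # Added caveat: a prime with several anagram partners (113, 131 and 311
    # are all prime, for instance) is appended once per matching pair, so
    # the anagram list can contain duplicates.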
"""
declaring numpy for 2 rows and 158 columns
"""
numarray = numpy.zeros((2, 158))
for j in range(0, len(anagram)):
numarray[0][j] = anagram[j]
for k in range(0, len(non_Anagram)):
numarray[1][k] = non_Anagram[k]
"""
printing 2D numpy array for anagram and non-anagram prime numbers
"""
print(numarray)
""" Main Method"""
if __name__ == '__main__':
"""
start of main method
validation of the range
Calling the prime anagram Method
with start as Method Argument
"""
start = int(input("Enter Range: "))
if start == 1000:
prime_Anagram(start)
else:
print("Range should be 1000")
|
[
"from array import array\nimport numpy\n\n\ndef prime_Anagram(str1):\n \"\"\"generating prime numbers by taking\n method argument str1\"\"\"\n anagram = []\n non_Anagram = []\n arr = array('i', [])\n for i in range(0, str1):\n count = 0\n if i != 0 and i != 1:\n for j in range(2, i):\n if i % j == 0:\n count = count + 1\n break\n if count == 0:\n arr.append(i)\n \"\"\"\n Anagram Code For Prime Number\n comparing length of two strings if \n they are equal in length then sorting both and \n comparing if found equal then appending to array\n \n \"\"\"\n flag = True\n for i in range(len(arr) - 1):\n for j in range(i + 1, len(arr)):\n if len(str(arr[i])) == len(str(arr[j])):\n var1 = ''.join(sorted(str(arr[i])))\n var2 = ''.join(sorted(str(arr[j])))\n if var1 == var2:\n anagram.append(arr[i])\n anagram.append(arr[j])\n flag = False\n if flag:\n non_Anagram.append(arr[i])\n else:\n flag = True\n \"\"\"\n declaring numpy for 2 rows and 158 columns\n \"\"\"\n numarray = numpy.zeros((2, 158))\n for j in range(0, len(anagram)):\n numarray[0][j] = anagram[j]\n for k in range(0, len(non_Anagram)):\n numarray[1][k] = non_Anagram[k]\n \"\"\"\n printing 2D numpy array for anagram and non-anagram prime numbers\n \"\"\"\n print(numarray)\n\n \"\"\" Main Method\"\"\"\n\n\nif __name__ == '__main__':\n \"\"\"\n start of main method\n validation of the range\n Calling the prime anagram Method \n with start as Method Argument\n\n \"\"\"\n start = int(input(\"Enter Range: \"))\n if start == 1000:\n prime_Anagram(start)\n else:\n print(\"Range should be 1000\")\n",
"from array import array\nimport numpy\n\n\ndef prime_Anagram(str1):\n \"\"\"generating prime numbers by taking\n method argument str1\"\"\"\n anagram = []\n non_Anagram = []\n arr = array('i', [])\n for i in range(0, str1):\n count = 0\n if i != 0 and i != 1:\n for j in range(2, i):\n if i % j == 0:\n count = count + 1\n break\n if count == 0:\n arr.append(i)\n \"\"\"\n Anagram Code For Prime Number\n comparing length of two strings if \n they are equal in length then sorting both and \n comparing if found equal then appending to array\n \n \"\"\"\n flag = True\n for i in range(len(arr) - 1):\n for j in range(i + 1, len(arr)):\n if len(str(arr[i])) == len(str(arr[j])):\n var1 = ''.join(sorted(str(arr[i])))\n var2 = ''.join(sorted(str(arr[j])))\n if var1 == var2:\n anagram.append(arr[i])\n anagram.append(arr[j])\n flag = False\n if flag:\n non_Anagram.append(arr[i])\n else:\n flag = True\n \"\"\"\n declaring numpy for 2 rows and 158 columns\n \"\"\"\n numarray = numpy.zeros((2, 158))\n for j in range(0, len(anagram)):\n numarray[0][j] = anagram[j]\n for k in range(0, len(non_Anagram)):\n numarray[1][k] = non_Anagram[k]\n \"\"\"\n printing 2D numpy array for anagram and non-anagram prime numbers\n \"\"\"\n print(numarray)\n \"\"\" Main Method\"\"\"\n\n\nif __name__ == '__main__':\n \"\"\"\n start of main method\n validation of the range\n Calling the prime anagram Method \n with start as Method Argument\n\n \"\"\"\n start = int(input('Enter Range: '))\n if start == 1000:\n prime_Anagram(start)\n else:\n print('Range should be 1000')\n",
"<import token>\n\n\ndef prime_Anagram(str1):\n \"\"\"generating prime numbers by taking\n method argument str1\"\"\"\n anagram = []\n non_Anagram = []\n arr = array('i', [])\n for i in range(0, str1):\n count = 0\n if i != 0 and i != 1:\n for j in range(2, i):\n if i % j == 0:\n count = count + 1\n break\n if count == 0:\n arr.append(i)\n \"\"\"\n Anagram Code For Prime Number\n comparing length of two strings if \n they are equal in length then sorting both and \n comparing if found equal then appending to array\n \n \"\"\"\n flag = True\n for i in range(len(arr) - 1):\n for j in range(i + 1, len(arr)):\n if len(str(arr[i])) == len(str(arr[j])):\n var1 = ''.join(sorted(str(arr[i])))\n var2 = ''.join(sorted(str(arr[j])))\n if var1 == var2:\n anagram.append(arr[i])\n anagram.append(arr[j])\n flag = False\n if flag:\n non_Anagram.append(arr[i])\n else:\n flag = True\n \"\"\"\n declaring numpy for 2 rows and 158 columns\n \"\"\"\n numarray = numpy.zeros((2, 158))\n for j in range(0, len(anagram)):\n numarray[0][j] = anagram[j]\n for k in range(0, len(non_Anagram)):\n numarray[1][k] = non_Anagram[k]\n \"\"\"\n printing 2D numpy array for anagram and non-anagram prime numbers\n \"\"\"\n print(numarray)\n \"\"\" Main Method\"\"\"\n\n\nif __name__ == '__main__':\n \"\"\"\n start of main method\n validation of the range\n Calling the prime anagram Method \n with start as Method Argument\n\n \"\"\"\n start = int(input('Enter Range: '))\n if start == 1000:\n prime_Anagram(start)\n else:\n print('Range should be 1000')\n",
"<import token>\n\n\ndef prime_Anagram(str1):\n \"\"\"generating prime numbers by taking\n method argument str1\"\"\"\n anagram = []\n non_Anagram = []\n arr = array('i', [])\n for i in range(0, str1):\n count = 0\n if i != 0 and i != 1:\n for j in range(2, i):\n if i % j == 0:\n count = count + 1\n break\n if count == 0:\n arr.append(i)\n \"\"\"\n Anagram Code For Prime Number\n comparing length of two strings if \n they are equal in length then sorting both and \n comparing if found equal then appending to array\n \n \"\"\"\n flag = True\n for i in range(len(arr) - 1):\n for j in range(i + 1, len(arr)):\n if len(str(arr[i])) == len(str(arr[j])):\n var1 = ''.join(sorted(str(arr[i])))\n var2 = ''.join(sorted(str(arr[j])))\n if var1 == var2:\n anagram.append(arr[i])\n anagram.append(arr[j])\n flag = False\n if flag:\n non_Anagram.append(arr[i])\n else:\n flag = True\n \"\"\"\n declaring numpy for 2 rows and 158 columns\n \"\"\"\n numarray = numpy.zeros((2, 158))\n for j in range(0, len(anagram)):\n numarray[0][j] = anagram[j]\n for k in range(0, len(non_Anagram)):\n numarray[1][k] = non_Anagram[k]\n \"\"\"\n printing 2D numpy array for anagram and non-anagram prime numbers\n \"\"\"\n print(numarray)\n \"\"\" Main Method\"\"\"\n\n\n<code token>\n",
"<import token>\n<function token>\n<code token>\n"
] | false |
99,354 |
05ccdaae6baa4e403c47592287c212ccdfe982b9
|
from difficulty.models import nn_utils
from difficulty.readers import experiment_reader
from difficulty.readers import nyt_reader
from difficulty.readers import pico_reader
from difficulty.readers import pico_sentence_reader
import gensim
import numpy as np
import os
import tensorflow as tf
from tensorflow.contrib import learn
from tensorflow.contrib.tensorboard.plugins import projector
#W2VModelFILE="/mnt/data/workspace/nlp/w2v_models/PubMed-w2v.bin"
W2VModelFILE = "/mnt/data/workspace/nlp/w2v_models/PICO-w2v.vec"
EMBEDDING_DIM = 200
MODE_TRAIN = "train"
MODE_EVAL = "eval"
MODE_INFER = "inference"
class NNModel:
def __init__(self,
mode=MODE_TRAIN,
running_dir="./test/",
encoder="CNN",
num_tasks=1,
task_names=["Task"],
max_document_length=64,
is_classifier=True,
l2_reg_lambda=0.1,
cnn_filter_sizes=[3,4,5],
cnn_num_filters=128,
rnn_bidirectional=False,
rnn_cell_type="GRU",
rnn_num_layers=2,
dnn_layer_sizes=[512]):
self._train = True if mode == MODE_TRAIN else False
# Basic params
self._max_document_length = max_document_length
self._num_tasks = num_tasks
self._is_classifier = is_classifier
self._embedding_size = EMBEDDING_DIM
self._encoder = encoder
self._encoding_size = 300
self._vocab = None
self._task_names = task_names
# CNN params
self._cnn_filter_sizes = cnn_filter_sizes
self._cnn_num_filters = cnn_num_filters
# RNN params
self._rnn_bidirectional = rnn_bidirectional
self._rnn_cell_type = rnn_cell_type
self._rnn_num_layers = rnn_num_layers
# DNN params
self._dnn_layer_sizes = dnn_layer_sizes
self._dnn_activation = "relu"
# Hyper-params
self._l2_reg_lambda = l2_reg_lambda
self.ops = []
self.loss = None
self.eval_metrics = {}
self.saver = None
self.checkpoint_dir = os.path.join(running_dir, "train/")
self.eval_dir = os.path.join(running_dir, "test/")
def Graph(self):
self.input_x = tf.placeholder(tf.int32, [None, self._max_document_length], name="input_x")
self.input_l = tf.placeholder(tf.int32, [None], name="input_l")
self.input_y = tf.placeholder(tf.float32, [None, self._num_tasks], name="input_y")
self.input_w = tf.placeholder(tf.float32, [None, self._num_tasks], name="input_w")
self.dropout = tf.placeholder(tf.float32, name="dropout_prob")
if self._rnn_bidirectional:
self.input_x_bw = tf.placeholder(tf.int32,
[None, self._max_document_length], name="input_x_bw")
else:
self.input_x_bw = None
        # Assuming input text is pre-tokenized and split by space
vocab, init_embedding = self._LoadInitEmbeddings()
def _tokenizer(xs):
return [x.split(" ") for x in xs]
self._vocab = learn.preprocessing.VocabularyProcessor(
self._max_document_length, tokenizer_fn=_tokenizer)
self._vocab.fit(vocab)
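        # For example, _tokenizer(["deep learning"]) yields
        # [["deep", "learning"]]; the VocabularyProcessor then maps tokens
        # to ids and pads/truncates each document to _max_document_length.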
# Insert init embedding for <UNK>
init_embedding = np.vstack(
[np.random.normal(size=self._embedding_size), init_embedding])
vocab_size = len(self._vocab.vocabulary_)
with tf.variable_scope("WordEmbeddings"):
embeddings = tf.get_variable(name="W", shape=init_embedding.shape,
initializer=tf.constant_initializer(init_embedding), trainable=False)
if self._encoder == "CNN":
input_encoded = self._CNNLayers(embeddings)
elif self._encoder == "RNN":
input_encoded = self._RNNLayers(embeddings)
        elif self._encoder == "DNN":
            input_encoded = self._DNNLayers(embeddings)
        else:
            raise ValueError("encoder type not supported.")
self.input_encoded = input_encoded
with tf.variable_scope("dropout"):
input_encoded = tf.nn.dropout(input_encoded, 1-self.dropout)
if self._is_classifier:
preds, pred_scores, loss = self._classifier(input_encoded, self.input_y, self.input_w)
self.ops.extend([preds, pred_scores, loss])
else:
            # preds and pred_scores are the same for the regression model
pred_scores, loss = self._regressor(input_encoded, self.input_y, self.input_w)
self.ops.extend([pred_scores, pred_scores, loss])
self.loss = loss
self.saver = tf.train.Saver(tf.global_variables())
return self
def _classifier(self, input_encoded, output, weights):
total_loss = tf.constant(0.0)
pooled_scores = []
pooled_predictions = []
for idx in range(self._num_tasks):
gts = tf.expand_dims(output[:, idx], -1)
wts = tf.expand_dims(weights[:, idx], -1)
with tf.variable_scope("{0}_classifier".format(self._task_names[idx])):
labels = tf.concat([1-gts, gts], 1)
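                # Assuming gts holds {0, 1} labels, [1-gts, gts] forms the
                # two-class one-hot targets consumed by
                # softmax_cross_entropy_with_logits below.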
logits = tf.layers.dense(input_encoded, 2,
kernel_regularizer=tf.contrib.layers.l2_regularizer(
self._l2_reg_lambda))
scores = tf.reduce_max(tf.nn.softmax(logits), 1)
predictions = tf.argmax(logits, 1, name="predictions")
pooled_predictions.append(predictions)
pooled_scores.append(scores)
losses = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)
self.eval_metrics["{0}/Accuracy".format(self._task_names[idx])] = (
tf.metrics.accuracy(gts, predictions, weights=wts))
self.eval_metrics["{0}/Precision".format(self._task_names[idx])] = (
tf.metrics.precision(gts, predictions, weights=wts))
self.eval_metrics["{0}/Recall".format(self._task_names[idx])] = (
tf.metrics.recall(gts, predictions, weights=wts))
total_loss += tf.reduce_mean(losses * wts)
pooled_predictions = tf.stack(pooled_predictions, axis=1)
pooled_scores = tf.stack(pooled_scores, axis=1)
return pooled_predictions, pooled_scores, total_loss
def _regressor(self, input_encoded, output, weights):
total_loss = tf.constant(0.0)
pooled_logits = []
for idx in range(self._num_tasks):
with tf.variable_scope("{0}_regressor".format(self._task_names[idx])):
logits = tf.layers.dense(input_encoded, 1,
kernel_regularizer=tf.contrib.layers.l2_regularizer(
self._l2_reg_lambda))
gts = tf.expand_dims(output[:, idx], -1)
wts = tf.expand_dims(weights[:, idx], -1)
losses = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,
labels=gts)
total_loss += tf.reduce_mean(losses * wts)
pooled_logits.append(tf.sigmoid(logits))
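                # The sigmoid squashes the single logit into [0, 1] to match
                # the sigmoid cross-entropy loss above, so the "regression"
                # output is a probability-like score.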
self.eval_metrics["{0}/Pearsonr".format(self._task_names[idx])] = (
tf.contrib.metrics.streaming_pearson_correlation(
logits, gts, weights=wts))
pooled_logits = tf.stack(pooled_logits, axis=1)
pooled_logits = tf.squeeze(pooled_logits, axis=-1)
return pooled_logits, total_loss
def _LoadInitEmbeddings(self):
## Initialize word_embedding
w2v_model = gensim.models.KeyedVectors.load_word2vec_format(W2VModelFILE, binary=False)
vocab = []
embd = []
for token in w2v_model.vocab:
vec = w2v_model[token]
vocab.append(token)
embd.append(vec)
embedding = np.asarray(embd)
return vocab, embedding
def _LookupEmbeddings(self, embeddings, inputs):
# Return sequence length and inputs
mask = tf.to_float(tf.not_equal(inputs, 0))
inputs = tf.nn.embedding_lookup(embeddings, inputs)
lengths = tf.cast(tf.reduce_sum(mask, axis=1), tf.int64)
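        # Example: padded ids [[4, 7, 0, 0]] give mask [[1., 1., 0., 0.]]
        # and hence length 2 (id 0 is the padding id).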
return lengths, inputs
def _CNNLayers(self, embeddings):
_, input_embeddings = self._LookupEmbeddings(embeddings, self.input_x)
input_embeddings = tf.expand_dims(input_embeddings, -1)
with tf.variable_scope("CNN"):
pooled_outputs = []
for i, filter_size in enumerate(self._cnn_filter_sizes):
with tf.variable_scope("conv-maxpool-%s" % filter_size):
# Conv layer
filter_shape = [filter_size, self._embedding_size, 1, self._cnn_num_filters]
W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
b = tf.Variable(tf.constant(0.1, shape=[self._cnn_num_filters]), name="b")
conv = tf.nn.conv2d(
input_embeddings,
W,
strides=[1,1,1,1],
padding="VALID",
name="conv")
h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
pooled = tf.nn.max_pool(
h,
ksize=[1, self._max_document_length-filter_size+1, 1, 1],
strides=[1,1,1,1],
padding="VALID",
name="pool")
pooled_outputs.append(pooled)
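                    # A VALID convolution over a length-L document with a
                    # size-k filter yields L - k + 1 positions; max-pooling
                    # over that whole window leaves one value per filter.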
num_filters_total = self._cnn_num_filters * len(self._cnn_filter_sizes)
cnn_encoding = tf.concat(pooled_outputs, 3)
cnn_encoding = tf.reshape(cnn_encoding, [-1, num_filters_total])
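            # With the constructor defaults (filter sizes [3, 4, 5], 128
            # filters each) num_filters_total is 3 * 128 = 384, projected to
            # self._encoding_size by the dense layer below.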
with tf.variable_scope("dropout"):
cnn_encoding = tf.nn.dropout(cnn_encoding, 1-self.dropout)
cnn_encoding = tf.layers.dense(cnn_encoding, self._encoding_size)
return cnn_encoding
def _DNNLayers(self, embeddings):
lengths, input_embeddings = self._LookupEmbeddings(embeddings, self.input_x)
lengths = tf.expand_dims(lengths, -1)
input_embeddings = tf.divide(tf.reduce_sum(input_embeddings, 1), tf.to_float(lengths))
#input_embeddings = tf.reduce_mean(input_embeddings, 1)
#input_embeddings = tf.Print(input_embeddings, [input_embeddings], "input_embeddings: ", summarize=3)
with tf.variable_scope("DNN"):
input_tensor = tf.nn.dropout(input_embeddings, 1)
for i, out_size in enumerate(self._dnn_layer_sizes):
with tf.variable_scope("Layer_{0}".format(i+1)):
in_size = input_tensor.get_shape()[1]
stddev = 1.0 / tf.sqrt(tf.to_float(tf.maximum(in_size, out_size)))
W = tf.get_variable("W", (in_size, out_size),
initializer=tf.truncated_normal_initializer(stddev=stddev))
b = tf.get_variable("b", (out_size),
initializer=tf.constant_initializer(0.1))
input_tensor = tf.nn.bias_add(tf.matmul(input_tensor, W), b)
if self._dnn_activation == "relu":
input_tensor = tf.nn.relu(input_tensor, name="relu")
else:
raise ValueError("dnn_activation function not supported.")
#if i != len(self._dnn_activation)-1:
# input_tensor = tf.nn.dropout(input_tensor, 1 - self.dropout)
#input_tensor = tf.Print(input_tensor, [input_tensor], "input_tensor: ", summarize=30)
return input_tensor
def _RNNCells(self):
        if self._rnn_cell_type == "GRU":
            cells = tf.contrib.rnn.MultiRNNCell(
                [tf.nn.rnn_cell.GRUCell(self._embedding_size)
                for x in range(self._rnn_num_layers)], state_is_tuple=True)
        elif self._rnn_cell_type == "LSTM":
            cells = tf.contrib.rnn.MultiRNNCell(
                [tf.nn.rnn_cell.LSTMCell(self._embedding_size)
                for x in range(self._rnn_num_layers)], state_is_tuple=True)
        else:
            raise ValueError("rnn_cell_type not supported.")
        return cells
def _RNNLayers(self, embeddings):
_, fw_embeddings = self._LookupEmbeddings(embeddings, self.input_x)
if self._rnn_bidirectional:
_, bw_embeddings = self._LookupEmbeddings(embeddings, self.input_x_bw)
with tf.variable_scope("RNN"):
with tf.variable_scope("forward"):
fw_cells = self._RNNCells()
_, fw_state = tf.nn.dynamic_rnn(fw_cells, fw_embeddings,
sequence_length=self.input_l, dtype=tf.float32)
fw_encoding = fw_state[-1]
if self._rnn_bidirectional:
with tf.variable_scope("backward"):
bw_cells = self._RNNCells()
_, bw_state = tf.nn.dynamic_rnn(bw_cells, bw_embeddings,
sequence_length=self.input_l, dtype=tf.float32)
bw_encoding = bw_state[-1]
rnn_encoding = tf.concat([fw_encoding, bw_encoding], axis=1)
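                # Concatenating the forward and backward final states doubles
                # the feature dimension before the dense projection below.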
else:
rnn_encoding = fw_encoding
with tf.variable_scope("dropout"):
rnn_encoding = tf.nn.dropout(rnn_encoding, 1-self.dropout)
rnn_encoding = tf.layers.dense(rnn_encoding, self._encoding_size)
return rnn_encoding
def main():
#target = "PICO"
target = "PICOSentence"
#target = "NYT"
    if target == "PICO":
        is_classifier = True
        model = NNModel(
            mode=FLAGS.mode,
            is_classifier=is_classifier,
encoder=FLAGS.encoder,
num_tasks=1,
task_names=["Classification"],
max_document_length=FLAGS.max_document_length,
cnn_filter_sizes=list(map(int, FLAGS.cnn_filter_sizes.split(","))),
cnn_num_filters=FLAGS.cnn_num_filters,
rnn_bidirectional=FLAGS.rnn_bidirectional,
rnn_cell_type=FLAGS.rnn_cell_type,
rnn_num_layers=FLAGS.rnn_num_layers)
document_reader = pico_reader.PICOReader(annotype="Outcome")
elif target == "PICOSentence":
is_classifier = False
model = NNModel(
mode=FLAGS.mode,
is_classifier=is_classifier,
encoder="CNN",
num_tasks=1,
task_names=["Outcome"],
max_document_length=FLAGS.max_document_length,
cnn_filter_sizes=list(map(int, FLAGS.cnn_filter_sizes.split(","))),
cnn_num_filters=FLAGS.cnn_num_filters,
rnn_bidirectional=FLAGS.rnn_bidirectional,
rnn_cell_type=FLAGS.rnn_cell_type,
rnn_num_layers=FLAGS.rnn_num_layers)
#document_reader = pico_sentence_reader.PICOSentenceReader(annotype="multitask")
document_reader = experiment_reader.ExperimentReader(annotype="Outcome", binary=is_classifier)
    elif target == "NYT":
        is_classifier = True
        model = NNModel(
            mode=FLAGS.mode,
            is_classifier=is_classifier,
encoder="CNN",
num_tasks=1,
task_names=["Business"],
max_document_length=FLAGS.max_document_length,
cnn_filter_sizes=list(map(int, FLAGS.cnn_filter_sizes.split(","))),
cnn_num_filters=FLAGS.cnn_num_filters,
rnn_bidirectional=FLAGS.rnn_bidirectional,
rnn_cell_type=FLAGS.rnn_cell_type,
rnn_num_layers=FLAGS.rnn_num_layers,
dnn_layer_sizes=list(map(int, FLAGS.dnn_layer_sizes.split(","))))
document_reader = nyt_reader.NYTReader(genre="Business")
    else:
        raise ValueError("Unknown target: {0}".format(target))
if FLAGS.mode == MODE_TRAIN:
nn_utils.train(model, document_reader, is_classifier=is_classifier, FLAGS=FLAGS)
elif FLAGS.mode == MODE_EVAL:
checkpoint = "./test/train/model-2000"
nn_utils.eval(model, document_reader, checkpoint, FLAGS=FLAGS)
if __name__ == "__main__":
flags = tf.app.flags
flags.DEFINE_string("mode", "train", "Model mode")
flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
flags.DEFINE_integer("max_steps", 3000, "Max steps of training (default: 3000)")
flags.DEFINE_integer("num_epochs", 100, "Number of training epochs (default: 100)")
    flags.DEFINE_integer("evaluate_every", 500,
        "Evaluate model on dev set after this many steps (default: 500)")
    flags.DEFINE_integer("checkpoint_every", 2000,
        "Save model after this many steps (default: 2000)")
flags.DEFINE_float("dropout", 0.4, "dropout")
flags.DEFINE_float("learning_rate", 1e-3, "learning rate")
flags.DEFINE_integer("max_document_length", 50, "Max document length")
flags.DEFINE_bool("rnn_bidirectional", True,
"Whther rnn is undirectional or bidirectional")
flags.DEFINE_string("rnn_cell_type", "GRU", "RNN cell type, GRU or LSTM")
flags.DEFINE_integer("rnn_num_layers", 2, "Number of layers of RNN")
flags.DEFINE_string("encoder", "RNN", "Type of encoder used to embed document")
flags.DEFINE_string("cnn_filter_sizes", "3,4,5", "Filter sizes in CNN encoder")
flags.DEFINE_integer("cnn_num_filters", 32,
"Number of filters per filter size in CNN encoder")
    flags.DEFINE_string("dnn_layer_sizes", "256", "Layer sizes in DNN encoder")
flags.DEFINE_string("output_fname", "./tmp/output.out", "Output file")
FLAGS = tf.flags.FLAGS
main()
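    # Example invocation (hypothetical file name; the flags are defined
    # above):
    #   python nn_model.py --mode=train --encoder=CNN --batch_size=64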
|
[
"from difficulty.models import nn_utils\nfrom difficulty.readers import experiment_reader\nfrom difficulty.readers import nyt_reader\nfrom difficulty.readers import pico_reader\nfrom difficulty.readers import pico_sentence_reader\nimport gensim\nimport numpy as np\nimport os\nimport tensorflow as tf\n\nfrom tensorflow.contrib import learn\nfrom tensorflow.contrib.tensorboard.plugins import projector\n\n#W2VModelFILE=\"/mnt/data/workspace/nlp/w2v_models/PubMed-w2v.bin\"\nW2VModelFILE=\"/mnt/data/workspace/nlp/w2v_models/PICO-w2v.vec\"\nEMBEDDING_DIM=200\n\nMODE_TRAIN = \"train\"\nMODE_EVAL = \"eval\"\nMODE_INFER = \"inference\"\n\nclass NNModel:\n\n def __init__(self,\n mode=MODE_TRAIN,\n running_dir=\"./test/\",\n encoder=\"CNN\",\n num_tasks=1,\n task_names=[\"Task\"],\n max_document_length=64,\n is_classifier=True,\n l2_reg_lambda=0.1,\n cnn_filter_sizes=[3,4,5],\n cnn_num_filters=128,\n rnn_bidirectional=False,\n rnn_cell_type=\"GRU\",\n rnn_num_layers=2,\n dnn_layer_sizes=[512]):\n\n self._train = True if mode == MODE_TRAIN else False\n\n # Basic params\n self._max_document_length = max_document_length\n self._num_tasks = num_tasks\n self._is_classifier = is_classifier\n self._embedding_size = EMBEDDING_DIM\n self._encoder = encoder\n self._encoding_size = 300\n self._vocab = None\n self._task_names = task_names\n\n # CNN params\n self._cnn_filter_sizes = cnn_filter_sizes\n self._cnn_num_filters = cnn_num_filters\n\n # RNN params\n self._rnn_bidirectional = rnn_bidirectional\n self._rnn_cell_type = rnn_cell_type\n self._rnn_num_layers = rnn_num_layers\n\n # DNN params\n self._dnn_layer_sizes = dnn_layer_sizes\n self._dnn_activation = \"relu\"\n\n # Hyper-params\n self._l2_reg_lambda = l2_reg_lambda\n\n self.ops = []\n self.loss = None\n self.eval_metrics = {}\n self.saver = None\n self.checkpoint_dir = os.path.join(running_dir, \"train/\")\n self.eval_dir = os.path.join(running_dir, \"test/\")\n\n\n def Graph(self):\n self.input_x = tf.placeholder(tf.int32, [None, self._max_document_length], name=\"input_x\")\n self.input_l = tf.placeholder(tf.int32, [None], name=\"input_l\")\n self.input_y = tf.placeholder(tf.float32, [None, self._num_tasks], name=\"input_y\")\n self.input_w = tf.placeholder(tf.float32, [None, self._num_tasks], name=\"input_w\")\n self.dropout = tf.placeholder(tf.float32, name=\"dropout_prob\")\n\n if self._rnn_bidirectional:\n self.input_x_bw = tf.placeholder(tf.int32,\n [None, self._max_document_length], name=\"input_x_bw\")\n else:\n self.input_x_bw = None\n\n # Assuming input text is pre-tokenized and splited by space\n vocab, init_embedding = self._LoadInitEmbeddings()\n\n def _tokenizer(xs):\n return [x.split(\" \") for x in xs]\n self._vocab = learn.preprocessing.VocabularyProcessor(\n self._max_document_length, tokenizer_fn=_tokenizer)\n self._vocab.fit(vocab)\n\n # Insert init embedding for <UNK>\n init_embedding = np.vstack(\n [np.random.normal(size=self._embedding_size), init_embedding])\n\n vocab_size = len(self._vocab.vocabulary_)\n with tf.variable_scope(\"WordEmbeddings\"):\n embeddings = tf.get_variable(name=\"W\", shape=init_embedding.shape,\n initializer=tf.constant_initializer(init_embedding), trainable=False)\n\n if self._encoder == \"CNN\":\n input_encoded = self._CNNLayers(embeddings)\n elif self._encoder == \"RNN\":\n input_encoded = self._RNNLayers(embeddings)\n elif self._encoder == \"DNN\":\n input_encoded = self._DNNLayers(embeddings)\n\n self.input_encoded = input_encoded\n\n with tf.variable_scope(\"dropout\"):\n input_encoded = 
tf.nn.dropout(input_encoded, 1-self.dropout)\n\n if self._is_classifier:\n preds, pred_scores, loss = self._classifier(input_encoded, self.input_y, self.input_w)\n self.ops.extend([preds, pred_scores, loss])\n else:\n # preds and pred_scores are the same for regression model\n pred_scores, loss = self._regressor(input_encoded, self.input_y, self.input_w)\n self.ops.extend([pred_scores, pred_scores, loss])\n\n self.loss = loss\n\n self.saver = tf.train.Saver(tf.global_variables())\n\n return self\n\n\n def _classifier(self, input_encoded, output, weights):\n total_loss = tf.constant(0.0)\n pooled_scores = []\n pooled_predictions = []\n\n for idx in range(self._num_tasks):\n gts = tf.expand_dims(output[:, idx], -1)\n wts = tf.expand_dims(weights[:, idx], -1)\n with tf.variable_scope(\"{0}_classifier\".format(self._task_names[idx])):\n\n labels = tf.concat([1-gts, gts], 1)\n logits = tf.layers.dense(input_encoded, 2,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(\n self._l2_reg_lambda))\n\n scores = tf.reduce_max(tf.nn.softmax(logits), 1)\n predictions = tf.argmax(logits, 1, name=\"predictions\")\n\n pooled_predictions.append(predictions)\n pooled_scores.append(scores)\n\n losses = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)\n\n self.eval_metrics[\"{0}/Accuracy\".format(self._task_names[idx])] = (\n tf.metrics.accuracy(gts, predictions, weights=wts))\n self.eval_metrics[\"{0}/Precision\".format(self._task_names[idx])] = (\n tf.metrics.precision(gts, predictions, weights=wts))\n self.eval_metrics[\"{0}/Recall\".format(self._task_names[idx])] = (\n tf.metrics.recall(gts, predictions, weights=wts))\n\n total_loss += tf.reduce_mean(losses * wts)\n\n pooled_predictions = tf.stack(pooled_predictions, axis=1)\n pooled_scores = tf.stack(pooled_scores, axis=1)\n return pooled_predictions, pooled_scores, total_loss\n\n\n def _regressor(self, input_encoded, output, weights):\n total_loss = tf.constant(0.0)\n pooled_logits = []\n for idx in range(self._num_tasks):\n with tf.variable_scope(\"{0}_regressor\".format(self._task_names[idx])):\n logits = tf.layers.dense(input_encoded, 1,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(\n self._l2_reg_lambda))\n gts = tf.expand_dims(output[:, idx], -1)\n wts = tf.expand_dims(weights[:, idx], -1)\n\n losses = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,\n labels=gts)\n total_loss += tf.reduce_mean(losses * wts)\n\n pooled_logits.append(tf.sigmoid(logits))\n\n self.eval_metrics[\"{0}/Pearsonr\".format(self._task_names[idx])] = (\n tf.contrib.metrics.streaming_pearson_correlation(\n logits, gts, weights=wts))\n\n pooled_logits = tf.stack(pooled_logits, axis=1)\n pooled_logits = tf.squeeze(pooled_logits, axis=-1)\n return pooled_logits, total_loss\n\n\n def _LoadInitEmbeddings(self):\n ## Initialize word_embedding\n w2v_model = gensim.models.KeyedVectors.load_word2vec_format(W2VModelFILE, binary=False)\n vocab = []\n embd = []\n\n for token in w2v_model.vocab:\n vec = w2v_model[token]\n vocab.append(token)\n embd.append(vec)\n\n embedding = np.asarray(embd)\n return vocab, embedding\n\n\n def _LookupEmbeddings(self, embeddings, inputs):\n # Return sequence length and inputs\n\n mask = tf.to_float(tf.not_equal(inputs, 0))\n inputs = tf.nn.embedding_lookup(embeddings, inputs)\n\n lengths = tf.cast(tf.reduce_sum(mask, axis=1), tf.int64)\n return lengths, inputs\n\n\n def _CNNLayers(self, embeddings):\n _, input_embeddings = self._LookupEmbeddings(embeddings, self.input_x)\n\n input_embeddings = 
tf.expand_dims(input_embeddings, -1)\n with tf.variable_scope(\"CNN\"):\n pooled_outputs = []\n for i, filter_size in enumerate(self._cnn_filter_sizes):\n with tf.variable_scope(\"conv-maxpool-%s\" % filter_size):\n # Conv layer\n filter_shape = [filter_size, self._embedding_size, 1, self._cnn_num_filters]\n W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name=\"W\")\n b = tf.Variable(tf.constant(0.1, shape=[self._cnn_num_filters]), name=\"b\")\n conv = tf.nn.conv2d(\n input_embeddings,\n W,\n strides=[1,1,1,1],\n padding=\"VALID\",\n name=\"conv\")\n h = tf.nn.relu(tf.nn.bias_add(conv, b), name=\"relu\")\n\n pooled = tf.nn.max_pool(\n h,\n ksize=[1, self._max_document_length-filter_size+1, 1, 1],\n strides=[1,1,1,1],\n padding=\"VALID\",\n name=\"pool\")\n pooled_outputs.append(pooled)\n\n num_filters_total = self._cnn_num_filters * len(self._cnn_filter_sizes)\n cnn_encoding = tf.concat(pooled_outputs, 3)\n cnn_encoding = tf.reshape(cnn_encoding, [-1, num_filters_total])\n\n with tf.variable_scope(\"dropout\"):\n cnn_encoding = tf.nn.dropout(cnn_encoding, 1-self.dropout)\n\n cnn_encoding = tf.layers.dense(cnn_encoding, self._encoding_size)\n\n return cnn_encoding\n\n\n def _DNNLayers(self, embeddings):\n lengths, input_embeddings = self._LookupEmbeddings(embeddings, self.input_x)\n lengths = tf.expand_dims(lengths, -1)\n input_embeddings = tf.divide(tf.reduce_sum(input_embeddings, 1), tf.to_float(lengths))\n #input_embeddings = tf.reduce_mean(input_embeddings, 1)\n #input_embeddings = tf.Print(input_embeddings, [input_embeddings], \"input_embeddings: \", summarize=3)\n\n with tf.variable_scope(\"DNN\"):\n input_tensor = tf.nn.dropout(input_embeddings, 1)\n\n for i, out_size in enumerate(self._dnn_layer_sizes):\n with tf.variable_scope(\"Layer_{0}\".format(i+1)):\n in_size = input_tensor.get_shape()[1]\n stddev = 1.0 / tf.sqrt(tf.to_float(tf.maximum(in_size, out_size)))\n W = tf.get_variable(\"W\", (in_size, out_size),\n initializer=tf.truncated_normal_initializer(stddev=stddev))\n b = tf.get_variable(\"b\", (out_size),\n initializer=tf.constant_initializer(0.1))\n input_tensor = tf.nn.bias_add(tf.matmul(input_tensor, W), b)\n if self._dnn_activation == \"relu\":\n input_tensor = tf.nn.relu(input_tensor, name=\"relu\")\n else:\n raise ValueError(\"dnn_activation function not supported.\")\n\n #if i != len(self._dnn_activation)-1:\n # input_tensor = tf.nn.dropout(input_tensor, 1 - self.dropout)\n #input_tensor = tf.Print(input_tensor, [input_tensor], \"input_tensor: \", summarize=30)\n return input_tensor\n\n\n def _RNNCells(self):\n if self._rnn_cell_type == \"GRU\":\n cells= tf.contrib.rnn.MultiRNNCell(\n [tf.nn.rnn_cell.GRUCell(self._embedding_size)\n for x in range(self._rnn_num_layers)], state_is_tuple=True)\n elif self._rnn_cell_type == \"LSTM\":\n cells= tf.contrib.rnn.MultiRNNCell(\n [tf.nn.rnn_cell.LSTMCell(self._embedding_size)\n for x in range(self._rnn_num_layers)], state_is_tuple=True)\n return cells\n\n\n def _RNNLayers(self, embeddings):\n _, fw_embeddings = self._LookupEmbeddings(embeddings, self.input_x)\n\n if self._rnn_bidirectional:\n _, bw_embeddings = self._LookupEmbeddings(embeddings, self.input_x_bw)\n\n with tf.variable_scope(\"RNN\"):\n\n with tf.variable_scope(\"forward\"):\n fw_cells = self._RNNCells()\n _, fw_state = tf.nn.dynamic_rnn(fw_cells, fw_embeddings,\n sequence_length=self.input_l, dtype=tf.float32)\n fw_encoding = fw_state[-1]\n\n if self._rnn_bidirectional:\n with tf.variable_scope(\"backward\"):\n bw_cells = self._RNNCells()\n _, 
bw_state = tf.nn.dynamic_rnn(bw_cells, bw_embeddings,\n sequence_length=self.input_l, dtype=tf.float32)\n\n bw_encoding = bw_state[-1]\n rnn_encoding = tf.concat([fw_encoding, bw_encoding], axis=1)\n else:\n rnn_encoding = fw_encoding\n\n with tf.variable_scope(\"dropout\"):\n rnn_encoding = tf.nn.dropout(rnn_encoding, 1-self.dropout)\n\n rnn_encoding = tf.layers.dense(rnn_encoding, self._encoding_size)\n\n return rnn_encoding\n\n\ndef main():\n #target = \"PICO\"\n target = \"PICOSentence\"\n #target = \"NYT\"\n\n if target == \"PICO\":\n model = NNModel(\n mode=FLAGS.mode,\n is_classifier=True,\n encoder=FLAGS.encoder,\n num_tasks=1,\n task_names=[\"Classification\"],\n max_document_length=FLAGS.max_document_length,\n cnn_filter_sizes=list(map(int, FLAGS.cnn_filter_sizes.split(\",\"))),\n cnn_num_filters=FLAGS.cnn_num_filters,\n rnn_bidirectional=FLAGS.rnn_bidirectional,\n rnn_cell_type=FLAGS.rnn_cell_type,\n rnn_num_layers=FLAGS.rnn_num_layers)\n\n document_reader = pico_reader.PICOReader(annotype=\"Outcome\")\n elif target == \"PICOSentence\":\n is_classifier = False\n model = NNModel(\n mode=FLAGS.mode,\n is_classifier=is_classifier,\n encoder=\"CNN\",\n num_tasks=1,\n task_names=[\"Outcome\"],\n max_document_length=FLAGS.max_document_length,\n cnn_filter_sizes=list(map(int, FLAGS.cnn_filter_sizes.split(\",\"))),\n cnn_num_filters=FLAGS.cnn_num_filters,\n rnn_bidirectional=FLAGS.rnn_bidirectional,\n rnn_cell_type=FLAGS.rnn_cell_type,\n rnn_num_layers=FLAGS.rnn_num_layers)\n\n #document_reader = pico_sentence_reader.PICOSentenceReader(annotype=\"multitask\")\n document_reader = experiment_reader.ExperimentReader(annotype=\"Outcome\", binary=is_classifier)\n elif target == \"NYT\":\n model = NNModel(\n mode=FLAGS.mode,\n is_classifier=True,\n encoder=\"CNN\",\n num_tasks=1,\n task_names=[\"Business\"],\n max_document_length=FLAGS.max_document_length,\n cnn_filter_sizes=list(map(int, FLAGS.cnn_filter_sizes.split(\",\"))),\n cnn_num_filters=FLAGS.cnn_num_filters,\n rnn_bidirectional=FLAGS.rnn_bidirectional,\n rnn_cell_type=FLAGS.rnn_cell_type,\n rnn_num_layers=FLAGS.rnn_num_layers,\n dnn_layer_sizes=list(map(int, FLAGS.dnn_layer_sizes.split(\",\"))))\n\n document_reader = nyt_reader.NYTReader(genre=\"Business\")\n else:\n raise ValueError(\"Error\")\n\n if FLAGS.mode == MODE_TRAIN:\n nn_utils.train(model, document_reader, is_classifier=is_classifier, FLAGS=FLAGS)\n elif FLAGS.mode == MODE_EVAL:\n checkpoint = \"./test/train/model-2000\"\n nn_utils.eval(model, document_reader, checkpoint, FLAGS=FLAGS)\n\n\nif __name__ == \"__main__\":\n flags = tf.app.flags\n flags.DEFINE_string(\"mode\", \"train\", \"Model mode\")\n flags.DEFINE_integer(\"batch_size\", 64, \"Batch Size (default: 64)\")\n flags.DEFINE_integer(\"max_steps\", 3000, \"Max steps of training (default: 3000)\")\n flags.DEFINE_integer(\"num_epochs\", 100, \"Number of training epochs (default: 100)\")\n tf.flags.DEFINE_integer(\"evaluate_every\", 500,\n \"Evaluate model on dev set after this many steps (default: 100)\")\n tf.flags.DEFINE_integer(\"checkpoint_every\", 2000,\n \"Save model after this many steps (default: 1000)\")\n flags.DEFINE_float(\"dropout\", 0.4, \"dropout\")\n flags.DEFINE_float(\"learning_rate\", 1e-3, \"learning rate\")\n flags.DEFINE_integer(\"max_document_length\", 50, \"Max document length\")\n flags.DEFINE_bool(\"rnn_bidirectional\", True,\n \"Whther rnn is undirectional or bidirectional\")\n flags.DEFINE_string(\"rnn_cell_type\", \"GRU\", \"RNN cell type, GRU or LSTM\")\n 
flags.DEFINE_integer(\"rnn_num_layers\", 2, \"Number of layers of RNN\")\n flags.DEFINE_string(\"encoder\", \"RNN\", \"Type of encoder used to embed document\")\n flags.DEFINE_string(\"cnn_filter_sizes\", \"3,4,5\", \"Filter sizes in CNN encoder\")\n flags.DEFINE_integer(\"cnn_num_filters\", 32,\n \"Number of filters per filter size in CNN encoder\")\n flags.DEFINE_string(\"dnn_layer_sizes\", \"256\", \"Filter sizes in CNN encoder\")\n flags.DEFINE_string(\"output_fname\", \"./tmp/output.out\", \"Output file\")\n\n FLAGS = tf.flags.FLAGS\n main()\n",
"from difficulty.models import nn_utils\nfrom difficulty.readers import experiment_reader\nfrom difficulty.readers import nyt_reader\nfrom difficulty.readers import pico_reader\nfrom difficulty.readers import pico_sentence_reader\nimport gensim\nimport numpy as np\nimport os\nimport tensorflow as tf\nfrom tensorflow.contrib import learn\nfrom tensorflow.contrib.tensorboard.plugins import projector\nW2VModelFILE = '/mnt/data/workspace/nlp/w2v_models/PICO-w2v.vec'\nEMBEDDING_DIM = 200\nMODE_TRAIN = 'train'\nMODE_EVAL = 'eval'\nMODE_INFER = 'inference'\n\n\nclass NNModel:\n\n def __init__(self, mode=MODE_TRAIN, running_dir='./test/', encoder=\n 'CNN', num_tasks=1, task_names=['Task'], max_document_length=64,\n is_classifier=True, l2_reg_lambda=0.1, cnn_filter_sizes=[3, 4, 5],\n cnn_num_filters=128, rnn_bidirectional=False, rnn_cell_type='GRU',\n rnn_num_layers=2, dnn_layer_sizes=[512]):\n self._train = True if mode == MODE_TRAIN else False\n self._max_document_length = max_document_length\n self._num_tasks = num_tasks\n self._is_classifier = is_classifier\n self._embedding_size = EMBEDDING_DIM\n self._encoder = encoder\n self._encoding_size = 300\n self._vocab = None\n self._task_names = task_names\n self._cnn_filter_sizes = cnn_filter_sizes\n self._cnn_num_filters = cnn_num_filters\n self._rnn_bidirectional = rnn_bidirectional\n self._rnn_cell_type = rnn_cell_type\n self._rnn_num_layers = rnn_num_layers\n self._dnn_layer_sizes = dnn_layer_sizes\n self._dnn_activation = 'relu'\n self._l2_reg_lambda = l2_reg_lambda\n self.ops = []\n self.loss = None\n self.eval_metrics = {}\n self.saver = None\n self.checkpoint_dir = os.path.join(running_dir, 'train/')\n self.eval_dir = os.path.join(running_dir, 'test/')\n\n def Graph(self):\n self.input_x = tf.placeholder(tf.int32, [None, self.\n _max_document_length], name='input_x')\n self.input_l = tf.placeholder(tf.int32, [None], name='input_l')\n self.input_y = tf.placeholder(tf.float32, [None, self._num_tasks],\n name='input_y')\n self.input_w = tf.placeholder(tf.float32, [None, self._num_tasks],\n name='input_w')\n self.dropout = tf.placeholder(tf.float32, name='dropout_prob')\n if self._rnn_bidirectional:\n self.input_x_bw = tf.placeholder(tf.int32, [None, self.\n _max_document_length], name='input_x_bw')\n else:\n self.input_x_bw = None\n vocab, init_embedding = self._LoadInitEmbeddings()\n\n def _tokenizer(xs):\n return [x.split(' ') for x in xs]\n self._vocab = learn.preprocessing.VocabularyProcessor(self.\n _max_document_length, tokenizer_fn=_tokenizer)\n self._vocab.fit(vocab)\n init_embedding = np.vstack([np.random.normal(size=self.\n _embedding_size), init_embedding])\n vocab_size = len(self._vocab.vocabulary_)\n with tf.variable_scope('WordEmbeddings'):\n embeddings = tf.get_variable(name='W', shape=init_embedding.\n shape, initializer=tf.constant_initializer(init_embedding),\n trainable=False)\n if self._encoder == 'CNN':\n input_encoded = self._CNNLayers(embeddings)\n elif self._encoder == 'RNN':\n input_encoded = self._RNNLayers(embeddings)\n elif self._encoder == 'DNN':\n input_encoded = self._DNNLayers(embeddings)\n self.input_encoded = input_encoded\n with tf.variable_scope('dropout'):\n input_encoded = tf.nn.dropout(input_encoded, 1 - self.dropout)\n if self._is_classifier:\n preds, pred_scores, loss = self._classifier(input_encoded, self\n .input_y, self.input_w)\n self.ops.extend([preds, pred_scores, loss])\n else:\n pred_scores, loss = self._regressor(input_encoded, self.input_y,\n self.input_w)\n self.ops.extend([pred_scores, 
pred_scores, loss])\n self.loss = loss\n self.saver = tf.train.Saver(tf.global_variables())\n return self\n\n def _classifier(self, input_encoded, output, weights):\n total_loss = tf.constant(0.0)\n pooled_scores = []\n pooled_predictions = []\n for idx in range(self._num_tasks):\n gts = tf.expand_dims(output[:, idx], -1)\n wts = tf.expand_dims(weights[:, idx], -1)\n with tf.variable_scope('{0}_classifier'.format(self._task_names\n [idx])):\n labels = tf.concat([1 - gts, gts], 1)\n logits = tf.layers.dense(input_encoded, 2,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(\n self._l2_reg_lambda))\n scores = tf.reduce_max(tf.nn.softmax(logits), 1)\n predictions = tf.argmax(logits, 1, name='predictions')\n pooled_predictions.append(predictions)\n pooled_scores.append(scores)\n losses = tf.nn.softmax_cross_entropy_with_logits(logits=\n logits, labels=labels)\n self.eval_metrics['{0}/Accuracy'.format(self._task_names[idx])\n ] = tf.metrics.accuracy(gts, predictions, weights=wts)\n self.eval_metrics['{0}/Precision'.format(self._task_names[idx])\n ] = tf.metrics.precision(gts, predictions, weights=wts)\n self.eval_metrics['{0}/Recall'.format(self._task_names[idx])\n ] = tf.metrics.recall(gts, predictions, weights=wts)\n total_loss += tf.reduce_mean(losses * wts)\n pooled_predictions = tf.stack(pooled_predictions, axis=1)\n pooled_scores = tf.stack(pooled_scores, axis=1)\n return pooled_predictions, pooled_scores, total_loss\n\n def _regressor(self, input_encoded, output, weights):\n total_loss = tf.constant(0.0)\n pooled_logits = []\n for idx in range(self._num_tasks):\n with tf.variable_scope('{0}_regressor'.format(self._task_names[\n idx])):\n logits = tf.layers.dense(input_encoded, 1,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(\n self._l2_reg_lambda))\n gts = tf.expand_dims(output[:, idx], -1)\n wts = tf.expand_dims(weights[:, idx], -1)\n losses = tf.nn.sigmoid_cross_entropy_with_logits(logits=\n logits, labels=gts)\n total_loss += tf.reduce_mean(losses * wts)\n pooled_logits.append(tf.sigmoid(logits))\n self.eval_metrics['{0}/Pearsonr'.format(self._task_names[idx])\n ] = tf.contrib.metrics.streaming_pearson_correlation(logits\n , gts, weights=wts)\n pooled_logits = tf.stack(pooled_logits, axis=1)\n pooled_logits = tf.squeeze(pooled_logits, axis=-1)\n return pooled_logits, total_loss\n\n def _LoadInitEmbeddings(self):\n w2v_model = gensim.models.KeyedVectors.load_word2vec_format(\n W2VModelFILE, binary=False)\n vocab = []\n embd = []\n for token in w2v_model.vocab:\n vec = w2v_model[token]\n vocab.append(token)\n embd.append(vec)\n embedding = np.asarray(embd)\n return vocab, embedding\n\n def _LookupEmbeddings(self, embeddings, inputs):\n mask = tf.to_float(tf.not_equal(inputs, 0))\n inputs = tf.nn.embedding_lookup(embeddings, inputs)\n lengths = tf.cast(tf.reduce_sum(mask, axis=1), tf.int64)\n return lengths, inputs\n\n def _CNNLayers(self, embeddings):\n _, input_embeddings = self._LookupEmbeddings(embeddings, self.input_x)\n input_embeddings = tf.expand_dims(input_embeddings, -1)\n with tf.variable_scope('CNN'):\n pooled_outputs = []\n for i, filter_size in enumerate(self._cnn_filter_sizes):\n with tf.variable_scope('conv-maxpool-%s' % filter_size):\n filter_shape = [filter_size, self._embedding_size, 1,\n self._cnn_num_filters]\n W = tf.Variable(tf.truncated_normal(filter_shape,\n stddev=0.1), name='W')\n b = tf.Variable(tf.constant(0.1, shape=[self.\n _cnn_num_filters]), name='b')\n conv = tf.nn.conv2d(input_embeddings, W, strides=[1, 1,\n 1, 1], padding='VALID', 
name='conv')\n h = tf.nn.relu(tf.nn.bias_add(conv, b), name='relu')\n pooled = tf.nn.max_pool(h, ksize=[1, self.\n _max_document_length - filter_size + 1, 1, 1],\n strides=[1, 1, 1, 1], padding='VALID', name='pool')\n pooled_outputs.append(pooled)\n num_filters_total = self._cnn_num_filters * len(self.\n _cnn_filter_sizes)\n cnn_encoding = tf.concat(pooled_outputs, 3)\n cnn_encoding = tf.reshape(cnn_encoding, [-1, num_filters_total])\n with tf.variable_scope('dropout'):\n cnn_encoding = tf.nn.dropout(cnn_encoding, 1 - self.dropout)\n cnn_encoding = tf.layers.dense(cnn_encoding, self._encoding_size)\n return cnn_encoding\n\n def _DNNLayers(self, embeddings):\n lengths, input_embeddings = self._LookupEmbeddings(embeddings, self\n .input_x)\n lengths = tf.expand_dims(lengths, -1)\n input_embeddings = tf.divide(tf.reduce_sum(input_embeddings, 1), tf\n .to_float(lengths))\n with tf.variable_scope('DNN'):\n input_tensor = tf.nn.dropout(input_embeddings, 1)\n for i, out_size in enumerate(self._dnn_layer_sizes):\n with tf.variable_scope('Layer_{0}'.format(i + 1)):\n in_size = input_tensor.get_shape()[1]\n stddev = 1.0 / tf.sqrt(tf.to_float(tf.maximum(in_size,\n out_size)))\n W = tf.get_variable('W', (in_size, out_size),\n initializer=tf.truncated_normal_initializer(stddev=\n stddev))\n b = tf.get_variable('b', out_size, initializer=tf.\n constant_initializer(0.1))\n input_tensor = tf.nn.bias_add(tf.matmul(input_tensor, W), b\n )\n if self._dnn_activation == 'relu':\n input_tensor = tf.nn.relu(input_tensor, name='relu')\n else:\n raise ValueError(\n 'dnn_activation function not supported.')\n return input_tensor\n\n def _RNNCells(self):\n if self._rnn_cell_type == 'GRU':\n cells = tf.contrib.rnn.MultiRNNCell([tf.nn.rnn_cell.GRUCell(\n self._embedding_size) for x in range(self._rnn_num_layers)],\n state_is_tuple=True)\n elif self._rnn_cell_type == 'LSTM':\n cells = tf.contrib.rnn.MultiRNNCell([tf.nn.rnn_cell.LSTMCell(\n self._embedding_size) for x in range(self._rnn_num_layers)],\n state_is_tuple=True)\n return cells\n\n def _RNNLayers(self, embeddings):\n _, fw_embeddings = self._LookupEmbeddings(embeddings, self.input_x)\n if self._rnn_bidirectional:\n _, bw_embeddings = self._LookupEmbeddings(embeddings, self.\n input_x_bw)\n with tf.variable_scope('RNN'):\n with tf.variable_scope('forward'):\n fw_cells = self._RNNCells()\n _, fw_state = tf.nn.dynamic_rnn(fw_cells, fw_embeddings,\n sequence_length=self.input_l, dtype=tf.float32)\n fw_encoding = fw_state[-1]\n if self._rnn_bidirectional:\n with tf.variable_scope('backward'):\n bw_cells = self._RNNCells()\n _, bw_state = tf.nn.dynamic_rnn(bw_cells, bw_embeddings,\n sequence_length=self.input_l, dtype=tf.float32)\n bw_encoding = bw_state[-1]\n rnn_encoding = tf.concat([fw_encoding, bw_encoding], axis=1)\n else:\n rnn_encoding = fw_encoding\n with tf.variable_scope('dropout'):\n rnn_encoding = tf.nn.dropout(rnn_encoding, 1 - self.dropout)\n rnn_encoding = tf.layers.dense(rnn_encoding, self._encoding_size)\n return rnn_encoding\n\n\ndef main():\n target = 'PICOSentence'\n if target == 'PICO':\n model = NNModel(mode=FLAGS.mode, is_classifier=True, encoder=FLAGS.\n encoder, num_tasks=1, task_names=['Classification'],\n max_document_length=FLAGS.max_document_length, cnn_filter_sizes\n =list(map(int, FLAGS.cnn_filter_sizes.split(','))),\n cnn_num_filters=FLAGS.cnn_num_filters, rnn_bidirectional=FLAGS.\n rnn_bidirectional, rnn_cell_type=FLAGS.rnn_cell_type,\n rnn_num_layers=FLAGS.rnn_num_layers)\n document_reader = 
pico_reader.PICOReader(annotype='Outcome')\n elif target == 'PICOSentence':\n is_classifier = False\n model = NNModel(mode=FLAGS.mode, is_classifier=is_classifier,\n encoder='CNN', num_tasks=1, task_names=['Outcome'],\n max_document_length=FLAGS.max_document_length, cnn_filter_sizes\n =list(map(int, FLAGS.cnn_filter_sizes.split(','))),\n cnn_num_filters=FLAGS.cnn_num_filters, rnn_bidirectional=FLAGS.\n rnn_bidirectional, rnn_cell_type=FLAGS.rnn_cell_type,\n rnn_num_layers=FLAGS.rnn_num_layers)\n document_reader = experiment_reader.ExperimentReader(annotype=\n 'Outcome', binary=is_classifier)\n elif target == 'NYT':\n model = NNModel(mode=FLAGS.mode, is_classifier=True, encoder='CNN',\n num_tasks=1, task_names=['Business'], max_document_length=FLAGS\n .max_document_length, cnn_filter_sizes=list(map(int, FLAGS.\n cnn_filter_sizes.split(','))), cnn_num_filters=FLAGS.\n cnn_num_filters, rnn_bidirectional=FLAGS.rnn_bidirectional,\n rnn_cell_type=FLAGS.rnn_cell_type, rnn_num_layers=FLAGS.\n rnn_num_layers, dnn_layer_sizes=list(map(int, FLAGS.\n dnn_layer_sizes.split(','))))\n document_reader = nyt_reader.NYTReader(genre='Business')\n else:\n raise ValueError('Error')\n if FLAGS.mode == MODE_TRAIN:\n nn_utils.train(model, document_reader, is_classifier=is_classifier,\n FLAGS=FLAGS)\n elif FLAGS.mode == MODE_EVAL:\n checkpoint = './test/train/model-2000'\n nn_utils.eval(model, document_reader, checkpoint, FLAGS=FLAGS)\n\n\nif __name__ == '__main__':\n flags = tf.app.flags\n flags.DEFINE_string('mode', 'train', 'Model mode')\n flags.DEFINE_integer('batch_size', 64, 'Batch Size (default: 64)')\n flags.DEFINE_integer('max_steps', 3000,\n 'Max steps of training (default: 3000)')\n flags.DEFINE_integer('num_epochs', 100,\n 'Number of training epochs (default: 100)')\n tf.flags.DEFINE_integer('evaluate_every', 500,\n 'Evaluate model on dev set after this many steps (default: 100)')\n tf.flags.DEFINE_integer('checkpoint_every', 2000,\n 'Save model after this many steps (default: 1000)')\n flags.DEFINE_float('dropout', 0.4, 'dropout')\n flags.DEFINE_float('learning_rate', 0.001, 'learning rate')\n flags.DEFINE_integer('max_document_length', 50, 'Max document length')\n flags.DEFINE_bool('rnn_bidirectional', True,\n 'Whther rnn is undirectional or bidirectional')\n flags.DEFINE_string('rnn_cell_type', 'GRU', 'RNN cell type, GRU or LSTM')\n flags.DEFINE_integer('rnn_num_layers', 2, 'Number of layers of RNN')\n flags.DEFINE_string('encoder', 'RNN',\n 'Type of encoder used to embed document')\n flags.DEFINE_string('cnn_filter_sizes', '3,4,5',\n 'Filter sizes in CNN encoder')\n flags.DEFINE_integer('cnn_num_filters', 32,\n 'Number of filters per filter size in CNN encoder')\n flags.DEFINE_string('dnn_layer_sizes', '256', 'Filter sizes in CNN encoder'\n )\n flags.DEFINE_string('output_fname', './tmp/output.out', 'Output file')\n FLAGS = tf.flags.FLAGS\n main()\n",
"<import token>\nW2VModelFILE = '/mnt/data/workspace/nlp/w2v_models/PICO-w2v.vec'\nEMBEDDING_DIM = 200\nMODE_TRAIN = 'train'\nMODE_EVAL = 'eval'\nMODE_INFER = 'inference'\n\n\nclass NNModel:\n\n def __init__(self, mode=MODE_TRAIN, running_dir='./test/', encoder=\n 'CNN', num_tasks=1, task_names=['Task'], max_document_length=64,\n is_classifier=True, l2_reg_lambda=0.1, cnn_filter_sizes=[3, 4, 5],\n cnn_num_filters=128, rnn_bidirectional=False, rnn_cell_type='GRU',\n rnn_num_layers=2, dnn_layer_sizes=[512]):\n self._train = True if mode == MODE_TRAIN else False\n self._max_document_length = max_document_length\n self._num_tasks = num_tasks\n self._is_classifier = is_classifier\n self._embedding_size = EMBEDDING_DIM\n self._encoder = encoder\n self._encoding_size = 300\n self._vocab = None\n self._task_names = task_names\n self._cnn_filter_sizes = cnn_filter_sizes\n self._cnn_num_filters = cnn_num_filters\n self._rnn_bidirectional = rnn_bidirectional\n self._rnn_cell_type = rnn_cell_type\n self._rnn_num_layers = rnn_num_layers\n self._dnn_layer_sizes = dnn_layer_sizes\n self._dnn_activation = 'relu'\n self._l2_reg_lambda = l2_reg_lambda\n self.ops = []\n self.loss = None\n self.eval_metrics = {}\n self.saver = None\n self.checkpoint_dir = os.path.join(running_dir, 'train/')\n self.eval_dir = os.path.join(running_dir, 'test/')\n\n def Graph(self):\n self.input_x = tf.placeholder(tf.int32, [None, self.\n _max_document_length], name='input_x')\n self.input_l = tf.placeholder(tf.int32, [None], name='input_l')\n self.input_y = tf.placeholder(tf.float32, [None, self._num_tasks],\n name='input_y')\n self.input_w = tf.placeholder(tf.float32, [None, self._num_tasks],\n name='input_w')\n self.dropout = tf.placeholder(tf.float32, name='dropout_prob')\n if self._rnn_bidirectional:\n self.input_x_bw = tf.placeholder(tf.int32, [None, self.\n _max_document_length], name='input_x_bw')\n else:\n self.input_x_bw = None\n vocab, init_embedding = self._LoadInitEmbeddings()\n\n def _tokenizer(xs):\n return [x.split(' ') for x in xs]\n self._vocab = learn.preprocessing.VocabularyProcessor(self.\n _max_document_length, tokenizer_fn=_tokenizer)\n self._vocab.fit(vocab)\n init_embedding = np.vstack([np.random.normal(size=self.\n _embedding_size), init_embedding])\n vocab_size = len(self._vocab.vocabulary_)\n with tf.variable_scope('WordEmbeddings'):\n embeddings = tf.get_variable(name='W', shape=init_embedding.\n shape, initializer=tf.constant_initializer(init_embedding),\n trainable=False)\n if self._encoder == 'CNN':\n input_encoded = self._CNNLayers(embeddings)\n elif self._encoder == 'RNN':\n input_encoded = self._RNNLayers(embeddings)\n elif self._encoder == 'DNN':\n input_encoded = self._DNNLayers(embeddings)\n self.input_encoded = input_encoded\n with tf.variable_scope('dropout'):\n input_encoded = tf.nn.dropout(input_encoded, 1 - self.dropout)\n if self._is_classifier:\n preds, pred_scores, loss = self._classifier(input_encoded, self\n .input_y, self.input_w)\n self.ops.extend([preds, pred_scores, loss])\n else:\n pred_scores, loss = self._regressor(input_encoded, self.input_y,\n self.input_w)\n self.ops.extend([pred_scores, pred_scores, loss])\n self.loss = loss\n self.saver = tf.train.Saver(tf.global_variables())\n return self\n\n def _classifier(self, input_encoded, output, weights):\n total_loss = tf.constant(0.0)\n pooled_scores = []\n pooled_predictions = []\n for idx in range(self._num_tasks):\n gts = tf.expand_dims(output[:, idx], -1)\n wts = tf.expand_dims(weights[:, idx], -1)\n with 
tf.variable_scope('{0}_classifier'.format(self._task_names\n [idx])):\n labels = tf.concat([1 - gts, gts], 1)\n logits = tf.layers.dense(input_encoded, 2,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(\n self._l2_reg_lambda))\n scores = tf.reduce_max(tf.nn.softmax(logits), 1)\n predictions = tf.argmax(logits, 1, name='predictions')\n pooled_predictions.append(predictions)\n pooled_scores.append(scores)\n losses = tf.nn.softmax_cross_entropy_with_logits(logits=\n logits, labels=labels)\n self.eval_metrics['{0}/Accuracy'.format(self._task_names[idx])\n ] = tf.metrics.accuracy(gts, predictions, weights=wts)\n self.eval_metrics['{0}/Precision'.format(self._task_names[idx])\n ] = tf.metrics.precision(gts, predictions, weights=wts)\n self.eval_metrics['{0}/Recall'.format(self._task_names[idx])\n ] = tf.metrics.recall(gts, predictions, weights=wts)\n total_loss += tf.reduce_mean(losses * wts)\n pooled_predictions = tf.stack(pooled_predictions, axis=1)\n pooled_scores = tf.stack(pooled_scores, axis=1)\n return pooled_predictions, pooled_scores, total_loss\n\n def _regressor(self, input_encoded, output, weights):\n total_loss = tf.constant(0.0)\n pooled_logits = []\n for idx in range(self._num_tasks):\n with tf.variable_scope('{0}_regressor'.format(self._task_names[\n idx])):\n logits = tf.layers.dense(input_encoded, 1,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(\n self._l2_reg_lambda))\n gts = tf.expand_dims(output[:, idx], -1)\n wts = tf.expand_dims(weights[:, idx], -1)\n losses = tf.nn.sigmoid_cross_entropy_with_logits(logits=\n logits, labels=gts)\n total_loss += tf.reduce_mean(losses * wts)\n pooled_logits.append(tf.sigmoid(logits))\n self.eval_metrics['{0}/Pearsonr'.format(self._task_names[idx])\n ] = tf.contrib.metrics.streaming_pearson_correlation(logits\n , gts, weights=wts)\n pooled_logits = tf.stack(pooled_logits, axis=1)\n pooled_logits = tf.squeeze(pooled_logits, axis=-1)\n return pooled_logits, total_loss\n\n def _LoadInitEmbeddings(self):\n w2v_model = gensim.models.KeyedVectors.load_word2vec_format(\n W2VModelFILE, binary=False)\n vocab = []\n embd = []\n for token in w2v_model.vocab:\n vec = w2v_model[token]\n vocab.append(token)\n embd.append(vec)\n embedding = np.asarray(embd)\n return vocab, embedding\n\n def _LookupEmbeddings(self, embeddings, inputs):\n mask = tf.to_float(tf.not_equal(inputs, 0))\n inputs = tf.nn.embedding_lookup(embeddings, inputs)\n lengths = tf.cast(tf.reduce_sum(mask, axis=1), tf.int64)\n return lengths, inputs\n\n def _CNNLayers(self, embeddings):\n _, input_embeddings = self._LookupEmbeddings(embeddings, self.input_x)\n input_embeddings = tf.expand_dims(input_embeddings, -1)\n with tf.variable_scope('CNN'):\n pooled_outputs = []\n for i, filter_size in enumerate(self._cnn_filter_sizes):\n with tf.variable_scope('conv-maxpool-%s' % filter_size):\n filter_shape = [filter_size, self._embedding_size, 1,\n self._cnn_num_filters]\n W = tf.Variable(tf.truncated_normal(filter_shape,\n stddev=0.1), name='W')\n b = tf.Variable(tf.constant(0.1, shape=[self.\n _cnn_num_filters]), name='b')\n conv = tf.nn.conv2d(input_embeddings, W, strides=[1, 1,\n 1, 1], padding='VALID', name='conv')\n h = tf.nn.relu(tf.nn.bias_add(conv, b), name='relu')\n pooled = tf.nn.max_pool(h, ksize=[1, self.\n _max_document_length - filter_size + 1, 1, 1],\n strides=[1, 1, 1, 1], padding='VALID', name='pool')\n pooled_outputs.append(pooled)\n num_filters_total = self._cnn_num_filters * len(self.\n _cnn_filter_sizes)\n cnn_encoding = tf.concat(pooled_outputs, 3)\n 
cnn_encoding = tf.reshape(cnn_encoding, [-1, num_filters_total])\n with tf.variable_scope('dropout'):\n cnn_encoding = tf.nn.dropout(cnn_encoding, 1 - self.dropout)\n cnn_encoding = tf.layers.dense(cnn_encoding, self._encoding_size)\n return cnn_encoding\n\n def _DNNLayers(self, embeddings):\n lengths, input_embeddings = self._LookupEmbeddings(embeddings, self\n .input_x)\n lengths = tf.expand_dims(lengths, -1)\n input_embeddings = tf.divide(tf.reduce_sum(input_embeddings, 1), tf\n .to_float(lengths))\n with tf.variable_scope('DNN'):\n input_tensor = tf.nn.dropout(input_embeddings, 1)\n for i, out_size in enumerate(self._dnn_layer_sizes):\n with tf.variable_scope('Layer_{0}'.format(i + 1)):\n in_size = input_tensor.get_shape()[1]\n stddev = 1.0 / tf.sqrt(tf.to_float(tf.maximum(in_size,\n out_size)))\n W = tf.get_variable('W', (in_size, out_size),\n initializer=tf.truncated_normal_initializer(stddev=\n stddev))\n b = tf.get_variable('b', out_size, initializer=tf.\n constant_initializer(0.1))\n input_tensor = tf.nn.bias_add(tf.matmul(input_tensor, W), b\n )\n if self._dnn_activation == 'relu':\n input_tensor = tf.nn.relu(input_tensor, name='relu')\n else:\n raise ValueError(\n 'dnn_activation function not supported.')\n return input_tensor\n\n def _RNNCells(self):\n if self._rnn_cell_type == 'GRU':\n cells = tf.contrib.rnn.MultiRNNCell([tf.nn.rnn_cell.GRUCell(\n self._embedding_size) for x in range(self._rnn_num_layers)],\n state_is_tuple=True)\n elif self._rnn_cell_type == 'LSTM':\n cells = tf.contrib.rnn.MultiRNNCell([tf.nn.rnn_cell.LSTMCell(\n self._embedding_size) for x in range(self._rnn_num_layers)],\n state_is_tuple=True)\n return cells\n\n def _RNNLayers(self, embeddings):\n _, fw_embeddings = self._LookupEmbeddings(embeddings, self.input_x)\n if self._rnn_bidirectional:\n _, bw_embeddings = self._LookupEmbeddings(embeddings, self.\n input_x_bw)\n with tf.variable_scope('RNN'):\n with tf.variable_scope('forward'):\n fw_cells = self._RNNCells()\n _, fw_state = tf.nn.dynamic_rnn(fw_cells, fw_embeddings,\n sequence_length=self.input_l, dtype=tf.float32)\n fw_encoding = fw_state[-1]\n if self._rnn_bidirectional:\n with tf.variable_scope('backward'):\n bw_cells = self._RNNCells()\n _, bw_state = tf.nn.dynamic_rnn(bw_cells, bw_embeddings,\n sequence_length=self.input_l, dtype=tf.float32)\n bw_encoding = bw_state[-1]\n rnn_encoding = tf.concat([fw_encoding, bw_encoding], axis=1)\n else:\n rnn_encoding = fw_encoding\n with tf.variable_scope('dropout'):\n rnn_encoding = tf.nn.dropout(rnn_encoding, 1 - self.dropout)\n rnn_encoding = tf.layers.dense(rnn_encoding, self._encoding_size)\n return rnn_encoding\n\n\ndef main():\n target = 'PICOSentence'\n if target == 'PICO':\n model = NNModel(mode=FLAGS.mode, is_classifier=True, encoder=FLAGS.\n encoder, num_tasks=1, task_names=['Classification'],\n max_document_length=FLAGS.max_document_length, cnn_filter_sizes\n =list(map(int, FLAGS.cnn_filter_sizes.split(','))),\n cnn_num_filters=FLAGS.cnn_num_filters, rnn_bidirectional=FLAGS.\n rnn_bidirectional, rnn_cell_type=FLAGS.rnn_cell_type,\n rnn_num_layers=FLAGS.rnn_num_layers)\n document_reader = pico_reader.PICOReader(annotype='Outcome')\n elif target == 'PICOSentence':\n is_classifier = False\n model = NNModel(mode=FLAGS.mode, is_classifier=is_classifier,\n encoder='CNN', num_tasks=1, task_names=['Outcome'],\n max_document_length=FLAGS.max_document_length, cnn_filter_sizes\n =list(map(int, FLAGS.cnn_filter_sizes.split(','))),\n cnn_num_filters=FLAGS.cnn_num_filters, rnn_bidirectional=FLAGS.\n 
rnn_bidirectional, rnn_cell_type=FLAGS.rnn_cell_type,\n            rnn_num_layers=FLAGS.rnn_num_layers)\n        document_reader = experiment_reader.ExperimentReader(annotype=\n            'Outcome', binary=is_classifier)\n    elif target == 'NYT':\n        model = NNModel(mode=FLAGS.mode, is_classifier=True, encoder='CNN',\n            num_tasks=1, task_names=['Business'], max_document_length=FLAGS\n            .max_document_length, cnn_filter_sizes=list(map(int, FLAGS.\n            cnn_filter_sizes.split(','))), cnn_num_filters=FLAGS.\n            cnn_num_filters, rnn_bidirectional=FLAGS.rnn_bidirectional,\n            rnn_cell_type=FLAGS.rnn_cell_type, rnn_num_layers=FLAGS.\n            rnn_num_layers, dnn_layer_sizes=list(map(int, FLAGS.\n            dnn_layer_sizes.split(','))))\n        document_reader = nyt_reader.NYTReader(genre='Business')\n    else:\n        raise ValueError('Unsupported target: {0}'.format(target))\n    if FLAGS.mode == MODE_TRAIN:\n        # Read the flag off the model itself: the local is_classifier is\n        # only bound in the PICOSentence branch above, so referencing it\n        # here raised a NameError for the other targets.\n        nn_utils.train(model, document_reader, is_classifier=model._is_classifier,\n            FLAGS=FLAGS)\n    elif FLAGS.mode == MODE_EVAL:\n        checkpoint = './test/train/model-2000'\n        nn_utils.eval(model, document_reader, checkpoint, FLAGS=FLAGS)\n\n\nif __name__ == '__main__':\n    flags = tf.app.flags\n    flags.DEFINE_string('mode', 'train', 'Model mode')\n    flags.DEFINE_integer('batch_size', 64, 'Batch Size (default: 64)')\n    flags.DEFINE_integer('max_steps', 3000,\n        'Max steps of training (default: 3000)')\n    flags.DEFINE_integer('num_epochs', 100,\n        'Number of training epochs (default: 100)')\n    tf.flags.DEFINE_integer('evaluate_every', 500,\n        'Evaluate model on dev set after this many steps (default: 500)')\n    tf.flags.DEFINE_integer('checkpoint_every', 2000,\n        'Save model after this many steps (default: 2000)')\n    flags.DEFINE_float('dropout', 0.4, 'dropout')\n    flags.DEFINE_float('learning_rate', 0.001, 'learning rate')\n    flags.DEFINE_integer('max_document_length', 50, 'Max document length')\n    flags.DEFINE_bool('rnn_bidirectional', True,\n        'Whether the RNN is unidirectional or bidirectional')\n    flags.DEFINE_string('rnn_cell_type', 'GRU', 'RNN cell type, GRU or LSTM')\n    flags.DEFINE_integer('rnn_num_layers', 2, 'Number of layers of RNN')\n    flags.DEFINE_string('encoder', 'RNN',\n        'Type of encoder used to embed document')\n    flags.DEFINE_string('cnn_filter_sizes', '3,4,5',\n        'Filter sizes in CNN encoder')\n    flags.DEFINE_integer('cnn_num_filters', 32,\n        'Number of filters per filter size in CNN encoder')\n    flags.DEFINE_string('dnn_layer_sizes', '256', 'Layer sizes in DNN encoder')\n    flags.DEFINE_string('output_fname', './tmp/output.out', 'Output file')\n    FLAGS = tf.flags.FLAGS\n    main()\n",
"<import token>\n<assignment token>\n\n\nclass NNModel:\n\n def __init__(self, mode=MODE_TRAIN, running_dir='./test/', encoder=\n 'CNN', num_tasks=1, task_names=['Task'], max_document_length=64,\n is_classifier=True, l2_reg_lambda=0.1, cnn_filter_sizes=[3, 4, 5],\n cnn_num_filters=128, rnn_bidirectional=False, rnn_cell_type='GRU',\n rnn_num_layers=2, dnn_layer_sizes=[512]):\n self._train = True if mode == MODE_TRAIN else False\n self._max_document_length = max_document_length\n self._num_tasks = num_tasks\n self._is_classifier = is_classifier\n self._embedding_size = EMBEDDING_DIM\n self._encoder = encoder\n self._encoding_size = 300\n self._vocab = None\n self._task_names = task_names\n self._cnn_filter_sizes = cnn_filter_sizes\n self._cnn_num_filters = cnn_num_filters\n self._rnn_bidirectional = rnn_bidirectional\n self._rnn_cell_type = rnn_cell_type\n self._rnn_num_layers = rnn_num_layers\n self._dnn_layer_sizes = dnn_layer_sizes\n self._dnn_activation = 'relu'\n self._l2_reg_lambda = l2_reg_lambda\n self.ops = []\n self.loss = None\n self.eval_metrics = {}\n self.saver = None\n self.checkpoint_dir = os.path.join(running_dir, 'train/')\n self.eval_dir = os.path.join(running_dir, 'test/')\n\n def Graph(self):\n self.input_x = tf.placeholder(tf.int32, [None, self.\n _max_document_length], name='input_x')\n self.input_l = tf.placeholder(tf.int32, [None], name='input_l')\n self.input_y = tf.placeholder(tf.float32, [None, self._num_tasks],\n name='input_y')\n self.input_w = tf.placeholder(tf.float32, [None, self._num_tasks],\n name='input_w')\n self.dropout = tf.placeholder(tf.float32, name='dropout_prob')\n if self._rnn_bidirectional:\n self.input_x_bw = tf.placeholder(tf.int32, [None, self.\n _max_document_length], name='input_x_bw')\n else:\n self.input_x_bw = None\n vocab, init_embedding = self._LoadInitEmbeddings()\n\n def _tokenizer(xs):\n return [x.split(' ') for x in xs]\n self._vocab = learn.preprocessing.VocabularyProcessor(self.\n _max_document_length, tokenizer_fn=_tokenizer)\n self._vocab.fit(vocab)\n init_embedding = np.vstack([np.random.normal(size=self.\n _embedding_size), init_embedding])\n vocab_size = len(self._vocab.vocabulary_)\n with tf.variable_scope('WordEmbeddings'):\n embeddings = tf.get_variable(name='W', shape=init_embedding.\n shape, initializer=tf.constant_initializer(init_embedding),\n trainable=False)\n if self._encoder == 'CNN':\n input_encoded = self._CNNLayers(embeddings)\n elif self._encoder == 'RNN':\n input_encoded = self._RNNLayers(embeddings)\n elif self._encoder == 'DNN':\n input_encoded = self._DNNLayers(embeddings)\n self.input_encoded = input_encoded\n with tf.variable_scope('dropout'):\n input_encoded = tf.nn.dropout(input_encoded, 1 - self.dropout)\n if self._is_classifier:\n preds, pred_scores, loss = self._classifier(input_encoded, self\n .input_y, self.input_w)\n self.ops.extend([preds, pred_scores, loss])\n else:\n pred_scores, loss = self._regressor(input_encoded, self.input_y,\n self.input_w)\n self.ops.extend([pred_scores, pred_scores, loss])\n self.loss = loss\n self.saver = tf.train.Saver(tf.global_variables())\n return self\n\n def _classifier(self, input_encoded, output, weights):\n total_loss = tf.constant(0.0)\n pooled_scores = []\n pooled_predictions = []\n for idx in range(self._num_tasks):\n gts = tf.expand_dims(output[:, idx], -1)\n wts = tf.expand_dims(weights[:, idx], -1)\n with tf.variable_scope('{0}_classifier'.format(self._task_names\n [idx])):\n labels = tf.concat([1 - gts, gts], 1)\n logits = tf.layers.dense(input_encoded, 
2,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(\n self._l2_reg_lambda))\n scores = tf.reduce_max(tf.nn.softmax(logits), 1)\n predictions = tf.argmax(logits, 1, name='predictions')\n pooled_predictions.append(predictions)\n pooled_scores.append(scores)\n losses = tf.nn.softmax_cross_entropy_with_logits(logits=\n logits, labels=labels)\n self.eval_metrics['{0}/Accuracy'.format(self._task_names[idx])\n ] = tf.metrics.accuracy(gts, predictions, weights=wts)\n self.eval_metrics['{0}/Precision'.format(self._task_names[idx])\n ] = tf.metrics.precision(gts, predictions, weights=wts)\n self.eval_metrics['{0}/Recall'.format(self._task_names[idx])\n ] = tf.metrics.recall(gts, predictions, weights=wts)\n total_loss += tf.reduce_mean(losses * wts)\n pooled_predictions = tf.stack(pooled_predictions, axis=1)\n pooled_scores = tf.stack(pooled_scores, axis=1)\n return pooled_predictions, pooled_scores, total_loss\n\n def _regressor(self, input_encoded, output, weights):\n total_loss = tf.constant(0.0)\n pooled_logits = []\n for idx in range(self._num_tasks):\n with tf.variable_scope('{0}_regressor'.format(self._task_names[\n idx])):\n logits = tf.layers.dense(input_encoded, 1,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(\n self._l2_reg_lambda))\n gts = tf.expand_dims(output[:, idx], -1)\n wts = tf.expand_dims(weights[:, idx], -1)\n losses = tf.nn.sigmoid_cross_entropy_with_logits(logits=\n logits, labels=gts)\n total_loss += tf.reduce_mean(losses * wts)\n pooled_logits.append(tf.sigmoid(logits))\n self.eval_metrics['{0}/Pearsonr'.format(self._task_names[idx])\n ] = tf.contrib.metrics.streaming_pearson_correlation(logits\n , gts, weights=wts)\n pooled_logits = tf.stack(pooled_logits, axis=1)\n pooled_logits = tf.squeeze(pooled_logits, axis=-1)\n return pooled_logits, total_loss\n\n def _LoadInitEmbeddings(self):\n w2v_model = gensim.models.KeyedVectors.load_word2vec_format(\n W2VModelFILE, binary=False)\n vocab = []\n embd = []\n for token in w2v_model.vocab:\n vec = w2v_model[token]\n vocab.append(token)\n embd.append(vec)\n embedding = np.asarray(embd)\n return vocab, embedding\n\n def _LookupEmbeddings(self, embeddings, inputs):\n mask = tf.to_float(tf.not_equal(inputs, 0))\n inputs = tf.nn.embedding_lookup(embeddings, inputs)\n lengths = tf.cast(tf.reduce_sum(mask, axis=1), tf.int64)\n return lengths, inputs\n\n def _CNNLayers(self, embeddings):\n _, input_embeddings = self._LookupEmbeddings(embeddings, self.input_x)\n input_embeddings = tf.expand_dims(input_embeddings, -1)\n with tf.variable_scope('CNN'):\n pooled_outputs = []\n for i, filter_size in enumerate(self._cnn_filter_sizes):\n with tf.variable_scope('conv-maxpool-%s' % filter_size):\n filter_shape = [filter_size, self._embedding_size, 1,\n self._cnn_num_filters]\n W = tf.Variable(tf.truncated_normal(filter_shape,\n stddev=0.1), name='W')\n b = tf.Variable(tf.constant(0.1, shape=[self.\n _cnn_num_filters]), name='b')\n conv = tf.nn.conv2d(input_embeddings, W, strides=[1, 1,\n 1, 1], padding='VALID', name='conv')\n h = tf.nn.relu(tf.nn.bias_add(conv, b), name='relu')\n pooled = tf.nn.max_pool(h, ksize=[1, self.\n _max_document_length - filter_size + 1, 1, 1],\n strides=[1, 1, 1, 1], padding='VALID', name='pool')\n pooled_outputs.append(pooled)\n num_filters_total = self._cnn_num_filters * len(self.\n _cnn_filter_sizes)\n cnn_encoding = tf.concat(pooled_outputs, 3)\n cnn_encoding = tf.reshape(cnn_encoding, [-1, num_filters_total])\n with tf.variable_scope('dropout'):\n cnn_encoding = tf.nn.dropout(cnn_encoding, 1 - 
self.dropout)\n cnn_encoding = tf.layers.dense(cnn_encoding, self._encoding_size)\n return cnn_encoding\n\n def _DNNLayers(self, embeddings):\n lengths, input_embeddings = self._LookupEmbeddings(embeddings, self\n .input_x)\n lengths = tf.expand_dims(lengths, -1)\n input_embeddings = tf.divide(tf.reduce_sum(input_embeddings, 1), tf\n .to_float(lengths))\n with tf.variable_scope('DNN'):\n input_tensor = tf.nn.dropout(input_embeddings, 1)\n for i, out_size in enumerate(self._dnn_layer_sizes):\n with tf.variable_scope('Layer_{0}'.format(i + 1)):\n in_size = input_tensor.get_shape()[1]\n stddev = 1.0 / tf.sqrt(tf.to_float(tf.maximum(in_size,\n out_size)))\n W = tf.get_variable('W', (in_size, out_size),\n initializer=tf.truncated_normal_initializer(stddev=\n stddev))\n b = tf.get_variable('b', out_size, initializer=tf.\n constant_initializer(0.1))\n input_tensor = tf.nn.bias_add(tf.matmul(input_tensor, W), b\n )\n if self._dnn_activation == 'relu':\n input_tensor = tf.nn.relu(input_tensor, name='relu')\n else:\n raise ValueError(\n 'dnn_activation function not supported.')\n return input_tensor\n\n def _RNNCells(self):\n if self._rnn_cell_type == 'GRU':\n cells = tf.contrib.rnn.MultiRNNCell([tf.nn.rnn_cell.GRUCell(\n self._embedding_size) for x in range(self._rnn_num_layers)],\n state_is_tuple=True)\n elif self._rnn_cell_type == 'LSTM':\n cells = tf.contrib.rnn.MultiRNNCell([tf.nn.rnn_cell.LSTMCell(\n self._embedding_size) for x in range(self._rnn_num_layers)],\n state_is_tuple=True)\n return cells\n\n def _RNNLayers(self, embeddings):\n _, fw_embeddings = self._LookupEmbeddings(embeddings, self.input_x)\n if self._rnn_bidirectional:\n _, bw_embeddings = self._LookupEmbeddings(embeddings, self.\n input_x_bw)\n with tf.variable_scope('RNN'):\n with tf.variable_scope('forward'):\n fw_cells = self._RNNCells()\n _, fw_state = tf.nn.dynamic_rnn(fw_cells, fw_embeddings,\n sequence_length=self.input_l, dtype=tf.float32)\n fw_encoding = fw_state[-1]\n if self._rnn_bidirectional:\n with tf.variable_scope('backward'):\n bw_cells = self._RNNCells()\n _, bw_state = tf.nn.dynamic_rnn(bw_cells, bw_embeddings,\n sequence_length=self.input_l, dtype=tf.float32)\n bw_encoding = bw_state[-1]\n rnn_encoding = tf.concat([fw_encoding, bw_encoding], axis=1)\n else:\n rnn_encoding = fw_encoding\n with tf.variable_scope('dropout'):\n rnn_encoding = tf.nn.dropout(rnn_encoding, 1 - self.dropout)\n rnn_encoding = tf.layers.dense(rnn_encoding, self._encoding_size)\n return rnn_encoding\n\n\ndef main():\n target = 'PICOSentence'\n if target == 'PICO':\n model = NNModel(mode=FLAGS.mode, is_classifier=True, encoder=FLAGS.\n encoder, num_tasks=1, task_names=['Classification'],\n max_document_length=FLAGS.max_document_length, cnn_filter_sizes\n =list(map(int, FLAGS.cnn_filter_sizes.split(','))),\n cnn_num_filters=FLAGS.cnn_num_filters, rnn_bidirectional=FLAGS.\n rnn_bidirectional, rnn_cell_type=FLAGS.rnn_cell_type,\n rnn_num_layers=FLAGS.rnn_num_layers)\n document_reader = pico_reader.PICOReader(annotype='Outcome')\n elif target == 'PICOSentence':\n is_classifier = False\n model = NNModel(mode=FLAGS.mode, is_classifier=is_classifier,\n encoder='CNN', num_tasks=1, task_names=['Outcome'],\n max_document_length=FLAGS.max_document_length, cnn_filter_sizes\n =list(map(int, FLAGS.cnn_filter_sizes.split(','))),\n cnn_num_filters=FLAGS.cnn_num_filters, rnn_bidirectional=FLAGS.\n rnn_bidirectional, rnn_cell_type=FLAGS.rnn_cell_type,\n rnn_num_layers=FLAGS.rnn_num_layers)\n document_reader = experiment_reader.ExperimentReader(annotype=\n 
'Outcome', binary=is_classifier)\n    elif target == 'NYT':\n        model = NNModel(mode=FLAGS.mode, is_classifier=True, encoder='CNN',\n            num_tasks=1, task_names=['Business'], max_document_length=FLAGS\n            .max_document_length, cnn_filter_sizes=list(map(int, FLAGS.\n            cnn_filter_sizes.split(','))), cnn_num_filters=FLAGS.\n            cnn_num_filters, rnn_bidirectional=FLAGS.rnn_bidirectional,\n            rnn_cell_type=FLAGS.rnn_cell_type, rnn_num_layers=FLAGS.\n            rnn_num_layers, dnn_layer_sizes=list(map(int, FLAGS.\n            dnn_layer_sizes.split(','))))\n        document_reader = nyt_reader.NYTReader(genre='Business')\n    else:\n        raise ValueError('Unsupported target: {0}'.format(target))\n    if FLAGS.mode == MODE_TRAIN:\n        # Read the flag off the model itself: the local is_classifier is\n        # only bound in the PICOSentence branch above, so referencing it\n        # here raised a NameError for the other targets.\n        nn_utils.train(model, document_reader, is_classifier=model._is_classifier,\n            FLAGS=FLAGS)\n    elif FLAGS.mode == MODE_EVAL:\n        checkpoint = './test/train/model-2000'\n        nn_utils.eval(model, document_reader, checkpoint, FLAGS=FLAGS)\n\n\nif __name__ == '__main__':\n    flags = tf.app.flags\n    flags.DEFINE_string('mode', 'train', 'Model mode')\n    flags.DEFINE_integer('batch_size', 64, 'Batch Size (default: 64)')\n    flags.DEFINE_integer('max_steps', 3000,\n        'Max steps of training (default: 3000)')\n    flags.DEFINE_integer('num_epochs', 100,\n        'Number of training epochs (default: 100)')\n    tf.flags.DEFINE_integer('evaluate_every', 500,\n        'Evaluate model on dev set after this many steps (default: 500)')\n    tf.flags.DEFINE_integer('checkpoint_every', 2000,\n        'Save model after this many steps (default: 2000)')\n    flags.DEFINE_float('dropout', 0.4, 'dropout')\n    flags.DEFINE_float('learning_rate', 0.001, 'learning rate')\n    flags.DEFINE_integer('max_document_length', 50, 'Max document length')\n    flags.DEFINE_bool('rnn_bidirectional', True,\n        'Whether the RNN is unidirectional or bidirectional')\n    flags.DEFINE_string('rnn_cell_type', 'GRU', 'RNN cell type, GRU or LSTM')\n    flags.DEFINE_integer('rnn_num_layers', 2, 'Number of layers of RNN')\n    flags.DEFINE_string('encoder', 'RNN',\n        'Type of encoder used to embed document')\n    flags.DEFINE_string('cnn_filter_sizes', '3,4,5',\n        'Filter sizes in CNN encoder')\n    flags.DEFINE_integer('cnn_num_filters', 32,\n        'Number of filters per filter size in CNN encoder')\n    flags.DEFINE_string('dnn_layer_sizes', '256', 'Layer sizes in DNN encoder')\n    flags.DEFINE_string('output_fname', './tmp/output.out', 'Output file')\n    FLAGS = tf.flags.FLAGS\n    main()\n",
"<import token>\n<assignment token>\n\n\nclass NNModel:\n\n def __init__(self, mode=MODE_TRAIN, running_dir='./test/', encoder=\n 'CNN', num_tasks=1, task_names=['Task'], max_document_length=64,\n is_classifier=True, l2_reg_lambda=0.1, cnn_filter_sizes=[3, 4, 5],\n cnn_num_filters=128, rnn_bidirectional=False, rnn_cell_type='GRU',\n rnn_num_layers=2, dnn_layer_sizes=[512]):\n self._train = True if mode == MODE_TRAIN else False\n self._max_document_length = max_document_length\n self._num_tasks = num_tasks\n self._is_classifier = is_classifier\n self._embedding_size = EMBEDDING_DIM\n self._encoder = encoder\n self._encoding_size = 300\n self._vocab = None\n self._task_names = task_names\n self._cnn_filter_sizes = cnn_filter_sizes\n self._cnn_num_filters = cnn_num_filters\n self._rnn_bidirectional = rnn_bidirectional\n self._rnn_cell_type = rnn_cell_type\n self._rnn_num_layers = rnn_num_layers\n self._dnn_layer_sizes = dnn_layer_sizes\n self._dnn_activation = 'relu'\n self._l2_reg_lambda = l2_reg_lambda\n self.ops = []\n self.loss = None\n self.eval_metrics = {}\n self.saver = None\n self.checkpoint_dir = os.path.join(running_dir, 'train/')\n self.eval_dir = os.path.join(running_dir, 'test/')\n\n def Graph(self):\n self.input_x = tf.placeholder(tf.int32, [None, self.\n _max_document_length], name='input_x')\n self.input_l = tf.placeholder(tf.int32, [None], name='input_l')\n self.input_y = tf.placeholder(tf.float32, [None, self._num_tasks],\n name='input_y')\n self.input_w = tf.placeholder(tf.float32, [None, self._num_tasks],\n name='input_w')\n self.dropout = tf.placeholder(tf.float32, name='dropout_prob')\n if self._rnn_bidirectional:\n self.input_x_bw = tf.placeholder(tf.int32, [None, self.\n _max_document_length], name='input_x_bw')\n else:\n self.input_x_bw = None\n vocab, init_embedding = self._LoadInitEmbeddings()\n\n def _tokenizer(xs):\n return [x.split(' ') for x in xs]\n self._vocab = learn.preprocessing.VocabularyProcessor(self.\n _max_document_length, tokenizer_fn=_tokenizer)\n self._vocab.fit(vocab)\n init_embedding = np.vstack([np.random.normal(size=self.\n _embedding_size), init_embedding])\n vocab_size = len(self._vocab.vocabulary_)\n with tf.variable_scope('WordEmbeddings'):\n embeddings = tf.get_variable(name='W', shape=init_embedding.\n shape, initializer=tf.constant_initializer(init_embedding),\n trainable=False)\n if self._encoder == 'CNN':\n input_encoded = self._CNNLayers(embeddings)\n elif self._encoder == 'RNN':\n input_encoded = self._RNNLayers(embeddings)\n elif self._encoder == 'DNN':\n input_encoded = self._DNNLayers(embeddings)\n self.input_encoded = input_encoded\n with tf.variable_scope('dropout'):\n input_encoded = tf.nn.dropout(input_encoded, 1 - self.dropout)\n if self._is_classifier:\n preds, pred_scores, loss = self._classifier(input_encoded, self\n .input_y, self.input_w)\n self.ops.extend([preds, pred_scores, loss])\n else:\n pred_scores, loss = self._regressor(input_encoded, self.input_y,\n self.input_w)\n self.ops.extend([pred_scores, pred_scores, loss])\n self.loss = loss\n self.saver = tf.train.Saver(tf.global_variables())\n return self\n\n def _classifier(self, input_encoded, output, weights):\n total_loss = tf.constant(0.0)\n pooled_scores = []\n pooled_predictions = []\n for idx in range(self._num_tasks):\n gts = tf.expand_dims(output[:, idx], -1)\n wts = tf.expand_dims(weights[:, idx], -1)\n with tf.variable_scope('{0}_classifier'.format(self._task_names\n [idx])):\n labels = tf.concat([1 - gts, gts], 1)\n logits = tf.layers.dense(input_encoded, 
2,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(\n self._l2_reg_lambda))\n scores = tf.reduce_max(tf.nn.softmax(logits), 1)\n predictions = tf.argmax(logits, 1, name='predictions')\n pooled_predictions.append(predictions)\n pooled_scores.append(scores)\n losses = tf.nn.softmax_cross_entropy_with_logits(logits=\n logits, labels=labels)\n self.eval_metrics['{0}/Accuracy'.format(self._task_names[idx])\n ] = tf.metrics.accuracy(gts, predictions, weights=wts)\n self.eval_metrics['{0}/Precision'.format(self._task_names[idx])\n ] = tf.metrics.precision(gts, predictions, weights=wts)\n self.eval_metrics['{0}/Recall'.format(self._task_names[idx])\n ] = tf.metrics.recall(gts, predictions, weights=wts)\n total_loss += tf.reduce_mean(losses * wts)\n pooled_predictions = tf.stack(pooled_predictions, axis=1)\n pooled_scores = tf.stack(pooled_scores, axis=1)\n return pooled_predictions, pooled_scores, total_loss\n\n def _regressor(self, input_encoded, output, weights):\n total_loss = tf.constant(0.0)\n pooled_logits = []\n for idx in range(self._num_tasks):\n with tf.variable_scope('{0}_regressor'.format(self._task_names[\n idx])):\n logits = tf.layers.dense(input_encoded, 1,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(\n self._l2_reg_lambda))\n gts = tf.expand_dims(output[:, idx], -1)\n wts = tf.expand_dims(weights[:, idx], -1)\n losses = tf.nn.sigmoid_cross_entropy_with_logits(logits=\n logits, labels=gts)\n total_loss += tf.reduce_mean(losses * wts)\n pooled_logits.append(tf.sigmoid(logits))\n self.eval_metrics['{0}/Pearsonr'.format(self._task_names[idx])\n ] = tf.contrib.metrics.streaming_pearson_correlation(logits\n , gts, weights=wts)\n pooled_logits = tf.stack(pooled_logits, axis=1)\n pooled_logits = tf.squeeze(pooled_logits, axis=-1)\n return pooled_logits, total_loss\n\n def _LoadInitEmbeddings(self):\n w2v_model = gensim.models.KeyedVectors.load_word2vec_format(\n W2VModelFILE, binary=False)\n vocab = []\n embd = []\n for token in w2v_model.vocab:\n vec = w2v_model[token]\n vocab.append(token)\n embd.append(vec)\n embedding = np.asarray(embd)\n return vocab, embedding\n\n def _LookupEmbeddings(self, embeddings, inputs):\n mask = tf.to_float(tf.not_equal(inputs, 0))\n inputs = tf.nn.embedding_lookup(embeddings, inputs)\n lengths = tf.cast(tf.reduce_sum(mask, axis=1), tf.int64)\n return lengths, inputs\n\n def _CNNLayers(self, embeddings):\n _, input_embeddings = self._LookupEmbeddings(embeddings, self.input_x)\n input_embeddings = tf.expand_dims(input_embeddings, -1)\n with tf.variable_scope('CNN'):\n pooled_outputs = []\n for i, filter_size in enumerate(self._cnn_filter_sizes):\n with tf.variable_scope('conv-maxpool-%s' % filter_size):\n filter_shape = [filter_size, self._embedding_size, 1,\n self._cnn_num_filters]\n W = tf.Variable(tf.truncated_normal(filter_shape,\n stddev=0.1), name='W')\n b = tf.Variable(tf.constant(0.1, shape=[self.\n _cnn_num_filters]), name='b')\n conv = tf.nn.conv2d(input_embeddings, W, strides=[1, 1,\n 1, 1], padding='VALID', name='conv')\n h = tf.nn.relu(tf.nn.bias_add(conv, b), name='relu')\n pooled = tf.nn.max_pool(h, ksize=[1, self.\n _max_document_length - filter_size + 1, 1, 1],\n strides=[1, 1, 1, 1], padding='VALID', name='pool')\n pooled_outputs.append(pooled)\n num_filters_total = self._cnn_num_filters * len(self.\n _cnn_filter_sizes)\n cnn_encoding = tf.concat(pooled_outputs, 3)\n cnn_encoding = tf.reshape(cnn_encoding, [-1, num_filters_total])\n with tf.variable_scope('dropout'):\n cnn_encoding = tf.nn.dropout(cnn_encoding, 1 - 
self.dropout)\n cnn_encoding = tf.layers.dense(cnn_encoding, self._encoding_size)\n return cnn_encoding\n\n def _DNNLayers(self, embeddings):\n lengths, input_embeddings = self._LookupEmbeddings(embeddings, self\n .input_x)\n lengths = tf.expand_dims(lengths, -1)\n input_embeddings = tf.divide(tf.reduce_sum(input_embeddings, 1), tf\n .to_float(lengths))\n with tf.variable_scope('DNN'):\n input_tensor = tf.nn.dropout(input_embeddings, 1)\n for i, out_size in enumerate(self._dnn_layer_sizes):\n with tf.variable_scope('Layer_{0}'.format(i + 1)):\n in_size = input_tensor.get_shape()[1]\n stddev = 1.0 / tf.sqrt(tf.to_float(tf.maximum(in_size,\n out_size)))\n W = tf.get_variable('W', (in_size, out_size),\n initializer=tf.truncated_normal_initializer(stddev=\n stddev))\n b = tf.get_variable('b', out_size, initializer=tf.\n constant_initializer(0.1))\n input_tensor = tf.nn.bias_add(tf.matmul(input_tensor, W), b\n )\n if self._dnn_activation == 'relu':\n input_tensor = tf.nn.relu(input_tensor, name='relu')\n else:\n raise ValueError(\n 'dnn_activation function not supported.')\n return input_tensor\n\n def _RNNCells(self):\n if self._rnn_cell_type == 'GRU':\n cells = tf.contrib.rnn.MultiRNNCell([tf.nn.rnn_cell.GRUCell(\n self._embedding_size) for x in range(self._rnn_num_layers)],\n state_is_tuple=True)\n elif self._rnn_cell_type == 'LSTM':\n cells = tf.contrib.rnn.MultiRNNCell([tf.nn.rnn_cell.LSTMCell(\n self._embedding_size) for x in range(self._rnn_num_layers)],\n state_is_tuple=True)\n return cells\n\n def _RNNLayers(self, embeddings):\n _, fw_embeddings = self._LookupEmbeddings(embeddings, self.input_x)\n if self._rnn_bidirectional:\n _, bw_embeddings = self._LookupEmbeddings(embeddings, self.\n input_x_bw)\n with tf.variable_scope('RNN'):\n with tf.variable_scope('forward'):\n fw_cells = self._RNNCells()\n _, fw_state = tf.nn.dynamic_rnn(fw_cells, fw_embeddings,\n sequence_length=self.input_l, dtype=tf.float32)\n fw_encoding = fw_state[-1]\n if self._rnn_bidirectional:\n with tf.variable_scope('backward'):\n bw_cells = self._RNNCells()\n _, bw_state = tf.nn.dynamic_rnn(bw_cells, bw_embeddings,\n sequence_length=self.input_l, dtype=tf.float32)\n bw_encoding = bw_state[-1]\n rnn_encoding = tf.concat([fw_encoding, bw_encoding], axis=1)\n else:\n rnn_encoding = fw_encoding\n with tf.variable_scope('dropout'):\n rnn_encoding = tf.nn.dropout(rnn_encoding, 1 - self.dropout)\n rnn_encoding = tf.layers.dense(rnn_encoding, self._encoding_size)\n return rnn_encoding\n\n\ndef main():\n target = 'PICOSentence'\n if target == 'PICO':\n model = NNModel(mode=FLAGS.mode, is_classifier=True, encoder=FLAGS.\n encoder, num_tasks=1, task_names=['Classification'],\n max_document_length=FLAGS.max_document_length, cnn_filter_sizes\n =list(map(int, FLAGS.cnn_filter_sizes.split(','))),\n cnn_num_filters=FLAGS.cnn_num_filters, rnn_bidirectional=FLAGS.\n rnn_bidirectional, rnn_cell_type=FLAGS.rnn_cell_type,\n rnn_num_layers=FLAGS.rnn_num_layers)\n document_reader = pico_reader.PICOReader(annotype='Outcome')\n elif target == 'PICOSentence':\n is_classifier = False\n model = NNModel(mode=FLAGS.mode, is_classifier=is_classifier,\n encoder='CNN', num_tasks=1, task_names=['Outcome'],\n max_document_length=FLAGS.max_document_length, cnn_filter_sizes\n =list(map(int, FLAGS.cnn_filter_sizes.split(','))),\n cnn_num_filters=FLAGS.cnn_num_filters, rnn_bidirectional=FLAGS.\n rnn_bidirectional, rnn_cell_type=FLAGS.rnn_cell_type,\n rnn_num_layers=FLAGS.rnn_num_layers)\n document_reader = experiment_reader.ExperimentReader(annotype=\n 
'Outcome', binary=is_classifier)\n    elif target == 'NYT':\n        model = NNModel(mode=FLAGS.mode, is_classifier=True, encoder='CNN',\n            num_tasks=1, task_names=['Business'], max_document_length=FLAGS\n            .max_document_length, cnn_filter_sizes=list(map(int, FLAGS.\n            cnn_filter_sizes.split(','))), cnn_num_filters=FLAGS.\n            cnn_num_filters, rnn_bidirectional=FLAGS.rnn_bidirectional,\n            rnn_cell_type=FLAGS.rnn_cell_type, rnn_num_layers=FLAGS.\n            rnn_num_layers, dnn_layer_sizes=list(map(int, FLAGS.\n            dnn_layer_sizes.split(','))))\n        document_reader = nyt_reader.NYTReader(genre='Business')\n    else:\n        raise ValueError('Unsupported target: {0}'.format(target))\n    if FLAGS.mode == MODE_TRAIN:\n        # Read the flag off the model itself: the local is_classifier is\n        # only bound in the PICOSentence branch above, so referencing it\n        # here raised a NameError for the other targets.\n        nn_utils.train(model, document_reader, is_classifier=model._is_classifier,\n            FLAGS=FLAGS)\n    elif FLAGS.mode == MODE_EVAL:\n        checkpoint = './test/train/model-2000'\n        nn_utils.eval(model, document_reader, checkpoint, FLAGS=FLAGS)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass NNModel:\n\n def __init__(self, mode=MODE_TRAIN, running_dir='./test/', encoder=\n 'CNN', num_tasks=1, task_names=['Task'], max_document_length=64,\n is_classifier=True, l2_reg_lambda=0.1, cnn_filter_sizes=[3, 4, 5],\n cnn_num_filters=128, rnn_bidirectional=False, rnn_cell_type='GRU',\n rnn_num_layers=2, dnn_layer_sizes=[512]):\n self._train = True if mode == MODE_TRAIN else False\n self._max_document_length = max_document_length\n self._num_tasks = num_tasks\n self._is_classifier = is_classifier\n self._embedding_size = EMBEDDING_DIM\n self._encoder = encoder\n self._encoding_size = 300\n self._vocab = None\n self._task_names = task_names\n self._cnn_filter_sizes = cnn_filter_sizes\n self._cnn_num_filters = cnn_num_filters\n self._rnn_bidirectional = rnn_bidirectional\n self._rnn_cell_type = rnn_cell_type\n self._rnn_num_layers = rnn_num_layers\n self._dnn_layer_sizes = dnn_layer_sizes\n self._dnn_activation = 'relu'\n self._l2_reg_lambda = l2_reg_lambda\n self.ops = []\n self.loss = None\n self.eval_metrics = {}\n self.saver = None\n self.checkpoint_dir = os.path.join(running_dir, 'train/')\n self.eval_dir = os.path.join(running_dir, 'test/')\n\n def Graph(self):\n self.input_x = tf.placeholder(tf.int32, [None, self.\n _max_document_length], name='input_x')\n self.input_l = tf.placeholder(tf.int32, [None], name='input_l')\n self.input_y = tf.placeholder(tf.float32, [None, self._num_tasks],\n name='input_y')\n self.input_w = tf.placeholder(tf.float32, [None, self._num_tasks],\n name='input_w')\n self.dropout = tf.placeholder(tf.float32, name='dropout_prob')\n if self._rnn_bidirectional:\n self.input_x_bw = tf.placeholder(tf.int32, [None, self.\n _max_document_length], name='input_x_bw')\n else:\n self.input_x_bw = None\n vocab, init_embedding = self._LoadInitEmbeddings()\n\n def _tokenizer(xs):\n return [x.split(' ') for x in xs]\n self._vocab = learn.preprocessing.VocabularyProcessor(self.\n _max_document_length, tokenizer_fn=_tokenizer)\n self._vocab.fit(vocab)\n init_embedding = np.vstack([np.random.normal(size=self.\n _embedding_size), init_embedding])\n vocab_size = len(self._vocab.vocabulary_)\n with tf.variable_scope('WordEmbeddings'):\n embeddings = tf.get_variable(name='W', shape=init_embedding.\n shape, initializer=tf.constant_initializer(init_embedding),\n trainable=False)\n if self._encoder == 'CNN':\n input_encoded = self._CNNLayers(embeddings)\n elif self._encoder == 'RNN':\n input_encoded = self._RNNLayers(embeddings)\n elif self._encoder == 'DNN':\n input_encoded = self._DNNLayers(embeddings)\n self.input_encoded = input_encoded\n with tf.variable_scope('dropout'):\n input_encoded = tf.nn.dropout(input_encoded, 1 - self.dropout)\n if self._is_classifier:\n preds, pred_scores, loss = self._classifier(input_encoded, self\n .input_y, self.input_w)\n self.ops.extend([preds, pred_scores, loss])\n else:\n pred_scores, loss = self._regressor(input_encoded, self.input_y,\n self.input_w)\n self.ops.extend([pred_scores, pred_scores, loss])\n self.loss = loss\n self.saver = tf.train.Saver(tf.global_variables())\n return self\n\n def _classifier(self, input_encoded, output, weights):\n total_loss = tf.constant(0.0)\n pooled_scores = []\n pooled_predictions = []\n for idx in range(self._num_tasks):\n gts = tf.expand_dims(output[:, idx], -1)\n wts = tf.expand_dims(weights[:, idx], -1)\n with tf.variable_scope('{0}_classifier'.format(self._task_names\n [idx])):\n labels = tf.concat([1 - gts, gts], 1)\n logits = tf.layers.dense(input_encoded, 
2,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(\n self._l2_reg_lambda))\n scores = tf.reduce_max(tf.nn.softmax(logits), 1)\n predictions = tf.argmax(logits, 1, name='predictions')\n pooled_predictions.append(predictions)\n pooled_scores.append(scores)\n losses = tf.nn.softmax_cross_entropy_with_logits(logits=\n logits, labels=labels)\n self.eval_metrics['{0}/Accuracy'.format(self._task_names[idx])\n ] = tf.metrics.accuracy(gts, predictions, weights=wts)\n self.eval_metrics['{0}/Precision'.format(self._task_names[idx])\n ] = tf.metrics.precision(gts, predictions, weights=wts)\n self.eval_metrics['{0}/Recall'.format(self._task_names[idx])\n ] = tf.metrics.recall(gts, predictions, weights=wts)\n total_loss += tf.reduce_mean(losses * wts)\n pooled_predictions = tf.stack(pooled_predictions, axis=1)\n pooled_scores = tf.stack(pooled_scores, axis=1)\n return pooled_predictions, pooled_scores, total_loss\n\n def _regressor(self, input_encoded, output, weights):\n total_loss = tf.constant(0.0)\n pooled_logits = []\n for idx in range(self._num_tasks):\n with tf.variable_scope('{0}_regressor'.format(self._task_names[\n idx])):\n logits = tf.layers.dense(input_encoded, 1,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(\n self._l2_reg_lambda))\n gts = tf.expand_dims(output[:, idx], -1)\n wts = tf.expand_dims(weights[:, idx], -1)\n losses = tf.nn.sigmoid_cross_entropy_with_logits(logits=\n logits, labels=gts)\n total_loss += tf.reduce_mean(losses * wts)\n pooled_logits.append(tf.sigmoid(logits))\n self.eval_metrics['{0}/Pearsonr'.format(self._task_names[idx])\n ] = tf.contrib.metrics.streaming_pearson_correlation(logits\n , gts, weights=wts)\n pooled_logits = tf.stack(pooled_logits, axis=1)\n pooled_logits = tf.squeeze(pooled_logits, axis=-1)\n return pooled_logits, total_loss\n\n def _LoadInitEmbeddings(self):\n w2v_model = gensim.models.KeyedVectors.load_word2vec_format(\n W2VModelFILE, binary=False)\n vocab = []\n embd = []\n for token in w2v_model.vocab:\n vec = w2v_model[token]\n vocab.append(token)\n embd.append(vec)\n embedding = np.asarray(embd)\n return vocab, embedding\n\n def _LookupEmbeddings(self, embeddings, inputs):\n mask = tf.to_float(tf.not_equal(inputs, 0))\n inputs = tf.nn.embedding_lookup(embeddings, inputs)\n lengths = tf.cast(tf.reduce_sum(mask, axis=1), tf.int64)\n return lengths, inputs\n\n def _CNNLayers(self, embeddings):\n _, input_embeddings = self._LookupEmbeddings(embeddings, self.input_x)\n input_embeddings = tf.expand_dims(input_embeddings, -1)\n with tf.variable_scope('CNN'):\n pooled_outputs = []\n for i, filter_size in enumerate(self._cnn_filter_sizes):\n with tf.variable_scope('conv-maxpool-%s' % filter_size):\n filter_shape = [filter_size, self._embedding_size, 1,\n self._cnn_num_filters]\n W = tf.Variable(tf.truncated_normal(filter_shape,\n stddev=0.1), name='W')\n b = tf.Variable(tf.constant(0.1, shape=[self.\n _cnn_num_filters]), name='b')\n conv = tf.nn.conv2d(input_embeddings, W, strides=[1, 1,\n 1, 1], padding='VALID', name='conv')\n h = tf.nn.relu(tf.nn.bias_add(conv, b), name='relu')\n pooled = tf.nn.max_pool(h, ksize=[1, self.\n _max_document_length - filter_size + 1, 1, 1],\n strides=[1, 1, 1, 1], padding='VALID', name='pool')\n pooled_outputs.append(pooled)\n num_filters_total = self._cnn_num_filters * len(self.\n _cnn_filter_sizes)\n cnn_encoding = tf.concat(pooled_outputs, 3)\n cnn_encoding = tf.reshape(cnn_encoding, [-1, num_filters_total])\n with tf.variable_scope('dropout'):\n cnn_encoding = tf.nn.dropout(cnn_encoding, 1 - 
self.dropout)\n cnn_encoding = tf.layers.dense(cnn_encoding, self._encoding_size)\n return cnn_encoding\n\n def _DNNLayers(self, embeddings):\n lengths, input_embeddings = self._LookupEmbeddings(embeddings, self\n .input_x)\n lengths = tf.expand_dims(lengths, -1)\n input_embeddings = tf.divide(tf.reduce_sum(input_embeddings, 1), tf\n .to_float(lengths))\n with tf.variable_scope('DNN'):\n input_tensor = tf.nn.dropout(input_embeddings, 1)\n for i, out_size in enumerate(self._dnn_layer_sizes):\n with tf.variable_scope('Layer_{0}'.format(i + 1)):\n in_size = input_tensor.get_shape()[1]\n stddev = 1.0 / tf.sqrt(tf.to_float(tf.maximum(in_size,\n out_size)))\n W = tf.get_variable('W', (in_size, out_size),\n initializer=tf.truncated_normal_initializer(stddev=\n stddev))\n b = tf.get_variable('b', out_size, initializer=tf.\n constant_initializer(0.1))\n input_tensor = tf.nn.bias_add(tf.matmul(input_tensor, W), b\n )\n if self._dnn_activation == 'relu':\n input_tensor = tf.nn.relu(input_tensor, name='relu')\n else:\n raise ValueError(\n 'dnn_activation function not supported.')\n return input_tensor\n\n def _RNNCells(self):\n if self._rnn_cell_type == 'GRU':\n cells = tf.contrib.rnn.MultiRNNCell([tf.nn.rnn_cell.GRUCell(\n self._embedding_size) for x in range(self._rnn_num_layers)],\n state_is_tuple=True)\n elif self._rnn_cell_type == 'LSTM':\n cells = tf.contrib.rnn.MultiRNNCell([tf.nn.rnn_cell.LSTMCell(\n self._embedding_size) for x in range(self._rnn_num_layers)],\n state_is_tuple=True)\n return cells\n\n def _RNNLayers(self, embeddings):\n _, fw_embeddings = self._LookupEmbeddings(embeddings, self.input_x)\n if self._rnn_bidirectional:\n _, bw_embeddings = self._LookupEmbeddings(embeddings, self.\n input_x_bw)\n with tf.variable_scope('RNN'):\n with tf.variable_scope('forward'):\n fw_cells = self._RNNCells()\n _, fw_state = tf.nn.dynamic_rnn(fw_cells, fw_embeddings,\n sequence_length=self.input_l, dtype=tf.float32)\n fw_encoding = fw_state[-1]\n if self._rnn_bidirectional:\n with tf.variable_scope('backward'):\n bw_cells = self._RNNCells()\n _, bw_state = tf.nn.dynamic_rnn(bw_cells, bw_embeddings,\n sequence_length=self.input_l, dtype=tf.float32)\n bw_encoding = bw_state[-1]\n rnn_encoding = tf.concat([fw_encoding, bw_encoding], axis=1)\n else:\n rnn_encoding = fw_encoding\n with tf.variable_scope('dropout'):\n rnn_encoding = tf.nn.dropout(rnn_encoding, 1 - self.dropout)\n rnn_encoding = tf.layers.dense(rnn_encoding, self._encoding_size)\n return rnn_encoding\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass NNModel:\n\n def __init__(self, mode=MODE_TRAIN, running_dir='./test/', encoder=\n 'CNN', num_tasks=1, task_names=['Task'], max_document_length=64,\n is_classifier=True, l2_reg_lambda=0.1, cnn_filter_sizes=[3, 4, 5],\n cnn_num_filters=128, rnn_bidirectional=False, rnn_cell_type='GRU',\n rnn_num_layers=2, dnn_layer_sizes=[512]):\n self._train = True if mode == MODE_TRAIN else False\n self._max_document_length = max_document_length\n self._num_tasks = num_tasks\n self._is_classifier = is_classifier\n self._embedding_size = EMBEDDING_DIM\n self._encoder = encoder\n self._encoding_size = 300\n self._vocab = None\n self._task_names = task_names\n self._cnn_filter_sizes = cnn_filter_sizes\n self._cnn_num_filters = cnn_num_filters\n self._rnn_bidirectional = rnn_bidirectional\n self._rnn_cell_type = rnn_cell_type\n self._rnn_num_layers = rnn_num_layers\n self._dnn_layer_sizes = dnn_layer_sizes\n self._dnn_activation = 'relu'\n self._l2_reg_lambda = l2_reg_lambda\n self.ops = []\n self.loss = None\n self.eval_metrics = {}\n self.saver = None\n self.checkpoint_dir = os.path.join(running_dir, 'train/')\n self.eval_dir = os.path.join(running_dir, 'test/')\n\n def Graph(self):\n self.input_x = tf.placeholder(tf.int32, [None, self.\n _max_document_length], name='input_x')\n self.input_l = tf.placeholder(tf.int32, [None], name='input_l')\n self.input_y = tf.placeholder(tf.float32, [None, self._num_tasks],\n name='input_y')\n self.input_w = tf.placeholder(tf.float32, [None, self._num_tasks],\n name='input_w')\n self.dropout = tf.placeholder(tf.float32, name='dropout_prob')\n if self._rnn_bidirectional:\n self.input_x_bw = tf.placeholder(tf.int32, [None, self.\n _max_document_length], name='input_x_bw')\n else:\n self.input_x_bw = None\n vocab, init_embedding = self._LoadInitEmbeddings()\n\n def _tokenizer(xs):\n return [x.split(' ') for x in xs]\n self._vocab = learn.preprocessing.VocabularyProcessor(self.\n _max_document_length, tokenizer_fn=_tokenizer)\n self._vocab.fit(vocab)\n init_embedding = np.vstack([np.random.normal(size=self.\n _embedding_size), init_embedding])\n vocab_size = len(self._vocab.vocabulary_)\n with tf.variable_scope('WordEmbeddings'):\n embeddings = tf.get_variable(name='W', shape=init_embedding.\n shape, initializer=tf.constant_initializer(init_embedding),\n trainable=False)\n if self._encoder == 'CNN':\n input_encoded = self._CNNLayers(embeddings)\n elif self._encoder == 'RNN':\n input_encoded = self._RNNLayers(embeddings)\n elif self._encoder == 'DNN':\n input_encoded = self._DNNLayers(embeddings)\n self.input_encoded = input_encoded\n with tf.variable_scope('dropout'):\n input_encoded = tf.nn.dropout(input_encoded, 1 - self.dropout)\n if self._is_classifier:\n preds, pred_scores, loss = self._classifier(input_encoded, self\n .input_y, self.input_w)\n self.ops.extend([preds, pred_scores, loss])\n else:\n pred_scores, loss = self._regressor(input_encoded, self.input_y,\n self.input_w)\n self.ops.extend([pred_scores, pred_scores, loss])\n self.loss = loss\n self.saver = tf.train.Saver(tf.global_variables())\n return self\n\n def _classifier(self, input_encoded, output, weights):\n total_loss = tf.constant(0.0)\n pooled_scores = []\n pooled_predictions = []\n for idx in range(self._num_tasks):\n gts = tf.expand_dims(output[:, idx], -1)\n wts = tf.expand_dims(weights[:, idx], -1)\n with tf.variable_scope('{0}_classifier'.format(self._task_names\n [idx])):\n labels = tf.concat([1 - gts, gts], 1)\n logits = tf.layers.dense(input_encoded, 
2,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(\n self._l2_reg_lambda))\n scores = tf.reduce_max(tf.nn.softmax(logits), 1)\n predictions = tf.argmax(logits, 1, name='predictions')\n pooled_predictions.append(predictions)\n pooled_scores.append(scores)\n losses = tf.nn.softmax_cross_entropy_with_logits(logits=\n logits, labels=labels)\n self.eval_metrics['{0}/Accuracy'.format(self._task_names[idx])\n ] = tf.metrics.accuracy(gts, predictions, weights=wts)\n self.eval_metrics['{0}/Precision'.format(self._task_names[idx])\n ] = tf.metrics.precision(gts, predictions, weights=wts)\n self.eval_metrics['{0}/Recall'.format(self._task_names[idx])\n ] = tf.metrics.recall(gts, predictions, weights=wts)\n total_loss += tf.reduce_mean(losses * wts)\n pooled_predictions = tf.stack(pooled_predictions, axis=1)\n pooled_scores = tf.stack(pooled_scores, axis=1)\n return pooled_predictions, pooled_scores, total_loss\n\n def _regressor(self, input_encoded, output, weights):\n total_loss = tf.constant(0.0)\n pooled_logits = []\n for idx in range(self._num_tasks):\n with tf.variable_scope('{0}_regressor'.format(self._task_names[\n idx])):\n logits = tf.layers.dense(input_encoded, 1,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(\n self._l2_reg_lambda))\n gts = tf.expand_dims(output[:, idx], -1)\n wts = tf.expand_dims(weights[:, idx], -1)\n losses = tf.nn.sigmoid_cross_entropy_with_logits(logits=\n logits, labels=gts)\n total_loss += tf.reduce_mean(losses * wts)\n pooled_logits.append(tf.sigmoid(logits))\n self.eval_metrics['{0}/Pearsonr'.format(self._task_names[idx])\n ] = tf.contrib.metrics.streaming_pearson_correlation(logits\n , gts, weights=wts)\n pooled_logits = tf.stack(pooled_logits, axis=1)\n pooled_logits = tf.squeeze(pooled_logits, axis=-1)\n return pooled_logits, total_loss\n\n def _LoadInitEmbeddings(self):\n w2v_model = gensim.models.KeyedVectors.load_word2vec_format(\n W2VModelFILE, binary=False)\n vocab = []\n embd = []\n for token in w2v_model.vocab:\n vec = w2v_model[token]\n vocab.append(token)\n embd.append(vec)\n embedding = np.asarray(embd)\n return vocab, embedding\n\n def _LookupEmbeddings(self, embeddings, inputs):\n mask = tf.to_float(tf.not_equal(inputs, 0))\n inputs = tf.nn.embedding_lookup(embeddings, inputs)\n lengths = tf.cast(tf.reduce_sum(mask, axis=1), tf.int64)\n return lengths, inputs\n\n def _CNNLayers(self, embeddings):\n _, input_embeddings = self._LookupEmbeddings(embeddings, self.input_x)\n input_embeddings = tf.expand_dims(input_embeddings, -1)\n with tf.variable_scope('CNN'):\n pooled_outputs = []\n for i, filter_size in enumerate(self._cnn_filter_sizes):\n with tf.variable_scope('conv-maxpool-%s' % filter_size):\n filter_shape = [filter_size, self._embedding_size, 1,\n self._cnn_num_filters]\n W = tf.Variable(tf.truncated_normal(filter_shape,\n stddev=0.1), name='W')\n b = tf.Variable(tf.constant(0.1, shape=[self.\n _cnn_num_filters]), name='b')\n conv = tf.nn.conv2d(input_embeddings, W, strides=[1, 1,\n 1, 1], padding='VALID', name='conv')\n h = tf.nn.relu(tf.nn.bias_add(conv, b), name='relu')\n pooled = tf.nn.max_pool(h, ksize=[1, self.\n _max_document_length - filter_size + 1, 1, 1],\n strides=[1, 1, 1, 1], padding='VALID', name='pool')\n pooled_outputs.append(pooled)\n num_filters_total = self._cnn_num_filters * len(self.\n _cnn_filter_sizes)\n cnn_encoding = tf.concat(pooled_outputs, 3)\n cnn_encoding = tf.reshape(cnn_encoding, [-1, num_filters_total])\n with tf.variable_scope('dropout'):\n cnn_encoding = tf.nn.dropout(cnn_encoding, 1 - 
self.dropout)\n cnn_encoding = tf.layers.dense(cnn_encoding, self._encoding_size)\n return cnn_encoding\n\n def _DNNLayers(self, embeddings):\n lengths, input_embeddings = self._LookupEmbeddings(embeddings, self\n .input_x)\n lengths = tf.expand_dims(lengths, -1)\n input_embeddings = tf.divide(tf.reduce_sum(input_embeddings, 1), tf\n .to_float(lengths))\n with tf.variable_scope('DNN'):\n input_tensor = tf.nn.dropout(input_embeddings, 1)\n for i, out_size in enumerate(self._dnn_layer_sizes):\n with tf.variable_scope('Layer_{0}'.format(i + 1)):\n in_size = input_tensor.get_shape()[1]\n stddev = 1.0 / tf.sqrt(tf.to_float(tf.maximum(in_size,\n out_size)))\n W = tf.get_variable('W', (in_size, out_size),\n initializer=tf.truncated_normal_initializer(stddev=\n stddev))\n b = tf.get_variable('b', out_size, initializer=tf.\n constant_initializer(0.1))\n input_tensor = tf.nn.bias_add(tf.matmul(input_tensor, W), b\n )\n if self._dnn_activation == 'relu':\n input_tensor = tf.nn.relu(input_tensor, name='relu')\n else:\n raise ValueError(\n 'dnn_activation function not supported.')\n return input_tensor\n <function token>\n\n def _RNNLayers(self, embeddings):\n _, fw_embeddings = self._LookupEmbeddings(embeddings, self.input_x)\n if self._rnn_bidirectional:\n _, bw_embeddings = self._LookupEmbeddings(embeddings, self.\n input_x_bw)\n with tf.variable_scope('RNN'):\n with tf.variable_scope('forward'):\n fw_cells = self._RNNCells()\n _, fw_state = tf.nn.dynamic_rnn(fw_cells, fw_embeddings,\n sequence_length=self.input_l, dtype=tf.float32)\n fw_encoding = fw_state[-1]\n if self._rnn_bidirectional:\n with tf.variable_scope('backward'):\n bw_cells = self._RNNCells()\n _, bw_state = tf.nn.dynamic_rnn(bw_cells, bw_embeddings,\n sequence_length=self.input_l, dtype=tf.float32)\n bw_encoding = bw_state[-1]\n rnn_encoding = tf.concat([fw_encoding, bw_encoding], axis=1)\n else:\n rnn_encoding = fw_encoding\n with tf.variable_scope('dropout'):\n rnn_encoding = tf.nn.dropout(rnn_encoding, 1 - self.dropout)\n rnn_encoding = tf.layers.dense(rnn_encoding, self._encoding_size)\n return rnn_encoding\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass NNModel:\n\n def __init__(self, mode=MODE_TRAIN, running_dir='./test/', encoder=\n 'CNN', num_tasks=1, task_names=['Task'], max_document_length=64,\n is_classifier=True, l2_reg_lambda=0.1, cnn_filter_sizes=[3, 4, 5],\n cnn_num_filters=128, rnn_bidirectional=False, rnn_cell_type='GRU',\n rnn_num_layers=2, dnn_layer_sizes=[512]):\n self._train = True if mode == MODE_TRAIN else False\n self._max_document_length = max_document_length\n self._num_tasks = num_tasks\n self._is_classifier = is_classifier\n self._embedding_size = EMBEDDING_DIM\n self._encoder = encoder\n self._encoding_size = 300\n self._vocab = None\n self._task_names = task_names\n self._cnn_filter_sizes = cnn_filter_sizes\n self._cnn_num_filters = cnn_num_filters\n self._rnn_bidirectional = rnn_bidirectional\n self._rnn_cell_type = rnn_cell_type\n self._rnn_num_layers = rnn_num_layers\n self._dnn_layer_sizes = dnn_layer_sizes\n self._dnn_activation = 'relu'\n self._l2_reg_lambda = l2_reg_lambda\n self.ops = []\n self.loss = None\n self.eval_metrics = {}\n self.saver = None\n self.checkpoint_dir = os.path.join(running_dir, 'train/')\n self.eval_dir = os.path.join(running_dir, 'test/')\n\n def Graph(self):\n self.input_x = tf.placeholder(tf.int32, [None, self.\n _max_document_length], name='input_x')\n self.input_l = tf.placeholder(tf.int32, [None], name='input_l')\n self.input_y = tf.placeholder(tf.float32, [None, self._num_tasks],\n name='input_y')\n self.input_w = tf.placeholder(tf.float32, [None, self._num_tasks],\n name='input_w')\n self.dropout = tf.placeholder(tf.float32, name='dropout_prob')\n if self._rnn_bidirectional:\n self.input_x_bw = tf.placeholder(tf.int32, [None, self.\n _max_document_length], name='input_x_bw')\n else:\n self.input_x_bw = None\n vocab, init_embedding = self._LoadInitEmbeddings()\n\n def _tokenizer(xs):\n return [x.split(' ') for x in xs]\n self._vocab = learn.preprocessing.VocabularyProcessor(self.\n _max_document_length, tokenizer_fn=_tokenizer)\n self._vocab.fit(vocab)\n init_embedding = np.vstack([np.random.normal(size=self.\n _embedding_size), init_embedding])\n vocab_size = len(self._vocab.vocabulary_)\n with tf.variable_scope('WordEmbeddings'):\n embeddings = tf.get_variable(name='W', shape=init_embedding.\n shape, initializer=tf.constant_initializer(init_embedding),\n trainable=False)\n if self._encoder == 'CNN':\n input_encoded = self._CNNLayers(embeddings)\n elif self._encoder == 'RNN':\n input_encoded = self._RNNLayers(embeddings)\n elif self._encoder == 'DNN':\n input_encoded = self._DNNLayers(embeddings)\n self.input_encoded = input_encoded\n with tf.variable_scope('dropout'):\n input_encoded = tf.nn.dropout(input_encoded, 1 - self.dropout)\n if self._is_classifier:\n preds, pred_scores, loss = self._classifier(input_encoded, self\n .input_y, self.input_w)\n self.ops.extend([preds, pred_scores, loss])\n else:\n pred_scores, loss = self._regressor(input_encoded, self.input_y,\n self.input_w)\n self.ops.extend([pred_scores, pred_scores, loss])\n self.loss = loss\n self.saver = tf.train.Saver(tf.global_variables())\n return self\n\n def _classifier(self, input_encoded, output, weights):\n total_loss = tf.constant(0.0)\n pooled_scores = []\n pooled_predictions = []\n for idx in range(self._num_tasks):\n gts = tf.expand_dims(output[:, idx], -1)\n wts = tf.expand_dims(weights[:, idx], -1)\n with tf.variable_scope('{0}_classifier'.format(self._task_names\n [idx])):\n labels = tf.concat([1 - gts, gts], 1)\n logits = tf.layers.dense(input_encoded, 
2,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(\n self._l2_reg_lambda))\n scores = tf.reduce_max(tf.nn.softmax(logits), 1)\n predictions = tf.argmax(logits, 1, name='predictions')\n pooled_predictions.append(predictions)\n pooled_scores.append(scores)\n losses = tf.nn.softmax_cross_entropy_with_logits(logits=\n logits, labels=labels)\n self.eval_metrics['{0}/Accuracy'.format(self._task_names[idx])\n ] = tf.metrics.accuracy(gts, predictions, weights=wts)\n self.eval_metrics['{0}/Precision'.format(self._task_names[idx])\n ] = tf.metrics.precision(gts, predictions, weights=wts)\n self.eval_metrics['{0}/Recall'.format(self._task_names[idx])\n ] = tf.metrics.recall(gts, predictions, weights=wts)\n total_loss += tf.reduce_mean(losses * wts)\n pooled_predictions = tf.stack(pooled_predictions, axis=1)\n pooled_scores = tf.stack(pooled_scores, axis=1)\n return pooled_predictions, pooled_scores, total_loss\n\n def _regressor(self, input_encoded, output, weights):\n total_loss = tf.constant(0.0)\n pooled_logits = []\n for idx in range(self._num_tasks):\n with tf.variable_scope('{0}_regressor'.format(self._task_names[\n idx])):\n logits = tf.layers.dense(input_encoded, 1,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(\n self._l2_reg_lambda))\n gts = tf.expand_dims(output[:, idx], -1)\n wts = tf.expand_dims(weights[:, idx], -1)\n losses = tf.nn.sigmoid_cross_entropy_with_logits(logits=\n logits, labels=gts)\n total_loss += tf.reduce_mean(losses * wts)\n pooled_logits.append(tf.sigmoid(logits))\n self.eval_metrics['{0}/Pearsonr'.format(self._task_names[idx])\n ] = tf.contrib.metrics.streaming_pearson_correlation(logits\n , gts, weights=wts)\n pooled_logits = tf.stack(pooled_logits, axis=1)\n pooled_logits = tf.squeeze(pooled_logits, axis=-1)\n return pooled_logits, total_loss\n\n def _LoadInitEmbeddings(self):\n w2v_model = gensim.models.KeyedVectors.load_word2vec_format(\n W2VModelFILE, binary=False)\n vocab = []\n embd = []\n for token in w2v_model.vocab:\n vec = w2v_model[token]\n vocab.append(token)\n embd.append(vec)\n embedding = np.asarray(embd)\n return vocab, embedding\n\n def _LookupEmbeddings(self, embeddings, inputs):\n mask = tf.to_float(tf.not_equal(inputs, 0))\n inputs = tf.nn.embedding_lookup(embeddings, inputs)\n lengths = tf.cast(tf.reduce_sum(mask, axis=1), tf.int64)\n return lengths, inputs\n\n def _CNNLayers(self, embeddings):\n _, input_embeddings = self._LookupEmbeddings(embeddings, self.input_x)\n input_embeddings = tf.expand_dims(input_embeddings, -1)\n with tf.variable_scope('CNN'):\n pooled_outputs = []\n for i, filter_size in enumerate(self._cnn_filter_sizes):\n with tf.variable_scope('conv-maxpool-%s' % filter_size):\n filter_shape = [filter_size, self._embedding_size, 1,\n self._cnn_num_filters]\n W = tf.Variable(tf.truncated_normal(filter_shape,\n stddev=0.1), name='W')\n b = tf.Variable(tf.constant(0.1, shape=[self.\n _cnn_num_filters]), name='b')\n conv = tf.nn.conv2d(input_embeddings, W, strides=[1, 1,\n 1, 1], padding='VALID', name='conv')\n h = tf.nn.relu(tf.nn.bias_add(conv, b), name='relu')\n pooled = tf.nn.max_pool(h, ksize=[1, self.\n _max_document_length - filter_size + 1, 1, 1],\n strides=[1, 1, 1, 1], padding='VALID', name='pool')\n pooled_outputs.append(pooled)\n num_filters_total = self._cnn_num_filters * len(self.\n _cnn_filter_sizes)\n cnn_encoding = tf.concat(pooled_outputs, 3)\n cnn_encoding = tf.reshape(cnn_encoding, [-1, num_filters_total])\n with tf.variable_scope('dropout'):\n cnn_encoding = tf.nn.dropout(cnn_encoding, 1 - 
self.dropout)\n cnn_encoding = tf.layers.dense(cnn_encoding, self._encoding_size)\n return cnn_encoding\n\n def _DNNLayers(self, embeddings):\n lengths, input_embeddings = self._LookupEmbeddings(embeddings, self\n .input_x)\n lengths = tf.expand_dims(lengths, -1)\n input_embeddings = tf.divide(tf.reduce_sum(input_embeddings, 1), tf\n .to_float(lengths))\n with tf.variable_scope('DNN'):\n input_tensor = tf.nn.dropout(input_embeddings, 1)\n for i, out_size in enumerate(self._dnn_layer_sizes):\n with tf.variable_scope('Layer_{0}'.format(i + 1)):\n in_size = input_tensor.get_shape()[1]\n stddev = 1.0 / tf.sqrt(tf.to_float(tf.maximum(in_size,\n out_size)))\n W = tf.get_variable('W', (in_size, out_size),\n initializer=tf.truncated_normal_initializer(stddev=\n stddev))\n b = tf.get_variable('b', out_size, initializer=tf.\n constant_initializer(0.1))\n input_tensor = tf.nn.bias_add(tf.matmul(input_tensor, W), b\n )\n if self._dnn_activation == 'relu':\n input_tensor = tf.nn.relu(input_tensor, name='relu')\n else:\n raise ValueError(\n 'dnn_activation function not supported.')\n return input_tensor\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass NNModel:\n\n def __init__(self, mode=MODE_TRAIN, running_dir='./test/', encoder=\n 'CNN', num_tasks=1, task_names=['Task'], max_document_length=64,\n is_classifier=True, l2_reg_lambda=0.1, cnn_filter_sizes=[3, 4, 5],\n cnn_num_filters=128, rnn_bidirectional=False, rnn_cell_type='GRU',\n rnn_num_layers=2, dnn_layer_sizes=[512]):\n self._train = True if mode == MODE_TRAIN else False\n self._max_document_length = max_document_length\n self._num_tasks = num_tasks\n self._is_classifier = is_classifier\n self._embedding_size = EMBEDDING_DIM\n self._encoder = encoder\n self._encoding_size = 300\n self._vocab = None\n self._task_names = task_names\n self._cnn_filter_sizes = cnn_filter_sizes\n self._cnn_num_filters = cnn_num_filters\n self._rnn_bidirectional = rnn_bidirectional\n self._rnn_cell_type = rnn_cell_type\n self._rnn_num_layers = rnn_num_layers\n self._dnn_layer_sizes = dnn_layer_sizes\n self._dnn_activation = 'relu'\n self._l2_reg_lambda = l2_reg_lambda\n self.ops = []\n self.loss = None\n self.eval_metrics = {}\n self.saver = None\n self.checkpoint_dir = os.path.join(running_dir, 'train/')\n self.eval_dir = os.path.join(running_dir, 'test/')\n <function token>\n\n def _classifier(self, input_encoded, output, weights):\n total_loss = tf.constant(0.0)\n pooled_scores = []\n pooled_predictions = []\n for idx in range(self._num_tasks):\n gts = tf.expand_dims(output[:, idx], -1)\n wts = tf.expand_dims(weights[:, idx], -1)\n with tf.variable_scope('{0}_classifier'.format(self._task_names\n [idx])):\n labels = tf.concat([1 - gts, gts], 1)\n logits = tf.layers.dense(input_encoded, 2,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(\n self._l2_reg_lambda))\n scores = tf.reduce_max(tf.nn.softmax(logits), 1)\n predictions = tf.argmax(logits, 1, name='predictions')\n pooled_predictions.append(predictions)\n pooled_scores.append(scores)\n losses = tf.nn.softmax_cross_entropy_with_logits(logits=\n logits, labels=labels)\n self.eval_metrics['{0}/Accuracy'.format(self._task_names[idx])\n ] = tf.metrics.accuracy(gts, predictions, weights=wts)\n self.eval_metrics['{0}/Precision'.format(self._task_names[idx])\n ] = tf.metrics.precision(gts, predictions, weights=wts)\n self.eval_metrics['{0}/Recall'.format(self._task_names[idx])\n ] = tf.metrics.recall(gts, predictions, weights=wts)\n total_loss += tf.reduce_mean(losses * wts)\n pooled_predictions = tf.stack(pooled_predictions, axis=1)\n pooled_scores = tf.stack(pooled_scores, axis=1)\n return pooled_predictions, pooled_scores, total_loss\n\n def _regressor(self, input_encoded, output, weights):\n total_loss = tf.constant(0.0)\n pooled_logits = []\n for idx in range(self._num_tasks):\n with tf.variable_scope('{0}_regressor'.format(self._task_names[\n idx])):\n logits = tf.layers.dense(input_encoded, 1,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(\n self._l2_reg_lambda))\n gts = tf.expand_dims(output[:, idx], -1)\n wts = tf.expand_dims(weights[:, idx], -1)\n losses = tf.nn.sigmoid_cross_entropy_with_logits(logits=\n logits, labels=gts)\n total_loss += tf.reduce_mean(losses * wts)\n pooled_logits.append(tf.sigmoid(logits))\n self.eval_metrics['{0}/Pearsonr'.format(self._task_names[idx])\n ] = tf.contrib.metrics.streaming_pearson_correlation(logits\n , gts, weights=wts)\n pooled_logits = tf.stack(pooled_logits, axis=1)\n pooled_logits = tf.squeeze(pooled_logits, axis=-1)\n return pooled_logits, total_loss\n\n def _LoadInitEmbeddings(self):\n w2v_model = 
gensim.models.KeyedVectors.load_word2vec_format(\n W2VModelFILE, binary=False)\n vocab = []\n embd = []\n for token in w2v_model.vocab:\n vec = w2v_model[token]\n vocab.append(token)\n embd.append(vec)\n embedding = np.asarray(embd)\n return vocab, embedding\n\n def _LookupEmbeddings(self, embeddings, inputs):\n mask = tf.to_float(tf.not_equal(inputs, 0))\n inputs = tf.nn.embedding_lookup(embeddings, inputs)\n lengths = tf.cast(tf.reduce_sum(mask, axis=1), tf.int64)\n return lengths, inputs\n\n def _CNNLayers(self, embeddings):\n _, input_embeddings = self._LookupEmbeddings(embeddings, self.input_x)\n input_embeddings = tf.expand_dims(input_embeddings, -1)\n with tf.variable_scope('CNN'):\n pooled_outputs = []\n for i, filter_size in enumerate(self._cnn_filter_sizes):\n with tf.variable_scope('conv-maxpool-%s' % filter_size):\n filter_shape = [filter_size, self._embedding_size, 1,\n self._cnn_num_filters]\n W = tf.Variable(tf.truncated_normal(filter_shape,\n stddev=0.1), name='W')\n b = tf.Variable(tf.constant(0.1, shape=[self.\n _cnn_num_filters]), name='b')\n conv = tf.nn.conv2d(input_embeddings, W, strides=[1, 1,\n 1, 1], padding='VALID', name='conv')\n h = tf.nn.relu(tf.nn.bias_add(conv, b), name='relu')\n pooled = tf.nn.max_pool(h, ksize=[1, self.\n _max_document_length - filter_size + 1, 1, 1],\n strides=[1, 1, 1, 1], padding='VALID', name='pool')\n pooled_outputs.append(pooled)\n num_filters_total = self._cnn_num_filters * len(self.\n _cnn_filter_sizes)\n cnn_encoding = tf.concat(pooled_outputs, 3)\n cnn_encoding = tf.reshape(cnn_encoding, [-1, num_filters_total])\n with tf.variable_scope('dropout'):\n cnn_encoding = tf.nn.dropout(cnn_encoding, 1 - self.dropout)\n cnn_encoding = tf.layers.dense(cnn_encoding, self._encoding_size)\n return cnn_encoding\n\n def _DNNLayers(self, embeddings):\n lengths, input_embeddings = self._LookupEmbeddings(embeddings, self\n .input_x)\n lengths = tf.expand_dims(lengths, -1)\n input_embeddings = tf.divide(tf.reduce_sum(input_embeddings, 1), tf\n .to_float(lengths))\n with tf.variable_scope('DNN'):\n input_tensor = tf.nn.dropout(input_embeddings, 1)\n for i, out_size in enumerate(self._dnn_layer_sizes):\n with tf.variable_scope('Layer_{0}'.format(i + 1)):\n in_size = input_tensor.get_shape()[1]\n stddev = 1.0 / tf.sqrt(tf.to_float(tf.maximum(in_size,\n out_size)))\n W = tf.get_variable('W', (in_size, out_size),\n initializer=tf.truncated_normal_initializer(stddev=\n stddev))\n b = tf.get_variable('b', out_size, initializer=tf.\n constant_initializer(0.1))\n input_tensor = tf.nn.bias_add(tf.matmul(input_tensor, W), b\n )\n if self._dnn_activation == 'relu':\n input_tensor = tf.nn.relu(input_tensor, name='relu')\n else:\n raise ValueError(\n 'dnn_activation function not supported.')\n return input_tensor\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass NNModel:\n <function token>\n <function token>\n\n def _classifier(self, input_encoded, output, weights):\n total_loss = tf.constant(0.0)\n pooled_scores = []\n pooled_predictions = []\n for idx in range(self._num_tasks):\n gts = tf.expand_dims(output[:, idx], -1)\n wts = tf.expand_dims(weights[:, idx], -1)\n with tf.variable_scope('{0}_classifier'.format(self._task_names\n [idx])):\n labels = tf.concat([1 - gts, gts], 1)\n logits = tf.layers.dense(input_encoded, 2,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(\n self._l2_reg_lambda))\n scores = tf.reduce_max(tf.nn.softmax(logits), 1)\n predictions = tf.argmax(logits, 1, name='predictions')\n pooled_predictions.append(predictions)\n pooled_scores.append(scores)\n losses = tf.nn.softmax_cross_entropy_with_logits(logits=\n logits, labels=labels)\n self.eval_metrics['{0}/Accuracy'.format(self._task_names[idx])\n ] = tf.metrics.accuracy(gts, predictions, weights=wts)\n self.eval_metrics['{0}/Precision'.format(self._task_names[idx])\n ] = tf.metrics.precision(gts, predictions, weights=wts)\n self.eval_metrics['{0}/Recall'.format(self._task_names[idx])\n ] = tf.metrics.recall(gts, predictions, weights=wts)\n total_loss += tf.reduce_mean(losses * wts)\n pooled_predictions = tf.stack(pooled_predictions, axis=1)\n pooled_scores = tf.stack(pooled_scores, axis=1)\n return pooled_predictions, pooled_scores, total_loss\n\n def _regressor(self, input_encoded, output, weights):\n total_loss = tf.constant(0.0)\n pooled_logits = []\n for idx in range(self._num_tasks):\n with tf.variable_scope('{0}_regressor'.format(self._task_names[\n idx])):\n logits = tf.layers.dense(input_encoded, 1,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(\n self._l2_reg_lambda))\n gts = tf.expand_dims(output[:, idx], -1)\n wts = tf.expand_dims(weights[:, idx], -1)\n losses = tf.nn.sigmoid_cross_entropy_with_logits(logits=\n logits, labels=gts)\n total_loss += tf.reduce_mean(losses * wts)\n pooled_logits.append(tf.sigmoid(logits))\n self.eval_metrics['{0}/Pearsonr'.format(self._task_names[idx])\n ] = tf.contrib.metrics.streaming_pearson_correlation(logits\n , gts, weights=wts)\n pooled_logits = tf.stack(pooled_logits, axis=1)\n pooled_logits = tf.squeeze(pooled_logits, axis=-1)\n return pooled_logits, total_loss\n\n def _LoadInitEmbeddings(self):\n w2v_model = gensim.models.KeyedVectors.load_word2vec_format(\n W2VModelFILE, binary=False)\n vocab = []\n embd = []\n for token in w2v_model.vocab:\n vec = w2v_model[token]\n vocab.append(token)\n embd.append(vec)\n embedding = np.asarray(embd)\n return vocab, embedding\n\n def _LookupEmbeddings(self, embeddings, inputs):\n mask = tf.to_float(tf.not_equal(inputs, 0))\n inputs = tf.nn.embedding_lookup(embeddings, inputs)\n lengths = tf.cast(tf.reduce_sum(mask, axis=1), tf.int64)\n return lengths, inputs\n\n def _CNNLayers(self, embeddings):\n _, input_embeddings = self._LookupEmbeddings(embeddings, self.input_x)\n input_embeddings = tf.expand_dims(input_embeddings, -1)\n with tf.variable_scope('CNN'):\n pooled_outputs = []\n for i, filter_size in enumerate(self._cnn_filter_sizes):\n with tf.variable_scope('conv-maxpool-%s' % filter_size):\n filter_shape = [filter_size, self._embedding_size, 1,\n self._cnn_num_filters]\n W = tf.Variable(tf.truncated_normal(filter_shape,\n stddev=0.1), name='W')\n b = tf.Variable(tf.constant(0.1, shape=[self.\n _cnn_num_filters]), name='b')\n conv = tf.nn.conv2d(input_embeddings, W, strides=[1, 1,\n 1, 1], padding='VALID', name='conv')\n h = 
tf.nn.relu(tf.nn.bias_add(conv, b), name='relu')\n pooled = tf.nn.max_pool(h, ksize=[1, self.\n _max_document_length - filter_size + 1, 1, 1],\n strides=[1, 1, 1, 1], padding='VALID', name='pool')\n pooled_outputs.append(pooled)\n num_filters_total = self._cnn_num_filters * len(self.\n _cnn_filter_sizes)\n cnn_encoding = tf.concat(pooled_outputs, 3)\n cnn_encoding = tf.reshape(cnn_encoding, [-1, num_filters_total])\n with tf.variable_scope('dropout'):\n cnn_encoding = tf.nn.dropout(cnn_encoding, 1 - self.dropout)\n cnn_encoding = tf.layers.dense(cnn_encoding, self._encoding_size)\n return cnn_encoding\n\n def _DNNLayers(self, embeddings):\n lengths, input_embeddings = self._LookupEmbeddings(embeddings, self\n .input_x)\n lengths = tf.expand_dims(lengths, -1)\n input_embeddings = tf.divide(tf.reduce_sum(input_embeddings, 1), tf\n .to_float(lengths))\n with tf.variable_scope('DNN'):\n input_tensor = tf.nn.dropout(input_embeddings, 1)\n for i, out_size in enumerate(self._dnn_layer_sizes):\n with tf.variable_scope('Layer_{0}'.format(i + 1)):\n in_size = input_tensor.get_shape()[1]\n stddev = 1.0 / tf.sqrt(tf.to_float(tf.maximum(in_size,\n out_size)))\n W = tf.get_variable('W', (in_size, out_size),\n initializer=tf.truncated_normal_initializer(stddev=\n stddev))\n b = tf.get_variable('b', out_size, initializer=tf.\n constant_initializer(0.1))\n input_tensor = tf.nn.bias_add(tf.matmul(input_tensor, W), b\n )\n if self._dnn_activation == 'relu':\n input_tensor = tf.nn.relu(input_tensor, name='relu')\n else:\n raise ValueError(\n 'dnn_activation function not supported.')\n return input_tensor\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass NNModel:\n <function token>\n <function token>\n\n def _classifier(self, input_encoded, output, weights):\n total_loss = tf.constant(0.0)\n pooled_scores = []\n pooled_predictions = []\n for idx in range(self._num_tasks):\n gts = tf.expand_dims(output[:, idx], -1)\n wts = tf.expand_dims(weights[:, idx], -1)\n with tf.variable_scope('{0}_classifier'.format(self._task_names\n [idx])):\n labels = tf.concat([1 - gts, gts], 1)\n logits = tf.layers.dense(input_encoded, 2,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(\n self._l2_reg_lambda))\n scores = tf.reduce_max(tf.nn.softmax(logits), 1)\n predictions = tf.argmax(logits, 1, name='predictions')\n pooled_predictions.append(predictions)\n pooled_scores.append(scores)\n losses = tf.nn.softmax_cross_entropy_with_logits(logits=\n logits, labels=labels)\n self.eval_metrics['{0}/Accuracy'.format(self._task_names[idx])\n ] = tf.metrics.accuracy(gts, predictions, weights=wts)\n self.eval_metrics['{0}/Precision'.format(self._task_names[idx])\n ] = tf.metrics.precision(gts, predictions, weights=wts)\n self.eval_metrics['{0}/Recall'.format(self._task_names[idx])\n ] = tf.metrics.recall(gts, predictions, weights=wts)\n total_loss += tf.reduce_mean(losses * wts)\n pooled_predictions = tf.stack(pooled_predictions, axis=1)\n pooled_scores = tf.stack(pooled_scores, axis=1)\n return pooled_predictions, pooled_scores, total_loss\n <function token>\n\n def _LoadInitEmbeddings(self):\n w2v_model = gensim.models.KeyedVectors.load_word2vec_format(\n W2VModelFILE, binary=False)\n vocab = []\n embd = []\n for token in w2v_model.vocab:\n vec = w2v_model[token]\n vocab.append(token)\n embd.append(vec)\n embedding = np.asarray(embd)\n return vocab, embedding\n\n def _LookupEmbeddings(self, embeddings, inputs):\n mask = tf.to_float(tf.not_equal(inputs, 0))\n inputs = tf.nn.embedding_lookup(embeddings, inputs)\n lengths = tf.cast(tf.reduce_sum(mask, axis=1), tf.int64)\n return lengths, inputs\n\n def _CNNLayers(self, embeddings):\n _, input_embeddings = self._LookupEmbeddings(embeddings, self.input_x)\n input_embeddings = tf.expand_dims(input_embeddings, -1)\n with tf.variable_scope('CNN'):\n pooled_outputs = []\n for i, filter_size in enumerate(self._cnn_filter_sizes):\n with tf.variable_scope('conv-maxpool-%s' % filter_size):\n filter_shape = [filter_size, self._embedding_size, 1,\n self._cnn_num_filters]\n W = tf.Variable(tf.truncated_normal(filter_shape,\n stddev=0.1), name='W')\n b = tf.Variable(tf.constant(0.1, shape=[self.\n _cnn_num_filters]), name='b')\n conv = tf.nn.conv2d(input_embeddings, W, strides=[1, 1,\n 1, 1], padding='VALID', name='conv')\n h = tf.nn.relu(tf.nn.bias_add(conv, b), name='relu')\n pooled = tf.nn.max_pool(h, ksize=[1, self.\n _max_document_length - filter_size + 1, 1, 1],\n strides=[1, 1, 1, 1], padding='VALID', name='pool')\n pooled_outputs.append(pooled)\n num_filters_total = self._cnn_num_filters * len(self.\n _cnn_filter_sizes)\n cnn_encoding = tf.concat(pooled_outputs, 3)\n cnn_encoding = tf.reshape(cnn_encoding, [-1, num_filters_total])\n with tf.variable_scope('dropout'):\n cnn_encoding = tf.nn.dropout(cnn_encoding, 1 - self.dropout)\n cnn_encoding = tf.layers.dense(cnn_encoding, self._encoding_size)\n return cnn_encoding\n\n def _DNNLayers(self, embeddings):\n lengths, input_embeddings = self._LookupEmbeddings(embeddings, self\n .input_x)\n lengths = tf.expand_dims(lengths, -1)\n input_embeddings = tf.divide(tf.reduce_sum(input_embeddings, 1), tf\n .to_float(lengths))\n with 
tf.variable_scope('DNN'):\n input_tensor = tf.nn.dropout(input_embeddings, 1)\n for i, out_size in enumerate(self._dnn_layer_sizes):\n with tf.variable_scope('Layer_{0}'.format(i + 1)):\n in_size = input_tensor.get_shape()[1]\n stddev = 1.0 / tf.sqrt(tf.to_float(tf.maximum(in_size,\n out_size)))\n W = tf.get_variable('W', (in_size, out_size),\n initializer=tf.truncated_normal_initializer(stddev=\n stddev))\n b = tf.get_variable('b', out_size, initializer=tf.\n constant_initializer(0.1))\n input_tensor = tf.nn.bias_add(tf.matmul(input_tensor, W), b\n )\n if self._dnn_activation == 'relu':\n input_tensor = tf.nn.relu(input_tensor, name='relu')\n else:\n raise ValueError(\n 'dnn_activation function not supported.')\n return input_tensor\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass NNModel:\n <function token>\n <function token>\n\n def _classifier(self, input_encoded, output, weights):\n total_loss = tf.constant(0.0)\n pooled_scores = []\n pooled_predictions = []\n for idx in range(self._num_tasks):\n gts = tf.expand_dims(output[:, idx], -1)\n wts = tf.expand_dims(weights[:, idx], -1)\n with tf.variable_scope('{0}_classifier'.format(self._task_names\n [idx])):\n labels = tf.concat([1 - gts, gts], 1)\n logits = tf.layers.dense(input_encoded, 2,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(\n self._l2_reg_lambda))\n scores = tf.reduce_max(tf.nn.softmax(logits), 1)\n predictions = tf.argmax(logits, 1, name='predictions')\n pooled_predictions.append(predictions)\n pooled_scores.append(scores)\n losses = tf.nn.softmax_cross_entropy_with_logits(logits=\n logits, labels=labels)\n self.eval_metrics['{0}/Accuracy'.format(self._task_names[idx])\n ] = tf.metrics.accuracy(gts, predictions, weights=wts)\n self.eval_metrics['{0}/Precision'.format(self._task_names[idx])\n ] = tf.metrics.precision(gts, predictions, weights=wts)\n self.eval_metrics['{0}/Recall'.format(self._task_names[idx])\n ] = tf.metrics.recall(gts, predictions, weights=wts)\n total_loss += tf.reduce_mean(losses * wts)\n pooled_predictions = tf.stack(pooled_predictions, axis=1)\n pooled_scores = tf.stack(pooled_scores, axis=1)\n return pooled_predictions, pooled_scores, total_loss\n <function token>\n <function token>\n\n def _LookupEmbeddings(self, embeddings, inputs):\n mask = tf.to_float(tf.not_equal(inputs, 0))\n inputs = tf.nn.embedding_lookup(embeddings, inputs)\n lengths = tf.cast(tf.reduce_sum(mask, axis=1), tf.int64)\n return lengths, inputs\n\n def _CNNLayers(self, embeddings):\n _, input_embeddings = self._LookupEmbeddings(embeddings, self.input_x)\n input_embeddings = tf.expand_dims(input_embeddings, -1)\n with tf.variable_scope('CNN'):\n pooled_outputs = []\n for i, filter_size in enumerate(self._cnn_filter_sizes):\n with tf.variable_scope('conv-maxpool-%s' % filter_size):\n filter_shape = [filter_size, self._embedding_size, 1,\n self._cnn_num_filters]\n W = tf.Variable(tf.truncated_normal(filter_shape,\n stddev=0.1), name='W')\n b = tf.Variable(tf.constant(0.1, shape=[self.\n _cnn_num_filters]), name='b')\n conv = tf.nn.conv2d(input_embeddings, W, strides=[1, 1,\n 1, 1], padding='VALID', name='conv')\n h = tf.nn.relu(tf.nn.bias_add(conv, b), name='relu')\n pooled = tf.nn.max_pool(h, ksize=[1, self.\n _max_document_length - filter_size + 1, 1, 1],\n strides=[1, 1, 1, 1], padding='VALID', name='pool')\n pooled_outputs.append(pooled)\n num_filters_total = self._cnn_num_filters * len(self.\n _cnn_filter_sizes)\n cnn_encoding = tf.concat(pooled_outputs, 3)\n cnn_encoding = tf.reshape(cnn_encoding, [-1, num_filters_total])\n with tf.variable_scope('dropout'):\n cnn_encoding = tf.nn.dropout(cnn_encoding, 1 - self.dropout)\n cnn_encoding = tf.layers.dense(cnn_encoding, self._encoding_size)\n return cnn_encoding\n\n def _DNNLayers(self, embeddings):\n lengths, input_embeddings = self._LookupEmbeddings(embeddings, self\n .input_x)\n lengths = tf.expand_dims(lengths, -1)\n input_embeddings = tf.divide(tf.reduce_sum(input_embeddings, 1), tf\n .to_float(lengths))\n with tf.variable_scope('DNN'):\n input_tensor = tf.nn.dropout(input_embeddings, 1)\n for i, out_size in enumerate(self._dnn_layer_sizes):\n with tf.variable_scope('Layer_{0}'.format(i + 1)):\n in_size = input_tensor.get_shape()[1]\n stddev = 1.0 / tf.sqrt(tf.to_float(tf.maximum(in_size,\n 
out_size)))\n W = tf.get_variable('W', (in_size, out_size),\n initializer=tf.truncated_normal_initializer(stddev=\n stddev))\n b = tf.get_variable('b', out_size, initializer=tf.\n constant_initializer(0.1))\n input_tensor = tf.nn.bias_add(tf.matmul(input_tensor, W), b\n )\n if self._dnn_activation == 'relu':\n input_tensor = tf.nn.relu(input_tensor, name='relu')\n else:\n raise ValueError(\n 'dnn_activation function not supported.')\n return input_tensor\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass NNModel:\n <function token>\n <function token>\n\n def _classifier(self, input_encoded, output, weights):\n total_loss = tf.constant(0.0)\n pooled_scores = []\n pooled_predictions = []\n for idx in range(self._num_tasks):\n gts = tf.expand_dims(output[:, idx], -1)\n wts = tf.expand_dims(weights[:, idx], -1)\n with tf.variable_scope('{0}_classifier'.format(self._task_names\n [idx])):\n labels = tf.concat([1 - gts, gts], 1)\n logits = tf.layers.dense(input_encoded, 2,\n kernel_regularizer=tf.contrib.layers.l2_regularizer(\n self._l2_reg_lambda))\n scores = tf.reduce_max(tf.nn.softmax(logits), 1)\n predictions = tf.argmax(logits, 1, name='predictions')\n pooled_predictions.append(predictions)\n pooled_scores.append(scores)\n losses = tf.nn.softmax_cross_entropy_with_logits(logits=\n logits, labels=labels)\n self.eval_metrics['{0}/Accuracy'.format(self._task_names[idx])\n ] = tf.metrics.accuracy(gts, predictions, weights=wts)\n self.eval_metrics['{0}/Precision'.format(self._task_names[idx])\n ] = tf.metrics.precision(gts, predictions, weights=wts)\n self.eval_metrics['{0}/Recall'.format(self._task_names[idx])\n ] = tf.metrics.recall(gts, predictions, weights=wts)\n total_loss += tf.reduce_mean(losses * wts)\n pooled_predictions = tf.stack(pooled_predictions, axis=1)\n pooled_scores = tf.stack(pooled_scores, axis=1)\n return pooled_predictions, pooled_scores, total_loss\n <function token>\n <function token>\n <function token>\n\n def _CNNLayers(self, embeddings):\n _, input_embeddings = self._LookupEmbeddings(embeddings, self.input_x)\n input_embeddings = tf.expand_dims(input_embeddings, -1)\n with tf.variable_scope('CNN'):\n pooled_outputs = []\n for i, filter_size in enumerate(self._cnn_filter_sizes):\n with tf.variable_scope('conv-maxpool-%s' % filter_size):\n filter_shape = [filter_size, self._embedding_size, 1,\n self._cnn_num_filters]\n W = tf.Variable(tf.truncated_normal(filter_shape,\n stddev=0.1), name='W')\n b = tf.Variable(tf.constant(0.1, shape=[self.\n _cnn_num_filters]), name='b')\n conv = tf.nn.conv2d(input_embeddings, W, strides=[1, 1,\n 1, 1], padding='VALID', name='conv')\n h = tf.nn.relu(tf.nn.bias_add(conv, b), name='relu')\n pooled = tf.nn.max_pool(h, ksize=[1, self.\n _max_document_length - filter_size + 1, 1, 1],\n strides=[1, 1, 1, 1], padding='VALID', name='pool')\n pooled_outputs.append(pooled)\n num_filters_total = self._cnn_num_filters * len(self.\n _cnn_filter_sizes)\n cnn_encoding = tf.concat(pooled_outputs, 3)\n cnn_encoding = tf.reshape(cnn_encoding, [-1, num_filters_total])\n with tf.variable_scope('dropout'):\n cnn_encoding = tf.nn.dropout(cnn_encoding, 1 - self.dropout)\n cnn_encoding = tf.layers.dense(cnn_encoding, self._encoding_size)\n return cnn_encoding\n\n def _DNNLayers(self, embeddings):\n lengths, input_embeddings = self._LookupEmbeddings(embeddings, self\n .input_x)\n lengths = tf.expand_dims(lengths, -1)\n input_embeddings = tf.divide(tf.reduce_sum(input_embeddings, 1), tf\n .to_float(lengths))\n with tf.variable_scope('DNN'):\n input_tensor = tf.nn.dropout(input_embeddings, 1)\n for i, out_size in enumerate(self._dnn_layer_sizes):\n with tf.variable_scope('Layer_{0}'.format(i + 1)):\n in_size = input_tensor.get_shape()[1]\n stddev = 1.0 / tf.sqrt(tf.to_float(tf.maximum(in_size,\n out_size)))\n W = tf.get_variable('W', (in_size, out_size),\n initializer=tf.truncated_normal_initializer(stddev=\n stddev))\n b = tf.get_variable('b', out_size, initializer=tf.\n constant_initializer(0.1))\n input_tensor = 
tf.nn.bias_add(tf.matmul(input_tensor, W), b\n )\n if self._dnn_activation == 'relu':\n input_tensor = tf.nn.relu(input_tensor, name='relu')\n else:\n raise ValueError(\n 'dnn_activation function not supported.')\n return input_tensor\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass NNModel:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _CNNLayers(self, embeddings):\n _, input_embeddings = self._LookupEmbeddings(embeddings, self.input_x)\n input_embeddings = tf.expand_dims(input_embeddings, -1)\n with tf.variable_scope('CNN'):\n pooled_outputs = []\n for i, filter_size in enumerate(self._cnn_filter_sizes):\n with tf.variable_scope('conv-maxpool-%s' % filter_size):\n filter_shape = [filter_size, self._embedding_size, 1,\n self._cnn_num_filters]\n W = tf.Variable(tf.truncated_normal(filter_shape,\n stddev=0.1), name='W')\n b = tf.Variable(tf.constant(0.1, shape=[self.\n _cnn_num_filters]), name='b')\n conv = tf.nn.conv2d(input_embeddings, W, strides=[1, 1,\n 1, 1], padding='VALID', name='conv')\n h = tf.nn.relu(tf.nn.bias_add(conv, b), name='relu')\n pooled = tf.nn.max_pool(h, ksize=[1, self.\n _max_document_length - filter_size + 1, 1, 1],\n strides=[1, 1, 1, 1], padding='VALID', name='pool')\n pooled_outputs.append(pooled)\n num_filters_total = self._cnn_num_filters * len(self.\n _cnn_filter_sizes)\n cnn_encoding = tf.concat(pooled_outputs, 3)\n cnn_encoding = tf.reshape(cnn_encoding, [-1, num_filters_total])\n with tf.variable_scope('dropout'):\n cnn_encoding = tf.nn.dropout(cnn_encoding, 1 - self.dropout)\n cnn_encoding = tf.layers.dense(cnn_encoding, self._encoding_size)\n return cnn_encoding\n\n def _DNNLayers(self, embeddings):\n lengths, input_embeddings = self._LookupEmbeddings(embeddings, self\n .input_x)\n lengths = tf.expand_dims(lengths, -1)\n input_embeddings = tf.divide(tf.reduce_sum(input_embeddings, 1), tf\n .to_float(lengths))\n with tf.variable_scope('DNN'):\n input_tensor = tf.nn.dropout(input_embeddings, 1)\n for i, out_size in enumerate(self._dnn_layer_sizes):\n with tf.variable_scope('Layer_{0}'.format(i + 1)):\n in_size = input_tensor.get_shape()[1]\n stddev = 1.0 / tf.sqrt(tf.to_float(tf.maximum(in_size,\n out_size)))\n W = tf.get_variable('W', (in_size, out_size),\n initializer=tf.truncated_normal_initializer(stddev=\n stddev))\n b = tf.get_variable('b', out_size, initializer=tf.\n constant_initializer(0.1))\n input_tensor = tf.nn.bias_add(tf.matmul(input_tensor, W), b\n )\n if self._dnn_activation == 'relu':\n input_tensor = tf.nn.relu(input_tensor, name='relu')\n else:\n raise ValueError(\n 'dnn_activation function not supported.')\n return input_tensor\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass NNModel:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _DNNLayers(self, embeddings):\n lengths, input_embeddings = self._LookupEmbeddings(embeddings, self\n .input_x)\n lengths = tf.expand_dims(lengths, -1)\n input_embeddings = tf.divide(tf.reduce_sum(input_embeddings, 1), tf\n .to_float(lengths))\n with tf.variable_scope('DNN'):\n input_tensor = tf.nn.dropout(input_embeddings, 1)\n for i, out_size in enumerate(self._dnn_layer_sizes):\n with tf.variable_scope('Layer_{0}'.format(i + 1)):\n in_size = input_tensor.get_shape()[1]\n stddev = 1.0 / tf.sqrt(tf.to_float(tf.maximum(in_size,\n out_size)))\n W = tf.get_variable('W', (in_size, out_size),\n initializer=tf.truncated_normal_initializer(stddev=\n stddev))\n b = tf.get_variable('b', out_size, initializer=tf.\n constant_initializer(0.1))\n input_tensor = tf.nn.bias_add(tf.matmul(input_tensor, W), b\n )\n if self._dnn_activation == 'relu':\n input_tensor = tf.nn.relu(input_tensor, name='relu')\n else:\n raise ValueError(\n 'dnn_activation function not supported.')\n return input_tensor\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass NNModel:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<function token>\n<code token>\n"
] | false |
99,355 |
e3306e3e5dced1598f04572b791a653f481d91cc
|
import os.path
import os

import tornado.ioloop
import tornado.web
import tornado.websocket
import tornado.autoreload

from . import global_vars
from .inject import inject_live_server_script


class HtmlHandler(tornado.web.RequestHandler):
    """Serves HTML files with the live-server client script injected."""

    def initialize(self, path, default_filename=None):
        self.root = path
        self.default_filename = default_filename

    def get(self, captured):
        if captured is None:
            captured = self.default_filename
        try:
            injected_html = inject_live_server_script(
                os.path.join(self.root, captured))
            self.write(injected_html)
        except FileNotFoundError:
            self.send_error(404)


class LiveServerHandler(tornado.websocket.WebSocketHandler):
    """Tracks open websocket connections so reloads can be broadcast."""

    active_clients = set()

    def open(self):
        LiveServerHandler.active_clients.add(self)

    def on_close(self):
        LiveServerHandler.active_clients.remove(self)


def broadcast_reload():
    """Push a 'reload' message to every connected live-server client."""
    for client in LiveServerHandler.active_clients:
        client.write_message('reload', binary=False)


def make_app():
    STATIC_PATH = global_vars.PATH
    LIVE_SERVER_JS_PATH = os.path.join(os.path.dirname(__file__))
    config = {
        'debug': True,
        'serve_traceback': False
    }
    static_config = {'path': STATIC_PATH, 'default_filename': 'index.html'}
    return tornado.web.Application([
        (r'/(.*\.html)?', HtmlHandler, static_config),
        (r'/ws/live-server', LiveServerHandler),
        (r'/(liveServer.js)', tornado.web.StaticFileHandler,
         {'path': LIVE_SERVER_JS_PATH}),
        (r'/(.*)', tornado.web.StaticFileHandler, static_config)
    ], **config)


def start_app():
    app = make_app()
    server = app.listen(global_vars.PORT)
    print('listening on {}'.format(global_vars.PORT))
    return server


def stop_app(server):
    # Receives the HTTPServer returned by start_app (tornado Applications
    # have no stop(); the listening server does).
    server.stop()
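

# Minimal launch sketch (an assumption, not part of the original module;
# global_vars.PATH and global_vars.PORT must be set by the caller first):
#
#     server = start_app()
#     tornado.ioloop.IOLoop.current().start()
#
# Call broadcast_reload() from whatever file watcher drives the reloads
# to push 'reload' to every connected client.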
|
[
"import os.path\nimport os\n\nimport tornado.ioloop\nimport tornado.web\nimport tornado.websocket\nimport tornado.autoreload\n\nfrom . import global_vars\nfrom .inject import inject_live_server_script\n\n\nclass HtmlHandler(tornado.web.RequestHandler):\n def initialize(self, path, default_filename=None):\n self.root = path\n self.default_filename = default_filename\n\n def get(self, captured):\n if captured is None:\n captured = self.default_filename\n try:\n injected_html = inject_live_server_script(\n os.path.join(self.root, captured))\n self.write(injected_html)\n except FileNotFoundError:\n self.send_error(404)\n\n\nclass LiveServerHandler(tornado.websocket.WebSocketHandler):\n active_clients = set()\n\n def open(self):\n LiveServerHandler.active_clients.add(self)\n\n def on_close(self):\n LiveServerHandler.active_clients.remove(self)\n\n\ndef broadcast_reload():\n for client in LiveServerHandler.active_clients:\n client.write_message('reload', binary=False)\n\n\ndef make_app():\n STATIC_PATH = global_vars.PATH\n\n LIVE_SERVER_JS_PATH = os.path.join(os.path.dirname(__file__))\n config = {\n 'debug': True,\n 'serve_traceback': False\n }\n static_config = {'path': STATIC_PATH, 'default_filename': 'index.html'}\n\n return tornado.web.Application([\n (r'/(.*\\.html)?', HtmlHandler, static_config),\n (r'/ws/live-server', LiveServerHandler),\n (r'/(liveServer.js)', tornado.web.StaticFileHandler,\n {'path': LIVE_SERVER_JS_PATH}),\n (r'/(.*)', tornado.web.StaticFileHandler, static_config)\n ], **config)\n\n\ndef start_app():\n app = make_app()\n server = app.listen(global_vars.PORT)\n print('listening on {}'.format(global_vars.PORT))\n return server\n\n\ndef stop_app(app):\n app.stop()\n",
"import os.path\nimport os\nimport tornado.ioloop\nimport tornado.web\nimport tornado.websocket\nimport tornado.autoreload\nfrom . import global_vars\nfrom .inject import inject_live_server_script\n\n\nclass HtmlHandler(tornado.web.RequestHandler):\n\n def initialize(self, path, default_filename=None):\n self.root = path\n self.default_filename = default_filename\n\n def get(self, captured):\n if captured is None:\n captured = self.default_filename\n try:\n injected_html = inject_live_server_script(os.path.join(self.\n root, captured))\n self.write(injected_html)\n except FileNotFoundError:\n self.send_error(404)\n\n\nclass LiveServerHandler(tornado.websocket.WebSocketHandler):\n active_clients = set()\n\n def open(self):\n LiveServerHandler.active_clients.add(self)\n\n def on_close(self):\n LiveServerHandler.active_clients.remove(self)\n\n\ndef broadcast_reload():\n for client in LiveServerHandler.active_clients:\n client.write_message('reload', binary=False)\n\n\ndef make_app():\n STATIC_PATH = global_vars.PATH\n LIVE_SERVER_JS_PATH = os.path.join(os.path.dirname(__file__))\n config = {'debug': True, 'serve_traceback': False}\n static_config = {'path': STATIC_PATH, 'default_filename': 'index.html'}\n return tornado.web.Application([('/(.*\\\\.html)?', HtmlHandler,\n static_config), ('/ws/live-server', LiveServerHandler), (\n '/(liveServer.js)', tornado.web.StaticFileHandler, {'path':\n LIVE_SERVER_JS_PATH}), ('/(.*)', tornado.web.StaticFileHandler,\n static_config)], **config)\n\n\ndef start_app():\n app = make_app()\n server = app.listen(global_vars.PORT)\n print('listening on {}'.format(global_vars.PORT))\n return server\n\n\ndef stop_app(app):\n app.stop()\n",
"<import token>\n\n\nclass HtmlHandler(tornado.web.RequestHandler):\n\n def initialize(self, path, default_filename=None):\n self.root = path\n self.default_filename = default_filename\n\n def get(self, captured):\n if captured is None:\n captured = self.default_filename\n try:\n injected_html = inject_live_server_script(os.path.join(self.\n root, captured))\n self.write(injected_html)\n except FileNotFoundError:\n self.send_error(404)\n\n\nclass LiveServerHandler(tornado.websocket.WebSocketHandler):\n active_clients = set()\n\n def open(self):\n LiveServerHandler.active_clients.add(self)\n\n def on_close(self):\n LiveServerHandler.active_clients.remove(self)\n\n\ndef broadcast_reload():\n for client in LiveServerHandler.active_clients:\n client.write_message('reload', binary=False)\n\n\ndef make_app():\n STATIC_PATH = global_vars.PATH\n LIVE_SERVER_JS_PATH = os.path.join(os.path.dirname(__file__))\n config = {'debug': True, 'serve_traceback': False}\n static_config = {'path': STATIC_PATH, 'default_filename': 'index.html'}\n return tornado.web.Application([('/(.*\\\\.html)?', HtmlHandler,\n static_config), ('/ws/live-server', LiveServerHandler), (\n '/(liveServer.js)', tornado.web.StaticFileHandler, {'path':\n LIVE_SERVER_JS_PATH}), ('/(.*)', tornado.web.StaticFileHandler,\n static_config)], **config)\n\n\ndef start_app():\n app = make_app()\n server = app.listen(global_vars.PORT)\n print('listening on {}'.format(global_vars.PORT))\n return server\n\n\ndef stop_app(app):\n app.stop()\n",
"<import token>\n\n\nclass HtmlHandler(tornado.web.RequestHandler):\n\n def initialize(self, path, default_filename=None):\n self.root = path\n self.default_filename = default_filename\n\n def get(self, captured):\n if captured is None:\n captured = self.default_filename\n try:\n injected_html = inject_live_server_script(os.path.join(self.\n root, captured))\n self.write(injected_html)\n except FileNotFoundError:\n self.send_error(404)\n\n\nclass LiveServerHandler(tornado.websocket.WebSocketHandler):\n active_clients = set()\n\n def open(self):\n LiveServerHandler.active_clients.add(self)\n\n def on_close(self):\n LiveServerHandler.active_clients.remove(self)\n\n\n<function token>\n\n\ndef make_app():\n STATIC_PATH = global_vars.PATH\n LIVE_SERVER_JS_PATH = os.path.join(os.path.dirname(__file__))\n config = {'debug': True, 'serve_traceback': False}\n static_config = {'path': STATIC_PATH, 'default_filename': 'index.html'}\n return tornado.web.Application([('/(.*\\\\.html)?', HtmlHandler,\n static_config), ('/ws/live-server', LiveServerHandler), (\n '/(liveServer.js)', tornado.web.StaticFileHandler, {'path':\n LIVE_SERVER_JS_PATH}), ('/(.*)', tornado.web.StaticFileHandler,\n static_config)], **config)\n\n\ndef start_app():\n app = make_app()\n server = app.listen(global_vars.PORT)\n print('listening on {}'.format(global_vars.PORT))\n return server\n\n\ndef stop_app(app):\n app.stop()\n",
"<import token>\n\n\nclass HtmlHandler(tornado.web.RequestHandler):\n\n def initialize(self, path, default_filename=None):\n self.root = path\n self.default_filename = default_filename\n\n def get(self, captured):\n if captured is None:\n captured = self.default_filename\n try:\n injected_html = inject_live_server_script(os.path.join(self.\n root, captured))\n self.write(injected_html)\n except FileNotFoundError:\n self.send_error(404)\n\n\nclass LiveServerHandler(tornado.websocket.WebSocketHandler):\n active_clients = set()\n\n def open(self):\n LiveServerHandler.active_clients.add(self)\n\n def on_close(self):\n LiveServerHandler.active_clients.remove(self)\n\n\n<function token>\n<function token>\n\n\ndef start_app():\n app = make_app()\n server = app.listen(global_vars.PORT)\n print('listening on {}'.format(global_vars.PORT))\n return server\n\n\ndef stop_app(app):\n app.stop()\n",
"<import token>\n\n\nclass HtmlHandler(tornado.web.RequestHandler):\n\n def initialize(self, path, default_filename=None):\n self.root = path\n self.default_filename = default_filename\n\n def get(self, captured):\n if captured is None:\n captured = self.default_filename\n try:\n injected_html = inject_live_server_script(os.path.join(self.\n root, captured))\n self.write(injected_html)\n except FileNotFoundError:\n self.send_error(404)\n\n\nclass LiveServerHandler(tornado.websocket.WebSocketHandler):\n active_clients = set()\n\n def open(self):\n LiveServerHandler.active_clients.add(self)\n\n def on_close(self):\n LiveServerHandler.active_clients.remove(self)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef stop_app(app):\n app.stop()\n",
"<import token>\n\n\nclass HtmlHandler(tornado.web.RequestHandler):\n\n def initialize(self, path, default_filename=None):\n self.root = path\n self.default_filename = default_filename\n\n def get(self, captured):\n if captured is None:\n captured = self.default_filename\n try:\n injected_html = inject_live_server_script(os.path.join(self.\n root, captured))\n self.write(injected_html)\n except FileNotFoundError:\n self.send_error(404)\n\n\nclass LiveServerHandler(tornado.websocket.WebSocketHandler):\n active_clients = set()\n\n def open(self):\n LiveServerHandler.active_clients.add(self)\n\n def on_close(self):\n LiveServerHandler.active_clients.remove(self)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n\n\nclass HtmlHandler(tornado.web.RequestHandler):\n <function token>\n\n def get(self, captured):\n if captured is None:\n captured = self.default_filename\n try:\n injected_html = inject_live_server_script(os.path.join(self.\n root, captured))\n self.write(injected_html)\n except FileNotFoundError:\n self.send_error(404)\n\n\nclass LiveServerHandler(tornado.websocket.WebSocketHandler):\n active_clients = set()\n\n def open(self):\n LiveServerHandler.active_clients.add(self)\n\n def on_close(self):\n LiveServerHandler.active_clients.remove(self)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n\n\nclass HtmlHandler(tornado.web.RequestHandler):\n <function token>\n <function token>\n\n\nclass LiveServerHandler(tornado.websocket.WebSocketHandler):\n active_clients = set()\n\n def open(self):\n LiveServerHandler.active_clients.add(self)\n\n def on_close(self):\n LiveServerHandler.active_clients.remove(self)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<class token>\n\n\nclass LiveServerHandler(tornado.websocket.WebSocketHandler):\n active_clients = set()\n\n def open(self):\n LiveServerHandler.active_clients.add(self)\n\n def on_close(self):\n LiveServerHandler.active_clients.remove(self)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<class token>\n\n\nclass LiveServerHandler(tornado.websocket.WebSocketHandler):\n <assignment token>\n\n def open(self):\n LiveServerHandler.active_clients.add(self)\n\n def on_close(self):\n LiveServerHandler.active_clients.remove(self)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<class token>\n\n\nclass LiveServerHandler(tornado.websocket.WebSocketHandler):\n <assignment token>\n <function token>\n\n def on_close(self):\n LiveServerHandler.active_clients.remove(self)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<class token>\n\n\nclass LiveServerHandler(tornado.websocket.WebSocketHandler):\n <assignment token>\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<class token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
99,356 |
3b2cd7bab2103bf95b058248b9584f089d3d33b2
|
from abc import ABCMeta, abstractmethod


class Selecao(metaclass=ABCMeta):
    """Base class for implementing chromosome selection techniques."""

    @abstractmethod
    def selecionar_cromossomo(self, populacao):
        """Base method, to be overridden in subclasses."""
        pass
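

# Illustrative concrete subclass (hypothetical name and fitness layout,
# not part of the original file): roulette-wheel selection, where each
# chromosome is drawn with probability proportional to its fitness.
import random


class SelecaoRoleta(Selecao):
    """Roulette-wheel selection over (cromossomo, fitness) pairs."""

    def selecionar_cromossomo(self, populacao):
        # populacao is assumed to be a list of (cromossomo, fitness) tuples.
        total = sum(fitness for _, fitness in populacao)
        pick = random.uniform(0, total)
        acumulado = 0.0
        for cromossomo, fitness in populacao:
            acumulado += fitness
            if acumulado >= pick:
                return cromossomo
        return populacao[-1][0]  # guard against floating-point drift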
|
[
"from abc import ABCMeta, abstractmethod\n\nclass Selecao(metaclass=ABCMeta):\n \"\"\"Classe base para implementação das técnicas de seleção de cromossomo.\"\"\"\n\n @abstractmethod\n def selecionar_cromossomo(self, populacao):\n \"\"\"Método base para ser sobreescrito nas subclasses.\"\"\"\n pass",
"from abc import ABCMeta, abstractmethod\n\n\nclass Selecao(metaclass=ABCMeta):\n \"\"\"Classe base para implementação das técnicas de seleção de cromossomo.\"\"\"\n\n @abstractmethod\n def selecionar_cromossomo(self, populacao):\n \"\"\"Método base para ser sobreescrito nas subclasses.\"\"\"\n pass\n",
"<import token>\n\n\nclass Selecao(metaclass=ABCMeta):\n \"\"\"Classe base para implementação das técnicas de seleção de cromossomo.\"\"\"\n\n @abstractmethod\n def selecionar_cromossomo(self, populacao):\n \"\"\"Método base para ser sobreescrito nas subclasses.\"\"\"\n pass\n",
"<import token>\n\n\nclass Selecao(metaclass=ABCMeta):\n <docstring token>\n\n @abstractmethod\n def selecionar_cromossomo(self, populacao):\n \"\"\"Método base para ser sobreescrito nas subclasses.\"\"\"\n pass\n",
"<import token>\n\n\nclass Selecao(metaclass=ABCMeta):\n <docstring token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
99,357 |
7adede326402d6b50839df2c63a6b234b4b90eb9
|
from turtle import Turtle

ALIGNMENT = "right"
FONT = ("Courier", 24, "normal")


class Scoreboard(Turtle):

    def __init__(self):
        super().__init__()
        self.level = 0
        self.penup()
        self.color("black")
        self.goto(0, 255)
        self.hideturtle()
        self.update_scoreboard()

    def update_scoreboard(self):
        self.write(arg=f"level = {self.level}",
                   align=ALIGNMENT,
                   font=FONT)

    def increase_level(self):
        self.clear()
        self.level += 1
        self.update_scoreboard()

    def game_over(self):
        self.clear()
        self.write(arg="GAME OVER",
                   align=ALIGNMENT,
                   font=FONT)
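

# Minimal usage sketch (assumed; the original module only defines the class):
if __name__ == '__main__':
    from turtle import Screen

    screen = Screen()
    screen.setup(width=600, height=600)
    scoreboard = Scoreboard()
    scoreboard.increase_level()  # clears, bumps level to 1, redraws
    screen.exitonclick()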
|
[
"from turtle import Turtle\n\nALIGNMENT = \"right\"\nFONT = (\"Courier\", 24, \"normal\")\n\n\nclass Scoreboard(Turtle):\n\n def __init__(self):\n super().__init__()\n self.level = 0\n self.penup()\n self.color(\"black\")\n self.goto(0, 255)\n self.hideturtle()\n self.update_scoreboard()\n\n def update_scoreboard(self):\n self.write(arg=f\"level = {self.level}\",\n align=ALIGNMENT,\n font=FONT)\n\n def increase_level(self):\n self.clear()\n self.level += 1\n self.update_scoreboard()\n\n def game_over(self):\n self.clear()\n self.write(arg=f\"GAME OVER\",\n align=ALIGNMENT,\n font=FONT)\n",
"from turtle import Turtle\nALIGNMENT = 'right'\nFONT = 'Courier', 24, 'normal'\n\n\nclass Scoreboard(Turtle):\n\n def __init__(self):\n super().__init__()\n self.level = 0\n self.penup()\n self.color('black')\n self.goto(0, 255)\n self.hideturtle()\n self.update_scoreboard()\n\n def update_scoreboard(self):\n self.write(arg=f'level = {self.level}', align=ALIGNMENT, font=FONT)\n\n def increase_level(self):\n self.clear()\n self.level += 1\n self.update_scoreboard()\n\n def game_over(self):\n self.clear()\n self.write(arg=f'GAME OVER', align=ALIGNMENT, font=FONT)\n",
"<import token>\nALIGNMENT = 'right'\nFONT = 'Courier', 24, 'normal'\n\n\nclass Scoreboard(Turtle):\n\n def __init__(self):\n super().__init__()\n self.level = 0\n self.penup()\n self.color('black')\n self.goto(0, 255)\n self.hideturtle()\n self.update_scoreboard()\n\n def update_scoreboard(self):\n self.write(arg=f'level = {self.level}', align=ALIGNMENT, font=FONT)\n\n def increase_level(self):\n self.clear()\n self.level += 1\n self.update_scoreboard()\n\n def game_over(self):\n self.clear()\n self.write(arg=f'GAME OVER', align=ALIGNMENT, font=FONT)\n",
"<import token>\n<assignment token>\n\n\nclass Scoreboard(Turtle):\n\n def __init__(self):\n super().__init__()\n self.level = 0\n self.penup()\n self.color('black')\n self.goto(0, 255)\n self.hideturtle()\n self.update_scoreboard()\n\n def update_scoreboard(self):\n self.write(arg=f'level = {self.level}', align=ALIGNMENT, font=FONT)\n\n def increase_level(self):\n self.clear()\n self.level += 1\n self.update_scoreboard()\n\n def game_over(self):\n self.clear()\n self.write(arg=f'GAME OVER', align=ALIGNMENT, font=FONT)\n",
"<import token>\n<assignment token>\n\n\nclass Scoreboard(Turtle):\n <function token>\n\n def update_scoreboard(self):\n self.write(arg=f'level = {self.level}', align=ALIGNMENT, font=FONT)\n\n def increase_level(self):\n self.clear()\n self.level += 1\n self.update_scoreboard()\n\n def game_over(self):\n self.clear()\n self.write(arg=f'GAME OVER', align=ALIGNMENT, font=FONT)\n",
"<import token>\n<assignment token>\n\n\nclass Scoreboard(Turtle):\n <function token>\n <function token>\n\n def increase_level(self):\n self.clear()\n self.level += 1\n self.update_scoreboard()\n\n def game_over(self):\n self.clear()\n self.write(arg=f'GAME OVER', align=ALIGNMENT, font=FONT)\n",
"<import token>\n<assignment token>\n\n\nclass Scoreboard(Turtle):\n <function token>\n <function token>\n <function token>\n\n def game_over(self):\n self.clear()\n self.write(arg=f'GAME OVER', align=ALIGNMENT, font=FONT)\n",
"<import token>\n<assignment token>\n\n\nclass Scoreboard(Turtle):\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n<class token>\n"
] | false |
99,358 |
c06b1581e2b0240f5f59d9cf28e13f14cea2c53e
|
# coding=gbk
import json
import pandas as pd
import numpy as np
import akshare as ak
from pyecharts.charts import Map, Line, Grid, Timeline, Bar, Tab
from pyecharts import options as opts
from pyecharts.commons.utils import JsCode
from pyecharts.globals import ThemeType
date_span_1 = ['2020-02-0' + str(i) for i in range(8, 10)]
# range(10, 23) so the span really runs through 2020-02-22, matching the
# "2-8至2-22" chart titles below; the original range(10, 22) stopped at
# 2020-02-21 (assumes epidata.json holds one record per day through 2-22).
date_span_2 = ['2020-02-' + str(i) for i in range(10, 23)]
date_span = date_span_1 + date_span_2
time_list = [item[-5:] for item in date_span]
# maxNum/minNum are never referenced below; they look like leftovers
# from a removed map-rendering step.
maxNum = 5000
minNum = 0

with open('epidata.json', 'r') as f:
    prov_data = json.loads(f.read())
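
# Inferred shape of epidata.json (an assumption based on the accesses
# below): a list of daily records, each holding a 'data' list of
# per-province dicts shaped like
#     {'name': '湖北省', 'value': [confirmed, cured, dead, ...]}
# The per-province getters keep value[:-1], i.e. drop the last element.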
def get_hubei_data():
hubei_data = []
for d in prov_data:
for x in d['data']:
if x['name'] == '湖北省':
hubei_data.append(x["value"][:-1])
return hubei_data
def get_chongqin_data():
chong_data = []
for d in prov_data:
for x in d['data']:
if x['name'] == '重庆市':
chong_data.append(x["value"][:-1])
return chong_data
def get_total_data():
total_data = []
for d in prov_data:
confirm, cure, dead = 0, 0, 0
for x in d['data']:
confirm += x['value'][0]
cure += x['value'][1]
dead += x['value'][2]
total_data.append([confirm, cure, dead])
return total_data
def get_line_charts():
    # Materialize each table once instead of re-running the getters
    # for every column.
    hb = np.array(get_hubei_data())
    cq = np.array(get_chongqin_data())
    tot = np.array(get_total_data())
    hb_confirmed = [int(x) for x in hb[:, 0]]
    cq_confirmed = [int(x) for x in cq[:, 0]]
    tot_confirmed = [int(x) for x in tot[:, 0]]
    hb_cured = [int(x) for x in hb[:, 1]]
    cq_cured = [int(x) for x in cq[:, 1]]
    tot_cured = [int(x) for x in tot[:, 1]]
    hb_dead = [int(x) for x in hb[:, 2]]
    cq_dead = [int(x) for x in cq[:, 2]]
    tot_dead = [int(x) for x in tot[:, 2]]

    def make_line(title, hb_series, cq_series, tot_series):
        # The three tabs share one layout; only the series and title differ,
        # so the chart construction is factored out (behavior unchanged).
        return (
            Line()
            .add_xaxis(time_list)
            .add_yaxis('湖北', hb_series, color='#ff6361')
            .add_yaxis('重庆', cq_series, color='#ffa600')
            .add_yaxis('全国', tot_series, color='#bc5090')
            .set_global_opts(
                title_opts=opts.TitleOpts(title=title,
                                          pos_left='20%', pos_top='5%'),
                tooltip_opts=opts.TooltipOpts(trigger='axis',
                                              axis_pointer_type='shadow'),
                legend_opts=opts.LegendOpts(orient='horizontal',
                                            pos_left='60%', pos_top='5%'),
                yaxis_opts=opts.AxisOpts(
                    name='人数',
                    type_='value',
                    axistick_opts=opts.AxisTickOpts(is_show=True),
                    splitline_opts=opts.SplitLineOpts(is_show=True),
                )
            )
            .set_series_opts(label_opts=opts.LabelOpts(is_show=False),
                             splitline_opts=opts.SplitLineOpts(is_show=True))
        )

    line_chart_1 = make_line('2-8至2-22之间累计确诊病例变化趋势',
                             hb_confirmed, cq_confirmed, tot_confirmed)
    line_chart_2 = make_line('2-8至2-22之间累计治愈病例变化趋势',
                             hb_cured, cq_cured, tot_cured)
    line_chart_3 = make_line('2-8至2-22之间累计死亡病例变化趋势',
                             hb_dead, cq_dead, tot_dead)
tab = Tab(page_title='湖北、重庆、全国病例变化趋势')
tab.add(line_chart_1, '累计确诊人数')
tab.add(line_chart_2, '累计治愈人数')
tab.add(line_chart_3, '累计死亡人数')
return tab
if __name__ == '__main__':
g = get_line_charts()
g.render("hubei_vs_total.html")
|
[
"# coding=gbk\nimport json\n\nimport pandas as pd\nimport numpy as np\nimport akshare as ak\nfrom pyecharts.charts import Map, Line, Grid, Timeline, Bar, Tab\nfrom pyecharts import options as opts\nfrom pyecharts.commons.utils import JsCode\nfrom pyecharts.globals import ThemeType\n\ndate_span_1 = ['2020-02-0' + str(i) for i in range(8, 10)]\ndate_span_2 = ['2020-02-' + str(i) for i in range(10, 22)]\ndate_span = date_span_1 + date_span_2\n\ntime_list = [item[-5:] for item in date_span]\n\n# print(prov_data)\n\nmaxNum = 5000\nminNum = 0\n\n# def get_map_data(date:str):\nwith open('epidata.json', 'r') as f:\n prov_data = json.loads(f.read())\n\n\ndef get_hubei_data():\n hubei_data = []\n for d in prov_data:\n for x in d['data']:\n if x['name'] == '湖北省':\n hubei_data.append(x[\"value\"][:-1])\n return hubei_data\n\n\ndef get_chongqin_data():\n chong_data = []\n for d in prov_data:\n for x in d['data']:\n if x['name'] == '重庆市':\n chong_data.append(x[\"value\"][:-1])\n return chong_data\n\n\ndef get_total_data():\n total_data = []\n for d in prov_data:\n confirm, cure, dead = 0, 0, 0\n for x in d['data']:\n confirm += x['value'][0]\n cure += x['value'][1]\n dead += x['value'][2]\n total_data.append([confirm, cure, dead])\n return total_data\n\n\n# print(np.array(get_total_data())[:,0])\n\ndef get_line_charts():\n hb_confirmed = [int(x) for x in np.array(get_hubei_data())[:, 0]]\n cq_confirmed = [int(x) for x in np.array(get_chongqin_data())[:, 0]]\n tot_confirmed = [int(x) for x in np.array(get_total_data())[:, 0]]\n hb_cured = [int(x) for x in np.array(get_hubei_data())[:, 1]]\n cq_cured = [int(x) for x in np.array(get_chongqin_data())[:, 1]]\n tot_cured = [int(x) for x in np.array(get_total_data())[:, 1]]\n hb_dead = [int(x) for x in np.array(get_hubei_data())[:, 2]]\n cq_dead = [int(x) for x in np.array(get_chongqin_data())[:, 2]]\n tot_dead = [int(x) for x in np.array(get_total_data())[:, 2]]\n line_chart_1 = (\n Line(init_opts=opts.InitOpts())\n\n .add_xaxis(time_list)\n .add_yaxis('湖北',hb_confirmed, color='#ff6361')\n .add_yaxis('重庆', cq_confirmed, color='#ffa600')\n .add_yaxis('全国', tot_confirmed, color='#bc5090')\n\n .set_global_opts(\n title_opts=opts.TitleOpts(title='2-8至2-22之间累计确诊病例变化趋势',\n pos_left='20%', pos_top='5%'),\n tooltip_opts=opts.TooltipOpts(trigger=\"axis\", axis_pointer_type='shadow'),\n legend_opts=opts.LegendOpts(orient='horizontal', pos_left='60%', pos_top='5%'),\n yaxis_opts=opts.AxisOpts(\n name='人数',\n type_='value',\n axistick_opts=opts.AxisTickOpts(is_show=True),\n splitline_opts=opts.SplitLineOpts(is_show=True),\n )\n )\n .set_series_opts(label_opts=opts.LabelOpts(is_show=False), splitline_opts=opts.SplitLineOpts(is_show=True))\n\n )\n\n line_chart_2 = (\n Line()\n\n .add_xaxis(time_list)\n .add_yaxis('湖北', hb_cured, color='#ff6361')\n .add_yaxis('重庆', cq_cured, color='#ffa600')\n .add_yaxis('全国', tot_cured, color='#bc5090')\n\n .set_global_opts(\n title_opts=opts.TitleOpts(title='2-8至2-22之间累计治愈病例变化趋势',\n pos_left='20%', pos_top='5%'),\n tooltip_opts=opts.TooltipOpts(trigger=\"axis\", axis_pointer_type='shadow'),\n legend_opts=opts.LegendOpts(orient='horizontal', pos_left='60%', pos_top='5%'),\n yaxis_opts=opts.AxisOpts(\n name='人数',\n type_='value',\n axistick_opts=opts.AxisTickOpts(is_show=True),\n splitline_opts=opts.SplitLineOpts(is_show=True),\n )\n )\n .set_series_opts(label_opts=opts.LabelOpts(is_show=False), splitline_opts=opts.SplitLineOpts(is_show=True))\n\n )\n\n line_chart_3 = (\n Line()\n\n .add_xaxis(time_list)\n .add_yaxis('湖北', hb_dead, 
color='#ff6361')\n .add_yaxis('重庆', cq_dead, color='#ffa600')\n .add_yaxis('全国', tot_dead, color='#bc5090')\n\n .set_global_opts(\n title_opts=opts.TitleOpts(title='2-8至2-22之间累计死亡病例变化趋势',\n pos_left='20%', pos_top='5%'),\n tooltip_opts=opts.TooltipOpts(trigger=\"axis\", axis_pointer_type='shadow'),\n legend_opts=opts.LegendOpts(orient='horizontal', pos_left='60%', pos_top='5%'),\n yaxis_opts=opts.AxisOpts(\n name='人数',\n type_='value',\n axistick_opts=opts.AxisTickOpts(is_show=True),\n splitline_opts=opts.SplitLineOpts(is_show=True),\n )\n )\n .set_series_opts(label_opts=opts.LabelOpts(is_show=False), splitline_opts=opts.SplitLineOpts(is_show=True))\n\n )\n tab = Tab(page_title='湖北、重庆、全国病例变化趋势')\n tab.add(line_chart_1, '累计确诊人数')\n tab.add(line_chart_2, '累计治愈人数')\n tab.add(line_chart_3, '累计死亡人数')\n return tab\n\n\nif __name__ == '__main__':\n g = get_line_charts()\n g.render(\"hubei_vs_total.html\")\n",
"import json\nimport pandas as pd\nimport numpy as np\nimport akshare as ak\nfrom pyecharts.charts import Map, Line, Grid, Timeline, Bar, Tab\nfrom pyecharts import options as opts\nfrom pyecharts.commons.utils import JsCode\nfrom pyecharts.globals import ThemeType\ndate_span_1 = [('2020-02-0' + str(i)) for i in range(8, 10)]\ndate_span_2 = [('2020-02-' + str(i)) for i in range(10, 22)]\ndate_span = date_span_1 + date_span_2\ntime_list = [item[-5:] for item in date_span]\nmaxNum = 5000\nminNum = 0\nwith open('epidata.json', 'r') as f:\n prov_data = json.loads(f.read())\n\n\ndef get_hubei_data():\n hubei_data = []\n for d in prov_data:\n for x in d['data']:\n if x['name'] == '湖北省':\n hubei_data.append(x['value'][:-1])\n return hubei_data\n\n\ndef get_chongqin_data():\n chong_data = []\n for d in prov_data:\n for x in d['data']:\n if x['name'] == '重庆市':\n chong_data.append(x['value'][:-1])\n return chong_data\n\n\ndef get_total_data():\n total_data = []\n for d in prov_data:\n confirm, cure, dead = 0, 0, 0\n for x in d['data']:\n confirm += x['value'][0]\n cure += x['value'][1]\n dead += x['value'][2]\n total_data.append([confirm, cure, dead])\n return total_data\n\n\ndef get_line_charts():\n hb_confirmed = [int(x) for x in np.array(get_hubei_data())[:, 0]]\n cq_confirmed = [int(x) for x in np.array(get_chongqin_data())[:, 0]]\n tot_confirmed = [int(x) for x in np.array(get_total_data())[:, 0]]\n hb_cured = [int(x) for x in np.array(get_hubei_data())[:, 1]]\n cq_cured = [int(x) for x in np.array(get_chongqin_data())[:, 1]]\n tot_cured = [int(x) for x in np.array(get_total_data())[:, 1]]\n hb_dead = [int(x) for x in np.array(get_hubei_data())[:, 2]]\n cq_dead = [int(x) for x in np.array(get_chongqin_data())[:, 2]]\n tot_dead = [int(x) for x in np.array(get_total_data())[:, 2]]\n line_chart_1 = Line(init_opts=opts.InitOpts()).add_xaxis(time_list\n ).add_yaxis('湖北', hb_confirmed, color='#ff6361').add_yaxis('重庆',\n cq_confirmed, color='#ffa600').add_yaxis('全国', tot_confirmed, color\n ='#bc5090').set_global_opts(title_opts=opts.TitleOpts(title=\n '2-8至2-22之间累计确诊病例变化趋势', pos_left='20%', pos_top='5%'), tooltip_opts\n =opts.TooltipOpts(trigger='axis', axis_pointer_type='shadow'),\n legend_opts=opts.LegendOpts(orient='horizontal', pos_left='60%',\n pos_top='5%'), yaxis_opts=opts.AxisOpts(name='人数', type_='value',\n axistick_opts=opts.AxisTickOpts(is_show=True), splitline_opts=opts.\n SplitLineOpts(is_show=True))).set_series_opts(label_opts=opts.\n LabelOpts(is_show=False), splitline_opts=opts.SplitLineOpts(is_show\n =True))\n line_chart_2 = Line().add_xaxis(time_list).add_yaxis('湖北', hb_cured,\n color='#ff6361').add_yaxis('重庆', cq_cured, color='#ffa600').add_yaxis(\n '全国', tot_cured, color='#bc5090').set_global_opts(title_opts=opts.\n TitleOpts(title='2-8至2-22之间累计治愈病例变化趋势', pos_left='20%', pos_top=\n '5%'), tooltip_opts=opts.TooltipOpts(trigger='axis',\n axis_pointer_type='shadow'), legend_opts=opts.LegendOpts(orient=\n 'horizontal', pos_left='60%', pos_top='5%'), yaxis_opts=opts.\n AxisOpts(name='人数', type_='value', axistick_opts=opts.AxisTickOpts(\n is_show=True), splitline_opts=opts.SplitLineOpts(is_show=True))\n ).set_series_opts(label_opts=opts.LabelOpts(is_show=False),\n splitline_opts=opts.SplitLineOpts(is_show=True))\n line_chart_3 = Line().add_xaxis(time_list).add_yaxis('湖北', hb_dead,\n color='#ff6361').add_yaxis('重庆', cq_dead, color='#ffa600').add_yaxis(\n '全国', tot_dead, color='#bc5090').set_global_opts(title_opts=opts.\n TitleOpts(title='2-8至2-22之间累计死亡病例变化趋势', pos_left='20%', pos_top=\n 
'5%'), tooltip_opts=opts.TooltipOpts(trigger='axis',\n axis_pointer_type='shadow'), legend_opts=opts.LegendOpts(orient=\n 'horizontal', pos_left='60%', pos_top='5%'), yaxis_opts=opts.\n AxisOpts(name='人数', type_='value', axistick_opts=opts.AxisTickOpts(\n is_show=True), splitline_opts=opts.SplitLineOpts(is_show=True))\n ).set_series_opts(label_opts=opts.LabelOpts(is_show=False),\n splitline_opts=opts.SplitLineOpts(is_show=True))\n tab = Tab(page_title='湖北、重庆、全国病例变化趋势')\n tab.add(line_chart_1, '累计确诊人数')\n tab.add(line_chart_2, '累计治愈人数')\n tab.add(line_chart_3, '累计死亡人数')\n return tab\n\n\nif __name__ == '__main__':\n g = get_line_charts()\n g.render('hubei_vs_total.html')\n",
"<import token>\ndate_span_1 = [('2020-02-0' + str(i)) for i in range(8, 10)]\ndate_span_2 = [('2020-02-' + str(i)) for i in range(10, 22)]\ndate_span = date_span_1 + date_span_2\ntime_list = [item[-5:] for item in date_span]\nmaxNum = 5000\nminNum = 0\nwith open('epidata.json', 'r') as f:\n prov_data = json.loads(f.read())\n\n\ndef get_hubei_data():\n hubei_data = []\n for d in prov_data:\n for x in d['data']:\n if x['name'] == '湖北省':\n hubei_data.append(x['value'][:-1])\n return hubei_data\n\n\ndef get_chongqin_data():\n chong_data = []\n for d in prov_data:\n for x in d['data']:\n if x['name'] == '重庆市':\n chong_data.append(x['value'][:-1])\n return chong_data\n\n\ndef get_total_data():\n total_data = []\n for d in prov_data:\n confirm, cure, dead = 0, 0, 0\n for x in d['data']:\n confirm += x['value'][0]\n cure += x['value'][1]\n dead += x['value'][2]\n total_data.append([confirm, cure, dead])\n return total_data\n\n\ndef get_line_charts():\n hb_confirmed = [int(x) for x in np.array(get_hubei_data())[:, 0]]\n cq_confirmed = [int(x) for x in np.array(get_chongqin_data())[:, 0]]\n tot_confirmed = [int(x) for x in np.array(get_total_data())[:, 0]]\n hb_cured = [int(x) for x in np.array(get_hubei_data())[:, 1]]\n cq_cured = [int(x) for x in np.array(get_chongqin_data())[:, 1]]\n tot_cured = [int(x) for x in np.array(get_total_data())[:, 1]]\n hb_dead = [int(x) for x in np.array(get_hubei_data())[:, 2]]\n cq_dead = [int(x) for x in np.array(get_chongqin_data())[:, 2]]\n tot_dead = [int(x) for x in np.array(get_total_data())[:, 2]]\n line_chart_1 = Line(init_opts=opts.InitOpts()).add_xaxis(time_list\n ).add_yaxis('湖北', hb_confirmed, color='#ff6361').add_yaxis('重庆',\n cq_confirmed, color='#ffa600').add_yaxis('全国', tot_confirmed, color\n ='#bc5090').set_global_opts(title_opts=opts.TitleOpts(title=\n '2-8至2-22之间累计确诊病例变化趋势', pos_left='20%', pos_top='5%'), tooltip_opts\n =opts.TooltipOpts(trigger='axis', axis_pointer_type='shadow'),\n legend_opts=opts.LegendOpts(orient='horizontal', pos_left='60%',\n pos_top='5%'), yaxis_opts=opts.AxisOpts(name='人数', type_='value',\n axistick_opts=opts.AxisTickOpts(is_show=True), splitline_opts=opts.\n SplitLineOpts(is_show=True))).set_series_opts(label_opts=opts.\n LabelOpts(is_show=False), splitline_opts=opts.SplitLineOpts(is_show\n =True))\n line_chart_2 = Line().add_xaxis(time_list).add_yaxis('湖北', hb_cured,\n color='#ff6361').add_yaxis('重庆', cq_cured, color='#ffa600').add_yaxis(\n '全国', tot_cured, color='#bc5090').set_global_opts(title_opts=opts.\n TitleOpts(title='2-8至2-22之间累计治愈病例变化趋势', pos_left='20%', pos_top=\n '5%'), tooltip_opts=opts.TooltipOpts(trigger='axis',\n axis_pointer_type='shadow'), legend_opts=opts.LegendOpts(orient=\n 'horizontal', pos_left='60%', pos_top='5%'), yaxis_opts=opts.\n AxisOpts(name='人数', type_='value', axistick_opts=opts.AxisTickOpts(\n is_show=True), splitline_opts=opts.SplitLineOpts(is_show=True))\n ).set_series_opts(label_opts=opts.LabelOpts(is_show=False),\n splitline_opts=opts.SplitLineOpts(is_show=True))\n line_chart_3 = Line().add_xaxis(time_list).add_yaxis('湖北', hb_dead,\n color='#ff6361').add_yaxis('重庆', cq_dead, color='#ffa600').add_yaxis(\n '全国', tot_dead, color='#bc5090').set_global_opts(title_opts=opts.\n TitleOpts(title='2-8至2-22之间累计死亡病例变化趋势', pos_left='20%', pos_top=\n '5%'), tooltip_opts=opts.TooltipOpts(trigger='axis',\n axis_pointer_type='shadow'), legend_opts=opts.LegendOpts(orient=\n 'horizontal', pos_left='60%', pos_top='5%'), yaxis_opts=opts.\n AxisOpts(name='人数', type_='value', 
axistick_opts=opts.AxisTickOpts(\n is_show=True), splitline_opts=opts.SplitLineOpts(is_show=True))\n ).set_series_opts(label_opts=opts.LabelOpts(is_show=False),\n splitline_opts=opts.SplitLineOpts(is_show=True))\n tab = Tab(page_title='湖北、重庆、全国病例变化趋势')\n tab.add(line_chart_1, '累计确诊人数')\n tab.add(line_chart_2, '累计治愈人数')\n tab.add(line_chart_3, '累计死亡人数')\n return tab\n\n\nif __name__ == '__main__':\n g = get_line_charts()\n g.render('hubei_vs_total.html')\n",
"<import token>\n<assignment token>\nwith open('epidata.json', 'r') as f:\n prov_data = json.loads(f.read())\n\n\ndef get_hubei_data():\n hubei_data = []\n for d in prov_data:\n for x in d['data']:\n if x['name'] == '湖北省':\n hubei_data.append(x['value'][:-1])\n return hubei_data\n\n\ndef get_chongqin_data():\n chong_data = []\n for d in prov_data:\n for x in d['data']:\n if x['name'] == '重庆市':\n chong_data.append(x['value'][:-1])\n return chong_data\n\n\ndef get_total_data():\n total_data = []\n for d in prov_data:\n confirm, cure, dead = 0, 0, 0\n for x in d['data']:\n confirm += x['value'][0]\n cure += x['value'][1]\n dead += x['value'][2]\n total_data.append([confirm, cure, dead])\n return total_data\n\n\ndef get_line_charts():\n hb_confirmed = [int(x) for x in np.array(get_hubei_data())[:, 0]]\n cq_confirmed = [int(x) for x in np.array(get_chongqin_data())[:, 0]]\n tot_confirmed = [int(x) for x in np.array(get_total_data())[:, 0]]\n hb_cured = [int(x) for x in np.array(get_hubei_data())[:, 1]]\n cq_cured = [int(x) for x in np.array(get_chongqin_data())[:, 1]]\n tot_cured = [int(x) for x in np.array(get_total_data())[:, 1]]\n hb_dead = [int(x) for x in np.array(get_hubei_data())[:, 2]]\n cq_dead = [int(x) for x in np.array(get_chongqin_data())[:, 2]]\n tot_dead = [int(x) for x in np.array(get_total_data())[:, 2]]\n line_chart_1 = Line(init_opts=opts.InitOpts()).add_xaxis(time_list\n ).add_yaxis('湖北', hb_confirmed, color='#ff6361').add_yaxis('重庆',\n cq_confirmed, color='#ffa600').add_yaxis('全国', tot_confirmed, color\n ='#bc5090').set_global_opts(title_opts=opts.TitleOpts(title=\n '2-8至2-22之间累计确诊病例变化趋势', pos_left='20%', pos_top='5%'), tooltip_opts\n =opts.TooltipOpts(trigger='axis', axis_pointer_type='shadow'),\n legend_opts=opts.LegendOpts(orient='horizontal', pos_left='60%',\n pos_top='5%'), yaxis_opts=opts.AxisOpts(name='人数', type_='value',\n axistick_opts=opts.AxisTickOpts(is_show=True), splitline_opts=opts.\n SplitLineOpts(is_show=True))).set_series_opts(label_opts=opts.\n LabelOpts(is_show=False), splitline_opts=opts.SplitLineOpts(is_show\n =True))\n line_chart_2 = Line().add_xaxis(time_list).add_yaxis('湖北', hb_cured,\n color='#ff6361').add_yaxis('重庆', cq_cured, color='#ffa600').add_yaxis(\n '全国', tot_cured, color='#bc5090').set_global_opts(title_opts=opts.\n TitleOpts(title='2-8至2-22之间累计治愈病例变化趋势', pos_left='20%', pos_top=\n '5%'), tooltip_opts=opts.TooltipOpts(trigger='axis',\n axis_pointer_type='shadow'), legend_opts=opts.LegendOpts(orient=\n 'horizontal', pos_left='60%', pos_top='5%'), yaxis_opts=opts.\n AxisOpts(name='人数', type_='value', axistick_opts=opts.AxisTickOpts(\n is_show=True), splitline_opts=opts.SplitLineOpts(is_show=True))\n ).set_series_opts(label_opts=opts.LabelOpts(is_show=False),\n splitline_opts=opts.SplitLineOpts(is_show=True))\n line_chart_3 = Line().add_xaxis(time_list).add_yaxis('湖北', hb_dead,\n color='#ff6361').add_yaxis('重庆', cq_dead, color='#ffa600').add_yaxis(\n '全国', tot_dead, color='#bc5090').set_global_opts(title_opts=opts.\n TitleOpts(title='2-8至2-22之间累计死亡病例变化趋势', pos_left='20%', pos_top=\n '5%'), tooltip_opts=opts.TooltipOpts(trigger='axis',\n axis_pointer_type='shadow'), legend_opts=opts.LegendOpts(orient=\n 'horizontal', pos_left='60%', pos_top='5%'), yaxis_opts=opts.\n AxisOpts(name='人数', type_='value', axistick_opts=opts.AxisTickOpts(\n is_show=True), splitline_opts=opts.SplitLineOpts(is_show=True))\n ).set_series_opts(label_opts=opts.LabelOpts(is_show=False),\n splitline_opts=opts.SplitLineOpts(is_show=True))\n tab = 
Tab(page_title='湖北、重庆、全国病例变化趋势')\n tab.add(line_chart_1, '累计确诊人数')\n tab.add(line_chart_2, '累计治愈人数')\n tab.add(line_chart_3, '累计死亡人数')\n return tab\n\n\nif __name__ == '__main__':\n g = get_line_charts()\n g.render('hubei_vs_total.html')\n",
"<import token>\n<assignment token>\n<code token>\n\n\ndef get_hubei_data():\n hubei_data = []\n for d in prov_data:\n for x in d['data']:\n if x['name'] == '湖北省':\n hubei_data.append(x['value'][:-1])\n return hubei_data\n\n\ndef get_chongqin_data():\n chong_data = []\n for d in prov_data:\n for x in d['data']:\n if x['name'] == '重庆市':\n chong_data.append(x['value'][:-1])\n return chong_data\n\n\ndef get_total_data():\n total_data = []\n for d in prov_data:\n confirm, cure, dead = 0, 0, 0\n for x in d['data']:\n confirm += x['value'][0]\n cure += x['value'][1]\n dead += x['value'][2]\n total_data.append([confirm, cure, dead])\n return total_data\n\n\ndef get_line_charts():\n hb_confirmed = [int(x) for x in np.array(get_hubei_data())[:, 0]]\n cq_confirmed = [int(x) for x in np.array(get_chongqin_data())[:, 0]]\n tot_confirmed = [int(x) for x in np.array(get_total_data())[:, 0]]\n hb_cured = [int(x) for x in np.array(get_hubei_data())[:, 1]]\n cq_cured = [int(x) for x in np.array(get_chongqin_data())[:, 1]]\n tot_cured = [int(x) for x in np.array(get_total_data())[:, 1]]\n hb_dead = [int(x) for x in np.array(get_hubei_data())[:, 2]]\n cq_dead = [int(x) for x in np.array(get_chongqin_data())[:, 2]]\n tot_dead = [int(x) for x in np.array(get_total_data())[:, 2]]\n line_chart_1 = Line(init_opts=opts.InitOpts()).add_xaxis(time_list\n ).add_yaxis('湖北', hb_confirmed, color='#ff6361').add_yaxis('重庆',\n cq_confirmed, color='#ffa600').add_yaxis('全国', tot_confirmed, color\n ='#bc5090').set_global_opts(title_opts=opts.TitleOpts(title=\n '2-8至2-22之间累计确诊病例变化趋势', pos_left='20%', pos_top='5%'), tooltip_opts\n =opts.TooltipOpts(trigger='axis', axis_pointer_type='shadow'),\n legend_opts=opts.LegendOpts(orient='horizontal', pos_left='60%',\n pos_top='5%'), yaxis_opts=opts.AxisOpts(name='人数', type_='value',\n axistick_opts=opts.AxisTickOpts(is_show=True), splitline_opts=opts.\n SplitLineOpts(is_show=True))).set_series_opts(label_opts=opts.\n LabelOpts(is_show=False), splitline_opts=opts.SplitLineOpts(is_show\n =True))\n line_chart_2 = Line().add_xaxis(time_list).add_yaxis('湖北', hb_cured,\n color='#ff6361').add_yaxis('重庆', cq_cured, color='#ffa600').add_yaxis(\n '全国', tot_cured, color='#bc5090').set_global_opts(title_opts=opts.\n TitleOpts(title='2-8至2-22之间累计治愈病例变化趋势', pos_left='20%', pos_top=\n '5%'), tooltip_opts=opts.TooltipOpts(trigger='axis',\n axis_pointer_type='shadow'), legend_opts=opts.LegendOpts(orient=\n 'horizontal', pos_left='60%', pos_top='5%'), yaxis_opts=opts.\n AxisOpts(name='人数', type_='value', axistick_opts=opts.AxisTickOpts(\n is_show=True), splitline_opts=opts.SplitLineOpts(is_show=True))\n ).set_series_opts(label_opts=opts.LabelOpts(is_show=False),\n splitline_opts=opts.SplitLineOpts(is_show=True))\n line_chart_3 = Line().add_xaxis(time_list).add_yaxis('湖北', hb_dead,\n color='#ff6361').add_yaxis('重庆', cq_dead, color='#ffa600').add_yaxis(\n '全国', tot_dead, color='#bc5090').set_global_opts(title_opts=opts.\n TitleOpts(title='2-8至2-22之间累计死亡病例变化趋势', pos_left='20%', pos_top=\n '5%'), tooltip_opts=opts.TooltipOpts(trigger='axis',\n axis_pointer_type='shadow'), legend_opts=opts.LegendOpts(orient=\n 'horizontal', pos_left='60%', pos_top='5%'), yaxis_opts=opts.\n AxisOpts(name='人数', type_='value', axistick_opts=opts.AxisTickOpts(\n is_show=True), splitline_opts=opts.SplitLineOpts(is_show=True))\n ).set_series_opts(label_opts=opts.LabelOpts(is_show=False),\n splitline_opts=opts.SplitLineOpts(is_show=True))\n tab = Tab(page_title='湖北、重庆、全国病例变化趋势')\n tab.add(line_chart_1, '累计确诊人数')\n 
tab.add(line_chart_2, '累计治愈人数')\n tab.add(line_chart_3, '累计死亡人数')\n return tab\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n\n\ndef get_hubei_data():\n hubei_data = []\n for d in prov_data:\n for x in d['data']:\n if x['name'] == '湖北省':\n hubei_data.append(x['value'][:-1])\n return hubei_data\n\n\n<function token>\n\n\ndef get_total_data():\n total_data = []\n for d in prov_data:\n confirm, cure, dead = 0, 0, 0\n for x in d['data']:\n confirm += x['value'][0]\n cure += x['value'][1]\n dead += x['value'][2]\n total_data.append([confirm, cure, dead])\n return total_data\n\n\ndef get_line_charts():\n hb_confirmed = [int(x) for x in np.array(get_hubei_data())[:, 0]]\n cq_confirmed = [int(x) for x in np.array(get_chongqin_data())[:, 0]]\n tot_confirmed = [int(x) for x in np.array(get_total_data())[:, 0]]\n hb_cured = [int(x) for x in np.array(get_hubei_data())[:, 1]]\n cq_cured = [int(x) for x in np.array(get_chongqin_data())[:, 1]]\n tot_cured = [int(x) for x in np.array(get_total_data())[:, 1]]\n hb_dead = [int(x) for x in np.array(get_hubei_data())[:, 2]]\n cq_dead = [int(x) for x in np.array(get_chongqin_data())[:, 2]]\n tot_dead = [int(x) for x in np.array(get_total_data())[:, 2]]\n line_chart_1 = Line(init_opts=opts.InitOpts()).add_xaxis(time_list\n ).add_yaxis('湖北', hb_confirmed, color='#ff6361').add_yaxis('重庆',\n cq_confirmed, color='#ffa600').add_yaxis('全国', tot_confirmed, color\n ='#bc5090').set_global_opts(title_opts=opts.TitleOpts(title=\n '2-8至2-22之间累计确诊病例变化趋势', pos_left='20%', pos_top='5%'), tooltip_opts\n =opts.TooltipOpts(trigger='axis', axis_pointer_type='shadow'),\n legend_opts=opts.LegendOpts(orient='horizontal', pos_left='60%',\n pos_top='5%'), yaxis_opts=opts.AxisOpts(name='人数', type_='value',\n axistick_opts=opts.AxisTickOpts(is_show=True), splitline_opts=opts.\n SplitLineOpts(is_show=True))).set_series_opts(label_opts=opts.\n LabelOpts(is_show=False), splitline_opts=opts.SplitLineOpts(is_show\n =True))\n line_chart_2 = Line().add_xaxis(time_list).add_yaxis('湖北', hb_cured,\n color='#ff6361').add_yaxis('重庆', cq_cured, color='#ffa600').add_yaxis(\n '全国', tot_cured, color='#bc5090').set_global_opts(title_opts=opts.\n TitleOpts(title='2-8至2-22之间累计治愈病例变化趋势', pos_left='20%', pos_top=\n '5%'), tooltip_opts=opts.TooltipOpts(trigger='axis',\n axis_pointer_type='shadow'), legend_opts=opts.LegendOpts(orient=\n 'horizontal', pos_left='60%', pos_top='5%'), yaxis_opts=opts.\n AxisOpts(name='人数', type_='value', axistick_opts=opts.AxisTickOpts(\n is_show=True), splitline_opts=opts.SplitLineOpts(is_show=True))\n ).set_series_opts(label_opts=opts.LabelOpts(is_show=False),\n splitline_opts=opts.SplitLineOpts(is_show=True))\n line_chart_3 = Line().add_xaxis(time_list).add_yaxis('湖北', hb_dead,\n color='#ff6361').add_yaxis('重庆', cq_dead, color='#ffa600').add_yaxis(\n '全国', tot_dead, color='#bc5090').set_global_opts(title_opts=opts.\n TitleOpts(title='2-8至2-22之间累计死亡病例变化趋势', pos_left='20%', pos_top=\n '5%'), tooltip_opts=opts.TooltipOpts(trigger='axis',\n axis_pointer_type='shadow'), legend_opts=opts.LegendOpts(orient=\n 'horizontal', pos_left='60%', pos_top='5%'), yaxis_opts=opts.\n AxisOpts(name='人数', type_='value', axistick_opts=opts.AxisTickOpts(\n is_show=True), splitline_opts=opts.SplitLineOpts(is_show=True))\n ).set_series_opts(label_opts=opts.LabelOpts(is_show=False),\n splitline_opts=opts.SplitLineOpts(is_show=True))\n tab = Tab(page_title='湖北、重庆、全国病例变化趋势')\n tab.add(line_chart_1, '累计确诊人数')\n tab.add(line_chart_2, '累计治愈人数')\n tab.add(line_chart_3, '累计死亡人数')\n return tab\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n\n\ndef get_hubei_data():\n hubei_data = []\n for d in prov_data:\n for x in d['data']:\n if x['name'] == '湖北省':\n hubei_data.append(x['value'][:-1])\n return hubei_data\n\n\n<function token>\n<function token>\n\n\ndef get_line_charts():\n hb_confirmed = [int(x) for x in np.array(get_hubei_data())[:, 0]]\n cq_confirmed = [int(x) for x in np.array(get_chongqin_data())[:, 0]]\n tot_confirmed = [int(x) for x in np.array(get_total_data())[:, 0]]\n hb_cured = [int(x) for x in np.array(get_hubei_data())[:, 1]]\n cq_cured = [int(x) for x in np.array(get_chongqin_data())[:, 1]]\n tot_cured = [int(x) for x in np.array(get_total_data())[:, 1]]\n hb_dead = [int(x) for x in np.array(get_hubei_data())[:, 2]]\n cq_dead = [int(x) for x in np.array(get_chongqin_data())[:, 2]]\n tot_dead = [int(x) for x in np.array(get_total_data())[:, 2]]\n line_chart_1 = Line(init_opts=opts.InitOpts()).add_xaxis(time_list\n ).add_yaxis('湖北', hb_confirmed, color='#ff6361').add_yaxis('重庆',\n cq_confirmed, color='#ffa600').add_yaxis('全国', tot_confirmed, color\n ='#bc5090').set_global_opts(title_opts=opts.TitleOpts(title=\n '2-8至2-22之间累计确诊病例变化趋势', pos_left='20%', pos_top='5%'), tooltip_opts\n =opts.TooltipOpts(trigger='axis', axis_pointer_type='shadow'),\n legend_opts=opts.LegendOpts(orient='horizontal', pos_left='60%',\n pos_top='5%'), yaxis_opts=opts.AxisOpts(name='人数', type_='value',\n axistick_opts=opts.AxisTickOpts(is_show=True), splitline_opts=opts.\n SplitLineOpts(is_show=True))).set_series_opts(label_opts=opts.\n LabelOpts(is_show=False), splitline_opts=opts.SplitLineOpts(is_show\n =True))\n line_chart_2 = Line().add_xaxis(time_list).add_yaxis('湖北', hb_cured,\n color='#ff6361').add_yaxis('重庆', cq_cured, color='#ffa600').add_yaxis(\n '全国', tot_cured, color='#bc5090').set_global_opts(title_opts=opts.\n TitleOpts(title='2-8至2-22之间累计治愈病例变化趋势', pos_left='20%', pos_top=\n '5%'), tooltip_opts=opts.TooltipOpts(trigger='axis',\n axis_pointer_type='shadow'), legend_opts=opts.LegendOpts(orient=\n 'horizontal', pos_left='60%', pos_top='5%'), yaxis_opts=opts.\n AxisOpts(name='人数', type_='value', axistick_opts=opts.AxisTickOpts(\n is_show=True), splitline_opts=opts.SplitLineOpts(is_show=True))\n ).set_series_opts(label_opts=opts.LabelOpts(is_show=False),\n splitline_opts=opts.SplitLineOpts(is_show=True))\n line_chart_3 = Line().add_xaxis(time_list).add_yaxis('湖北', hb_dead,\n color='#ff6361').add_yaxis('重庆', cq_dead, color='#ffa600').add_yaxis(\n '全国', tot_dead, color='#bc5090').set_global_opts(title_opts=opts.\n TitleOpts(title='2-8至2-22之间累计死亡病例变化趋势', pos_left='20%', pos_top=\n '5%'), tooltip_opts=opts.TooltipOpts(trigger='axis',\n axis_pointer_type='shadow'), legend_opts=opts.LegendOpts(orient=\n 'horizontal', pos_left='60%', pos_top='5%'), yaxis_opts=opts.\n AxisOpts(name='人数', type_='value', axistick_opts=opts.AxisTickOpts(\n is_show=True), splitline_opts=opts.SplitLineOpts(is_show=True))\n ).set_series_opts(label_opts=opts.LabelOpts(is_show=False),\n splitline_opts=opts.SplitLineOpts(is_show=True))\n tab = Tab(page_title='湖北、重庆、全国病例变化趋势')\n tab.add(line_chart_1, '累计确诊人数')\n tab.add(line_chart_2, '累计治愈人数')\n tab.add(line_chart_3, '累计死亡人数')\n return tab\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n\n\ndef get_hubei_data():\n hubei_data = []\n for d in prov_data:\n for x in d['data']:\n if x['name'] == '湖北省':\n hubei_data.append(x['value'][:-1])\n return hubei_data\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
99,359 |
5c3507b69215177a6138f03ed5845be53548bc6d
|
from tensorflow.keras import layers, models
import identify_code.ideantify as idt
model = models.Sequential([
    # Note: activation='relu' already applies ReLU, so the explicit
    # layers.ReLU() after each Conv2D is redundant (though harmless).
    layers.Conv2D(64, (3, 3), activation='relu', input_shape=(50, 200, 1)),  # conv block 1, 3x3 kernels
    layers.ReLU(),
    layers.MaxPooling2D((2, 2)),  # pooling layer 1, 2x2 downsampling

    layers.Conv2D(128, (3, 3), activation='relu'),  # conv block 2, 3x3 kernels
    layers.ReLU(),
    layers.MaxPooling2D((2, 2)),  # pooling layer 2, 2x2 downsampling

    layers.Conv2D(256, (3, 3), activation='relu'),  # conv block 3, 3x3 kernels
    layers.ReLU(),
    layers.MaxPooling2D((2, 2)),  # pooling layer 3, 2x2 downsampling

    layers.Flatten(),  # flatten feature maps to connect the conv stack to the dense layers
    layers.Dense(1024, activation='relu'),  # fully connected layer for further feature extraction
    layers.Dense(idt.label_name_len * idt.char_set_len),  # one logit per (position, character) pair
    layers.Reshape([idt.label_name_len, idt.char_set_len]),
    layers.Softmax()  # per-position probability distribution over the character set
])
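# For reference, the shape arithmetic behind the layers above: each 3x3 conv
# with the default 'valid' padding shrinks both spatial dims by 2, and each
# 2x2 pool halves them (rounding down):
#   input 50x200 -> conv 48x198 -> pool 24x99 -> conv 22x97 -> pool 11x48
#   -> conv 9x46 -> pool 4x23 -> Flatten: 4*23*256 = 23552 features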
# Print the network structure (model.summary() prints directly and returns None,
# so wrapping it in print() would emit a stray "None")
model.summary()
# model.compile() configures training: the optimizer, loss function, and accuracy metric
model.compile(optimizer="adam",
              loss='categorical_crossentropy',
              metrics=['accuracy'])
epochs = 20
history = model.fit(
idt.train_ds,
validation_data=idt.val_ds,
epochs=epochs
)
# Model evaluation: pull the accuracy and loss curves from the training history
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)
idt.plt.figure(figsize=(12, 4))
idt.plt.subplot(1, 2, 1)
idt.plt.plot(epochs_range, acc, label='Training accuracy')
idt.plt.plot(epochs_range, val_acc, label='Validation accuracy')
idt.plt.legend(loc='lower right')
idt.plt.title('Training vs. validation accuracy')
idt.plt.subplot(1, 2, 2)
idt.plt.plot(epochs_range, loss, label='Training loss')
idt.plt.plot(epochs_range, val_loss, label='Validation loss')
idt.plt.legend(loc='upper right')
idt.plt.title('Training vs. validation loss')
idt.plt.show()
# Save the trained model
model.save('models/final_model.h5')
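
# ---------------------------------------------------------------------------
# For completeness, a minimal sketch of loading the saved checkpoint for
# inference. The preprocessing (50x200 grayscale scaled to [0, 1]) mirrors what
# the input_shape above implies, and CHAR_SET stands in for whatever alphabet
# identify_code.ideantify actually defines -- treat both as assumptions.
import numpy as np
import tensorflow as tf

CHAR_SET = list("0123456789abcdefghijklmnopqrstuvwxyz")  # assumed alphabet

def predict_captcha(image_path, model_path='models/final_model.h5'):
    model = tf.keras.models.load_model(model_path)
    img = tf.io.read_file(image_path)
    img = tf.image.decode_png(img, channels=1)            # grayscale, assuming PNG captchas
    img = tf.image.resize(img, [50, 200]) / 255.0         # match the training input shape
    pred = model.predict(tf.expand_dims(img, axis=0))[0]  # shape: (label_len, char_set_len)
    return ''.join(CHAR_SET[i] for i in np.argmax(pred, axis=-1))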
|
[
"import numpy as np\nfrom tensorflow.keras import layers, models\nimport identify_code.ideantify as idt\n\nmodel = models.Sequential([\n\n layers.Conv2D(64, (3, 3), activation='relu', input_shape=(50, 200, 1)), # 卷积层1,卷积核3*3\n layers.ReLU(),\n layers.MaxPooling2D((2, 2)), # 池化层1,2*2采样\n\n layers.Conv2D(128, (3, 3), activation='relu'), # 卷积层2,卷积核3*3\n layers.ReLU(),\n layers.MaxPooling2D((2, 2)), # 池化层2,2*2采样\n\n layers.Conv2D(256, (3, 3), activation='relu'), # 卷积层2,卷积核3*3\n layers.ReLU(),\n layers.MaxPooling2D((2, 2)), # 池化层2,2*2采样\n\n layers.Flatten(), # Flatten层,连接卷积层与全连接层\n layers.Dense(1024, activation='relu'), # 全连接层,特征进一步提取\n\n layers.Dense(idt.label_name_len * idt.char_set_len),\n layers.Reshape([idt.label_name_len, idt.char_set_len]),\n layers.Softmax() # 输出层,输出预期结果\n])\n# 打印网络结构\nprint(model.summary())\n\n# model.compile()方法用于在配置训练方法时,告知训练时用的优化器、损失函数和准确率评测标准\nmodel.compile(optimizer=\"adam\",\n loss='categorical_crossentropy',\n metrics=['accuracy'])\n\nepochs = 20\nhistory = model.fit(\n idt.train_ds,\n validation_data=idt.val_ds,\n epochs=epochs\n)\n\n# 模型评估\nacc = history.history['accuracy']\nval_acc = history.history['val_accuracy']\n\nloss = history.history['loss']\nval_loss = history.history['val_loss']\n\n\nepochs_range = range(epochs)\n\nidt.plt.figure(figsize=(12, 4))\nidt.plt.subplot(1, 2, 1)\n\nidt.plt.plot(epochs_range, acc, label='训练时的准确率')\nidt.plt.plot(epochs_range, val_acc, label='测试的准确率')\nidt.plt.legend(loc='lower right')\nidt.plt.title('训练和测试的准确率的折线图')\n\nidt.plt.subplot(1, 2, 2)\nidt.plt.plot(epochs_range, loss, label='训练时的误差率')\nidt.plt.plot(epochs_range, val_loss, label='测试的误差率')\nidt.plt.legend(loc='upper right')\nidt.plt.title('训练和测试的误差率的折线图')\nidt.plt.show()\n\n# 保存模型\nmodel.save('models/final_model.h5')\n",
"import numpy as np\nfrom tensorflow.keras import layers, models\nimport identify_code.ideantify as idt\nmodel = models.Sequential([layers.Conv2D(64, (3, 3), activation='relu',\n input_shape=(50, 200, 1)), layers.ReLU(), layers.MaxPooling2D((2, 2)),\n layers.Conv2D(128, (3, 3), activation='relu'), layers.ReLU(), layers.\n MaxPooling2D((2, 2)), layers.Conv2D(256, (3, 3), activation='relu'),\n layers.ReLU(), layers.MaxPooling2D((2, 2)), layers.Flatten(), layers.\n Dense(1024, activation='relu'), layers.Dense(idt.label_name_len * idt.\n char_set_len), layers.Reshape([idt.label_name_len, idt.char_set_len]),\n layers.Softmax()])\nprint(model.summary())\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=[\n 'accuracy'])\nepochs = 20\nhistory = model.fit(idt.train_ds, validation_data=idt.val_ds, epochs=epochs)\nacc = history.history['accuracy']\nval_acc = history.history['val_accuracy']\nloss = history.history['loss']\nval_loss = history.history['val_loss']\nepochs_range = range(epochs)\nidt.plt.figure(figsize=(12, 4))\nidt.plt.subplot(1, 2, 1)\nidt.plt.plot(epochs_range, acc, label='训练时的准确率')\nidt.plt.plot(epochs_range, val_acc, label='测试的准确率')\nidt.plt.legend(loc='lower right')\nidt.plt.title('训练和测试的准确率的折线图')\nidt.plt.subplot(1, 2, 2)\nidt.plt.plot(epochs_range, loss, label='训练时的误差率')\nidt.plt.plot(epochs_range, val_loss, label='测试的误差率')\nidt.plt.legend(loc='upper right')\nidt.plt.title('训练和测试的误差率的折线图')\nidt.plt.show()\nmodel.save('models/final_model.h5')\n",
"<import token>\nmodel = models.Sequential([layers.Conv2D(64, (3, 3), activation='relu',\n input_shape=(50, 200, 1)), layers.ReLU(), layers.MaxPooling2D((2, 2)),\n layers.Conv2D(128, (3, 3), activation='relu'), layers.ReLU(), layers.\n MaxPooling2D((2, 2)), layers.Conv2D(256, (3, 3), activation='relu'),\n layers.ReLU(), layers.MaxPooling2D((2, 2)), layers.Flatten(), layers.\n Dense(1024, activation='relu'), layers.Dense(idt.label_name_len * idt.\n char_set_len), layers.Reshape([idt.label_name_len, idt.char_set_len]),\n layers.Softmax()])\nprint(model.summary())\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=[\n 'accuracy'])\nepochs = 20\nhistory = model.fit(idt.train_ds, validation_data=idt.val_ds, epochs=epochs)\nacc = history.history['accuracy']\nval_acc = history.history['val_accuracy']\nloss = history.history['loss']\nval_loss = history.history['val_loss']\nepochs_range = range(epochs)\nidt.plt.figure(figsize=(12, 4))\nidt.plt.subplot(1, 2, 1)\nidt.plt.plot(epochs_range, acc, label='训练时的准确率')\nidt.plt.plot(epochs_range, val_acc, label='测试的准确率')\nidt.plt.legend(loc='lower right')\nidt.plt.title('训练和测试的准确率的折线图')\nidt.plt.subplot(1, 2, 2)\nidt.plt.plot(epochs_range, loss, label='训练时的误差率')\nidt.plt.plot(epochs_range, val_loss, label='测试的误差率')\nidt.plt.legend(loc='upper right')\nidt.plt.title('训练和测试的误差率的折线图')\nidt.plt.show()\nmodel.save('models/final_model.h5')\n",
"<import token>\n<assignment token>\nprint(model.summary())\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=[\n 'accuracy'])\n<assignment token>\nidt.plt.figure(figsize=(12, 4))\nidt.plt.subplot(1, 2, 1)\nidt.plt.plot(epochs_range, acc, label='训练时的准确率')\nidt.plt.plot(epochs_range, val_acc, label='测试的准确率')\nidt.plt.legend(loc='lower right')\nidt.plt.title('训练和测试的准确率的折线图')\nidt.plt.subplot(1, 2, 2)\nidt.plt.plot(epochs_range, loss, label='训练时的误差率')\nidt.plt.plot(epochs_range, val_loss, label='测试的误差率')\nidt.plt.legend(loc='upper right')\nidt.plt.title('训练和测试的误差率的折线图')\nidt.plt.show()\nmodel.save('models/final_model.h5')\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
99,360 |
25fdd55ed0d6c88f9fa6e6eaf45f3641264bdfff
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 22 11:40:56 2017
@author: Marcin
"""
import folium
import pandas as pd
danes = pd.read_excel("adresy.xlsx","Polska")
lats = list(danes["Lat"])
lons = list(danes["Lon"])
danep = pd.read_excel("adresy.xlsx","Pioneers")
latp = list(danep["Lat"])
lonp = list(danep["Lon"])
danee = pd.read_excel("adresy.xlsx","Explorers")
late = list(danee["Lat"])
lone = list(danee["Lon"])
danefp = pd.read_excel("adresy.xlsx","FinalistP")
latfp = list(danefp["Lat"])
lonfp = list(danefp["Lon"])
danefe = pd.read_excel("adresy.xlsx","FinalistE")
latfe = list(danefe["Lat"])
lonfe = list(danefe["Lon"])
map = folium.Map(location = [52.00, 21.00], zoom_start=5, tiles="Mapbox Bright")
fgs = folium.FeatureGroup(name = "Skywalkers")
for lt, ln in zip(lats, lons):
fgs.add_child(folium.CircleMarker(location=[float(lt),float(ln)], radius = 2,fill_color = 'Green', color = 'green'))
fgp = folium.FeatureGroup(name = "Pioneers")
for lt, ln in zip(latp, lonp):
fgp.add_child(folium.CircleMarker(location=[float(lt),float(ln)], radius = 2,fill_color = 'Red', color = 'Red'))
fge = folium.FeatureGroup(name = "Explorers")
for lt, ln in zip(late, lone):
fge.add_child(folium.CircleMarker(location=[float(lt),float(ln)], radius = 2,fill_color = 'Blue', color = 'Blue'))
fgfp = folium.FeatureGroup(name = "Pioneers")
for lt, ln in zip(latfp, lonfp):
fgfp.add_child(folium.CircleMarker(location=[float(lt),float(ln)], radius = 4,fill_color = 'Red', color = 'Red'))
fgfe = folium.FeatureGroup(name = "Explorers")
for lt, ln in zip(latfe, lonfe):
fgfe.add_child(folium.CircleMarker(location=[float(lt),float(ln)], radius = 4,fill_color = 'Blue', color = 'Blue'))
world_map.add_child(fgfe)
world_map.add_child(fgfp)
world_map.add_child(fgs)
world_map.add_child(fgp)
world_map.add_child(fge)
world_map.add_child(folium.LayerControl())
world_map.save("Map2.html")
|
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Nov 22 11:40:56 2017\n\n@author: Marcin\n\"\"\"\n\nimport folium\nimport pandas as pd\nfrom geopy.geocoders import Nominatim\n\ndanes = pd.read_excel(\"adresy.xlsx\",\"Polska\")\nlats = list(danes[\"Lat\"])\nlons = list(danes[\"Lon\"])\n\ndanep = pd.read_excel(\"adresy.xlsx\",\"Pioneers\")\nlatp = list(danep[\"Lat\"])\nlonp = list(danep[\"Lon\"])\n\ndanee = pd.read_excel(\"adresy.xlsx\",\"Explorers\")\nlate = list(danee[\"Lat\"])\nlone = list(danee[\"Lon\"])\n\ndanefp = pd.read_excel(\"adresy.xlsx\",\"FinalistP\")\nlatfp = list(danefp[\"Lat\"])\nlonfp = list(danefp[\"Lon\"])\n\ndanefe = pd.read_excel(\"adresy.xlsx\",\"FinalistE\")\nlatfe = list(danefe[\"Lat\"])\nlonfe = list(danefe[\"Lon\"])\n\nmap = folium.Map(location = [52.00, 21.00], zoom_start=5, tiles=\"Mapbox Bright\")\n\nfgs = folium.FeatureGroup(name = \"Skywalkers\")\nfor lt, ln in zip(lats, lons):\n fgs.add_child(folium.CircleMarker(location=[float(lt),float(ln)], radius = 2,fill_color = 'Green', color = 'green'))\n \n \nfgp = folium.FeatureGroup(name = \"Pioneers\")\nfor lt, ln in zip(latp, lonp):\n fgp.add_child(folium.CircleMarker(location=[float(lt),float(ln)], radius = 2,fill_color = 'Red', color = 'Red'))\n \nfge = folium.FeatureGroup(name = \"Explorers\")\nfor lt, ln in zip(late, lone):\n fge.add_child(folium.CircleMarker(location=[float(lt),float(ln)], radius = 2,fill_color = 'Blue', color = 'Blue'))\n\nfgfp = folium.FeatureGroup(name = \"Pioneers\")\nfor lt, ln in zip(latfp, lonfp):\n fgfp.add_child(folium.CircleMarker(location=[float(lt),float(ln)], radius = 4,fill_color = 'Red', color = 'Red'))\n \nfgfe = folium.FeatureGroup(name = \"Explorers\")\nfor lt, ln in zip(latfe, lonfe):\n fgfe.add_child(folium.CircleMarker(location=[float(lt),float(ln)], radius = 4,fill_color = 'Blue', color = 'Blue')) \n\n\nmap.add_child(fgfe)\nmap.add_child(fgfp)\nmap.add_child(fgs)\nmap.add_child(fgp)\nmap.add_child(fge)\nmap.add_child(folium.LayerControl())\nmap.save(\"Map2.html\")\n\n",
"<docstring token>\nimport folium\nimport pandas as pd\nfrom geopy.geocoders import Nominatim\ndanes = pd.read_excel('adresy.xlsx', 'Polska')\nlats = list(danes['Lat'])\nlons = list(danes['Lon'])\ndanep = pd.read_excel('adresy.xlsx', 'Pioneers')\nlatp = list(danep['Lat'])\nlonp = list(danep['Lon'])\ndanee = pd.read_excel('adresy.xlsx', 'Explorers')\nlate = list(danee['Lat'])\nlone = list(danee['Lon'])\ndanefp = pd.read_excel('adresy.xlsx', 'FinalistP')\nlatfp = list(danefp['Lat'])\nlonfp = list(danefp['Lon'])\ndanefe = pd.read_excel('adresy.xlsx', 'FinalistE')\nlatfe = list(danefe['Lat'])\nlonfe = list(danefe['Lon'])\nmap = folium.Map(location=[52.0, 21.0], zoom_start=5, tiles='Mapbox Bright')\nfgs = folium.FeatureGroup(name='Skywalkers')\nfor lt, ln in zip(lats, lons):\n fgs.add_child(folium.CircleMarker(location=[float(lt), float(ln)],\n radius=2, fill_color='Green', color='green'))\nfgp = folium.FeatureGroup(name='Pioneers')\nfor lt, ln in zip(latp, lonp):\n fgp.add_child(folium.CircleMarker(location=[float(lt), float(ln)],\n radius=2, fill_color='Red', color='Red'))\nfge = folium.FeatureGroup(name='Explorers')\nfor lt, ln in zip(late, lone):\n fge.add_child(folium.CircleMarker(location=[float(lt), float(ln)],\n radius=2, fill_color='Blue', color='Blue'))\nfgfp = folium.FeatureGroup(name='Pioneers')\nfor lt, ln in zip(latfp, lonfp):\n fgfp.add_child(folium.CircleMarker(location=[float(lt), float(ln)],\n radius=4, fill_color='Red', color='Red'))\nfgfe = folium.FeatureGroup(name='Explorers')\nfor lt, ln in zip(latfe, lonfe):\n fgfe.add_child(folium.CircleMarker(location=[float(lt), float(ln)],\n radius=4, fill_color='Blue', color='Blue'))\nmap.add_child(fgfe)\nmap.add_child(fgfp)\nmap.add_child(fgs)\nmap.add_child(fgp)\nmap.add_child(fge)\nmap.add_child(folium.LayerControl())\nmap.save('Map2.html')\n",
"<docstring token>\n<import token>\ndanes = pd.read_excel('adresy.xlsx', 'Polska')\nlats = list(danes['Lat'])\nlons = list(danes['Lon'])\ndanep = pd.read_excel('adresy.xlsx', 'Pioneers')\nlatp = list(danep['Lat'])\nlonp = list(danep['Lon'])\ndanee = pd.read_excel('adresy.xlsx', 'Explorers')\nlate = list(danee['Lat'])\nlone = list(danee['Lon'])\ndanefp = pd.read_excel('adresy.xlsx', 'FinalistP')\nlatfp = list(danefp['Lat'])\nlonfp = list(danefp['Lon'])\ndanefe = pd.read_excel('adresy.xlsx', 'FinalistE')\nlatfe = list(danefe['Lat'])\nlonfe = list(danefe['Lon'])\nmap = folium.Map(location=[52.0, 21.0], zoom_start=5, tiles='Mapbox Bright')\nfgs = folium.FeatureGroup(name='Skywalkers')\nfor lt, ln in zip(lats, lons):\n fgs.add_child(folium.CircleMarker(location=[float(lt), float(ln)],\n radius=2, fill_color='Green', color='green'))\nfgp = folium.FeatureGroup(name='Pioneers')\nfor lt, ln in zip(latp, lonp):\n fgp.add_child(folium.CircleMarker(location=[float(lt), float(ln)],\n radius=2, fill_color='Red', color='Red'))\nfge = folium.FeatureGroup(name='Explorers')\nfor lt, ln in zip(late, lone):\n fge.add_child(folium.CircleMarker(location=[float(lt), float(ln)],\n radius=2, fill_color='Blue', color='Blue'))\nfgfp = folium.FeatureGroup(name='Pioneers')\nfor lt, ln in zip(latfp, lonfp):\n fgfp.add_child(folium.CircleMarker(location=[float(lt), float(ln)],\n radius=4, fill_color='Red', color='Red'))\nfgfe = folium.FeatureGroup(name='Explorers')\nfor lt, ln in zip(latfe, lonfe):\n fgfe.add_child(folium.CircleMarker(location=[float(lt), float(ln)],\n radius=4, fill_color='Blue', color='Blue'))\nmap.add_child(fgfe)\nmap.add_child(fgfp)\nmap.add_child(fgs)\nmap.add_child(fgp)\nmap.add_child(fge)\nmap.add_child(folium.LayerControl())\nmap.save('Map2.html')\n",
"<docstring token>\n<import token>\n<assignment token>\nfor lt, ln in zip(lats, lons):\n fgs.add_child(folium.CircleMarker(location=[float(lt), float(ln)],\n radius=2, fill_color='Green', color='green'))\n<assignment token>\nfor lt, ln in zip(latp, lonp):\n fgp.add_child(folium.CircleMarker(location=[float(lt), float(ln)],\n radius=2, fill_color='Red', color='Red'))\n<assignment token>\nfor lt, ln in zip(late, lone):\n fge.add_child(folium.CircleMarker(location=[float(lt), float(ln)],\n radius=2, fill_color='Blue', color='Blue'))\n<assignment token>\nfor lt, ln in zip(latfp, lonfp):\n fgfp.add_child(folium.CircleMarker(location=[float(lt), float(ln)],\n radius=4, fill_color='Red', color='Red'))\n<assignment token>\nfor lt, ln in zip(latfe, lonfe):\n fgfe.add_child(folium.CircleMarker(location=[float(lt), float(ln)],\n radius=4, fill_color='Blue', color='Blue'))\nmap.add_child(fgfe)\nmap.add_child(fgfp)\nmap.add_child(fgs)\nmap.add_child(fgp)\nmap.add_child(fge)\nmap.add_child(folium.LayerControl())\nmap.save('Map2.html')\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
99,361 |
d0489c855ad7fd336450c7ff6df78fd8d4d37dc0
|
from django.urls import path
from .views import (ListCreateComment, RetrieveUpdateDeleteComment,
CommentEditHistoryAPIView, Like)
urlpatterns = [
path('articles/<article_id>/comments/', ListCreateComment.as_view(),
name="comment_list"),
path('articles/<article_id>/comments/<int:pk>/',
RetrieveUpdateDeleteComment.as_view(), name="comment_detail"),
path('articles/<article_id>/comments/<comment_id>/like/',
Like.as_view(), name="like_comment"),
path('articles/<article_id>/comment/<comment_id>/',
Like.as_view(), name="get_likes"),
path('articles/<article_id>/comments/<int:pk>/update_history/',
CommentEditHistoryAPIView.as_view(), name="update_history")
]
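
# For a quick sanity check of the route names above, reverse() resolves them
# back to paths (shown assuming this urlconf is included at the project root;
# the ids are hypothetical):
#
#   from django.urls import reverse
#   reverse("comment_list", kwargs={"article_id": "my-article"})
#   # -> '/articles/my-article/comments/'
#   reverse("comment_detail", kwargs={"article_id": "my-article", "pk": 7})
#   # -> '/articles/my-article/comments/7/'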
|
[
"from django.urls import path\nfrom .views import (ListCreateComment, RetrieveUpdateDeleteComment,\n CommentEditHistoryAPIView, Like)\n\n\nurlpatterns = [\n path('articles/<article_id>/comments/', ListCreateComment.as_view(),\n name=\"comment_list\"),\n path('articles/<article_id>/comments/<int:pk>/',\n RetrieveUpdateDeleteComment.as_view(), name=\"comment_detail\"),\n path('articles/<article_id>/comments/<comment_id>/like/',\n Like.as_view(), name=\"like_comment\"),\n path('articles/<article_id>/comment/<comment_id>/',\n Like.as_view(), name=\"get_likes\"),\n path('articles/<article_id>/comments/<int:pk>/update_history/',\n CommentEditHistoryAPIView.as_view(), name=\"update_history\")\n]\n",
"from django.urls import path\nfrom .views import ListCreateComment, RetrieveUpdateDeleteComment, CommentEditHistoryAPIView, Like\nurlpatterns = [path('articles/<article_id>/comments/', ListCreateComment.\n as_view(), name='comment_list'), path(\n 'articles/<article_id>/comments/<int:pk>/', RetrieveUpdateDeleteComment\n .as_view(), name='comment_detail'), path(\n 'articles/<article_id>/comments/<comment_id>/like/', Like.as_view(),\n name='like_comment'), path(\n 'articles/<article_id>/comment/<comment_id>/', Like.as_view(), name=\n 'get_likes'), path(\n 'articles/<article_id>/comments/<int:pk>/update_history/',\n CommentEditHistoryAPIView.as_view(), name='update_history')]\n",
"<import token>\nurlpatterns = [path('articles/<article_id>/comments/', ListCreateComment.\n as_view(), name='comment_list'), path(\n 'articles/<article_id>/comments/<int:pk>/', RetrieveUpdateDeleteComment\n .as_view(), name='comment_detail'), path(\n 'articles/<article_id>/comments/<comment_id>/like/', Like.as_view(),\n name='like_comment'), path(\n 'articles/<article_id>/comment/<comment_id>/', Like.as_view(), name=\n 'get_likes'), path(\n 'articles/<article_id>/comments/<int:pk>/update_history/',\n CommentEditHistoryAPIView.as_view(), name='update_history')]\n",
"<import token>\n<assignment token>\n"
] | false |
99,362 |
b237a966f2420175b2f4e002f27fc0850ce6d96d
|
import copy
import json
import math

import api.utils as utils
from elasticsearch import Elasticsearch
from flask import request, make_response

JSON_MIME_TYPE = 'application/json; charset=utf-8'

# Config vars
calculate_ndcg_score = True
# ndcg_scoring_file = "/Users/pacmac/Documents/GitHub/KTH_Projects/DD2476PersonalizedSearch/backend/api/markets_fynn.txt"
ndcg_scoring_file = "/Users/pacmac/Documents/GitHub/KTH_Projects/DD2476PersonalizedSearch/backend/api/markets_artin.txt"

es = Elasticsearch()
def success_response(result, message=''):
    payload = {'status': 'success',
               'message': message,
               'result': result}
    return json_response(json.dumps(payload))


def json_response(data='', status=200, headers=None):
    headers = headers or {}
    if 'Content-Type' not in headers:
        headers['Content-Type'] = JSON_MIME_TYPE
    return make_response(data, status, headers)


def success_message(message):
    payload = {'status': 'success',
               'result': message}
    return json_response(json.dumps(payload))
def regular_search():
data = request.args
results = utils.search(data["query"], es)
    # Print LaTeX table rows of the ranking (rank & doc id & empty columns) for the report
print()
s = ""
i = 1
for elem in results["hits"]["hits"]:
s += str(i) +" & " +elem["_id"] + " & " + " & " +"\\\\" + "\n"
i += 1
print(s)
return success_response(results)
def ndcg(regular_search, personalized_search):
    # Read the manual relevance ratings, one "<doc_id> <rating>" pair per line.
    with open(ndcg_scoring_file, "r") as f:
        lines = f.readlines()
    ratings = dict()
    for line in lines:
        (doc_id, rating) = line.split(" ")
        ratings[doc_id] = int(rating)
    optimal_results = sorted(ratings.values(), reverse=True)

    # Ideal DCG: the ratings arranged in the best possible order.
    ideal = 0
    for i in range(len(optimal_results)):
        one_indexed = i + 1
        ideal += (optimal_results[i] / math.log2(one_indexed + 1))

    # DCG of the unpersonalized ranking.
    regular = 0
    for i in range(len(optimal_results)):
        one_indexed = i + 1
        regular += (ratings[regular_search[i]["_id"]] / math.log2(one_indexed + 1))

    # DCG of the personalized ranking.
    personalized = 0
    for i in range(len(optimal_results)):
        one_indexed = i + 1
        personalized += (ratings[personalized_search[i]["_id"]] / math.log2(one_indexed + 1))

    print("Regular Search NDCG: ", regular/ideal, "DCG:", regular)
    print("Personalized Search NDCG:", personalized/ideal, "DCG:", personalized)
    print(" "*37 + "Optimal DCG:", ideal)
def update_user():
data = request.args
# retrieve user
user = utils.get_user(data["id"], es)
    # If the user exists, update their click history via a painless script
if user["hits"]["total"]["value"] == 1:
body = {
"script": {
"source": "if (ctx._source.history.contains(params.click)) { ctx._source.history.remove(ctx._source.history.indexOf(params.click))} ctx._source.history.add(params.click)",
"lang": "painless",
"params": {
"click": data["click"]
}
}
}
results = es.update(index="users", id=data["id"], body=body)
# Add user to index if user does not exist
else:
history = [data["click"]]
doc = {"user_id":data["id"], "history":history}
results = es.index(index='users', id=data["id"], body=doc)
return success_response(results)
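
# The painless script above moves a re-clicked id to the end, so each user's
# history stays deduplicated and ordered oldest-to-newest. Resulting document
# shape (ids hypothetical):
#   {"user_id": "u42", "history": ["news-17", "news-3", "news-88"]}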
# Fetch the stored user document as-is (debugging helper)
def get_user():
data = request.args
results = utils.get_user(data["id"], es)
return success_response(results)
def get_history():
data = request.args
user = utils.get_user(data["id"], es)
if user["hits"]["total"]["value"] != 1:
return success_response({"docs": []})
    # Keep only the 10 most recent clicks, newest first
    history = user["hits"]["hits"][0]["_source"]["history"]
    if len(history) > 10:
        history = history[-10:]
    history.reverse()
docstoretrieve = {"docs" : [{"_id": elem} for elem in history]}
if len(docstoretrieve["docs"]) == 0:
return success_response([])
docs = es.mget(body=docstoretrieve, index="news")
return success_response(docs)
def delete_user():
data = request.args
results = es.delete(index="users", id=data["id"])
return success_response(results)
def get_recommendations():
    data = request.args
    # Among users whose history contains this document, surface other documents
    # that co-occur unusually often (significant_terms, excluding the seed) --
    # a lightweight "users who read this also read ..." recommender.
    body = {
"query": {
"bool": {
"must": {
"term": {
"history.keyword": data["id"]
}
}
}
},
"aggs": {
"recommendations": {
"significant_terms": {
"field": "history.keyword",
"exclude": data["id"],
"min_doc_count": 1
}
}
}
}
recommendations = es.search(index = "users", body = body)
docstoretrieve = {"docs" : [{"_id": elem["key"]} for elem in recommendations["aggregations"]["recommendations"]["buckets"]]}
if len(docstoretrieve["docs"]) == 0:
return success_response([])
docs = es.mget(body=docstoretrieve, index="news")
return success_response(docs)
def get_news_by_id():
data = request.args
results = es.get(index="news", id=data["id"])
return success_response(results)
def personalized_search():
data = request.args
user = utils.get_user(data["id"], es)
    news_fields = ['title', 'category', 'body']
# Regular search
search_results = utils.search(data["query"], es)
# Return regular search if user does not exist
if user["hits"]["total"]["value"] != 1:
return success_response(search_results["hits"]["hits"])
# ------------------------------------------
history = user["hits"]["hits"][0]["_source"]["history"]
if len(history) > 10:
history = history[-10:]
# Term vectors of history ids.
results = utils.get_term_vectors(history, news_fields, es)
    ret = dict()  # per-field user profile: {field: {term: summed score}}
    # per-field occurrence counts, used below to turn the sums into means
    normalization = dict()
for c in news_fields:
ret[c] = dict()
normalization[c] = dict()
for doc in results['docs']:
if "term_vectors" in doc:
for k in news_fields:
if k in doc["term_vectors"]:
term_vec = doc["term_vectors"][k]["terms"]
for t, t_value in term_vec.items():
if t in ret[k]:
                        # accumulate; turned into a mean below
ret[k][t] += t_value["score"]
normalization[k][t] += 1
else:
ret[k][t] = t_value["score"]
normalization[k][t] = 1
    # compute the mean score of each term across the history documents
    for field in ret.keys():
        for term in ret[field].keys():
            ret[field][term] = ret[field][term] / normalization[field][term]
# Normalize
for key, value in ret.items():
ret[key] = utils.normalize_vec(value)
    # Obtain a term vector for each document in the search results
ids = []
docs_vectors = dict()
# find doc ids
for s_rslt in search_results["hits"]["hits"]:
ids.append(s_rslt["_id"])
# construct doc vectors
results_doc = utils.get_term_vectors(ids, news_fields, es)
for doc in results_doc['docs']:
if "term_vectors" in doc:
docs_vectors[doc["_id"]] = dict()
for k in news_fields:
if k in doc["term_vectors"]:
docs_vectors[doc["_id"]][k] = dict()
term_vec = doc["term_vectors"][k]["terms"]
for t, t_value in term_vec.items():
docs_vectors[doc["_id"]][k][t] = t_value["score"]
# Doc 1
# body: "term" ; score ... "term_n" ; score_n
# title: "term" ; score ... "term_n" ; score_n
# category: "term" ; score ... "term_n" ; score_n
# Doc 2
# body: "term" ; score ... "term_n" ; score_n
# title: "term" ; score ... "term_n" ; score_n
# category: "term" ; score ... "term_n" ; score_n
# (Cosine similarity) Dot product and sort search results
    # user vector = w_1*body + w_2*category + w_3*title
weights = dict()
weights["body"] = 1
weights["category"] = 2
weights["title"] = 2.5
user_vector = utils.aggregate_vecs(ret, weights)
scores = dict()
for doc, vector in docs_vectors.items():
for key, value in vector.items():
vector[key] = utils.normalize_vec(value)
document_vector = utils.aggregate_vecs(vector, weights)
score = utils.cosine_similarity(document_vector, user_vector)
scores[doc] = score
    # new_score = (1 - p) * old_score/norm_old + p * personalized_score/norm_new
    p = 1.0  # p = 1.0 means the final ranking uses only the personalized score
    # normalize the old and new scores so they are comparable
norm_old = 0
for s_rslt in search_results["hits"]["hits"]:
norm_old += s_rslt['_score']
norm_new = 0
for score in scores.values():
norm_new += score
# Store copy of regular search to calculate NDCG
regular_s = copy.deepcopy(search_results["hits"]["hits"])
# change documents score
for s_rslt in search_results["hits"]["hits"]:
s_rslt['_score'] = (1-p) * s_rslt['_score']/norm_old + p*scores[s_rslt['_id']]/norm_new
# reorder documents
search_results["hits"]["hits"] = sorted(search_results["hits"]["hits"], key=lambda k: k['_score'], reverse=True)
if calculate_ndcg_score:
ndcg(regular_s, search_results["hits"]["hits"])
    # Print LaTeX table rows of the re-ranked results for the report
print()
s = ""
i = 1
for elem in search_results["hits"]["hits"]:
s += str(i) +" & " + " & " + elem["_id"] + " & " +"\\\\" + "\n"
i += 1
print(s)
return success_response(search_results["hits"]["hits"])
|
[
"import elasticsearch\nimport api.utils as utils\nfrom flask import request, g, make_response\nimport json\nJSON_MIME_TYPE = 'application/json; charset=utf-8'\nimport math\nimport copy\nfrom elasticsearch import Elasticsearch\n\n# Config var\ncalculate_ndcg_score = True\n#ndcg_scorring_file = \"/Users/pacmac/Documents/GitHub/KTH_Projects/DD2476PersonalizedSearch/backend/api/markets_fynn.txt\"\nndcg_scorring_file = \"/Users/pacmac/Documents/GitHub/KTH_Projects/DD2476PersonalizedSearch/backend/api/markets_artin.txt\"\n\n\nes = Elasticsearch()\ndef success_response(result, message=''):\n format = {'status': 'success',\n 'message': message,\n 'result': result}\n return json_response(json.dumps(format))\n\ndef json_response(data='', status=200, headers=None):\n headers = headers or {}\n if 'Content-Type' not in headers:\n headers['Content-Type'] = JSON_MIME_TYPE\n return make_response(data, status, headers)\n\ndef success_message(message):\n format = {'status': 'success',\n 'result': message}\n\n return json_response(json.dumps(format))\n\ndef regular_search():\n data = request.args\n results = utils.search(data[\"query\"], es)\n # print for report\n print()\n s = \"\"\n i = 1\n for elem in results[\"hits\"][\"hits\"]:\n s += str(i) +\" & \" +elem[\"_id\"] + \" & \" + \" & \" +\"\\\\\\\\\" + \"\\n\"\n i += 1\n print(s)\n\n return success_response(results)\n\ndef ndcg(regular_search, personalized_search):\n f = open(ndcg_scorring_file, \"r\")\n lines = f.readlines()\n ratings = dict()\n for line in lines:\n (id, rating) = line.split(\" \")\n ratings[id] = int(rating)\n optimal_results = sorted(ratings.values(), reverse=True)\n\n ideal = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n ideal += (optimal_results[i] / math.log2(oneindexedI + 1))\n \n regular = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n regular += (ratings[regular_search[i][\"_id\"]] / math.log2(oneindexedI + 1))\n\n personalized = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n personalized += (ratings[personalized_search[i][\"_id\"]] / math.log2(oneindexedI + 1))\n\n print(\"Regular Search NDCG: \", regular/ideal, \"DCG:\", regular)\n print(\"Personalized Search NDCG:\", personalized/ideal, \"DCG:\", personalized)\n print(\" \"*37 + \"Optimal DCG:\", ideal)\n\n\n \ndef update_user():\n data = request.args\n # retrieve user\n user = utils.get_user(data[\"id\"], es)\n # check if user exists and update if so\n if user[\"hits\"][\"total\"][\"value\"] == 1:\n body = {\n \"script\": {\n \"source\": \"if (ctx._source.history.contains(params.click)) { ctx._source.history.remove(ctx._source.history.indexOf(params.click))} ctx._source.history.add(params.click)\",\n \"lang\": \"painless\",\n \"params\": {\n \"click\": data[\"click\"]\n }\n }\n } \n results = es.update(index=\"users\", id=data[\"id\"], body=body)\n # Add user to index if user does not exist\n else:\n history = [data[\"click\"]]\n doc = {\"user_id\":data[\"id\"], \"history\":history}\n results = es.index(index='users', id=data[\"id\"], body=doc)\n return success_response(results)\n\n# This method is to fetch the user details (DEBUGGING)\ndef get_user():\n data = request.args\n results = utils.get_user(data[\"id\"], es)\n return success_response(results)\n\n\ndef get_history():\n data = request.args\n user = utils.get_user(data[\"id\"], es)\n if user[\"hits\"][\"total\"][\"value\"] != 1:\n return success_response({\"docs\": []})\n # ------------------------------------------\n history = 
user[\"hits\"][\"hits\"][0][\"_source\"][\"history\"]\n if len(history) > 10:\n history = history[-10:]\n \n history.reverse()\n docstoretrieve = {\"docs\" : [{\"_id\": elem} for elem in history]}\n if len(docstoretrieve[\"docs\"]) == 0:\n return success_response([])\n docs = es.mget(body=docstoretrieve, index=\"news\")\n return success_response(docs)\n\n\ndef delete_user():\n data = request.args\n results = es.delete(index=\"users\", id=data[\"id\"])\n return success_response(results)\n\n\ndef get_recommendations():\n data = request.args\n body = {\n \"query\": {\n \"bool\": {\n \"must\": {\n \"term\": {\n \"history.keyword\": data[\"id\"]\n }\n }\n }\n },\n \"aggs\": {\n \"recommendations\": {\n \"significant_terms\": {\n \"field\": \"history.keyword\",\n \"exclude\": data[\"id\"],\n \"min_doc_count\": 1\n }\n }\n }\n }\n recommendations = es.search(index = \"users\", body = body)\n docstoretrieve = {\"docs\" : [{\"_id\": elem[\"key\"]} for elem in recommendations[\"aggregations\"][\"recommendations\"][\"buckets\"]]}\n if len(docstoretrieve[\"docs\"]) == 0:\n return success_response([])\n docs = es.mget(body=docstoretrieve, index=\"news\")\n return success_response(docs)\n\ndef get_news_by_id():\n data = request.args\n results = es.get(index=\"news\", id=data[\"id\"])\n return success_response(results)\n\ndef personalized_search():\n data = request.args\n user = utils.get_user(data[\"id\"], es)\n news_fields = ['title','category','body']\n\n # Regular search\n search_results = utils.search(data[\"query\"], es)\n\n # Return regular search if user does not exist\n if user[\"hits\"][\"total\"][\"value\"] != 1:\n return success_response(search_results[\"hits\"][\"hits\"])\n # ------------------------------------------\n\n history = user[\"hits\"][\"hits\"][0][\"_source\"][\"history\"]\n if len(history) > 10:\n history = history[-10:]\n \n # Term vectors of history ids. \n results = utils.get_term_vectors(history, news_fields, es)\n ret = dict()\n # to compute the mean\n normalization = dict()\n for c in news_fields:\n ret[c] = dict()\n normalization[c] = dict()\n for doc in results['docs']:\n if \"term_vectors\" in doc:\n for k in news_fields:\n if k in doc[\"term_vectors\"]:\n term_vec = doc[\"term_vectors\"][k][\"terms\"]\n for t, t_value in term_vec.items():\n if t in ret[k]:\n # change it with the mean\n ret[k][t] += t_value[\"score\"]\n normalization[k][t] += 1\n else:\n ret[k][t] = t_value[\"score\"]\n normalization[k][t] = 1\n # compute the mean\n for field in ret.keys():\n for term in ret[k].keys():\n ret[k][t] = ret[k][term]/normalization[k][term]\n \n # Normalize\n for key, value in ret.items():\n ret[key] = utils.normalize_vec(value)\n\n # Obtain documents vectors \n ids = []\n docs_vectors = dict()\n # find doc ids \n for s_rslt in search_results[\"hits\"][\"hits\"]:\n ids.append(s_rslt[\"_id\"])\n # construct doc vectors\n results_doc = utils.get_term_vectors(ids, news_fields, es)\n for doc in results_doc['docs']:\n if \"term_vectors\" in doc:\n docs_vectors[doc[\"_id\"]] = dict()\n for k in news_fields:\n if k in doc[\"term_vectors\"]:\n docs_vectors[doc[\"_id\"]][k] = dict()\n term_vec = doc[\"term_vectors\"][k][\"terms\"] \n for t, t_value in term_vec.items():\n docs_vectors[doc[\"_id\"]][k][t] = t_value[\"score\"]\n\n # Doc 1\n # body: \"term\" ; score ... \"term_n\" ; score_n\n # title: \"term\" ; score ... \"term_n\" ; score_n\n # category: \"term\" ; score ... \"term_n\" ; score_n\n # Doc 2\n # body: \"term\" ; score ... 
\"term_n\" ; score_n\n # title: \"term\" ; score ... \"term_n\" ; score_n\n # category: \"term\" ; score ... \"term_n\" ; score_n\n\n # (Cosine similarity) Dot product and sort search results\n\n # user vector = w_1*body_vector + w_1*category + w_3*title\n weights = dict()\n weights[\"body\"] = 1\n weights[\"category\"] = 2\n weights[\"title\"] = 2.5\n user_vector = utils.aggregate_vecs(ret, weights)\n\n scores = dict()\n for doc, vector in docs_vectors.items():\n for key, value in vector.items():\n vector[key] = utils.normalize_vec(value)\n document_vector = utils.aggregate_vecs(vector, weights)\n score = utils.cosine_similarity(document_vector, user_vector)\n scores[doc] = score\n \n\n # new_score = old_score + alpha*user_vector * doc_score\n p = 1.0\n # normlize the old_score and new_score\n norm_old = 0\n for s_rslt in search_results[\"hits\"][\"hits\"]:\n norm_old += s_rslt['_score']\n\n norm_new = 0\n for score in scores.values():\n norm_new += score\n\n # Store copy of regular search to calculate NDCG\n regular_s = copy.deepcopy(search_results[\"hits\"][\"hits\"])\n\n # change documents score\n for s_rslt in search_results[\"hits\"][\"hits\"]:\n s_rslt['_score'] = (1-p) * s_rslt['_score']/norm_old + p*scores[s_rslt['_id']]/norm_new\n # reorder documents\n search_results[\"hits\"][\"hits\"] = sorted(search_results[\"hits\"][\"hits\"], key=lambda k: k['_score'], reverse=True)\n\n if calculate_ndcg_score:\n ndcg(regular_s, search_results[\"hits\"][\"hits\"])\n \n # print for report\n print()\n s = \"\"\n i = 1\n for elem in search_results[\"hits\"][\"hits\"]:\n s += str(i) +\" & \" + \" & \" + elem[\"_id\"] + \" & \" +\"\\\\\\\\\" + \"\\n\"\n i += 1\n print(s)\n return success_response(search_results[\"hits\"][\"hits\"])\n\n\n\n\n",
"import elasticsearch\nimport api.utils as utils\nfrom flask import request, g, make_response\nimport json\nJSON_MIME_TYPE = 'application/json; charset=utf-8'\nimport math\nimport copy\nfrom elasticsearch import Elasticsearch\ncalculate_ndcg_score = True\nndcg_scorring_file = (\n '/Users/pacmac/Documents/GitHub/KTH_Projects/DD2476PersonalizedSearch/backend/api/markets_artin.txt'\n )\nes = Elasticsearch()\n\n\ndef success_response(result, message=''):\n format = {'status': 'success', 'message': message, 'result': result}\n return json_response(json.dumps(format))\n\n\ndef json_response(data='', status=200, headers=None):\n headers = headers or {}\n if 'Content-Type' not in headers:\n headers['Content-Type'] = JSON_MIME_TYPE\n return make_response(data, status, headers)\n\n\ndef success_message(message):\n format = {'status': 'success', 'result': message}\n return json_response(json.dumps(format))\n\n\ndef regular_search():\n data = request.args\n results = utils.search(data['query'], es)\n print()\n s = ''\n i = 1\n for elem in results['hits']['hits']:\n s += str(i) + ' & ' + elem['_id'] + ' & ' + ' & ' + '\\\\\\\\' + '\\n'\n i += 1\n print(s)\n return success_response(results)\n\n\ndef ndcg(regular_search, personalized_search):\n f = open(ndcg_scorring_file, 'r')\n lines = f.readlines()\n ratings = dict()\n for line in lines:\n id, rating = line.split(' ')\n ratings[id] = int(rating)\n optimal_results = sorted(ratings.values(), reverse=True)\n ideal = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n ideal += optimal_results[i] / math.log2(oneindexedI + 1)\n regular = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n regular += ratings[regular_search[i]['_id']] / math.log2(\n oneindexedI + 1)\n personalized = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n personalized += ratings[personalized_search[i]['_id']] / math.log2(\n oneindexedI + 1)\n print('Regular Search NDCG: ', regular / ideal, 'DCG:', regular)\n print('Personalized Search NDCG:', personalized / ideal, 'DCG:',\n personalized)\n print(' ' * 37 + 'Optimal DCG:', ideal)\n\n\ndef update_user():\n data = request.args\n user = utils.get_user(data['id'], es)\n if user['hits']['total']['value'] == 1:\n body = {'script': {'source':\n 'if (ctx._source.history.contains(params.click)) { ctx._source.history.remove(ctx._source.history.indexOf(params.click))} ctx._source.history.add(params.click)'\n , 'lang': 'painless', 'params': {'click': data['click']}}}\n results = es.update(index='users', id=data['id'], body=body)\n else:\n history = [data['click']]\n doc = {'user_id': data['id'], 'history': history}\n results = es.index(index='users', id=data['id'], body=doc)\n return success_response(results)\n\n\ndef get_user():\n data = request.args\n results = utils.get_user(data['id'], es)\n return success_response(results)\n\n\ndef get_history():\n data = request.args\n user = utils.get_user(data['id'], es)\n if user['hits']['total']['value'] != 1:\n return success_response({'docs': []})\n history = user['hits']['hits'][0]['_source']['history']\n if len(history) > 10:\n history = history[-10:]\n history.reverse()\n docstoretrieve = {'docs': [{'_id': elem} for elem in history]}\n if len(docstoretrieve['docs']) == 0:\n return success_response([])\n docs = es.mget(body=docstoretrieve, index='news')\n return success_response(docs)\n\n\ndef delete_user():\n data = request.args\n results = es.delete(index='users', id=data['id'])\n return success_response(results)\n\n\ndef get_recommendations():\n data = 
request.args\n body = {'query': {'bool': {'must': {'term': {'history.keyword': data[\n 'id']}}}}, 'aggs': {'recommendations': {'significant_terms': {\n 'field': 'history.keyword', 'exclude': data['id'], 'min_doc_count':\n 1}}}}\n recommendations = es.search(index='users', body=body)\n docstoretrieve = {'docs': [{'_id': elem['key']} for elem in\n recommendations['aggregations']['recommendations']['buckets']]}\n if len(docstoretrieve['docs']) == 0:\n return success_response([])\n docs = es.mget(body=docstoretrieve, index='news')\n return success_response(docs)\n\n\ndef get_news_by_id():\n data = request.args\n results = es.get(index='news', id=data['id'])\n return success_response(results)\n\n\ndef personalized_search():\n data = request.args\n user = utils.get_user(data['id'], es)\n news_fields = ['title', 'category', 'body']\n search_results = utils.search(data['query'], es)\n if user['hits']['total']['value'] != 1:\n return success_response(search_results['hits']['hits'])\n history = user['hits']['hits'][0]['_source']['history']\n if len(history) > 10:\n history = history[-10:]\n results = utils.get_term_vectors(history, news_fields, es)\n ret = dict()\n normalization = dict()\n for c in news_fields:\n ret[c] = dict()\n normalization[c] = dict()\n for doc in results['docs']:\n if 'term_vectors' in doc:\n for k in news_fields:\n if k in doc['term_vectors']:\n term_vec = doc['term_vectors'][k]['terms']\n for t, t_value in term_vec.items():\n if t in ret[k]:\n ret[k][t] += t_value['score']\n normalization[k][t] += 1\n else:\n ret[k][t] = t_value['score']\n normalization[k][t] = 1\n for field in ret.keys():\n for term in ret[k].keys():\n ret[k][t] = ret[k][term] / normalization[k][term]\n for key, value in ret.items():\n ret[key] = utils.normalize_vec(value)\n ids = []\n docs_vectors = dict()\n for s_rslt in search_results['hits']['hits']:\n ids.append(s_rslt['_id'])\n results_doc = utils.get_term_vectors(ids, news_fields, es)\n for doc in results_doc['docs']:\n if 'term_vectors' in doc:\n docs_vectors[doc['_id']] = dict()\n for k in news_fields:\n if k in doc['term_vectors']:\n docs_vectors[doc['_id']][k] = dict()\n term_vec = doc['term_vectors'][k]['terms']\n for t, t_value in term_vec.items():\n docs_vectors[doc['_id']][k][t] = t_value['score']\n weights = dict()\n weights['body'] = 1\n weights['category'] = 2\n weights['title'] = 2.5\n user_vector = utils.aggregate_vecs(ret, weights)\n scores = dict()\n for doc, vector in docs_vectors.items():\n for key, value in vector.items():\n vector[key] = utils.normalize_vec(value)\n document_vector = utils.aggregate_vecs(vector, weights)\n score = utils.cosine_similarity(document_vector, user_vector)\n scores[doc] = score\n p = 1.0\n norm_old = 0\n for s_rslt in search_results['hits']['hits']:\n norm_old += s_rslt['_score']\n norm_new = 0\n for score in scores.values():\n norm_new += score\n regular_s = copy.deepcopy(search_results['hits']['hits'])\n for s_rslt in search_results['hits']['hits']:\n s_rslt['_score'] = (1 - p) * s_rslt['_score'] / norm_old + p * scores[\n s_rslt['_id']] / norm_new\n search_results['hits']['hits'] = sorted(search_results['hits']['hits'],\n key=lambda k: k['_score'], reverse=True)\n if calculate_ndcg_score:\n ndcg(regular_s, search_results['hits']['hits'])\n print()\n s = ''\n i = 1\n for elem in search_results['hits']['hits']:\n s += str(i) + ' & ' + ' & ' + elem['_id'] + ' & ' + '\\\\\\\\' + '\\n'\n i += 1\n print(s)\n return success_response(search_results['hits']['hits'])\n",
"<import token>\nJSON_MIME_TYPE = 'application/json; charset=utf-8'\n<import token>\ncalculate_ndcg_score = True\nndcg_scorring_file = (\n '/Users/pacmac/Documents/GitHub/KTH_Projects/DD2476PersonalizedSearch/backend/api/markets_artin.txt'\n )\nes = Elasticsearch()\n\n\ndef success_response(result, message=''):\n format = {'status': 'success', 'message': message, 'result': result}\n return json_response(json.dumps(format))\n\n\ndef json_response(data='', status=200, headers=None):\n headers = headers or {}\n if 'Content-Type' not in headers:\n headers['Content-Type'] = JSON_MIME_TYPE\n return make_response(data, status, headers)\n\n\ndef success_message(message):\n format = {'status': 'success', 'result': message}\n return json_response(json.dumps(format))\n\n\ndef regular_search():\n data = request.args\n results = utils.search(data['query'], es)\n print()\n s = ''\n i = 1\n for elem in results['hits']['hits']:\n s += str(i) + ' & ' + elem['_id'] + ' & ' + ' & ' + '\\\\\\\\' + '\\n'\n i += 1\n print(s)\n return success_response(results)\n\n\ndef ndcg(regular_search, personalized_search):\n f = open(ndcg_scorring_file, 'r')\n lines = f.readlines()\n ratings = dict()\n for line in lines:\n id, rating = line.split(' ')\n ratings[id] = int(rating)\n optimal_results = sorted(ratings.values(), reverse=True)\n ideal = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n ideal += optimal_results[i] / math.log2(oneindexedI + 1)\n regular = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n regular += ratings[regular_search[i]['_id']] / math.log2(\n oneindexedI + 1)\n personalized = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n personalized += ratings[personalized_search[i]['_id']] / math.log2(\n oneindexedI + 1)\n print('Regular Search NDCG: ', regular / ideal, 'DCG:', regular)\n print('Personalized Search NDCG:', personalized / ideal, 'DCG:',\n personalized)\n print(' ' * 37 + 'Optimal DCG:', ideal)\n\n\ndef update_user():\n data = request.args\n user = utils.get_user(data['id'], es)\n if user['hits']['total']['value'] == 1:\n body = {'script': {'source':\n 'if (ctx._source.history.contains(params.click)) { ctx._source.history.remove(ctx._source.history.indexOf(params.click))} ctx._source.history.add(params.click)'\n , 'lang': 'painless', 'params': {'click': data['click']}}}\n results = es.update(index='users', id=data['id'], body=body)\n else:\n history = [data['click']]\n doc = {'user_id': data['id'], 'history': history}\n results = es.index(index='users', id=data['id'], body=doc)\n return success_response(results)\n\n\ndef get_user():\n data = request.args\n results = utils.get_user(data['id'], es)\n return success_response(results)\n\n\ndef get_history():\n data = request.args\n user = utils.get_user(data['id'], es)\n if user['hits']['total']['value'] != 1:\n return success_response({'docs': []})\n history = user['hits']['hits'][0]['_source']['history']\n if len(history) > 10:\n history = history[-10:]\n history.reverse()\n docstoretrieve = {'docs': [{'_id': elem} for elem in history]}\n if len(docstoretrieve['docs']) == 0:\n return success_response([])\n docs = es.mget(body=docstoretrieve, index='news')\n return success_response(docs)\n\n\ndef delete_user():\n data = request.args\n results = es.delete(index='users', id=data['id'])\n return success_response(results)\n\n\ndef get_recommendations():\n data = request.args\n body = {'query': {'bool': {'must': {'term': {'history.keyword': data[\n 'id']}}}}, 'aggs': {'recommendations': 
{'significant_terms': {\n 'field': 'history.keyword', 'exclude': data['id'], 'min_doc_count':\n 1}}}}\n recommendations = es.search(index='users', body=body)\n docstoretrieve = {'docs': [{'_id': elem['key']} for elem in\n recommendations['aggregations']['recommendations']['buckets']]}\n if len(docstoretrieve['docs']) == 0:\n return success_response([])\n docs = es.mget(body=docstoretrieve, index='news')\n return success_response(docs)\n\n\ndef get_news_by_id():\n data = request.args\n results = es.get(index='news', id=data['id'])\n return success_response(results)\n\n\ndef personalized_search():\n data = request.args\n user = utils.get_user(data['id'], es)\n news_fields = ['title', 'category', 'body']\n search_results = utils.search(data['query'], es)\n if user['hits']['total']['value'] != 1:\n return success_response(search_results['hits']['hits'])\n history = user['hits']['hits'][0]['_source']['history']\n if len(history) > 10:\n history = history[-10:]\n results = utils.get_term_vectors(history, news_fields, es)\n ret = dict()\n normalization = dict()\n for c in news_fields:\n ret[c] = dict()\n normalization[c] = dict()\n for doc in results['docs']:\n if 'term_vectors' in doc:\n for k in news_fields:\n if k in doc['term_vectors']:\n term_vec = doc['term_vectors'][k]['terms']\n for t, t_value in term_vec.items():\n if t in ret[k]:\n ret[k][t] += t_value['score']\n normalization[k][t] += 1\n else:\n ret[k][t] = t_value['score']\n normalization[k][t] = 1\n for field in ret.keys():\n for term in ret[k].keys():\n ret[k][t] = ret[k][term] / normalization[k][term]\n for key, value in ret.items():\n ret[key] = utils.normalize_vec(value)\n ids = []\n docs_vectors = dict()\n for s_rslt in search_results['hits']['hits']:\n ids.append(s_rslt['_id'])\n results_doc = utils.get_term_vectors(ids, news_fields, es)\n for doc in results_doc['docs']:\n if 'term_vectors' in doc:\n docs_vectors[doc['_id']] = dict()\n for k in news_fields:\n if k in doc['term_vectors']:\n docs_vectors[doc['_id']][k] = dict()\n term_vec = doc['term_vectors'][k]['terms']\n for t, t_value in term_vec.items():\n docs_vectors[doc['_id']][k][t] = t_value['score']\n weights = dict()\n weights['body'] = 1\n weights['category'] = 2\n weights['title'] = 2.5\n user_vector = utils.aggregate_vecs(ret, weights)\n scores = dict()\n for doc, vector in docs_vectors.items():\n for key, value in vector.items():\n vector[key] = utils.normalize_vec(value)\n document_vector = utils.aggregate_vecs(vector, weights)\n score = utils.cosine_similarity(document_vector, user_vector)\n scores[doc] = score\n p = 1.0\n norm_old = 0\n for s_rslt in search_results['hits']['hits']:\n norm_old += s_rslt['_score']\n norm_new = 0\n for score in scores.values():\n norm_new += score\n regular_s = copy.deepcopy(search_results['hits']['hits'])\n for s_rslt in search_results['hits']['hits']:\n s_rslt['_score'] = (1 - p) * s_rslt['_score'] / norm_old + p * scores[\n s_rslt['_id']] / norm_new\n search_results['hits']['hits'] = sorted(search_results['hits']['hits'],\n key=lambda k: k['_score'], reverse=True)\n if calculate_ndcg_score:\n ndcg(regular_s, search_results['hits']['hits'])\n print()\n s = ''\n i = 1\n for elem in search_results['hits']['hits']:\n s += str(i) + ' & ' + ' & ' + elem['_id'] + ' & ' + '\\\\\\\\' + '\\n'\n i += 1\n print(s)\n return success_response(search_results['hits']['hits'])\n",
"<import token>\n<assignment token>\n<import token>\n<assignment token>\n\n\ndef success_response(result, message=''):\n format = {'status': 'success', 'message': message, 'result': result}\n return json_response(json.dumps(format))\n\n\ndef json_response(data='', status=200, headers=None):\n headers = headers or {}\n if 'Content-Type' not in headers:\n headers['Content-Type'] = JSON_MIME_TYPE\n return make_response(data, status, headers)\n\n\ndef success_message(message):\n format = {'status': 'success', 'result': message}\n return json_response(json.dumps(format))\n\n\ndef regular_search():\n data = request.args\n results = utils.search(data['query'], es)\n print()\n s = ''\n i = 1\n for elem in results['hits']['hits']:\n s += str(i) + ' & ' + elem['_id'] + ' & ' + ' & ' + '\\\\\\\\' + '\\n'\n i += 1\n print(s)\n return success_response(results)\n\n\ndef ndcg(regular_search, personalized_search):\n f = open(ndcg_scorring_file, 'r')\n lines = f.readlines()\n ratings = dict()\n for line in lines:\n id, rating = line.split(' ')\n ratings[id] = int(rating)\n optimal_results = sorted(ratings.values(), reverse=True)\n ideal = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n ideal += optimal_results[i] / math.log2(oneindexedI + 1)\n regular = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n regular += ratings[regular_search[i]['_id']] / math.log2(\n oneindexedI + 1)\n personalized = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n personalized += ratings[personalized_search[i]['_id']] / math.log2(\n oneindexedI + 1)\n print('Regular Search NDCG: ', regular / ideal, 'DCG:', regular)\n print('Personalized Search NDCG:', personalized / ideal, 'DCG:',\n personalized)\n print(' ' * 37 + 'Optimal DCG:', ideal)\n\n\ndef update_user():\n data = request.args\n user = utils.get_user(data['id'], es)\n if user['hits']['total']['value'] == 1:\n body = {'script': {'source':\n 'if (ctx._source.history.contains(params.click)) { ctx._source.history.remove(ctx._source.history.indexOf(params.click))} ctx._source.history.add(params.click)'\n , 'lang': 'painless', 'params': {'click': data['click']}}}\n results = es.update(index='users', id=data['id'], body=body)\n else:\n history = [data['click']]\n doc = {'user_id': data['id'], 'history': history}\n results = es.index(index='users', id=data['id'], body=doc)\n return success_response(results)\n\n\ndef get_user():\n data = request.args\n results = utils.get_user(data['id'], es)\n return success_response(results)\n\n\ndef get_history():\n data = request.args\n user = utils.get_user(data['id'], es)\n if user['hits']['total']['value'] != 1:\n return success_response({'docs': []})\n history = user['hits']['hits'][0]['_source']['history']\n if len(history) > 10:\n history = history[-10:]\n history.reverse()\n docstoretrieve = {'docs': [{'_id': elem} for elem in history]}\n if len(docstoretrieve['docs']) == 0:\n return success_response([])\n docs = es.mget(body=docstoretrieve, index='news')\n return success_response(docs)\n\n\ndef delete_user():\n data = request.args\n results = es.delete(index='users', id=data['id'])\n return success_response(results)\n\n\ndef get_recommendations():\n data = request.args\n body = {'query': {'bool': {'must': {'term': {'history.keyword': data[\n 'id']}}}}, 'aggs': {'recommendations': {'significant_terms': {\n 'field': 'history.keyword', 'exclude': data['id'], 'min_doc_count':\n 1}}}}\n recommendations = es.search(index='users', body=body)\n docstoretrieve = {'docs': [{'_id': elem['key']} 
for elem in\n recommendations['aggregations']['recommendations']['buckets']]}\n if len(docstoretrieve['docs']) == 0:\n return success_response([])\n docs = es.mget(body=docstoretrieve, index='news')\n return success_response(docs)\n\n\ndef get_news_by_id():\n data = request.args\n results = es.get(index='news', id=data['id'])\n return success_response(results)\n\n\ndef personalized_search():\n data = request.args\n user = utils.get_user(data['id'], es)\n news_fields = ['title', 'category', 'body']\n search_results = utils.search(data['query'], es)\n if user['hits']['total']['value'] != 1:\n return success_response(search_results['hits']['hits'])\n history = user['hits']['hits'][0]['_source']['history']\n if len(history) > 10:\n history = history[-10:]\n results = utils.get_term_vectors(history, news_fields, es)\n ret = dict()\n normalization = dict()\n for c in news_fields:\n ret[c] = dict()\n normalization[c] = dict()\n for doc in results['docs']:\n if 'term_vectors' in doc:\n for k in news_fields:\n if k in doc['term_vectors']:\n term_vec = doc['term_vectors'][k]['terms']\n for t, t_value in term_vec.items():\n if t in ret[k]:\n ret[k][t] += t_value['score']\n normalization[k][t] += 1\n else:\n ret[k][t] = t_value['score']\n normalization[k][t] = 1\n for field in ret.keys():\n for term in ret[k].keys():\n ret[k][t] = ret[k][term] / normalization[k][term]\n for key, value in ret.items():\n ret[key] = utils.normalize_vec(value)\n ids = []\n docs_vectors = dict()\n for s_rslt in search_results['hits']['hits']:\n ids.append(s_rslt['_id'])\n results_doc = utils.get_term_vectors(ids, news_fields, es)\n for doc in results_doc['docs']:\n if 'term_vectors' in doc:\n docs_vectors[doc['_id']] = dict()\n for k in news_fields:\n if k in doc['term_vectors']:\n docs_vectors[doc['_id']][k] = dict()\n term_vec = doc['term_vectors'][k]['terms']\n for t, t_value in term_vec.items():\n docs_vectors[doc['_id']][k][t] = t_value['score']\n weights = dict()\n weights['body'] = 1\n weights['category'] = 2\n weights['title'] = 2.5\n user_vector = utils.aggregate_vecs(ret, weights)\n scores = dict()\n for doc, vector in docs_vectors.items():\n for key, value in vector.items():\n vector[key] = utils.normalize_vec(value)\n document_vector = utils.aggregate_vecs(vector, weights)\n score = utils.cosine_similarity(document_vector, user_vector)\n scores[doc] = score\n p = 1.0\n norm_old = 0\n for s_rslt in search_results['hits']['hits']:\n norm_old += s_rslt['_score']\n norm_new = 0\n for score in scores.values():\n norm_new += score\n regular_s = copy.deepcopy(search_results['hits']['hits'])\n for s_rslt in search_results['hits']['hits']:\n s_rslt['_score'] = (1 - p) * s_rslt['_score'] / norm_old + p * scores[\n s_rslt['_id']] / norm_new\n search_results['hits']['hits'] = sorted(search_results['hits']['hits'],\n key=lambda k: k['_score'], reverse=True)\n if calculate_ndcg_score:\n ndcg(regular_s, search_results['hits']['hits'])\n print()\n s = ''\n i = 1\n for elem in search_results['hits']['hits']:\n s += str(i) + ' & ' + ' & ' + elem['_id'] + ' & ' + '\\\\\\\\' + '\\n'\n i += 1\n print(s)\n return success_response(search_results['hits']['hits'])\n",
"<import token>\n<assignment token>\n<import token>\n<assignment token>\n\n\ndef success_response(result, message=''):\n format = {'status': 'success', 'message': message, 'result': result}\n return json_response(json.dumps(format))\n\n\ndef json_response(data='', status=200, headers=None):\n headers = headers or {}\n if 'Content-Type' not in headers:\n headers['Content-Type'] = JSON_MIME_TYPE\n return make_response(data, status, headers)\n\n\ndef success_message(message):\n format = {'status': 'success', 'result': message}\n return json_response(json.dumps(format))\n\n\n<function token>\n\n\ndef ndcg(regular_search, personalized_search):\n f = open(ndcg_scorring_file, 'r')\n lines = f.readlines()\n ratings = dict()\n for line in lines:\n id, rating = line.split(' ')\n ratings[id] = int(rating)\n optimal_results = sorted(ratings.values(), reverse=True)\n ideal = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n ideal += optimal_results[i] / math.log2(oneindexedI + 1)\n regular = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n regular += ratings[regular_search[i]['_id']] / math.log2(\n oneindexedI + 1)\n personalized = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n personalized += ratings[personalized_search[i]['_id']] / math.log2(\n oneindexedI + 1)\n print('Regular Search NDCG: ', regular / ideal, 'DCG:', regular)\n print('Personalized Search NDCG:', personalized / ideal, 'DCG:',\n personalized)\n print(' ' * 37 + 'Optimal DCG:', ideal)\n\n\ndef update_user():\n data = request.args\n user = utils.get_user(data['id'], es)\n if user['hits']['total']['value'] == 1:\n body = {'script': {'source':\n 'if (ctx._source.history.contains(params.click)) { ctx._source.history.remove(ctx._source.history.indexOf(params.click))} ctx._source.history.add(params.click)'\n , 'lang': 'painless', 'params': {'click': data['click']}}}\n results = es.update(index='users', id=data['id'], body=body)\n else:\n history = [data['click']]\n doc = {'user_id': data['id'], 'history': history}\n results = es.index(index='users', id=data['id'], body=doc)\n return success_response(results)\n\n\ndef get_user():\n data = request.args\n results = utils.get_user(data['id'], es)\n return success_response(results)\n\n\ndef get_history():\n data = request.args\n user = utils.get_user(data['id'], es)\n if user['hits']['total']['value'] != 1:\n return success_response({'docs': []})\n history = user['hits']['hits'][0]['_source']['history']\n if len(history) > 10:\n history = history[-10:]\n history.reverse()\n docstoretrieve = {'docs': [{'_id': elem} for elem in history]}\n if len(docstoretrieve['docs']) == 0:\n return success_response([])\n docs = es.mget(body=docstoretrieve, index='news')\n return success_response(docs)\n\n\ndef delete_user():\n data = request.args\n results = es.delete(index='users', id=data['id'])\n return success_response(results)\n\n\ndef get_recommendations():\n data = request.args\n body = {'query': {'bool': {'must': {'term': {'history.keyword': data[\n 'id']}}}}, 'aggs': {'recommendations': {'significant_terms': {\n 'field': 'history.keyword', 'exclude': data['id'], 'min_doc_count':\n 1}}}}\n recommendations = es.search(index='users', body=body)\n docstoretrieve = {'docs': [{'_id': elem['key']} for elem in\n recommendations['aggregations']['recommendations']['buckets']]}\n if len(docstoretrieve['docs']) == 0:\n return success_response([])\n docs = es.mget(body=docstoretrieve, index='news')\n return success_response(docs)\n\n\ndef get_news_by_id():\n data = 
request.args\n results = es.get(index='news', id=data['id'])\n return success_response(results)\n\n\ndef personalized_search():\n data = request.args\n user = utils.get_user(data['id'], es)\n news_fields = ['title', 'category', 'body']\n search_results = utils.search(data['query'], es)\n if user['hits']['total']['value'] != 1:\n return success_response(search_results['hits']['hits'])\n history = user['hits']['hits'][0]['_source']['history']\n if len(history) > 10:\n history = history[-10:]\n results = utils.get_term_vectors(history, news_fields, es)\n ret = dict()\n normalization = dict()\n for c in news_fields:\n ret[c] = dict()\n normalization[c] = dict()\n for doc in results['docs']:\n if 'term_vectors' in doc:\n for k in news_fields:\n if k in doc['term_vectors']:\n term_vec = doc['term_vectors'][k]['terms']\n for t, t_value in term_vec.items():\n if t in ret[k]:\n ret[k][t] += t_value['score']\n normalization[k][t] += 1\n else:\n ret[k][t] = t_value['score']\n normalization[k][t] = 1\n for field in ret.keys():\n for term in ret[k].keys():\n ret[k][t] = ret[k][term] / normalization[k][term]\n for key, value in ret.items():\n ret[key] = utils.normalize_vec(value)\n ids = []\n docs_vectors = dict()\n for s_rslt in search_results['hits']['hits']:\n ids.append(s_rslt['_id'])\n results_doc = utils.get_term_vectors(ids, news_fields, es)\n for doc in results_doc['docs']:\n if 'term_vectors' in doc:\n docs_vectors[doc['_id']] = dict()\n for k in news_fields:\n if k in doc['term_vectors']:\n docs_vectors[doc['_id']][k] = dict()\n term_vec = doc['term_vectors'][k]['terms']\n for t, t_value in term_vec.items():\n docs_vectors[doc['_id']][k][t] = t_value['score']\n weights = dict()\n weights['body'] = 1\n weights['category'] = 2\n weights['title'] = 2.5\n user_vector = utils.aggregate_vecs(ret, weights)\n scores = dict()\n for doc, vector in docs_vectors.items():\n for key, value in vector.items():\n vector[key] = utils.normalize_vec(value)\n document_vector = utils.aggregate_vecs(vector, weights)\n score = utils.cosine_similarity(document_vector, user_vector)\n scores[doc] = score\n p = 1.0\n norm_old = 0\n for s_rslt in search_results['hits']['hits']:\n norm_old += s_rslt['_score']\n norm_new = 0\n for score in scores.values():\n norm_new += score\n regular_s = copy.deepcopy(search_results['hits']['hits'])\n for s_rslt in search_results['hits']['hits']:\n s_rslt['_score'] = (1 - p) * s_rslt['_score'] / norm_old + p * scores[\n s_rslt['_id']] / norm_new\n search_results['hits']['hits'] = sorted(search_results['hits']['hits'],\n key=lambda k: k['_score'], reverse=True)\n if calculate_ndcg_score:\n ndcg(regular_s, search_results['hits']['hits'])\n print()\n s = ''\n i = 1\n for elem in search_results['hits']['hits']:\n s += str(i) + ' & ' + ' & ' + elem['_id'] + ' & ' + '\\\\\\\\' + '\\n'\n i += 1\n print(s)\n return success_response(search_results['hits']['hits'])\n",
"<import token>\n<assignment token>\n<import token>\n<assignment token>\n\n\ndef success_response(result, message=''):\n format = {'status': 'success', 'message': message, 'result': result}\n return json_response(json.dumps(format))\n\n\ndef json_response(data='', status=200, headers=None):\n headers = headers or {}\n if 'Content-Type' not in headers:\n headers['Content-Type'] = JSON_MIME_TYPE\n return make_response(data, status, headers)\n\n\ndef success_message(message):\n format = {'status': 'success', 'result': message}\n return json_response(json.dumps(format))\n\n\n<function token>\n\n\ndef ndcg(regular_search, personalized_search):\n f = open(ndcg_scorring_file, 'r')\n lines = f.readlines()\n ratings = dict()\n for line in lines:\n id, rating = line.split(' ')\n ratings[id] = int(rating)\n optimal_results = sorted(ratings.values(), reverse=True)\n ideal = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n ideal += optimal_results[i] / math.log2(oneindexedI + 1)\n regular = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n regular += ratings[regular_search[i]['_id']] / math.log2(\n oneindexedI + 1)\n personalized = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n personalized += ratings[personalized_search[i]['_id']] / math.log2(\n oneindexedI + 1)\n print('Regular Search NDCG: ', regular / ideal, 'DCG:', regular)\n print('Personalized Search NDCG:', personalized / ideal, 'DCG:',\n personalized)\n print(' ' * 37 + 'Optimal DCG:', ideal)\n\n\ndef update_user():\n data = request.args\n user = utils.get_user(data['id'], es)\n if user['hits']['total']['value'] == 1:\n body = {'script': {'source':\n 'if (ctx._source.history.contains(params.click)) { ctx._source.history.remove(ctx._source.history.indexOf(params.click))} ctx._source.history.add(params.click)'\n , 'lang': 'painless', 'params': {'click': data['click']}}}\n results = es.update(index='users', id=data['id'], body=body)\n else:\n history = [data['click']]\n doc = {'user_id': data['id'], 'history': history}\n results = es.index(index='users', id=data['id'], body=doc)\n return success_response(results)\n\n\ndef get_user():\n data = request.args\n results = utils.get_user(data['id'], es)\n return success_response(results)\n\n\ndef get_history():\n data = request.args\n user = utils.get_user(data['id'], es)\n if user['hits']['total']['value'] != 1:\n return success_response({'docs': []})\n history = user['hits']['hits'][0]['_source']['history']\n if len(history) > 10:\n history = history[-10:]\n history.reverse()\n docstoretrieve = {'docs': [{'_id': elem} for elem in history]}\n if len(docstoretrieve['docs']) == 0:\n return success_response([])\n docs = es.mget(body=docstoretrieve, index='news')\n return success_response(docs)\n\n\ndef delete_user():\n data = request.args\n results = es.delete(index='users', id=data['id'])\n return success_response(results)\n\n\ndef get_recommendations():\n data = request.args\n body = {'query': {'bool': {'must': {'term': {'history.keyword': data[\n 'id']}}}}, 'aggs': {'recommendations': {'significant_terms': {\n 'field': 'history.keyword', 'exclude': data['id'], 'min_doc_count':\n 1}}}}\n recommendations = es.search(index='users', body=body)\n docstoretrieve = {'docs': [{'_id': elem['key']} for elem in\n recommendations['aggregations']['recommendations']['buckets']]}\n if len(docstoretrieve['docs']) == 0:\n return success_response([])\n docs = es.mget(body=docstoretrieve, index='news')\n return success_response(docs)\n\n\ndef get_news_by_id():\n data = 
request.args\n results = es.get(index='news', id=data['id'])\n return success_response(results)\n\n\n<function token>\n",
"<import token>\n<assignment token>\n<import token>\n<assignment token>\n\n\ndef success_response(result, message=''):\n format = {'status': 'success', 'message': message, 'result': result}\n return json_response(json.dumps(format))\n\n\ndef json_response(data='', status=200, headers=None):\n headers = headers or {}\n if 'Content-Type' not in headers:\n headers['Content-Type'] = JSON_MIME_TYPE\n return make_response(data, status, headers)\n\n\ndef success_message(message):\n format = {'status': 'success', 'result': message}\n return json_response(json.dumps(format))\n\n\n<function token>\n\n\ndef ndcg(regular_search, personalized_search):\n f = open(ndcg_scorring_file, 'r')\n lines = f.readlines()\n ratings = dict()\n for line in lines:\n id, rating = line.split(' ')\n ratings[id] = int(rating)\n optimal_results = sorted(ratings.values(), reverse=True)\n ideal = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n ideal += optimal_results[i] / math.log2(oneindexedI + 1)\n regular = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n regular += ratings[regular_search[i]['_id']] / math.log2(\n oneindexedI + 1)\n personalized = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n personalized += ratings[personalized_search[i]['_id']] / math.log2(\n oneindexedI + 1)\n print('Regular Search NDCG: ', regular / ideal, 'DCG:', regular)\n print('Personalized Search NDCG:', personalized / ideal, 'DCG:',\n personalized)\n print(' ' * 37 + 'Optimal DCG:', ideal)\n\n\n<function token>\n\n\ndef get_user():\n data = request.args\n results = utils.get_user(data['id'], es)\n return success_response(results)\n\n\ndef get_history():\n data = request.args\n user = utils.get_user(data['id'], es)\n if user['hits']['total']['value'] != 1:\n return success_response({'docs': []})\n history = user['hits']['hits'][0]['_source']['history']\n if len(history) > 10:\n history = history[-10:]\n history.reverse()\n docstoretrieve = {'docs': [{'_id': elem} for elem in history]}\n if len(docstoretrieve['docs']) == 0:\n return success_response([])\n docs = es.mget(body=docstoretrieve, index='news')\n return success_response(docs)\n\n\ndef delete_user():\n data = request.args\n results = es.delete(index='users', id=data['id'])\n return success_response(results)\n\n\ndef get_recommendations():\n data = request.args\n body = {'query': {'bool': {'must': {'term': {'history.keyword': data[\n 'id']}}}}, 'aggs': {'recommendations': {'significant_terms': {\n 'field': 'history.keyword', 'exclude': data['id'], 'min_doc_count':\n 1}}}}\n recommendations = es.search(index='users', body=body)\n docstoretrieve = {'docs': [{'_id': elem['key']} for elem in\n recommendations['aggregations']['recommendations']['buckets']]}\n if len(docstoretrieve['docs']) == 0:\n return success_response([])\n docs = es.mget(body=docstoretrieve, index='news')\n return success_response(docs)\n\n\ndef get_news_by_id():\n data = request.args\n results = es.get(index='news', id=data['id'])\n return success_response(results)\n\n\n<function token>\n",
"<import token>\n<assignment token>\n<import token>\n<assignment token>\n\n\ndef success_response(result, message=''):\n format = {'status': 'success', 'message': message, 'result': result}\n return json_response(json.dumps(format))\n\n\ndef json_response(data='', status=200, headers=None):\n headers = headers or {}\n if 'Content-Type' not in headers:\n headers['Content-Type'] = JSON_MIME_TYPE\n return make_response(data, status, headers)\n\n\n<function token>\n<function token>\n\n\ndef ndcg(regular_search, personalized_search):\n f = open(ndcg_scorring_file, 'r')\n lines = f.readlines()\n ratings = dict()\n for line in lines:\n id, rating = line.split(' ')\n ratings[id] = int(rating)\n optimal_results = sorted(ratings.values(), reverse=True)\n ideal = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n ideal += optimal_results[i] / math.log2(oneindexedI + 1)\n regular = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n regular += ratings[regular_search[i]['_id']] / math.log2(\n oneindexedI + 1)\n personalized = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n personalized += ratings[personalized_search[i]['_id']] / math.log2(\n oneindexedI + 1)\n print('Regular Search NDCG: ', regular / ideal, 'DCG:', regular)\n print('Personalized Search NDCG:', personalized / ideal, 'DCG:',\n personalized)\n print(' ' * 37 + 'Optimal DCG:', ideal)\n\n\n<function token>\n\n\ndef get_user():\n data = request.args\n results = utils.get_user(data['id'], es)\n return success_response(results)\n\n\ndef get_history():\n data = request.args\n user = utils.get_user(data['id'], es)\n if user['hits']['total']['value'] != 1:\n return success_response({'docs': []})\n history = user['hits']['hits'][0]['_source']['history']\n if len(history) > 10:\n history = history[-10:]\n history.reverse()\n docstoretrieve = {'docs': [{'_id': elem} for elem in history]}\n if len(docstoretrieve['docs']) == 0:\n return success_response([])\n docs = es.mget(body=docstoretrieve, index='news')\n return success_response(docs)\n\n\ndef delete_user():\n data = request.args\n results = es.delete(index='users', id=data['id'])\n return success_response(results)\n\n\ndef get_recommendations():\n data = request.args\n body = {'query': {'bool': {'must': {'term': {'history.keyword': data[\n 'id']}}}}, 'aggs': {'recommendations': {'significant_terms': {\n 'field': 'history.keyword', 'exclude': data['id'], 'min_doc_count':\n 1}}}}\n recommendations = es.search(index='users', body=body)\n docstoretrieve = {'docs': [{'_id': elem['key']} for elem in\n recommendations['aggregations']['recommendations']['buckets']]}\n if len(docstoretrieve['docs']) == 0:\n return success_response([])\n docs = es.mget(body=docstoretrieve, index='news')\n return success_response(docs)\n\n\ndef get_news_by_id():\n data = request.args\n results = es.get(index='news', id=data['id'])\n return success_response(results)\n\n\n<function token>\n",
"<import token>\n<assignment token>\n<import token>\n<assignment token>\n\n\ndef success_response(result, message=''):\n format = {'status': 'success', 'message': message, 'result': result}\n return json_response(json.dumps(format))\n\n\ndef json_response(data='', status=200, headers=None):\n headers = headers or {}\n if 'Content-Type' not in headers:\n headers['Content-Type'] = JSON_MIME_TYPE\n return make_response(data, status, headers)\n\n\n<function token>\n<function token>\n\n\ndef ndcg(regular_search, personalized_search):\n f = open(ndcg_scorring_file, 'r')\n lines = f.readlines()\n ratings = dict()\n for line in lines:\n id, rating = line.split(' ')\n ratings[id] = int(rating)\n optimal_results = sorted(ratings.values(), reverse=True)\n ideal = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n ideal += optimal_results[i] / math.log2(oneindexedI + 1)\n regular = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n regular += ratings[regular_search[i]['_id']] / math.log2(\n oneindexedI + 1)\n personalized = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n personalized += ratings[personalized_search[i]['_id']] / math.log2(\n oneindexedI + 1)\n print('Regular Search NDCG: ', regular / ideal, 'DCG:', regular)\n print('Personalized Search NDCG:', personalized / ideal, 'DCG:',\n personalized)\n print(' ' * 37 + 'Optimal DCG:', ideal)\n\n\n<function token>\n<function token>\n\n\ndef get_history():\n data = request.args\n user = utils.get_user(data['id'], es)\n if user['hits']['total']['value'] != 1:\n return success_response({'docs': []})\n history = user['hits']['hits'][0]['_source']['history']\n if len(history) > 10:\n history = history[-10:]\n history.reverse()\n docstoretrieve = {'docs': [{'_id': elem} for elem in history]}\n if len(docstoretrieve['docs']) == 0:\n return success_response([])\n docs = es.mget(body=docstoretrieve, index='news')\n return success_response(docs)\n\n\ndef delete_user():\n data = request.args\n results = es.delete(index='users', id=data['id'])\n return success_response(results)\n\n\ndef get_recommendations():\n data = request.args\n body = {'query': {'bool': {'must': {'term': {'history.keyword': data[\n 'id']}}}}, 'aggs': {'recommendations': {'significant_terms': {\n 'field': 'history.keyword', 'exclude': data['id'], 'min_doc_count':\n 1}}}}\n recommendations = es.search(index='users', body=body)\n docstoretrieve = {'docs': [{'_id': elem['key']} for elem in\n recommendations['aggregations']['recommendations']['buckets']]}\n if len(docstoretrieve['docs']) == 0:\n return success_response([])\n docs = es.mget(body=docstoretrieve, index='news')\n return success_response(docs)\n\n\ndef get_news_by_id():\n data = request.args\n results = es.get(index='news', id=data['id'])\n return success_response(results)\n\n\n<function token>\n",
"<import token>\n<assignment token>\n<import token>\n<assignment token>\n\n\ndef success_response(result, message=''):\n format = {'status': 'success', 'message': message, 'result': result}\n return json_response(json.dumps(format))\n\n\ndef json_response(data='', status=200, headers=None):\n headers = headers or {}\n if 'Content-Type' not in headers:\n headers['Content-Type'] = JSON_MIME_TYPE\n return make_response(data, status, headers)\n\n\n<function token>\n<function token>\n\n\ndef ndcg(regular_search, personalized_search):\n f = open(ndcg_scorring_file, 'r')\n lines = f.readlines()\n ratings = dict()\n for line in lines:\n id, rating = line.split(' ')\n ratings[id] = int(rating)\n optimal_results = sorted(ratings.values(), reverse=True)\n ideal = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n ideal += optimal_results[i] / math.log2(oneindexedI + 1)\n regular = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n regular += ratings[regular_search[i]['_id']] / math.log2(\n oneindexedI + 1)\n personalized = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n personalized += ratings[personalized_search[i]['_id']] / math.log2(\n oneindexedI + 1)\n print('Regular Search NDCG: ', regular / ideal, 'DCG:', regular)\n print('Personalized Search NDCG:', personalized / ideal, 'DCG:',\n personalized)\n print(' ' * 37 + 'Optimal DCG:', ideal)\n\n\n<function token>\n<function token>\n\n\ndef get_history():\n data = request.args\n user = utils.get_user(data['id'], es)\n if user['hits']['total']['value'] != 1:\n return success_response({'docs': []})\n history = user['hits']['hits'][0]['_source']['history']\n if len(history) > 10:\n history = history[-10:]\n history.reverse()\n docstoretrieve = {'docs': [{'_id': elem} for elem in history]}\n if len(docstoretrieve['docs']) == 0:\n return success_response([])\n docs = es.mget(body=docstoretrieve, index='news')\n return success_response(docs)\n\n\n<function token>\n\n\ndef get_recommendations():\n data = request.args\n body = {'query': {'bool': {'must': {'term': {'history.keyword': data[\n 'id']}}}}, 'aggs': {'recommendations': {'significant_terms': {\n 'field': 'history.keyword', 'exclude': data['id'], 'min_doc_count':\n 1}}}}\n recommendations = es.search(index='users', body=body)\n docstoretrieve = {'docs': [{'_id': elem['key']} for elem in\n recommendations['aggregations']['recommendations']['buckets']]}\n if len(docstoretrieve['docs']) == 0:\n return success_response([])\n docs = es.mget(body=docstoretrieve, index='news')\n return success_response(docs)\n\n\ndef get_news_by_id():\n data = request.args\n results = es.get(index='news', id=data['id'])\n return success_response(results)\n\n\n<function token>\n",
"<import token>\n<assignment token>\n<import token>\n<assignment token>\n\n\ndef success_response(result, message=''):\n format = {'status': 'success', 'message': message, 'result': result}\n return json_response(json.dumps(format))\n\n\ndef json_response(data='', status=200, headers=None):\n headers = headers or {}\n if 'Content-Type' not in headers:\n headers['Content-Type'] = JSON_MIME_TYPE\n return make_response(data, status, headers)\n\n\n<function token>\n<function token>\n\n\ndef ndcg(regular_search, personalized_search):\n f = open(ndcg_scorring_file, 'r')\n lines = f.readlines()\n ratings = dict()\n for line in lines:\n id, rating = line.split(' ')\n ratings[id] = int(rating)\n optimal_results = sorted(ratings.values(), reverse=True)\n ideal = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n ideal += optimal_results[i] / math.log2(oneindexedI + 1)\n regular = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n regular += ratings[regular_search[i]['_id']] / math.log2(\n oneindexedI + 1)\n personalized = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n personalized += ratings[personalized_search[i]['_id']] / math.log2(\n oneindexedI + 1)\n print('Regular Search NDCG: ', regular / ideal, 'DCG:', regular)\n print('Personalized Search NDCG:', personalized / ideal, 'DCG:',\n personalized)\n print(' ' * 37 + 'Optimal DCG:', ideal)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef get_recommendations():\n data = request.args\n body = {'query': {'bool': {'must': {'term': {'history.keyword': data[\n 'id']}}}}, 'aggs': {'recommendations': {'significant_terms': {\n 'field': 'history.keyword', 'exclude': data['id'], 'min_doc_count':\n 1}}}}\n recommendations = es.search(index='users', body=body)\n docstoretrieve = {'docs': [{'_id': elem['key']} for elem in\n recommendations['aggregations']['recommendations']['buckets']]}\n if len(docstoretrieve['docs']) == 0:\n return success_response([])\n docs = es.mget(body=docstoretrieve, index='news')\n return success_response(docs)\n\n\ndef get_news_by_id():\n data = request.args\n results = es.get(index='news', id=data['id'])\n return success_response(results)\n\n\n<function token>\n",
"<import token>\n<assignment token>\n<import token>\n<assignment token>\n\n\ndef success_response(result, message=''):\n format = {'status': 'success', 'message': message, 'result': result}\n return json_response(json.dumps(format))\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef ndcg(regular_search, personalized_search):\n f = open(ndcg_scorring_file, 'r')\n lines = f.readlines()\n ratings = dict()\n for line in lines:\n id, rating = line.split(' ')\n ratings[id] = int(rating)\n optimal_results = sorted(ratings.values(), reverse=True)\n ideal = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n ideal += optimal_results[i] / math.log2(oneindexedI + 1)\n regular = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n regular += ratings[regular_search[i]['_id']] / math.log2(\n oneindexedI + 1)\n personalized = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n personalized += ratings[personalized_search[i]['_id']] / math.log2(\n oneindexedI + 1)\n print('Regular Search NDCG: ', regular / ideal, 'DCG:', regular)\n print('Personalized Search NDCG:', personalized / ideal, 'DCG:',\n personalized)\n print(' ' * 37 + 'Optimal DCG:', ideal)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef get_recommendations():\n data = request.args\n body = {'query': {'bool': {'must': {'term': {'history.keyword': data[\n 'id']}}}}, 'aggs': {'recommendations': {'significant_terms': {\n 'field': 'history.keyword', 'exclude': data['id'], 'min_doc_count':\n 1}}}}\n recommendations = es.search(index='users', body=body)\n docstoretrieve = {'docs': [{'_id': elem['key']} for elem in\n recommendations['aggregations']['recommendations']['buckets']]}\n if len(docstoretrieve['docs']) == 0:\n return success_response([])\n docs = es.mget(body=docstoretrieve, index='news')\n return success_response(docs)\n\n\ndef get_news_by_id():\n data = request.args\n results = es.get(index='news', id=data['id'])\n return success_response(results)\n\n\n<function token>\n",
"<import token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef ndcg(regular_search, personalized_search):\n f = open(ndcg_scorring_file, 'r')\n lines = f.readlines()\n ratings = dict()\n for line in lines:\n id, rating = line.split(' ')\n ratings[id] = int(rating)\n optimal_results = sorted(ratings.values(), reverse=True)\n ideal = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n ideal += optimal_results[i] / math.log2(oneindexedI + 1)\n regular = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n regular += ratings[regular_search[i]['_id']] / math.log2(\n oneindexedI + 1)\n personalized = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n personalized += ratings[personalized_search[i]['_id']] / math.log2(\n oneindexedI + 1)\n print('Regular Search NDCG: ', regular / ideal, 'DCG:', regular)\n print('Personalized Search NDCG:', personalized / ideal, 'DCG:',\n personalized)\n print(' ' * 37 + 'Optimal DCG:', ideal)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef get_recommendations():\n data = request.args\n body = {'query': {'bool': {'must': {'term': {'history.keyword': data[\n 'id']}}}}, 'aggs': {'recommendations': {'significant_terms': {\n 'field': 'history.keyword', 'exclude': data['id'], 'min_doc_count':\n 1}}}}\n recommendations = es.search(index='users', body=body)\n docstoretrieve = {'docs': [{'_id': elem['key']} for elem in\n recommendations['aggregations']['recommendations']['buckets']]}\n if len(docstoretrieve['docs']) == 0:\n return success_response([])\n docs = es.mget(body=docstoretrieve, index='news')\n return success_response(docs)\n\n\ndef get_news_by_id():\n data = request.args\n results = es.get(index='news', id=data['id'])\n return success_response(results)\n\n\n<function token>\n",
"<import token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef ndcg(regular_search, personalized_search):\n f = open(ndcg_scorring_file, 'r')\n lines = f.readlines()\n ratings = dict()\n for line in lines:\n id, rating = line.split(' ')\n ratings[id] = int(rating)\n optimal_results = sorted(ratings.values(), reverse=True)\n ideal = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n ideal += optimal_results[i] / math.log2(oneindexedI + 1)\n regular = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n regular += ratings[regular_search[i]['_id']] / math.log2(\n oneindexedI + 1)\n personalized = 0\n for i in range(len(optimal_results)):\n oneindexedI = i + 1\n personalized += ratings[personalized_search[i]['_id']] / math.log2(\n oneindexedI + 1)\n print('Regular Search NDCG: ', regular / ideal, 'DCG:', regular)\n print('Personalized Search NDCG:', personalized / ideal, 'DCG:',\n personalized)\n print(' ' * 37 + 'Optimal DCG:', ideal)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef get_recommendations():\n data = request.args\n body = {'query': {'bool': {'must': {'term': {'history.keyword': data[\n 'id']}}}}, 'aggs': {'recommendations': {'significant_terms': {\n 'field': 'history.keyword', 'exclude': data['id'], 'min_doc_count':\n 1}}}}\n recommendations = es.search(index='users', body=body)\n docstoretrieve = {'docs': [{'_id': elem['key']} for elem in\n recommendations['aggregations']['recommendations']['buckets']]}\n if len(docstoretrieve['docs']) == 0:\n return success_response([])\n docs = es.mget(body=docstoretrieve, index='news')\n return success_response(docs)\n\n\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef get_recommendations():\n data = request.args\n body = {'query': {'bool': {'must': {'term': {'history.keyword': data[\n 'id']}}}}, 'aggs': {'recommendations': {'significant_terms': {\n 'field': 'history.keyword', 'exclude': data['id'], 'min_doc_count':\n 1}}}}\n recommendations = es.search(index='users', body=body)\n docstoretrieve = {'docs': [{'_id': elem['key']} for elem in\n recommendations['aggregations']['recommendations']['buckets']]}\n if len(docstoretrieve['docs']) == 0:\n return success_response([])\n docs = es.mget(body=docstoretrieve, index='news')\n return success_response(docs)\n\n\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
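The personalized_search function stored in this record carries a subtle bug in its averaging step: the loop "for field in ret.keys(): for term in ret[k].keys(): ret[k][t] = ret[k][term] / normalization[k][term]" iterates with field/term but indexes with the stale k and t left over from the preceding loops, so only a single entry ever gets overwritten and no real averaging happens. A minimal corrected sketch of just that step (same ret/normalization dictionaries as in the record; the helper name is illustrative, not from the original code):

    # Average each field's accumulated term scores by the number of
    # history documents that contributed to each term.
    def average_term_scores(ret, normalization):
        for field in ret:
            for term in ret[field]:
                ret[field][term] = ret[field][term] / normalization[field][term]
        return ret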
99,363 |
6ff82394d65152d5f5664dc1ba607c3a94a02d3a
|
import heapq
def solution(N, road, K):
    answer = 0
    # Use one billion as the value that stands for infinity.
    INF = int(1e9)
    # Shortest-distance table.
    distance = [INF] * (N + 1)
    # Adjacency list.
    graph = [[] for _ in range(N + 1)]
    # Populate the graph; the roads are two-way, so add both directions.
    for a, b, c in road:
        graph[a].append((b, c))
        # Forgetting this reverse edge caused a wrong answer at first.
        graph[b].append((a, c))

    # Dijkstra's algorithm.
    def dijksta(start):
        q = []
        distance[start] = 0
        heapq.heappush(q, (0, start))
        while q:
            # Why a priority queue (heapq): it always yields the unvisited
            # node with the smallest tentative distance.
            dist, now = heapq.heappop(q)
            # Skip the node if it was already finalized with a shorter distance.
            if distance[now] < dist:
                continue
            # Examine every node adjacent to the current one.
            for nxt in graph[now]:
                cost = dist + nxt[1]
                # Relax: update if the route through the current node is shorter.
                if cost < distance[nxt[0]]:
                    distance[nxt[0]] = cost
                    heapq.heappush(q, (cost, nxt[0]))

    dijksta(1)
    # Count the towns within distance K.
    for i in range(1, N + 1):
        if distance[i] <= K:
            # print(i, distance[i])
            answer += 1
    return answer

print(solution(5, [[1, 2, 1], [2, 3, 3], [5, 2, 2], [1, 4, 2], [5, 3, 1], [5, 4, 2]], 3))
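
# A hedged add-on sketch, separate from the solution above: the same
# relaxation loop can also record each node's predecessor, which lets you
# reconstruct the shortest path itself rather than just its length.
# The function name and signature below are illustrative, not from the task.
def dijkstra_with_path(n, edges, start, goal):
    inf = int(1e9)
    graph = [[] for _ in range(n + 1)]
    for a, b, c in edges:
        graph[a].append((b, c))
        graph[b].append((a, c))
    dist = [inf] * (n + 1)
    prev = [None] * (n + 1)  # predecessor of each node on its shortest path
    dist[start] = 0
    q = [(0, start)]
    while q:
        d, now = heapq.heappop(q)
        if dist[now] < d:
            continue
        for node, w in graph[now]:
            if d + w < dist[node]:
                dist[node] = d + w
                prev[node] = now
                heapq.heappush(q, (d + w, node))
    # Walk the predecessor chain backwards from the goal.
    path, node = [], goal
    while node is not None:
        path.append(node)
        node = prev[node]
    return dist[goal], path[::-1]

print(dijkstra_with_path(5, [[1, 2, 1], [2, 3, 3], [5, 2, 2], [1, 4, 2], [5, 3, 1], [5, 4, 2]], 1, 3))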
|
[
"import heapq\n\n\n\ndef solution(N, road, K):\n answer = 0\n #무한을 의미하는 값으로 10억을 설정.\n INF=int(1e9)\n\n #최단거리 테이블 \n distance=[INF]*(N+1)\n\n #그래프\n graph=[[] for _ in range(N+1)]\n\n #그래프 채우기\n for a,b,c in road:\n graph[a].append((b,c))\n #이거 안해서 처음에 틀림\n graph[b].append((a,c))\n\n #다익스트라 함수 구현\n def dijksta(start): \n q=[]\n distance[start]=0\n\n heapq.heappush(q, (0,start))\n\n while q:\n #우선순위 큐(heapq)를 쓰는 이유\n #방문하지 않는 노드 중 최단 거리가 가장 짧은 노드를 선택할 수 있다.\n dist, now= heapq.heappop(q)\n\n #현재 노드가 이미 처리된 적이있는 노드면 무시\n if distance[now] < dist:\n continue\n\n #현재 노드와 연결된 다른 인접한 노드들을 검사\n for next in graph[now]:\n cost= dist+next[1]\n #현재 노드를 거쳐서, 다른 노드로 이동하는 거리가 더 짧을 경우 갱신\n if cost < distance[next[0]]:\n distance[next[0]]=cost\n heapq.heappush(q,(cost, next[0]))\n\n \n dijksta(1)\n \n \n #K보다 작은 동네 찾기\n for i in range(1,N+1):\n if distance[i]<=K:\n # print(i, distance[i])\n answer+=1\n\n\n\n return answer\n\nprint(solution(5,[[1,2,1],[2,3,3],[5,2,2],[1,4,2],[5,3,1],[5,4,2]], 3))",
"import heapq\n\n\ndef solution(N, road, K):\n answer = 0\n INF = int(1000000000.0)\n distance = [INF] * (N + 1)\n graph = [[] for _ in range(N + 1)]\n for a, b, c in road:\n graph[a].append((b, c))\n graph[b].append((a, c))\n\n def dijksta(start):\n q = []\n distance[start] = 0\n heapq.heappush(q, (0, start))\n while q:\n dist, now = heapq.heappop(q)\n if distance[now] < dist:\n continue\n for next in graph[now]:\n cost = dist + next[1]\n if cost < distance[next[0]]:\n distance[next[0]] = cost\n heapq.heappush(q, (cost, next[0]))\n dijksta(1)\n for i in range(1, N + 1):\n if distance[i] <= K:\n answer += 1\n return answer\n\n\nprint(solution(5, [[1, 2, 1], [2, 3, 3], [5, 2, 2], [1, 4, 2], [5, 3, 1], [\n 5, 4, 2]], 3))\n",
"<import token>\n\n\ndef solution(N, road, K):\n answer = 0\n INF = int(1000000000.0)\n distance = [INF] * (N + 1)\n graph = [[] for _ in range(N + 1)]\n for a, b, c in road:\n graph[a].append((b, c))\n graph[b].append((a, c))\n\n def dijksta(start):\n q = []\n distance[start] = 0\n heapq.heappush(q, (0, start))\n while q:\n dist, now = heapq.heappop(q)\n if distance[now] < dist:\n continue\n for next in graph[now]:\n cost = dist + next[1]\n if cost < distance[next[0]]:\n distance[next[0]] = cost\n heapq.heappush(q, (cost, next[0]))\n dijksta(1)\n for i in range(1, N + 1):\n if distance[i] <= K:\n answer += 1\n return answer\n\n\nprint(solution(5, [[1, 2, 1], [2, 3, 3], [5, 2, 2], [1, 4, 2], [5, 3, 1], [\n 5, 4, 2]], 3))\n",
"<import token>\n\n\ndef solution(N, road, K):\n answer = 0\n INF = int(1000000000.0)\n distance = [INF] * (N + 1)\n graph = [[] for _ in range(N + 1)]\n for a, b, c in road:\n graph[a].append((b, c))\n graph[b].append((a, c))\n\n def dijksta(start):\n q = []\n distance[start] = 0\n heapq.heappush(q, (0, start))\n while q:\n dist, now = heapq.heappop(q)\n if distance[now] < dist:\n continue\n for next in graph[now]:\n cost = dist + next[1]\n if cost < distance[next[0]]:\n distance[next[0]] = cost\n heapq.heappush(q, (cost, next[0]))\n dijksta(1)\n for i in range(1, N + 1):\n if distance[i] <= K:\n answer += 1\n return answer\n\n\n<code token>\n",
"<import token>\n<function token>\n<code token>\n"
] | false |
99,364 |
fca40cd3d4312577eac1d79f372a4189c00bc3f8
|
from django.urls import path
import docapp.views as docapp_views
app_name = 'docapp'
urlpatterns = [
path('', docapp_views.index, name='index'),
]
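
# A hedged sketch of how this urlconf is typically wired in (assumed
# project-level urls.py, not part of this file): including it under a
# prefix picks up app_name as the namespace, so the route can be reversed
# as 'docapp:index' or used in templates via {% url 'docapp:index' %}.
#
#     from django.urls import include, path
#
#     urlpatterns = [
#         path('docs/', include('docapp.urls')),
#     ]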
|
[
"from django.urls import path\nimport docapp.views as docapp_views\n\napp_name = 'docapp'\n\nurlpatterns = [\n path('', docapp_views.index, name='index'),\n]\n",
"from django.urls import path\nimport docapp.views as docapp_views\napp_name = 'docapp'\nurlpatterns = [path('', docapp_views.index, name='index')]\n",
"<import token>\napp_name = 'docapp'\nurlpatterns = [path('', docapp_views.index, name='index')]\n",
"<import token>\n<assignment token>\n"
] | false |
99,365 |
2426bb5ff132db530ed70ff177ab334d06d77cb4
|
'''
 - Fall 2020 term, Distributed and Parallel Programming
 - Chapter 8: Multiprocessing
 - Using multiprocessing.Pipe()
 - Lee Hoseop
 - Creates a pipe for inter-process communication.
   A pipe is a FIFO structure.
   Note: multiprocessing.Pipe() is actually two-way (duplex) by default;
   pass duplex=False to get a one-way pipe, in which case two pipes are
   needed for two-way communication.
   The pipe is cleaned up when the processes terminate.
 - Unlike plain OS-level anonymous pipes, this also works on Windows.
'''
import multiprocessing
##
# name: ChildProcess(multiprocessing.Process)
# use:  ChildProcess(conn=write-end connection of the pipe)
# role: writes a message into the pipe
# info: custom Process subclass
class ChildProcess(multiprocessing.Process):
def __init__(self, conn):
super(ChildProcess, self).__init__()
self.conn = conn
def run(self):
print("Attempting to pipein to pipe")
self.conn.send("My name is Hoseop")
# 파이프 close
self.conn.close()
def main():
conn1, conn2 = multiprocessing.Pipe()
child = ChildProcess(conn2)
child.start()
child.join()
pipContent = conn1.recv()
print("Pipe: {}".format(pipContent))
conn1.close()
if __name__ == '__main__':
main()
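
# A hedged companion sketch (illustrative names, not part of the original
# exercise): multiprocessing.Pipe() is duplex by default, so both ends of
# one pipe can send and receive without creating a second pipe. The worker
# is defined at module level so it can be pickled under the spawn method.
def echo(conn):
    msg = conn.recv()        # read the parent's message
    conn.send(msg.upper())   # reply over the same connection
    conn.close()


def demo_duplex():
    parent_conn, child_conn = multiprocessing.Pipe()  # duplex=True by default
    p = multiprocessing.Process(target=echo, args=(child_conn,))
    p.start()
    parent_conn.send('hello from parent')
    print('Duplex reply: {}'.format(parent_conn.recv()))
    p.join()
    parent_conn.close()


if __name__ == '__main__':
    demo_duplex()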
|
[
"'''\n - 20년 가을학기 분산병렬 프로그래밍\n - 8장 멀티프로세싱\n - multiprocessing.pipe() 사용하기\n - 이호섭\n - 프로세스간 통신을 위해 이름있는 파이프 생성\n 이름있는 파이프도 FIFO 구조이다.\n 단방향 통신이며 쌍방향(duplex)을 위해선 2개의 파이프를 만들어야함\n 프로세스가 종료되면 제거됨\n - 익명 파이프와는 다르게 윈도우에서 실행 가능\n'''\n\nimport multiprocessing\n\n\n##\n# name: ChildProcess(multiprocessing.Process)\n# use: ChildProcess(conn=쓰기 파이프)\n# role: 네임드 파이프에 무언가를 작성\n# info: Custom Process\nclass ChildProcess(multiprocessing.Process):\n\n def __init__(self, conn):\n super(ChildProcess, self).__init__()\n self.conn = conn\n\n def run(self):\n print(\"Attempting to pipein to pipe\")\n self.conn.send(\"My name is Hoseop\")\n # 파이프 close\n self.conn.close()\n\n\ndef main():\n conn1, conn2 = multiprocessing.Pipe()\n\n child = ChildProcess(conn2)\n child.start()\n child.join()\n\n pipContent = conn1.recv()\n print(\"Pipe: {}\".format(pipContent))\n\n conn1.close()\n\n\nif __name__ == '__main__':\n main()",
"<docstring token>\nimport multiprocessing\n\n\nclass ChildProcess(multiprocessing.Process):\n\n def __init__(self, conn):\n super(ChildProcess, self).__init__()\n self.conn = conn\n\n def run(self):\n print('Attempting to pipein to pipe')\n self.conn.send('My name is Hoseop')\n self.conn.close()\n\n\ndef main():\n conn1, conn2 = multiprocessing.Pipe()\n child = ChildProcess(conn2)\n child.start()\n child.join()\n pipContent = conn1.recv()\n print('Pipe: {}'.format(pipContent))\n conn1.close()\n\n\nif __name__ == '__main__':\n main()\n",
"<docstring token>\n<import token>\n\n\nclass ChildProcess(multiprocessing.Process):\n\n def __init__(self, conn):\n super(ChildProcess, self).__init__()\n self.conn = conn\n\n def run(self):\n print('Attempting to pipein to pipe')\n self.conn.send('My name is Hoseop')\n self.conn.close()\n\n\ndef main():\n conn1, conn2 = multiprocessing.Pipe()\n child = ChildProcess(conn2)\n child.start()\n child.join()\n pipContent = conn1.recv()\n print('Pipe: {}'.format(pipContent))\n conn1.close()\n\n\nif __name__ == '__main__':\n main()\n",
"<docstring token>\n<import token>\n\n\nclass ChildProcess(multiprocessing.Process):\n\n def __init__(self, conn):\n super(ChildProcess, self).__init__()\n self.conn = conn\n\n def run(self):\n print('Attempting to pipein to pipe')\n self.conn.send('My name is Hoseop')\n self.conn.close()\n\n\ndef main():\n conn1, conn2 = multiprocessing.Pipe()\n child = ChildProcess(conn2)\n child.start()\n child.join()\n pipContent = conn1.recv()\n print('Pipe: {}'.format(pipContent))\n conn1.close()\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass ChildProcess(multiprocessing.Process):\n\n def __init__(self, conn):\n super(ChildProcess, self).__init__()\n self.conn = conn\n\n def run(self):\n print('Attempting to pipein to pipe')\n self.conn.send('My name is Hoseop')\n self.conn.close()\n\n\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass ChildProcess(multiprocessing.Process):\n\n def __init__(self, conn):\n super(ChildProcess, self).__init__()\n self.conn = conn\n <function token>\n\n\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass ChildProcess(multiprocessing.Process):\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<class token>\n<function token>\n<code token>\n"
] | false |
99,366 |
40cac895dcf9e091cfe6be9f92479cf9a253c395
|
from django.db import models


# Create your models here.
class City(models.Model):
    label = models.CharField(max_length=250)

    def __str__(self):
        return self.label


class Area(models.Model):
    label = models.CharField(max_length=250)
    city_label = models.ForeignKey(City, on_delete=models.CASCADE)

    def __str__(self):
        return self.label + "\t" + self.city_label.label


class Veges(models.Model):
    label = models.CharField(max_length=250)
    price = models.IntegerField()
    img = models.CharField(max_length=250)

    def __str__(self):
        return str(self.price) + "\t" + self.label


class Fruits(models.Model):
    label = models.CharField(max_length=250)
    price = models.IntegerField()
    img = models.CharField(max_length=250)

    def __str__(self):
        return self.label


class farmDesciption(models.Model):
    name = models.CharField(max_length=250)
    description = models.CharField(max_length=500)
    age = models.CharField(max_length=25)
    contcNum = models.IntegerField()
    land_owner = models.IntegerField()
    area = models.CharField(max_length=250)
    img = models.CharField(max_length=250)

    def __str__(self):
        return self.name


class fruitFarm(models.Model):
    fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)
    farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)

    def __str__(self):
        return self.farm.name + self.farm.description + str(self.farm.age) + str(self.farm.contcNum) + self.farm.area + self.farm.img


class vegeFarm(models.Model):
    vege = models.ForeignKey(Veges, on_delete=models.CASCADE)
    farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)

    def __str__(self):
        return self.farm.name + "\t" + self.farm.description + "\t" + str(self.farm.age) + "\t" + str(self.farm.contcNum) + "\t" + self.farm.area + "\t" + self.farm.img


class nutrientsVeges(models.Model):
    vege = models.ForeignKey(Veges, on_delete=models.CASCADE)
    carbohydrates = models.FloatField()
    proteins = models.FloatField()
    energy = models.FloatField()
    fats = models.FloatField()
    sugar = models.FloatField()
    potassium = models.FloatField()
    iron = models.FloatField()
    calcium = models.FloatField()

    def __str__(self):
        return self.vege.label


class registeredUser(models.Model):
    userName = models.CharField(max_length=400)
    area = models.ForeignKey(Area, on_delete=models.CASCADE)
    address = models.CharField(max_length=400)
    contactNum = models.CharField(max_length=400)
    email = models.CharField(max_length=250)

    def __str__(self):
        return self.userName + "\t" + self.area.label + "\t"


class orderDetails(models.Model):
    user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)
    orderID = models.IntegerField()
    status = models.CharField(max_length=10)
    cost = models.FloatField()
    odate = models.DateField()

    def __str__(self):
        return self.user.userName + "\t" + self.status + "\t" + str(self.cost)


class fruitOrder(models.Model):
    orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)
    fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)

    def __str__(self):
        return self.fruit.label + "\t" + str(self.orderID.orderID)


class vegeOrder(models.Model):
    orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)
    vege = models.ForeignKey(Veges, on_delete=models.CASCADE)


class nutrientsFruits(models.Model):
    fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)
    carbohydrates = models.FloatField()
    proteins = models.FloatField()
    energy = models.FloatField()
    fats = models.FloatField()
    sugar = models.FloatField()
    potassium = models.FloatField()
    iron = models.FloatField()
    calcium = models.FloatField()

    def __str__(self):
        return self.fruit.label
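

# A minimal usage sketch (hypothetical values; `_example_usage` is not part
# of the original app). It is wrapped in a function so importing models.py
# stays side-effect free, and it assumes migrations for these models exist.
def _example_usage():
    import datetime
    city = City.objects.create(label="Pune")
    area = Area.objects.create(label="Kothrud", city_label=city)
    user = registeredUser.objects.create(
        userName="asha", area=area, address="12 Main Rd",
        contactNum="9999999999", email="asha@example.com")
    orderDetails.objects.create(
        user=user, orderID=1, status="placed", cost=120.0,
        odate=datetime.date.today())
    # Traverse the ForeignKey chain in reverse: orders placed from one area.
    return orderDetails.objects.filter(user__area=area)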
|
[
"from django.db import models\n\n# Create your models here.\nclass City(models.Model):\n label=models.CharField(max_length=250)\n\n def __str__(self):\n return self.label\n\n\nclass Area(models.Model):\n label=models.CharField(max_length=250)\n city_label=models.ForeignKey(City,on_delete=models.CASCADE)\n def __str__(self):\n return self.label+\"\\t\"+self.city_label.label\n\nclass Veges(models.Model):\n label=models.CharField(max_length=250)\n price=models.IntegerField()\n img=models.CharField(max_length=250)\n def __str__(self):\n return str(self.price)+\"\\t\"+self.label\n\nclass Fruits(models.Model):\n label=models.CharField(max_length=250)\n price=models.IntegerField()\n img=models.CharField(max_length=250)\n def __str__(self):\n return self.label\nclass farmDesciption(models.Model):\n name=models.CharField(max_length=250)\n description=models.CharField(max_length=500)\n age=models.CharField(max_length=25)\n contcNum=models.IntegerField()\n land_owner=models.IntegerField()\n area=models.CharField(max_length=250)\n img=models.CharField(max_length=250)\n def __str__(self):\n return self.name\n\nclass fruitFarm(models.Model):\n fruit=models.ForeignKey(Fruits,on_delete=models.CASCADE)\n farm=models.ForeignKey(farmDesciption,on_delete=models.CASCADE)\n def __str__(self):\n return self.farm.name+self.farm.description+str(self.farm.age)+str(self.farm.contcNum)+self.farm.area+self.farm.img\n\nclass vegeFarm(models.Model):\n vege=models.ForeignKey(Veges,on_delete=models.CASCADE)\n farm=models.ForeignKey(farmDesciption,on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name+\"\\t\"+self.farm.description+\"\\t\"+str(self.farm.age)+\"\\t\"+str(self.farm.contcNum)+\"\\t\"+self.farm.area+\"\\t\"+self.farm.img\n\nclass nutrientsVeges(models.Model):\n vege=models.ForeignKey(Veges,on_delete=models.CASCADE)\n carbohydrates=models.FloatField()\n proteins=models.FloatField()\n energy=models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n def __str__(self):\n return self.vege.label\n\nclass registeredUser(models.Model):\n userName=models.CharField(max_length=400)\n area=models.ForeignKey(Area,on_delete=models.CASCADE)\n address=models.CharField(max_length=400)\n contactNum=models.CharField(max_length=400)\n email=models.CharField(max_length=250)\n def __str__(self):\n return self.userName+\"\\t\"+self.area.label+\"\\t\"\n\nclass orderDetails(models.Model):\n user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)\n orderID = models.IntegerField()\n status = models.CharField(max_length=10)\n cost = models.FloatField()\n odate = models.DateField()\n def __str__(self):\n return self.user.userName+\"\\t\"+self.status+\"\\t\"+str(self.cost)\n\nclass fruitOrder(models.Model):\n orderID=models.ForeignKey(orderDetails,on_delete=models.CASCADE)\n fruit=models.ForeignKey(Fruits,on_delete=models.CASCADE)\n def __str__(self):\n return self.fruit.label+\"\\t\"+str(self.orderID.orderID)\n\nclass vegeOrder(models.Model):\n orderID=models.ForeignKey(orderDetails,on_delete=models.CASCADE)\n vege=models.ForeignKey(Veges,on_delete=models.CASCADE)\nclass nutrientsFruits(models.Model):\n fruit=models.ForeignKey(Fruits,on_delete=models.CASCADE)\n carbohydrates=models.FloatField()\n proteins=models.FloatField()\n energy=models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = 
models.FloatField()\n def __str__(self):\n return self.fruit.label",
"from django.db import models\n\n\nclass City(models.Model):\n label = models.CharField(max_length=250)\n\n def __str__(self):\n return self.label\n\n\nclass Area(models.Model):\n label = models.CharField(max_length=250)\n city_label = models.ForeignKey(City, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.label + '\\t' + self.city_label.label\n\n\nclass Veges(models.Model):\n label = models.CharField(max_length=250)\n price = models.IntegerField()\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return str(self.price) + '\\t' + self.label\n\n\nclass Fruits(models.Model):\n label = models.CharField(max_length=250)\n price = models.IntegerField()\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return self.label\n\n\nclass farmDesciption(models.Model):\n name = models.CharField(max_length=250)\n description = models.CharField(max_length=500)\n age = models.CharField(max_length=25)\n contcNum = models.IntegerField()\n land_owner = models.IntegerField()\n area = models.CharField(max_length=250)\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return self.name\n\n\nclass fruitFarm(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + self.farm.description + str(self.farm.age\n ) + str(self.farm.contcNum) + self.farm.area + self.farm.img\n\n\nclass vegeFarm(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + '\\t' + self.farm.description + '\\t' + str(self\n .farm.age) + '\\t' + str(self.farm.contcNum\n ) + '\\t' + self.farm.area + '\\t' + self.farm.img\n\n\nclass nutrientsVeges(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.vege.label\n\n\nclass registeredUser(models.Model):\n userName = models.CharField(max_length=400)\n area = models.ForeignKey(Area, on_delete=models.CASCADE)\n address = models.CharField(max_length=400)\n contactNum = models.CharField(max_length=400)\n email = models.CharField(max_length=250)\n\n def __str__(self):\n return self.userName + '\\t' + self.area.label + '\\t'\n\n\nclass orderDetails(models.Model):\n user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)\n orderID = models.IntegerField()\n status = models.CharField(max_length=10)\n cost = models.FloatField()\n odate = models.DateField()\n\n def __str__(self):\n return self.user.userName + '\\t' + self.status + '\\t' + str(self.cost)\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = 
models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n\n\nclass City(models.Model):\n label = models.CharField(max_length=250)\n\n def __str__(self):\n return self.label\n\n\nclass Area(models.Model):\n label = models.CharField(max_length=250)\n city_label = models.ForeignKey(City, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.label + '\\t' + self.city_label.label\n\n\nclass Veges(models.Model):\n label = models.CharField(max_length=250)\n price = models.IntegerField()\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return str(self.price) + '\\t' + self.label\n\n\nclass Fruits(models.Model):\n label = models.CharField(max_length=250)\n price = models.IntegerField()\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return self.label\n\n\nclass farmDesciption(models.Model):\n name = models.CharField(max_length=250)\n description = models.CharField(max_length=500)\n age = models.CharField(max_length=25)\n contcNum = models.IntegerField()\n land_owner = models.IntegerField()\n area = models.CharField(max_length=250)\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return self.name\n\n\nclass fruitFarm(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + self.farm.description + str(self.farm.age\n ) + str(self.farm.contcNum) + self.farm.area + self.farm.img\n\n\nclass vegeFarm(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + '\\t' + self.farm.description + '\\t' + str(self\n .farm.age) + '\\t' + str(self.farm.contcNum\n ) + '\\t' + self.farm.area + '\\t' + self.farm.img\n\n\nclass nutrientsVeges(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.vege.label\n\n\nclass registeredUser(models.Model):\n userName = models.CharField(max_length=400)\n area = models.ForeignKey(Area, on_delete=models.CASCADE)\n address = models.CharField(max_length=400)\n contactNum = models.CharField(max_length=400)\n email = models.CharField(max_length=250)\n\n def __str__(self):\n return self.userName + '\\t' + self.area.label + '\\t'\n\n\nclass orderDetails(models.Model):\n user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)\n orderID = models.IntegerField()\n status = models.CharField(max_length=10)\n cost = models.FloatField()\n odate = models.DateField()\n\n def __str__(self):\n return self.user.userName + '\\t' + self.status + '\\t' + str(self.cost)\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = 
models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n\n\nclass City(models.Model):\n <assignment token>\n\n def __str__(self):\n return self.label\n\n\nclass Area(models.Model):\n label = models.CharField(max_length=250)\n city_label = models.ForeignKey(City, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.label + '\\t' + self.city_label.label\n\n\nclass Veges(models.Model):\n label = models.CharField(max_length=250)\n price = models.IntegerField()\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return str(self.price) + '\\t' + self.label\n\n\nclass Fruits(models.Model):\n label = models.CharField(max_length=250)\n price = models.IntegerField()\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return self.label\n\n\nclass farmDesciption(models.Model):\n name = models.CharField(max_length=250)\n description = models.CharField(max_length=500)\n age = models.CharField(max_length=25)\n contcNum = models.IntegerField()\n land_owner = models.IntegerField()\n area = models.CharField(max_length=250)\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return self.name\n\n\nclass fruitFarm(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + self.farm.description + str(self.farm.age\n ) + str(self.farm.contcNum) + self.farm.area + self.farm.img\n\n\nclass vegeFarm(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + '\\t' + self.farm.description + '\\t' + str(self\n .farm.age) + '\\t' + str(self.farm.contcNum\n ) + '\\t' + self.farm.area + '\\t' + self.farm.img\n\n\nclass nutrientsVeges(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.vege.label\n\n\nclass registeredUser(models.Model):\n userName = models.CharField(max_length=400)\n area = models.ForeignKey(Area, on_delete=models.CASCADE)\n address = models.CharField(max_length=400)\n contactNum = models.CharField(max_length=400)\n email = models.CharField(max_length=250)\n\n def __str__(self):\n return self.userName + '\\t' + self.area.label + '\\t'\n\n\nclass orderDetails(models.Model):\n user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)\n orderID = models.IntegerField()\n status = models.CharField(max_length=10)\n cost = models.FloatField()\n odate = models.DateField()\n\n def __str__(self):\n return self.user.userName + '\\t' + self.status + '\\t' + str(self.cost)\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n 
sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n\n\nclass City(models.Model):\n <assignment token>\n <function token>\n\n\nclass Area(models.Model):\n label = models.CharField(max_length=250)\n city_label = models.ForeignKey(City, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.label + '\\t' + self.city_label.label\n\n\nclass Veges(models.Model):\n label = models.CharField(max_length=250)\n price = models.IntegerField()\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return str(self.price) + '\\t' + self.label\n\n\nclass Fruits(models.Model):\n label = models.CharField(max_length=250)\n price = models.IntegerField()\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return self.label\n\n\nclass farmDesciption(models.Model):\n name = models.CharField(max_length=250)\n description = models.CharField(max_length=500)\n age = models.CharField(max_length=25)\n contcNum = models.IntegerField()\n land_owner = models.IntegerField()\n area = models.CharField(max_length=250)\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return self.name\n\n\nclass fruitFarm(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + self.farm.description + str(self.farm.age\n ) + str(self.farm.contcNum) + self.farm.area + self.farm.img\n\n\nclass vegeFarm(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + '\\t' + self.farm.description + '\\t' + str(self\n .farm.age) + '\\t' + str(self.farm.contcNum\n ) + '\\t' + self.farm.area + '\\t' + self.farm.img\n\n\nclass nutrientsVeges(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.vege.label\n\n\nclass registeredUser(models.Model):\n userName = models.CharField(max_length=400)\n area = models.ForeignKey(Area, on_delete=models.CASCADE)\n address = models.CharField(max_length=400)\n contactNum = models.CharField(max_length=400)\n email = models.CharField(max_length=250)\n\n def __str__(self):\n return self.userName + '\\t' + self.area.label + '\\t'\n\n\nclass orderDetails(models.Model):\n user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)\n orderID = models.IntegerField()\n status = models.CharField(max_length=10)\n cost = models.FloatField()\n odate = models.DateField()\n\n def __str__(self):\n return self.user.userName + '\\t' + self.status + '\\t' + str(self.cost)\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = 
models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n\n\nclass Area(models.Model):\n label = models.CharField(max_length=250)\n city_label = models.ForeignKey(City, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.label + '\\t' + self.city_label.label\n\n\nclass Veges(models.Model):\n label = models.CharField(max_length=250)\n price = models.IntegerField()\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return str(self.price) + '\\t' + self.label\n\n\nclass Fruits(models.Model):\n label = models.CharField(max_length=250)\n price = models.IntegerField()\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return self.label\n\n\nclass farmDesciption(models.Model):\n name = models.CharField(max_length=250)\n description = models.CharField(max_length=500)\n age = models.CharField(max_length=25)\n contcNum = models.IntegerField()\n land_owner = models.IntegerField()\n area = models.CharField(max_length=250)\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return self.name\n\n\nclass fruitFarm(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + self.farm.description + str(self.farm.age\n ) + str(self.farm.contcNum) + self.farm.area + self.farm.img\n\n\nclass vegeFarm(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + '\\t' + self.farm.description + '\\t' + str(self\n .farm.age) + '\\t' + str(self.farm.contcNum\n ) + '\\t' + self.farm.area + '\\t' + self.farm.img\n\n\nclass nutrientsVeges(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.vege.label\n\n\nclass registeredUser(models.Model):\n userName = models.CharField(max_length=400)\n area = models.ForeignKey(Area, on_delete=models.CASCADE)\n address = models.CharField(max_length=400)\n contactNum = models.CharField(max_length=400)\n email = models.CharField(max_length=250)\n\n def __str__(self):\n return self.userName + '\\t' + self.area.label + '\\t'\n\n\nclass orderDetails(models.Model):\n user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)\n orderID = models.IntegerField()\n status = models.CharField(max_length=10)\n cost = models.FloatField()\n odate = models.DateField()\n\n def __str__(self):\n return self.user.userName + '\\t' + self.status + '\\t' + str(self.cost)\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = 
models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n\n\nclass Area(models.Model):\n <assignment token>\n <assignment token>\n\n def __str__(self):\n return self.label + '\\t' + self.city_label.label\n\n\nclass Veges(models.Model):\n label = models.CharField(max_length=250)\n price = models.IntegerField()\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return str(self.price) + '\\t' + self.label\n\n\nclass Fruits(models.Model):\n label = models.CharField(max_length=250)\n price = models.IntegerField()\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return self.label\n\n\nclass farmDesciption(models.Model):\n name = models.CharField(max_length=250)\n description = models.CharField(max_length=500)\n age = models.CharField(max_length=25)\n contcNum = models.IntegerField()\n land_owner = models.IntegerField()\n area = models.CharField(max_length=250)\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return self.name\n\n\nclass fruitFarm(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + self.farm.description + str(self.farm.age\n ) + str(self.farm.contcNum) + self.farm.area + self.farm.img\n\n\nclass vegeFarm(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + '\\t' + self.farm.description + '\\t' + str(self\n .farm.age) + '\\t' + str(self.farm.contcNum\n ) + '\\t' + self.farm.area + '\\t' + self.farm.img\n\n\nclass nutrientsVeges(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.vege.label\n\n\nclass registeredUser(models.Model):\n userName = models.CharField(max_length=400)\n area = models.ForeignKey(Area, on_delete=models.CASCADE)\n address = models.CharField(max_length=400)\n contactNum = models.CharField(max_length=400)\n email = models.CharField(max_length=250)\n\n def __str__(self):\n return self.userName + '\\t' + self.area.label + '\\t'\n\n\nclass orderDetails(models.Model):\n user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)\n orderID = models.IntegerField()\n status = models.CharField(max_length=10)\n cost = models.FloatField()\n odate = models.DateField()\n\n def __str__(self):\n return self.user.userName + '\\t' + self.status + '\\t' + str(self.cost)\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n 
return self.fruit.label\n",
"<import token>\n<class token>\n\n\nclass Area(models.Model):\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass Veges(models.Model):\n label = models.CharField(max_length=250)\n price = models.IntegerField()\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return str(self.price) + '\\t' + self.label\n\n\nclass Fruits(models.Model):\n label = models.CharField(max_length=250)\n price = models.IntegerField()\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return self.label\n\n\nclass farmDesciption(models.Model):\n name = models.CharField(max_length=250)\n description = models.CharField(max_length=500)\n age = models.CharField(max_length=25)\n contcNum = models.IntegerField()\n land_owner = models.IntegerField()\n area = models.CharField(max_length=250)\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return self.name\n\n\nclass fruitFarm(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + self.farm.description + str(self.farm.age\n ) + str(self.farm.contcNum) + self.farm.area + self.farm.img\n\n\nclass vegeFarm(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + '\\t' + self.farm.description + '\\t' + str(self\n .farm.age) + '\\t' + str(self.farm.contcNum\n ) + '\\t' + self.farm.area + '\\t' + self.farm.img\n\n\nclass nutrientsVeges(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.vege.label\n\n\nclass registeredUser(models.Model):\n userName = models.CharField(max_length=400)\n area = models.ForeignKey(Area, on_delete=models.CASCADE)\n address = models.CharField(max_length=400)\n contactNum = models.CharField(max_length=400)\n email = models.CharField(max_length=250)\n\n def __str__(self):\n return self.userName + '\\t' + self.area.label + '\\t'\n\n\nclass orderDetails(models.Model):\n user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)\n orderID = models.IntegerField()\n status = models.CharField(max_length=10)\n cost = models.FloatField()\n odate = models.DateField()\n\n def __str__(self):\n return self.user.userName + '\\t' + self.status + '\\t' + str(self.cost)\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n\n\nclass Veges(models.Model):\n label = models.CharField(max_length=250)\n price = models.IntegerField()\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return str(self.price) + '\\t' + self.label\n\n\nclass Fruits(models.Model):\n label = models.CharField(max_length=250)\n price = models.IntegerField()\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return self.label\n\n\nclass farmDesciption(models.Model):\n name = models.CharField(max_length=250)\n description = models.CharField(max_length=500)\n age = models.CharField(max_length=25)\n contcNum = models.IntegerField()\n land_owner = models.IntegerField()\n area = models.CharField(max_length=250)\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return self.name\n\n\nclass fruitFarm(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + self.farm.description + str(self.farm.age\n ) + str(self.farm.contcNum) + self.farm.area + self.farm.img\n\n\nclass vegeFarm(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + '\\t' + self.farm.description + '\\t' + str(self\n .farm.age) + '\\t' + str(self.farm.contcNum\n ) + '\\t' + self.farm.area + '\\t' + self.farm.img\n\n\nclass nutrientsVeges(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.vege.label\n\n\nclass registeredUser(models.Model):\n userName = models.CharField(max_length=400)\n area = models.ForeignKey(Area, on_delete=models.CASCADE)\n address = models.CharField(max_length=400)\n contactNum = models.CharField(max_length=400)\n email = models.CharField(max_length=250)\n\n def __str__(self):\n return self.userName + '\\t' + self.area.label + '\\t'\n\n\nclass orderDetails(models.Model):\n user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)\n orderID = models.IntegerField()\n status = models.CharField(max_length=10)\n cost = models.FloatField()\n odate = models.DateField()\n\n def __str__(self):\n return self.user.userName + '\\t' + self.status + '\\t' + str(self.cost)\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n\n\nclass Veges(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __str__(self):\n return str(self.price) + '\\t' + self.label\n\n\nclass Fruits(models.Model):\n label = models.CharField(max_length=250)\n price = models.IntegerField()\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return self.label\n\n\nclass farmDesciption(models.Model):\n name = models.CharField(max_length=250)\n description = models.CharField(max_length=500)\n age = models.CharField(max_length=25)\n contcNum = models.IntegerField()\n land_owner = models.IntegerField()\n area = models.CharField(max_length=250)\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return self.name\n\n\nclass fruitFarm(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + self.farm.description + str(self.farm.age\n ) + str(self.farm.contcNum) + self.farm.area + self.farm.img\n\n\nclass vegeFarm(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + '\\t' + self.farm.description + '\\t' + str(self\n .farm.age) + '\\t' + str(self.farm.contcNum\n ) + '\\t' + self.farm.area + '\\t' + self.farm.img\n\n\nclass nutrientsVeges(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.vege.label\n\n\nclass registeredUser(models.Model):\n userName = models.CharField(max_length=400)\n area = models.ForeignKey(Area, on_delete=models.CASCADE)\n address = models.CharField(max_length=400)\n contactNum = models.CharField(max_length=400)\n email = models.CharField(max_length=250)\n\n def __str__(self):\n return self.userName + '\\t' + self.area.label + '\\t'\n\n\nclass orderDetails(models.Model):\n user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)\n orderID = models.IntegerField()\n status = models.CharField(max_length=10)\n cost = models.FloatField()\n odate = models.DateField()\n\n def __str__(self):\n return self.user.userName + '\\t' + self.status + '\\t' + str(self.cost)\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n\n\nclass Veges(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass Fruits(models.Model):\n label = models.CharField(max_length=250)\n price = models.IntegerField()\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return self.label\n\n\nclass farmDesciption(models.Model):\n name = models.CharField(max_length=250)\n description = models.CharField(max_length=500)\n age = models.CharField(max_length=25)\n contcNum = models.IntegerField()\n land_owner = models.IntegerField()\n area = models.CharField(max_length=250)\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return self.name\n\n\nclass fruitFarm(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + self.farm.description + str(self.farm.age\n ) + str(self.farm.contcNum) + self.farm.area + self.farm.img\n\n\nclass vegeFarm(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + '\\t' + self.farm.description + '\\t' + str(self\n .farm.age) + '\\t' + str(self.farm.contcNum\n ) + '\\t' + self.farm.area + '\\t' + self.farm.img\n\n\nclass nutrientsVeges(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.vege.label\n\n\nclass registeredUser(models.Model):\n userName = models.CharField(max_length=400)\n area = models.ForeignKey(Area, on_delete=models.CASCADE)\n address = models.CharField(max_length=400)\n contactNum = models.CharField(max_length=400)\n email = models.CharField(max_length=250)\n\n def __str__(self):\n return self.userName + '\\t' + self.area.label + '\\t'\n\n\nclass orderDetails(models.Model):\n user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)\n orderID = models.IntegerField()\n status = models.CharField(max_length=10)\n cost = models.FloatField()\n odate = models.DateField()\n\n def __str__(self):\n return self.user.userName + '\\t' + self.status + '\\t' + str(self.cost)\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass Fruits(models.Model):\n label = models.CharField(max_length=250)\n price = models.IntegerField()\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return self.label\n\n\nclass farmDesciption(models.Model):\n name = models.CharField(max_length=250)\n description = models.CharField(max_length=500)\n age = models.CharField(max_length=25)\n contcNum = models.IntegerField()\n land_owner = models.IntegerField()\n area = models.CharField(max_length=250)\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return self.name\n\n\nclass fruitFarm(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + self.farm.description + str(self.farm.age\n ) + str(self.farm.contcNum) + self.farm.area + self.farm.img\n\n\nclass vegeFarm(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + '\\t' + self.farm.description + '\\t' + str(self\n .farm.age) + '\\t' + str(self.farm.contcNum\n ) + '\\t' + self.farm.area + '\\t' + self.farm.img\n\n\nclass nutrientsVeges(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.vege.label\n\n\nclass registeredUser(models.Model):\n userName = models.CharField(max_length=400)\n area = models.ForeignKey(Area, on_delete=models.CASCADE)\n address = models.CharField(max_length=400)\n contactNum = models.CharField(max_length=400)\n email = models.CharField(max_length=250)\n\n def __str__(self):\n return self.userName + '\\t' + self.area.label + '\\t'\n\n\nclass orderDetails(models.Model):\n user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)\n orderID = models.IntegerField()\n status = models.CharField(max_length=10)\n cost = models.FloatField()\n odate = models.DateField()\n\n def __str__(self):\n return self.user.userName + '\\t' + self.status + '\\t' + str(self.cost)\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass Fruits(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __str__(self):\n return self.label\n\n\nclass farmDesciption(models.Model):\n name = models.CharField(max_length=250)\n description = models.CharField(max_length=500)\n age = models.CharField(max_length=25)\n contcNum = models.IntegerField()\n land_owner = models.IntegerField()\n area = models.CharField(max_length=250)\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return self.name\n\n\nclass fruitFarm(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + self.farm.description + str(self.farm.age\n ) + str(self.farm.contcNum) + self.farm.area + self.farm.img\n\n\nclass vegeFarm(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + '\\t' + self.farm.description + '\\t' + str(self\n .farm.age) + '\\t' + str(self.farm.contcNum\n ) + '\\t' + self.farm.area + '\\t' + self.farm.img\n\n\nclass nutrientsVeges(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.vege.label\n\n\nclass registeredUser(models.Model):\n userName = models.CharField(max_length=400)\n area = models.ForeignKey(Area, on_delete=models.CASCADE)\n address = models.CharField(max_length=400)\n contactNum = models.CharField(max_length=400)\n email = models.CharField(max_length=250)\n\n def __str__(self):\n return self.userName + '\\t' + self.area.label + '\\t'\n\n\nclass orderDetails(models.Model):\n user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)\n orderID = models.IntegerField()\n status = models.CharField(max_length=10)\n cost = models.FloatField()\n odate = models.DateField()\n\n def __str__(self):\n return self.user.userName + '\\t' + self.status + '\\t' + str(self.cost)\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n<class token>\n\n\nclass Fruits(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass farmDesciption(models.Model):\n name = models.CharField(max_length=250)\n description = models.CharField(max_length=500)\n age = models.CharField(max_length=25)\n contcNum = models.IntegerField()\n land_owner = models.IntegerField()\n area = models.CharField(max_length=250)\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return self.name\n\n\nclass fruitFarm(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + self.farm.description + str(self.farm.age\n ) + str(self.farm.contcNum) + self.farm.area + self.farm.img\n\n\nclass vegeFarm(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + '\\t' + self.farm.description + '\\t' + str(self\n .farm.age) + '\\t' + str(self.farm.contcNum\n ) + '\\t' + self.farm.area + '\\t' + self.farm.img\n\n\nclass nutrientsVeges(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.vege.label\n\n\nclass registeredUser(models.Model):\n userName = models.CharField(max_length=400)\n area = models.ForeignKey(Area, on_delete=models.CASCADE)\n address = models.CharField(max_length=400)\n contactNum = models.CharField(max_length=400)\n email = models.CharField(max_length=250)\n\n def __str__(self):\n return self.userName + '\\t' + self.area.label + '\\t'\n\n\nclass orderDetails(models.Model):\n user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)\n orderID = models.IntegerField()\n status = models.CharField(max_length=10)\n cost = models.FloatField()\n odate = models.DateField()\n\n def __str__(self):\n return self.user.userName + '\\t' + self.status + '\\t' + str(self.cost)\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass farmDesciption(models.Model):\n name = models.CharField(max_length=250)\n description = models.CharField(max_length=500)\n age = models.CharField(max_length=25)\n contcNum = models.IntegerField()\n land_owner = models.IntegerField()\n area = models.CharField(max_length=250)\n img = models.CharField(max_length=250)\n\n def __str__(self):\n return self.name\n\n\nclass fruitFarm(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + self.farm.description + str(self.farm.age\n ) + str(self.farm.contcNum) + self.farm.area + self.farm.img\n\n\nclass vegeFarm(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + '\\t' + self.farm.description + '\\t' + str(self\n .farm.age) + '\\t' + str(self.farm.contcNum\n ) + '\\t' + self.farm.area + '\\t' + self.farm.img\n\n\nclass nutrientsVeges(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.vege.label\n\n\nclass registeredUser(models.Model):\n userName = models.CharField(max_length=400)\n area = models.ForeignKey(Area, on_delete=models.CASCADE)\n address = models.CharField(max_length=400)\n contactNum = models.CharField(max_length=400)\n email = models.CharField(max_length=250)\n\n def __str__(self):\n return self.userName + '\\t' + self.area.label + '\\t'\n\n\nclass orderDetails(models.Model):\n user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)\n orderID = models.IntegerField()\n status = models.CharField(max_length=10)\n cost = models.FloatField()\n odate = models.DateField()\n\n def __str__(self):\n return self.user.userName + '\\t' + self.status + '\\t' + str(self.cost)\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass farmDesciption(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __str__(self):\n return self.name\n\n\nclass fruitFarm(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + self.farm.description + str(self.farm.age\n ) + str(self.farm.contcNum) + self.farm.area + self.farm.img\n\n\nclass vegeFarm(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + '\\t' + self.farm.description + '\\t' + str(self\n .farm.age) + '\\t' + str(self.farm.contcNum\n ) + '\\t' + self.farm.area + '\\t' + self.farm.img\n\n\nclass nutrientsVeges(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.vege.label\n\n\nclass registeredUser(models.Model):\n userName = models.CharField(max_length=400)\n area = models.ForeignKey(Area, on_delete=models.CASCADE)\n address = models.CharField(max_length=400)\n contactNum = models.CharField(max_length=400)\n email = models.CharField(max_length=250)\n\n def __str__(self):\n return self.userName + '\\t' + self.area.label + '\\t'\n\n\nclass orderDetails(models.Model):\n user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)\n orderID = models.IntegerField()\n status = models.CharField(max_length=10)\n cost = models.FloatField()\n odate = models.DateField()\n\n def __str__(self):\n return self.user.userName + '\\t' + self.status + '\\t' + str(self.cost)\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass farmDesciption(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass fruitFarm(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + self.farm.description + str(self.farm.age\n ) + str(self.farm.contcNum) + self.farm.area + self.farm.img\n\n\nclass vegeFarm(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + '\\t' + self.farm.description + '\\t' + str(self\n .farm.age) + '\\t' + str(self.farm.contcNum\n ) + '\\t' + self.farm.area + '\\t' + self.farm.img\n\n\nclass nutrientsVeges(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.vege.label\n\n\nclass registeredUser(models.Model):\n userName = models.CharField(max_length=400)\n area = models.ForeignKey(Area, on_delete=models.CASCADE)\n address = models.CharField(max_length=400)\n contactNum = models.CharField(max_length=400)\n email = models.CharField(max_length=250)\n\n def __str__(self):\n return self.userName + '\\t' + self.area.label + '\\t'\n\n\nclass orderDetails(models.Model):\n user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)\n orderID = models.IntegerField()\n status = models.CharField(max_length=10)\n cost = models.FloatField()\n odate = models.DateField()\n\n def __str__(self):\n return self.user.userName + '\\t' + self.status + '\\t' + str(self.cost)\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass fruitFarm(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + self.farm.description + str(self.farm.age\n ) + str(self.farm.contcNum) + self.farm.area + self.farm.img\n\n\nclass vegeFarm(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + '\\t' + self.farm.description + '\\t' + str(self\n .farm.age) + '\\t' + str(self.farm.contcNum\n ) + '\\t' + self.farm.area + '\\t' + self.farm.img\n\n\nclass nutrientsVeges(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.vege.label\n\n\nclass registeredUser(models.Model):\n userName = models.CharField(max_length=400)\n area = models.ForeignKey(Area, on_delete=models.CASCADE)\n address = models.CharField(max_length=400)\n contactNum = models.CharField(max_length=400)\n email = models.CharField(max_length=250)\n\n def __str__(self):\n return self.userName + '\\t' + self.area.label + '\\t'\n\n\nclass orderDetails(models.Model):\n user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)\n orderID = models.IntegerField()\n status = models.CharField(max_length=10)\n cost = models.FloatField()\n odate = models.DateField()\n\n def __str__(self):\n return self.user.userName + '\\t' + self.status + '\\t' + str(self.cost)\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass fruitFarm(models.Model):\n <assignment token>\n <assignment token>\n\n def __str__(self):\n return self.farm.name + self.farm.description + str(self.farm.age\n ) + str(self.farm.contcNum) + self.farm.area + self.farm.img\n\n\nclass vegeFarm(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + '\\t' + self.farm.description + '\\t' + str(self\n .farm.age) + '\\t' + str(self.farm.contcNum\n ) + '\\t' + self.farm.area + '\\t' + self.farm.img\n\n\nclass nutrientsVeges(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.vege.label\n\n\nclass registeredUser(models.Model):\n userName = models.CharField(max_length=400)\n area = models.ForeignKey(Area, on_delete=models.CASCADE)\n address = models.CharField(max_length=400)\n contactNum = models.CharField(max_length=400)\n email = models.CharField(max_length=250)\n\n def __str__(self):\n return self.userName + '\\t' + self.area.label + '\\t'\n\n\nclass orderDetails(models.Model):\n user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)\n orderID = models.IntegerField()\n status = models.CharField(max_length=10)\n cost = models.FloatField()\n odate = models.DateField()\n\n def __str__(self):\n return self.user.userName + '\\t' + self.status + '\\t' + str(self.cost)\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass fruitFarm(models.Model):\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass vegeFarm(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + '\\t' + self.farm.description + '\\t' + str(self\n .farm.age) + '\\t' + str(self.farm.contcNum\n ) + '\\t' + self.farm.area + '\\t' + self.farm.img\n\n\nclass nutrientsVeges(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.vege.label\n\n\nclass registeredUser(models.Model):\n userName = models.CharField(max_length=400)\n area = models.ForeignKey(Area, on_delete=models.CASCADE)\n address = models.CharField(max_length=400)\n contactNum = models.CharField(max_length=400)\n email = models.CharField(max_length=250)\n\n def __str__(self):\n return self.userName + '\\t' + self.area.label + '\\t'\n\n\nclass orderDetails(models.Model):\n user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)\n orderID = models.IntegerField()\n status = models.CharField(max_length=10)\n cost = models.FloatField()\n odate = models.DateField()\n\n def __str__(self):\n return self.user.userName + '\\t' + self.status + '\\t' + str(self.cost)\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass vegeFarm(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n farm = models.ForeignKey(farmDesciption, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.farm.name + '\\t' + self.farm.description + '\\t' + str(self\n .farm.age) + '\\t' + str(self.farm.contcNum\n ) + '\\t' + self.farm.area + '\\t' + self.farm.img\n\n\nclass nutrientsVeges(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.vege.label\n\n\nclass registeredUser(models.Model):\n userName = models.CharField(max_length=400)\n area = models.ForeignKey(Area, on_delete=models.CASCADE)\n address = models.CharField(max_length=400)\n contactNum = models.CharField(max_length=400)\n email = models.CharField(max_length=250)\n\n def __str__(self):\n return self.userName + '\\t' + self.area.label + '\\t'\n\n\nclass orderDetails(models.Model):\n user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)\n orderID = models.IntegerField()\n status = models.CharField(max_length=10)\n cost = models.FloatField()\n odate = models.DateField()\n\n def __str__(self):\n return self.user.userName + '\\t' + self.status + '\\t' + str(self.cost)\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass vegeFarm(models.Model):\n <assignment token>\n <assignment token>\n\n def __str__(self):\n return self.farm.name + '\\t' + self.farm.description + '\\t' + str(self\n .farm.age) + '\\t' + str(self.farm.contcNum\n ) + '\\t' + self.farm.area + '\\t' + self.farm.img\n\n\nclass nutrientsVeges(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.vege.label\n\n\nclass registeredUser(models.Model):\n userName = models.CharField(max_length=400)\n area = models.ForeignKey(Area, on_delete=models.CASCADE)\n address = models.CharField(max_length=400)\n contactNum = models.CharField(max_length=400)\n email = models.CharField(max_length=250)\n\n def __str__(self):\n return self.userName + '\\t' + self.area.label + '\\t'\n\n\nclass orderDetails(models.Model):\n user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)\n orderID = models.IntegerField()\n status = models.CharField(max_length=10)\n cost = models.FloatField()\n odate = models.DateField()\n\n def __str__(self):\n return self.user.userName + '\\t' + self.status + '\\t' + str(self.cost)\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass vegeFarm(models.Model):\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass nutrientsVeges(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.vege.label\n\n\nclass registeredUser(models.Model):\n userName = models.CharField(max_length=400)\n area = models.ForeignKey(Area, on_delete=models.CASCADE)\n address = models.CharField(max_length=400)\n contactNum = models.CharField(max_length=400)\n email = models.CharField(max_length=250)\n\n def __str__(self):\n return self.userName + '\\t' + self.area.label + '\\t'\n\n\nclass orderDetails(models.Model):\n user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)\n orderID = models.IntegerField()\n status = models.CharField(max_length=10)\n cost = models.FloatField()\n odate = models.DateField()\n\n def __str__(self):\n return self.user.userName + '\\t' + self.status + '\\t' + str(self.cost)\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass nutrientsVeges(models.Model):\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.vege.label\n\n\nclass registeredUser(models.Model):\n userName = models.CharField(max_length=400)\n area = models.ForeignKey(Area, on_delete=models.CASCADE)\n address = models.CharField(max_length=400)\n contactNum = models.CharField(max_length=400)\n email = models.CharField(max_length=250)\n\n def __str__(self):\n return self.userName + '\\t' + self.area.label + '\\t'\n\n\nclass orderDetails(models.Model):\n user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)\n orderID = models.IntegerField()\n status = models.CharField(max_length=10)\n cost = models.FloatField()\n odate = models.DateField()\n\n def __str__(self):\n return self.user.userName + '\\t' + self.status + '\\t' + str(self.cost)\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass nutrientsVeges(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __str__(self):\n return self.vege.label\n\n\nclass registeredUser(models.Model):\n userName = models.CharField(max_length=400)\n area = models.ForeignKey(Area, on_delete=models.CASCADE)\n address = models.CharField(max_length=400)\n contactNum = models.CharField(max_length=400)\n email = models.CharField(max_length=250)\n\n def __str__(self):\n return self.userName + '\\t' + self.area.label + '\\t'\n\n\nclass orderDetails(models.Model):\n user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)\n orderID = models.IntegerField()\n status = models.CharField(max_length=10)\n cost = models.FloatField()\n odate = models.DateField()\n\n def __str__(self):\n return self.user.userName + '\\t' + self.status + '\\t' + str(self.cost)\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass nutrientsVeges(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass registeredUser(models.Model):\n userName = models.CharField(max_length=400)\n area = models.ForeignKey(Area, on_delete=models.CASCADE)\n address = models.CharField(max_length=400)\n contactNum = models.CharField(max_length=400)\n email = models.CharField(max_length=250)\n\n def __str__(self):\n return self.userName + '\\t' + self.area.label + '\\t'\n\n\nclass orderDetails(models.Model):\n user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)\n orderID = models.IntegerField()\n status = models.CharField(max_length=10)\n cost = models.FloatField()\n odate = models.DateField()\n\n def __str__(self):\n return self.user.userName + '\\t' + self.status + '\\t' + str(self.cost)\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass registeredUser(models.Model):\n userName = models.CharField(max_length=400)\n area = models.ForeignKey(Area, on_delete=models.CASCADE)\n address = models.CharField(max_length=400)\n contactNum = models.CharField(max_length=400)\n email = models.CharField(max_length=250)\n\n def __str__(self):\n return self.userName + '\\t' + self.area.label + '\\t'\n\n\nclass orderDetails(models.Model):\n user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)\n orderID = models.IntegerField()\n status = models.CharField(max_length=10)\n cost = models.FloatField()\n odate = models.DateField()\n\n def __str__(self):\n return self.user.userName + '\\t' + self.status + '\\t' + str(self.cost)\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass registeredUser(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __str__(self):\n return self.userName + '\\t' + self.area.label + '\\t'\n\n\nclass orderDetails(models.Model):\n user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)\n orderID = models.IntegerField()\n status = models.CharField(max_length=10)\n cost = models.FloatField()\n odate = models.DateField()\n\n def __str__(self):\n return self.user.userName + '\\t' + self.status + '\\t' + str(self.cost)\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass registeredUser(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass orderDetails(models.Model):\n user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)\n orderID = models.IntegerField()\n status = models.CharField(max_length=10)\n cost = models.FloatField()\n odate = models.DateField()\n\n def __str__(self):\n return self.user.userName + '\\t' + self.status + '\\t' + str(self.cost)\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass orderDetails(models.Model):\n user = models.ForeignKey(registeredUser, on_delete=models.CASCADE)\n orderID = models.IntegerField()\n status = models.CharField(max_length=10)\n cost = models.FloatField()\n odate = models.DateField()\n\n def __str__(self):\n return self.user.userName + '\\t' + self.status + '\\t' + str(self.cost)\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass orderDetails(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __str__(self):\n return self.user.userName + '\\t' + self.status + '\\t' + str(self.cost)\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass orderDetails(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass fruitOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass fruitOrder(models.Model):\n <assignment token>\n <assignment token>\n\n def __str__(self):\n return self.fruit.label + '\\t' + str(self.orderID.orderID)\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass fruitOrder(models.Model):\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass vegeOrder(models.Model):\n orderID = models.ForeignKey(orderDetails, on_delete=models.CASCADE)\n vege = models.ForeignKey(Veges, on_delete=models.CASCADE)\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass vegeOrder(models.Model):\n <assignment token>\n <assignment token>\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass nutrientsFruits(models.Model):\n fruit = models.ForeignKey(Fruits, on_delete=models.CASCADE)\n carbohydrates = models.FloatField()\n proteins = models.FloatField()\n energy = models.FloatField()\n fats = models.FloatField()\n sugar = models.FloatField()\n potassium = models.FloatField()\n iron = models.FloatField()\n calcium = models.FloatField()\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass nutrientsFruits(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __str__(self):\n return self.fruit.label\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass nutrientsFruits(models.Model):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n",
"<import token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n"
] | false |
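The steps above abstract a Django models module for a farm-produce ordering app. As a minimal usage sketch of those models — assuming they live in an app named `farm` (hypothetical), that migrations have been applied, and that the `Area` and `Fruits` tables (whose definitions are elided as <class token> above) already hold rows — the values below are illustrative only:

from datetime import date
from farm.models import Area, Fruits, registeredUser, orderDetails, fruitOrder  # app name hypothetical

area = Area.objects.first()    # Area's fields are not shown in this record
fruit = Fruits.objects.first() # likewise for Fruits
user = registeredUser.objects.create(
    userName='Asha', area=area, address='12 Farm Rd',
    contactNum='5550100', email='asha@example.com')
order = orderDetails.objects.create(
    user=user, orderID=1, status='open', cost=120.0, odate=date(2021, 6, 1))
fruitOrder.objects.create(orderID=order, fruit=fruit)  # link one fruit to the order
print(order)  # orderDetails.__str__ -> 'Asha\topen\t120.0'
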
99,367 |
68694604463462af88d9a1f3a1d2f38de6e81228
|
import openmc
mats = openmc.Materials()
mat = openmc.Material(1)
mat.name = "36 wt% U-235"
mat.set_density('sum')  # total density = sum of the atom densities given below
mat.add_nuclide('U234', 1.5272e-04)  # atom densities in atoms/barn-cm
mat.add_nuclide('U235', 1.7118e-02)  # ~36 wt% U-235, per the material name
mat.add_nuclide('U238', 2.9211e-02)
mat.add_element('C', 7.7389e-04)  # trace/alloying elements
mat.add_element('Fe', 1.2058e-04)
mat.add_element('W', 1.0087e-05)
mat.add_element('Cu', 3.8133e-04)
mat.add_element('Ni', 4.1288e-04)
mats.append(mat)
mats.export_to_xml()
|
[
"import openmc\n\nmats = openmc.Materials()\n\nmat = openmc.Material(1)\nmat.name = \"36 wt% U-235\"\nmat.set_density('sum')\nmat.add_nuclide('U234', 1.5272e-04)\nmat.add_nuclide('U235', 1.7118e-02)\nmat.add_nuclide('U238', 2.9211e-02)\nmat.add_element('C', 7.7389e-04)\nmat.add_element('Fe', 1.2058e-04)\nmat.add_element('W', 1.0087e-05)\nmat.add_element('Cu', 3.8133e-04)\nmat.add_element('Ni', 4.1288e-04)\nmats.append(mat)\n\nmats.export_to_xml()\n",
"import openmc\nmats = openmc.Materials()\nmat = openmc.Material(1)\nmat.name = '36 wt% U-235'\nmat.set_density('sum')\nmat.add_nuclide('U234', 0.00015272)\nmat.add_nuclide('U235', 0.017118)\nmat.add_nuclide('U238', 0.029211)\nmat.add_element('C', 0.00077389)\nmat.add_element('Fe', 0.00012058)\nmat.add_element('W', 1.0087e-05)\nmat.add_element('Cu', 0.00038133)\nmat.add_element('Ni', 0.00041288)\nmats.append(mat)\nmats.export_to_xml()\n",
"<import token>\nmats = openmc.Materials()\nmat = openmc.Material(1)\nmat.name = '36 wt% U-235'\nmat.set_density('sum')\nmat.add_nuclide('U234', 0.00015272)\nmat.add_nuclide('U235', 0.017118)\nmat.add_nuclide('U238', 0.029211)\nmat.add_element('C', 0.00077389)\nmat.add_element('Fe', 0.00012058)\nmat.add_element('W', 1.0087e-05)\nmat.add_element('Cu', 0.00038133)\nmat.add_element('Ni', 0.00041288)\nmats.append(mat)\nmats.export_to_xml()\n",
"<import token>\n<assignment token>\nmat.set_density('sum')\nmat.add_nuclide('U234', 0.00015272)\nmat.add_nuclide('U235', 0.017118)\nmat.add_nuclide('U238', 0.029211)\nmat.add_element('C', 0.00077389)\nmat.add_element('Fe', 0.00012058)\nmat.add_element('W', 1.0087e-05)\nmat.add_element('Cu', 0.00038133)\nmat.add_element('Ni', 0.00041288)\nmats.append(mat)\nmats.export_to_xml()\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
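Entry 99,367's `steps` list shows the progressive abstraction this dataset applies: the raw source, then the same source with imports collapsed to `<import token>`, then assignments to `<assignment token>`, then remaining statements to `<code token>`. As a hedged illustration of one such pass, here is a purely line-based sketch of the import step; the dataset's real pipeline is not shown and, given markers like `<docstring token>` in other rows, likely operates on the AST rather than on lines:

import re

def abstract_imports(source: str) -> str:
    # Collapse each consecutive run of import lines into one '<import token>' line.
    out, in_imports = [], False
    for line in source.splitlines():
        if re.match(r'\s*(import|from)\s+\w', line):
            if not in_imports:
                out.append('<import token>')
            in_imports = True
        else:
            in_imports = False
            out.append(line)
    return '\n'.join(out) + '\n'

print(abstract_imports('import openmc\nmats = openmc.Materials()\n'))
# -> '<import token>\nmats = openmc.Materials()\n', matching step 2 of the entry above
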
99,368 |
84d41e727bd759661bb225c0fca7a0da6235dfa8
|
import foo
print("Hello world")
foo.test("bar")
|
[
"import foo\n\nprint(\"Hello world\")\nfoo.test(\"bar\")",
"import foo\nprint('Hello world')\nfoo.test('bar')\n",
"<import token>\nprint('Hello world')\nfoo.test('bar')\n",
"<import token>\n<code token>\n"
] | false |
99,369 |
bf4b70ab7d4df84e7c1ee82829fd7fe6bf3fe2f7
|
import torch.nn as nn
from wilds.common.metrics.loss import ElementwiseLoss, Loss, MultiTaskLoss
from wilds.common.metrics.all_metrics import MSE
def initialize_loss(config, d_out):
if config.loss_function == 'cross_entropy':
return ElementwiseLoss(loss_fn=nn.CrossEntropyLoss(reduction='none'))
elif config.loss_function == 'lm_cross_entropy':
return MultiTaskLoss(loss_fn=nn.CrossEntropyLoss(reduction='none'))
elif config.loss_function == 'mse':
return MSE(name='loss')
elif config.loss_function == 'multitask_bce':
return MultiTaskLoss(loss_fn=nn.BCEWithLogitsLoss(reduction='none'))
elif config.loss_function == 'fasterrcnn_criterion':
from models.detection.fasterrcnn import FasterRCNNLoss
return ElementwiseLoss(loss_fn=FasterRCNNLoss(config.device))
else:
raise ValueError(f'config.loss_function {config.loss_function} not recognized')
|
[
"import torch.nn as nn\r\nfrom wilds.common.metrics.loss import ElementwiseLoss, Loss, MultiTaskLoss\r\nfrom wilds.common.metrics.all_metrics import MSE\r\n\r\ndef initialize_loss(config, d_out):\r\n if config.loss_function == 'cross_entropy':\r\n return ElementwiseLoss(loss_fn=nn.CrossEntropyLoss(reduction='none'))\r\n\r\n elif config.loss_function == 'lm_cross_entropy':\r\n return MultiTaskLoss(loss_fn=nn.CrossEntropyLoss(reduction='none'))\r\n\r\n elif config.loss_function == 'mse':\r\n return MSE(name='loss')\r\n\r\n elif config.loss_function == 'multitask_bce':\r\n return MultiTaskLoss(loss_fn=nn.BCEWithLogitsLoss(reduction='none'))\r\n\r\n elif config.loss_function == 'fasterrcnn_criterion':\r\n from models.detection.fasterrcnn import FasterRCNNLoss\r\n return ElementwiseLoss(loss_fn=FasterRCNNLoss(config.device))\r\n\r\n else:\r\n raise ValueError(f'config.loss_function {config.loss_function} not recognized')\r\n",
"import torch.nn as nn\nfrom wilds.common.metrics.loss import ElementwiseLoss, Loss, MultiTaskLoss\nfrom wilds.common.metrics.all_metrics import MSE\n\n\ndef initialize_loss(config, d_out):\n if config.loss_function == 'cross_entropy':\n return ElementwiseLoss(loss_fn=nn.CrossEntropyLoss(reduction='none'))\n elif config.loss_function == 'lm_cross_entropy':\n return MultiTaskLoss(loss_fn=nn.CrossEntropyLoss(reduction='none'))\n elif config.loss_function == 'mse':\n return MSE(name='loss')\n elif config.loss_function == 'multitask_bce':\n return MultiTaskLoss(loss_fn=nn.BCEWithLogitsLoss(reduction='none'))\n elif config.loss_function == 'fasterrcnn_criterion':\n from models.detection.fasterrcnn import FasterRCNNLoss\n return ElementwiseLoss(loss_fn=FasterRCNNLoss(config.device))\n else:\n raise ValueError(\n f'config.loss_function {config.loss_function} not recognized')\n",
"<import token>\n\n\ndef initialize_loss(config, d_out):\n if config.loss_function == 'cross_entropy':\n return ElementwiseLoss(loss_fn=nn.CrossEntropyLoss(reduction='none'))\n elif config.loss_function == 'lm_cross_entropy':\n return MultiTaskLoss(loss_fn=nn.CrossEntropyLoss(reduction='none'))\n elif config.loss_function == 'mse':\n return MSE(name='loss')\n elif config.loss_function == 'multitask_bce':\n return MultiTaskLoss(loss_fn=nn.BCEWithLogitsLoss(reduction='none'))\n elif config.loss_function == 'fasterrcnn_criterion':\n from models.detection.fasterrcnn import FasterRCNNLoss\n return ElementwiseLoss(loss_fn=FasterRCNNLoss(config.device))\n else:\n raise ValueError(\n f'config.loss_function {config.loss_function} not recognized')\n",
"<import token>\n<function token>\n"
] | false |
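Entry 99,369 maps loss names from a config object onto WILDS metric wrappers. A hedged call-site sketch, assuming `initialize_loss` is importable from the module above and using a bare namespace in place of the real config (whose other fields are not shown):

from types import SimpleNamespace

config = SimpleNamespace(loss_function='cross_entropy', device='cpu')  # only the fields read above
criterion = initialize_loss(config, d_out=10)  # d_out is accepted but never used in this function
# criterion is an ElementwiseLoss wrapping nn.CrossEntropyLoss(reduction='none')
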
99,370 |
42996c6fd047d0cd7c3ce3bffe005dcc5d610fa0
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file '../gui_design/postviewerdesign.ui'
#
# Created: Sun Jul 31 20:17:27 2016
# by: PyQt4 UI code generator 4.11.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.resize(411, 278)
self.verticalLayoutWidget = QtGui.QWidget(Dialog)
self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 411, 281))
self.verticalLayoutWidget.setObjectName(_fromUtf8("verticalLayoutWidget"))
self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.prevBTN = QtGui.QPushButton(self.verticalLayoutWidget)
self.prevBTN.setObjectName(_fromUtf8("prevBTN"))
self.horizontalLayout.addWidget(self.prevBTN)
self.imgButton = QtGui.QPushButton(self.verticalLayoutWidget)
self.imgButton.setObjectName(_fromUtf8("imgButton"))
self.horizontalLayout.addWidget(self.imgButton)
self.nextBTN = QtGui.QPushButton(self.verticalLayoutWidget)
self.nextBTN.setObjectName(_fromUtf8("nextBTN"))
self.horizontalLayout.addWidget(self.nextBTN)
self.verticalLayout.addLayout(self.horizontalLayout)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.emailBtn = QtGui.QPushButton(self.verticalLayoutWidget)
self.emailBtn.setObjectName(_fromUtf8("emailBtn"))
self.horizontalLayout_2.addWidget(self.emailBtn)
self.verticalLayout.addLayout(self.horizontalLayout_2)
self.textBrowser = QtGui.QTextBrowser(self.verticalLayoutWidget)
self.textBrowser.setObjectName(_fromUtf8("textBrowser"))
self.verticalLayout.addWidget(self.textBrowser)
self.buttonBox = QtGui.QDialogButtonBox(self.verticalLayoutWidget)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.verticalLayout.addWidget(self.buttonBox)
self.retranslateUi(Dialog)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), Dialog.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), Dialog.reject)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(_translate("Dialog", "Dialog", None))
self.prevBTN.setText(_translate("Dialog", "Prev", None))
self.imgButton.setText(_translate("Dialog", "Images", None))
self.nextBTN.setText(_translate("Dialog", "Next", None))
self.emailBtn.setText(_translate("Dialog", "Contact Me!", None))
|
[
"# -*- coding: utf-8 -*-\n\n# Form implementation generated from reading ui file '../gui_design/postviewerdesign.ui'\n#\n# Created: Sun Jul 31 20:17:27 2016\n# by: PyQt4 UI code generator 4.11.2\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n def _fromUtf8(s):\n return s\n\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\nclass Ui_Dialog(object):\n def setupUi(self, Dialog):\n Dialog.setObjectName(_fromUtf8(\"Dialog\"))\n Dialog.resize(411, 278)\n self.verticalLayoutWidget = QtGui.QWidget(Dialog)\n self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 411, 281))\n self.verticalLayoutWidget.setObjectName(_fromUtf8(\"verticalLayoutWidget\"))\n self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)\n self.verticalLayout.setMargin(0)\n self.verticalLayout.setObjectName(_fromUtf8(\"verticalLayout\"))\n self.horizontalLayout = QtGui.QHBoxLayout()\n self.horizontalLayout.setObjectName(_fromUtf8(\"horizontalLayout\"))\n self.prevBTN = QtGui.QPushButton(self.verticalLayoutWidget)\n self.prevBTN.setObjectName(_fromUtf8(\"prevBTN\"))\n self.horizontalLayout.addWidget(self.prevBTN)\n self.imgButton = QtGui.QPushButton(self.verticalLayoutWidget)\n self.imgButton.setObjectName(_fromUtf8(\"imgButton\"))\n self.horizontalLayout.addWidget(self.imgButton)\n self.nextBTN = QtGui.QPushButton(self.verticalLayoutWidget)\n self.nextBTN.setObjectName(_fromUtf8(\"nextBTN\"))\n self.horizontalLayout.addWidget(self.nextBTN)\n self.verticalLayout.addLayout(self.horizontalLayout)\n self.horizontalLayout_2 = QtGui.QHBoxLayout()\n self.horizontalLayout_2.setObjectName(_fromUtf8(\"horizontalLayout_2\"))\n self.emailBtn = QtGui.QPushButton(self.verticalLayoutWidget)\n self.emailBtn.setObjectName(_fromUtf8(\"emailBtn\"))\n self.horizontalLayout_2.addWidget(self.emailBtn)\n self.verticalLayout.addLayout(self.horizontalLayout_2)\n self.textBrowser = QtGui.QTextBrowser(self.verticalLayoutWidget)\n self.textBrowser.setObjectName(_fromUtf8(\"textBrowser\"))\n self.verticalLayout.addWidget(self.textBrowser)\n self.buttonBox = QtGui.QDialogButtonBox(self.verticalLayoutWidget)\n self.buttonBox.setOrientation(QtCore.Qt.Horizontal)\n self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)\n self.buttonBox.setObjectName(_fromUtf8(\"buttonBox\"))\n self.verticalLayout.addWidget(self.buttonBox)\n\n self.retranslateUi(Dialog)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\"accepted()\")), Dialog.accept)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\"rejected()\")), Dialog.reject)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n\n def retranslateUi(self, Dialog):\n Dialog.setWindowTitle(_translate(\"Dialog\", \"Dialog\", None))\n self.prevBTN.setText(_translate(\"Dialog\", \"Prev\", None))\n self.imgButton.setText(_translate(\"Dialog\", \"Images\", None))\n self.nextBTN.setText(_translate(\"Dialog\", \"Next\", None))\n self.emailBtn.setText(_translate(\"Dialog\", \"Contact Me!\", None))\n\n",
"from PyQt4 import QtCore, QtGui\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n\n def _fromUtf8(s):\n return s\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\n\nclass Ui_Dialog(object):\n\n def setupUi(self, Dialog):\n Dialog.setObjectName(_fromUtf8('Dialog'))\n Dialog.resize(411, 278)\n self.verticalLayoutWidget = QtGui.QWidget(Dialog)\n self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 411, 281))\n self.verticalLayoutWidget.setObjectName(_fromUtf8(\n 'verticalLayoutWidget'))\n self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)\n self.verticalLayout.setMargin(0)\n self.verticalLayout.setObjectName(_fromUtf8('verticalLayout'))\n self.horizontalLayout = QtGui.QHBoxLayout()\n self.horizontalLayout.setObjectName(_fromUtf8('horizontalLayout'))\n self.prevBTN = QtGui.QPushButton(self.verticalLayoutWidget)\n self.prevBTN.setObjectName(_fromUtf8('prevBTN'))\n self.horizontalLayout.addWidget(self.prevBTN)\n self.imgButton = QtGui.QPushButton(self.verticalLayoutWidget)\n self.imgButton.setObjectName(_fromUtf8('imgButton'))\n self.horizontalLayout.addWidget(self.imgButton)\n self.nextBTN = QtGui.QPushButton(self.verticalLayoutWidget)\n self.nextBTN.setObjectName(_fromUtf8('nextBTN'))\n self.horizontalLayout.addWidget(self.nextBTN)\n self.verticalLayout.addLayout(self.horizontalLayout)\n self.horizontalLayout_2 = QtGui.QHBoxLayout()\n self.horizontalLayout_2.setObjectName(_fromUtf8('horizontalLayout_2'))\n self.emailBtn = QtGui.QPushButton(self.verticalLayoutWidget)\n self.emailBtn.setObjectName(_fromUtf8('emailBtn'))\n self.horizontalLayout_2.addWidget(self.emailBtn)\n self.verticalLayout.addLayout(self.horizontalLayout_2)\n self.textBrowser = QtGui.QTextBrowser(self.verticalLayoutWidget)\n self.textBrowser.setObjectName(_fromUtf8('textBrowser'))\n self.verticalLayout.addWidget(self.textBrowser)\n self.buttonBox = QtGui.QDialogButtonBox(self.verticalLayoutWidget)\n self.buttonBox.setOrientation(QtCore.Qt.Horizontal)\n self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel |\n QtGui.QDialogButtonBox.Ok)\n self.buttonBox.setObjectName(_fromUtf8('buttonBox'))\n self.verticalLayout.addWidget(self.buttonBox)\n self.retranslateUi(Dialog)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\n 'accepted()')), Dialog.accept)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\n 'rejected()')), Dialog.reject)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n\n def retranslateUi(self, Dialog):\n Dialog.setWindowTitle(_translate('Dialog', 'Dialog', None))\n self.prevBTN.setText(_translate('Dialog', 'Prev', None))\n self.imgButton.setText(_translate('Dialog', 'Images', None))\n self.nextBTN.setText(_translate('Dialog', 'Next', None))\n self.emailBtn.setText(_translate('Dialog', 'Contact Me!', None))\n",
"<import token>\ntry:\n _fromUtf8 = QtCore.QString.fromUtf8\nexcept AttributeError:\n\n def _fromUtf8(s):\n return s\ntry:\n _encoding = QtGui.QApplication.UnicodeUTF8\n\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig, _encoding)\nexcept AttributeError:\n\n def _translate(context, text, disambig):\n return QtGui.QApplication.translate(context, text, disambig)\n\n\nclass Ui_Dialog(object):\n\n def setupUi(self, Dialog):\n Dialog.setObjectName(_fromUtf8('Dialog'))\n Dialog.resize(411, 278)\n self.verticalLayoutWidget = QtGui.QWidget(Dialog)\n self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 411, 281))\n self.verticalLayoutWidget.setObjectName(_fromUtf8(\n 'verticalLayoutWidget'))\n self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)\n self.verticalLayout.setMargin(0)\n self.verticalLayout.setObjectName(_fromUtf8('verticalLayout'))\n self.horizontalLayout = QtGui.QHBoxLayout()\n self.horizontalLayout.setObjectName(_fromUtf8('horizontalLayout'))\n self.prevBTN = QtGui.QPushButton(self.verticalLayoutWidget)\n self.prevBTN.setObjectName(_fromUtf8('prevBTN'))\n self.horizontalLayout.addWidget(self.prevBTN)\n self.imgButton = QtGui.QPushButton(self.verticalLayoutWidget)\n self.imgButton.setObjectName(_fromUtf8('imgButton'))\n self.horizontalLayout.addWidget(self.imgButton)\n self.nextBTN = QtGui.QPushButton(self.verticalLayoutWidget)\n self.nextBTN.setObjectName(_fromUtf8('nextBTN'))\n self.horizontalLayout.addWidget(self.nextBTN)\n self.verticalLayout.addLayout(self.horizontalLayout)\n self.horizontalLayout_2 = QtGui.QHBoxLayout()\n self.horizontalLayout_2.setObjectName(_fromUtf8('horizontalLayout_2'))\n self.emailBtn = QtGui.QPushButton(self.verticalLayoutWidget)\n self.emailBtn.setObjectName(_fromUtf8('emailBtn'))\n self.horizontalLayout_2.addWidget(self.emailBtn)\n self.verticalLayout.addLayout(self.horizontalLayout_2)\n self.textBrowser = QtGui.QTextBrowser(self.verticalLayoutWidget)\n self.textBrowser.setObjectName(_fromUtf8('textBrowser'))\n self.verticalLayout.addWidget(self.textBrowser)\n self.buttonBox = QtGui.QDialogButtonBox(self.verticalLayoutWidget)\n self.buttonBox.setOrientation(QtCore.Qt.Horizontal)\n self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel |\n QtGui.QDialogButtonBox.Ok)\n self.buttonBox.setObjectName(_fromUtf8('buttonBox'))\n self.verticalLayout.addWidget(self.buttonBox)\n self.retranslateUi(Dialog)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\n 'accepted()')), Dialog.accept)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\n 'rejected()')), Dialog.reject)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n\n def retranslateUi(self, Dialog):\n Dialog.setWindowTitle(_translate('Dialog', 'Dialog', None))\n self.prevBTN.setText(_translate('Dialog', 'Prev', None))\n self.imgButton.setText(_translate('Dialog', 'Images', None))\n self.nextBTN.setText(_translate('Dialog', 'Next', None))\n self.emailBtn.setText(_translate('Dialog', 'Contact Me!', None))\n",
"<import token>\n<code token>\n\n\nclass Ui_Dialog(object):\n\n def setupUi(self, Dialog):\n Dialog.setObjectName(_fromUtf8('Dialog'))\n Dialog.resize(411, 278)\n self.verticalLayoutWidget = QtGui.QWidget(Dialog)\n self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 411, 281))\n self.verticalLayoutWidget.setObjectName(_fromUtf8(\n 'verticalLayoutWidget'))\n self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)\n self.verticalLayout.setMargin(0)\n self.verticalLayout.setObjectName(_fromUtf8('verticalLayout'))\n self.horizontalLayout = QtGui.QHBoxLayout()\n self.horizontalLayout.setObjectName(_fromUtf8('horizontalLayout'))\n self.prevBTN = QtGui.QPushButton(self.verticalLayoutWidget)\n self.prevBTN.setObjectName(_fromUtf8('prevBTN'))\n self.horizontalLayout.addWidget(self.prevBTN)\n self.imgButton = QtGui.QPushButton(self.verticalLayoutWidget)\n self.imgButton.setObjectName(_fromUtf8('imgButton'))\n self.horizontalLayout.addWidget(self.imgButton)\n self.nextBTN = QtGui.QPushButton(self.verticalLayoutWidget)\n self.nextBTN.setObjectName(_fromUtf8('nextBTN'))\n self.horizontalLayout.addWidget(self.nextBTN)\n self.verticalLayout.addLayout(self.horizontalLayout)\n self.horizontalLayout_2 = QtGui.QHBoxLayout()\n self.horizontalLayout_2.setObjectName(_fromUtf8('horizontalLayout_2'))\n self.emailBtn = QtGui.QPushButton(self.verticalLayoutWidget)\n self.emailBtn.setObjectName(_fromUtf8('emailBtn'))\n self.horizontalLayout_2.addWidget(self.emailBtn)\n self.verticalLayout.addLayout(self.horizontalLayout_2)\n self.textBrowser = QtGui.QTextBrowser(self.verticalLayoutWidget)\n self.textBrowser.setObjectName(_fromUtf8('textBrowser'))\n self.verticalLayout.addWidget(self.textBrowser)\n self.buttonBox = QtGui.QDialogButtonBox(self.verticalLayoutWidget)\n self.buttonBox.setOrientation(QtCore.Qt.Horizontal)\n self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel |\n QtGui.QDialogButtonBox.Ok)\n self.buttonBox.setObjectName(_fromUtf8('buttonBox'))\n self.verticalLayout.addWidget(self.buttonBox)\n self.retranslateUi(Dialog)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\n 'accepted()')), Dialog.accept)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\n 'rejected()')), Dialog.reject)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n\n def retranslateUi(self, Dialog):\n Dialog.setWindowTitle(_translate('Dialog', 'Dialog', None))\n self.prevBTN.setText(_translate('Dialog', 'Prev', None))\n self.imgButton.setText(_translate('Dialog', 'Images', None))\n self.nextBTN.setText(_translate('Dialog', 'Next', None))\n self.emailBtn.setText(_translate('Dialog', 'Contact Me!', None))\n",
"<import token>\n<code token>\n\n\nclass Ui_Dialog(object):\n\n def setupUi(self, Dialog):\n Dialog.setObjectName(_fromUtf8('Dialog'))\n Dialog.resize(411, 278)\n self.verticalLayoutWidget = QtGui.QWidget(Dialog)\n self.verticalLayoutWidget.setGeometry(QtCore.QRect(0, 0, 411, 281))\n self.verticalLayoutWidget.setObjectName(_fromUtf8(\n 'verticalLayoutWidget'))\n self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)\n self.verticalLayout.setMargin(0)\n self.verticalLayout.setObjectName(_fromUtf8('verticalLayout'))\n self.horizontalLayout = QtGui.QHBoxLayout()\n self.horizontalLayout.setObjectName(_fromUtf8('horizontalLayout'))\n self.prevBTN = QtGui.QPushButton(self.verticalLayoutWidget)\n self.prevBTN.setObjectName(_fromUtf8('prevBTN'))\n self.horizontalLayout.addWidget(self.prevBTN)\n self.imgButton = QtGui.QPushButton(self.verticalLayoutWidget)\n self.imgButton.setObjectName(_fromUtf8('imgButton'))\n self.horizontalLayout.addWidget(self.imgButton)\n self.nextBTN = QtGui.QPushButton(self.verticalLayoutWidget)\n self.nextBTN.setObjectName(_fromUtf8('nextBTN'))\n self.horizontalLayout.addWidget(self.nextBTN)\n self.verticalLayout.addLayout(self.horizontalLayout)\n self.horizontalLayout_2 = QtGui.QHBoxLayout()\n self.horizontalLayout_2.setObjectName(_fromUtf8('horizontalLayout_2'))\n self.emailBtn = QtGui.QPushButton(self.verticalLayoutWidget)\n self.emailBtn.setObjectName(_fromUtf8('emailBtn'))\n self.horizontalLayout_2.addWidget(self.emailBtn)\n self.verticalLayout.addLayout(self.horizontalLayout_2)\n self.textBrowser = QtGui.QTextBrowser(self.verticalLayoutWidget)\n self.textBrowser.setObjectName(_fromUtf8('textBrowser'))\n self.verticalLayout.addWidget(self.textBrowser)\n self.buttonBox = QtGui.QDialogButtonBox(self.verticalLayoutWidget)\n self.buttonBox.setOrientation(QtCore.Qt.Horizontal)\n self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel |\n QtGui.QDialogButtonBox.Ok)\n self.buttonBox.setObjectName(_fromUtf8('buttonBox'))\n self.verticalLayout.addWidget(self.buttonBox)\n self.retranslateUi(Dialog)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\n 'accepted()')), Dialog.accept)\n QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8(\n 'rejected()')), Dialog.reject)\n QtCore.QMetaObject.connectSlotsByName(Dialog)\n <function token>\n",
"<import token>\n<code token>\n\n\nclass Ui_Dialog(object):\n <function token>\n <function token>\n",
"<import token>\n<code token>\n<class token>\n"
] | false |
99,371 |
3d4c3648e9381d01f8412d48af252edb6631ad00
|
from rest_framework import serializers
from .models import Priority
class PrioritySerializer(serializers.ModelSerializer):
class Meta:
model = Priority
fields = '__all__'
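# Usage sketch (hypothetical queryset; assumes Django/DRF are configured):
#   PrioritySerializer(Priority.objects.all(), many=True).data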
|
[
"from rest_framework import serializers\nfrom .models import Priority\n\nclass PrioritySerializer(serializers.ModelSerializer):\n class Meta:\n model = Priority\n fields = '__all__'",
"from rest_framework import serializers\nfrom .models import Priority\n\n\nclass PrioritySerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Priority\n fields = '__all__'\n",
"<import token>\n\n\nclass PrioritySerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = Priority\n fields = '__all__'\n",
"<import token>\n<class token>\n"
] | false |
99,372 |
a751c191c8b17cb5a6ef09ce686ee3f8f4e996d1
|
#!/usr/bin/env python3
import argparse
import logging
from io import BytesIO
from pathlib import Path
from typing import Optional, Tuple
import humanfriendly
import kaldiio
import numpy as np
import resampy
import soundfile
from tqdm import tqdm
from typeguard import check_argument_types
from espnet2.fileio.read_text import read_2columns_text
from espnet2.fileio.sound_scp import SoundScpWriter, soundfile_read
from espnet2.fileio.vad_scp import VADScpReader
from espnet2.utils.types import str2bool
from espnet.utils.cli_utils import get_commandline_args
def humanfriendly_or_none(value: str):
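    # Parse sizes such as "16k" or "8000" via humanfriendly; the literals
    # "none"/"None"/"NONE" yield None, i.e. "do not resample".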
if value in ("none", "None", "NONE"):
return None
return humanfriendly.parse_size(value)
def str2int_tuple(integers: str) -> Optional[Tuple[int, ...]]:
"""
>>> str2int_tuple('3,4,5')
(3, 4, 5)
"""
assert check_argument_types()
if integers.strip() in ("none", "None", "NONE", "null", "Null", "NULL"):
return None
return tuple(map(int, integers.strip().split(",")))
def vad_trim(vad_reader: VADScpReader, uttid: str, wav: np.ndarray, fs: int) -> np.ndarray:
    # Trim the waveform with VAD (voice activity detection) information:
    # keep only the voiced intervals and concatenate them.
assert check_argument_types()
assert uttid in vad_reader, uttid
vad_info = vad_reader[uttid]
total_length = sum(int((time[1] - time[0]) * fs) for time in vad_info)
new_wav = np.zeros((total_length,), dtype=wav.dtype)
start_frame = 0
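    # Copy each voiced interval [start, end) from the source waveform into the
    # packed output buffer, back to back.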
for time in vad_info:
# Note: we regard vad as [xxx, yyy)
duration = int((time[1] - time[0]) * fs)
orig_start_frame = int(time[0] * fs)
orig_end_frame = orig_start_frame + duration
end_frame = start_frame + duration
new_wav[start_frame:end_frame] = wav[orig_start_frame:orig_end_frame]
start_frame = end_frame
return new_wav
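# Minimal usage sketch for vad_trim (path and utterance ID are hypothetical):
#   vad_reader = VADScpReader("data/train/vad.scp")
#   trimmed = vad_trim(vad_reader, "utt1", wav, fs=16000)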
class SegmentsExtractor:
"""Emulating kaldi extract-segments.cc
Args:
segments (str): The file format is
"<segment-id> <recording-id> <start-time> <end-time>\n"
"e.g. call-861225-A-0050-0065 call-861225-A 5.0 6.5\n"
"""
def __init__(self, fname: str, segments: str = None, multi_columns: bool = False):
assert check_argument_types()
self.wav_scp = fname
self.multi_columns = multi_columns
self.wav_dict = {}
with open(self.wav_scp, "r") as f:
for line in f:
recodeid, wavpath = line.strip().split(None, 1)
if recodeid in self.wav_dict:
raise RuntimeError(f"{recodeid} is duplicated")
self.wav_dict[recodeid] = wavpath
self.segments = segments
self.segments_dict = {}
with open(self.segments, "r") as f:
for line in f:
sps = line.rstrip().split(None)
if len(sps) != 4:
raise RuntimeError("Format is invalid: {}".format(line))
uttid, recodeid, st, et = sps
self.segments_dict[uttid] = (recodeid, float(st), float(et))
if recodeid not in self.wav_dict:
raise RuntimeError(
'Not found "{}" in {}'.format(recodeid, self.wav_scp)
)
def generator(self):
recodeid_counter = {}
for utt, (recodeid, st, et) in self.segments_dict.items():
recodeid_counter[recodeid] = recodeid_counter.get(recodeid, 0) + 1
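        # The per-recording segment counts above let us evict a cached waveform
        # as soon as its last segment has been yielded (see below).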
cached = {}
for utt, (recodeid, st, et) in self.segments_dict.items():
wavpath = self.wav_dict[recodeid]
if recodeid not in cached:
if wavpath.endswith("|"):
if self.multi_columns:
raise RuntimeError(
"Not supporting multi_columns wav.scp for inputs by pipe"
)
# Streaming input e.g. cat a.wav |
with kaldiio.open_like_kaldi(wavpath, "rb") as f:
with BytesIO(f.read()) as g:
array, rate = soundfile.read(g)
else:
if self.multi_columns:
array, rate = soundfile_read(
wavs=wavpath.split(),
dtype=None,
always_2d=False,
concat_axis=1,
)
else:
array, rate = soundfile.read(wavpath)
cached[recodeid] = array, rate
array, rate = cached[recodeid]
# Keep array until the last query
recodeid_counter[recodeid] -= 1
if recodeid_counter[recodeid] == 0:
cached.pop(recodeid)
# Convert starting time of the segment to corresponding sample number.
# If end time is -1 then use the whole file starting from start time.
if et != -1:
array = array[int(st * rate) : int(et * rate)]
else:
array = array[int(st * rate) :]
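            # wavpath and subtypes are None: the excerpt exists only in memory,
            # matching the 4-tuple shape of the non-segments generator in main().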
yield utt, (array, rate), None, None
def main():
logfmt = "%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"
logging.basicConfig(level=logging.INFO, format=logfmt)
logging.info(get_commandline_args())
parser = argparse.ArgumentParser(
        description='Create a wave list from "wav.scp"',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("scp")
parser.add_argument("outdir")
parser.add_argument(
"--name",
default="wav",
        help='Specify the prefix of the output file name, e.g. "wav.scp"',
)
parser.add_argument("--segments", default=None)
parser.add_argument(
"--fs",
type=humanfriendly_or_none,
default=None,
help="If the sampling rate specified, Change the sampling rate.",
)
parser.add_argument("--audio-format", default="wav")
parser.add_argument("--vad_based_trim", type=str, default=None)
group = parser.add_mutually_exclusive_group()
group.add_argument("--ref-channels", default=None, type=str2int_tuple)
group.add_argument("--utt2ref-channels", default=None, type=str)
group.add_argument(
"--audio-subtype",
default=None,
type=str,
help=(
"Give a interpretable subtype by soundfile e.g. PCM_16. "
"You can check all available types by soundfile.available_subtypes()"
),
)
parser.add_argument(
"--multi-columns-input",
type=str2bool,
default=False,
help=(
"Enable multi columns mode for input wav.scp. "
"e.g. 'ID a.wav b.wav c.wav' is interpreted as 3ch audio data"
),
)
parser.add_argument(
"--multi-columns-output",
type=str2bool,
default=False,
help=(
"Enable multi columns mode for output wav.scp. "
"e.g. If input audio data has 2ch, "
"each line in wav.scp has the the format like "
"'ID ID-CH0.wav ID-CH1.wav'"
),
)
args = parser.parse_args()
out_num_samples = Path(args.outdir) / "utt2num_samples"
if args.ref_channels is not None:
def utt2ref_channels(x) -> Tuple[int, ...]:
return args.ref_channels
elif args.utt2ref_channels is not None:
utt2ref_channels_dict = read_2columns_text(args.utt2ref_channels)
def utt2ref_channels(x, d=utt2ref_channels_dict) -> Tuple[int, ...]:
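        # Binding the dict as a default argument captures it at definition time,
        # so the closure does not depend on the enclosing name later on.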
chs_str = d[x]
return tuple(map(int, chs_str.split()))
else:
utt2ref_channels = None
if args.audio_format.endswith("ark") and args.multi_columns_output:
raise RuntimeError("Multi columns wav.scp is not supported for ark type")
Path(args.outdir).mkdir(parents=True, exist_ok=True)
out_wavscp = Path(args.outdir) / f"{args.name}.scp"
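    # Two output modes: one extended-ark container plus an scp index ("ark"),
    # or one audio file per utterance written through SoundScpWriter.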
if args.audio_format.endswith("ark"):
fark = open(Path(args.outdir) / f"data_{args.name}.ark", "wb")
fscp_out = out_wavscp.open("w")
writer = None
else:
writer = SoundScpWriter(
args.outdir,
out_wavscp,
format=args.audio_format,
multi_columns=args.multi_columns_output,
subtype=args.audio_subtype,
)
fscp_out = None
if args.vad_based_trim is not None:
vad_reader = VADScpReader(args.vad_based_trim)
if args.segments is not None:
extractor = SegmentsExtractor(
args.scp, segments=args.segments, multi_columns=args.multi_columns_input
)
generator = extractor.generator
else:
def generator():
with Path(args.scp).open("r") as fscp:
for line in tqdm(fscp):
uttid, wavpath = line.strip().split(None, 1)
# B.a. Without segments and using pipe inputs
if wavpath.endswith("|"):
if args.multi_columns_input:
raise RuntimeError(
"Not supporting multi_columns wav.scp for inputs by"
" pipe"
)
# Streaming input e.g. cat a.wav |
with kaldiio.open_like_kaldi(wavpath, "rb") as f:
with BytesIO(f.read()) as g:
wave, rate = soundfile.read(g)
subtypes = None
# B.b Without segments and not using pipe
else:
if args.multi_columns_input:
wave, rate, subtypes = soundfile_read(
wavs=wavpath.split(),
dtype=None,
always_2d=False,
concat_axis=1,
return_subtype=True,
)
else:
with soundfile.SoundFile(wavpath) as sf:
rate = sf.samplerate
subtypes = [sf.subtype]
wave = sf.read()
yield uttid, (wave, rate), wavpath, subtypes
with out_num_samples.open("w") as fnum_samples:
for uttid, (wave, rate), wavpath, subtypes in tqdm(generator()):
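            # save_asis: when no transformation touches the audio, the output
            # wav.scp can reference the original file instead of rewriting it.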
save_asis = True
if args.fs is not None and args.fs != rate:
# FIXME(kamo): To use sox?
wave = resampy.resample(wave, rate, args.fs, axis=0)
rate = args.fs
save_asis = False
if args.vad_based_trim is not None:
wave = vad_trim(vad_reader, uttid, wave, rate)
save_asis = False
if wave.ndim == 2 and utt2ref_channels is not None:
wave = wave[:, utt2ref_channels(uttid)]
save_asis = False
if args.segments is not None:
save_asis = False
if args.audio_format.endswith("ark"):
save_asis = False
if args.multi_columns_input:
if args.multi_columns_output:
if wavpath is not None:
for _wavpath in wavpath.split():
if Path(_wavpath).suffix != "." + args.audio_format:
save_asis = False
break
if wave.ndim == 1:
_num_ch = 1
else:
_num_ch = wave.shape[1]
if len(wavpath.split()) != _num_ch:
save_asis = False
else:
if wavpath is not None and len(wavpath.split()) > 1:
save_asis = False
elif args.multi_columns_output:
if wave.ndim == 2 and wave.shape[1] > 1:
save_asis = False
if wavpath is not None and wavpath.endswith("|"):
save_asis = False
if wavpath is not None and Path(wavpath).suffix != "." + args.audio_format:
save_asis = False
if not args.audio_format.endswith("ark") and subtypes is not None:
if args.audio_subtype is None:
subtype2 = soundfile.default_subtype(args.audio_format)
else:
subtype2 = args.audio_subtype
for subtype in subtypes:
if subtype != subtype2:
save_asis = False
break
if save_asis:
writer.fscp.write(f"{uttid} {wavpath}\n")
elif args.audio_format.endswith("ark"):
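                # Map e.g. "flac.ark" to soundfile's "flac"; the for/else raises
                # if no known soundfile format matches.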
for name in soundfile.available_formats():
if name.lower() in args.audio_format.lower():
suf = name.lower()
break
else:
raise RuntimeError(f"{args.audio_format} is not supported.")
# NOTE(kamo): Using extended ark format style here.
# This format is incompatible with Kaldi
kaldiio.save_ark(
fark,
{uttid: (wave, rate)},
scp=fscp_out,
append=True,
write_function="soundfile",
write_kwargs={"format": suf, "subtype": args.audio_subtype},
)
else:
writer[uttid] = rate, wave
fnum_samples.write(f"{uttid} {len(wave)}\n")
if __name__ == "__main__":
main()
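# Example invocation (file and directory names are hypothetical):
#   python format_wav_scp.py data/train/wav.scp dump/raw/train --fs 16k --audio-format flac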
|
[
"#!/usr/bin/env python3\nimport argparse\nimport logging\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import Optional, Tuple\n\nimport humanfriendly\nimport kaldiio\nimport numpy as np\nimport resampy\nimport soundfile\nfrom tqdm import tqdm\nfrom typeguard import check_argument_types\n\nfrom espnet2.fileio.read_text import read_2columns_text\nfrom espnet2.fileio.sound_scp import SoundScpWriter, soundfile_read\nfrom espnet2.fileio.vad_scp import VADScpReader\nfrom espnet2.utils.types import str2bool\nfrom espnet.utils.cli_utils import get_commandline_args\n\n\ndef humanfriendly_or_none(value: str):\n if value in (\"none\", \"None\", \"NONE\"):\n return None\n return humanfriendly.parse_size(value)\n\n\ndef str2int_tuple(integers: str) -> Optional[Tuple[int, ...]]:\n \"\"\"\n\n >>> str2int_tuple('3,4,5')\n (3, 4, 5)\n\n \"\"\"\n assert check_argument_types()\n if integers.strip() in (\"none\", \"None\", \"NONE\", \"null\", \"Null\", \"NULL\"):\n return None\n return tuple(map(int, integers.strip().split(\",\")))\n\n\ndef vad_trim(vad_reader: VADScpReader, uttid: str, wav: np.array, fs: int) -> np.array:\n # Conduct trim wtih vad information\n\n assert check_argument_types()\n assert uttid in vad_reader, uttid\n\n vad_info = vad_reader[uttid]\n total_length = sum(int((time[1] - time[0]) * fs) for time in vad_info)\n new_wav = np.zeros((total_length,), dtype=wav.dtype)\n start_frame = 0\n for time in vad_info:\n # Note: we regard vad as [xxx, yyy)\n duration = int((time[1] - time[0]) * fs)\n orig_start_frame = int(time[0] * fs)\n orig_end_frame = orig_start_frame + duration\n\n end_frame = start_frame + duration\n new_wav[start_frame:end_frame] = wav[orig_start_frame:orig_end_frame]\n\n start_frame = end_frame\n\n return new_wav\n\n\nclass SegmentsExtractor:\n \"\"\"Emulating kaldi extract-segments.cc\n\n Args:\n segments (str): The file format is\n \"<segment-id> <recording-id> <start-time> <end-time>\\n\"\n \"e.g. call-861225-A-0050-0065 call-861225-A 5.0 6.5\\n\"\n \"\"\"\n\n def __init__(self, fname: str, segments: str = None, multi_columns: bool = False):\n assert check_argument_types()\n self.wav_scp = fname\n self.multi_columns = multi_columns\n self.wav_dict = {}\n with open(self.wav_scp, \"r\") as f:\n for line in f:\n recodeid, wavpath = line.strip().split(None, 1)\n if recodeid in self.wav_dict:\n raise RuntimeError(f\"{recodeid} is duplicated\")\n self.wav_dict[recodeid] = wavpath\n\n self.segments = segments\n self.segments_dict = {}\n with open(self.segments, \"r\") as f:\n for line in f:\n sps = line.rstrip().split(None)\n if len(sps) != 4:\n raise RuntimeError(\"Format is invalid: {}\".format(line))\n uttid, recodeid, st, et = sps\n self.segments_dict[uttid] = (recodeid, float(st), float(et))\n\n if recodeid not in self.wav_dict:\n raise RuntimeError(\n 'Not found \"{}\" in {}'.format(recodeid, self.wav_scp)\n )\n\n def generator(self):\n recodeid_counter = {}\n for utt, (recodeid, st, et) in self.segments_dict.items():\n recodeid_counter[recodeid] = recodeid_counter.get(recodeid, 0) + 1\n\n cached = {}\n for utt, (recodeid, st, et) in self.segments_dict.items():\n wavpath = self.wav_dict[recodeid]\n if recodeid not in cached:\n if wavpath.endswith(\"|\"):\n if self.multi_columns:\n raise RuntimeError(\n \"Not supporting multi_columns wav.scp for inputs by pipe\"\n )\n # Streaming input e.g. 
cat a.wav |\n with kaldiio.open_like_kaldi(wavpath, \"rb\") as f:\n with BytesIO(f.read()) as g:\n array, rate = soundfile.read(g)\n\n else:\n if self.multi_columns:\n array, rate = soundfile_read(\n wavs=wavpath.split(),\n dtype=None,\n always_2d=False,\n concat_axis=1,\n )\n else:\n array, rate = soundfile.read(wavpath)\n cached[recodeid] = array, rate\n\n array, rate = cached[recodeid]\n # Keep array until the last query\n recodeid_counter[recodeid] -= 1\n if recodeid_counter[recodeid] == 0:\n cached.pop(recodeid)\n # Convert starting time of the segment to corresponding sample number.\n # If end time is -1 then use the whole file starting from start time.\n if et != -1:\n array = array[int(st * rate) : int(et * rate)]\n else:\n array = array[int(st * rate) :]\n\n yield utt, (array, rate), None, None\n\n\ndef main():\n logfmt = \"%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s\"\n logging.basicConfig(level=logging.INFO, format=logfmt)\n logging.info(get_commandline_args())\n\n parser = argparse.ArgumentParser(\n description='Create waves list from \"wav.scp\"',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n parser.add_argument(\"scp\")\n parser.add_argument(\"outdir\")\n parser.add_argument(\n \"--name\",\n default=\"wav\",\n help='Specify the prefix word of output file name such as \"wav.scp\"',\n )\n parser.add_argument(\"--segments\", default=None)\n parser.add_argument(\n \"--fs\",\n type=humanfriendly_or_none,\n default=None,\n help=\"If the sampling rate specified, Change the sampling rate.\",\n )\n parser.add_argument(\"--audio-format\", default=\"wav\")\n parser.add_argument(\"--vad_based_trim\", type=str, default=None)\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\"--ref-channels\", default=None, type=str2int_tuple)\n group.add_argument(\"--utt2ref-channels\", default=None, type=str)\n group.add_argument(\n \"--audio-subtype\",\n default=None,\n type=str,\n help=(\n \"Give a interpretable subtype by soundfile e.g. PCM_16. \"\n \"You can check all available types by soundfile.available_subtypes()\"\n ),\n )\n parser.add_argument(\n \"--multi-columns-input\",\n type=str2bool,\n default=False,\n help=(\n \"Enable multi columns mode for input wav.scp. \"\n \"e.g. 'ID a.wav b.wav c.wav' is interpreted as 3ch audio data\"\n ),\n )\n parser.add_argument(\n \"--multi-columns-output\",\n type=str2bool,\n default=False,\n help=(\n \"Enable multi columns mode for output wav.scp. \"\n \"e.g. 
If input audio data has 2ch, \"\n \"each line in wav.scp has the the format like \"\n \"'ID ID-CH0.wav ID-CH1.wav'\"\n ),\n )\n args = parser.parse_args()\n\n out_num_samples = Path(args.outdir) / \"utt2num_samples\"\n\n if args.ref_channels is not None:\n\n def utt2ref_channels(x) -> Tuple[int, ...]:\n return args.ref_channels\n\n elif args.utt2ref_channels is not None:\n utt2ref_channels_dict = read_2columns_text(args.utt2ref_channels)\n\n def utt2ref_channels(x, d=utt2ref_channels_dict) -> Tuple[int, ...]:\n chs_str = d[x]\n return tuple(map(int, chs_str.split()))\n\n else:\n utt2ref_channels = None\n\n if args.audio_format.endswith(\"ark\") and args.multi_columns_output:\n raise RuntimeError(\"Multi columns wav.scp is not supported for ark type\")\n\n Path(args.outdir).mkdir(parents=True, exist_ok=True)\n out_wavscp = Path(args.outdir) / f\"{args.name}.scp\"\n\n if args.audio_format.endswith(\"ark\"):\n fark = open(Path(args.outdir) / f\"data_{args.name}.ark\", \"wb\")\n fscp_out = out_wavscp.open(\"w\")\n writer = None\n else:\n writer = SoundScpWriter(\n args.outdir,\n out_wavscp,\n format=args.audio_format,\n multi_columns=args.multi_columns_output,\n subtype=args.audio_subtype,\n )\n fscp_out = None\n\n if args.vad_based_trim is not None:\n vad_reader = VADScpReader(args.vad_based_trim)\n\n if args.segments is not None:\n extractor = SegmentsExtractor(\n args.scp, segments=args.segments, multi_columns=args.multi_columns_input\n )\n generator = extractor.generator\n\n else:\n\n def generator():\n with Path(args.scp).open(\"r\") as fscp:\n for line in tqdm(fscp):\n uttid, wavpath = line.strip().split(None, 1)\n\n # B.a. Without segments and using pipe inputs\n if wavpath.endswith(\"|\"):\n if args.multi_columns_input:\n raise RuntimeError(\n \"Not supporting multi_columns wav.scp for inputs by\"\n \" pipe\"\n )\n # Streaming input e.g. 
cat a.wav |\n with kaldiio.open_like_kaldi(wavpath, \"rb\") as f:\n with BytesIO(f.read()) as g:\n wave, rate = soundfile.read(g)\n subtypes = None\n\n # B.b Without segments and not using pipe\n else:\n if args.multi_columns_input:\n wave, rate, subtypes = soundfile_read(\n wavs=wavpath.split(),\n dtype=None,\n always_2d=False,\n concat_axis=1,\n return_subtype=True,\n )\n else:\n with soundfile.SoundFile(wavpath) as sf:\n rate = sf.samplerate\n subtypes = [sf.subtype]\n wave = sf.read()\n yield uttid, (wave, rate), wavpath, subtypes\n\n with out_num_samples.open(\"w\") as fnum_samples:\n for uttid, (wave, rate), wavpath, subtypes in tqdm(generator()):\n save_asis = True\n if args.fs is not None and args.fs != rate:\n # FIXME(kamo): To use sox?\n wave = resampy.resample(wave, rate, args.fs, axis=0)\n rate = args.fs\n save_asis = False\n\n if args.vad_based_trim is not None:\n wave = vad_trim(vad_reader, uttid, wave, rate)\n save_asis = False\n\n if wave.ndim == 2 and utt2ref_channels is not None:\n wave = wave[:, utt2ref_channels(uttid)]\n save_asis = False\n\n if args.segments is not None:\n save_asis = False\n\n if args.audio_format.endswith(\"ark\"):\n save_asis = False\n\n if args.multi_columns_input:\n if args.multi_columns_output:\n if wavpath is not None:\n for _wavpath in wavpath.split():\n if Path(_wavpath).suffix != \".\" + args.audio_format:\n save_asis = False\n break\n\n if wave.ndim == 1:\n _num_ch = 1\n else:\n _num_ch = wave.shape[1]\n if len(wavpath.split()) != _num_ch:\n save_asis = False\n else:\n if wavpath is not None and len(wavpath.split()) > 1:\n save_asis = False\n\n elif args.multi_columns_output:\n if wave.ndim == 2 and wave.shape[1] > 1:\n save_asis = False\n\n if wavpath is not None and wavpath.endswith(\"|\"):\n save_asis = False\n if wavpath is not None and Path(wavpath).suffix != \".\" + args.audio_format:\n save_asis = False\n\n if not args.audio_format.endswith(\"ark\") and subtypes is not None:\n if args.audio_subtype is None:\n subtype2 = soundfile.default_subtype(args.audio_format)\n else:\n subtype2 = args.audio_subtype\n for subtype in subtypes:\n if subtype != subtype2:\n save_asis = False\n break\n\n if save_asis:\n writer.fscp.write(f\"{uttid} {wavpath}\\n\")\n\n elif args.audio_format.endswith(\"ark\"):\n for name in soundfile.available_formats():\n if name.lower() in args.audio_format.lower():\n suf = name.lower()\n break\n else:\n raise RuntimeError(f\"{args.audio_format} is not supported.\")\n\n # NOTE(kamo): Using extended ark format style here.\n # This format is incompatible with Kaldi\n kaldiio.save_ark(\n fark,\n {uttid: (wave, rate)},\n scp=fscp_out,\n append=True,\n write_function=\"soundfile\",\n write_kwargs={\"format\": suf, \"subtype\": args.audio_subtype},\n )\n\n else:\n writer[uttid] = rate, wave\n fnum_samples.write(f\"{uttid} {len(wave)}\\n\")\n\n\nif __name__ == \"__main__\":\n main()\n",
"import argparse\nimport logging\nfrom io import BytesIO\nfrom pathlib import Path\nfrom typing import Optional, Tuple\nimport humanfriendly\nimport kaldiio\nimport numpy as np\nimport resampy\nimport soundfile\nfrom tqdm import tqdm\nfrom typeguard import check_argument_types\nfrom espnet2.fileio.read_text import read_2columns_text\nfrom espnet2.fileio.sound_scp import SoundScpWriter, soundfile_read\nfrom espnet2.fileio.vad_scp import VADScpReader\nfrom espnet2.utils.types import str2bool\nfrom espnet.utils.cli_utils import get_commandline_args\n\n\ndef humanfriendly_or_none(value: str):\n if value in ('none', 'None', 'NONE'):\n return None\n return humanfriendly.parse_size(value)\n\n\ndef str2int_tuple(integers: str) ->Optional[Tuple[int, ...]]:\n \"\"\"\n\n >>> str2int_tuple('3,4,5')\n (3, 4, 5)\n\n \"\"\"\n assert check_argument_types()\n if integers.strip() in ('none', 'None', 'NONE', 'null', 'Null', 'NULL'):\n return None\n return tuple(map(int, integers.strip().split(',')))\n\n\ndef vad_trim(vad_reader: VADScpReader, uttid: str, wav: np.array, fs: int\n ) ->np.array:\n assert check_argument_types()\n assert uttid in vad_reader, uttid\n vad_info = vad_reader[uttid]\n total_length = sum(int((time[1] - time[0]) * fs) for time in vad_info)\n new_wav = np.zeros((total_length,), dtype=wav.dtype)\n start_frame = 0\n for time in vad_info:\n duration = int((time[1] - time[0]) * fs)\n orig_start_frame = int(time[0] * fs)\n orig_end_frame = orig_start_frame + duration\n end_frame = start_frame + duration\n new_wav[start_frame:end_frame] = wav[orig_start_frame:orig_end_frame]\n start_frame = end_frame\n return new_wav\n\n\nclass SegmentsExtractor:\n \"\"\"Emulating kaldi extract-segments.cc\n\n Args:\n segments (str): The file format is\n \"<segment-id> <recording-id> <start-time> <end-time>\n\"\n \"e.g. 
call-861225-A-0050-0065 call-861225-A 5.0 6.5\n\"\n \"\"\"\n\n def __init__(self, fname: str, segments: str=None, multi_columns: bool=\n False):\n assert check_argument_types()\n self.wav_scp = fname\n self.multi_columns = multi_columns\n self.wav_dict = {}\n with open(self.wav_scp, 'r') as f:\n for line in f:\n recodeid, wavpath = line.strip().split(None, 1)\n if recodeid in self.wav_dict:\n raise RuntimeError(f'{recodeid} is duplicated')\n self.wav_dict[recodeid] = wavpath\n self.segments = segments\n self.segments_dict = {}\n with open(self.segments, 'r') as f:\n for line in f:\n sps = line.rstrip().split(None)\n if len(sps) != 4:\n raise RuntimeError('Format is invalid: {}'.format(line))\n uttid, recodeid, st, et = sps\n self.segments_dict[uttid] = recodeid, float(st), float(et)\n if recodeid not in self.wav_dict:\n raise RuntimeError('Not found \"{}\" in {}'.format(\n recodeid, self.wav_scp))\n\n def generator(self):\n recodeid_counter = {}\n for utt, (recodeid, st, et) in self.segments_dict.items():\n recodeid_counter[recodeid] = recodeid_counter.get(recodeid, 0) + 1\n cached = {}\n for utt, (recodeid, st, et) in self.segments_dict.items():\n wavpath = self.wav_dict[recodeid]\n if recodeid not in cached:\n if wavpath.endswith('|'):\n if self.multi_columns:\n raise RuntimeError(\n 'Not supporting multi_columns wav.scp for inputs by pipe'\n )\n with kaldiio.open_like_kaldi(wavpath, 'rb') as f:\n with BytesIO(f.read()) as g:\n array, rate = soundfile.read(g)\n elif self.multi_columns:\n array, rate = soundfile_read(wavs=wavpath.split(),\n dtype=None, always_2d=False, concat_axis=1)\n else:\n array, rate = soundfile.read(wavpath)\n cached[recodeid] = array, rate\n array, rate = cached[recodeid]\n recodeid_counter[recodeid] -= 1\n if recodeid_counter[recodeid] == 0:\n cached.pop(recodeid)\n if et != -1:\n array = array[int(st * rate):int(et * rate)]\n else:\n array = array[int(st * rate):]\n yield utt, (array, rate), None, None\n\n\ndef main():\n logfmt = '%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s'\n logging.basicConfig(level=logging.INFO, format=logfmt)\n logging.info(get_commandline_args())\n parser = argparse.ArgumentParser(description=\n 'Create waves list from \"wav.scp\"', formatter_class=argparse.\n ArgumentDefaultsHelpFormatter)\n parser.add_argument('scp')\n parser.add_argument('outdir')\n parser.add_argument('--name', default='wav', help=\n 'Specify the prefix word of output file name such as \"wav.scp\"')\n parser.add_argument('--segments', default=None)\n parser.add_argument('--fs', type=humanfriendly_or_none, default=None,\n help='If the sampling rate specified, Change the sampling rate.')\n parser.add_argument('--audio-format', default='wav')\n parser.add_argument('--vad_based_trim', type=str, default=None)\n group = parser.add_mutually_exclusive_group()\n group.add_argument('--ref-channels', default=None, type=str2int_tuple)\n group.add_argument('--utt2ref-channels', default=None, type=str)\n group.add_argument('--audio-subtype', default=None, type=str, help=\n 'Give a interpretable subtype by soundfile e.g. PCM_16. You can check all available types by soundfile.available_subtypes()'\n )\n parser.add_argument('--multi-columns-input', type=str2bool, default=\n False, help=\n \"Enable multi columns mode for input wav.scp. e.g. 'ID a.wav b.wav c.wav' is interpreted as 3ch audio data\"\n )\n parser.add_argument('--multi-columns-output', type=str2bool, default=\n False, help=\n \"Enable multi columns mode for output wav.scp. e.g. 
If input audio data has 2ch, each line in wav.scp has the the format like 'ID ID-CH0.wav ID-CH1.wav'\"\n )\n args = parser.parse_args()\n out_num_samples = Path(args.outdir) / 'utt2num_samples'\n if args.ref_channels is not None:\n\n def utt2ref_channels(x) ->Tuple[int, ...]:\n return args.ref_channels\n elif args.utt2ref_channels is not None:\n utt2ref_channels_dict = read_2columns_text(args.utt2ref_channels)\n\n def utt2ref_channels(x, d=utt2ref_channels_dict) ->Tuple[int, ...]:\n chs_str = d[x]\n return tuple(map(int, chs_str.split()))\n else:\n utt2ref_channels = None\n if args.audio_format.endswith('ark') and args.multi_columns_output:\n raise RuntimeError(\n 'Multi columns wav.scp is not supported for ark type')\n Path(args.outdir).mkdir(parents=True, exist_ok=True)\n out_wavscp = Path(args.outdir) / f'{args.name}.scp'\n if args.audio_format.endswith('ark'):\n fark = open(Path(args.outdir) / f'data_{args.name}.ark', 'wb')\n fscp_out = out_wavscp.open('w')\n writer = None\n else:\n writer = SoundScpWriter(args.outdir, out_wavscp, format=args.\n audio_format, multi_columns=args.multi_columns_output, subtype=\n args.audio_subtype)\n fscp_out = None\n if args.vad_based_trim is not None:\n vad_reader = VADScpReader(args.vad_based_trim)\n if args.segments is not None:\n extractor = SegmentsExtractor(args.scp, segments=args.segments,\n multi_columns=args.multi_columns_input)\n generator = extractor.generator\n else:\n\n def generator():\n with Path(args.scp).open('r') as fscp:\n for line in tqdm(fscp):\n uttid, wavpath = line.strip().split(None, 1)\n if wavpath.endswith('|'):\n if args.multi_columns_input:\n raise RuntimeError(\n 'Not supporting multi_columns wav.scp for inputs by pipe'\n )\n with kaldiio.open_like_kaldi(wavpath, 'rb') as f:\n with BytesIO(f.read()) as g:\n wave, rate = soundfile.read(g)\n subtypes = None\n elif args.multi_columns_input:\n wave, rate, subtypes = soundfile_read(wavs=wavpath.\n split(), dtype=None, always_2d=False,\n concat_axis=1, return_subtype=True)\n else:\n with soundfile.SoundFile(wavpath) as sf:\n rate = sf.samplerate\n subtypes = [sf.subtype]\n wave = sf.read()\n yield uttid, (wave, rate), wavpath, subtypes\n with out_num_samples.open('w') as fnum_samples:\n for uttid, (wave, rate), wavpath, subtypes in tqdm(generator()):\n save_asis = True\n if args.fs is not None and args.fs != rate:\n wave = resampy.resample(wave, rate, args.fs, axis=0)\n rate = args.fs\n save_asis = False\n if args.vad_based_trim is not None:\n wave = vad_trim(vad_reader, uttid, wave, rate)\n save_asis = False\n if wave.ndim == 2 and utt2ref_channels is not None:\n wave = wave[:, utt2ref_channels(uttid)]\n save_asis = False\n if args.segments is not None:\n save_asis = False\n if args.audio_format.endswith('ark'):\n save_asis = False\n if args.multi_columns_input:\n if args.multi_columns_output:\n if wavpath is not None:\n for _wavpath in wavpath.split():\n if Path(_wavpath\n ).suffix != '.' + args.audio_format:\n save_asis = False\n break\n if wave.ndim == 1:\n _num_ch = 1\n else:\n _num_ch = wave.shape[1]\n if len(wavpath.split()) != _num_ch:\n save_asis = False\n elif wavpath is not None and len(wavpath.split()) > 1:\n save_asis = False\n elif args.multi_columns_output:\n if wave.ndim == 2 and wave.shape[1] > 1:\n save_asis = False\n if wavpath is not None and wavpath.endswith('|'):\n save_asis = False\n if wavpath is not None and Path(wavpath\n ).suffix != '.' 
+ args.audio_format:\n save_asis = False\n if not args.audio_format.endswith('ark') and subtypes is not None:\n if args.audio_subtype is None:\n subtype2 = soundfile.default_subtype(args.audio_format)\n else:\n subtype2 = args.audio_subtype\n for subtype in subtypes:\n if subtype != subtype2:\n save_asis = False\n break\n if save_asis:\n writer.fscp.write(f'{uttid} {wavpath}\\n')\n elif args.audio_format.endswith('ark'):\n for name in soundfile.available_formats():\n if name.lower() in args.audio_format.lower():\n suf = name.lower()\n break\n else:\n raise RuntimeError(f'{args.audio_format} is not supported.'\n )\n kaldiio.save_ark(fark, {uttid: (wave, rate)}, scp=fscp_out,\n append=True, write_function='soundfile', write_kwargs={\n 'format': suf, 'subtype': args.audio_subtype})\n else:\n writer[uttid] = rate, wave\n fnum_samples.write(f'{uttid} {len(wave)}\\n')\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n\n\ndef humanfriendly_or_none(value: str):\n if value in ('none', 'None', 'NONE'):\n return None\n return humanfriendly.parse_size(value)\n\n\ndef str2int_tuple(integers: str) ->Optional[Tuple[int, ...]]:\n \"\"\"\n\n >>> str2int_tuple('3,4,5')\n (3, 4, 5)\n\n \"\"\"\n assert check_argument_types()\n if integers.strip() in ('none', 'None', 'NONE', 'null', 'Null', 'NULL'):\n return None\n return tuple(map(int, integers.strip().split(',')))\n\n\ndef vad_trim(vad_reader: VADScpReader, uttid: str, wav: np.array, fs: int\n ) ->np.array:\n assert check_argument_types()\n assert uttid in vad_reader, uttid\n vad_info = vad_reader[uttid]\n total_length = sum(int((time[1] - time[0]) * fs) for time in vad_info)\n new_wav = np.zeros((total_length,), dtype=wav.dtype)\n start_frame = 0\n for time in vad_info:\n duration = int((time[1] - time[0]) * fs)\n orig_start_frame = int(time[0] * fs)\n orig_end_frame = orig_start_frame + duration\n end_frame = start_frame + duration\n new_wav[start_frame:end_frame] = wav[orig_start_frame:orig_end_frame]\n start_frame = end_frame\n return new_wav\n\n\nclass SegmentsExtractor:\n \"\"\"Emulating kaldi extract-segments.cc\n\n Args:\n segments (str): The file format is\n \"<segment-id> <recording-id> <start-time> <end-time>\n\"\n \"e.g. call-861225-A-0050-0065 call-861225-A 5.0 6.5\n\"\n \"\"\"\n\n def __init__(self, fname: str, segments: str=None, multi_columns: bool=\n False):\n assert check_argument_types()\n self.wav_scp = fname\n self.multi_columns = multi_columns\n self.wav_dict = {}\n with open(self.wav_scp, 'r') as f:\n for line in f:\n recodeid, wavpath = line.strip().split(None, 1)\n if recodeid in self.wav_dict:\n raise RuntimeError(f'{recodeid} is duplicated')\n self.wav_dict[recodeid] = wavpath\n self.segments = segments\n self.segments_dict = {}\n with open(self.segments, 'r') as f:\n for line in f:\n sps = line.rstrip().split(None)\n if len(sps) != 4:\n raise RuntimeError('Format is invalid: {}'.format(line))\n uttid, recodeid, st, et = sps\n self.segments_dict[uttid] = recodeid, float(st), float(et)\n if recodeid not in self.wav_dict:\n raise RuntimeError('Not found \"{}\" in {}'.format(\n recodeid, self.wav_scp))\n\n def generator(self):\n recodeid_counter = {}\n for utt, (recodeid, st, et) in self.segments_dict.items():\n recodeid_counter[recodeid] = recodeid_counter.get(recodeid, 0) + 1\n cached = {}\n for utt, (recodeid, st, et) in self.segments_dict.items():\n wavpath = self.wav_dict[recodeid]\n if recodeid not in cached:\n if wavpath.endswith('|'):\n if self.multi_columns:\n raise RuntimeError(\n 'Not supporting multi_columns wav.scp for inputs by pipe'\n )\n with kaldiio.open_like_kaldi(wavpath, 'rb') as f:\n with BytesIO(f.read()) as g:\n array, rate = soundfile.read(g)\n elif self.multi_columns:\n array, rate = soundfile_read(wavs=wavpath.split(),\n dtype=None, always_2d=False, concat_axis=1)\n else:\n array, rate = soundfile.read(wavpath)\n cached[recodeid] = array, rate\n array, rate = cached[recodeid]\n recodeid_counter[recodeid] -= 1\n if recodeid_counter[recodeid] == 0:\n cached.pop(recodeid)\n if et != -1:\n array = array[int(st * rate):int(et * rate)]\n else:\n array = array[int(st * rate):]\n yield utt, (array, rate), None, None\n\n\ndef main():\n logfmt = '%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s'\n logging.basicConfig(level=logging.INFO, format=logfmt)\n logging.info(get_commandline_args())\n parser = argparse.ArgumentParser(description=\n 'Create waves list from \"wav.scp\"', 
formatter_class=argparse.\n ArgumentDefaultsHelpFormatter)\n parser.add_argument('scp')\n parser.add_argument('outdir')\n parser.add_argument('--name', default='wav', help=\n 'Specify the prefix word of output file name such as \"wav.scp\"')\n parser.add_argument('--segments', default=None)\n parser.add_argument('--fs', type=humanfriendly_or_none, default=None,\n help='If the sampling rate specified, Change the sampling rate.')\n parser.add_argument('--audio-format', default='wav')\n parser.add_argument('--vad_based_trim', type=str, default=None)\n group = parser.add_mutually_exclusive_group()\n group.add_argument('--ref-channels', default=None, type=str2int_tuple)\n group.add_argument('--utt2ref-channels', default=None, type=str)\n group.add_argument('--audio-subtype', default=None, type=str, help=\n 'Give a interpretable subtype by soundfile e.g. PCM_16. You can check all available types by soundfile.available_subtypes()'\n )\n parser.add_argument('--multi-columns-input', type=str2bool, default=\n False, help=\n \"Enable multi columns mode for input wav.scp. e.g. 'ID a.wav b.wav c.wav' is interpreted as 3ch audio data\"\n )\n parser.add_argument('--multi-columns-output', type=str2bool, default=\n False, help=\n \"Enable multi columns mode for output wav.scp. e.g. If input audio data has 2ch, each line in wav.scp has the the format like 'ID ID-CH0.wav ID-CH1.wav'\"\n )\n args = parser.parse_args()\n out_num_samples = Path(args.outdir) / 'utt2num_samples'\n if args.ref_channels is not None:\n\n def utt2ref_channels(x) ->Tuple[int, ...]:\n return args.ref_channels\n elif args.utt2ref_channels is not None:\n utt2ref_channels_dict = read_2columns_text(args.utt2ref_channels)\n\n def utt2ref_channels(x, d=utt2ref_channels_dict) ->Tuple[int, ...]:\n chs_str = d[x]\n return tuple(map(int, chs_str.split()))\n else:\n utt2ref_channels = None\n if args.audio_format.endswith('ark') and args.multi_columns_output:\n raise RuntimeError(\n 'Multi columns wav.scp is not supported for ark type')\n Path(args.outdir).mkdir(parents=True, exist_ok=True)\n out_wavscp = Path(args.outdir) / f'{args.name}.scp'\n if args.audio_format.endswith('ark'):\n fark = open(Path(args.outdir) / f'data_{args.name}.ark', 'wb')\n fscp_out = out_wavscp.open('w')\n writer = None\n else:\n writer = SoundScpWriter(args.outdir, out_wavscp, format=args.\n audio_format, multi_columns=args.multi_columns_output, subtype=\n args.audio_subtype)\n fscp_out = None\n if args.vad_based_trim is not None:\n vad_reader = VADScpReader(args.vad_based_trim)\n if args.segments is not None:\n extractor = SegmentsExtractor(args.scp, segments=args.segments,\n multi_columns=args.multi_columns_input)\n generator = extractor.generator\n else:\n\n def generator():\n with Path(args.scp).open('r') as fscp:\n for line in tqdm(fscp):\n uttid, wavpath = line.strip().split(None, 1)\n if wavpath.endswith('|'):\n if args.multi_columns_input:\n raise RuntimeError(\n 'Not supporting multi_columns wav.scp for inputs by pipe'\n )\n with kaldiio.open_like_kaldi(wavpath, 'rb') as f:\n with BytesIO(f.read()) as g:\n wave, rate = soundfile.read(g)\n subtypes = None\n elif args.multi_columns_input:\n wave, rate, subtypes = soundfile_read(wavs=wavpath.\n split(), dtype=None, always_2d=False,\n concat_axis=1, return_subtype=True)\n else:\n with soundfile.SoundFile(wavpath) as sf:\n rate = sf.samplerate\n subtypes = [sf.subtype]\n wave = sf.read()\n yield uttid, (wave, rate), wavpath, subtypes\n with out_num_samples.open('w') as fnum_samples:\n for uttid, (wave, rate), 
wavpath, subtypes in tqdm(generator()):\n save_asis = True\n if args.fs is not None and args.fs != rate:\n wave = resampy.resample(wave, rate, args.fs, axis=0)\n rate = args.fs\n save_asis = False\n if args.vad_based_trim is not None:\n wave = vad_trim(vad_reader, uttid, wave, rate)\n save_asis = False\n if wave.ndim == 2 and utt2ref_channels is not None:\n wave = wave[:, utt2ref_channels(uttid)]\n save_asis = False\n if args.segments is not None:\n save_asis = False\n if args.audio_format.endswith('ark'):\n save_asis = False\n if args.multi_columns_input:\n if args.multi_columns_output:\n if wavpath is not None:\n for _wavpath in wavpath.split():\n if Path(_wavpath\n ).suffix != '.' + args.audio_format:\n save_asis = False\n break\n if wave.ndim == 1:\n _num_ch = 1\n else:\n _num_ch = wave.shape[1]\n if len(wavpath.split()) != _num_ch:\n save_asis = False\n elif wavpath is not None and len(wavpath.split()) > 1:\n save_asis = False\n elif args.multi_columns_output:\n if wave.ndim == 2 and wave.shape[1] > 1:\n save_asis = False\n if wavpath is not None and wavpath.endswith('|'):\n save_asis = False\n if wavpath is not None and Path(wavpath\n ).suffix != '.' + args.audio_format:\n save_asis = False\n if not args.audio_format.endswith('ark') and subtypes is not None:\n if args.audio_subtype is None:\n subtype2 = soundfile.default_subtype(args.audio_format)\n else:\n subtype2 = args.audio_subtype\n for subtype in subtypes:\n if subtype != subtype2:\n save_asis = False\n break\n if save_asis:\n writer.fscp.write(f'{uttid} {wavpath}\\n')\n elif args.audio_format.endswith('ark'):\n for name in soundfile.available_formats():\n if name.lower() in args.audio_format.lower():\n suf = name.lower()\n break\n else:\n raise RuntimeError(f'{args.audio_format} is not supported.'\n )\n kaldiio.save_ark(fark, {uttid: (wave, rate)}, scp=fscp_out,\n append=True, write_function='soundfile', write_kwargs={\n 'format': suf, 'subtype': args.audio_subtype})\n else:\n writer[uttid] = rate, wave\n fnum_samples.write(f'{uttid} {len(wave)}\\n')\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n\n\ndef humanfriendly_or_none(value: str):\n if value in ('none', 'None', 'NONE'):\n return None\n return humanfriendly.parse_size(value)\n\n\ndef str2int_tuple(integers: str) ->Optional[Tuple[int, ...]]:\n \"\"\"\n\n >>> str2int_tuple('3,4,5')\n (3, 4, 5)\n\n \"\"\"\n assert check_argument_types()\n if integers.strip() in ('none', 'None', 'NONE', 'null', 'Null', 'NULL'):\n return None\n return tuple(map(int, integers.strip().split(',')))\n\n\ndef vad_trim(vad_reader: VADScpReader, uttid: str, wav: np.array, fs: int\n ) ->np.array:\n assert check_argument_types()\n assert uttid in vad_reader, uttid\n vad_info = vad_reader[uttid]\n total_length = sum(int((time[1] - time[0]) * fs) for time in vad_info)\n new_wav = np.zeros((total_length,), dtype=wav.dtype)\n start_frame = 0\n for time in vad_info:\n duration = int((time[1] - time[0]) * fs)\n orig_start_frame = int(time[0] * fs)\n orig_end_frame = orig_start_frame + duration\n end_frame = start_frame + duration\n new_wav[start_frame:end_frame] = wav[orig_start_frame:orig_end_frame]\n start_frame = end_frame\n return new_wav\n\n\nclass SegmentsExtractor:\n \"\"\"Emulating kaldi extract-segments.cc\n\n Args:\n segments (str): The file format is\n \"<segment-id> <recording-id> <start-time> <end-time>\n\"\n \"e.g. call-861225-A-0050-0065 call-861225-A 5.0 6.5\n\"\n \"\"\"\n\n def __init__(self, fname: str, segments: str=None, multi_columns: bool=\n False):\n assert check_argument_types()\n self.wav_scp = fname\n self.multi_columns = multi_columns\n self.wav_dict = {}\n with open(self.wav_scp, 'r') as f:\n for line in f:\n recodeid, wavpath = line.strip().split(None, 1)\n if recodeid in self.wav_dict:\n raise RuntimeError(f'{recodeid} is duplicated')\n self.wav_dict[recodeid] = wavpath\n self.segments = segments\n self.segments_dict = {}\n with open(self.segments, 'r') as f:\n for line in f:\n sps = line.rstrip().split(None)\n if len(sps) != 4:\n raise RuntimeError('Format is invalid: {}'.format(line))\n uttid, recodeid, st, et = sps\n self.segments_dict[uttid] = recodeid, float(st), float(et)\n if recodeid not in self.wav_dict:\n raise RuntimeError('Not found \"{}\" in {}'.format(\n recodeid, self.wav_scp))\n\n def generator(self):\n recodeid_counter = {}\n for utt, (recodeid, st, et) in self.segments_dict.items():\n recodeid_counter[recodeid] = recodeid_counter.get(recodeid, 0) + 1\n cached = {}\n for utt, (recodeid, st, et) in self.segments_dict.items():\n wavpath = self.wav_dict[recodeid]\n if recodeid not in cached:\n if wavpath.endswith('|'):\n if self.multi_columns:\n raise RuntimeError(\n 'Not supporting multi_columns wav.scp for inputs by pipe'\n )\n with kaldiio.open_like_kaldi(wavpath, 'rb') as f:\n with BytesIO(f.read()) as g:\n array, rate = soundfile.read(g)\n elif self.multi_columns:\n array, rate = soundfile_read(wavs=wavpath.split(),\n dtype=None, always_2d=False, concat_axis=1)\n else:\n array, rate = soundfile.read(wavpath)\n cached[recodeid] = array, rate\n array, rate = cached[recodeid]\n recodeid_counter[recodeid] -= 1\n if recodeid_counter[recodeid] == 0:\n cached.pop(recodeid)\n if et != -1:\n array = array[int(st * rate):int(et * rate)]\n else:\n array = array[int(st * rate):]\n yield utt, (array, rate), None, None\n\n\ndef main():\n logfmt = '%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s'\n logging.basicConfig(level=logging.INFO, format=logfmt)\n logging.info(get_commandline_args())\n parser = argparse.ArgumentParser(description=\n 'Create waves list from \"wav.scp\"', 
formatter_class=argparse.\n ArgumentDefaultsHelpFormatter)\n parser.add_argument('scp')\n parser.add_argument('outdir')\n parser.add_argument('--name', default='wav', help=\n 'Specify the prefix word of output file name such as \"wav.scp\"')\n parser.add_argument('--segments', default=None)\n parser.add_argument('--fs', type=humanfriendly_or_none, default=None,\n help='If the sampling rate specified, Change the sampling rate.')\n parser.add_argument('--audio-format', default='wav')\n parser.add_argument('--vad_based_trim', type=str, default=None)\n group = parser.add_mutually_exclusive_group()\n group.add_argument('--ref-channels', default=None, type=str2int_tuple)\n group.add_argument('--utt2ref-channels', default=None, type=str)\n group.add_argument('--audio-subtype', default=None, type=str, help=\n 'Give a interpretable subtype by soundfile e.g. PCM_16. You can check all available types by soundfile.available_subtypes()'\n )\n parser.add_argument('--multi-columns-input', type=str2bool, default=\n False, help=\n \"Enable multi columns mode for input wav.scp. e.g. 'ID a.wav b.wav c.wav' is interpreted as 3ch audio data\"\n )\n parser.add_argument('--multi-columns-output', type=str2bool, default=\n False, help=\n \"Enable multi columns mode for output wav.scp. e.g. If input audio data has 2ch, each line in wav.scp has the the format like 'ID ID-CH0.wav ID-CH1.wav'\"\n )\n args = parser.parse_args()\n out_num_samples = Path(args.outdir) / 'utt2num_samples'\n if args.ref_channels is not None:\n\n def utt2ref_channels(x) ->Tuple[int, ...]:\n return args.ref_channels\n elif args.utt2ref_channels is not None:\n utt2ref_channels_dict = read_2columns_text(args.utt2ref_channels)\n\n def utt2ref_channels(x, d=utt2ref_channels_dict) ->Tuple[int, ...]:\n chs_str = d[x]\n return tuple(map(int, chs_str.split()))\n else:\n utt2ref_channels = None\n if args.audio_format.endswith('ark') and args.multi_columns_output:\n raise RuntimeError(\n 'Multi columns wav.scp is not supported for ark type')\n Path(args.outdir).mkdir(parents=True, exist_ok=True)\n out_wavscp = Path(args.outdir) / f'{args.name}.scp'\n if args.audio_format.endswith('ark'):\n fark = open(Path(args.outdir) / f'data_{args.name}.ark', 'wb')\n fscp_out = out_wavscp.open('w')\n writer = None\n else:\n writer = SoundScpWriter(args.outdir, out_wavscp, format=args.\n audio_format, multi_columns=args.multi_columns_output, subtype=\n args.audio_subtype)\n fscp_out = None\n if args.vad_based_trim is not None:\n vad_reader = VADScpReader(args.vad_based_trim)\n if args.segments is not None:\n extractor = SegmentsExtractor(args.scp, segments=args.segments,\n multi_columns=args.multi_columns_input)\n generator = extractor.generator\n else:\n\n def generator():\n with Path(args.scp).open('r') as fscp:\n for line in tqdm(fscp):\n uttid, wavpath = line.strip().split(None, 1)\n if wavpath.endswith('|'):\n if args.multi_columns_input:\n raise RuntimeError(\n 'Not supporting multi_columns wav.scp for inputs by pipe'\n )\n with kaldiio.open_like_kaldi(wavpath, 'rb') as f:\n with BytesIO(f.read()) as g:\n wave, rate = soundfile.read(g)\n subtypes = None\n elif args.multi_columns_input:\n wave, rate, subtypes = soundfile_read(wavs=wavpath.\n split(), dtype=None, always_2d=False,\n concat_axis=1, return_subtype=True)\n else:\n with soundfile.SoundFile(wavpath) as sf:\n rate = sf.samplerate\n subtypes = [sf.subtype]\n wave = sf.read()\n yield uttid, (wave, rate), wavpath, subtypes\n with out_num_samples.open('w') as fnum_samples:\n for uttid, (wave, rate), 
wavpath, subtypes in tqdm(generator()):\n save_asis = True\n if args.fs is not None and args.fs != rate:\n wave = resampy.resample(wave, rate, args.fs, axis=0)\n rate = args.fs\n save_asis = False\n if args.vad_based_trim is not None:\n wave = vad_trim(vad_reader, uttid, wave, rate)\n save_asis = False\n if wave.ndim == 2 and utt2ref_channels is not None:\n wave = wave[:, utt2ref_channels(uttid)]\n save_asis = False\n if args.segments is not None:\n save_asis = False\n if args.audio_format.endswith('ark'):\n save_asis = False\n if args.multi_columns_input:\n if args.multi_columns_output:\n if wavpath is not None:\n for _wavpath in wavpath.split():\n if Path(_wavpath\n ).suffix != '.' + args.audio_format:\n save_asis = False\n break\n if wave.ndim == 1:\n _num_ch = 1\n else:\n _num_ch = wave.shape[1]\n if len(wavpath.split()) != _num_ch:\n save_asis = False\n elif wavpath is not None and len(wavpath.split()) > 1:\n save_asis = False\n elif args.multi_columns_output:\n if wave.ndim == 2 and wave.shape[1] > 1:\n save_asis = False\n if wavpath is not None and wavpath.endswith('|'):\n save_asis = False\n if wavpath is not None and Path(wavpath\n ).suffix != '.' + args.audio_format:\n save_asis = False\n if not args.audio_format.endswith('ark') and subtypes is not None:\n if args.audio_subtype is None:\n subtype2 = soundfile.default_subtype(args.audio_format)\n else:\n subtype2 = args.audio_subtype\n for subtype in subtypes:\n if subtype != subtype2:\n save_asis = False\n break\n if save_asis:\n writer.fscp.write(f'{uttid} {wavpath}\\n')\n elif args.audio_format.endswith('ark'):\n for name in soundfile.available_formats():\n if name.lower() in args.audio_format.lower():\n suf = name.lower()\n break\n else:\n raise RuntimeError(f'{args.audio_format} is not supported.'\n )\n kaldiio.save_ark(fark, {uttid: (wave, rate)}, scp=fscp_out,\n append=True, write_function='soundfile', write_kwargs={\n 'format': suf, 'subtype': args.audio_subtype})\n else:\n writer[uttid] = rate, wave\n fnum_samples.write(f'{uttid} {len(wave)}\\n')\n\n\n<code token>\n",
"<import token>\n<function token>\n\n\ndef str2int_tuple(integers: str) ->Optional[Tuple[int, ...]]:\n \"\"\"\n\n >>> str2int_tuple('3,4,5')\n (3, 4, 5)\n\n \"\"\"\n assert check_argument_types()\n if integers.strip() in ('none', 'None', 'NONE', 'null', 'Null', 'NULL'):\n return None\n return tuple(map(int, integers.strip().split(',')))\n\n\ndef vad_trim(vad_reader: VADScpReader, uttid: str, wav: np.array, fs: int\n ) ->np.array:\n assert check_argument_types()\n assert uttid in vad_reader, uttid\n vad_info = vad_reader[uttid]\n total_length = sum(int((time[1] - time[0]) * fs) for time in vad_info)\n new_wav = np.zeros((total_length,), dtype=wav.dtype)\n start_frame = 0\n for time in vad_info:\n duration = int((time[1] - time[0]) * fs)\n orig_start_frame = int(time[0] * fs)\n orig_end_frame = orig_start_frame + duration\n end_frame = start_frame + duration\n new_wav[start_frame:end_frame] = wav[orig_start_frame:orig_end_frame]\n start_frame = end_frame\n return new_wav\n\n\nclass SegmentsExtractor:\n \"\"\"Emulating kaldi extract-segments.cc\n\n Args:\n segments (str): The file format is\n \"<segment-id> <recording-id> <start-time> <end-time>\n\"\n \"e.g. call-861225-A-0050-0065 call-861225-A 5.0 6.5\n\"\n \"\"\"\n\n def __init__(self, fname: str, segments: str=None, multi_columns: bool=\n False):\n assert check_argument_types()\n self.wav_scp = fname\n self.multi_columns = multi_columns\n self.wav_dict = {}\n with open(self.wav_scp, 'r') as f:\n for line in f:\n recodeid, wavpath = line.strip().split(None, 1)\n if recodeid in self.wav_dict:\n raise RuntimeError(f'{recodeid} is duplicated')\n self.wav_dict[recodeid] = wavpath\n self.segments = segments\n self.segments_dict = {}\n with open(self.segments, 'r') as f:\n for line in f:\n sps = line.rstrip().split(None)\n if len(sps) != 4:\n raise RuntimeError('Format is invalid: {}'.format(line))\n uttid, recodeid, st, et = sps\n self.segments_dict[uttid] = recodeid, float(st), float(et)\n if recodeid not in self.wav_dict:\n raise RuntimeError('Not found \"{}\" in {}'.format(\n recodeid, self.wav_scp))\n\n def generator(self):\n recodeid_counter = {}\n for utt, (recodeid, st, et) in self.segments_dict.items():\n recodeid_counter[recodeid] = recodeid_counter.get(recodeid, 0) + 1\n cached = {}\n for utt, (recodeid, st, et) in self.segments_dict.items():\n wavpath = self.wav_dict[recodeid]\n if recodeid not in cached:\n if wavpath.endswith('|'):\n if self.multi_columns:\n raise RuntimeError(\n 'Not supporting multi_columns wav.scp for inputs by pipe'\n )\n with kaldiio.open_like_kaldi(wavpath, 'rb') as f:\n with BytesIO(f.read()) as g:\n array, rate = soundfile.read(g)\n elif self.multi_columns:\n array, rate = soundfile_read(wavs=wavpath.split(),\n dtype=None, always_2d=False, concat_axis=1)\n else:\n array, rate = soundfile.read(wavpath)\n cached[recodeid] = array, rate\n array, rate = cached[recodeid]\n recodeid_counter[recodeid] -= 1\n if recodeid_counter[recodeid] == 0:\n cached.pop(recodeid)\n if et != -1:\n array = array[int(st * rate):int(et * rate)]\n else:\n array = array[int(st * rate):]\n yield utt, (array, rate), None, None\n\n\ndef main():\n logfmt = '%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s'\n logging.basicConfig(level=logging.INFO, format=logfmt)\n logging.info(get_commandline_args())\n parser = argparse.ArgumentParser(description=\n 'Create waves list from \"wav.scp\"', formatter_class=argparse.\n ArgumentDefaultsHelpFormatter)\n parser.add_argument('scp')\n parser.add_argument('outdir')\n 
parser.add_argument('--name', default='wav', help=\n 'Specify the prefix word of output file name such as \"wav.scp\"')\n parser.add_argument('--segments', default=None)\n parser.add_argument('--fs', type=humanfriendly_or_none, default=None,\n help='If the sampling rate specified, Change the sampling rate.')\n parser.add_argument('--audio-format', default='wav')\n parser.add_argument('--vad_based_trim', type=str, default=None)\n group = parser.add_mutually_exclusive_group()\n group.add_argument('--ref-channels', default=None, type=str2int_tuple)\n group.add_argument('--utt2ref-channels', default=None, type=str)\n group.add_argument('--audio-subtype', default=None, type=str, help=\n 'Give a interpretable subtype by soundfile e.g. PCM_16. You can check all available types by soundfile.available_subtypes()'\n )\n parser.add_argument('--multi-columns-input', type=str2bool, default=\n False, help=\n \"Enable multi columns mode for input wav.scp. e.g. 'ID a.wav b.wav c.wav' is interpreted as 3ch audio data\"\n )\n parser.add_argument('--multi-columns-output', type=str2bool, default=\n False, help=\n \"Enable multi columns mode for output wav.scp. e.g. If input audio data has 2ch, each line in wav.scp has the the format like 'ID ID-CH0.wav ID-CH1.wav'\"\n )\n args = parser.parse_args()\n out_num_samples = Path(args.outdir) / 'utt2num_samples'\n if args.ref_channels is not None:\n\n def utt2ref_channels(x) ->Tuple[int, ...]:\n return args.ref_channels\n elif args.utt2ref_channels is not None:\n utt2ref_channels_dict = read_2columns_text(args.utt2ref_channels)\n\n def utt2ref_channels(x, d=utt2ref_channels_dict) ->Tuple[int, ...]:\n chs_str = d[x]\n return tuple(map(int, chs_str.split()))\n else:\n utt2ref_channels = None\n if args.audio_format.endswith('ark') and args.multi_columns_output:\n raise RuntimeError(\n 'Multi columns wav.scp is not supported for ark type')\n Path(args.outdir).mkdir(parents=True, exist_ok=True)\n out_wavscp = Path(args.outdir) / f'{args.name}.scp'\n if args.audio_format.endswith('ark'):\n fark = open(Path(args.outdir) / f'data_{args.name}.ark', 'wb')\n fscp_out = out_wavscp.open('w')\n writer = None\n else:\n writer = SoundScpWriter(args.outdir, out_wavscp, format=args.\n audio_format, multi_columns=args.multi_columns_output, subtype=\n args.audio_subtype)\n fscp_out = None\n if args.vad_based_trim is not None:\n vad_reader = VADScpReader(args.vad_based_trim)\n if args.segments is not None:\n extractor = SegmentsExtractor(args.scp, segments=args.segments,\n multi_columns=args.multi_columns_input)\n generator = extractor.generator\n else:\n\n def generator():\n with Path(args.scp).open('r') as fscp:\n for line in tqdm(fscp):\n uttid, wavpath = line.strip().split(None, 1)\n if wavpath.endswith('|'):\n if args.multi_columns_input:\n raise RuntimeError(\n 'Not supporting multi_columns wav.scp for inputs by pipe'\n )\n with kaldiio.open_like_kaldi(wavpath, 'rb') as f:\n with BytesIO(f.read()) as g:\n wave, rate = soundfile.read(g)\n subtypes = None\n elif args.multi_columns_input:\n wave, rate, subtypes = soundfile_read(wavs=wavpath.\n split(), dtype=None, always_2d=False,\n concat_axis=1, return_subtype=True)\n else:\n with soundfile.SoundFile(wavpath) as sf:\n rate = sf.samplerate\n subtypes = [sf.subtype]\n wave = sf.read()\n yield uttid, (wave, rate), wavpath, subtypes\n with out_num_samples.open('w') as fnum_samples:\n for uttid, (wave, rate), wavpath, subtypes in tqdm(generator()):\n save_asis = True\n if args.fs is not None and args.fs != rate:\n wave = 
resampy.resample(wave, rate, args.fs, axis=0)\n rate = args.fs\n save_asis = False\n if args.vad_based_trim is not None:\n wave = vad_trim(vad_reader, uttid, wave, rate)\n save_asis = False\n if wave.ndim == 2 and utt2ref_channels is not None:\n wave = wave[:, utt2ref_channels(uttid)]\n save_asis = False\n if args.segments is not None:\n save_asis = False\n if args.audio_format.endswith('ark'):\n save_asis = False\n if args.multi_columns_input:\n if args.multi_columns_output:\n if wavpath is not None:\n for _wavpath in wavpath.split():\n if Path(_wavpath\n ).suffix != '.' + args.audio_format:\n save_asis = False\n break\n if wave.ndim == 1:\n _num_ch = 1\n else:\n _num_ch = wave.shape[1]\n if len(wavpath.split()) != _num_ch:\n save_asis = False\n elif wavpath is not None and len(wavpath.split()) > 1:\n save_asis = False\n elif args.multi_columns_output:\n if wave.ndim == 2 and wave.shape[1] > 1:\n save_asis = False\n if wavpath is not None and wavpath.endswith('|'):\n save_asis = False\n if wavpath is not None and Path(wavpath\n ).suffix != '.' + args.audio_format:\n save_asis = False\n if not args.audio_format.endswith('ark') and subtypes is not None:\n if args.audio_subtype is None:\n subtype2 = soundfile.default_subtype(args.audio_format)\n else:\n subtype2 = args.audio_subtype\n for subtype in subtypes:\n if subtype != subtype2:\n save_asis = False\n break\n if save_asis:\n writer.fscp.write(f'{uttid} {wavpath}\\n')\n elif args.audio_format.endswith('ark'):\n for name in soundfile.available_formats():\n if name.lower() in args.audio_format.lower():\n suf = name.lower()\n break\n else:\n raise RuntimeError(f'{args.audio_format} is not supported.'\n )\n kaldiio.save_ark(fark, {uttid: (wave, rate)}, scp=fscp_out,\n append=True, write_function='soundfile', write_kwargs={\n 'format': suf, 'subtype': args.audio_subtype})\n else:\n writer[uttid] = rate, wave\n fnum_samples.write(f'{uttid} {len(wave)}\\n')\n\n\n<code token>\n",
"<import token>\n<function token>\n\n\ndef str2int_tuple(integers: str) ->Optional[Tuple[int, ...]]:\n \"\"\"\n\n >>> str2int_tuple('3,4,5')\n (3, 4, 5)\n\n \"\"\"\n assert check_argument_types()\n if integers.strip() in ('none', 'None', 'NONE', 'null', 'Null', 'NULL'):\n return None\n return tuple(map(int, integers.strip().split(',')))\n\n\ndef vad_trim(vad_reader: VADScpReader, uttid: str, wav: np.array, fs: int\n ) ->np.array:\n assert check_argument_types()\n assert uttid in vad_reader, uttid\n vad_info = vad_reader[uttid]\n total_length = sum(int((time[1] - time[0]) * fs) for time in vad_info)\n new_wav = np.zeros((total_length,), dtype=wav.dtype)\n start_frame = 0\n for time in vad_info:\n duration = int((time[1] - time[0]) * fs)\n orig_start_frame = int(time[0] * fs)\n orig_end_frame = orig_start_frame + duration\n end_frame = start_frame + duration\n new_wav[start_frame:end_frame] = wav[orig_start_frame:orig_end_frame]\n start_frame = end_frame\n return new_wav\n\n\nclass SegmentsExtractor:\n \"\"\"Emulating kaldi extract-segments.cc\n\n Args:\n segments (str): The file format is\n \"<segment-id> <recording-id> <start-time> <end-time>\n\"\n \"e.g. call-861225-A-0050-0065 call-861225-A 5.0 6.5\n\"\n \"\"\"\n\n def __init__(self, fname: str, segments: str=None, multi_columns: bool=\n False):\n assert check_argument_types()\n self.wav_scp = fname\n self.multi_columns = multi_columns\n self.wav_dict = {}\n with open(self.wav_scp, 'r') as f:\n for line in f:\n recodeid, wavpath = line.strip().split(None, 1)\n if recodeid in self.wav_dict:\n raise RuntimeError(f'{recodeid} is duplicated')\n self.wav_dict[recodeid] = wavpath\n self.segments = segments\n self.segments_dict = {}\n with open(self.segments, 'r') as f:\n for line in f:\n sps = line.rstrip().split(None)\n if len(sps) != 4:\n raise RuntimeError('Format is invalid: {}'.format(line))\n uttid, recodeid, st, et = sps\n self.segments_dict[uttid] = recodeid, float(st), float(et)\n if recodeid not in self.wav_dict:\n raise RuntimeError('Not found \"{}\" in {}'.format(\n recodeid, self.wav_scp))\n\n def generator(self):\n recodeid_counter = {}\n for utt, (recodeid, st, et) in self.segments_dict.items():\n recodeid_counter[recodeid] = recodeid_counter.get(recodeid, 0) + 1\n cached = {}\n for utt, (recodeid, st, et) in self.segments_dict.items():\n wavpath = self.wav_dict[recodeid]\n if recodeid not in cached:\n if wavpath.endswith('|'):\n if self.multi_columns:\n raise RuntimeError(\n 'Not supporting multi_columns wav.scp for inputs by pipe'\n )\n with kaldiio.open_like_kaldi(wavpath, 'rb') as f:\n with BytesIO(f.read()) as g:\n array, rate = soundfile.read(g)\n elif self.multi_columns:\n array, rate = soundfile_read(wavs=wavpath.split(),\n dtype=None, always_2d=False, concat_axis=1)\n else:\n array, rate = soundfile.read(wavpath)\n cached[recodeid] = array, rate\n array, rate = cached[recodeid]\n recodeid_counter[recodeid] -= 1\n if recodeid_counter[recodeid] == 0:\n cached.pop(recodeid)\n if et != -1:\n array = array[int(st * rate):int(et * rate)]\n else:\n array = array[int(st * rate):]\n yield utt, (array, rate), None, None\n\n\n<function token>\n<code token>\n",
"<import token>\n<function token>\n\n\ndef str2int_tuple(integers: str) ->Optional[Tuple[int, ...]]:\n \"\"\"\n\n >>> str2int_tuple('3,4,5')\n (3, 4, 5)\n\n \"\"\"\n assert check_argument_types()\n if integers.strip() in ('none', 'None', 'NONE', 'null', 'Null', 'NULL'):\n return None\n return tuple(map(int, integers.strip().split(',')))\n\n\n<function token>\n\n\nclass SegmentsExtractor:\n \"\"\"Emulating kaldi extract-segments.cc\n\n Args:\n segments (str): The file format is\n \"<segment-id> <recording-id> <start-time> <end-time>\n\"\n \"e.g. call-861225-A-0050-0065 call-861225-A 5.0 6.5\n\"\n \"\"\"\n\n def __init__(self, fname: str, segments: str=None, multi_columns: bool=\n False):\n assert check_argument_types()\n self.wav_scp = fname\n self.multi_columns = multi_columns\n self.wav_dict = {}\n with open(self.wav_scp, 'r') as f:\n for line in f:\n recodeid, wavpath = line.strip().split(None, 1)\n if recodeid in self.wav_dict:\n raise RuntimeError(f'{recodeid} is duplicated')\n self.wav_dict[recodeid] = wavpath\n self.segments = segments\n self.segments_dict = {}\n with open(self.segments, 'r') as f:\n for line in f:\n sps = line.rstrip().split(None)\n if len(sps) != 4:\n raise RuntimeError('Format is invalid: {}'.format(line))\n uttid, recodeid, st, et = sps\n self.segments_dict[uttid] = recodeid, float(st), float(et)\n if recodeid not in self.wav_dict:\n raise RuntimeError('Not found \"{}\" in {}'.format(\n recodeid, self.wav_scp))\n\n def generator(self):\n recodeid_counter = {}\n for utt, (recodeid, st, et) in self.segments_dict.items():\n recodeid_counter[recodeid] = recodeid_counter.get(recodeid, 0) + 1\n cached = {}\n for utt, (recodeid, st, et) in self.segments_dict.items():\n wavpath = self.wav_dict[recodeid]\n if recodeid not in cached:\n if wavpath.endswith('|'):\n if self.multi_columns:\n raise RuntimeError(\n 'Not supporting multi_columns wav.scp for inputs by pipe'\n )\n with kaldiio.open_like_kaldi(wavpath, 'rb') as f:\n with BytesIO(f.read()) as g:\n array, rate = soundfile.read(g)\n elif self.multi_columns:\n array, rate = soundfile_read(wavs=wavpath.split(),\n dtype=None, always_2d=False, concat_axis=1)\n else:\n array, rate = soundfile.read(wavpath)\n cached[recodeid] = array, rate\n array, rate = cached[recodeid]\n recodeid_counter[recodeid] -= 1\n if recodeid_counter[recodeid] == 0:\n cached.pop(recodeid)\n if et != -1:\n array = array[int(st * rate):int(et * rate)]\n else:\n array = array[int(st * rate):]\n yield utt, (array, rate), None, None\n\n\n<function token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\nclass SegmentsExtractor:\n \"\"\"Emulating kaldi extract-segments.cc\n\n Args:\n segments (str): The file format is\n \"<segment-id> <recording-id> <start-time> <end-time>\n\"\n \"e.g. call-861225-A-0050-0065 call-861225-A 5.0 6.5\n\"\n \"\"\"\n\n def __init__(self, fname: str, segments: str=None, multi_columns: bool=\n False):\n assert check_argument_types()\n self.wav_scp = fname\n self.multi_columns = multi_columns\n self.wav_dict = {}\n with open(self.wav_scp, 'r') as f:\n for line in f:\n recodeid, wavpath = line.strip().split(None, 1)\n if recodeid in self.wav_dict:\n raise RuntimeError(f'{recodeid} is duplicated')\n self.wav_dict[recodeid] = wavpath\n self.segments = segments\n self.segments_dict = {}\n with open(self.segments, 'r') as f:\n for line in f:\n sps = line.rstrip().split(None)\n if len(sps) != 4:\n raise RuntimeError('Format is invalid: {}'.format(line))\n uttid, recodeid, st, et = sps\n self.segments_dict[uttid] = recodeid, float(st), float(et)\n if recodeid not in self.wav_dict:\n raise RuntimeError('Not found \"{}\" in {}'.format(\n recodeid, self.wav_scp))\n\n def generator(self):\n recodeid_counter = {}\n for utt, (recodeid, st, et) in self.segments_dict.items():\n recodeid_counter[recodeid] = recodeid_counter.get(recodeid, 0) + 1\n cached = {}\n for utt, (recodeid, st, et) in self.segments_dict.items():\n wavpath = self.wav_dict[recodeid]\n if recodeid not in cached:\n if wavpath.endswith('|'):\n if self.multi_columns:\n raise RuntimeError(\n 'Not supporting multi_columns wav.scp for inputs by pipe'\n )\n with kaldiio.open_like_kaldi(wavpath, 'rb') as f:\n with BytesIO(f.read()) as g:\n array, rate = soundfile.read(g)\n elif self.multi_columns:\n array, rate = soundfile_read(wavs=wavpath.split(),\n dtype=None, always_2d=False, concat_axis=1)\n else:\n array, rate = soundfile.read(wavpath)\n cached[recodeid] = array, rate\n array, rate = cached[recodeid]\n recodeid_counter[recodeid] -= 1\n if recodeid_counter[recodeid] == 0:\n cached.pop(recodeid)\n if et != -1:\n array = array[int(st * rate):int(et * rate)]\n else:\n array = array[int(st * rate):]\n yield utt, (array, rate), None, None\n\n\n<function token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\nclass SegmentsExtractor:\n <docstring token>\n\n def __init__(self, fname: str, segments: str=None, multi_columns: bool=\n False):\n assert check_argument_types()\n self.wav_scp = fname\n self.multi_columns = multi_columns\n self.wav_dict = {}\n with open(self.wav_scp, 'r') as f:\n for line in f:\n recodeid, wavpath = line.strip().split(None, 1)\n if recodeid in self.wav_dict:\n raise RuntimeError(f'{recodeid} is duplicated')\n self.wav_dict[recodeid] = wavpath\n self.segments = segments\n self.segments_dict = {}\n with open(self.segments, 'r') as f:\n for line in f:\n sps = line.rstrip().split(None)\n if len(sps) != 4:\n raise RuntimeError('Format is invalid: {}'.format(line))\n uttid, recodeid, st, et = sps\n self.segments_dict[uttid] = recodeid, float(st), float(et)\n if recodeid not in self.wav_dict:\n raise RuntimeError('Not found \"{}\" in {}'.format(\n recodeid, self.wav_scp))\n\n def generator(self):\n recodeid_counter = {}\n for utt, (recodeid, st, et) in self.segments_dict.items():\n recodeid_counter[recodeid] = recodeid_counter.get(recodeid, 0) + 1\n cached = {}\n for utt, (recodeid, st, et) in self.segments_dict.items():\n wavpath = self.wav_dict[recodeid]\n if recodeid not in cached:\n if wavpath.endswith('|'):\n if self.multi_columns:\n raise RuntimeError(\n 'Not supporting multi_columns wav.scp for inputs by pipe'\n )\n with kaldiio.open_like_kaldi(wavpath, 'rb') as f:\n with BytesIO(f.read()) as g:\n array, rate = soundfile.read(g)\n elif self.multi_columns:\n array, rate = soundfile_read(wavs=wavpath.split(),\n dtype=None, always_2d=False, concat_axis=1)\n else:\n array, rate = soundfile.read(wavpath)\n cached[recodeid] = array, rate\n array, rate = cached[recodeid]\n recodeid_counter[recodeid] -= 1\n if recodeid_counter[recodeid] == 0:\n cached.pop(recodeid)\n if et != -1:\n array = array[int(st * rate):int(et * rate)]\n else:\n array = array[int(st * rate):]\n yield utt, (array, rate), None, None\n\n\n<function token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\nclass SegmentsExtractor:\n <docstring token>\n <function token>\n\n def generator(self):\n recodeid_counter = {}\n for utt, (recodeid, st, et) in self.segments_dict.items():\n recodeid_counter[recodeid] = recodeid_counter.get(recodeid, 0) + 1\n cached = {}\n for utt, (recodeid, st, et) in self.segments_dict.items():\n wavpath = self.wav_dict[recodeid]\n if recodeid not in cached:\n if wavpath.endswith('|'):\n if self.multi_columns:\n raise RuntimeError(\n 'Not supporting multi_columns wav.scp for inputs by pipe'\n )\n with kaldiio.open_like_kaldi(wavpath, 'rb') as f:\n with BytesIO(f.read()) as g:\n array, rate = soundfile.read(g)\n elif self.multi_columns:\n array, rate = soundfile_read(wavs=wavpath.split(),\n dtype=None, always_2d=False, concat_axis=1)\n else:\n array, rate = soundfile.read(wavpath)\n cached[recodeid] = array, rate\n array, rate = cached[recodeid]\n recodeid_counter[recodeid] -= 1\n if recodeid_counter[recodeid] == 0:\n cached.pop(recodeid)\n if et != -1:\n array = array[int(st * rate):int(et * rate)]\n else:\n array = array[int(st * rate):]\n yield utt, (array, rate), None, None\n\n\n<function token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\nclass SegmentsExtractor:\n <docstring token>\n <function token>\n <function token>\n\n\n<function token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<class token>\n<function token>\n<code token>\n"
] | false |
99,373 |
e51bf40beae4d5c6a9454befc29e49780be2e754
|
def UpdateDB(NewFile,Picname,DATABASE_URL):
# Dependencies
# ----------------------------------
# Imports the method used for connecting to DBs
from sqlalchemy import create_engine
# Imports the methods needed to abstract classes into tables
from sqlalchemy.ext.declarative import declarative_base
# Allow us to declare column types
from sqlalchemy import Column, Integer, String, Float
# Create Class
# ----------------------------------
Base = declarative_base()
class UploadClass(Base):
__tablename__ = 'GeoTagData'
__table_args__ = { 'extend_existing': True }
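        # extend_existing lets the table be re-declared on repeated calls
        # without raising a "table already defined" error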
        index = Column(Integer, primary_key=True)
        latitude = Column(Float)
        longitude = Column(Float)
        landmark = Column(String(255))
        country = Column(String(255))
        state = Column(String(255))
        county = Column(String(255))
        city = Column(String(255))
        zipcode = Column(Integer)
        ImageTimeStamp = Column(String(255))
        FileAddress = Column(String(255))
# add the new picture data into a dataframe
import ProcessPicture
    picAddress = NewFile
    # run the extraction and keep the resulting dataframe
    df = ProcessPicture.ExtractPicData(picAddress, Picname, DATABASE_URL)
# Create variables to hold new picture data info
    FileAddress = df['FileAddress']
    ImageTimeStamp = df['ImageTimeStamp']
    city = df['city']
    country = df['country']
    county = df['county']
    landmark = df['landmark']
    latitude = df['latitude']
    longitude = df['longitude']
    state = df['state']
    zipcode = df['zipcode']
# look for the last index, so we can add the index column
# Create Database Connection
# ----------------------------------
#database_path = "db/Oldports.sqlite"
#engine = create_engine(f"sqlite:///{database_path}")
engine = create_engine(DATABASE_URL)
# conn = engine.connect()
data = engine.execute('SELECT * FROM "GeoTagData"')
    index = []
    for record in data:
        index.append(record)
    # store the row count in a variable; it becomes the next primary-key index
    index = len(index)
    # update the class with the dataframe variables
    pictureData = UploadClass(index=index, FileAddress=FileAddress, ImageTimeStamp=ImageTimeStamp,
                              city=city, country=country, county=county, landmark=landmark,
                              latitude=latitude, longitude=longitude, state=state, zipcode=zipcode)
# Create a "Metadata" Layer That Abstracts our SQL Database
Base.metadata.create_all(engine)
# Create a Session Object to Connect to DB
# ----------------------------------
from sqlalchemy.orm import Session
session = Session(bind=engine)
# Add Records to the Appropriate DB
# ----------------------------------
session.add(pictureData)
session.commit()
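    # finally, generate a thumbnail for the newly stored picture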
ProcessPicture.make_thumbnail(NewFile)
|
[
"def UpdateDB(NewFile,Picname,DATABASE_URL):\n\n # Dependencies\n # ----------------------------------\n # Imports the method used for connecting to DBs\n from sqlalchemy import create_engine\n # Imports the methods needed to abstract classes into tables\n from sqlalchemy.ext.declarative import declarative_base\n # Allow us to declare column types\n from sqlalchemy import Column, Integer, String, Float \n\n\n # Create Class\n # ----------------------------------\n Base = declarative_base()\n class UploadClass(Base):\n __tablename__ = 'GeoTagData'\n __table_args__ = { 'extend_existing': True }\n\n index=Column (Integer,primary_key=True)\n latitude= Column (Float)\n longitude= Column (Float)\n landmark=Column(String(255))\n country=Column(String(255))\n state=Column(String(255))\n county=Column(String(255))\n city=Column(String(255))\n zipcode=Column(Integer)\n ImageTimeStamp=Column(String(255))\n FileAddress=Column(String(255))\n \n\n # add the new picture data into a dataframe\n import ProcessPicture\n picAddress=NewFile\n # execute function into a variable\n df=ProcessPicture.ExtractPicData(picAddress,Picname,DATABASE_URL)\n\n\n # Create variables to hold new picture data info\n FileAddress=df['FileAddress']\n ImageTimeStamp=df['ImageTimeStamp']\n city=df['city']\n country=df['country']\n county=df['county']\n landmark=df['landmark']\n latitude=df['latitude']\n longitude=df['longitude']\n state=df['state']\n zipcode=df['zipcode']\n\n # look for the last index, so we can add the index column\n # Create Database Connection\n # ----------------------------------\n #database_path = \"db/Oldports.sqlite\"\n #engine = create_engine(f\"sqlite:///{database_path}\")\n engine = create_engine(DATABASE_URL)\n # conn = engine.connect()\n data = engine.execute('SELECT * FROM \"GeoTagData\"')\n index=[]\n for record in data:\n index.append(record)\n # index int a variable\n index=len(index)\n\n # update the class with the dataframe varables\n pictureData=UploadClass(index=index,FileAddress=FileAddress,ImageTimeStamp=ImageTimeStamp, city=city, country=country,county=county, landmark=landmark, latitude=latitude, longitude=longitude,state=state,zipcode=zipcode) \n\n # Create a \"Metadata\" Layer That Abstracts our SQL Database\n Base.metadata.create_all(engine)\n # Create a Session Object to Connect to DB\n # ----------------------------------\n from sqlalchemy.orm import Session\n session = Session(bind=engine)\n # Add Records to the Appropriate DB\n # ----------------------------------\n session.add(pictureData)\n\n session.commit()\n\n ProcessPicture.make_thumbnail(NewFile)\n",
"def UpdateDB(NewFile, Picname, DATABASE_URL):\n from sqlalchemy import create_engine\n from sqlalchemy.ext.declarative import declarative_base\n from sqlalchemy import Column, Integer, String, Float\n Base = declarative_base()\n\n\n class UploadClass(Base):\n __tablename__ = 'GeoTagData'\n __table_args__ = {'extend_existing': True}\n index = Column(Integer, primary_key=True)\n latitude = Column(Float)\n longitude = Column(Float)\n landmark = Column(String(255))\n country = Column(String(255))\n state = Column(String(255))\n county = Column(String(255))\n city = Column(String(255))\n zipcode = Column(Integer)\n ImageTimeStamp = Column(String(255))\n FileAddress = Column(String(255))\n import ProcessPicture\n picAddress = NewFile\n df = ProcessPicture.ExtractPicData(picAddress, Picname, DATABASE_URL)\n FileAddress = df['FileAddress']\n ImageTimeStamp = df['ImageTimeStamp']\n city = df['city']\n country = df['country']\n county = df['county']\n landmark = df['landmark']\n latitude = df['latitude']\n longitude = df['longitude']\n state = df['state']\n zipcode = df['zipcode']\n engine = create_engine(DATABASE_URL)\n data = engine.execute('SELECT * FROM \"GeoTagData\"')\n index = []\n for record in data:\n index.append(record)\n index = len(index)\n pictureData = UploadClass(index=index, FileAddress=FileAddress,\n ImageTimeStamp=ImageTimeStamp, city=city, country=country, county=\n county, landmark=landmark, latitude=latitude, longitude=longitude,\n state=state, zipcode=zipcode)\n Base.metadata.create_all(engine)\n from sqlalchemy.orm import Session\n session = Session(bind=engine)\n session.add(pictureData)\n session.commit()\n ProcessPicture.make_thumbnail(NewFile)\n",
"<function token>\n"
] | false |
99,374 |
7d0b371e68b6362ccf1ed24b4edb096a76f1054c
|
# coding: utf-8
# python3.6
'''PatchMaker
A tool for creating patch files.
It uses type annotations, so it probably won't run on Python versions below 3.6.
'''
targetpaths = '''
project/html/html1.html
project/html/html2.html
'''
import os
import sys
from pprint import pprint
import datetime
import shutil
PATCHNAME = datetime.datetime.today().strftime('%Y%m%d_%H%M%S') + '_patch'
class PatchMaker:
def __init__(self):
pass
def cd_(self):
        '''Move the current working directory to this script's location.'''
if hasattr(sys, 'frozen'):
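            # sys.frozen is set when running as a bundled executable (e.g. PyInstaller)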
os.chdir(os.path.dirname(sys.executable))
else:
os.chdir(os.path.dirname(os.path.abspath(__file__)))
def run(self, targetpaths):
        '''Top-level method.'''
        # Accept the target paths either as a list or as a newline-separated string.
targetpaths = targetpaths if isinstance(targetpaths, list) else self.make_pathlist(targetpaths)
absentpaths = self.get_absent_paths(targetpaths)
donelist = self.create_patch(list(set(targetpaths)-set(absentpaths)))
self.output_result(donelist, absentpaths)
def make_pathlist(self, targetpaths: str) -> list:
        '''Turn the string entered at the top of the file into a list of paths.'''
pathlist = []
for t in targetpaths.strip().split('\n'):
if t:
pathlist.append(t)
return pathlist
def get_absent_paths(self, pathlist: list) -> list:
        '''Return the input paths that do not exist.'''
return [path for path in pathlist if not os.path.exists(path)]
def create_patch(self, pathlist: list) -> list:
        '''Create the patch files (the purpose of this tool).'''
os.mkdir(PATCHNAME)
donelist = []
for path in pathlist:
dest_dir = f'{PATCHNAME}/{os.path.dirname(path)}'
dest_file = f'{PATCHNAME}/{path}'
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
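            # copy directories recursively, single files directly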
donelist.append(
shutil.copytree(path, dest_file)
if os.path.isdir(path)
else shutil.copy(path, dest_file))
return donelist
def output_result(self, donelist, absentpaths):
        '''Print the "all done" summary.'''
pprint(absentpaths)
print(f'<INFO> {len(absentpaths)} files above were not found and were ignored.')
print(f'<INFO> Succeeded! {len(donelist)} patch files were created. They are not shown on console.')
if __name__ == '__main__':
pm = PatchMaker()
pm.cd_()
pm.run(targetpaths)
|
[
"# coding: utf-8\n# python3.6\n\n'''PatchMaker\n\nパッチファイルを作成するツール。\n型注釈を使っているからpython3.6未満では動かない気がする。\n'''\n\ntargetpaths = '''\n\nproject/html/html1.html\nproject/html/html2.html\n\n'''\n\nimport os\nimport sys\nfrom pprint import pprint\nimport datetime\nimport shutil\n\n\nPATCHNAME = datetime.datetime.today().strftime('%Y%m%d_%H%M%S') + '_patch'\n\n\nclass PatchMaker:\n def __init__(self):\n pass\n\n def cd_(self):\n '''カレントディレクトリを移す。'''\n if hasattr(sys, 'frozen'):\n os.chdir(os.path.dirname(sys.executable))\n else:\n os.chdir(os.path.dirname(os.path.abspath(__file__)))\n\n def run(self, targetpaths):\n '''トップレベルメソッド。'''\n # リストで渡しても文字列で渡してもいいようにしました。\n targetpaths = targetpaths if isinstance(targetpaths, list) else self.make_pathlist(targetpaths)\n absentpaths = self.get_absent_paths(targetpaths)\n donelist = self.create_patch(list(set(targetpaths)-set(absentpaths)))\n self.output_result(donelist, absentpaths)\n\n def make_pathlist(self, targetpaths: str) -> list:\n '''冒頭でインプットした文字列を配列にする。'''\n pathlist = []\n for t in targetpaths.strip().split('\\n'):\n if t:\n pathlist.append(t)\n return pathlist\n\n def get_absent_paths(self, pathlist: list) -> list:\n '''インプットされたパスのうち、存在しないものを返します。'''\n return [path for path in pathlist if not os.path.exists(path)]\n\n def create_patch(self, pathlist: list) -> list:\n '''目的であるパッチの作成。'''\n os.mkdir(PATCHNAME)\n donelist = []\n for path in pathlist:\n dest_dir = f'{PATCHNAME}/{os.path.dirname(path)}'\n dest_file = f'{PATCHNAME}/{path}'\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n donelist.append(\n shutil.copytree(path, dest_file) \n if os.path.isdir(path)\n else shutil.copy(path, dest_file))\n return donelist\n\n def output_result(self, donelist, absentpaths):\n '''「終わったよー」の出力。'''\n pprint(absentpaths)\n print(f'<INFO> {len(absentpaths)} files above were not found and were ignored.')\n print(f'<INFO> Succeeded! {len(donelist)} patch files were created. They are not shown on console.')\n\n\nif __name__ == '__main__':\n pm = PatchMaker()\n pm.cd_()\n pm.run(targetpaths)\n",
"<docstring token>\ntargetpaths = \"\"\"\n\nproject/html/html1.html\nproject/html/html2.html\n\n\"\"\"\nimport os\nimport sys\nfrom pprint import pprint\nimport datetime\nimport shutil\nPATCHNAME = datetime.datetime.today().strftime('%Y%m%d_%H%M%S') + '_patch'\n\n\nclass PatchMaker:\n\n def __init__(self):\n pass\n\n def cd_(self):\n \"\"\"カレントディレクトリを移す。\"\"\"\n if hasattr(sys, 'frozen'):\n os.chdir(os.path.dirname(sys.executable))\n else:\n os.chdir(os.path.dirname(os.path.abspath(__file__)))\n\n def run(self, targetpaths):\n \"\"\"トップレベルメソッド。\"\"\"\n targetpaths = targetpaths if isinstance(targetpaths, list\n ) else self.make_pathlist(targetpaths)\n absentpaths = self.get_absent_paths(targetpaths)\n donelist = self.create_patch(list(set(targetpaths) - set(absentpaths)))\n self.output_result(donelist, absentpaths)\n\n def make_pathlist(self, targetpaths: str) ->list:\n \"\"\"冒頭でインプットした文字列を配列にする。\"\"\"\n pathlist = []\n for t in targetpaths.strip().split('\\n'):\n if t:\n pathlist.append(t)\n return pathlist\n\n def get_absent_paths(self, pathlist: list) ->list:\n \"\"\"インプットされたパスのうち、存在しないものを返します。\"\"\"\n return [path for path in pathlist if not os.path.exists(path)]\n\n def create_patch(self, pathlist: list) ->list:\n \"\"\"目的であるパッチの作成。\"\"\"\n os.mkdir(PATCHNAME)\n donelist = []\n for path in pathlist:\n dest_dir = f'{PATCHNAME}/{os.path.dirname(path)}'\n dest_file = f'{PATCHNAME}/{path}'\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n donelist.append(shutil.copytree(path, dest_file) if os.path.\n isdir(path) else shutil.copy(path, dest_file))\n return donelist\n\n def output_result(self, donelist, absentpaths):\n \"\"\"「終わったよー」の出力。\"\"\"\n pprint(absentpaths)\n print(\n f'<INFO> {len(absentpaths)} files above were not found and were ignored.'\n )\n print(\n f'<INFO> Succeeded! {len(donelist)} patch files were created. They are not shown on console.'\n )\n\n\nif __name__ == '__main__':\n pm = PatchMaker()\n pm.cd_()\n pm.run(targetpaths)\n",
"<docstring token>\ntargetpaths = \"\"\"\n\nproject/html/html1.html\nproject/html/html2.html\n\n\"\"\"\n<import token>\nPATCHNAME = datetime.datetime.today().strftime('%Y%m%d_%H%M%S') + '_patch'\n\n\nclass PatchMaker:\n\n def __init__(self):\n pass\n\n def cd_(self):\n \"\"\"カレントディレクトリを移す。\"\"\"\n if hasattr(sys, 'frozen'):\n os.chdir(os.path.dirname(sys.executable))\n else:\n os.chdir(os.path.dirname(os.path.abspath(__file__)))\n\n def run(self, targetpaths):\n \"\"\"トップレベルメソッド。\"\"\"\n targetpaths = targetpaths if isinstance(targetpaths, list\n ) else self.make_pathlist(targetpaths)\n absentpaths = self.get_absent_paths(targetpaths)\n donelist = self.create_patch(list(set(targetpaths) - set(absentpaths)))\n self.output_result(donelist, absentpaths)\n\n def make_pathlist(self, targetpaths: str) ->list:\n \"\"\"冒頭でインプットした文字列を配列にする。\"\"\"\n pathlist = []\n for t in targetpaths.strip().split('\\n'):\n if t:\n pathlist.append(t)\n return pathlist\n\n def get_absent_paths(self, pathlist: list) ->list:\n \"\"\"インプットされたパスのうち、存在しないものを返します。\"\"\"\n return [path for path in pathlist if not os.path.exists(path)]\n\n def create_patch(self, pathlist: list) ->list:\n \"\"\"目的であるパッチの作成。\"\"\"\n os.mkdir(PATCHNAME)\n donelist = []\n for path in pathlist:\n dest_dir = f'{PATCHNAME}/{os.path.dirname(path)}'\n dest_file = f'{PATCHNAME}/{path}'\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n donelist.append(shutil.copytree(path, dest_file) if os.path.\n isdir(path) else shutil.copy(path, dest_file))\n return donelist\n\n def output_result(self, donelist, absentpaths):\n \"\"\"「終わったよー」の出力。\"\"\"\n pprint(absentpaths)\n print(\n f'<INFO> {len(absentpaths)} files above were not found and were ignored.'\n )\n print(\n f'<INFO> Succeeded! {len(donelist)} patch files were created. They are not shown on console.'\n )\n\n\nif __name__ == '__main__':\n pm = PatchMaker()\n pm.cd_()\n pm.run(targetpaths)\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n\n\nclass PatchMaker:\n\n def __init__(self):\n pass\n\n def cd_(self):\n \"\"\"カレントディレクトリを移す。\"\"\"\n if hasattr(sys, 'frozen'):\n os.chdir(os.path.dirname(sys.executable))\n else:\n os.chdir(os.path.dirname(os.path.abspath(__file__)))\n\n def run(self, targetpaths):\n \"\"\"トップレベルメソッド。\"\"\"\n targetpaths = targetpaths if isinstance(targetpaths, list\n ) else self.make_pathlist(targetpaths)\n absentpaths = self.get_absent_paths(targetpaths)\n donelist = self.create_patch(list(set(targetpaths) - set(absentpaths)))\n self.output_result(donelist, absentpaths)\n\n def make_pathlist(self, targetpaths: str) ->list:\n \"\"\"冒頭でインプットした文字列を配列にする。\"\"\"\n pathlist = []\n for t in targetpaths.strip().split('\\n'):\n if t:\n pathlist.append(t)\n return pathlist\n\n def get_absent_paths(self, pathlist: list) ->list:\n \"\"\"インプットされたパスのうち、存在しないものを返します。\"\"\"\n return [path for path in pathlist if not os.path.exists(path)]\n\n def create_patch(self, pathlist: list) ->list:\n \"\"\"目的であるパッチの作成。\"\"\"\n os.mkdir(PATCHNAME)\n donelist = []\n for path in pathlist:\n dest_dir = f'{PATCHNAME}/{os.path.dirname(path)}'\n dest_file = f'{PATCHNAME}/{path}'\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n donelist.append(shutil.copytree(path, dest_file) if os.path.\n isdir(path) else shutil.copy(path, dest_file))\n return donelist\n\n def output_result(self, donelist, absentpaths):\n \"\"\"「終わったよー」の出力。\"\"\"\n pprint(absentpaths)\n print(\n f'<INFO> {len(absentpaths)} files above were not found and were ignored.'\n )\n print(\n f'<INFO> Succeeded! {len(donelist)} patch files were created. They are not shown on console.'\n )\n\n\nif __name__ == '__main__':\n pm = PatchMaker()\n pm.cd_()\n pm.run(targetpaths)\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n\n\nclass PatchMaker:\n\n def __init__(self):\n pass\n\n def cd_(self):\n \"\"\"カレントディレクトリを移す。\"\"\"\n if hasattr(sys, 'frozen'):\n os.chdir(os.path.dirname(sys.executable))\n else:\n os.chdir(os.path.dirname(os.path.abspath(__file__)))\n\n def run(self, targetpaths):\n \"\"\"トップレベルメソッド。\"\"\"\n targetpaths = targetpaths if isinstance(targetpaths, list\n ) else self.make_pathlist(targetpaths)\n absentpaths = self.get_absent_paths(targetpaths)\n donelist = self.create_patch(list(set(targetpaths) - set(absentpaths)))\n self.output_result(donelist, absentpaths)\n\n def make_pathlist(self, targetpaths: str) ->list:\n \"\"\"冒頭でインプットした文字列を配列にする。\"\"\"\n pathlist = []\n for t in targetpaths.strip().split('\\n'):\n if t:\n pathlist.append(t)\n return pathlist\n\n def get_absent_paths(self, pathlist: list) ->list:\n \"\"\"インプットされたパスのうち、存在しないものを返します。\"\"\"\n return [path for path in pathlist if not os.path.exists(path)]\n\n def create_patch(self, pathlist: list) ->list:\n \"\"\"目的であるパッチの作成。\"\"\"\n os.mkdir(PATCHNAME)\n donelist = []\n for path in pathlist:\n dest_dir = f'{PATCHNAME}/{os.path.dirname(path)}'\n dest_file = f'{PATCHNAME}/{path}'\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n donelist.append(shutil.copytree(path, dest_file) if os.path.\n isdir(path) else shutil.copy(path, dest_file))\n return donelist\n\n def output_result(self, donelist, absentpaths):\n \"\"\"「終わったよー」の出力。\"\"\"\n pprint(absentpaths)\n print(\n f'<INFO> {len(absentpaths)} files above were not found and were ignored.'\n )\n print(\n f'<INFO> Succeeded! {len(donelist)} patch files were created. They are not shown on console.'\n )\n\n\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n\n\nclass PatchMaker:\n\n def __init__(self):\n pass\n\n def cd_(self):\n \"\"\"カレントディレクトリを移す。\"\"\"\n if hasattr(sys, 'frozen'):\n os.chdir(os.path.dirname(sys.executable))\n else:\n os.chdir(os.path.dirname(os.path.abspath(__file__)))\n\n def run(self, targetpaths):\n \"\"\"トップレベルメソッド。\"\"\"\n targetpaths = targetpaths if isinstance(targetpaths, list\n ) else self.make_pathlist(targetpaths)\n absentpaths = self.get_absent_paths(targetpaths)\n donelist = self.create_patch(list(set(targetpaths) - set(absentpaths)))\n self.output_result(donelist, absentpaths)\n\n def make_pathlist(self, targetpaths: str) ->list:\n \"\"\"冒頭でインプットした文字列を配列にする。\"\"\"\n pathlist = []\n for t in targetpaths.strip().split('\\n'):\n if t:\n pathlist.append(t)\n return pathlist\n <function token>\n\n def create_patch(self, pathlist: list) ->list:\n \"\"\"目的であるパッチの作成。\"\"\"\n os.mkdir(PATCHNAME)\n donelist = []\n for path in pathlist:\n dest_dir = f'{PATCHNAME}/{os.path.dirname(path)}'\n dest_file = f'{PATCHNAME}/{path}'\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n donelist.append(shutil.copytree(path, dest_file) if os.path.\n isdir(path) else shutil.copy(path, dest_file))\n return donelist\n\n def output_result(self, donelist, absentpaths):\n \"\"\"「終わったよー」の出力。\"\"\"\n pprint(absentpaths)\n print(\n f'<INFO> {len(absentpaths)} files above were not found and were ignored.'\n )\n print(\n f'<INFO> Succeeded! {len(donelist)} patch files were created. They are not shown on console.'\n )\n\n\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n\n\nclass PatchMaker:\n\n def __init__(self):\n pass\n <function token>\n\n def run(self, targetpaths):\n \"\"\"トップレベルメソッド。\"\"\"\n targetpaths = targetpaths if isinstance(targetpaths, list\n ) else self.make_pathlist(targetpaths)\n absentpaths = self.get_absent_paths(targetpaths)\n donelist = self.create_patch(list(set(targetpaths) - set(absentpaths)))\n self.output_result(donelist, absentpaths)\n\n def make_pathlist(self, targetpaths: str) ->list:\n \"\"\"冒頭でインプットした文字列を配列にする。\"\"\"\n pathlist = []\n for t in targetpaths.strip().split('\\n'):\n if t:\n pathlist.append(t)\n return pathlist\n <function token>\n\n def create_patch(self, pathlist: list) ->list:\n \"\"\"目的であるパッチの作成。\"\"\"\n os.mkdir(PATCHNAME)\n donelist = []\n for path in pathlist:\n dest_dir = f'{PATCHNAME}/{os.path.dirname(path)}'\n dest_file = f'{PATCHNAME}/{path}'\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n donelist.append(shutil.copytree(path, dest_file) if os.path.\n isdir(path) else shutil.copy(path, dest_file))\n return donelist\n\n def output_result(self, donelist, absentpaths):\n \"\"\"「終わったよー」の出力。\"\"\"\n pprint(absentpaths)\n print(\n f'<INFO> {len(absentpaths)} files above were not found and were ignored.'\n )\n print(\n f'<INFO> Succeeded! {len(donelist)} patch files were created. They are not shown on console.'\n )\n\n\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n\n\nclass PatchMaker:\n\n def __init__(self):\n pass\n <function token>\n\n def run(self, targetpaths):\n \"\"\"トップレベルメソッド。\"\"\"\n targetpaths = targetpaths if isinstance(targetpaths, list\n ) else self.make_pathlist(targetpaths)\n absentpaths = self.get_absent_paths(targetpaths)\n donelist = self.create_patch(list(set(targetpaths) - set(absentpaths)))\n self.output_result(donelist, absentpaths)\n\n def make_pathlist(self, targetpaths: str) ->list:\n \"\"\"冒頭でインプットした文字列を配列にする。\"\"\"\n pathlist = []\n for t in targetpaths.strip().split('\\n'):\n if t:\n pathlist.append(t)\n return pathlist\n <function token>\n <function token>\n\n def output_result(self, donelist, absentpaths):\n \"\"\"「終わったよー」の出力。\"\"\"\n pprint(absentpaths)\n print(\n f'<INFO> {len(absentpaths)} files above were not found and were ignored.'\n )\n print(\n f'<INFO> Succeeded! {len(donelist)} patch files were created. They are not shown on console.'\n )\n\n\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n\n\nclass PatchMaker:\n <function token>\n <function token>\n\n def run(self, targetpaths):\n \"\"\"トップレベルメソッド。\"\"\"\n targetpaths = targetpaths if isinstance(targetpaths, list\n ) else self.make_pathlist(targetpaths)\n absentpaths = self.get_absent_paths(targetpaths)\n donelist = self.create_patch(list(set(targetpaths) - set(absentpaths)))\n self.output_result(donelist, absentpaths)\n\n def make_pathlist(self, targetpaths: str) ->list:\n \"\"\"冒頭でインプットした文字列を配列にする。\"\"\"\n pathlist = []\n for t in targetpaths.strip().split('\\n'):\n if t:\n pathlist.append(t)\n return pathlist\n <function token>\n <function token>\n\n def output_result(self, donelist, absentpaths):\n \"\"\"「終わったよー」の出力。\"\"\"\n pprint(absentpaths)\n print(\n f'<INFO> {len(absentpaths)} files above were not found and were ignored.'\n )\n print(\n f'<INFO> Succeeded! {len(donelist)} patch files were created. They are not shown on console.'\n )\n\n\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n\n\nclass PatchMaker:\n <function token>\n <function token>\n\n def run(self, targetpaths):\n \"\"\"トップレベルメソッド。\"\"\"\n targetpaths = targetpaths if isinstance(targetpaths, list\n ) else self.make_pathlist(targetpaths)\n absentpaths = self.get_absent_paths(targetpaths)\n donelist = self.create_patch(list(set(targetpaths) - set(absentpaths)))\n self.output_result(donelist, absentpaths)\n\n def make_pathlist(self, targetpaths: str) ->list:\n \"\"\"冒頭でインプットした文字列を配列にする。\"\"\"\n pathlist = []\n for t in targetpaths.strip().split('\\n'):\n if t:\n pathlist.append(t)\n return pathlist\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n\n\nclass PatchMaker:\n <function token>\n <function token>\n\n def run(self, targetpaths):\n \"\"\"トップレベルメソッド。\"\"\"\n targetpaths = targetpaths if isinstance(targetpaths, list\n ) else self.make_pathlist(targetpaths)\n absentpaths = self.get_absent_paths(targetpaths)\n donelist = self.create_patch(list(set(targetpaths) - set(absentpaths)))\n self.output_result(donelist, absentpaths)\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n\n\nclass PatchMaker:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<assignment token>\n<import token>\n<assignment token>\n<class token>\n<code token>\n"
] | false |
99,375 |
f39c2b02f638b9024bb4db77bd36d0743714c4f1
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 20 16:10:16 2020
@author: 月光下的云海
"""
from DLL.CreateModel import CreateModel
import os
'''================ WriteToSummary: write training info to a txt file ================
FUNCTION: WriteToSummary
FEATURE: writes the best training result (and tensor names) to a txt summary file
INPUTS:model,min_index,min_loss,x_test_ph_name,test_output_name
        model------------model name
        min_index--------id of the minimum loss
        min_loss---------minimum loss value
        x_test_ph_name---name of the input placeholder tensor
        test_output_name-name of the output tensor
OUTPUT:none
============================================================================='''
def WriteToSummary(model,min_index,min_loss,x_test_ph_name,test_output_name):
line = model+':'+str(min_index)+','+str(min_loss)+'\n'
    if not os.path.exists(model + '_Summary.txt'):
Summary = open(model+'_Summary.txt','w')
Summary.close()
Summary = open(model+'_Summary.txt','r+')
summary_content = ''
try:
for info in Summary:
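            # the first index() call is only a membership probe: it raises
            # ValueError (caught below) when the model name is absent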
name_loc = info.index(model)
name_loc = info.index(':')
name = info[0:name_loc]
            if model == name:
info = info.replace(info[name_loc+1:],str(min_index)+','+str(min_loss))
info = info+';Input:'+x_test_ph_name+',Output:'+test_output_name+'\n'
summary_content += info
else:
summary_content += info
Summary.close()
Summary = open(model+'_Summary.txt','w+')
Summary.write(summary_content)
Summary.close()
except ValueError:
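        # no matching entry was found, so append a fresh summary line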
Summary.close()
Summary = open(model+'_Summary.txt','a+')
Summary.write(line)
Summary.close()
if __name__ == '__main__':
    model = input("Which model do you want to choose: ")
scale = int(input("And the magnification is :"))
source_dir = os.path.abspath(os.path.dirname(os.getcwd()))+'\\'
net_model = CreateModel(model = model,lr = 1e-3,batch_size = 128)
x_test,y_test,x_train,y_train,train_size,test_size = net_model.prepareSparseData(
source_dir+'Saprse_Train_Data\\',0.2)
    print('\n\nSparseModel ( ' + model + ' x ' + str(scale) + ' ) Training ... ...')
min_index,min_loss,sp_train_li,sp_test_li,x_test_ph_name1,test_output_name1 = net_model.trainNet(x_train,
y_train,
x_test,
y_test,
train_size,
test_size,
Epoch = int(10e3),
iter_view = 500,
saved_path = source_dir+model+'_SparseSR_x'+str(scale))
WriteToSummary(model,min_index,min_loss,x_test_ph_name1,test_output_name1)
train_img,label_img,test_img,test_label_img = net_model.prepareImageData(
source_dir1 = source_dir+'\\xTrainData\\',source_dir2 = source_dir+'\\yTrainData\\',ratio = 0.2,scale = scale)
net_model = CreateModel(model = model,lr = 1e-3,batch_size = 128)
    print('\n\nSRModel (' + model + ' x ' + str(scale) + ') Training ... ...')
min_index,min_loss,sr_train_li,sr_test_li,x_test_ph_name2,test_output_name2 = net_model.trainNet(train_img,
label_img,
test_img,
test_label_img,
train_size = train_img.shape[0],
test_size = test_img.shape[0],
Epoch = int(5e3),
iter_view = 500,
saved_path = source_dir+model+'x'+str(scale))
import matplotlib.pyplot as plt
    plt.plot(sp_test_li, 'r')
    plt.plot(sr_test_li, 'b')
    plt.xlabel('Epoch:100')
    plt.ylabel('Loss:0.01')
    plt.title('Loss Curve')
    plt.show()
|
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jun 20 16:10:16 2020\r\n\r\n@author: 月光下的云海\r\n\"\"\"\r\n\r\nfrom DLL.CreateModel import CreateModel\r\nimport os\r\n'''===================== WriteToSummary把训练信息写入txt =======================\r\nFUNCTION: WriteToSummary\r\nFEATURE: WriteToSummary把训练信息写入txt\r\nINPUTS:model,min_index,min_loss\r\n model-----------模型名称\r\n min_index-------最小loss的id\r\n min_loss--------最小的loss值\r\nOUTPUT:无\r\n============================================================================='''\r\ndef WriteToSummary(model,min_index,min_loss,x_test_ph_name,test_output_name):\r\n line = model+':'+str(min_index)+','+str(min_loss)+'\\n'\r\n if( not os.path.exists(model+'_Summary.txt')):\r\n Summary = open(model+'_Summary.txt','w')\r\n Summary.close()\r\n Summary = open(model+'_Summary.txt','r+')\r\n summary_content = ''\r\n try: \r\n for info in Summary:\r\n name_loc = info.index(model)\r\n name_loc = info.index(':')\r\n name = info[0:name_loc]\r\n if(model == name):\r\n info = info.replace(info[name_loc+1:],str(min_index)+','+str(min_loss))\r\n info = info+';Input:'+x_test_ph_name+',Output:'+test_output_name+'\\n'\r\n summary_content += info\r\n else:\r\n summary_content += info\r\n Summary.close()\r\n Summary = open(model+'_Summary.txt','w+')\r\n Summary.write(summary_content)\r\n Summary.close()\r\n except ValueError:\r\n Summary.close()\r\n Summary = open(model+'_Summary.txt','a+')\r\n Summary.write(line)\r\n Summary.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n model = input(\"Which model do u wanna choose :\")\r\n scale = int(input(\"And the magnification is :\"))\r\n source_dir = os.path.abspath(os.path.dirname(os.getcwd()))+'\\\\'\r\n net_model = CreateModel(model = model,lr = 1e-3,batch_size = 128)\r\n x_test,y_test,x_train,y_train,train_size,test_size = net_model.prepareSparseData(\r\n source_dir+'Saprse_Train_Data\\\\',0.2)\r\n print('\\n\\nSparseModel ( ' + model +' x '+str(scale)+ ' ) Trainning ... ...')\r\n \r\n min_index,min_loss,sp_train_li,sp_test_li,x_test_ph_name1,test_output_name1 = net_model.trainNet(x_train,\r\n y_train,\r\n x_test,\r\n y_test,\r\n train_size,\r\n test_size,\r\n Epoch = int(10e3),\r\n iter_view = 500,\r\n saved_path = source_dir+model+'_SparseSR_x'+str(scale))\r\n WriteToSummary(model,min_index,min_loss,x_test_ph_name1,test_output_name1)\r\n \r\n \r\n train_img,label_img,test_img,test_label_img = net_model.prepareImageData(\r\n source_dir1 = source_dir+'\\\\xTrainData\\\\',source_dir2 = source_dir+'\\\\yTrainData\\\\',ratio = 0.2,scale = scale)\r\n net_model = CreateModel(model = model,lr = 1e-3,batch_size = 128)\r\n print('\\n\\nSRModel (' + model +' x '+str(scale)+ ') Trainning ... ...')\r\n min_index,min_loss,sr_train_li,sr_test_li,x_test_ph_name2,test_output_name2 = net_model.trainNet(train_img,\r\n label_img,\r\n test_img,\r\n test_label_img,\r\n train_size = train_img.shape[0],\r\n test_size = test_img.shape[0],\r\n Epoch = int(5e3),\r\n iter_view = 500,\r\n saved_path = source_dir+model+'x'+str(scale))\r\n \r\n import matplotlib.pyplot as plt\r\n plt.plot(sp_test_li,'r');plt.plot(sr_test_li,'b');\r\n plt.xlabel('Epoch:100')\r\n plt.ylabel('Loss:0.01')\r\n plt.title('Loss Curve')\r\n ",
"<docstring token>\nfrom DLL.CreateModel import CreateModel\nimport os\n<docstring token>\n\n\ndef WriteToSummary(model, min_index, min_loss, x_test_ph_name, test_output_name\n ):\n line = model + ':' + str(min_index) + ',' + str(min_loss) + '\\n'\n if not os.path.exists(model + '_Summary.txt'):\n Summary = open(model + '_Summary.txt', 'w')\n Summary.close()\n Summary = open(model + '_Summary.txt', 'r+')\n summary_content = ''\n try:\n for info in Summary:\n name_loc = info.index(model)\n name_loc = info.index(':')\n name = info[0:name_loc]\n if model == name:\n info = info.replace(info[name_loc + 1:], str(min_index) +\n ',' + str(min_loss))\n info = (info + ';Input:' + x_test_ph_name + ',Output:' +\n test_output_name + '\\n')\n summary_content += info\n else:\n summary_content += info\n Summary.close()\n Summary = open(model + '_Summary.txt', 'w+')\n Summary.write(summary_content)\n Summary.close()\n except ValueError:\n Summary.close()\n Summary = open(model + '_Summary.txt', 'a+')\n Summary.write(line)\n Summary.close()\n\n\nif __name__ == '__main__':\n model = input('Which model do u wanna choose :')\n scale = int(input('And the magnification is :'))\n source_dir = os.path.abspath(os.path.dirname(os.getcwd())) + '\\\\'\n net_model = CreateModel(model=model, lr=0.001, batch_size=128)\n x_test, y_test, x_train, y_train, train_size, test_size = (net_model.\n prepareSparseData(source_dir + 'Saprse_Train_Data\\\\', 0.2))\n print('\\n\\nSparseModel ( ' + model + ' x ' + str(scale) +\n ' ) Trainning ... ...')\n (min_index, min_loss, sp_train_li, sp_test_li, x_test_ph_name1,\n test_output_name1) = (net_model.trainNet(x_train, y_train, x_test,\n y_test, train_size, test_size, Epoch=int(10000.0), iter_view=500,\n saved_path=source_dir + model + '_SparseSR_x' + str(scale)))\n WriteToSummary(model, min_index, min_loss, x_test_ph_name1,\n test_output_name1)\n train_img, label_img, test_img, test_label_img = (net_model.\n prepareImageData(source_dir1=source_dir + '\\\\xTrainData\\\\',\n source_dir2=source_dir + '\\\\yTrainData\\\\', ratio=0.2, scale=scale))\n net_model = CreateModel(model=model, lr=0.001, batch_size=128)\n print('\\n\\nSRModel (' + model + ' x ' + str(scale) + ') Trainning ... ...')\n (min_index, min_loss, sr_train_li, sr_test_li, x_test_ph_name2,\n test_output_name2) = (net_model.trainNet(train_img, label_img,\n test_img, test_label_img, train_size=train_img.shape[0], test_size=\n test_img.shape[0], Epoch=int(5000.0), iter_view=500, saved_path=\n source_dir + model + 'x' + str(scale)))\n import matplotlib.pyplot as plt\n plt.plot(sp_test_li, 'r')\n plt.plot(sr_test_li, 'b')\n plt.xlabel('Epoch:100')\n plt.ylabel('Loss:0.01')\n plt.title('Loss Curve')\n",
"<docstring token>\n<import token>\n<docstring token>\n\n\ndef WriteToSummary(model, min_index, min_loss, x_test_ph_name, test_output_name\n ):\n line = model + ':' + str(min_index) + ',' + str(min_loss) + '\\n'\n if not os.path.exists(model + '_Summary.txt'):\n Summary = open(model + '_Summary.txt', 'w')\n Summary.close()\n Summary = open(model + '_Summary.txt', 'r+')\n summary_content = ''\n try:\n for info in Summary:\n name_loc = info.index(model)\n name_loc = info.index(':')\n name = info[0:name_loc]\n if model == name:\n info = info.replace(info[name_loc + 1:], str(min_index) +\n ',' + str(min_loss))\n info = (info + ';Input:' + x_test_ph_name + ',Output:' +\n test_output_name + '\\n')\n summary_content += info\n else:\n summary_content += info\n Summary.close()\n Summary = open(model + '_Summary.txt', 'w+')\n Summary.write(summary_content)\n Summary.close()\n except ValueError:\n Summary.close()\n Summary = open(model + '_Summary.txt', 'a+')\n Summary.write(line)\n Summary.close()\n\n\nif __name__ == '__main__':\n model = input('Which model do u wanna choose :')\n scale = int(input('And the magnification is :'))\n source_dir = os.path.abspath(os.path.dirname(os.getcwd())) + '\\\\'\n net_model = CreateModel(model=model, lr=0.001, batch_size=128)\n x_test, y_test, x_train, y_train, train_size, test_size = (net_model.\n prepareSparseData(source_dir + 'Saprse_Train_Data\\\\', 0.2))\n print('\\n\\nSparseModel ( ' + model + ' x ' + str(scale) +\n ' ) Trainning ... ...')\n (min_index, min_loss, sp_train_li, sp_test_li, x_test_ph_name1,\n test_output_name1) = (net_model.trainNet(x_train, y_train, x_test,\n y_test, train_size, test_size, Epoch=int(10000.0), iter_view=500,\n saved_path=source_dir + model + '_SparseSR_x' + str(scale)))\n WriteToSummary(model, min_index, min_loss, x_test_ph_name1,\n test_output_name1)\n train_img, label_img, test_img, test_label_img = (net_model.\n prepareImageData(source_dir1=source_dir + '\\\\xTrainData\\\\',\n source_dir2=source_dir + '\\\\yTrainData\\\\', ratio=0.2, scale=scale))\n net_model = CreateModel(model=model, lr=0.001, batch_size=128)\n print('\\n\\nSRModel (' + model + ' x ' + str(scale) + ') Trainning ... ...')\n (min_index, min_loss, sr_train_li, sr_test_li, x_test_ph_name2,\n test_output_name2) = (net_model.trainNet(train_img, label_img,\n test_img, test_label_img, train_size=train_img.shape[0], test_size=\n test_img.shape[0], Epoch=int(5000.0), iter_view=500, saved_path=\n source_dir + model + 'x' + str(scale)))\n import matplotlib.pyplot as plt\n plt.plot(sp_test_li, 'r')\n plt.plot(sr_test_li, 'b')\n plt.xlabel('Epoch:100')\n plt.ylabel('Loss:0.01')\n plt.title('Loss Curve')\n",
"<docstring token>\n<import token>\n<docstring token>\n\n\ndef WriteToSummary(model, min_index, min_loss, x_test_ph_name, test_output_name\n ):\n line = model + ':' + str(min_index) + ',' + str(min_loss) + '\\n'\n if not os.path.exists(model + '_Summary.txt'):\n Summary = open(model + '_Summary.txt', 'w')\n Summary.close()\n Summary = open(model + '_Summary.txt', 'r+')\n summary_content = ''\n try:\n for info in Summary:\n name_loc = info.index(model)\n name_loc = info.index(':')\n name = info[0:name_loc]\n if model == name:\n info = info.replace(info[name_loc + 1:], str(min_index) +\n ',' + str(min_loss))\n info = (info + ';Input:' + x_test_ph_name + ',Output:' +\n test_output_name + '\\n')\n summary_content += info\n else:\n summary_content += info\n Summary.close()\n Summary = open(model + '_Summary.txt', 'w+')\n Summary.write(summary_content)\n Summary.close()\n except ValueError:\n Summary.close()\n Summary = open(model + '_Summary.txt', 'a+')\n Summary.write(line)\n Summary.close()\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<docstring token>\n<function token>\n<code token>\n"
] | false |
99,376 |
8ec35d8a882a52e06ee65b34ddfd971258ffc3fe
|
# coding: utf-8
"""
DBpedia
This is the API of the DBpedia Ontology # noqa: E501
The version of the OpenAPI document: v0.0.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import dbpedia
from dbpedia.api.comics_creator_api import ComicsCreatorApi # noqa: E501
from dbpedia.rest import ApiException
class TestComicsCreatorApi(unittest.TestCase):
"""ComicsCreatorApi unit test stubs"""
def setUp(self):
self.api = dbpedia.api.comics_creator_api.ComicsCreatorApi() # noqa: E501
def tearDown(self):
pass
def test_comicscreators_get(self):
"""Test case for comicscreators_get
List all instances of ComicsCreator # noqa: E501
"""
pass
def test_comicscreators_id_get(self):
"""Test case for comicscreators_id_get
Get a single ComicsCreator by its id # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
|
[
"# coding: utf-8\n\n\"\"\"\n DBpedia\n\n This is the API of the DBpedia Ontology # noqa: E501\n\n The version of the OpenAPI document: v0.0.1\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nfrom __future__ import absolute_import\n\nimport unittest\n\nimport dbpedia\nfrom dbpedia.api.comics_creator_api import ComicsCreatorApi # noqa: E501\nfrom dbpedia.rest import ApiException\n\n\nclass TestComicsCreatorApi(unittest.TestCase):\n \"\"\"ComicsCreatorApi unit test stubs\"\"\"\n\n def setUp(self):\n self.api = dbpedia.api.comics_creator_api.ComicsCreatorApi() # noqa: E501\n\n def tearDown(self):\n pass\n\n def test_comicscreators_get(self):\n \"\"\"Test case for comicscreators_get\n\n List all instances of ComicsCreator # noqa: E501\n \"\"\"\n pass\n\n def test_comicscreators_id_get(self):\n \"\"\"Test case for comicscreators_id_get\n\n Get a single ComicsCreator by its id # noqa: E501\n \"\"\"\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<docstring token>\nfrom __future__ import absolute_import\nimport unittest\nimport dbpedia\nfrom dbpedia.api.comics_creator_api import ComicsCreatorApi\nfrom dbpedia.rest import ApiException\n\n\nclass TestComicsCreatorApi(unittest.TestCase):\n \"\"\"ComicsCreatorApi unit test stubs\"\"\"\n\n def setUp(self):\n self.api = dbpedia.api.comics_creator_api.ComicsCreatorApi()\n\n def tearDown(self):\n pass\n\n def test_comicscreators_get(self):\n \"\"\"Test case for comicscreators_get\n\n List all instances of ComicsCreator # noqa: E501\n \"\"\"\n pass\n\n def test_comicscreators_id_get(self):\n \"\"\"Test case for comicscreators_id_get\n\n Get a single ComicsCreator by its id # noqa: E501\n \"\"\"\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<docstring token>\n<import token>\n\n\nclass TestComicsCreatorApi(unittest.TestCase):\n \"\"\"ComicsCreatorApi unit test stubs\"\"\"\n\n def setUp(self):\n self.api = dbpedia.api.comics_creator_api.ComicsCreatorApi()\n\n def tearDown(self):\n pass\n\n def test_comicscreators_get(self):\n \"\"\"Test case for comicscreators_get\n\n List all instances of ComicsCreator # noqa: E501\n \"\"\"\n pass\n\n def test_comicscreators_id_get(self):\n \"\"\"Test case for comicscreators_id_get\n\n Get a single ComicsCreator by its id # noqa: E501\n \"\"\"\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<docstring token>\n<import token>\n\n\nclass TestComicsCreatorApi(unittest.TestCase):\n \"\"\"ComicsCreatorApi unit test stubs\"\"\"\n\n def setUp(self):\n self.api = dbpedia.api.comics_creator_api.ComicsCreatorApi()\n\n def tearDown(self):\n pass\n\n def test_comicscreators_get(self):\n \"\"\"Test case for comicscreators_get\n\n List all instances of ComicsCreator # noqa: E501\n \"\"\"\n pass\n\n def test_comicscreators_id_get(self):\n \"\"\"Test case for comicscreators_id_get\n\n Get a single ComicsCreator by its id # noqa: E501\n \"\"\"\n pass\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass TestComicsCreatorApi(unittest.TestCase):\n <docstring token>\n\n def setUp(self):\n self.api = dbpedia.api.comics_creator_api.ComicsCreatorApi()\n\n def tearDown(self):\n pass\n\n def test_comicscreators_get(self):\n \"\"\"Test case for comicscreators_get\n\n List all instances of ComicsCreator # noqa: E501\n \"\"\"\n pass\n\n def test_comicscreators_id_get(self):\n \"\"\"Test case for comicscreators_id_get\n\n Get a single ComicsCreator by its id # noqa: E501\n \"\"\"\n pass\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass TestComicsCreatorApi(unittest.TestCase):\n <docstring token>\n\n def setUp(self):\n self.api = dbpedia.api.comics_creator_api.ComicsCreatorApi()\n <function token>\n\n def test_comicscreators_get(self):\n \"\"\"Test case for comicscreators_get\n\n List all instances of ComicsCreator # noqa: E501\n \"\"\"\n pass\n\n def test_comicscreators_id_get(self):\n \"\"\"Test case for comicscreators_id_get\n\n Get a single ComicsCreator by its id # noqa: E501\n \"\"\"\n pass\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass TestComicsCreatorApi(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n\n def test_comicscreators_get(self):\n \"\"\"Test case for comicscreators_get\n\n List all instances of ComicsCreator # noqa: E501\n \"\"\"\n pass\n\n def test_comicscreators_id_get(self):\n \"\"\"Test case for comicscreators_id_get\n\n Get a single ComicsCreator by its id # noqa: E501\n \"\"\"\n pass\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass TestComicsCreatorApi(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n\n def test_comicscreators_get(self):\n \"\"\"Test case for comicscreators_get\n\n List all instances of ComicsCreator # noqa: E501\n \"\"\"\n pass\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass TestComicsCreatorApi(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<class token>\n<code token>\n"
] | false |
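The steps column in each record abstracts the source one element class at a time, replacing docstrings, imports, classes, functions, assignments, and top-level code with placeholder tokens. The pipeline that produces these steps is not part of this dump, so the sketch below only illustrates what one such pass could look like (collapsing a run of top-level imports into a single `<import token>`); the function name and regex are hypothetical, not the real tooling:

import re

def abstract_imports(source):
    """Collapse each run of top-level import lines into one '<import token>'."""
    out, in_imports = [], False
    for line in source.splitlines():
        if re.match(r"(?:import\s+\w|from\s+\S+\s+import\b)", line):
            if not in_imports:          # first line of an import run
                out.append("<import token>")
                in_imports = True
        else:
            in_imports = False
            out.append(line)
    return "\n".join(out) + "\n"

demo = "import os\nimport sys\n\n\ndef f():\n    return 1\n"
print(abstract_imports(demo))
# -> '<import token>' followed by the untouched function body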
99,377 |
2a96e000c79f230788aeaccf21cad3a6d5aa6f77
|
from core.effect.base import EffectBase
from core.tuning.skill import SkillTuning
from raisecap.tuning.effect import EffectTuning
from siege import game


class Regen(EffectBase):
    TUNING = EffectTuning.REGEN

    def __init__(self, owner, level, duration, source, isRefresh):
        super(Regen, self).__init__(owner, duration, isRefresh)
        self.adjustment = owner.stats.HP.getMax() * SkillTuning.REGENERATE.HEALTH_PERCENTAGES[level - 1] / 100.0
        self.statUid = owner.stats.HPRegen.mod(self.adjustment)

    def onRemove(self, owner):
        owner.stats.HPRegen.unmod(self.statUid)

    @staticmethod
    def register():
        game.effects.register(Regen.TUNING.NAME, Regen)
|
[
"from core.effect.base import EffectBase\nfrom core.tuning.skill import SkillTuning\nfrom raisecap.tuning.effect import EffectTuning\nfrom siege import game\n\n\nclass Regen(EffectBase):\n TUNING = EffectTuning.REGEN\n\n def __init__(self, owner, level, duration, source, isRefresh):\n super(Regen, self).__init__(owner, duration, isRefresh)\n self.adjustment = owner.stats.HP.getMax() * SkillTuning.REGENERATE.HEALTH_PERCENTAGES[level - 1] / 100.0\n self.statUid = owner.stats.HPRegen.mod(self.adjustment)\n\n def onRemove(self, owner):\n owner.stats.HPRegen.unmod(self.statUid)\n\n @staticmethod\n def register():\n game.effects.register(Regen.TUNING.NAME, Regen)\n",
"from core.effect.base import EffectBase\nfrom core.tuning.skill import SkillTuning\nfrom raisecap.tuning.effect import EffectTuning\nfrom siege import game\n\n\nclass Regen(EffectBase):\n TUNING = EffectTuning.REGEN\n\n def __init__(self, owner, level, duration, source, isRefresh):\n super(Regen, self).__init__(owner, duration, isRefresh)\n self.adjustment = owner.stats.HP.getMax(\n ) * SkillTuning.REGENERATE.HEALTH_PERCENTAGES[level - 1] / 100.0\n self.statUid = owner.stats.HPRegen.mod(self.adjustment)\n\n def onRemove(self, owner):\n owner.stats.HPRegen.unmod(self.statUid)\n\n @staticmethod\n def register():\n game.effects.register(Regen.TUNING.NAME, Regen)\n",
"<import token>\n\n\nclass Regen(EffectBase):\n TUNING = EffectTuning.REGEN\n\n def __init__(self, owner, level, duration, source, isRefresh):\n super(Regen, self).__init__(owner, duration, isRefresh)\n self.adjustment = owner.stats.HP.getMax(\n ) * SkillTuning.REGENERATE.HEALTH_PERCENTAGES[level - 1] / 100.0\n self.statUid = owner.stats.HPRegen.mod(self.adjustment)\n\n def onRemove(self, owner):\n owner.stats.HPRegen.unmod(self.statUid)\n\n @staticmethod\n def register():\n game.effects.register(Regen.TUNING.NAME, Regen)\n",
"<import token>\n\n\nclass Regen(EffectBase):\n <assignment token>\n\n def __init__(self, owner, level, duration, source, isRefresh):\n super(Regen, self).__init__(owner, duration, isRefresh)\n self.adjustment = owner.stats.HP.getMax(\n ) * SkillTuning.REGENERATE.HEALTH_PERCENTAGES[level - 1] / 100.0\n self.statUid = owner.stats.HPRegen.mod(self.adjustment)\n\n def onRemove(self, owner):\n owner.stats.HPRegen.unmod(self.statUid)\n\n @staticmethod\n def register():\n game.effects.register(Regen.TUNING.NAME, Regen)\n",
"<import token>\n\n\nclass Regen(EffectBase):\n <assignment token>\n <function token>\n\n def onRemove(self, owner):\n owner.stats.HPRegen.unmod(self.statUid)\n\n @staticmethod\n def register():\n game.effects.register(Regen.TUNING.NAME, Regen)\n",
"<import token>\n\n\nclass Regen(EffectBase):\n <assignment token>\n <function token>\n <function token>\n\n @staticmethod\n def register():\n game.effects.register(Regen.TUNING.NAME, Regen)\n",
"<import token>\n\n\nclass Regen(EffectBase):\n <assignment token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
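Regen above adds a level-scaled percentage of max HP to the owner's HPRegen stat and removes exactly that modifier in onRemove. The siege engine classes (EffectBase, game.effects, the stats objects) are not available here, so this is a self-contained sketch of the mod/unmod bookkeeping pattern only; Stat and the tuning values are stand-ins, not the real API:

import itertools

class Stat:
    """A base value plus removable modifiers, keyed by unique ids."""
    _ids = itertools.count()

    def __init__(self, base):
        self.base = base
        self.mods = {}

    def mod(self, amount):            # like owner.stats.HPRegen.mod(...)
        uid = next(self._ids)
        self.mods[uid] = amount
        return uid

    def unmod(self, uid):             # like owner.stats.HPRegen.unmod(...)
        self.mods.pop(uid, None)

    def value(self):
        return self.base + sum(self.mods.values())

hp_regen = Stat(base=1.0)
max_hp, level_pct = 1000.0, 2.0                   # hypothetical tuning values
uid = hp_regen.mod(max_hp * level_pct / 100.0)    # mirrors Regen.__init__
assert hp_regen.value() == 21.0
hp_regen.unmod(uid)                               # mirrors Regen.onRemove
assert hp_regen.value() == 1.0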
99,378 |
a92affe8b0afb78a1c8610adfff9fe2d407ddb83
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from unittest import TestCase,TestSuite

from tree import Tree
from mdsdata import *
from mdsscalar import *
from mdsarray import *

import numpy as np
import random
import os
import sys



import tempfile
_tmpdir=tempfile.mkdtemp()

def setUpModule():
    pass

def tearDownModule():
    import shutil
    shutil.rmtree(_tmpdir)


class segmentsTests(TestCase):

    def setUp(self):
        os.environ["seg_tree_path"]=_tmpdir

    def tearDown(self):
        pass

    def arrayDimensionOrder(self):
        ptree=Tree('seg_tree',-1,'NEW')
        ptree.addNode('IMM')
        ptree.write()
        ptree=Tree('seg_tree',-1)
        ptree.createPulse(1)
        ptree=Tree('seg_tree',1)
        node=ptree.getNode('IMM')

        WIDTH = 640
        HEIGHT =480;
        currFrame=np.zeros(WIDTH*HEIGHT, dtype = np.int16);
        currTime=float(0);
        for i in range(0,WIDTH):
            for j in range(0,HEIGHT):
                currFrame[i*HEIGHT+j]=random.randint(0,255)
        currTime = float(0)
        startTime = Float32(currTime)
        endTime = Float32(currTime)
        dim = Float32Array(currTime)
        segment = Int16Array(currFrame)
        segment.resize([1,HEIGHT,WIDTH])
        shape = segment.getShape()
        node.makeSegment(startTime, endTime, dim, segment)
        retShape = node.getShape()

        self.assertEqual(shape[0],retShape[0])
        self.assertEqual(shape[1],retShape[1])
        self.assertEqual(shape[2],retShape[2])

    def runTest(self):
        self.arrayDimensionOrder()



def suite():
    tests = ['arrayDimensionOrder']
    return TestSuite(map(segmentsTests,tests))
|
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom unittest import TestCase,TestSuite\n\nfrom tree import Tree\nfrom mdsdata import *\nfrom mdsscalar import *\nfrom mdsarray import *\n\nimport numpy as np\nimport random\nimport os\nimport sys\n\n\n\nimport tempfile\n_tmpdir=tempfile.mkdtemp()\n\ndef setUpModule(): \n pass\n\ndef tearDownModule():\n import shutil\n shutil.rmtree(_tmpdir)\n \n\nclass segmentsTests(TestCase):\n\n def setUp(self):\n os.environ[\"seg_tree_path\"]=_tmpdir\n \n def tearDown(self):\n pass\n\n def arrayDimensionOrder(self):\n ptree=Tree('seg_tree',-1,'NEW')\n ptree.addNode('IMM')\n ptree.write()\n ptree=Tree('seg_tree',-1)\n ptree.createPulse(1)\n ptree=Tree('seg_tree',1)\n node=ptree.getNode('IMM')\n \n WIDTH = 640\n HEIGHT =480;\n currFrame=np.zeros(WIDTH*HEIGHT, dtype = np.int16);\n currTime=float(0);\n for i in range(0,WIDTH):\n for j in range(0,HEIGHT):\n currFrame[i*HEIGHT+j]=random.randint(0,255)\n currTime = float(0)\n startTime = Float32(currTime)\n endTime = Float32(currTime)\n dim = Float32Array(currTime)\n segment = Int16Array(currFrame)\n segment.resize([1,HEIGHT,WIDTH])\n shape = segment.getShape()\n node.makeSegment(startTime, endTime, dim, segment)\n retShape = node.getShape()\n \n self.assertEqual(shape[0],retShape[0])\n self.assertEqual(shape[1],retShape[1])\n self.assertEqual(shape[2],retShape[2])\n\n def runTest(self):\n self.arrayDimensionOrder()\n\n\n\ndef suite():\n tests = ['arrayDimensionOrder']\n return TestSuite(map(segmentsTests,tests))\n",
"from unittest import TestCase, TestSuite\nfrom tree import Tree\nfrom mdsdata import *\nfrom mdsscalar import *\nfrom mdsarray import *\nimport numpy as np\nimport random\nimport os\nimport sys\nimport tempfile\n_tmpdir = tempfile.mkdtemp()\n\n\ndef setUpModule():\n pass\n\n\ndef tearDownModule():\n import shutil\n shutil.rmtree(_tmpdir)\n\n\nclass segmentsTests(TestCase):\n\n def setUp(self):\n os.environ['seg_tree_path'] = _tmpdir\n\n def tearDown(self):\n pass\n\n def arrayDimensionOrder(self):\n ptree = Tree('seg_tree', -1, 'NEW')\n ptree.addNode('IMM')\n ptree.write()\n ptree = Tree('seg_tree', -1)\n ptree.createPulse(1)\n ptree = Tree('seg_tree', 1)\n node = ptree.getNode('IMM')\n WIDTH = 640\n HEIGHT = 480\n currFrame = np.zeros(WIDTH * HEIGHT, dtype=np.int16)\n currTime = float(0)\n for i in range(0, WIDTH):\n for j in range(0, HEIGHT):\n currFrame[i * HEIGHT + j] = random.randint(0, 255)\n currTime = float(0)\n startTime = Float32(currTime)\n endTime = Float32(currTime)\n dim = Float32Array(currTime)\n segment = Int16Array(currFrame)\n segment.resize([1, HEIGHT, WIDTH])\n shape = segment.getShape()\n node.makeSegment(startTime, endTime, dim, segment)\n retShape = node.getShape()\n self.assertEqual(shape[0], retShape[0])\n self.assertEqual(shape[1], retShape[1])\n self.assertEqual(shape[2], retShape[2])\n\n def runTest(self):\n self.arrayDimensionOrder()\n\n\ndef suite():\n tests = ['arrayDimensionOrder']\n return TestSuite(map(segmentsTests, tests))\n",
"<import token>\n_tmpdir = tempfile.mkdtemp()\n\n\ndef setUpModule():\n pass\n\n\ndef tearDownModule():\n import shutil\n shutil.rmtree(_tmpdir)\n\n\nclass segmentsTests(TestCase):\n\n def setUp(self):\n os.environ['seg_tree_path'] = _tmpdir\n\n def tearDown(self):\n pass\n\n def arrayDimensionOrder(self):\n ptree = Tree('seg_tree', -1, 'NEW')\n ptree.addNode('IMM')\n ptree.write()\n ptree = Tree('seg_tree', -1)\n ptree.createPulse(1)\n ptree = Tree('seg_tree', 1)\n node = ptree.getNode('IMM')\n WIDTH = 640\n HEIGHT = 480\n currFrame = np.zeros(WIDTH * HEIGHT, dtype=np.int16)\n currTime = float(0)\n for i in range(0, WIDTH):\n for j in range(0, HEIGHT):\n currFrame[i * HEIGHT + j] = random.randint(0, 255)\n currTime = float(0)\n startTime = Float32(currTime)\n endTime = Float32(currTime)\n dim = Float32Array(currTime)\n segment = Int16Array(currFrame)\n segment.resize([1, HEIGHT, WIDTH])\n shape = segment.getShape()\n node.makeSegment(startTime, endTime, dim, segment)\n retShape = node.getShape()\n self.assertEqual(shape[0], retShape[0])\n self.assertEqual(shape[1], retShape[1])\n self.assertEqual(shape[2], retShape[2])\n\n def runTest(self):\n self.arrayDimensionOrder()\n\n\ndef suite():\n tests = ['arrayDimensionOrder']\n return TestSuite(map(segmentsTests, tests))\n",
"<import token>\n<assignment token>\n\n\ndef setUpModule():\n pass\n\n\ndef tearDownModule():\n import shutil\n shutil.rmtree(_tmpdir)\n\n\nclass segmentsTests(TestCase):\n\n def setUp(self):\n os.environ['seg_tree_path'] = _tmpdir\n\n def tearDown(self):\n pass\n\n def arrayDimensionOrder(self):\n ptree = Tree('seg_tree', -1, 'NEW')\n ptree.addNode('IMM')\n ptree.write()\n ptree = Tree('seg_tree', -1)\n ptree.createPulse(1)\n ptree = Tree('seg_tree', 1)\n node = ptree.getNode('IMM')\n WIDTH = 640\n HEIGHT = 480\n currFrame = np.zeros(WIDTH * HEIGHT, dtype=np.int16)\n currTime = float(0)\n for i in range(0, WIDTH):\n for j in range(0, HEIGHT):\n currFrame[i * HEIGHT + j] = random.randint(0, 255)\n currTime = float(0)\n startTime = Float32(currTime)\n endTime = Float32(currTime)\n dim = Float32Array(currTime)\n segment = Int16Array(currFrame)\n segment.resize([1, HEIGHT, WIDTH])\n shape = segment.getShape()\n node.makeSegment(startTime, endTime, dim, segment)\n retShape = node.getShape()\n self.assertEqual(shape[0], retShape[0])\n self.assertEqual(shape[1], retShape[1])\n self.assertEqual(shape[2], retShape[2])\n\n def runTest(self):\n self.arrayDimensionOrder()\n\n\ndef suite():\n tests = ['arrayDimensionOrder']\n return TestSuite(map(segmentsTests, tests))\n",
"<import token>\n<assignment token>\n<function token>\n\n\ndef tearDownModule():\n import shutil\n shutil.rmtree(_tmpdir)\n\n\nclass segmentsTests(TestCase):\n\n def setUp(self):\n os.environ['seg_tree_path'] = _tmpdir\n\n def tearDown(self):\n pass\n\n def arrayDimensionOrder(self):\n ptree = Tree('seg_tree', -1, 'NEW')\n ptree.addNode('IMM')\n ptree.write()\n ptree = Tree('seg_tree', -1)\n ptree.createPulse(1)\n ptree = Tree('seg_tree', 1)\n node = ptree.getNode('IMM')\n WIDTH = 640\n HEIGHT = 480\n currFrame = np.zeros(WIDTH * HEIGHT, dtype=np.int16)\n currTime = float(0)\n for i in range(0, WIDTH):\n for j in range(0, HEIGHT):\n currFrame[i * HEIGHT + j] = random.randint(0, 255)\n currTime = float(0)\n startTime = Float32(currTime)\n endTime = Float32(currTime)\n dim = Float32Array(currTime)\n segment = Int16Array(currFrame)\n segment.resize([1, HEIGHT, WIDTH])\n shape = segment.getShape()\n node.makeSegment(startTime, endTime, dim, segment)\n retShape = node.getShape()\n self.assertEqual(shape[0], retShape[0])\n self.assertEqual(shape[1], retShape[1])\n self.assertEqual(shape[2], retShape[2])\n\n def runTest(self):\n self.arrayDimensionOrder()\n\n\ndef suite():\n tests = ['arrayDimensionOrder']\n return TestSuite(map(segmentsTests, tests))\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n\n\nclass segmentsTests(TestCase):\n\n def setUp(self):\n os.environ['seg_tree_path'] = _tmpdir\n\n def tearDown(self):\n pass\n\n def arrayDimensionOrder(self):\n ptree = Tree('seg_tree', -1, 'NEW')\n ptree.addNode('IMM')\n ptree.write()\n ptree = Tree('seg_tree', -1)\n ptree.createPulse(1)\n ptree = Tree('seg_tree', 1)\n node = ptree.getNode('IMM')\n WIDTH = 640\n HEIGHT = 480\n currFrame = np.zeros(WIDTH * HEIGHT, dtype=np.int16)\n currTime = float(0)\n for i in range(0, WIDTH):\n for j in range(0, HEIGHT):\n currFrame[i * HEIGHT + j] = random.randint(0, 255)\n currTime = float(0)\n startTime = Float32(currTime)\n endTime = Float32(currTime)\n dim = Float32Array(currTime)\n segment = Int16Array(currFrame)\n segment.resize([1, HEIGHT, WIDTH])\n shape = segment.getShape()\n node.makeSegment(startTime, endTime, dim, segment)\n retShape = node.getShape()\n self.assertEqual(shape[0], retShape[0])\n self.assertEqual(shape[1], retShape[1])\n self.assertEqual(shape[2], retShape[2])\n\n def runTest(self):\n self.arrayDimensionOrder()\n\n\ndef suite():\n tests = ['arrayDimensionOrder']\n return TestSuite(map(segmentsTests, tests))\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n\n\nclass segmentsTests(TestCase):\n\n def setUp(self):\n os.environ['seg_tree_path'] = _tmpdir\n\n def tearDown(self):\n pass\n\n def arrayDimensionOrder(self):\n ptree = Tree('seg_tree', -1, 'NEW')\n ptree.addNode('IMM')\n ptree.write()\n ptree = Tree('seg_tree', -1)\n ptree.createPulse(1)\n ptree = Tree('seg_tree', 1)\n node = ptree.getNode('IMM')\n WIDTH = 640\n HEIGHT = 480\n currFrame = np.zeros(WIDTH * HEIGHT, dtype=np.int16)\n currTime = float(0)\n for i in range(0, WIDTH):\n for j in range(0, HEIGHT):\n currFrame[i * HEIGHT + j] = random.randint(0, 255)\n currTime = float(0)\n startTime = Float32(currTime)\n endTime = Float32(currTime)\n dim = Float32Array(currTime)\n segment = Int16Array(currFrame)\n segment.resize([1, HEIGHT, WIDTH])\n shape = segment.getShape()\n node.makeSegment(startTime, endTime, dim, segment)\n retShape = node.getShape()\n self.assertEqual(shape[0], retShape[0])\n self.assertEqual(shape[1], retShape[1])\n self.assertEqual(shape[2], retShape[2])\n\n def runTest(self):\n self.arrayDimensionOrder()\n\n\n<function token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n\n\nclass segmentsTests(TestCase):\n\n def setUp(self):\n os.environ['seg_tree_path'] = _tmpdir\n\n def tearDown(self):\n pass\n\n def arrayDimensionOrder(self):\n ptree = Tree('seg_tree', -1, 'NEW')\n ptree.addNode('IMM')\n ptree.write()\n ptree = Tree('seg_tree', -1)\n ptree.createPulse(1)\n ptree = Tree('seg_tree', 1)\n node = ptree.getNode('IMM')\n WIDTH = 640\n HEIGHT = 480\n currFrame = np.zeros(WIDTH * HEIGHT, dtype=np.int16)\n currTime = float(0)\n for i in range(0, WIDTH):\n for j in range(0, HEIGHT):\n currFrame[i * HEIGHT + j] = random.randint(0, 255)\n currTime = float(0)\n startTime = Float32(currTime)\n endTime = Float32(currTime)\n dim = Float32Array(currTime)\n segment = Int16Array(currFrame)\n segment.resize([1, HEIGHT, WIDTH])\n shape = segment.getShape()\n node.makeSegment(startTime, endTime, dim, segment)\n retShape = node.getShape()\n self.assertEqual(shape[0], retShape[0])\n self.assertEqual(shape[1], retShape[1])\n self.assertEqual(shape[2], retShape[2])\n <function token>\n\n\n<function token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n\n\nclass segmentsTests(TestCase):\n\n def setUp(self):\n os.environ['seg_tree_path'] = _tmpdir\n <function token>\n\n def arrayDimensionOrder(self):\n ptree = Tree('seg_tree', -1, 'NEW')\n ptree.addNode('IMM')\n ptree.write()\n ptree = Tree('seg_tree', -1)\n ptree.createPulse(1)\n ptree = Tree('seg_tree', 1)\n node = ptree.getNode('IMM')\n WIDTH = 640\n HEIGHT = 480\n currFrame = np.zeros(WIDTH * HEIGHT, dtype=np.int16)\n currTime = float(0)\n for i in range(0, WIDTH):\n for j in range(0, HEIGHT):\n currFrame[i * HEIGHT + j] = random.randint(0, 255)\n currTime = float(0)\n startTime = Float32(currTime)\n endTime = Float32(currTime)\n dim = Float32Array(currTime)\n segment = Int16Array(currFrame)\n segment.resize([1, HEIGHT, WIDTH])\n shape = segment.getShape()\n node.makeSegment(startTime, endTime, dim, segment)\n retShape = node.getShape()\n self.assertEqual(shape[0], retShape[0])\n self.assertEqual(shape[1], retShape[1])\n self.assertEqual(shape[2], retShape[2])\n <function token>\n\n\n<function token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n\n\nclass segmentsTests(TestCase):\n <function token>\n <function token>\n\n def arrayDimensionOrder(self):\n ptree = Tree('seg_tree', -1, 'NEW')\n ptree.addNode('IMM')\n ptree.write()\n ptree = Tree('seg_tree', -1)\n ptree.createPulse(1)\n ptree = Tree('seg_tree', 1)\n node = ptree.getNode('IMM')\n WIDTH = 640\n HEIGHT = 480\n currFrame = np.zeros(WIDTH * HEIGHT, dtype=np.int16)\n currTime = float(0)\n for i in range(0, WIDTH):\n for j in range(0, HEIGHT):\n currFrame[i * HEIGHT + j] = random.randint(0, 255)\n currTime = float(0)\n startTime = Float32(currTime)\n endTime = Float32(currTime)\n dim = Float32Array(currTime)\n segment = Int16Array(currFrame)\n segment.resize([1, HEIGHT, WIDTH])\n shape = segment.getShape()\n node.makeSegment(startTime, endTime, dim, segment)\n retShape = node.getShape()\n self.assertEqual(shape[0], retShape[0])\n self.assertEqual(shape[1], retShape[1])\n self.assertEqual(shape[2], retShape[2])\n <function token>\n\n\n<function token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n\n\nclass segmentsTests(TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n<class token>\n<function token>\n"
] | false |
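arrayDimensionOrder above writes a frame as a [1, HEIGHT, WIDTH] segment and asserts the shape survives the round trip through the tree. MDSplus (Tree, Int16Array, makeSegment) is not importable here, so this sketch reproduces only the numpy side of the check, with the same dimensions:

import numpy as np

WIDTH, HEIGHT = 640, 480
# Flat int16 frame, as built by the nested randint loop in the test.
curr_frame = np.random.randint(0, 256, size=WIDTH * HEIGHT).astype(np.int16)

# Equivalent of segment.resize([1, HEIGHT, WIDTH]) followed by getShape():
segment = curr_frame.reshape(1, HEIGHT, WIDTH)
assert segment.shape == (1, HEIGHT, WIDTH)
# The test's three assertEqual calls compare exactly these axis sizes
# against the shape read back from the tree node.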
99,379 |
9b304ac513057336f3a98a7acaf6d61006efcdb3
|
# =============================================================================
# =============================================================================
# som_and_rf_alpha.py
# Created by Chance Haycock January 2020
#
# Similar to Interim model but with RF and SOM. We have expanded the data set
# by using training data from campaigns 1, 2, 3, 4
#
# =============================================================================
# =============================================================================
from models.model_utilities import *

# =========================================
# SOM AND RF (K2SC_ALPHA) MODEL
# =========================================

# First need to collate master table data and SOM data using training set alpha
def make_model_table():
	training_epics = pd.read_csv('{}/training_sets/k2sc/c1-4_alpha.csv'.format(project_dir))['epic_number'].to_numpy()

	master_c1 = pd.read_csv('{}/tables/k2sc/campaign_1_master_table.csv'.format(project_dir))
	master_c2 = pd.read_csv('{}/tables/k2sc/campaign_2_master_table.csv'.format(project_dir))
	master_c3 = pd.read_csv('{}/tables/k2sc/campaign_3_master_table.csv'.format(project_dir))
	master_c4 = pd.read_csv('{}/tables/k2sc/campaign_4_master_table.csv'.format(project_dir))
	data_master = master_c1.append(master_c2, ignore_index=True).append(master_c3, ignore_index=True).append(master_c4, ignore_index=True)
	bin_columns = make_bin_columns(64)
	data_master = data_master.drop(bin_columns, axis=1)
	data_train = data_master[data_master['epic_number'].isin(training_epics)]

	som_c1 = pd.read_csv('{}/som_statistics/k2sc/c1-4_alpha/campaign_1.csv'.format(project_dir))
	som_c2 = pd.read_csv('{}/som_statistics/k2sc/c1-4_alpha/campaign_2.csv'.format(project_dir))
	som_c3 = pd.read_csv('{}/som_statistics/k2sc/c1-4_alpha/campaign_3.csv'.format(project_dir))
	som_c4 = pd.read_csv('{}/som_statistics/k2sc/c1-4_alpha/campaign_4.csv'.format(project_dir))
	som_master = som_c1.append(som_c2, ignore_index=True).append(som_c3, ignore_index=True).append(som_c4, ignore_index=True)
	som_train = som_master[som_master['epic_number'].isin(training_epics)]

	train_df = data_train.merge(som_train, how='left', on='epic_number')
	# Drop these for now
	train_df = train_df.drop('k2_teff', axis=1).drop('k2_rad', axis=1).drop('k2_mass', axis=1)
	train_df.to_csv('{}/src/models/som_and_rf_alpha/train_FULL.csv'.format(project_dir), index=False)
	print('Model Table Created!')
	print(len(train_df.columns))
	return


# Things that I want from an overall model
# - Overall score (means and variance for stability)
# - Confusion Matrix (means and variance)
# - Feature Importance
# - Learning Curve
# - summary f_1 scores?

def SOM_and_RF_alpha():

	model_label = 'alpha'
	model_number = sys.argv[1]

	print_model_type("SOM and Random Forest")

	# Import global training data. Contains roughly 100 of each class.
	training_file = "{}/src/models/som_and_rf_alpha/train.csv".format(project_dir)
	df = pd.read_csv(training_file)
	print("Using training file: {}".format(training_file))

	# Fill empty entries. Maybe try filling these later.
	df = df.fillna(-1)

	# Features to be tested. Column 0 is epics.
	features = df.drop('class', axis=1).drop('probability', axis=1).columns[1:len(df.columns)-2]

	blank_classifier = RCF(random_state=2, class_weight='balanced')
#	parameters = {'n_estimators':[300, 400, 500, 600],\
#	              'min_samples_split':[2, 3, 4, 5, 6],\
#	              'max_features':[4, 5, 6, 7, 8] }
	parameters = {'n_estimators':[300],\
	              'min_samples_split':[3],\
	              'max_features':[4] }
	evaluate_model(model_label, model_number, blank_classifier, df, parameters, features, in_cv=5, out_cv=5)

	# Do learning curve analysis here

	return


# =============================================================================
# MAIN
# =============================================================================

def main():
	make_model_table()
#	SOM_and_RF_alpha()

if __name__ == "__main__":
	main()
|
[
"# =============================================================================\n# =============================================================================\n# som_and_rf_alpha.py\n# Created by Chance Haycock January 2020\n#\n# Similar to Interim model but with RF and SOM. We have expanded the data set\n# by using training data from campaigns 1, 2, 3, 4\n#\n# =============================================================================\n# =============================================================================\nfrom models.model_utilities import *\n\n# =========================================\n# SOM AND RF (K2SC_ALPHA) MODEL\n# =========================================\n\n# First need to collate master table data and SOM data using training set alpha\ndef make_model_table():\n\ttraining_epics = pd.read_csv('{}/training_sets/k2sc/c1-4_alpha.csv'.format(project_dir))['epic_number'].to_numpy()\n\n\tmaster_c1 = pd.read_csv('{}/tables/k2sc/campaign_1_master_table.csv'.format(project_dir))\n\tmaster_c2 = pd.read_csv('{}/tables/k2sc/campaign_2_master_table.csv'.format(project_dir))\n\tmaster_c3 = pd.read_csv('{}/tables/k2sc/campaign_3_master_table.csv'.format(project_dir))\n\tmaster_c4 = pd.read_csv('{}/tables/k2sc/campaign_4_master_table.csv'.format(project_dir))\n\tdata_master = master_c1.append(master_c2, ignore_index=True).append(master_c3, ignore_index=True).append(master_c4, ignore_index=True)\n\tbin_columns = make_bin_columns(64)\n\tdata_master = data_master.drop(bin_columns, axis=1)\n\tdata_train = data_master[data_master['epic_number'].isin(training_epics)]\n\n\tsom_c1 = pd.read_csv('{}/som_statistics/k2sc/c1-4_alpha/campaign_1.csv'.format(project_dir))\n\tsom_c2 = pd.read_csv('{}/som_statistics/k2sc/c1-4_alpha/campaign_2.csv'.format(project_dir))\n\tsom_c3 = pd.read_csv('{}/som_statistics/k2sc/c1-4_alpha/campaign_3.csv'.format(project_dir))\n\tsom_c4 = pd.read_csv('{}/som_statistics/k2sc/c1-4_alpha/campaign_4.csv'.format(project_dir))\n\tsom_master = som_c1.append(som_c2, ignore_index=True).append(som_c3, ignore_index=True).append(som_c4, ignore_index=True)\n\tsom_train = som_master[som_master['epic_number'].isin(training_epics)]\n\n\ttrain_df = data_train.merge(som_train, how='left', on='epic_number')\n\t# Drop these for now\n\ttrain_df = train_df.drop('k2_teff', axis=1).drop('k2_rad', axis=1).drop('k2_mass', axis=1)\n\ttrain_df.to_csv('{}/src/models/som_and_rf_alpha/train_FULL.csv'.format(project_dir), index=False)\n\tprint('Model Table Created!')\n\tprint(len(train_df.columns))\n\treturn\n\n\n# Things that I want from an overall model\n# - Overall score (means and variance for stability)\n# - Confusion Matrix (means and variance)\n# - Feature Importance\n# - Learning Curve\n# - summary f_1 scores?\n\ndef SOM_and_RF_alpha():\n\n\tmodel_label = 'alpha'\n\tmodel_number = sys.argv[1]\n\n\tprint_model_type(\"SOM and Random Forest\")\n\n\t# Import global training data. Contains roughly 100 of each class.\n\ttraining_file = \"{}/src/models/som_and_rf_alpha/train.csv\".format(project_dir)\n\tdf = pd.read_csv(training_file)\n\tprint(\"Using training file: {}\".format(training_file))\n\n\t# Fill empty entries. Maybe try filling these later.\n\tdf = df.fillna(-1)\n\n\t# Features to be tested. 
Column 0 is epics.\n\tfeatures = df.drop('class', axis=1).drop('probability', axis=1).columns[1:len(df.columns)-2]\n\n\tblank_classifier = RCF(random_state=2, class_weight='balanced')\n#\tparameters = {'n_estimators':[300, 400, 500, 600],\\\n#\t 'min_samples_split':[2, 3, 4, 5, 6],\\\n#\t 'max_features':[4, 5, 6, 7, 8] }\n\tparameters = {'n_estimators':[300],\\\n\t 'min_samples_split':[3],\\\n\t 'max_features':[4] }\n\tevaluate_model(model_label, model_number, blank_classifier, df, parameters, features, in_cv=5, out_cv=5)\n\n\t# Do learning curve analysis here\n\n\treturn\n\n\n# =============================================================================\n# MAIN\n# =============================================================================\n\ndef main():\n\tmake_model_table()\n#\tSOM_and_RF_alpha()\n\nif __name__ == \"__main__\":\n\tmain()\n",
"from models.model_utilities import *\n\n\ndef make_model_table():\n training_epics = pd.read_csv('{}/training_sets/k2sc/c1-4_alpha.csv'.\n format(project_dir))['epic_number'].to_numpy()\n master_c1 = pd.read_csv('{}/tables/k2sc/campaign_1_master_table.csv'.\n format(project_dir))\n master_c2 = pd.read_csv('{}/tables/k2sc/campaign_2_master_table.csv'.\n format(project_dir))\n master_c3 = pd.read_csv('{}/tables/k2sc/campaign_3_master_table.csv'.\n format(project_dir))\n master_c4 = pd.read_csv('{}/tables/k2sc/campaign_4_master_table.csv'.\n format(project_dir))\n data_master = master_c1.append(master_c2, ignore_index=True).append(\n master_c3, ignore_index=True).append(master_c4, ignore_index=True)\n bin_columns = make_bin_columns(64)\n data_master = data_master.drop(bin_columns, axis=1)\n data_train = data_master[data_master['epic_number'].isin(training_epics)]\n som_c1 = pd.read_csv('{}/som_statistics/k2sc/c1-4_alpha/campaign_1.csv'\n .format(project_dir))\n som_c2 = pd.read_csv('{}/som_statistics/k2sc/c1-4_alpha/campaign_2.csv'\n .format(project_dir))\n som_c3 = pd.read_csv('{}/som_statistics/k2sc/c1-4_alpha/campaign_3.csv'\n .format(project_dir))\n som_c4 = pd.read_csv('{}/som_statistics/k2sc/c1-4_alpha/campaign_4.csv'\n .format(project_dir))\n som_master = som_c1.append(som_c2, ignore_index=True).append(som_c3,\n ignore_index=True).append(som_c4, ignore_index=True)\n som_train = som_master[som_master['epic_number'].isin(training_epics)]\n train_df = data_train.merge(som_train, how='left', on='epic_number')\n train_df = train_df.drop('k2_teff', axis=1).drop('k2_rad', axis=1).drop(\n 'k2_mass', axis=1)\n train_df.to_csv('{}/src/models/som_and_rf_alpha/train_FULL.csv'.format(\n project_dir), index=False)\n print('Model Table Created!')\n print(len(train_df.columns))\n return\n\n\ndef SOM_and_RF_alpha():\n model_label = 'alpha'\n model_number = sys.argv[1]\n print_model_type('SOM and Random Forest')\n training_file = '{}/src/models/som_and_rf_alpha/train.csv'.format(\n project_dir)\n df = pd.read_csv(training_file)\n print('Using training file: {}'.format(training_file))\n df = df.fillna(-1)\n features = df.drop('class', axis=1).drop('probability', axis=1).columns[\n 1:len(df.columns) - 2]\n blank_classifier = RCF(random_state=2, class_weight='balanced')\n parameters = {'n_estimators': [300], 'min_samples_split': [3],\n 'max_features': [4]}\n evaluate_model(model_label, model_number, blank_classifier, df,\n parameters, features, in_cv=5, out_cv=5)\n return\n\n\ndef main():\n make_model_table()\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n\n\ndef make_model_table():\n training_epics = pd.read_csv('{}/training_sets/k2sc/c1-4_alpha.csv'.\n format(project_dir))['epic_number'].to_numpy()\n master_c1 = pd.read_csv('{}/tables/k2sc/campaign_1_master_table.csv'.\n format(project_dir))\n master_c2 = pd.read_csv('{}/tables/k2sc/campaign_2_master_table.csv'.\n format(project_dir))\n master_c3 = pd.read_csv('{}/tables/k2sc/campaign_3_master_table.csv'.\n format(project_dir))\n master_c4 = pd.read_csv('{}/tables/k2sc/campaign_4_master_table.csv'.\n format(project_dir))\n data_master = master_c1.append(master_c2, ignore_index=True).append(\n master_c3, ignore_index=True).append(master_c4, ignore_index=True)\n bin_columns = make_bin_columns(64)\n data_master = data_master.drop(bin_columns, axis=1)\n data_train = data_master[data_master['epic_number'].isin(training_epics)]\n som_c1 = pd.read_csv('{}/som_statistics/k2sc/c1-4_alpha/campaign_1.csv'\n .format(project_dir))\n som_c2 = pd.read_csv('{}/som_statistics/k2sc/c1-4_alpha/campaign_2.csv'\n .format(project_dir))\n som_c3 = pd.read_csv('{}/som_statistics/k2sc/c1-4_alpha/campaign_3.csv'\n .format(project_dir))\n som_c4 = pd.read_csv('{}/som_statistics/k2sc/c1-4_alpha/campaign_4.csv'\n .format(project_dir))\n som_master = som_c1.append(som_c2, ignore_index=True).append(som_c3,\n ignore_index=True).append(som_c4, ignore_index=True)\n som_train = som_master[som_master['epic_number'].isin(training_epics)]\n train_df = data_train.merge(som_train, how='left', on='epic_number')\n train_df = train_df.drop('k2_teff', axis=1).drop('k2_rad', axis=1).drop(\n 'k2_mass', axis=1)\n train_df.to_csv('{}/src/models/som_and_rf_alpha/train_FULL.csv'.format(\n project_dir), index=False)\n print('Model Table Created!')\n print(len(train_df.columns))\n return\n\n\ndef SOM_and_RF_alpha():\n model_label = 'alpha'\n model_number = sys.argv[1]\n print_model_type('SOM and Random Forest')\n training_file = '{}/src/models/som_and_rf_alpha/train.csv'.format(\n project_dir)\n df = pd.read_csv(training_file)\n print('Using training file: {}'.format(training_file))\n df = df.fillna(-1)\n features = df.drop('class', axis=1).drop('probability', axis=1).columns[\n 1:len(df.columns) - 2]\n blank_classifier = RCF(random_state=2, class_weight='balanced')\n parameters = {'n_estimators': [300], 'min_samples_split': [3],\n 'max_features': [4]}\n evaluate_model(model_label, model_number, blank_classifier, df,\n parameters, features, in_cv=5, out_cv=5)\n return\n\n\ndef main():\n make_model_table()\n\n\nif __name__ == '__main__':\n main()\n",
"<import token>\n\n\ndef make_model_table():\n training_epics = pd.read_csv('{}/training_sets/k2sc/c1-4_alpha.csv'.\n format(project_dir))['epic_number'].to_numpy()\n master_c1 = pd.read_csv('{}/tables/k2sc/campaign_1_master_table.csv'.\n format(project_dir))\n master_c2 = pd.read_csv('{}/tables/k2sc/campaign_2_master_table.csv'.\n format(project_dir))\n master_c3 = pd.read_csv('{}/tables/k2sc/campaign_3_master_table.csv'.\n format(project_dir))\n master_c4 = pd.read_csv('{}/tables/k2sc/campaign_4_master_table.csv'.\n format(project_dir))\n data_master = master_c1.append(master_c2, ignore_index=True).append(\n master_c3, ignore_index=True).append(master_c4, ignore_index=True)\n bin_columns = make_bin_columns(64)\n data_master = data_master.drop(bin_columns, axis=1)\n data_train = data_master[data_master['epic_number'].isin(training_epics)]\n som_c1 = pd.read_csv('{}/som_statistics/k2sc/c1-4_alpha/campaign_1.csv'\n .format(project_dir))\n som_c2 = pd.read_csv('{}/som_statistics/k2sc/c1-4_alpha/campaign_2.csv'\n .format(project_dir))\n som_c3 = pd.read_csv('{}/som_statistics/k2sc/c1-4_alpha/campaign_3.csv'\n .format(project_dir))\n som_c4 = pd.read_csv('{}/som_statistics/k2sc/c1-4_alpha/campaign_4.csv'\n .format(project_dir))\n som_master = som_c1.append(som_c2, ignore_index=True).append(som_c3,\n ignore_index=True).append(som_c4, ignore_index=True)\n som_train = som_master[som_master['epic_number'].isin(training_epics)]\n train_df = data_train.merge(som_train, how='left', on='epic_number')\n train_df = train_df.drop('k2_teff', axis=1).drop('k2_rad', axis=1).drop(\n 'k2_mass', axis=1)\n train_df.to_csv('{}/src/models/som_and_rf_alpha/train_FULL.csv'.format(\n project_dir), index=False)\n print('Model Table Created!')\n print(len(train_df.columns))\n return\n\n\ndef SOM_and_RF_alpha():\n model_label = 'alpha'\n model_number = sys.argv[1]\n print_model_type('SOM and Random Forest')\n training_file = '{}/src/models/som_and_rf_alpha/train.csv'.format(\n project_dir)\n df = pd.read_csv(training_file)\n print('Using training file: {}'.format(training_file))\n df = df.fillna(-1)\n features = df.drop('class', axis=1).drop('probability', axis=1).columns[\n 1:len(df.columns) - 2]\n blank_classifier = RCF(random_state=2, class_weight='balanced')\n parameters = {'n_estimators': [300], 'min_samples_split': [3],\n 'max_features': [4]}\n evaluate_model(model_label, model_number, blank_classifier, df,\n parameters, features, in_cv=5, out_cv=5)\n return\n\n\ndef main():\n make_model_table()\n\n\n<code token>\n",
"<import token>\n<function token>\n\n\ndef SOM_and_RF_alpha():\n model_label = 'alpha'\n model_number = sys.argv[1]\n print_model_type('SOM and Random Forest')\n training_file = '{}/src/models/som_and_rf_alpha/train.csv'.format(\n project_dir)\n df = pd.read_csv(training_file)\n print('Using training file: {}'.format(training_file))\n df = df.fillna(-1)\n features = df.drop('class', axis=1).drop('probability', axis=1).columns[\n 1:len(df.columns) - 2]\n blank_classifier = RCF(random_state=2, class_weight='balanced')\n parameters = {'n_estimators': [300], 'min_samples_split': [3],\n 'max_features': [4]}\n evaluate_model(model_label, model_number, blank_classifier, df,\n parameters, features, in_cv=5, out_cv=5)\n return\n\n\ndef main():\n make_model_table()\n\n\n<code token>\n",
"<import token>\n<function token>\n\n\ndef SOM_and_RF_alpha():\n model_label = 'alpha'\n model_number = sys.argv[1]\n print_model_type('SOM and Random Forest')\n training_file = '{}/src/models/som_and_rf_alpha/train.csv'.format(\n project_dir)\n df = pd.read_csv(training_file)\n print('Using training file: {}'.format(training_file))\n df = df.fillna(-1)\n features = df.drop('class', axis=1).drop('probability', axis=1).columns[\n 1:len(df.columns) - 2]\n blank_classifier = RCF(random_state=2, class_weight='balanced')\n parameters = {'n_estimators': [300], 'min_samples_split': [3],\n 'max_features': [4]}\n evaluate_model(model_label, model_number, blank_classifier, df,\n parameters, features, in_cv=5, out_cv=5)\n return\n\n\n<function token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
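evaluate_model and RCF come from the project's model_utilities module, which is not included here; the in_cv=5/out_cv=5 arguments suggest a grid search nested inside an outer cross-validation. A plausible scikit-learn sketch of that pattern on synthetic data (the real feature table, class labels, and scoring are not reproduced):

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV, cross_val_score

# Synthetic stand-in for the train.csv feature table.
X, y = make_classification(n_samples=500, n_features=20, n_classes=3,
                           n_informative=8, random_state=2)

clf = RandomForestClassifier(random_state=2, class_weight='balanced')
parameters = {'n_estimators': [300], 'min_samples_split': [3], 'max_features': [4]}

search = GridSearchCV(clf, parameters, cv=5)    # inner loop, cf. in_cv=5
scores = cross_val_score(search, X, y, cv=5)    # outer loop, cf. out_cv=5
print('mean accuracy: %.3f +/- %.3f' % (scores.mean(), scores.std()))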
99,380 |
abe3c5da35fb8905ce99392cdce6ed2961c6dd3f
|
from django.contrib import admin

from dashboard.sensors.models import Sensor

class SensorModelAdmin(admin.ModelAdmin):
    list_display = ["name", "timestamp", "user", "active"]
    list_filter = ["timestamp"]
    search_fields = ["name"]

    class Meta:
        model = Sensor

admin.site.register(Sensor, SensorModelAdmin)
|
[
"from django.contrib import admin\n\nfrom dashboard.sensors.models import Sensor\n\nclass SensorModelAdmin(admin.ModelAdmin):\n list_display = [\"name\", \"timestamp\", \"user\", \"active\"]\n list_filter = [\"timestamp\"]\n search_fields = [\"name\"]\n\n class Meta:\n model = Sensor\n\nadmin.site.register(Sensor, SensorModelAdmin)\n",
"from django.contrib import admin\nfrom dashboard.sensors.models import Sensor\n\n\nclass SensorModelAdmin(admin.ModelAdmin):\n list_display = ['name', 'timestamp', 'user', 'active']\n list_filter = ['timestamp']\n search_fields = ['name']\n\n\n class Meta:\n model = Sensor\n\n\nadmin.site.register(Sensor, SensorModelAdmin)\n",
"<import token>\n\n\nclass SensorModelAdmin(admin.ModelAdmin):\n list_display = ['name', 'timestamp', 'user', 'active']\n list_filter = ['timestamp']\n search_fields = ['name']\n\n\n class Meta:\n model = Sensor\n\n\nadmin.site.register(Sensor, SensorModelAdmin)\n",
"<import token>\n\n\nclass SensorModelAdmin(admin.ModelAdmin):\n list_display = ['name', 'timestamp', 'user', 'active']\n list_filter = ['timestamp']\n search_fields = ['name']\n\n\n class Meta:\n model = Sensor\n\n\n<code token>\n",
"<import token>\n\n\nclass SensorModelAdmin(admin.ModelAdmin):\n <assignment token>\n <assignment token>\n <assignment token>\n\n\n class Meta:\n model = Sensor\n\n\n<code token>\n",
"<import token>\n<class token>\n<code token>\n"
] | false |
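The admin.site.register(Sensor, SensorModelAdmin) call above can equivalently be written with Django's @admin.register decorator; a sketch of that variant (it assumes the same project layout and, like any admin module, only runs inside a configured Django project):

from django.contrib import admin
from dashboard.sensors.models import Sensor

@admin.register(Sensor)   # replaces the explicit admin.site.register call
class SensorModelAdmin(admin.ModelAdmin):
    list_display = ["name", "timestamp", "user", "active"]
    list_filter = ["timestamp"]
    search_fields = ["name"]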
99,381 |
e375bdce3279b0cb51fb9140b81c1f3cf6808c25
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
from django.shortcuts import render, redirect
from django.contrib import messages
from django.core.mail import EmailMessage
from django.template.loader import get_template
import bcrypt
from .forms import *
from .models import *

# Create your views here.

def login(request):
    email = request.POST['email']
    password = request.POST['password']
    user = User.objects.filter(email=email)
    if len(user) > 0:
        is_pass = bcrypt.checkpw(password.encode(), user[0].password.encode())
        if is_pass:
            request.session['id'] = user[0].id
            messages.success(request, 'Logged In!')
            return redirect('/reviews')
        else:
            messages.error(request, "Incorrect email and/or password")
            return redirect('/login-page')
    else:
        messages.error(request, "User does not exist")
        return redirect('/login-page')

def register(request):
    errors = User.objects.validate_user(request.POST)
    if len(errors):
        for tag, error in errors.iteritems():
            messages.error(request, error)
        return redirect('/register-page')
    else:
        name = request.POST['name']
        email = request.POST['email']
        password = request.POST['password']
        hashed_pw = bcrypt.hashpw(password.encode(), bcrypt.gensalt())
        User.objects.create(name=name, email=email, password=hashed_pw)
        messages.success(request, 'User Registered')
        return redirect('/login-page')

def logout(request):
    request.session.clear()
    return redirect('/')

def index(request):
    return render(request, 'breath/index.html')

def about(request):
    return render(request, 'breath/about.html')

def register_page(request):
    return render(request, 'breath/register.html')

def login_page(request):
    return render(request, 'breath/login.html')

def gallery(request):
    return render(request, 'breath/gallery.html')

def map(request):
    return render(request, 'breath/map.html')

def reviews(request):
    reviews = Review.objects.all()
    context = {
        "reviews": reviews
    }
    return render(request, 'breath/reviews.html', context)

def add_review(request):
    errors = Review.objects.validate_review(request.POST)
    if len(errors):
        for tag, error in errors.iteritems():
            messages.error(request, error)
        return redirect('/reviews')
    else:
        user = User.objects.get(id=request.session['id'])
        title = request.POST['title']
        body = request.POST['body']
        rating = request.POST['rating']
        Review.objects.create(title=title, body=body, rating=rating, user=user)
        messages.success(request, 'Review Created')
        return redirect('/reviews')


def contact(request):
    form_class = ContactForm
    if request.method == 'POST':
        form = form_class(data=request.POST)
        if form.is_valid():
            contact_name = request.POST.get(
                'contact_name'
            , '')
            contact_email = request.POST.get(
                'contact_email'
            , '')
            form_content = request.POST.get('content', '')
            # Email the profile with the
            # contact information
            template = get_template('breath/contact_template.txt')
            context = {
                'contact_name': contact_name,
                'contact_email': contact_email,
                'form_content': form_content,
            }
            content = template.render(context)
            email = EmailMessage(
                "New contact form submission",
                content,
                "Your website" +'',
                ['[email protected]'],
                headers = {'Reply-To': contact_email }
            )
            email.send()
            return redirect('/contact')
    return render(request, 'breath/contact.html', {
        'form': form_class,
    })
|
[
"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom django.core.validators import validate_email\nfrom django.core.exceptions import ValidationError\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.core.mail import EmailMessage\nfrom django.template.loader import get_template\nimport bcrypt\nfrom .forms import *\nfrom .models import *\n\n# Create your views here.\n\ndef login(request):\n email = request.POST['email']\n password = request.POST['password']\n user = User.objects.filter(email=email)\n if len(user) > 0:\n is_pass = bcrypt.checkpw(password.encode(), user[0].password.encode())\n if is_pass:\n request.session['id'] = user[0].id\n messages.success(request, 'Logged In!')\n return redirect('/reviews')\n else:\n messages.error(request, \"Incorrect email and/or password\")\n return redirect('/login-page')\n else:\n messages.error(request, \"User does not exist\")\n return redirect('/login-page')\n\ndef register(request):\n errors = User.objects.validate_user(request.POST)\n if len(errors):\n for tag, error in errors.iteritems():\n messages.error(request, error)\n return redirect('/register-page')\n else:\n name = request.POST['name']\n email = request.POST['email']\n password = request.POST['password']\n hashed_pw = bcrypt.hashpw(password.encode(), bcrypt.gensalt())\n User.objects.create(name=name, email=email, password=hashed_pw)\n messages.success(request, 'User Registered')\n return redirect('/login-page')\n\ndef logout(request):\n request.session.clear()\n return redirect('/')\n\ndef index(request):\n return render(request, 'breath/index.html')\n\ndef about(request):\n return render(request, 'breath/about.html')\n\ndef register_page(request):\n return render(request, 'breath/register.html')\n\ndef login_page(request):\n return render(request, 'breath/login.html')\n\ndef gallery(request):\n return render(request, 'breath/gallery.html')\n\ndef map(request):\n return render(request, 'breath/map.html')\n\ndef reviews(request):\n reviews = Review.objects.all()\n context = {\n \"reviews\": reviews\n }\n return render(request, 'breath/reviews.html', context)\n\ndef add_review(request):\n errors = Review.objects.validate_review(request.POST)\n if len(errors):\n for tag, error in errors.iteritems():\n messages.error(request, error)\n return redirect('/reviews')\n else:\n user = User.objects.get(id=request.session['id'])\n title = request.POST['title']\n body = request.POST['body']\n rating = request.POST['rating']\n Review.objects.create(title=title, body=body, rating=rating, user=user)\n messages.success(request, 'Review Created')\n return redirect('/reviews')\n\n\ndef contact(request):\n form_class = ContactForm\n if request.method == 'POST':\n form = form_class(data=request.POST)\n if form.is_valid():\n contact_name = request.POST.get(\n 'contact_name'\n , '')\n contact_email = request.POST.get(\n 'contact_email'\n , '')\n form_content = request.POST.get('content', '')\n # Email the profile with the\n # contact information\n template = get_template('breath/contact_template.txt')\n context = {\n 'contact_name': contact_name,\n 'contact_email': contact_email,\n 'form_content': form_content,\n }\n content = template.render(context)\n email = EmailMessage(\n \"New contact form submission\",\n content,\n \"Your website\" +'',\n ['[email protected]'],\n headers = {'Reply-To': contact_email }\n )\n email.send()\n return redirect('/contact')\n return render(request, 'breath/contact.html', {\n 'form': form_class,\n })\n",
"from __future__ import unicode_literals\nfrom django.core.validators import validate_email\nfrom django.core.exceptions import ValidationError\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.core.mail import EmailMessage\nfrom django.template.loader import get_template\nimport bcrypt\nfrom .forms import *\nfrom .models import *\n\n\ndef login(request):\n email = request.POST['email']\n password = request.POST['password']\n user = User.objects.filter(email=email)\n if len(user) > 0:\n is_pass = bcrypt.checkpw(password.encode(), user[0].password.encode())\n if is_pass:\n request.session['id'] = user[0].id\n messages.success(request, 'Logged In!')\n return redirect('/reviews')\n else:\n messages.error(request, 'Incorrect email and/or password')\n return redirect('/login-page')\n else:\n messages.error(request, 'User does not exist')\n return redirect('/login-page')\n\n\ndef register(request):\n errors = User.objects.validate_user(request.POST)\n if len(errors):\n for tag, error in errors.iteritems():\n messages.error(request, error)\n return redirect('/register-page')\n else:\n name = request.POST['name']\n email = request.POST['email']\n password = request.POST['password']\n hashed_pw = bcrypt.hashpw(password.encode(), bcrypt.gensalt())\n User.objects.create(name=name, email=email, password=hashed_pw)\n messages.success(request, 'User Registered')\n return redirect('/login-page')\n\n\ndef logout(request):\n request.session.clear()\n return redirect('/')\n\n\ndef index(request):\n return render(request, 'breath/index.html')\n\n\ndef about(request):\n return render(request, 'breath/about.html')\n\n\ndef register_page(request):\n return render(request, 'breath/register.html')\n\n\ndef login_page(request):\n return render(request, 'breath/login.html')\n\n\ndef gallery(request):\n return render(request, 'breath/gallery.html')\n\n\ndef map(request):\n return render(request, 'breath/map.html')\n\n\ndef reviews(request):\n reviews = Review.objects.all()\n context = {'reviews': reviews}\n return render(request, 'breath/reviews.html', context)\n\n\ndef add_review(request):\n errors = Review.objects.validate_review(request.POST)\n if len(errors):\n for tag, error in errors.iteritems():\n messages.error(request, error)\n return redirect('/reviews')\n else:\n user = User.objects.get(id=request.session['id'])\n title = request.POST['title']\n body = request.POST['body']\n rating = request.POST['rating']\n Review.objects.create(title=title, body=body, rating=rating, user=user)\n messages.success(request, 'Review Created')\n return redirect('/reviews')\n\n\ndef contact(request):\n form_class = ContactForm\n if request.method == 'POST':\n form = form_class(data=request.POST)\n if form.is_valid():\n contact_name = request.POST.get('contact_name', '')\n contact_email = request.POST.get('contact_email', '')\n form_content = request.POST.get('content', '')\n template = get_template('breath/contact_template.txt')\n context = {'contact_name': contact_name, 'contact_email':\n contact_email, 'form_content': form_content}\n content = template.render(context)\n email = EmailMessage('New contact form submission', content, \n 'Your website' + '', ['[email protected]'], headers={\n 'Reply-To': contact_email})\n email.send()\n return redirect('/contact')\n return render(request, 'breath/contact.html', {'form': form_class})\n",
"<import token>\n\n\ndef login(request):\n email = request.POST['email']\n password = request.POST['password']\n user = User.objects.filter(email=email)\n if len(user) > 0:\n is_pass = bcrypt.checkpw(password.encode(), user[0].password.encode())\n if is_pass:\n request.session['id'] = user[0].id\n messages.success(request, 'Logged In!')\n return redirect('/reviews')\n else:\n messages.error(request, 'Incorrect email and/or password')\n return redirect('/login-page')\n else:\n messages.error(request, 'User does not exist')\n return redirect('/login-page')\n\n\ndef register(request):\n errors = User.objects.validate_user(request.POST)\n if len(errors):\n for tag, error in errors.iteritems():\n messages.error(request, error)\n return redirect('/register-page')\n else:\n name = request.POST['name']\n email = request.POST['email']\n password = request.POST['password']\n hashed_pw = bcrypt.hashpw(password.encode(), bcrypt.gensalt())\n User.objects.create(name=name, email=email, password=hashed_pw)\n messages.success(request, 'User Registered')\n return redirect('/login-page')\n\n\ndef logout(request):\n request.session.clear()\n return redirect('/')\n\n\ndef index(request):\n return render(request, 'breath/index.html')\n\n\ndef about(request):\n return render(request, 'breath/about.html')\n\n\ndef register_page(request):\n return render(request, 'breath/register.html')\n\n\ndef login_page(request):\n return render(request, 'breath/login.html')\n\n\ndef gallery(request):\n return render(request, 'breath/gallery.html')\n\n\ndef map(request):\n return render(request, 'breath/map.html')\n\n\ndef reviews(request):\n reviews = Review.objects.all()\n context = {'reviews': reviews}\n return render(request, 'breath/reviews.html', context)\n\n\ndef add_review(request):\n errors = Review.objects.validate_review(request.POST)\n if len(errors):\n for tag, error in errors.iteritems():\n messages.error(request, error)\n return redirect('/reviews')\n else:\n user = User.objects.get(id=request.session['id'])\n title = request.POST['title']\n body = request.POST['body']\n rating = request.POST['rating']\n Review.objects.create(title=title, body=body, rating=rating, user=user)\n messages.success(request, 'Review Created')\n return redirect('/reviews')\n\n\ndef contact(request):\n form_class = ContactForm\n if request.method == 'POST':\n form = form_class(data=request.POST)\n if form.is_valid():\n contact_name = request.POST.get('contact_name', '')\n contact_email = request.POST.get('contact_email', '')\n form_content = request.POST.get('content', '')\n template = get_template('breath/contact_template.txt')\n context = {'contact_name': contact_name, 'contact_email':\n contact_email, 'form_content': form_content}\n content = template.render(context)\n email = EmailMessage('New contact form submission', content, \n 'Your website' + '', ['[email protected]'], headers={\n 'Reply-To': contact_email})\n email.send()\n return redirect('/contact')\n return render(request, 'breath/contact.html', {'form': form_class})\n",
"<import token>\n\n\ndef login(request):\n email = request.POST['email']\n password = request.POST['password']\n user = User.objects.filter(email=email)\n if len(user) > 0:\n is_pass = bcrypt.checkpw(password.encode(), user[0].password.encode())\n if is_pass:\n request.session['id'] = user[0].id\n messages.success(request, 'Logged In!')\n return redirect('/reviews')\n else:\n messages.error(request, 'Incorrect email and/or password')\n return redirect('/login-page')\n else:\n messages.error(request, 'User does not exist')\n return redirect('/login-page')\n\n\ndef register(request):\n errors = User.objects.validate_user(request.POST)\n if len(errors):\n for tag, error in errors.iteritems():\n messages.error(request, error)\n return redirect('/register-page')\n else:\n name = request.POST['name']\n email = request.POST['email']\n password = request.POST['password']\n hashed_pw = bcrypt.hashpw(password.encode(), bcrypt.gensalt())\n User.objects.create(name=name, email=email, password=hashed_pw)\n messages.success(request, 'User Registered')\n return redirect('/login-page')\n\n\ndef logout(request):\n request.session.clear()\n return redirect('/')\n\n\ndef index(request):\n return render(request, 'breath/index.html')\n\n\ndef about(request):\n return render(request, 'breath/about.html')\n\n\ndef register_page(request):\n return render(request, 'breath/register.html')\n\n\ndef login_page(request):\n return render(request, 'breath/login.html')\n\n\n<function token>\n\n\ndef map(request):\n return render(request, 'breath/map.html')\n\n\ndef reviews(request):\n reviews = Review.objects.all()\n context = {'reviews': reviews}\n return render(request, 'breath/reviews.html', context)\n\n\ndef add_review(request):\n errors = Review.objects.validate_review(request.POST)\n if len(errors):\n for tag, error in errors.iteritems():\n messages.error(request, error)\n return redirect('/reviews')\n else:\n user = User.objects.get(id=request.session['id'])\n title = request.POST['title']\n body = request.POST['body']\n rating = request.POST['rating']\n Review.objects.create(title=title, body=body, rating=rating, user=user)\n messages.success(request, 'Review Created')\n return redirect('/reviews')\n\n\ndef contact(request):\n form_class = ContactForm\n if request.method == 'POST':\n form = form_class(data=request.POST)\n if form.is_valid():\n contact_name = request.POST.get('contact_name', '')\n contact_email = request.POST.get('contact_email', '')\n form_content = request.POST.get('content', '')\n template = get_template('breath/contact_template.txt')\n context = {'contact_name': contact_name, 'contact_email':\n contact_email, 'form_content': form_content}\n content = template.render(context)\n email = EmailMessage('New contact form submission', content, \n 'Your website' + '', ['[email protected]'], headers={\n 'Reply-To': contact_email})\n email.send()\n return redirect('/contact')\n return render(request, 'breath/contact.html', {'form': form_class})\n",
"<import token>\n<function token>\n\n\ndef register(request):\n errors = User.objects.validate_user(request.POST)\n if len(errors):\n for tag, error in errors.iteritems():\n messages.error(request, error)\n return redirect('/register-page')\n else:\n name = request.POST['name']\n email = request.POST['email']\n password = request.POST['password']\n hashed_pw = bcrypt.hashpw(password.encode(), bcrypt.gensalt())\n User.objects.create(name=name, email=email, password=hashed_pw)\n messages.success(request, 'User Registered')\n return redirect('/login-page')\n\n\ndef logout(request):\n request.session.clear()\n return redirect('/')\n\n\ndef index(request):\n return render(request, 'breath/index.html')\n\n\ndef about(request):\n return render(request, 'breath/about.html')\n\n\ndef register_page(request):\n return render(request, 'breath/register.html')\n\n\ndef login_page(request):\n return render(request, 'breath/login.html')\n\n\n<function token>\n\n\ndef map(request):\n return render(request, 'breath/map.html')\n\n\ndef reviews(request):\n reviews = Review.objects.all()\n context = {'reviews': reviews}\n return render(request, 'breath/reviews.html', context)\n\n\ndef add_review(request):\n errors = Review.objects.validate_review(request.POST)\n if len(errors):\n for tag, error in errors.iteritems():\n messages.error(request, error)\n return redirect('/reviews')\n else:\n user = User.objects.get(id=request.session['id'])\n title = request.POST['title']\n body = request.POST['body']\n rating = request.POST['rating']\n Review.objects.create(title=title, body=body, rating=rating, user=user)\n messages.success(request, 'Review Created')\n return redirect('/reviews')\n\n\ndef contact(request):\n form_class = ContactForm\n if request.method == 'POST':\n form = form_class(data=request.POST)\n if form.is_valid():\n contact_name = request.POST.get('contact_name', '')\n contact_email = request.POST.get('contact_email', '')\n form_content = request.POST.get('content', '')\n template = get_template('breath/contact_template.txt')\n context = {'contact_name': contact_name, 'contact_email':\n contact_email, 'form_content': form_content}\n content = template.render(context)\n email = EmailMessage('New contact form submission', content, \n 'Your website' + '', ['[email protected]'], headers={\n 'Reply-To': contact_email})\n email.send()\n return redirect('/contact')\n return render(request, 'breath/contact.html', {'form': form_class})\n",
"<import token>\n<function token>\n\n\ndef register(request):\n errors = User.objects.validate_user(request.POST)\n if len(errors):\n for tag, error in errors.iteritems():\n messages.error(request, error)\n return redirect('/register-page')\n else:\n name = request.POST['name']\n email = request.POST['email']\n password = request.POST['password']\n hashed_pw = bcrypt.hashpw(password.encode(), bcrypt.gensalt())\n User.objects.create(name=name, email=email, password=hashed_pw)\n messages.success(request, 'User Registered')\n return redirect('/login-page')\n\n\ndef logout(request):\n request.session.clear()\n return redirect('/')\n\n\ndef index(request):\n return render(request, 'breath/index.html')\n\n\ndef about(request):\n return render(request, 'breath/about.html')\n\n\ndef register_page(request):\n return render(request, 'breath/register.html')\n\n\ndef login_page(request):\n return render(request, 'breath/login.html')\n\n\n<function token>\n\n\ndef map(request):\n return render(request, 'breath/map.html')\n\n\n<function token>\n\n\ndef add_review(request):\n errors = Review.objects.validate_review(request.POST)\n if len(errors):\n for tag, error in errors.iteritems():\n messages.error(request, error)\n return redirect('/reviews')\n else:\n user = User.objects.get(id=request.session['id'])\n title = request.POST['title']\n body = request.POST['body']\n rating = request.POST['rating']\n Review.objects.create(title=title, body=body, rating=rating, user=user)\n messages.success(request, 'Review Created')\n return redirect('/reviews')\n\n\ndef contact(request):\n form_class = ContactForm\n if request.method == 'POST':\n form = form_class(data=request.POST)\n if form.is_valid():\n contact_name = request.POST.get('contact_name', '')\n contact_email = request.POST.get('contact_email', '')\n form_content = request.POST.get('content', '')\n template = get_template('breath/contact_template.txt')\n context = {'contact_name': contact_name, 'contact_email':\n contact_email, 'form_content': form_content}\n content = template.render(context)\n email = EmailMessage('New contact form submission', content, \n 'Your website' + '', ['[email protected]'], headers={\n 'Reply-To': contact_email})\n email.send()\n return redirect('/contact')\n return render(request, 'breath/contact.html', {'form': form_class})\n",
"<import token>\n<function token>\n\n\ndef register(request):\n errors = User.objects.validate_user(request.POST)\n if len(errors):\n for tag, error in errors.iteritems():\n messages.error(request, error)\n return redirect('/register-page')\n else:\n name = request.POST['name']\n email = request.POST['email']\n password = request.POST['password']\n hashed_pw = bcrypt.hashpw(password.encode(), bcrypt.gensalt())\n User.objects.create(name=name, email=email, password=hashed_pw)\n messages.success(request, 'User Registered')\n return redirect('/login-page')\n\n\ndef logout(request):\n request.session.clear()\n return redirect('/')\n\n\ndef index(request):\n return render(request, 'breath/index.html')\n\n\ndef about(request):\n return render(request, 'breath/about.html')\n\n\ndef register_page(request):\n return render(request, 'breath/register.html')\n\n\n<function token>\n<function token>\n\n\ndef map(request):\n return render(request, 'breath/map.html')\n\n\n<function token>\n\n\ndef add_review(request):\n errors = Review.objects.validate_review(request.POST)\n if len(errors):\n for tag, error in errors.iteritems():\n messages.error(request, error)\n return redirect('/reviews')\n else:\n user = User.objects.get(id=request.session['id'])\n title = request.POST['title']\n body = request.POST['body']\n rating = request.POST['rating']\n Review.objects.create(title=title, body=body, rating=rating, user=user)\n messages.success(request, 'Review Created')\n return redirect('/reviews')\n\n\ndef contact(request):\n form_class = ContactForm\n if request.method == 'POST':\n form = form_class(data=request.POST)\n if form.is_valid():\n contact_name = request.POST.get('contact_name', '')\n contact_email = request.POST.get('contact_email', '')\n form_content = request.POST.get('content', '')\n template = get_template('breath/contact_template.txt')\n context = {'contact_name': contact_name, 'contact_email':\n contact_email, 'form_content': form_content}\n content = template.render(context)\n email = EmailMessage('New contact form submission', content, \n 'Your website' + '', ['[email protected]'], headers={\n 'Reply-To': contact_email})\n email.send()\n return redirect('/contact')\n return render(request, 'breath/contact.html', {'form': form_class})\n",
"<import token>\n<function token>\n\n\ndef register(request):\n errors = User.objects.validate_user(request.POST)\n if len(errors):\n for tag, error in errors.iteritems():\n messages.error(request, error)\n return redirect('/register-page')\n else:\n name = request.POST['name']\n email = request.POST['email']\n password = request.POST['password']\n hashed_pw = bcrypt.hashpw(password.encode(), bcrypt.gensalt())\n User.objects.create(name=name, email=email, password=hashed_pw)\n messages.success(request, 'User Registered')\n return redirect('/login-page')\n\n\ndef logout(request):\n request.session.clear()\n return redirect('/')\n\n\ndef index(request):\n return render(request, 'breath/index.html')\n\n\n<function token>\n\n\ndef register_page(request):\n return render(request, 'breath/register.html')\n\n\n<function token>\n<function token>\n\n\ndef map(request):\n return render(request, 'breath/map.html')\n\n\n<function token>\n\n\ndef add_review(request):\n errors = Review.objects.validate_review(request.POST)\n if len(errors):\n for tag, error in errors.iteritems():\n messages.error(request, error)\n return redirect('/reviews')\n else:\n user = User.objects.get(id=request.session['id'])\n title = request.POST['title']\n body = request.POST['body']\n rating = request.POST['rating']\n Review.objects.create(title=title, body=body, rating=rating, user=user)\n messages.success(request, 'Review Created')\n return redirect('/reviews')\n\n\ndef contact(request):\n form_class = ContactForm\n if request.method == 'POST':\n form = form_class(data=request.POST)\n if form.is_valid():\n contact_name = request.POST.get('contact_name', '')\n contact_email = request.POST.get('contact_email', '')\n form_content = request.POST.get('content', '')\n template = get_template('breath/contact_template.txt')\n context = {'contact_name': contact_name, 'contact_email':\n contact_email, 'form_content': form_content}\n content = template.render(context)\n email = EmailMessage('New contact form submission', content, \n 'Your website' + '', ['[email protected]'], headers={\n 'Reply-To': contact_email})\n email.send()\n return redirect('/contact')\n return render(request, 'breath/contact.html', {'form': form_class})\n",
"<import token>\n<function token>\n<function token>\n\n\ndef logout(request):\n request.session.clear()\n return redirect('/')\n\n\ndef index(request):\n return render(request, 'breath/index.html')\n\n\n<function token>\n\n\ndef register_page(request):\n return render(request, 'breath/register.html')\n\n\n<function token>\n<function token>\n\n\ndef map(request):\n return render(request, 'breath/map.html')\n\n\n<function token>\n\n\ndef add_review(request):\n errors = Review.objects.validate_review(request.POST)\n if len(errors):\n for tag, error in errors.iteritems():\n messages.error(request, error)\n return redirect('/reviews')\n else:\n user = User.objects.get(id=request.session['id'])\n title = request.POST['title']\n body = request.POST['body']\n rating = request.POST['rating']\n Review.objects.create(title=title, body=body, rating=rating, user=user)\n messages.success(request, 'Review Created')\n return redirect('/reviews')\n\n\ndef contact(request):\n form_class = ContactForm\n if request.method == 'POST':\n form = form_class(data=request.POST)\n if form.is_valid():\n contact_name = request.POST.get('contact_name', '')\n contact_email = request.POST.get('contact_email', '')\n form_content = request.POST.get('content', '')\n template = get_template('breath/contact_template.txt')\n context = {'contact_name': contact_name, 'contact_email':\n contact_email, 'form_content': form_content}\n content = template.render(context)\n email = EmailMessage('New contact form submission', content, \n 'Your website' + '', ['[email protected]'], headers={\n 'Reply-To': contact_email})\n email.send()\n return redirect('/contact')\n return render(request, 'breath/contact.html', {'form': form_class})\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef index(request):\n return render(request, 'breath/index.html')\n\n\n<function token>\n\n\ndef register_page(request):\n return render(request, 'breath/register.html')\n\n\n<function token>\n<function token>\n\n\ndef map(request):\n return render(request, 'breath/map.html')\n\n\n<function token>\n\n\ndef add_review(request):\n errors = Review.objects.validate_review(request.POST)\n if len(errors):\n for tag, error in errors.iteritems():\n messages.error(request, error)\n return redirect('/reviews')\n else:\n user = User.objects.get(id=request.session['id'])\n title = request.POST['title']\n body = request.POST['body']\n rating = request.POST['rating']\n Review.objects.create(title=title, body=body, rating=rating, user=user)\n messages.success(request, 'Review Created')\n return redirect('/reviews')\n\n\ndef contact(request):\n form_class = ContactForm\n if request.method == 'POST':\n form = form_class(data=request.POST)\n if form.is_valid():\n contact_name = request.POST.get('contact_name', '')\n contact_email = request.POST.get('contact_email', '')\n form_content = request.POST.get('content', '')\n template = get_template('breath/contact_template.txt')\n context = {'contact_name': contact_name, 'contact_email':\n contact_email, 'form_content': form_content}\n content = template.render(context)\n email = EmailMessage('New contact form submission', content, \n 'Your website' + '', ['[email protected]'], headers={\n 'Reply-To': contact_email})\n email.send()\n return redirect('/contact')\n return render(request, 'breath/contact.html', {'form': form_class})\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef index(request):\n return render(request, 'breath/index.html')\n\n\n<function token>\n\n\ndef register_page(request):\n return render(request, 'breath/register.html')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef add_review(request):\n errors = Review.objects.validate_review(request.POST)\n if len(errors):\n for tag, error in errors.iteritems():\n messages.error(request, error)\n return redirect('/reviews')\n else:\n user = User.objects.get(id=request.session['id'])\n title = request.POST['title']\n body = request.POST['body']\n rating = request.POST['rating']\n Review.objects.create(title=title, body=body, rating=rating, user=user)\n messages.success(request, 'Review Created')\n return redirect('/reviews')\n\n\ndef contact(request):\n form_class = ContactForm\n if request.method == 'POST':\n form = form_class(data=request.POST)\n if form.is_valid():\n contact_name = request.POST.get('contact_name', '')\n contact_email = request.POST.get('contact_email', '')\n form_content = request.POST.get('content', '')\n template = get_template('breath/contact_template.txt')\n context = {'contact_name': contact_name, 'contact_email':\n contact_email, 'form_content': form_content}\n content = template.render(context)\n email = EmailMessage('New contact form submission', content, \n 'Your website' + '', ['[email protected]'], headers={\n 'Reply-To': contact_email})\n email.send()\n return redirect('/contact')\n return render(request, 'breath/contact.html', {'form': form_class})\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef index(request):\n return render(request, 'breath/index.html')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef add_review(request):\n errors = Review.objects.validate_review(request.POST)\n if len(errors):\n for tag, error in errors.iteritems():\n messages.error(request, error)\n return redirect('/reviews')\n else:\n user = User.objects.get(id=request.session['id'])\n title = request.POST['title']\n body = request.POST['body']\n rating = request.POST['rating']\n Review.objects.create(title=title, body=body, rating=rating, user=user)\n messages.success(request, 'Review Created')\n return redirect('/reviews')\n\n\ndef contact(request):\n form_class = ContactForm\n if request.method == 'POST':\n form = form_class(data=request.POST)\n if form.is_valid():\n contact_name = request.POST.get('contact_name', '')\n contact_email = request.POST.get('contact_email', '')\n form_content = request.POST.get('content', '')\n template = get_template('breath/contact_template.txt')\n context = {'contact_name': contact_name, 'contact_email':\n contact_email, 'form_content': form_content}\n content = template.render(context)\n email = EmailMessage('New contact form submission', content, \n 'Your website' + '', ['[email protected]'], headers={\n 'Reply-To': contact_email})\n email.send()\n return redirect('/contact')\n return render(request, 'breath/contact.html', {'form': form_class})\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef index(request):\n return render(request, 'breath/index.html')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef contact(request):\n form_class = ContactForm\n if request.method == 'POST':\n form = form_class(data=request.POST)\n if form.is_valid():\n contact_name = request.POST.get('contact_name', '')\n contact_email = request.POST.get('contact_email', '')\n form_content = request.POST.get('content', '')\n template = get_template('breath/contact_template.txt')\n context = {'contact_name': contact_name, 'contact_email':\n contact_email, 'form_content': form_content}\n content = template.render(context)\n email = EmailMessage('New contact form submission', content, \n 'Your website' + '', ['[email protected]'], headers={\n 'Reply-To': contact_email})\n email.send()\n return redirect('/contact')\n return render(request, 'breath/contact.html', {'form': form_class})\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef index(request):\n return render(request, 'breath/index.html')\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
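Note on the Django views in the blob above: errors.iteritems() exists only on Python 2 dicts; under Python 3 the same loops raise AttributeError and need items() instead. A minimal, self-contained sketch of the difference (the sample error dict is illustrative, not from the blob):

errors = {'email': 'Email already in use', 'password': 'Password too short'}
for tag, error in errors.items():  # Python 2's errors.iteritems() was removed in Python 3
    print(tag, error)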
99,382 |
f1084c0e6b78397f870309b816df766909b40ed9
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/messenger/inject.py
from messenger import MessengerEntry
class messengerEntryProperty(property):
def __get__(self, obj, objType=None):
return MessengerEntry.g_instance
class channelsCtrlProperty(property):
def __get__(self, obj, objType=None):
return MessengerEntry.g_instance.gui.channelsCtrl
|
[
"# Python bytecode 2.7 (decompiled from Python 2.7)\n# Embedded file name: scripts/client/messenger/inject.py\nfrom messenger import MessengerEntry\n\nclass messengerEntryProperty(property):\n\n def __get__(self, obj, objType=None):\n return MessengerEntry.g_instance\n\n\nclass channelsCtrlProperty(property):\n\n def __get__(self, obj, objType=None):\n return MessengerEntry.g_instance.gui.channelsCtrl\n",
"from messenger import MessengerEntry\n\n\nclass messengerEntryProperty(property):\n\n def __get__(self, obj, objType=None):\n return MessengerEntry.g_instance\n\n\nclass channelsCtrlProperty(property):\n\n def __get__(self, obj, objType=None):\n return MessengerEntry.g_instance.gui.channelsCtrl\n",
"<import token>\n\n\nclass messengerEntryProperty(property):\n\n def __get__(self, obj, objType=None):\n return MessengerEntry.g_instance\n\n\nclass channelsCtrlProperty(property):\n\n def __get__(self, obj, objType=None):\n return MessengerEntry.g_instance.gui.channelsCtrl\n",
"<import token>\n\n\nclass messengerEntryProperty(property):\n <function token>\n\n\nclass channelsCtrlProperty(property):\n\n def __get__(self, obj, objType=None):\n return MessengerEntry.g_instance.gui.channelsCtrl\n",
"<import token>\n<class token>\n\n\nclass channelsCtrlProperty(property):\n\n def __get__(self, obj, objType=None):\n return MessengerEntry.g_instance.gui.channelsCtrl\n",
"<import token>\n<class token>\n\n\nclass channelsCtrlProperty(property):\n <function token>\n",
"<import token>\n<class token>\n<class token>\n"
] | false |
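Note: the blob above relies on the descriptor protocol. Subclassing property and overriding __get__ means any class attribute of these types resolves to the MessengerEntry singleton at access time. A self-contained sketch of the same injection pattern (the registry and consumer names here are illustrative, not from the game client):

class _Registry(object):
    g_instance = None  # singleton slot, filled in at startup

class registryProperty(property):
    def __get__(self, obj, objType=None):
        return _Registry.g_instance  # re-resolved on every access, so late binding works

class Consumer(object):
    registry = registryProperty()

_Registry.g_instance = 'the singleton'
print(Consumer().registry)  # -> the singleton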
99,383 |
dbe32ed7e4a92f63970b49df5cc96c3a6d465b58
|
# this is simply a module that lets the interpreter know this directory is a Python package
from compression import *
from read import *
from date_extraction import *
from detrend import *
from raster_extraction import *
from effect_tools import *
|
[
"# this is simply a module that lets the interpreter know this directory is a Python package\n\nfrom compression import *\nfrom read import *\nfrom date_extraction import *\nfrom detrend import *\nfrom raster_extraction import *\nfrom effect_tools import *",
"from compression import *\nfrom read import *\nfrom date_extraction import *\nfrom detrend import *\nfrom raster_extraction import *\nfrom effect_tools import *\n",
"<import token>\n"
] | false |
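Note: the package __init__ above uses Python 2 implicit relative imports (from compression import *). On Python 3 these fail with ModuleNotFoundError unless the sibling modules happen to be on sys.path; a sketch of the explicit-relative form the same file would need, assuming the modules live inside this package:

from .compression import *
from .read import *
from .date_extraction import *
from .detrend import *
from .raster_extraction import *
from .effect_tools import *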
99,384 |
dee06de3c33f0e38df08169845e6edaf1474b583
|
def merge(dict1,dict2):
return (dict2.update(dict1))
dict1={'a':10,'b':8}
dict2={'x':17,'y':20}
print(merge(dict1,dict2))
print(dict2)
|
[
"def merge(dict1,dict2):\r\n return (dict2.update(dict1))\r\ndict1={'a':10,'b':8}\r\ndict2={'x':17,'y':20}\r\nprint(merge(dict1,dict2))\r\nprint(dict2)\r\n",
"def merge(dict1, dict2):\n return dict2.update(dict1)\n\n\ndict1 = {'a': 10, 'b': 8}\ndict2 = {'x': 17, 'y': 20}\nprint(merge(dict1, dict2))\nprint(dict2)\n",
"def merge(dict1, dict2):\n return dict2.update(dict1)\n\n\n<assignment token>\nprint(merge(dict1, dict2))\nprint(dict2)\n",
"def merge(dict1, dict2):\n return dict2.update(dict1)\n\n\n<assignment token>\n<code token>\n",
"<function token>\n<assignment token>\n<code token>\n"
] | false |
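Note: dict.update() mutates in place and returns None, so merge() above always returns None; the first print shows None and only print(dict2) reveals the merged mapping. A corrected sketch that returns the merge without mutating either input:

def merge(dict1, dict2):
    merged = dict(dict2)  # copy dict2 so the caller's dicts stay untouched
    merged.update(dict1)
    return merged

print(merge({'a': 10, 'b': 8}, {'x': 17, 'y': 20}))  # {'x': 17, 'y': 20, 'a': 10, 'b': 8}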
99,385 |
068f8110cbb9330d0b9f112b7c475259e42ae8b3
|
#!/usr/bin/python3
import os
from formula import formula
fullname = os.environ.get("RIT_FULLNAME")
formula.run(fullname)
|
[
"#!/usr/bin/python3\nimport os\n\nfrom formula import formula\n\nfullname = os.environ.get(\"RIT_FULLNAME\")\n\nformula.run(fullname)\n",
"import os\nfrom formula import formula\nfullname = os.environ.get('RIT_FULLNAME')\nformula.run(fullname)\n",
"<import token>\nfullname = os.environ.get('RIT_FULLNAME')\nformula.run(fullname)\n",
"<import token>\n<assignment token>\nformula.run(fullname)\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
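Note: os.environ.get("RIT_FULLNAME") returns None when the variable is unset, and formula.run() (the formula module is not shown in this blob) would receive that None. A guarded sketch with an explicit default:

import os

fullname = os.environ.get("RIT_FULLNAME", "")  # empty default instead of None
if not fullname:
    raise SystemExit("RIT_FULLNAME is not set")
print(fullname)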
99,386 |
08218372e8fe023accea6eb2728548ea0ff8172f
|
def wrap(string, max_width):
mystring=''
for i in range(len(string)):
if i >= max_width and i % max_width == 0 and i!=0:
mystring += '\n'
mystring+=string[i]
return mystring
|
[
"def wrap(string, max_width):\n mystring=''\n for i in range(len(string)):\n if i >= max_width and i % max_width == 0 and i!=0:\n mystring += '\\n'\n mystring+=string[i]\n return mystring\n",
"def wrap(string, max_width):\n mystring = ''\n for i in range(len(string)):\n if i >= max_width and i % max_width == 0 and i != 0:\n mystring += '\\n'\n mystring += string[i]\n return mystring\n",
"<function token>\n"
] | false |
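Note: wrap() above hard-breaks a string every max_width characters; the condition i >= max_width already implies i != 0, so that last check is redundant. The same behaviour in idiomatic form, next to the stdlib equivalent:

import textwrap

def wrap(string, max_width):
    # slice the string into fixed-width chunks and join with newlines
    return '\n'.join(string[i:i + max_width] for i in range(0, len(string), max_width))

print(wrap('ABCDEFGHIJKLIMNOQRSTUVWXYZ', 4))
print(textwrap.fill('ABCDEFGHIJKLIMNOQRSTUVWXYZ', 4))  # breaks long words the same way by default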
99,387 |
578d2634bc08518f38be4ad90b98a1759d5dca88
|
# -*- coding:utf-8 -*-
'''
Created on Feb 28, 2018
@author: ning.lin
'''
'''
The large-image container has "big" in its class or id:
<div class="pho_big" id="phoBig" style="height: 640px;">
<div class="big_pic fn-clear" id="bigImg">
Small-image container:
<div class="pho_small_box fn-clear mt25 " id="phoSmallPic">
'''
import json
import time
from scrapy import log
from scrapy import cmdline
import scrapy
from scrapy.http import Request
from scrapy.http.request.form import FormRequest
from scrapy_redis.spiders import RedisSpider
from selenium import webdriver
from jiayuan.settings import IMAGES_STORE,USER_NAME,PASSWD
from jiayuan.items import JiayuanItem,MainItem
import redis
class jiayuan_data(RedisSpider):
    pool=redis.ConnectionPool(host='127.0.0.1',port=6379,db=0,decode_responses=True) # 427 records
r = redis.StrictRedis(connection_pool=pool)
name = "jiayuan_main"
redis_key = 'jiayuan_main:start_urls'
url_base = 'http://search.jiayuan.com/v2/index.php?key=&sex=f&stc=&sn=default&sv=1&p=%s&pt=163649&ft=off&f=select&mt=d'
redis_key = "sinaspider:start_urls"
    login_url = 'http://login.jiayuan.com/'# url used when logging in
    start_urls = []
    pre_page_num = 25# each search results page holds 25 records
    # headless simulated login
option = webdriver.ChromeOptions()
option.add_argument('--headless')
option.add_argument("--window-size=1920,1080")
prefs={"profile.managed_default_content_settings.images":2}#禁止加载图片
option.add_experimental_option("prefs",prefs)
try:
driver = webdriver.Chrome(chrome_options=option)
except Exception as e:
driver.close()
print("spider出现了异常,关闭",str(e))
driver.get(login_url)
time.sleep(3)
driver.find_element_by_id("login_btn").click()
driver.find_element_by_id("login_email").clear()
driver.find_element_by_id("login_email").send_keys(USER_NAME) #修改为自己的用户名
driver.find_element_by_id("login_password").clear()
driver.find_element_by_id("login_password").send_keys(PASSWD) #修改为自己的密码
#登录url
#url="http://login.jiayuan.com/"
driver.find_element_by_id("login_btn").click()#点击登录按钮
cookies = driver.get_cookies()#获取cookies
for p in range(1,173649):
search_url = "http://search.jiayuan.com/v2/index.php?key=&sex=f&stc=&sn=default&sv=1&p=%s&pt=173649&ft=off&f=select&mt=d" %(p)
start_urls.append(search_url)
#print("start_urls",len(start_urls))
# start_urls = [
# "http://search.jiayuan.com/v2/search_v2.php",#直接搜索结果,获取个人主页的url(先不登录)
#"https://passport.jiayuan.com/dologin.php?pre_url=http://www.jiayuan.com/usercp",#登录页面post数据
# ]
'''
下载器中间件在下载器和Scrapy引擎之间,每一个request和response都会通过中间件进行处理。
在中间件中,对request进行处理的函数是process_request(request, spider)
'''
    def start_requests(self):
for url in self.start_urls:
yield Request(url=url,callback=self.get_main_info)
# yield scrapy.Request(url=search_url,callback=self.get_main_info)
# return Request(url=url,callback=self.get_main_info)
    def get_main_info(self,response):# parse the urls on a search-results page
        #info = response.body.decode("utf-8") # after logging in you can inspect the login response, e.g. json.loads(
# for url in self.start_urls:
time.sleep(1)
print("当前的url",response.url)
print('重新加载url')
self.driver.get(response.url)
self.driver.implicitly_wait(3)
        user_list = self.driver.find_elements_by_xpath('/html//ul[@id="normal_user_container"]/li//div[@class="user_name"]/a[@class="os_stat"]')# one element per <li> tag
if user_list==[]:
print("user_list为空了,解析有问题")
#print("user_list",type(user_list),user_list)
        url_details = []# detail-page urls
for user in user_list:
main_url_main = user.get_attribute("href")
print("人员主页url",main_url_main)
url_details.append(main_url_main)
#             self.redis_pipe.rpush("p",main_url_main)# optionally also write the detail urls to redis
# self.redis_pipe.execute()
print("人员详情url2",len(url_details))
if url_details!=[]:
for url in url_details:
                yield Request(url=url,cookies=self.cookies,callback=self.get_details)# parse the member's detail page
# yield item
def get_details(self,response):
'''
<class 'str'>
年 龄:
26-29岁之间
身 高:
169-185厘米
民 族:
汉族
学 历:
不限
相 册:
有照片
婚姻状况:
未婚
居 住 地:
湖北十堰
诚 信:
不限
        Convert text of this form (the page's Chinese field labels: age, height, ethnicity, education, photos, marital status, residence, sincerity) into a dict such as {'学历': '不限', '婚姻状况': '未婚', '居住地': '湖北十堰', '相册': '有照片', '身高': '169-185厘米', '民族': '汉族', '诚信': '不限', '年龄': '26-29岁之间'} for easy insertion into the database
'''
pass
def parse(str1):
temp_list = str1.split('\n')
result={}
result_str=''
#             temp_dict=[]# some sections have several blocks underneath, and their dicts need to be merged
#             result_dict = {}# the result of merging those dicts
            if len(temp_list)>1:# more than one line means the section has values; otherwise it was left unfilled
for i in range(len(temp_list)):
if i%2==0:
result[temp_list[i].replace(" ", "").replace(":", '')] = temp_list[i+1]
return result
            # otherwise return the string unchanged
else:
result_str = str1
return result_str
item = JiayuanItem()
self.driver.get(response.url)
self.driver.implicitly_wait(3)
print('打开浏览器')
print("当前的url",response.url)
age_info = self.driver.find_element_by_xpath('/html//h6[@class="member_name"]').text
person_id = response.url[response.url.rfind('/')+1:response.url.index('?')]
print("年龄地址信息",type(age_info),age_info)
        address = self.driver.find_elements_by_xpath('/html//h6[@class="member_name"]/a')# get the text of the multiple <a> tags
str_address=''
str_sheng=address[0].get_attribute("text")
str_shi=address[1].get_attribute("text")
print("人员地址",str_sheng+'sssss'+str_shi)
        '''
        Personal info
        '''
person_info = self.driver.find_elements_by_xpath('/html//ul[@class="member_info_list fn-clear"]')
person_dict={}
for i in person_info:
person_dict = parse(i.text)
print("个人信息",person_dict)
        '''
        Populate the item; maps to the person_info table in MySQL
        '''
item['person_id'] = person_id
item['province'] = str_sheng
item['municipal'] = str_shi
nick_name_info = self.driver.find_elements_by_xpath('/html//div[@class="member_info_r yh"]/h4')
nick_name = nick_name_info[0].text[0:nick_name_info[0].text.index("I")]
print("昵称", nick_name)
item['nike_name'] = nick_name
item['education'] = person_dict['学历']
item['height'] = person_dict['身高']
item['buy_car'] = person_dict['购车']
item['salary'] = person_dict['月薪']
item['housing'] = person_dict['住房']
item['weight'] = person_dict['体重']
item['constellation'] = person_dict['星座']
item['nation'] = person_dict['民族']
item['zodiac'] = person_dict['属相']
item['blood_type'] = person_dict['血型']
item['age'] = age_info[0:age_info.index(',')]
print("年龄",age_info[0:age_info.index(',')])
item['address'] = str_sheng+str_shi
item['age_info'] = age_info
        item['image_dir'] = nick_name+'_'+item['age']+'_'+person_id# folder name used to group the downloaded photos
item['url'] = response.url
        # personal tagline
        item['introduce_oneself'] = self.driver.find_element_by_xpath('/html//div[@class="main_1000 mt15 fn-clear"]//div[@class="js_text"]').text
        print("个性短语",item['introduce_oneself'])
        # personality tags; some members have none
        # "more" must be clicked to reveal them all, otherwise only 4 are shown
item['interest_label']=''
item['personality_label']=''
try:
#link_a = self.driver.find_element_by_xpath('/html//div[@class="d_more DNA_xq_more DNA_xq_more_a"]/a')
#link_a.click()
self.driver.find_element_by_xpath('/html//div[@class="d_more DNA_xq_more DNA_xq_more_a"]/a').click()
time.sleep(1)
gexing_info = self.driver.find_elements_by_xpath('/html//div[@class="test4"]//div[@class="list_a fn-clear"]')
print("aaa",type(gexing_info),gexing_info)
gexing_tag=''
for i in gexing_info:
gexing_tag += i.text
# a = item.find_element_by_xpath('div[@class="pag_list_grey_c"]').text
item['personality_label'] = "".join(gexing_tag)
except Exception as e:
item['personality_label'] = '还没有填写个性元素'
print("个性",item['personality_label'])
        # her interests may be missing as well
try:
#link_a = self.driver.find_element_by_xpath('/html//div[@class="d_more DNA_xq_more DNA_xq_more_a"]/a')
#link_a.click()
self.driver.find_element_by_xpath('/html//div[@class="d_more DNA_xq_more"]/a').click()
# self.driver.find_element_by_xpath('/html/body/div[6]/div[1]/div[3]/div/div[1]/div[2]/a').click
self.driver.implicitly_wait(1)
aihao_info = self.driver.find_elements_by_xpath('/html/body/div[6]/div[1]/div[3]/div/div[1]/div[1]/ul')
print("bbb",type(aihao_info),aihao_info)
aihao_tag=''
for i in aihao_info:
aihao_tag += i.text
# a = item.find_element_by_xpath('div[@class="pag_list_grey_c"]').text
item['interest_label'] = "".join(aihao_tag)
except Exception as e:
item['interest_label'] = '还没有填写兴趣爱好'
print("她的兴趣爱好",item['interest_label'])
find_mate = self.driver.find_elements_by_xpath('/html//div[@class="bg_white mt15"]')
        '''
        Mate preferences
        '''
mate = find_mate[1].find_elements_by_xpath('div[@class="js_box"]/ul[@class="js_list fn-clear"]')
mate_dict={}
for i in mate:
mate_dict = parse(i.text)
item['person_id_mate'] = person_id
item['age_mate'] = mate_dict['年龄']
item['height_mate'] = mate_dict['身高']
item['nation_mate'] = mate_dict['民族']
item['education_mate'] = mate_dict['学历']
item['image_mate'] = mate_dict['相册']
item['marital_status'] = mate_dict['婚姻状况']
item['address_mate'] = mate_dict['居住地']
        item['sincerity_mate'] = mate_dict['诚信']# sincerity
print("择偶要求",mate_dict)
        '''
        Lifestyle
        '''
life = find_mate[2].find_elements_by_xpath('div[@class="js_box"]/ul[@class="js_list fn-clear"]')
life_style={}
for i in life:
temp = parse(i.text)
if isinstance(temp,dict):
                life_style.update(parse(i.text))# update() merges the two dicts
else:
life_style['吸烟'] = '未填写生活方式'
life_style['饮酒'] = '未填写生活方式'
life_style['锻炼习惯'] = '未填写生活方式'
life_style['饮食习惯'] = '未填写生活方式'
life_style['逛街购物'] = '未填写生活方式'
life_style['宗教信仰'] = '未填写生活方式'
life_style['作息时间'] = '未填写生活方式'
life_style['交际圈子'] = '未填写生活方式'
life_style['最大消费'] = '未填写生活方式'
try:
housework = []
pet = []
jiawu1 = find_mate[2].find_elements_by_xpath('div[@class="js_box"]//div[@class="pt25 fn-clear"]//dd[@class="cur"]')
for i in jiawu1:
                housework.append(i.text)# index 0 is housework skill, index 1 is fondness for pets
                print("家务1 ",i.text)
            jiawu2 = find_mate[2].find_elements_by_xpath('div[@class="js_box"]//div[@class="fl pr"]/em')
            for i in jiawu2:
                pet.append(i.text)# index 0 is housework division, index 1 is about pets
print("家务2 ",i.text)
except Exception as e:
housework.append('家务水平程度未填写')
housework.append('宠物喜欢程度未填写')
pet.append('家务分配未填写')
pet.append ('关于宠物未填写')
item['person_id_life'] = person_id
item['smoke'] = life_style['吸烟']
item['drink_wine'] = life_style['饮酒']
item['exercise_habits'] = life_style['锻炼习惯']
item['eating_habits'] = life_style['饮食习惯']
item['shopping'] = life_style['逛街购物']
item['religious_belief'] = life_style['宗教信仰']
item['time_table'] = life_style['作息时间']
item['circle_of_communication'] = life_style['交际圈子']
item['maximum_consumption'] = life_style['最大消费']
item['housework'] = housework[0]
item['household_assignment'] = pet[0]
item['pet'] = housework[1]
item['about_pets'] = pet[1]
print("生活方式",life_style)
print("家务",housework[0],pet[0])
print("宠物",housework[1],pet[1])
        '''
        Financial situation
        '''
economic_dict={}
economic = find_mate[3].find_elements_by_xpath('div[@class="js_box"]/ul[@class="js_list fn-clear"]')
for i in economic:
economic_dict = parse(i.text)
item['person_id_economic'] = person_id
item['salary_economic'] = economic_dict['月薪']
item['buy_house_economic'] = economic_dict['购房']
item['buy_car_economic'] = economic_dict['购车']
item['economic_concept'] = economic_dict['经济观念']
item['investment_financing'] = economic_dict['投资理财']
item['foreign_debt'] = economic_dict['外债贷款']
print("经济实力",economic_dict)
        '''
        Work and education
        '''
work = find_mate[4].find_elements_by_xpath('div[@class="js_box"]/ul[@class="js_list fn-clear"]')
        work_study = {}
for i in work:
if i.text:
temp = parse(i.text)
if isinstance(temp,dict):
                    work_study.update(parse(i.text))# update() merges the two dicts
else:
work_study['职业职位'] = '未填写工作学习方式'
work_study['公司行业'] = '未填写工作学习方式'
work_study['公司类型'] = '未填写工作学习方式'
work_study['福利待遇'] = '未填写工作学习方式'
work_study['工作状态'] = '未填写工作学习方式'
work_study['调动工作可能性'] = '未填写工作学习方式'
work_study['事业与家庭'] = '未填写工作学习方式'
work_study['海外工作可能性'] = '未填写工作学习方式'
work_study['毕业院校'] = '未填写工作学习方式'
work_study['专业类型'] = '未填写工作学习方式'
work_study['语言能力'] = '未填写工作学习方式'
item['person_id_study'] = person_id
item['position'] = work_study['职业职位']
item['company'] = work_study['公司行业']
item['company_type'] = work_study['公司类型']
item['welfare'] = work_study['福利待遇']
item['working'] = work_study['工作状态']
item['transfer_work'] = work_study['调动工作可能性']
item['work_family'] = work_study['事业与家庭']
item['overseas_job'] = work_study['海外工作可能性']
item['university'] = work_study['毕业院校']
item['major'] = work_study['专业类型']
item['language'] = work_study['语言能力']
print("工作学习",work_study)
        '''
        Views on marriage
        '''
marriage = find_mate[5].find_elements_by_xpath('div[@class="js_box"]/ul[@class="js_list fn-clear"]')
marriage_family={}
for i in marriage:
if i.text:
temp = parse(i.text)
if isinstance(temp,dict):
                    marriage_family.update(parse(i.text))# update() merges the two dicts
else:
marriage_family['籍贯'] = '未填写婚姻观念'
marriage_family['户口'] = '未填写婚姻观念'
marriage_family['国籍'] = '未填写婚姻观念'
marriage_family['个性待征'] = '未填写婚姻观念'
marriage_family['幽默感'] = '未填写婚姻观念'
marriage_family['脾气'] = '未填写婚姻观念'
marriage_family['对待感情'] = '未填写婚姻观念'
marriage_family['是否要小孩'] = '未填写婚姻观念'
marriage_family['何时结婚'] = '未填写婚姻观念'
marriage_family['是否能接受异地恋'] = '未填写婚姻观念'
marriage_family['理想婚姻'] = '未填写婚姻观念'
marriage_family['愿与对方父母同住'] = '未填写婚姻观念'
marriage_family['家中排行'] = '未填写婚姻观念'
marriage_family['父母情况'] = '未填写婚姻观念'
marriage_family['兄弟姐妹'] = '未填写婚姻观念'
marriage_family['父母经济情况'] = '未填写婚姻观念'
marriage_family['父母医保情况'] = '未填写婚姻观念'
marriage_family['父母的工作'] = '未填写婚姻观念'
item['person_id_marriage'] = person_id
item['address_marriage'] = marriage_family['籍贯']
item['registered_residence'] = marriage_family['户口']
item['nationality'] = marriage_family['国籍']
item['personality'] = marriage_family['个性待征']
item['humor'] = marriage_family['幽默感']
item['temper'] = marriage_family['脾气']
item['feelings'] = marriage_family['对待感情']
item['want_child'] = marriage_family['是否要小孩']
item['when_mary'] = marriage_family['何时结婚']
item['strange_love'] = marriage_family['是否能接受异地恋']
item['ideal_marriage'] = marriage_family['理想婚姻']
item['live_parents'] = marriage_family['愿与对方父母同住']
item['rankings_home'] = marriage_family['家中排行']
item['parents_situation'] = marriage_family['父母情况']
item['brothers'] = marriage_family['兄弟姐妹']
item['parents_economic'] = marriage_family['父母经济情况']
item['parents_medical'] = marriage_family['父母医保情况']
item['parents_working'] = marriage_family['父母的工作']
print("婚姻观念",marriage_family)
        '''
        Photo list
        '''
        # fetch the photos
print("相片url",response.url)
list_images = self.driver.find_elements_by_xpath('/html//div[@id="bigImg"]//a')
print("相片列表",type(list_images),list_images)
images= []
for i in list_images:
image = i.find_element_by_xpath('img').get_attribute("src")
images.append(image)
print("相片地址",image)
        item['img_urls'] = images# photo urls, stored as text in the person_info table
print("执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后")
yield item
cmdline.execute("scrapy crawl jiayuan_main".split())
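Note on the spider above: its core parsing trick is the local parse() helper, which turns the profile page's alternating label/value lines into a dict, with the except branches backfilling default values for unfilled sections (the class-level try/except would also NameError on driver.close() if Chrome failed to start, since driver is never bound in that case). A self-contained sketch of the label/value technique (the sample text and default keys are made up, not live page output):

def parse_labels(text, defaults=None):
    # lines alternate label, value, label, value, ...
    lines = text.split('\n')
    result = dict(defaults or {})
    if len(lines) > 1:
        for i in range(0, len(lines) - 1, 2):
            # strip spaces plus both ASCII and full-width colons from the label
            key = lines[i].replace(' ', '').replace(':', '').replace(':', '')
            result[key] = lines[i + 1]
    return result

sample = '月薪:\n5000元以上\n购车:\n已购车'
print(parse_labels(sample, defaults={'购房': '未填写'}))
# -> {'购房': '未填写', '月薪': '5000元以上', '购车': '已购车'}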
|
[
"# -*- coding:utf-8 -*-\r\n'''\r\nCreated on 2018年2月28日\r\n@author: ning.lin\r\n'''\r\n'''\r\n大图地址class或id有big字样 的\r\n<div class=\"pho_big\" id=\"phoBig\" style=\"height: 640px;\">\r\n<div class=\"big_pic fn-clear\" id=\"bigImg\">\r\n小图地址\r\n<div class=\"pho_small_box fn-clear mt25 \" id=\"phoSmallPic\">\r\n'''\r\n\r\nimport json\r\nimport time\r\n\r\nfrom scrapy import log\r\nfrom scrapy import cmdline\r\nimport scrapy\r\nfrom scrapy.http import Request\r\nfrom scrapy.http.request.form import FormRequest\r\nfrom scrapy_redis.spiders import RedisSpider\r\nfrom selenium import webdriver\r\n\r\nfrom jiayuan.settings import IMAGES_STORE,USER_NAME,PASSWD\r\nfrom jiayuan.items import JiayuanItem,MainItem\r\nimport redis \r\n\r\n\r\nclass jiayuan_data(RedisSpider):\r\n pool=redis.ConnectionPool(host='127.0.0.1',port=6379,db=0,decode_responses=True) #427条记录\r\n r = redis.StrictRedis(connection_pool=pool) \r\n name = \"jiayuan_main\"\r\n redis_key = 'jiayuan_main:start_urls'\r\n url_base = 'http://search.jiayuan.com/v2/index.php?key=&sex=f&stc=&sn=default&sv=1&p=%s&pt=163649&ft=off&f=select&mt=d'\r\n redis_key = \"sinaspider:start_urls\"\r\n login_url = 'http://login.jiayuan.com/'#登录时的url\r\n start_urls = []\r\n pre_page_num = 25#每个搜索业面有25条记录\r\n #head less模拟登录\r\n option = webdriver.ChromeOptions()\r\n option.add_argument('--headless')\r\n option.add_argument(\"--window-size=1920,1080\")\r\n prefs={\"profile.managed_default_content_settings.images\":2}#禁止加载图片\r\n option.add_experimental_option(\"prefs\",prefs)\r\n try:\r\n driver = webdriver.Chrome(chrome_options=option)\r\n except Exception as e:\r\n driver.close()\r\n print(\"spider出现了异常,关闭\",str(e))\r\n driver.get(login_url)\r\n time.sleep(3)\r\n driver.find_element_by_id(\"login_btn\").click()\r\n driver.find_element_by_id(\"login_email\").clear()\r\n driver.find_element_by_id(\"login_email\").send_keys(USER_NAME) #修改为自己的用户名\r\n driver.find_element_by_id(\"login_password\").clear()\r\n driver.find_element_by_id(\"login_password\").send_keys(PASSWD) #修改为自己的密码\r\n #登录url\r\n #url=\"http://login.jiayuan.com/\"\r\n driver.find_element_by_id(\"login_btn\").click()#点击登录按钮\r\n cookies = driver.get_cookies()#获取cookies\r\n for p in range(1,173649):\r\n search_url = \"http://search.jiayuan.com/v2/index.php?key=&sex=f&stc=&sn=default&sv=1&p=%s&pt=173649&ft=off&f=select&mt=d\" %(p)\r\n start_urls.append(search_url)\r\n #print(\"start_urls\",len(start_urls))\r\n# start_urls = [\r\n# \"http://search.jiayuan.com/v2/search_v2.php\",#直接搜索结果,获取个人主页的url(先不登录)\r\n #\"https://passport.jiayuan.com/dologin.php?pre_url=http://www.jiayuan.com/usercp\",#登录页面post数据\r\n# ]\r\n '''\r\n 下载器中间件在下载器和Scrapy引擎之间,每一个request和response都会通过中间件进行处理。\r\n 在中间件中,对request进行处理的函数是process_request(request, spider)\r\n '''\r\n def start_requests(self):#\r\n for url in self.start_urls:\r\n yield Request(url=url,callback=self.get_main_info)\r\n# yield scrapy.Request(url=search_url,callback=self.get_main_info)\r\n# return Request(url=url,callback=self.get_main_info)\r\n def get_main_info(self,response):#解析搜索业面的url\r\n #info = response.body.decode(\"utf-8\") #登录后可以查看一下登录响应信息json.loads(\r\n# for url in self.start_urls:\r\n time.sleep(1) \r\n print(\"当前的url\",response.url)\r\n print('重新加载url')\r\n self.driver.get(response.url)\r\n self.driver.implicitly_wait(3)\r\n user_list = self.driver.find_elements_by_xpath('/html//ul[@id=\"normal_user_container\"]/li//div[@class=\"user_name\"]/a[@class=\"os_stat\"]')#得到多个li标签\r\n if user_list==[]:\r\n print(\"user_list为空了,解析有问题\")\r\n 
#print(\"user_list\",type(user_list),user_list)\r\n url_details = []#详情页面的url\r\n for user in user_list:\r\n main_url_main = user.get_attribute(\"href\")\r\n print(\"人员主页url\",main_url_main)\r\n url_details.append(main_url_main)\r\n# self.redis_pipe.rpush(\"p\",main_url_main)#详情页额外写入redis,也可以不写\r\n# self.redis_pipe.execute()\r\n print(\"人员详情url2\",len(url_details))\r\n if url_details!=[]:\r\n for url in url_details:\r\n yield Request(url=url,cookies=self.cookies,callback=self.get_details)#解析人员详细信息\r\n# yield item\r\n def get_details(self,response):\r\n '''\r\n <class 'str'>\r\n 年 龄:\r\n 26-29岁之间\r\n 身 高:\r\n 169-185厘米\r\n 民 族:\r\n 汉族\r\n 学 历:\r\n 不限\r\n 相 册:\r\n 有照片\r\n 婚姻状况:\r\n 未婚\r\n 居 住 地:\r\n 湖北十堰\r\n 诚 信:\r\n 不限\r\n 将这种类型的文字全部转成{'学历': '不限', '婚姻状况': '未婚', '居住地': '湖北十堰', '相册': '有照片', '身高': '169-185厘米', '民族': '汉族', '诚信': '不限', '年龄': '26-29岁之间'}这种dict方便入库\r\n '''\r\n pass\r\n def parse(str1):\r\n temp_list = str1.split('\\n')\r\n result={}\r\n result_str=''\r\n# temp_dict=[]#result_dict这是因为有些项目下面有多个标签,多个标签就需要合并起来\r\n# result_dict = {}#多个dict合并后的结果\r\n if len(temp_list)>1:#大于1说明该项下有值,否则此项未填信息\r\n for i in range(len(temp_list)):\r\n if i%2==0:\r\n result[temp_list[i].replace(\" \", \"\").replace(\":\", '')] = temp_list[i+1]\r\n return result\r\n #其他则返回str\r\n else:\r\n result_str = str1\r\n return result_str\r\n \r\n \r\n item = JiayuanItem()\r\n self.driver.get(response.url)\r\n self.driver.implicitly_wait(3)\r\n print('打开浏览器')\r\n print(\"当前的url\",response.url)\r\n age_info = self.driver.find_element_by_xpath('/html//h6[@class=\"member_name\"]').text\r\n person_id = response.url[response.url.rfind('/')+1:response.url.index('?')]\r\n print(\"年龄地址信息\",type(age_info),age_info)\r\n address = self.driver.find_elements_by_xpath('/html//h6[@class=\"member_name\"]/a')#得到多个a标签的text\r\n str_address=''\r\n str_sheng=address[0].get_attribute(\"text\") \r\n str_shi=address[1].get_attribute(\"text\") \r\n print(\"人员地址\",str_sheng+'sssss'+str_shi)\r\n \r\n '''\r\n 人个信息\r\n '''\r\n person_info = self.driver.find_elements_by_xpath('/html//ul[@class=\"member_info_list fn-clear\"]')\r\n person_dict={}\r\n for i in person_info:\r\n person_dict = parse(i.text)\r\n print(\"个人信息\",person_dict)\r\n '''\r\n 处理item,对应mysql的person_info表\r\n '''\r\n item['person_id'] = person_id\r\n item['province'] = str_sheng\r\n item['municipal'] = str_shi\r\n nick_name_info = self.driver.find_elements_by_xpath('/html//div[@class=\"member_info_r yh\"]/h4')\r\n nick_name = nick_name_info[0].text[0:nick_name_info[0].text.index(\"I\")]\r\n print(\"昵称\", nick_name)\r\n item['nike_name'] = nick_name\r\n item['education'] = person_dict['学历']\r\n item['height'] = person_dict['身高']\r\n item['buy_car'] = person_dict['购车']\r\n item['salary'] = person_dict['月薪']\r\n item['housing'] = person_dict['住房']\r\n item['weight'] = person_dict['体重']\r\n item['constellation'] = person_dict['星座']\r\n item['nation'] = person_dict['民族']\r\n item['zodiac'] = person_dict['属相']\r\n item['blood_type'] = person_dict['血型']\r\n item['age'] = age_info[0:age_info.index(',')]\r\n print(\"年龄\",age_info[0:age_info.index(',')])\r\n item['address'] = str_sheng+str_shi\r\n item['age_info'] = age_info\r\n item['image_dir'] = nick_name+'_'+item['age']+'_'+person_id#下载的相片归类\r\n item['url'] = response.url\r\n \r\n #个人短语\r\n item['introduce_oneself'] = self.driver.find_element_by_xpath('/html//div[@class=\"main_1000 mt15 fn-clear\"]//div[@class=\"js_text\"]').text\r\n print(\"个性短语\",item['introduce_oneself'])\r\n #个性标签,有些人是没有个性标签的\r\n #需要点击”更多“才能全部显示出来,否则只有4个\r\n 
item['interest_label']=''\r\n item['personality_label']=''\r\n try:\r\n #link_a = self.driver.find_element_by_xpath('/html//div[@class=\"d_more DNA_xq_more DNA_xq_more_a\"]/a')\r\n #link_a.click()\r\n self.driver.find_element_by_xpath('/html//div[@class=\"d_more DNA_xq_more DNA_xq_more_a\"]/a').click()\r\n time.sleep(1)\r\n gexing_info = self.driver.find_elements_by_xpath('/html//div[@class=\"test4\"]//div[@class=\"list_a fn-clear\"]')\r\n print(\"aaa\",type(gexing_info),gexing_info)\r\n gexing_tag=''\r\n for i in gexing_info:\r\n gexing_tag += i.text\r\n # a = item.find_element_by_xpath('div[@class=\"pag_list_grey_c\"]').text\r\n item['personality_label'] = \"\".join(gexing_tag)\r\n except Exception as e:\r\n item['personality_label'] = '还没有填写个性元素'\r\n print(\"个性\",item['personality_label'])\r\n #她的兴趣爱好有可能也是找不到的 \r\n try:\r\n #link_a = self.driver.find_element_by_xpath('/html//div[@class=\"d_more DNA_xq_more DNA_xq_more_a\"]/a')\r\n #link_a.click()\r\n self.driver.find_element_by_xpath('/html//div[@class=\"d_more DNA_xq_more\"]/a').click()\r\n# self.driver.find_element_by_xpath('/html/body/div[6]/div[1]/div[3]/div/div[1]/div[2]/a').click\r\n self.driver.implicitly_wait(1)\r\n aihao_info = self.driver.find_elements_by_xpath('/html/body/div[6]/div[1]/div[3]/div/div[1]/div[1]/ul')\r\n print(\"bbb\",type(aihao_info),aihao_info)\r\n aihao_tag=''\r\n for i in aihao_info:\r\n aihao_tag += i.text \r\n # a = item.find_element_by_xpath('div[@class=\"pag_list_grey_c\"]').text\r\n item['interest_label'] = \"\".join(aihao_tag)\r\n except Exception as e:\r\n item['interest_label'] = '还没有填写兴趣爱好'\r\n print(\"她的兴趣爱好\",item['interest_label'])\r\n find_mate = self.driver.find_elements_by_xpath('/html//div[@class=\"bg_white mt15\"]')\r\n '''\r\n 择偶要求\r\n '''\r\n mate = find_mate[1].find_elements_by_xpath('div[@class=\"js_box\"]/ul[@class=\"js_list fn-clear\"]')\r\n mate_dict={}\r\n for i in mate:\r\n mate_dict = parse(i.text)\r\n item['person_id_mate'] = person_id\r\n item['age_mate'] = mate_dict['年龄']\r\n item['height_mate'] = mate_dict['身高']\r\n item['nation_mate'] = mate_dict['民族']\r\n item['education_mate'] = mate_dict['学历']\r\n item['image_mate'] = mate_dict['相册']\r\n item['marital_status'] = mate_dict['婚姻状况']\r\n item['address_mate'] = mate_dict['居住地']\r\n item['sincerity_mate'] = mate_dict['诚信']#诚信\r\n print(\"择偶要求\",mate_dict)\r\n '''\r\n 生活方式\r\n '''\r\n life = find_mate[2].find_elements_by_xpath('div[@class=\"js_box\"]/ul[@class=\"js_list fn-clear\"]')\r\n life_style={}\r\n for i in life:\r\n temp = parse(i.text)\r\n if isinstance(temp,dict):\r\n life_style.update(parse(i.text))#update就合并两个dict\r\n else:\r\n life_style['吸烟'] = '未填写生活方式'\r\n life_style['饮酒'] = '未填写生活方式'\r\n life_style['锻炼习惯'] = '未填写生活方式'\r\n life_style['饮食习惯'] = '未填写生活方式'\r\n life_style['逛街购物'] = '未填写生活方式'\r\n life_style['宗教信仰'] = '未填写生活方式'\r\n life_style['作息时间'] = '未填写生活方式'\r\n life_style['交际圈子'] = '未填写生活方式'\r\n life_style['最大消费'] = '未填写生活方式'\r\n try:\r\n housework = []\r\n pet = []\r\n jiawu1 = find_mate[2].find_elements_by_xpath('div[@class=\"js_box\"]//div[@class=\"pt25 fn-clear\"]//dd[@class=\"cur\"]')\r\n for i in jiawu1:\r\n housework.append(i.text)#0为家务水平,1为宠物喜欢程度\r\n print(\"家务1 \",i.text)\r\n jiawu2 = find_mate[2].find_elements_by_xpath('div[@class=\"js_box\"]//div[@class=\"fl pr\"]/em')\r\n for i in jiawu2:\r\n pet.append(i.text)#0为家务分配,1为关于宠物\r\n print(\"家务2 \",i.text)\r\n except Exception as e:\r\n housework.append('家务水平程度未填写')\r\n housework.append('宠物喜欢程度未填写')\r\n pet.append('家务分配未填写')\r\n pet.append ('关于宠物未填写')\r\n 
item['person_id_life'] = person_id\r\n item['smoke'] = life_style['吸烟']\r\n item['drink_wine'] = life_style['饮酒']\r\n item['exercise_habits'] = life_style['锻炼习惯']\r\n item['eating_habits'] = life_style['饮食习惯']\r\n item['shopping'] = life_style['逛街购物']\r\n item['religious_belief'] = life_style['宗教信仰']\r\n item['time_table'] = life_style['作息时间']\r\n item['circle_of_communication'] = life_style['交际圈子']\r\n item['maximum_consumption'] = life_style['最大消费']\r\n item['housework'] = housework[0]\r\n item['household_assignment'] = pet[0]\r\n item['pet'] = housework[1]\r\n item['about_pets'] = pet[1]\r\n print(\"生活方式\",life_style)\r\n print(\"家务\",housework[0],pet[0])\r\n print(\"宠物\",housework[1],pet[1])\r\n '''\r\n 经济实力\r\n '''\r\n economic_dict={}\r\n economic = find_mate[3].find_elements_by_xpath('div[@class=\"js_box\"]/ul[@class=\"js_list fn-clear\"]')\r\n for i in economic:\r\n economic_dict = parse(i.text)\r\n item['person_id_economic'] = person_id\r\n item['salary_economic'] = economic_dict['月薪']\r\n item['buy_house_economic'] = economic_dict['购房']\r\n item['buy_car_economic'] = economic_dict['购车']\r\n item['economic_concept'] = economic_dict['经济观念']\r\n item['investment_financing'] = economic_dict['投资理财']\r\n item['foreign_debt'] = economic_dict['外债贷款']\r\n print(\"经济实力\",economic_dict)\r\n '''\r\n 工作学习\r\n '''\r\n work = find_mate[4].find_elements_by_xpath('div[@class=\"js_box\"]/ul[@class=\"js_list fn-clear\"]')\r\n work_study = {}#\r\n for i in work:\r\n if i.text:\r\n temp = parse(i.text)\r\n if isinstance(temp,dict):\r\n work_study.update(parse(i.text))#update就合并两个dict\r\n else:\r\n work_study['职业职位'] = '未填写工作学习方式'\r\n work_study['公司行业'] = '未填写工作学习方式'\r\n work_study['公司类型'] = '未填写工作学习方式'\r\n work_study['福利待遇'] = '未填写工作学习方式'\r\n work_study['工作状态'] = '未填写工作学习方式'\r\n work_study['调动工作可能性'] = '未填写工作学习方式'\r\n work_study['事业与家庭'] = '未填写工作学习方式'\r\n work_study['海外工作可能性'] = '未填写工作学习方式'\r\n work_study['毕业院校'] = '未填写工作学习方式'\r\n work_study['专业类型'] = '未填写工作学习方式'\r\n work_study['语言能力'] = '未填写工作学习方式'\r\n item['person_id_study'] = person_id\r\n item['position'] = work_study['职业职位']\r\n item['company'] = work_study['公司行业']\r\n item['company_type'] = work_study['公司类型']\r\n item['welfare'] = work_study['福利待遇']\r\n item['working'] = work_study['工作状态']\r\n item['transfer_work'] = work_study['调动工作可能性']\r\n item['work_family'] = work_study['事业与家庭']\r\n item['overseas_job'] = work_study['海外工作可能性']\r\n item['university'] = work_study['毕业院校']\r\n item['major'] = work_study['专业类型']\r\n item['language'] = work_study['语言能力']\r\n print(\"工作学习\",work_study)\r\n '''\r\n 婚姻观念\r\n '''\r\n marriage = find_mate[5].find_elements_by_xpath('div[@class=\"js_box\"]/ul[@class=\"js_list fn-clear\"]')\r\n marriage_family={}\r\n for i in marriage:\r\n if i.text:\r\n temp = parse(i.text)\r\n if isinstance(temp,dict):\r\n marriage_family.update(parse(i.text))#update就合并两个dict\r\n else:\r\n marriage_family['籍贯'] = '未填写婚姻观念'\r\n marriage_family['户口'] = '未填写婚姻观念'\r\n marriage_family['国籍'] = '未填写婚姻观念'\r\n marriage_family['个性待征'] = '未填写婚姻观念'\r\n marriage_family['幽默感'] = '未填写婚姻观念'\r\n marriage_family['脾气'] = '未填写婚姻观念'\r\n marriage_family['对待感情'] = '未填写婚姻观念'\r\n marriage_family['是否要小孩'] = '未填写婚姻观念'\r\n marriage_family['何时结婚'] = '未填写婚姻观念'\r\n marriage_family['是否能接受异地恋'] = '未填写婚姻观念'\r\n marriage_family['理想婚姻'] = '未填写婚姻观念'\r\n marriage_family['愿与对方父母同住'] = '未填写婚姻观念'\r\n marriage_family['家中排行'] = '未填写婚姻观念'\r\n marriage_family['父母情况'] = '未填写婚姻观念'\r\n marriage_family['兄弟姐妹'] = '未填写婚姻观念'\r\n marriage_family['父母经济情况'] = '未填写婚姻观念'\r\n 
marriage_family['父母医保情况'] = '未填写婚姻观念'\r\n marriage_family['父母的工作'] = '未填写婚姻观念'\r\n item['person_id_marriage'] = person_id\r\n item['address_marriage'] = marriage_family['籍贯']\r\n item['registered_residence'] = marriage_family['户口']\r\n item['nationality'] = marriage_family['国籍']\r\n item['personality'] = marriage_family['个性待征']\r\n item['humor'] = marriage_family['幽默感']\r\n item['temper'] = marriage_family['脾气']\r\n item['feelings'] = marriage_family['对待感情']\r\n item['want_child'] = marriage_family['是否要小孩']\r\n item['when_mary'] = marriage_family['何时结婚']\r\n item['strange_love'] = marriage_family['是否能接受异地恋']\r\n item['ideal_marriage'] = marriage_family['理想婚姻']\r\n item['live_parents'] = marriage_family['愿与对方父母同住']\r\n item['rankings_home'] = marriage_family['家中排行']\r\n item['parents_situation'] = marriage_family['父母情况']\r\n item['brothers'] = marriage_family['兄弟姐妹']\r\n item['parents_economic'] = marriage_family['父母经济情况']\r\n item['parents_medical'] = marriage_family['父母医保情况']\r\n item['parents_working'] = marriage_family['父母的工作']\r\n print(\"婚姻观念\",marriage_family)\r\n '''\r\n 相片列表\r\n '''\r\n #获取图片\r\n print(\"相片url\",response.url)\r\n list_images = self.driver.find_elements_by_xpath('/html//div[@id=\"bigImg\"]//a')\r\n print(\"相片列表\",type(list_images),list_images)\r\n images= []\r\n for i in list_images:\r\n image = i.find_element_by_xpath('img').get_attribute(\"src\")\r\n images.append(image)\r\n print(\"相片地址\",image)\r\n \r\n item['img_urls'] = images#保存相片地址,在person_info表中的text\r\n print(\"执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后\")\r\n yield item\r\ncmdline.execute(\"scrapy crawl jiayuan_main\".split())",
"<docstring token>\nimport json\nimport time\nfrom scrapy import log\nfrom scrapy import cmdline\nimport scrapy\nfrom scrapy.http import Request\nfrom scrapy.http.request.form import FormRequest\nfrom scrapy_redis.spiders import RedisSpider\nfrom selenium import webdriver\nfrom jiayuan.settings import IMAGES_STORE, USER_NAME, PASSWD\nfrom jiayuan.items import JiayuanItem, MainItem\nimport redis\n\n\nclass jiayuan_data(RedisSpider):\n pool = redis.ConnectionPool(host='127.0.0.1', port=6379, db=0,\n decode_responses=True)\n r = redis.StrictRedis(connection_pool=pool)\n name = 'jiayuan_main'\n redis_key = 'jiayuan_main:start_urls'\n url_base = (\n 'http://search.jiayuan.com/v2/index.php?key=&sex=f&stc=&sn=default&sv=1&p=%s&pt=163649&ft=off&f=select&mt=d'\n )\n redis_key = 'sinaspider:start_urls'\n login_url = 'http://login.jiayuan.com/'\n start_urls = []\n pre_page_num = 25\n option = webdriver.ChromeOptions()\n option.add_argument('--headless')\n option.add_argument('--window-size=1920,1080')\n prefs = {'profile.managed_default_content_settings.images': 2}\n option.add_experimental_option('prefs', prefs)\n try:\n driver = webdriver.Chrome(chrome_options=option)\n except Exception as e:\n driver.close()\n print('spider出现了异常,关闭', str(e))\n driver.get(login_url)\n time.sleep(3)\n driver.find_element_by_id('login_btn').click()\n driver.find_element_by_id('login_email').clear()\n driver.find_element_by_id('login_email').send_keys(USER_NAME)\n driver.find_element_by_id('login_password').clear()\n driver.find_element_by_id('login_password').send_keys(PASSWD)\n driver.find_element_by_id('login_btn').click()\n cookies = driver.get_cookies()\n for p in range(1, 173649):\n search_url = (\n 'http://search.jiayuan.com/v2/index.php?key=&sex=f&stc=&sn=default&sv=1&p=%s&pt=173649&ft=off&f=select&mt=d'\n % p)\n start_urls.append(search_url)\n \"\"\"\n 下载器中间件在下载器和Scrapy引擎之间,每一个request和response都会通过中间件进行处理。\n 在中间件中,对request进行处理的函数是process_request(request, spider)\n \"\"\"\n\n def start_requests(self):\n for url in self.start_urls:\n yield Request(url=url, callback=self.get_main_info)\n\n def get_main_info(self, response):\n time.sleep(1)\n print('当前的url', response.url)\n print('重新加载url')\n self.driver.get(response.url)\n self.driver.implicitly_wait(3)\n user_list = self.driver.find_elements_by_xpath(\n '/html//ul[@id=\"normal_user_container\"]/li//div[@class=\"user_name\"]/a[@class=\"os_stat\"]'\n )\n if user_list == []:\n print('user_list为空了,解析有问题')\n url_details = []\n for user in user_list:\n main_url_main = user.get_attribute('href')\n print('人员主页url', main_url_main)\n url_details.append(main_url_main)\n print('人员详情url2', len(url_details))\n if url_details != []:\n for url in url_details:\n yield Request(url=url, cookies=self.cookies, callback=self.\n get_details)\n\n def get_details(self, response):\n \"\"\"\n <class 'str'>\n 年 龄:\n 26-29岁之间\n 身 高:\n 169-185厘米\n 民 族:\n 汉族\n 学 历:\n 不限\n 相 册:\n 有照片\n 婚姻状况:\n 未婚\n 居 住 地:\n 湖北十堰\n 诚 信:\n 不限\n 将这种类型的文字全部转成{'学历': '不限', '婚姻状况': '未婚', '居住地': '湖北十堰', '相册': '有照片', '身高': '169-185厘米', '民族': '汉族', '诚信': '不限', '年龄': '26-29岁之间'}这种dict方便入库\n \"\"\"\n pass\n\n def parse(str1):\n temp_list = str1.split('\\n')\n result = {}\n result_str = ''\n if len(temp_list) > 1:\n for i in range(len(temp_list)):\n if i % 2 == 0:\n result[temp_list[i].replace(' ', '').replace(':', '')\n ] = temp_list[i + 1]\n return result\n else:\n result_str = str1\n return result_str\n item = JiayuanItem()\n self.driver.get(response.url)\n self.driver.implicitly_wait(3)\n print('打开浏览器')\n 
print('当前的url', response.url)\n        age_info = self.driver.find_element_by_xpath(\n            '/html//h6[@class=\"member_name\"]').text\n        person_id = response.url[response.url.rfind('/') + 1:response.url.\n            index('?')]\n        print('年龄地址信息', type(age_info), age_info)\n        address = self.driver.find_elements_by_xpath(\n            '/html//h6[@class=\"member_name\"]/a')\n        str_address = ''\n        str_sheng = address[0].get_attribute('text')\n        str_shi = address[1].get_attribute('text')\n        print('人员地址', str_sheng + 'sssss' + str_shi)\n        \"\"\"\n        人个信息\n        \"\"\"\n        person_info = self.driver.find_elements_by_xpath(\n            '/html//ul[@class=\"member_info_list fn-clear\"]')\n        person_dict = {}\n        for i in person_info:\n            person_dict = parse(i.text)\n            print('个人信息', person_dict)\n        \"\"\"\n        处理item,对应mysql的person_info表\n        \"\"\"\n        item['person_id'] = person_id\n        item['province'] = str_sheng\n        item['municipal'] = str_shi\n        nick_name_info = self.driver.find_elements_by_xpath(\n            '/html//div[@class=\"member_info_r yh\"]/h4')\n        nick_name = nick_name_info[0].text[0:nick_name_info[0].text.index('I')]\n        print('昵称', nick_name)\n        item['nike_name'] = nick_name\n        item['education'] = person_dict['学历']\n        item['height'] = person_dict['身高']\n        item['buy_car'] = person_dict['购车']\n        item['salary'] = person_dict['月薪']\n        item['housing'] = person_dict['住房']\n        item['weight'] = person_dict['体重']\n        item['constellation'] = person_dict['星座']\n        item['nation'] = person_dict['民族']\n        item['zodiac'] = person_dict['属相']\n        item['blood_type'] = person_dict['血型']\n        item['age'] = age_info[0:age_info.index(',')]\n        print('年龄', age_info[0:age_info.index(',')])\n        item['address'] = str_sheng + str_shi\n        item['age_info'] = age_info\n        item['image_dir'] = nick_name + '_' + item['age'] + '_' + person_id\n        item['url'] = response.url\n        item['introduce_oneself'] = self.driver.find_element_by_xpath(\n            '/html//div[@class=\"main_1000 mt15 fn-clear\"]//div[@class=\"js_text\"]'\n            ).text\n        print('个性短语', item['introduce_oneself'])\n        item['interest_label'] = ''\n        item['personality_label'] = ''\n        try:\n            self.driver.find_element_by_xpath(\n                '/html//div[@class=\"d_more DNA_xq_more DNA_xq_more_a\"]/a'\n                ).click()\n            time.sleep(1)\n            gexing_info = self.driver.find_elements_by_xpath(\n                '/html//div[@class=\"test4\"]//div[@class=\"list_a fn-clear\"]')\n            print('aaa', type(gexing_info), gexing_info)\n            gexing_tag = ''\n            for i in gexing_info:\n                gexing_tag += i.text\n            item['personality_label'] = ''.join(gexing_tag)\n        except Exception as e:\n            item['personality_label'] = '还没有填写个性元素'\n        print('个性', item['personality_label'])\n        try:\n            self.driver.find_element_by_xpath(\n                '/html//div[@class=\"d_more DNA_xq_more\"]/a').click()\n            self.driver.implicitly_wait(1)\n            aihao_info = self.driver.find_elements_by_xpath(\n                '/html/body/div[6]/div[1]/div[3]/div/div[1]/div[1]/ul')\n            print('bbb', type(aihao_info), aihao_info)\n            aihao_tag = ''\n            for i in aihao_info:\n                aihao_tag += i.text\n            item['interest_label'] = ''.join(aihao_tag)\n        except Exception as e:\n            item['interest_label'] = '还没有填写兴趣爱好'\n        print('她的兴趣爱好', item['interest_label'])\n        find_mate = self.driver.find_elements_by_xpath(\n            '/html//div[@class=\"bg_white mt15\"]')\n        \"\"\"\n        择偶要求\n        \"\"\"\n        mate = find_mate[1].find_elements_by_xpath(\n            'div[@class=\"js_box\"]/ul[@class=\"js_list fn-clear\"]')\n        mate_dict = {}\n        for i in mate:\n            mate_dict = parse(i.text)\n        item['person_id_mate'] = person_id\n        item['age_mate'] = mate_dict['年龄']\n        item['height_mate'] = mate_dict['身高']\n        item['nation_mate'] = mate_dict['民族']\n        item['education_mate'] = mate_dict['学历']\n        item['image_mate'] = mate_dict['相册']\n        item['marital_status'] = mate_dict['婚姻状况']\n        item['address_mate'] = mate_dict['居住地']\n        item['sincerity_mate'] = mate_dict['诚信']\n        print('择偶要求', mate_dict)\n        \"\"\"\n        生活方式\n        \"\"\"\n        life = find_mate[2].find_elements_by_xpath(\n            'div[@class=\"js_box\"]/ul[@class=\"js_list fn-clear\"]')\n        life_style = {}\n        for i in life:\n            temp = parse(i.text)\n            if isinstance(temp, dict):\n                life_style.update(parse(i.text))\n            else:\n                life_style['吸烟'] = '未填写生活方式'\n                life_style['饮酒'] = '未填写生活方式'\n                life_style['锻炼习惯'] = '未填写生活方式'\n                life_style['饮食习惯'] = '未填写生活方式'\n                life_style['逛街购物'] = '未填写生活方式'\n                life_style['宗教信仰'] = '未填写生活方式'\n                life_style['作息时间'] = '未填写生活方式'\n                life_style['交际圈子'] = '未填写生活方式'\n                life_style['最大消费'] = '未填写生活方式'\n        try:\n            housework = []\n            pet = []\n            jiawu1 = find_mate[2].find_elements_by_xpath(\n                'div[@class=\"js_box\"]//div[@class=\"pt25 fn-clear\"]//dd[@class=\"cur\"]'\n                )\n            for i in jiawu1:\n                housework.append(i.text)\n                print('家务1 ', i.text)\n            jiawu2 = find_mate[2].find_elements_by_xpath(\n                'div[@class=\"js_box\"]//div[@class=\"fl pr\"]/em')\n            for i in jiawu2:\n                pet.append(i.text)\n                print('家务2 ', i.text)\n        except Exception as e:\n            housework.append('家务水平程度未填写')\n            housework.append('宠物喜欢程度未填写')\n            pet.append('家务分配未填写')\n            pet.append('关于宠物未填写')\n        item['person_id_life'] = person_id\n        item['smoke'] = life_style['吸烟']\n        item['drink_wine'] = life_style['饮酒']\n        item['exercise_habits'] = life_style['锻炼习惯']\n        item['eating_habits'] = life_style['饮食习惯']\n        item['shopping'] = life_style['逛街购物']\n        item['religious_belief'] = life_style['宗教信仰']\n        item['time_table'] = life_style['作息时间']\n        item['circle_of_communication'] = life_style['交际圈子']\n        item['maximum_consumption'] = life_style['最大消费']\n        item['housework'] = housework[0]\n        item['household_assignment'] = pet[0]\n        item['pet'] = housework[1]\n        item['about_pets'] = pet[1]\n        print('生活方式', life_style)\n        print('家务', housework[0], pet[0])\n        print('宠物', housework[1], pet[1])\n        \"\"\"\n        经济实力\n        \"\"\"\n        economic_dict = {}\n        economic = find_mate[3].find_elements_by_xpath(\n            'div[@class=\"js_box\"]/ul[@class=\"js_list fn-clear\"]')\n        for i in economic:\n            economic_dict = parse(i.text)\n        item['person_id_economic'] = person_id\n        item['salary_economic'] = economic_dict['月薪']\n        item['buy_house_economic'] = economic_dict['购房']\n        item['buy_car_economic'] = economic_dict['购车']\n        item['economic_concept'] = economic_dict['经济观念']\n        item['investment_financing'] = economic_dict['投资理财']\n        item['foreign_debt'] = economic_dict['外债贷款']\n        print('经济实力', economic_dict)\n        \"\"\"\n        工作学习\n        \"\"\"\n        work = find_mate[4].find_elements_by_xpath(\n            'div[@class=\"js_box\"]/ul[@class=\"js_list fn-clear\"]')\n        work_study = {}\n        for i in work:\n            if i.text:\n                temp = parse(i.text)\n                if isinstance(temp, dict):\n                    work_study.update(parse(i.text))\n            else:\n                work_study['职业职位'] = '未填写工作学习方式'\n                work_study['公司行业'] = '未填写工作学习方式'\n                work_study['公司类型'] = '未填写工作学习方式'\n                work_study['福利待遇'] = '未填写工作学习方式'\n                work_study['工作状态'] = '未填写工作学习方式'\n                work_study['调动工作可能性'] = '未填写工作学习方式'\n                work_study['事业与家庭'] = '未填写工作学习方式'\n                work_study['海外工作可能性'] = '未填写工作学习方式'\n                work_study['毕业院校'] = '未填写工作学习方式'\n                work_study['专业类型'] = '未填写工作学习方式'\n                work_study['语言能力'] = '未填写工作学习方式'\n        item['person_id_study'] = person_id\n        item['position'] = work_study['职业职位']\n        item['company'] = work_study['公司行业']\n        item['company_type'] = work_study['公司类型']\n        item['welfare'] = work_study['福利待遇']\n        item['working'] = work_study['工作状态']\n        item['transfer_work'] = work_study['调动工作可能性']\n        item['work_family'] = work_study['事业与家庭']\n        item['overseas_job'] = work_study['海外工作可能性']\n        item['university'] = work_study['毕业院校']\n        item['major'] = work_study['专业类型']\n        item['language'] = work_study['语言能力']\n        print('工作学习', work_study)\n        \"\"\"\n        婚姻观念\n        \"\"\"\n        marriage = find_mate[5].find_elements_by_xpath(\n            'div[@class=\"js_box\"]/ul[@class=\"js_list fn-clear\"]')\n        marriage_family = {}\n        for i in marriage:\n            if i.text:\n                temp = parse(i.text)\n                if isinstance(temp, dict):\n                    marriage_family.update(parse(i.text))\n            else:\n                marriage_family['籍贯'] = '未填写婚姻观念'\n                marriage_family['户口'] = '未填写婚姻观念'\n                marriage_family['国籍'] = '未填写婚姻观念'\n                marriage_family['个性待征'] = '未填写婚姻观念'\n                marriage_family['幽默感'] = '未填写婚姻观念'\n                marriage_family['脾气'] = '未填写婚姻观念'\n                marriage_family['对待感情'] = '未填写婚姻观念'\n                marriage_family['是否要小孩'] = '未填写婚姻观念'\n                marriage_family['何时结婚'] = '未填写婚姻观念'\n                marriage_family['是否能接受异地恋'] = '未填写婚姻观念'\n                marriage_family['理想婚姻'] = '未填写婚姻观念'\n                marriage_family['愿与对方父母同住'] = '未填写婚姻观念'\n                marriage_family['家中排行'] = '未填写婚姻观念'\n                marriage_family['父母情况'] = '未填写婚姻观念'\n                marriage_family['兄弟姐妹'] = '未填写婚姻观念'\n                marriage_family['父母经济情况'] = '未填写婚姻观念'\n                marriage_family['父母医保情况'] = '未填写婚姻观念'\n                marriage_family['父母的工作'] = '未填写婚姻观念'\n        item['person_id_marriage'] = person_id\n        item['address_marriage'] = marriage_family['籍贯']\n        item['registered_residence'] = marriage_family['户口']\n        item['nationality'] = marriage_family['国籍']\n        item['personality'] = marriage_family['个性待征']\n        item['humor'] = marriage_family['幽默感']\n        item['temper'] = marriage_family['脾气']\n        item['feelings'] = marriage_family['对待感情']\n        item['want_child'] = marriage_family['是否要小孩']\n        item['when_mary'] = marriage_family['何时结婚']\n        item['strange_love'] = marriage_family['是否能接受异地恋']\n        item['ideal_marriage'] = marriage_family['理想婚姻']\n        item['live_parents'] = marriage_family['愿与对方父母同住']\n        item['rankings_home'] = marriage_family['家中排行']\n        item['parents_situation'] = marriage_family['父母情况']\n        item['brothers'] = marriage_family['兄弟姐妹']\n        item['parents_economic'] = marriage_family['父母经济情况']\n        item['parents_medical'] = marriage_family['父母医保情况']\n        item['parents_working'] = marriage_family['父母的工作']\n        print('婚姻观念', marriage_family)\n        \"\"\"\n        相片列表\n        \"\"\"\n        print('相片url', response.url)\n        list_images = self.driver.find_elements_by_xpath(\n            '/html//div[@id=\"bigImg\"]//a')\n        print('相片列表', type(list_images), list_images)\n        images = []\n        for i in list_images:\n            image = i.find_element_by_xpath('img').get_attribute('src')\n            images.append(image)\n            print('相片地址', image)\n        item['img_urls'] = images\n        print('执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后')\n        yield item\n\n\ncmdline.execute('scrapy crawl jiayuan_main'.split())\n",
"<docstring token>\n<import token>\n\n\nclass jiayuan_data(RedisSpider):\n pool = redis.ConnectionPool(host='127.0.0.1', port=6379, db=0,\n decode_responses=True)\n r = redis.StrictRedis(connection_pool=pool)\n name = 'jiayuan_main'\n redis_key = 'jiayuan_main:start_urls'\n url_base = (\n 'http://search.jiayuan.com/v2/index.php?key=&sex=f&stc=&sn=default&sv=1&p=%s&pt=163649&ft=off&f=select&mt=d'\n )\n redis_key = 'sinaspider:start_urls'\n login_url = 'http://login.jiayuan.com/'\n start_urls = []\n pre_page_num = 25\n option = webdriver.ChromeOptions()\n option.add_argument('--headless')\n option.add_argument('--window-size=1920,1080')\n prefs = {'profile.managed_default_content_settings.images': 2}\n option.add_experimental_option('prefs', prefs)\n try:\n driver = webdriver.Chrome(chrome_options=option)\n except Exception as e:\n driver.close()\n print('spider出现了异常,关闭', str(e))\n driver.get(login_url)\n time.sleep(3)\n driver.find_element_by_id('login_btn').click()\n driver.find_element_by_id('login_email').clear()\n driver.find_element_by_id('login_email').send_keys(USER_NAME)\n driver.find_element_by_id('login_password').clear()\n driver.find_element_by_id('login_password').send_keys(PASSWD)\n driver.find_element_by_id('login_btn').click()\n cookies = driver.get_cookies()\n for p in range(1, 173649):\n search_url = (\n 'http://search.jiayuan.com/v2/index.php?key=&sex=f&stc=&sn=default&sv=1&p=%s&pt=173649&ft=off&f=select&mt=d'\n % p)\n start_urls.append(search_url)\n \"\"\"\n 下载器中间件在下载器和Scrapy引擎之间,每一个request和response都会通过中间件进行处理。\n 在中间件中,对request进行处理的函数是process_request(request, spider)\n \"\"\"\n\n def start_requests(self):\n for url in self.start_urls:\n yield Request(url=url, callback=self.get_main_info)\n\n def get_main_info(self, response):\n time.sleep(1)\n print('当前的url', response.url)\n print('重新加载url')\n self.driver.get(response.url)\n self.driver.implicitly_wait(3)\n user_list = self.driver.find_elements_by_xpath(\n '/html//ul[@id=\"normal_user_container\"]/li//div[@class=\"user_name\"]/a[@class=\"os_stat\"]'\n )\n if user_list == []:\n print('user_list为空了,解析有问题')\n url_details = []\n for user in user_list:\n main_url_main = user.get_attribute('href')\n print('人员主页url', main_url_main)\n url_details.append(main_url_main)\n print('人员详情url2', len(url_details))\n if url_details != []:\n for url in url_details:\n yield Request(url=url, cookies=self.cookies, callback=self.\n get_details)\n\n def get_details(self, response):\n \"\"\"\n <class 'str'>\n 年 龄:\n 26-29岁之间\n 身 高:\n 169-185厘米\n 民 族:\n 汉族\n 学 历:\n 不限\n 相 册:\n 有照片\n 婚姻状况:\n 未婚\n 居 住 地:\n 湖北十堰\n 诚 信:\n 不限\n 将这种类型的文字全部转成{'学历': '不限', '婚姻状况': '未婚', '居住地': '湖北十堰', '相册': '有照片', '身高': '169-185厘米', '民族': '汉族', '诚信': '不限', '年龄': '26-29岁之间'}这种dict方便入库\n \"\"\"\n pass\n\n def parse(str1):\n temp_list = str1.split('\\n')\n result = {}\n result_str = ''\n if len(temp_list) > 1:\n for i in range(len(temp_list)):\n if i % 2 == 0:\n result[temp_list[i].replace(' ', '').replace(':', '')\n ] = temp_list[i + 1]\n return result\n else:\n result_str = str1\n return result_str\n item = JiayuanItem()\n self.driver.get(response.url)\n self.driver.implicitly_wait(3)\n print('打开浏览器')\n print('当前的url', response.url)\n age_info = self.driver.find_element_by_xpath(\n '/html//h6[@class=\"member_name\"]').text\n person_id = response.url[response.url.rfind('/') + 1:response.url.\n index('?')]\n print('年龄地址信息', type(age_info), age_info)\n address = self.driver.find_elements_by_xpath(\n '/html//h6[@class=\"member_name\"]/a')\n str_address = ''\n str_sheng = 
address[0].get_attribute('text')\n str_shi = address[1].get_attribute('text')\n print('人员地址', str_sheng + 'sssss' + str_shi)\n \"\"\"\n 人个信息\n \"\"\"\n person_info = self.driver.find_elements_by_xpath(\n '/html//ul[@class=\"member_info_list fn-clear\"]')\n person_dict = {}\n for i in person_info:\n person_dict = parse(i.text)\n print('个人信息', person_dict)\n \"\"\"\n 处理item,对应mysql的person_info表\n \"\"\"\n item['person_id'] = person_id\n item['province'] = str_sheng\n item['municipal'] = str_shi\n nick_name_info = self.driver.find_elements_by_xpath(\n '/html//div[@class=\"member_info_r yh\"]/h4')\n nick_name = nick_name_info[0].text[0:nick_name_info[0].text.index('I')]\n print('昵称', nick_name)\n item['nike_name'] = nick_name\n item['education'] = person_dict['学历']\n item['height'] = person_dict['身高']\n item['buy_car'] = person_dict['购车']\n item['salary'] = person_dict['月薪']\n item['housing'] = person_dict['住房']\n item['weight'] = person_dict['体重']\n item['constellation'] = person_dict['星座']\n item['nation'] = person_dict['民族']\n item['zodiac'] = person_dict['属相']\n item['blood_type'] = person_dict['血型']\n item['age'] = age_info[0:age_info.index(',')]\n print('年龄', age_info[0:age_info.index(',')])\n item['address'] = str_sheng + str_shi\n item['age_info'] = age_info\n item['image_dir'] = nick_name + '_' + item['age'] + '_' + person_id\n item['url'] = response.url\n item['introduce_oneself'] = self.driver.find_element_by_xpath(\n '/html//div[@class=\"main_1000 mt15 fn-clear\"]//div[@class=\"js_text\"]'\n ).text\n print('个性短语', item['introduce_oneself'])\n item['interest_label'] = ''\n item['personality_label'] = ''\n try:\n self.driver.find_element_by_xpath(\n '/html//div[@class=\"d_more DNA_xq_more DNA_xq_more_a\"]/a'\n ).click()\n time.sleep(1)\n gexing_info = self.driver.find_elements_by_xpath(\n '/html//div[@class=\"test4\"]//div[@class=\"list_a fn-clear\"]')\n print('aaa', type(gexing_info), gexing_info)\n gexing_tag = ''\n for i in gexing_info:\n gexing_tag += i.text\n item['personality_label'] = ''.join(gexing_tag)\n except Exception as e:\n item['personality_label'] = '还没有填写个性元素'\n print('个性', item['personality_label'])\n try:\n self.driver.find_element_by_xpath(\n '/html//div[@class=\"d_more DNA_xq_more\"]/a').click()\n self.driver.implicitly_wait(1)\n aihao_info = self.driver.find_elements_by_xpath(\n '/html/body/div[6]/div[1]/div[3]/div/div[1]/div[1]/ul')\n print('bbb', type(aihao_info), aihao_info)\n aihao_tag = ''\n for i in aihao_info:\n aihao_tag += i.text\n item['interest_label'] = ''.join(aihao_tag)\n except Exception as e:\n item['interest_label'] = '还没有填写兴趣爱好'\n print('她的兴趣爱好', item['interest_label'])\n find_mate = self.driver.find_elements_by_xpath(\n '/html//div[@class=\"bg_white mt15\"]')\n \"\"\"\n 择偶要求\n \"\"\"\n mate = find_mate[1].find_elements_by_xpath(\n 'div[@class=\"js_box\"]/ul[@class=\"js_list fn-clear\"]')\n mate_dict = {}\n for i in mate:\n mate_dict = parse(i.text)\n item['person_id_mate'] = person_id\n item['age_mate'] = mate_dict['年龄']\n item['height_mate'] = mate_dict['身高']\n item['nation_mate'] = mate_dict['民族']\n item['education_mate'] = mate_dict['学历']\n item['image_mate'] = mate_dict['相册']\n item['marital_status'] = mate_dict['婚姻状况']\n item['address_mate'] = mate_dict['居住地']\n item['sincerity_mate'] = mate_dict['诚信']\n print('择偶要求', mate_dict)\n \"\"\"\n 生活方式\n \"\"\"\n life = find_mate[2].find_elements_by_xpath(\n 'div[@class=\"js_box\"]/ul[@class=\"js_list fn-clear\"]')\n life_style = {}\n for i in life:\n temp = parse(i.text)\n if isinstance(temp, 
dict):\n life_style.update(parse(i.text))\n else:\n life_style['吸烟'] = '未填写生活方式'\n life_style['饮酒'] = '未填写生活方式'\n life_style['锻炼习惯'] = '未填写生活方式'\n life_style['饮食习惯'] = '未填写生活方式'\n life_style['逛街购物'] = '未填写生活方式'\n life_style['宗教信仰'] = '未填写生活方式'\n life_style['作息时间'] = '未填写生活方式'\n life_style['交际圈子'] = '未填写生活方式'\n life_style['最大消费'] = '未填写生活方式'\n try:\n housework = []\n pet = []\n jiawu1 = find_mate[2].find_elements_by_xpath(\n 'div[@class=\"js_box\"]//div[@class=\"pt25 fn-clear\"]//dd[@class=\"cur\"]'\n )\n for i in jiawu1:\n housework.append(i.text)\n print('家务1 ', i.text)\n jiawu2 = find_mate[2].find_elements_by_xpath(\n 'div[@class=\"js_box\"]//div[@class=\"fl pr\"]/em')\n for i in jiawu2:\n pet.append(i.text)\n print('家务2 ', i.text)\n except Exception as e:\n housework.append('家务水平程度未填写')\n housework.append('宠物喜欢程度未填写')\n pet.append('家务分配未填写')\n pet.append('关于宠物未填写')\n item['person_id_life'] = person_id\n item['smoke'] = life_style['吸烟']\n item['drink_wine'] = life_style['饮酒']\n item['exercise_habits'] = life_style['锻炼习惯']\n item['eating_habits'] = life_style['饮食习惯']\n item['shopping'] = life_style['逛街购物']\n item['religious_belief'] = life_style['宗教信仰']\n item['time_table'] = life_style['作息时间']\n item['circle_of_communication'] = life_style['交际圈子']\n item['maximum_consumption'] = life_style['最大消费']\n item['housework'] = housework[0]\n item['household_assignment'] = pet[0]\n item['pet'] = housework[1]\n item['about_pets'] = pet[1]\n print('生活方式', life_style)\n print('家务', housework[0], pet[0])\n print('宠物', housework[1], pet[1])\n \"\"\"\n 经济实力\n \"\"\"\n economic_dict = {}\n economic = find_mate[3].find_elements_by_xpath(\n 'div[@class=\"js_box\"]/ul[@class=\"js_list fn-clear\"]')\n for i in economic:\n economic_dict = parse(i.text)\n item['person_id_economic'] = person_id\n item['salary_economic'] = economic_dict['月薪']\n item['buy_house_economic'] = economic_dict['购房']\n item['buy_car_economic'] = economic_dict['购车']\n item['economic_concept'] = economic_dict['经济观念']\n item['investment_financing'] = economic_dict['投资理财']\n item['foreign_debt'] = economic_dict['外债贷款']\n print('经济实力', economic_dict)\n \"\"\"\n 工作学习\n \"\"\"\n work = find_mate[4].find_elements_by_xpath(\n 'div[@class=\"js_box\"]/ul[@class=\"js_list fn-clear\"]')\n work_study = {}\n for i in work:\n if i.text:\n temp = parse(i.text)\n if isinstance(temp, dict):\n work_study.update(parse(i.text))\n else:\n work_study['职业职位'] = '未填写工作学习方式'\n work_study['公司行业'] = '未填写工作学习方式'\n work_study['公司类型'] = '未填写工作学习方式'\n work_study['福利待遇'] = '未填写工作学习方式'\n work_study['工作状态'] = '未填写工作学习方式'\n work_study['调动工作可能性'] = '未填写工作学习方式'\n work_study['事业与家庭'] = '未填写工作学习方式'\n work_study['海外工作可能性'] = '未填写工作学习方式'\n work_study['毕业院校'] = '未填写工作学习方式'\n work_study['专业类型'] = '未填写工作学习方式'\n work_study['语言能力'] = '未填写工作学习方式'\n item['person_id_study'] = person_id\n item['position'] = work_study['职业职位']\n item['company'] = work_study['公司行业']\n item['company_type'] = work_study['公司类型']\n item['welfare'] = work_study['福利待遇']\n item['working'] = work_study['工作状态']\n item['transfer_work'] = work_study['调动工作可能性']\n item['work_family'] = work_study['事业与家庭']\n item['overseas_job'] = work_study['海外工作可能性']\n item['university'] = work_study['毕业院校']\n item['major'] = work_study['专业类型']\n item['language'] = work_study['语言能力']\n print('工作学习', work_study)\n \"\"\"\n 婚姻观念\n \"\"\"\n marriage = find_mate[5].find_elements_by_xpath(\n 'div[@class=\"js_box\"]/ul[@class=\"js_list fn-clear\"]')\n marriage_family = {}\n for i in marriage:\n if i.text:\n temp = parse(i.text)\n if 
isinstance(temp, dict):\n marriage_family.update(parse(i.text))\n else:\n marriage_family['籍贯'] = '未填写婚姻观念'\n marriage_family['户口'] = '未填写婚姻观念'\n marriage_family['国籍'] = '未填写婚姻观念'\n marriage_family['个性待征'] = '未填写婚姻观念'\n marriage_family['幽默感'] = '未填写婚姻观念'\n marriage_family['脾气'] = '未填写婚姻观念'\n marriage_family['对待感情'] = '未填写婚姻观念'\n marriage_family['是否要小孩'] = '未填写婚姻观念'\n marriage_family['何时结婚'] = '未填写婚姻观念'\n marriage_family['是否能接受异地恋'] = '未填写婚姻观念'\n marriage_family['理想婚姻'] = '未填写婚姻观念'\n marriage_family['愿与对方父母同住'] = '未填写婚姻观念'\n marriage_family['家中排行'] = '未填写婚姻观念'\n marriage_family['父母情况'] = '未填写婚姻观念'\n marriage_family['兄弟姐妹'] = '未填写婚姻观念'\n marriage_family['父母经济情况'] = '未填写婚姻观念'\n marriage_family['父母医保情况'] = '未填写婚姻观念'\n marriage_family['父母的工作'] = '未填写婚姻观念'\n item['person_id_marriage'] = person_id\n item['address_marriage'] = marriage_family['籍贯']\n item['registered_residence'] = marriage_family['户口']\n item['nationality'] = marriage_family['国籍']\n item['personality'] = marriage_family['个性待征']\n item['humor'] = marriage_family['幽默感']\n item['temper'] = marriage_family['脾气']\n item['feelings'] = marriage_family['对待感情']\n item['want_child'] = marriage_family['是否要小孩']\n item['when_mary'] = marriage_family['何时结婚']\n item['strange_love'] = marriage_family['是否能接受异地恋']\n item['ideal_marriage'] = marriage_family['理想婚姻']\n item['live_parents'] = marriage_family['愿与对方父母同住']\n item['rankings_home'] = marriage_family['家中排行']\n item['parents_situation'] = marriage_family['父母情况']\n item['brothers'] = marriage_family['兄弟姐妹']\n item['parents_economic'] = marriage_family['父母经济情况']\n item['parents_medical'] = marriage_family['父母医保情况']\n item['parents_working'] = marriage_family['父母的工作']\n print('婚姻观念', marriage_family)\n \"\"\"\n 相片列表\n \"\"\"\n print('相片url', response.url)\n list_images = self.driver.find_elements_by_xpath(\n '/html//div[@id=\"bigImg\"]//a')\n print('相片列表', type(list_images), list_images)\n images = []\n for i in list_images:\n image = i.find_element_by_xpath('img').get_attribute('src')\n images.append(image)\n print('相片地址', image)\n item['img_urls'] = images\n print('执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后')\n yield item\n\n\ncmdline.execute('scrapy crawl jiayuan_main'.split())\n",
"<docstring token>\n<import token>\n\n\nclass jiayuan_data(RedisSpider):\n pool = redis.ConnectionPool(host='127.0.0.1', port=6379, db=0,\n decode_responses=True)\n r = redis.StrictRedis(connection_pool=pool)\n name = 'jiayuan_main'\n redis_key = 'jiayuan_main:start_urls'\n url_base = (\n 'http://search.jiayuan.com/v2/index.php?key=&sex=f&stc=&sn=default&sv=1&p=%s&pt=163649&ft=off&f=select&mt=d'\n )\n redis_key = 'sinaspider:start_urls'\n login_url = 'http://login.jiayuan.com/'\n start_urls = []\n pre_page_num = 25\n option = webdriver.ChromeOptions()\n option.add_argument('--headless')\n option.add_argument('--window-size=1920,1080')\n prefs = {'profile.managed_default_content_settings.images': 2}\n option.add_experimental_option('prefs', prefs)\n try:\n driver = webdriver.Chrome(chrome_options=option)\n except Exception as e:\n driver.close()\n print('spider出现了异常,关闭', str(e))\n driver.get(login_url)\n time.sleep(3)\n driver.find_element_by_id('login_btn').click()\n driver.find_element_by_id('login_email').clear()\n driver.find_element_by_id('login_email').send_keys(USER_NAME)\n driver.find_element_by_id('login_password').clear()\n driver.find_element_by_id('login_password').send_keys(PASSWD)\n driver.find_element_by_id('login_btn').click()\n cookies = driver.get_cookies()\n for p in range(1, 173649):\n search_url = (\n 'http://search.jiayuan.com/v2/index.php?key=&sex=f&stc=&sn=default&sv=1&p=%s&pt=173649&ft=off&f=select&mt=d'\n % p)\n start_urls.append(search_url)\n \"\"\"\n 下载器中间件在下载器和Scrapy引擎之间,每一个request和response都会通过中间件进行处理。\n 在中间件中,对request进行处理的函数是process_request(request, spider)\n \"\"\"\n\n def start_requests(self):\n for url in self.start_urls:\n yield Request(url=url, callback=self.get_main_info)\n\n def get_main_info(self, response):\n time.sleep(1)\n print('当前的url', response.url)\n print('重新加载url')\n self.driver.get(response.url)\n self.driver.implicitly_wait(3)\n user_list = self.driver.find_elements_by_xpath(\n '/html//ul[@id=\"normal_user_container\"]/li//div[@class=\"user_name\"]/a[@class=\"os_stat\"]'\n )\n if user_list == []:\n print('user_list为空了,解析有问题')\n url_details = []\n for user in user_list:\n main_url_main = user.get_attribute('href')\n print('人员主页url', main_url_main)\n url_details.append(main_url_main)\n print('人员详情url2', len(url_details))\n if url_details != []:\n for url in url_details:\n yield Request(url=url, cookies=self.cookies, callback=self.\n get_details)\n\n def get_details(self, response):\n \"\"\"\n <class 'str'>\n 年 龄:\n 26-29岁之间\n 身 高:\n 169-185厘米\n 民 族:\n 汉族\n 学 历:\n 不限\n 相 册:\n 有照片\n 婚姻状况:\n 未婚\n 居 住 地:\n 湖北十堰\n 诚 信:\n 不限\n 将这种类型的文字全部转成{'学历': '不限', '婚姻状况': '未婚', '居住地': '湖北十堰', '相册': '有照片', '身高': '169-185厘米', '民族': '汉族', '诚信': '不限', '年龄': '26-29岁之间'}这种dict方便入库\n \"\"\"\n pass\n\n def parse(str1):\n temp_list = str1.split('\\n')\n result = {}\n result_str = ''\n if len(temp_list) > 1:\n for i in range(len(temp_list)):\n if i % 2 == 0:\n result[temp_list[i].replace(' ', '').replace(':', '')\n ] = temp_list[i + 1]\n return result\n else:\n result_str = str1\n return result_str\n item = JiayuanItem()\n self.driver.get(response.url)\n self.driver.implicitly_wait(3)\n print('打开浏览器')\n print('当前的url', response.url)\n age_info = self.driver.find_element_by_xpath(\n '/html//h6[@class=\"member_name\"]').text\n person_id = response.url[response.url.rfind('/') + 1:response.url.\n index('?')]\n print('年龄地址信息', type(age_info), age_info)\n address = self.driver.find_elements_by_xpath(\n '/html//h6[@class=\"member_name\"]/a')\n str_address = ''\n str_sheng = 
address[0].get_attribute('text')\n str_shi = address[1].get_attribute('text')\n print('人员地址', str_sheng + 'sssss' + str_shi)\n \"\"\"\n 人个信息\n \"\"\"\n person_info = self.driver.find_elements_by_xpath(\n '/html//ul[@class=\"member_info_list fn-clear\"]')\n person_dict = {}\n for i in person_info:\n person_dict = parse(i.text)\n print('个人信息', person_dict)\n \"\"\"\n 处理item,对应mysql的person_info表\n \"\"\"\n item['person_id'] = person_id\n item['province'] = str_sheng\n item['municipal'] = str_shi\n nick_name_info = self.driver.find_elements_by_xpath(\n '/html//div[@class=\"member_info_r yh\"]/h4')\n nick_name = nick_name_info[0].text[0:nick_name_info[0].text.index('I')]\n print('昵称', nick_name)\n item['nike_name'] = nick_name\n item['education'] = person_dict['学历']\n item['height'] = person_dict['身高']\n item['buy_car'] = person_dict['购车']\n item['salary'] = person_dict['月薪']\n item['housing'] = person_dict['住房']\n item['weight'] = person_dict['体重']\n item['constellation'] = person_dict['星座']\n item['nation'] = person_dict['民族']\n item['zodiac'] = person_dict['属相']\n item['blood_type'] = person_dict['血型']\n item['age'] = age_info[0:age_info.index(',')]\n print('年龄', age_info[0:age_info.index(',')])\n item['address'] = str_sheng + str_shi\n item['age_info'] = age_info\n item['image_dir'] = nick_name + '_' + item['age'] + '_' + person_id\n item['url'] = response.url\n item['introduce_oneself'] = self.driver.find_element_by_xpath(\n '/html//div[@class=\"main_1000 mt15 fn-clear\"]//div[@class=\"js_text\"]'\n ).text\n print('个性短语', item['introduce_oneself'])\n item['interest_label'] = ''\n item['personality_label'] = ''\n try:\n self.driver.find_element_by_xpath(\n '/html//div[@class=\"d_more DNA_xq_more DNA_xq_more_a\"]/a'\n ).click()\n time.sleep(1)\n gexing_info = self.driver.find_elements_by_xpath(\n '/html//div[@class=\"test4\"]//div[@class=\"list_a fn-clear\"]')\n print('aaa', type(gexing_info), gexing_info)\n gexing_tag = ''\n for i in gexing_info:\n gexing_tag += i.text\n item['personality_label'] = ''.join(gexing_tag)\n except Exception as e:\n item['personality_label'] = '还没有填写个性元素'\n print('个性', item['personality_label'])\n try:\n self.driver.find_element_by_xpath(\n '/html//div[@class=\"d_more DNA_xq_more\"]/a').click()\n self.driver.implicitly_wait(1)\n aihao_info = self.driver.find_elements_by_xpath(\n '/html/body/div[6]/div[1]/div[3]/div/div[1]/div[1]/ul')\n print('bbb', type(aihao_info), aihao_info)\n aihao_tag = ''\n for i in aihao_info:\n aihao_tag += i.text\n item['interest_label'] = ''.join(aihao_tag)\n except Exception as e:\n item['interest_label'] = '还没有填写兴趣爱好'\n print('她的兴趣爱好', item['interest_label'])\n find_mate = self.driver.find_elements_by_xpath(\n '/html//div[@class=\"bg_white mt15\"]')\n \"\"\"\n 择偶要求\n \"\"\"\n mate = find_mate[1].find_elements_by_xpath(\n 'div[@class=\"js_box\"]/ul[@class=\"js_list fn-clear\"]')\n mate_dict = {}\n for i in mate:\n mate_dict = parse(i.text)\n item['person_id_mate'] = person_id\n item['age_mate'] = mate_dict['年龄']\n item['height_mate'] = mate_dict['身高']\n item['nation_mate'] = mate_dict['民族']\n item['education_mate'] = mate_dict['学历']\n item['image_mate'] = mate_dict['相册']\n item['marital_status'] = mate_dict['婚姻状况']\n item['address_mate'] = mate_dict['居住地']\n item['sincerity_mate'] = mate_dict['诚信']\n print('择偶要求', mate_dict)\n \"\"\"\n 生活方式\n \"\"\"\n life = find_mate[2].find_elements_by_xpath(\n 'div[@class=\"js_box\"]/ul[@class=\"js_list fn-clear\"]')\n life_style = {}\n for i in life:\n temp = parse(i.text)\n if isinstance(temp, 
dict):\n life_style.update(parse(i.text))\n else:\n life_style['吸烟'] = '未填写生活方式'\n life_style['饮酒'] = '未填写生活方式'\n life_style['锻炼习惯'] = '未填写生活方式'\n life_style['饮食习惯'] = '未填写生活方式'\n life_style['逛街购物'] = '未填写生活方式'\n life_style['宗教信仰'] = '未填写生活方式'\n life_style['作息时间'] = '未填写生活方式'\n life_style['交际圈子'] = '未填写生活方式'\n life_style['最大消费'] = '未填写生活方式'\n try:\n housework = []\n pet = []\n jiawu1 = find_mate[2].find_elements_by_xpath(\n 'div[@class=\"js_box\"]//div[@class=\"pt25 fn-clear\"]//dd[@class=\"cur\"]'\n )\n for i in jiawu1:\n housework.append(i.text)\n print('家务1 ', i.text)\n jiawu2 = find_mate[2].find_elements_by_xpath(\n 'div[@class=\"js_box\"]//div[@class=\"fl pr\"]/em')\n for i in jiawu2:\n pet.append(i.text)\n print('家务2 ', i.text)\n except Exception as e:\n housework.append('家务水平程度未填写')\n housework.append('宠物喜欢程度未填写')\n pet.append('家务分配未填写')\n pet.append('关于宠物未填写')\n item['person_id_life'] = person_id\n item['smoke'] = life_style['吸烟']\n item['drink_wine'] = life_style['饮酒']\n item['exercise_habits'] = life_style['锻炼习惯']\n item['eating_habits'] = life_style['饮食习惯']\n item['shopping'] = life_style['逛街购物']\n item['religious_belief'] = life_style['宗教信仰']\n item['time_table'] = life_style['作息时间']\n item['circle_of_communication'] = life_style['交际圈子']\n item['maximum_consumption'] = life_style['最大消费']\n item['housework'] = housework[0]\n item['household_assignment'] = pet[0]\n item['pet'] = housework[1]\n item['about_pets'] = pet[1]\n print('生活方式', life_style)\n print('家务', housework[0], pet[0])\n print('宠物', housework[1], pet[1])\n \"\"\"\n 经济实力\n \"\"\"\n economic_dict = {}\n economic = find_mate[3].find_elements_by_xpath(\n 'div[@class=\"js_box\"]/ul[@class=\"js_list fn-clear\"]')\n for i in economic:\n economic_dict = parse(i.text)\n item['person_id_economic'] = person_id\n item['salary_economic'] = economic_dict['月薪']\n item['buy_house_economic'] = economic_dict['购房']\n item['buy_car_economic'] = economic_dict['购车']\n item['economic_concept'] = economic_dict['经济观念']\n item['investment_financing'] = economic_dict['投资理财']\n item['foreign_debt'] = economic_dict['外债贷款']\n print('经济实力', economic_dict)\n \"\"\"\n 工作学习\n \"\"\"\n work = find_mate[4].find_elements_by_xpath(\n 'div[@class=\"js_box\"]/ul[@class=\"js_list fn-clear\"]')\n work_study = {}\n for i in work:\n if i.text:\n temp = parse(i.text)\n if isinstance(temp, dict):\n work_study.update(parse(i.text))\n else:\n work_study['职业职位'] = '未填写工作学习方式'\n work_study['公司行业'] = '未填写工作学习方式'\n work_study['公司类型'] = '未填写工作学习方式'\n work_study['福利待遇'] = '未填写工作学习方式'\n work_study['工作状态'] = '未填写工作学习方式'\n work_study['调动工作可能性'] = '未填写工作学习方式'\n work_study['事业与家庭'] = '未填写工作学习方式'\n work_study['海外工作可能性'] = '未填写工作学习方式'\n work_study['毕业院校'] = '未填写工作学习方式'\n work_study['专业类型'] = '未填写工作学习方式'\n work_study['语言能力'] = '未填写工作学习方式'\n item['person_id_study'] = person_id\n item['position'] = work_study['职业职位']\n item['company'] = work_study['公司行业']\n item['company_type'] = work_study['公司类型']\n item['welfare'] = work_study['福利待遇']\n item['working'] = work_study['工作状态']\n item['transfer_work'] = work_study['调动工作可能性']\n item['work_family'] = work_study['事业与家庭']\n item['overseas_job'] = work_study['海外工作可能性']\n item['university'] = work_study['毕业院校']\n item['major'] = work_study['专业类型']\n item['language'] = work_study['语言能力']\n print('工作学习', work_study)\n \"\"\"\n 婚姻观念\n \"\"\"\n marriage = find_mate[5].find_elements_by_xpath(\n 'div[@class=\"js_box\"]/ul[@class=\"js_list fn-clear\"]')\n marriage_family = {}\n for i in marriage:\n if i.text:\n temp = parse(i.text)\n if 
isinstance(temp, dict):\n marriage_family.update(parse(i.text))\n else:\n marriage_family['籍贯'] = '未填写婚姻观念'\n marriage_family['户口'] = '未填写婚姻观念'\n marriage_family['国籍'] = '未填写婚姻观念'\n marriage_family['个性待征'] = '未填写婚姻观念'\n marriage_family['幽默感'] = '未填写婚姻观念'\n marriage_family['脾气'] = '未填写婚姻观念'\n marriage_family['对待感情'] = '未填写婚姻观念'\n marriage_family['是否要小孩'] = '未填写婚姻观念'\n marriage_family['何时结婚'] = '未填写婚姻观念'\n marriage_family['是否能接受异地恋'] = '未填写婚姻观念'\n marriage_family['理想婚姻'] = '未填写婚姻观念'\n marriage_family['愿与对方父母同住'] = '未填写婚姻观念'\n marriage_family['家中排行'] = '未填写婚姻观念'\n marriage_family['父母情况'] = '未填写婚姻观念'\n marriage_family['兄弟姐妹'] = '未填写婚姻观念'\n marriage_family['父母经济情况'] = '未填写婚姻观念'\n marriage_family['父母医保情况'] = '未填写婚姻观念'\n marriage_family['父母的工作'] = '未填写婚姻观念'\n item['person_id_marriage'] = person_id\n item['address_marriage'] = marriage_family['籍贯']\n item['registered_residence'] = marriage_family['户口']\n item['nationality'] = marriage_family['国籍']\n item['personality'] = marriage_family['个性待征']\n item['humor'] = marriage_family['幽默感']\n item['temper'] = marriage_family['脾气']\n item['feelings'] = marriage_family['对待感情']\n item['want_child'] = marriage_family['是否要小孩']\n item['when_mary'] = marriage_family['何时结婚']\n item['strange_love'] = marriage_family['是否能接受异地恋']\n item['ideal_marriage'] = marriage_family['理想婚姻']\n item['live_parents'] = marriage_family['愿与对方父母同住']\n item['rankings_home'] = marriage_family['家中排行']\n item['parents_situation'] = marriage_family['父母情况']\n item['brothers'] = marriage_family['兄弟姐妹']\n item['parents_economic'] = marriage_family['父母经济情况']\n item['parents_medical'] = marriage_family['父母医保情况']\n item['parents_working'] = marriage_family['父母的工作']\n print('婚姻观念', marriage_family)\n \"\"\"\n 相片列表\n \"\"\"\n print('相片url', response.url)\n list_images = self.driver.find_elements_by_xpath(\n '/html//div[@id=\"bigImg\"]//a')\n print('相片列表', type(list_images), list_images)\n images = []\n for i in list_images:\n image = i.find_element_by_xpath('img').get_attribute('src')\n images.append(image)\n print('相片地址', image)\n item['img_urls'] = images\n print('执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后')\n yield item\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass jiayuan_data(RedisSpider):\n pool = redis.ConnectionPool(host='127.0.0.1', port=6379, db=0,\n decode_responses=True)\n r = redis.StrictRedis(connection_pool=pool)\n name = 'jiayuan_main'\n redis_key = 'jiayuan_main:start_urls'\n url_base = (\n 'http://search.jiayuan.com/v2/index.php?key=&sex=f&stc=&sn=default&sv=1&p=%s&pt=163649&ft=off&f=select&mt=d'\n )\n redis_key = 'sinaspider:start_urls'\n login_url = 'http://login.jiayuan.com/'\n start_urls = []\n pre_page_num = 25\n option = webdriver.ChromeOptions()\n option.add_argument('--headless')\n option.add_argument('--window-size=1920,1080')\n prefs = {'profile.managed_default_content_settings.images': 2}\n option.add_experimental_option('prefs', prefs)\n try:\n driver = webdriver.Chrome(chrome_options=option)\n except Exception as e:\n driver.close()\n print('spider出现了异常,关闭', str(e))\n driver.get(login_url)\n time.sleep(3)\n driver.find_element_by_id('login_btn').click()\n driver.find_element_by_id('login_email').clear()\n driver.find_element_by_id('login_email').send_keys(USER_NAME)\n driver.find_element_by_id('login_password').clear()\n driver.find_element_by_id('login_password').send_keys(PASSWD)\n driver.find_element_by_id('login_btn').click()\n cookies = driver.get_cookies()\n for p in range(1, 173649):\n search_url = (\n 'http://search.jiayuan.com/v2/index.php?key=&sex=f&stc=&sn=default&sv=1&p=%s&pt=173649&ft=off&f=select&mt=d'\n % p)\n start_urls.append(search_url)\n <docstring token>\n\n def start_requests(self):\n for url in self.start_urls:\n yield Request(url=url, callback=self.get_main_info)\n\n def get_main_info(self, response):\n time.sleep(1)\n print('当前的url', response.url)\n print('重新加载url')\n self.driver.get(response.url)\n self.driver.implicitly_wait(3)\n user_list = self.driver.find_elements_by_xpath(\n '/html//ul[@id=\"normal_user_container\"]/li//div[@class=\"user_name\"]/a[@class=\"os_stat\"]'\n )\n if user_list == []:\n print('user_list为空了,解析有问题')\n url_details = []\n for user in user_list:\n main_url_main = user.get_attribute('href')\n print('人员主页url', main_url_main)\n url_details.append(main_url_main)\n print('人员详情url2', len(url_details))\n if url_details != []:\n for url in url_details:\n yield Request(url=url, cookies=self.cookies, callback=self.\n get_details)\n\n def get_details(self, response):\n \"\"\"\n <class 'str'>\n 年 龄:\n 26-29岁之间\n 身 高:\n 169-185厘米\n 民 族:\n 汉族\n 学 历:\n 不限\n 相 册:\n 有照片\n 婚姻状况:\n 未婚\n 居 住 地:\n 湖北十堰\n 诚 信:\n 不限\n 将这种类型的文字全部转成{'学历': '不限', '婚姻状况': '未婚', '居住地': '湖北十堰', '相册': '有照片', '身高': '169-185厘米', '民族': '汉族', '诚信': '不限', '年龄': '26-29岁之间'}这种dict方便入库\n \"\"\"\n pass\n\n def parse(str1):\n temp_list = str1.split('\\n')\n result = {}\n result_str = ''\n if len(temp_list) > 1:\n for i in range(len(temp_list)):\n if i % 2 == 0:\n result[temp_list[i].replace(' ', '').replace(':', '')\n ] = temp_list[i + 1]\n return result\n else:\n result_str = str1\n return result_str\n item = JiayuanItem()\n self.driver.get(response.url)\n self.driver.implicitly_wait(3)\n print('打开浏览器')\n print('当前的url', response.url)\n age_info = self.driver.find_element_by_xpath(\n '/html//h6[@class=\"member_name\"]').text\n person_id = response.url[response.url.rfind('/') + 1:response.url.\n index('?')]\n print('年龄地址信息', type(age_info), age_info)\n address = self.driver.find_elements_by_xpath(\n '/html//h6[@class=\"member_name\"]/a')\n str_address = ''\n str_sheng = address[0].get_attribute('text')\n str_shi = address[1].get_attribute('text')\n print('人员地址', str_sheng + 'sssss' + 
str_shi)\n \"\"\"\n 人个信息\n \"\"\"\n person_info = self.driver.find_elements_by_xpath(\n '/html//ul[@class=\"member_info_list fn-clear\"]')\n person_dict = {}\n for i in person_info:\n person_dict = parse(i.text)\n print('个人信息', person_dict)\n \"\"\"\n 处理item,对应mysql的person_info表\n \"\"\"\n item['person_id'] = person_id\n item['province'] = str_sheng\n item['municipal'] = str_shi\n nick_name_info = self.driver.find_elements_by_xpath(\n '/html//div[@class=\"member_info_r yh\"]/h4')\n nick_name = nick_name_info[0].text[0:nick_name_info[0].text.index('I')]\n print('昵称', nick_name)\n item['nike_name'] = nick_name\n item['education'] = person_dict['学历']\n item['height'] = person_dict['身高']\n item['buy_car'] = person_dict['购车']\n item['salary'] = person_dict['月薪']\n item['housing'] = person_dict['住房']\n item['weight'] = person_dict['体重']\n item['constellation'] = person_dict['星座']\n item['nation'] = person_dict['民族']\n item['zodiac'] = person_dict['属相']\n item['blood_type'] = person_dict['血型']\n item['age'] = age_info[0:age_info.index(',')]\n print('年龄', age_info[0:age_info.index(',')])\n item['address'] = str_sheng + str_shi\n item['age_info'] = age_info\n item['image_dir'] = nick_name + '_' + item['age'] + '_' + person_id\n item['url'] = response.url\n item['introduce_oneself'] = self.driver.find_element_by_xpath(\n '/html//div[@class=\"main_1000 mt15 fn-clear\"]//div[@class=\"js_text\"]'\n ).text\n print('个性短语', item['introduce_oneself'])\n item['interest_label'] = ''\n item['personality_label'] = ''\n try:\n self.driver.find_element_by_xpath(\n '/html//div[@class=\"d_more DNA_xq_more DNA_xq_more_a\"]/a'\n ).click()\n time.sleep(1)\n gexing_info = self.driver.find_elements_by_xpath(\n '/html//div[@class=\"test4\"]//div[@class=\"list_a fn-clear\"]')\n print('aaa', type(gexing_info), gexing_info)\n gexing_tag = ''\n for i in gexing_info:\n gexing_tag += i.text\n item['personality_label'] = ''.join(gexing_tag)\n except Exception as e:\n item['personality_label'] = '还没有填写个性元素'\n print('个性', item['personality_label'])\n try:\n self.driver.find_element_by_xpath(\n '/html//div[@class=\"d_more DNA_xq_more\"]/a').click()\n self.driver.implicitly_wait(1)\n aihao_info = self.driver.find_elements_by_xpath(\n '/html/body/div[6]/div[1]/div[3]/div/div[1]/div[1]/ul')\n print('bbb', type(aihao_info), aihao_info)\n aihao_tag = ''\n for i in aihao_info:\n aihao_tag += i.text\n item['interest_label'] = ''.join(aihao_tag)\n except Exception as e:\n item['interest_label'] = '还没有填写兴趣爱好'\n print('她的兴趣爱好', item['interest_label'])\n find_mate = self.driver.find_elements_by_xpath(\n '/html//div[@class=\"bg_white mt15\"]')\n \"\"\"\n 择偶要求\n \"\"\"\n mate = find_mate[1].find_elements_by_xpath(\n 'div[@class=\"js_box\"]/ul[@class=\"js_list fn-clear\"]')\n mate_dict = {}\n for i in mate:\n mate_dict = parse(i.text)\n item['person_id_mate'] = person_id\n item['age_mate'] = mate_dict['年龄']\n item['height_mate'] = mate_dict['身高']\n item['nation_mate'] = mate_dict['民族']\n item['education_mate'] = mate_dict['学历']\n item['image_mate'] = mate_dict['相册']\n item['marital_status'] = mate_dict['婚姻状况']\n item['address_mate'] = mate_dict['居住地']\n item['sincerity_mate'] = mate_dict['诚信']\n print('择偶要求', mate_dict)\n \"\"\"\n 生活方式\n \"\"\"\n life = find_mate[2].find_elements_by_xpath(\n 'div[@class=\"js_box\"]/ul[@class=\"js_list fn-clear\"]')\n life_style = {}\n for i in life:\n temp = parse(i.text)\n if isinstance(temp, dict):\n life_style.update(parse(i.text))\n else:\n life_style['吸烟'] = '未填写生活方式'\n life_style['饮酒'] = '未填写生活方式'\n 
life_style['锻炼习惯'] = '未填写生活方式'\n life_style['饮食习惯'] = '未填写生活方式'\n life_style['逛街购物'] = '未填写生活方式'\n life_style['宗教信仰'] = '未填写生活方式'\n life_style['作息时间'] = '未填写生活方式'\n life_style['交际圈子'] = '未填写生活方式'\n life_style['最大消费'] = '未填写生活方式'\n try:\n housework = []\n pet = []\n jiawu1 = find_mate[2].find_elements_by_xpath(\n 'div[@class=\"js_box\"]//div[@class=\"pt25 fn-clear\"]//dd[@class=\"cur\"]'\n )\n for i in jiawu1:\n housework.append(i.text)\n print('家务1 ', i.text)\n jiawu2 = find_mate[2].find_elements_by_xpath(\n 'div[@class=\"js_box\"]//div[@class=\"fl pr\"]/em')\n for i in jiawu2:\n pet.append(i.text)\n print('家务2 ', i.text)\n except Exception as e:\n housework.append('家务水平程度未填写')\n housework.append('宠物喜欢程度未填写')\n pet.append('家务分配未填写')\n pet.append('关于宠物未填写')\n item['person_id_life'] = person_id\n item['smoke'] = life_style['吸烟']\n item['drink_wine'] = life_style['饮酒']\n item['exercise_habits'] = life_style['锻炼习惯']\n item['eating_habits'] = life_style['饮食习惯']\n item['shopping'] = life_style['逛街购物']\n item['religious_belief'] = life_style['宗教信仰']\n item['time_table'] = life_style['作息时间']\n item['circle_of_communication'] = life_style['交际圈子']\n item['maximum_consumption'] = life_style['最大消费']\n item['housework'] = housework[0]\n item['household_assignment'] = pet[0]\n item['pet'] = housework[1]\n item['about_pets'] = pet[1]\n print('生活方式', life_style)\n print('家务', housework[0], pet[0])\n print('宠物', housework[1], pet[1])\n \"\"\"\n 经济实力\n \"\"\"\n economic_dict = {}\n economic = find_mate[3].find_elements_by_xpath(\n 'div[@class=\"js_box\"]/ul[@class=\"js_list fn-clear\"]')\n for i in economic:\n economic_dict = parse(i.text)\n item['person_id_economic'] = person_id\n item['salary_economic'] = economic_dict['月薪']\n item['buy_house_economic'] = economic_dict['购房']\n item['buy_car_economic'] = economic_dict['购车']\n item['economic_concept'] = economic_dict['经济观念']\n item['investment_financing'] = economic_dict['投资理财']\n item['foreign_debt'] = economic_dict['外债贷款']\n print('经济实力', economic_dict)\n \"\"\"\n 工作学习\n \"\"\"\n work = find_mate[4].find_elements_by_xpath(\n 'div[@class=\"js_box\"]/ul[@class=\"js_list fn-clear\"]')\n work_study = {}\n for i in work:\n if i.text:\n temp = parse(i.text)\n if isinstance(temp, dict):\n work_study.update(parse(i.text))\n else:\n work_study['职业职位'] = '未填写工作学习方式'\n work_study['公司行业'] = '未填写工作学习方式'\n work_study['公司类型'] = '未填写工作学习方式'\n work_study['福利待遇'] = '未填写工作学习方式'\n work_study['工作状态'] = '未填写工作学习方式'\n work_study['调动工作可能性'] = '未填写工作学习方式'\n work_study['事业与家庭'] = '未填写工作学习方式'\n work_study['海外工作可能性'] = '未填写工作学习方式'\n work_study['毕业院校'] = '未填写工作学习方式'\n work_study['专业类型'] = '未填写工作学习方式'\n work_study['语言能力'] = '未填写工作学习方式'\n item['person_id_study'] = person_id\n item['position'] = work_study['职业职位']\n item['company'] = work_study['公司行业']\n item['company_type'] = work_study['公司类型']\n item['welfare'] = work_study['福利待遇']\n item['working'] = work_study['工作状态']\n item['transfer_work'] = work_study['调动工作可能性']\n item['work_family'] = work_study['事业与家庭']\n item['overseas_job'] = work_study['海外工作可能性']\n item['university'] = work_study['毕业院校']\n item['major'] = work_study['专业类型']\n item['language'] = work_study['语言能力']\n print('工作学习', work_study)\n \"\"\"\n 婚姻观念\n \"\"\"\n marriage = find_mate[5].find_elements_by_xpath(\n 'div[@class=\"js_box\"]/ul[@class=\"js_list fn-clear\"]')\n marriage_family = {}\n for i in marriage:\n if i.text:\n temp = parse(i.text)\n if isinstance(temp, dict):\n marriage_family.update(parse(i.text))\n else:\n marriage_family['籍贯'] = '未填写婚姻观念'\n 
marriage_family['户口'] = '未填写婚姻观念'\n marriage_family['国籍'] = '未填写婚姻观念'\n marriage_family['个性待征'] = '未填写婚姻观念'\n marriage_family['幽默感'] = '未填写婚姻观念'\n marriage_family['脾气'] = '未填写婚姻观念'\n marriage_family['对待感情'] = '未填写婚姻观念'\n marriage_family['是否要小孩'] = '未填写婚姻观念'\n marriage_family['何时结婚'] = '未填写婚姻观念'\n marriage_family['是否能接受异地恋'] = '未填写婚姻观念'\n marriage_family['理想婚姻'] = '未填写婚姻观念'\n marriage_family['愿与对方父母同住'] = '未填写婚姻观念'\n marriage_family['家中排行'] = '未填写婚姻观念'\n marriage_family['父母情况'] = '未填写婚姻观念'\n marriage_family['兄弟姐妹'] = '未填写婚姻观念'\n marriage_family['父母经济情况'] = '未填写婚姻观念'\n marriage_family['父母医保情况'] = '未填写婚姻观念'\n marriage_family['父母的工作'] = '未填写婚姻观念'\n item['person_id_marriage'] = person_id\n item['address_marriage'] = marriage_family['籍贯']\n item['registered_residence'] = marriage_family['户口']\n item['nationality'] = marriage_family['国籍']\n item['personality'] = marriage_family['个性待征']\n item['humor'] = marriage_family['幽默感']\n item['temper'] = marriage_family['脾气']\n item['feelings'] = marriage_family['对待感情']\n item['want_child'] = marriage_family['是否要小孩']\n item['when_mary'] = marriage_family['何时结婚']\n item['strange_love'] = marriage_family['是否能接受异地恋']\n item['ideal_marriage'] = marriage_family['理想婚姻']\n item['live_parents'] = marriage_family['愿与对方父母同住']\n item['rankings_home'] = marriage_family['家中排行']\n item['parents_situation'] = marriage_family['父母情况']\n item['brothers'] = marriage_family['兄弟姐妹']\n item['parents_economic'] = marriage_family['父母经济情况']\n item['parents_medical'] = marriage_family['父母医保情况']\n item['parents_working'] = marriage_family['父母的工作']\n print('婚姻观念', marriage_family)\n \"\"\"\n 相片列表\n \"\"\"\n print('相片url', response.url)\n list_images = self.driver.find_elements_by_xpath(\n '/html//div[@id=\"bigImg\"]//a')\n print('相片列表', type(list_images), list_images)\n images = []\n for i in list_images:\n image = i.find_element_by_xpath('img').get_attribute('src')\n images.append(image)\n print('相片地址', image)\n item['img_urls'] = images\n print('执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后')\n yield item\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass jiayuan_data(RedisSpider):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n option.add_argument('--headless')\n option.add_argument('--window-size=1920,1080')\n <assignment token>\n option.add_experimental_option('prefs', prefs)\n try:\n driver = webdriver.Chrome(chrome_options=option)\n except Exception as e:\n driver.close()\n print('spider出现了异常,关闭', str(e))\n driver.get(login_url)\n time.sleep(3)\n driver.find_element_by_id('login_btn').click()\n driver.find_element_by_id('login_email').clear()\n driver.find_element_by_id('login_email').send_keys(USER_NAME)\n driver.find_element_by_id('login_password').clear()\n driver.find_element_by_id('login_password').send_keys(PASSWD)\n driver.find_element_by_id('login_btn').click()\n <assignment token>\n for p in range(1, 173649):\n search_url = (\n 'http://search.jiayuan.com/v2/index.php?key=&sex=f&stc=&sn=default&sv=1&p=%s&pt=173649&ft=off&f=select&mt=d'\n % p)\n start_urls.append(search_url)\n <docstring token>\n\n def start_requests(self):\n for url in self.start_urls:\n yield Request(url=url, callback=self.get_main_info)\n\n def get_main_info(self, response):\n time.sleep(1)\n print('当前的url', response.url)\n print('重新加载url')\n self.driver.get(response.url)\n self.driver.implicitly_wait(3)\n user_list = self.driver.find_elements_by_xpath(\n '/html//ul[@id=\"normal_user_container\"]/li//div[@class=\"user_name\"]/a[@class=\"os_stat\"]'\n )\n if user_list == []:\n print('user_list为空了,解析有问题')\n url_details = []\n for user in user_list:\n main_url_main = user.get_attribute('href')\n print('人员主页url', main_url_main)\n url_details.append(main_url_main)\n print('人员详情url2', len(url_details))\n if url_details != []:\n for url in url_details:\n yield Request(url=url, cookies=self.cookies, callback=self.\n get_details)\n\n def get_details(self, response):\n \"\"\"\n <class 'str'>\n 年 龄:\n 26-29岁之间\n 身 高:\n 169-185厘米\n 民 族:\n 汉族\n 学 历:\n 不限\n 相 册:\n 有照片\n 婚姻状况:\n 未婚\n 居 住 地:\n 湖北十堰\n 诚 信:\n 不限\n 将这种类型的文字全部转成{'学历': '不限', '婚姻状况': '未婚', '居住地': '湖北十堰', '相册': '有照片', '身高': '169-185厘米', '民族': '汉族', '诚信': '不限', '年龄': '26-29岁之间'}这种dict方便入库\n \"\"\"\n pass\n\n def parse(str1):\n temp_list = str1.split('\\n')\n result = {}\n result_str = ''\n if len(temp_list) > 1:\n for i in range(len(temp_list)):\n if i % 2 == 0:\n result[temp_list[i].replace(' ', '').replace(':', '')\n ] = temp_list[i + 1]\n return result\n else:\n result_str = str1\n return result_str\n item = JiayuanItem()\n self.driver.get(response.url)\n self.driver.implicitly_wait(3)\n print('打开浏览器')\n print('当前的url', response.url)\n age_info = self.driver.find_element_by_xpath(\n '/html//h6[@class=\"member_name\"]').text\n person_id = response.url[response.url.rfind('/') + 1:response.url.\n index('?')]\n print('年龄地址信息', type(age_info), age_info)\n address = self.driver.find_elements_by_xpath(\n '/html//h6[@class=\"member_name\"]/a')\n str_address = ''\n str_sheng = address[0].get_attribute('text')\n str_shi = address[1].get_attribute('text')\n print('人员地址', str_sheng + 'sssss' + str_shi)\n \"\"\"\n 人个信息\n \"\"\"\n person_info = self.driver.find_elements_by_xpath(\n '/html//ul[@class=\"member_info_list fn-clear\"]')\n person_dict = {}\n for i in person_info:\n person_dict = parse(i.text)\n print('个人信息', person_dict)\n \"\"\"\n 处理item,对应mysql的person_info表\n \"\"\"\n item['person_id'] = person_id\n 
item['province'] = str_sheng\n item['municipal'] = str_shi\n nick_name_info = self.driver.find_elements_by_xpath(\n '/html//div[@class=\"member_info_r yh\"]/h4')\n nick_name = nick_name_info[0].text[0:nick_name_info[0].text.index('I')]\n print('昵称', nick_name)\n item['nike_name'] = nick_name\n item['education'] = person_dict['学历']\n item['height'] = person_dict['身高']\n item['buy_car'] = person_dict['购车']\n item['salary'] = person_dict['月薪']\n item['housing'] = person_dict['住房']\n item['weight'] = person_dict['体重']\n item['constellation'] = person_dict['星座']\n item['nation'] = person_dict['民族']\n item['zodiac'] = person_dict['属相']\n item['blood_type'] = person_dict['血型']\n item['age'] = age_info[0:age_info.index(',')]\n print('年龄', age_info[0:age_info.index(',')])\n item['address'] = str_sheng + str_shi\n item['age_info'] = age_info\n item['image_dir'] = nick_name + '_' + item['age'] + '_' + person_id\n item['url'] = response.url\n item['introduce_oneself'] = self.driver.find_element_by_xpath(\n '/html//div[@class=\"main_1000 mt15 fn-clear\"]//div[@class=\"js_text\"]'\n ).text\n print('个性短语', item['introduce_oneself'])\n item['interest_label'] = ''\n item['personality_label'] = ''\n try:\n self.driver.find_element_by_xpath(\n '/html//div[@class=\"d_more DNA_xq_more DNA_xq_more_a\"]/a'\n ).click()\n time.sleep(1)\n gexing_info = self.driver.find_elements_by_xpath(\n '/html//div[@class=\"test4\"]//div[@class=\"list_a fn-clear\"]')\n print('aaa', type(gexing_info), gexing_info)\n gexing_tag = ''\n for i in gexing_info:\n gexing_tag += i.text\n item['personality_label'] = ''.join(gexing_tag)\n except Exception as e:\n item['personality_label'] = '还没有填写个性元素'\n print('个性', item['personality_label'])\n try:\n self.driver.find_element_by_xpath(\n '/html//div[@class=\"d_more DNA_xq_more\"]/a').click()\n self.driver.implicitly_wait(1)\n aihao_info = self.driver.find_elements_by_xpath(\n '/html/body/div[6]/div[1]/div[3]/div/div[1]/div[1]/ul')\n print('bbb', type(aihao_info), aihao_info)\n aihao_tag = ''\n for i in aihao_info:\n aihao_tag += i.text\n item['interest_label'] = ''.join(aihao_tag)\n except Exception as e:\n item['interest_label'] = '还没有填写兴趣爱好'\n print('她的兴趣爱好', item['interest_label'])\n find_mate = self.driver.find_elements_by_xpath(\n '/html//div[@class=\"bg_white mt15\"]')\n \"\"\"\n 择偶要求\n \"\"\"\n mate = find_mate[1].find_elements_by_xpath(\n 'div[@class=\"js_box\"]/ul[@class=\"js_list fn-clear\"]')\n mate_dict = {}\n for i in mate:\n mate_dict = parse(i.text)\n item['person_id_mate'] = person_id\n item['age_mate'] = mate_dict['年龄']\n item['height_mate'] = mate_dict['身高']\n item['nation_mate'] = mate_dict['民族']\n item['education_mate'] = mate_dict['学历']\n item['image_mate'] = mate_dict['相册']\n item['marital_status'] = mate_dict['婚姻状况']\n item['address_mate'] = mate_dict['居住地']\n item['sincerity_mate'] = mate_dict['诚信']\n print('择偶要求', mate_dict)\n \"\"\"\n 生活方式\n \"\"\"\n life = find_mate[2].find_elements_by_xpath(\n 'div[@class=\"js_box\"]/ul[@class=\"js_list fn-clear\"]')\n life_style = {}\n for i in life:\n temp = parse(i.text)\n if isinstance(temp, dict):\n life_style.update(parse(i.text))\n else:\n life_style['吸烟'] = '未填写生活方式'\n life_style['饮酒'] = '未填写生活方式'\n life_style['锻炼习惯'] = '未填写生活方式'\n life_style['饮食习惯'] = '未填写生活方式'\n life_style['逛街购物'] = '未填写生活方式'\n life_style['宗教信仰'] = '未填写生活方式'\n life_style['作息时间'] = '未填写生活方式'\n life_style['交际圈子'] = '未填写生活方式'\n life_style['最大消费'] = '未填写生活方式'\n try:\n housework = []\n pet = []\n jiawu1 = find_mate[2].find_elements_by_xpath(\n 
'div[@class=\"js_box\"]//div[@class=\"pt25 fn-clear\"]//dd[@class=\"cur\"]'\n )\n for i in jiawu1:\n housework.append(i.text)\n print('家务1 ', i.text)\n jiawu2 = find_mate[2].find_elements_by_xpath(\n 'div[@class=\"js_box\"]//div[@class=\"fl pr\"]/em')\n for i in jiawu2:\n pet.append(i.text)\n print('家务2 ', i.text)\n except Exception as e:\n housework.append('家务水平程度未填写')\n housework.append('宠物喜欢程度未填写')\n pet.append('家务分配未填写')\n pet.append('关于宠物未填写')\n item['person_id_life'] = person_id\n item['smoke'] = life_style['吸烟']\n item['drink_wine'] = life_style['饮酒']\n item['exercise_habits'] = life_style['锻炼习惯']\n item['eating_habits'] = life_style['饮食习惯']\n item['shopping'] = life_style['逛街购物']\n item['religious_belief'] = life_style['宗教信仰']\n item['time_table'] = life_style['作息时间']\n item['circle_of_communication'] = life_style['交际圈子']\n item['maximum_consumption'] = life_style['最大消费']\n item['housework'] = housework[0]\n item['household_assignment'] = pet[0]\n item['pet'] = housework[1]\n item['about_pets'] = pet[1]\n print('生活方式', life_style)\n print('家务', housework[0], pet[0])\n print('宠物', housework[1], pet[1])\n \"\"\"\n 经济实力\n \"\"\"\n economic_dict = {}\n economic = find_mate[3].find_elements_by_xpath(\n 'div[@class=\"js_box\"]/ul[@class=\"js_list fn-clear\"]')\n for i in economic:\n economic_dict = parse(i.text)\n item['person_id_economic'] = person_id\n item['salary_economic'] = economic_dict['月薪']\n item['buy_house_economic'] = economic_dict['购房']\n item['buy_car_economic'] = economic_dict['购车']\n item['economic_concept'] = economic_dict['经济观念']\n item['investment_financing'] = economic_dict['投资理财']\n item['foreign_debt'] = economic_dict['外债贷款']\n print('经济实力', economic_dict)\n \"\"\"\n 工作学习\n \"\"\"\n work = find_mate[4].find_elements_by_xpath(\n 'div[@class=\"js_box\"]/ul[@class=\"js_list fn-clear\"]')\n work_study = {}\n for i in work:\n if i.text:\n temp = parse(i.text)\n if isinstance(temp, dict):\n work_study.update(parse(i.text))\n else:\n work_study['职业职位'] = '未填写工作学习方式'\n work_study['公司行业'] = '未填写工作学习方式'\n work_study['公司类型'] = '未填写工作学习方式'\n work_study['福利待遇'] = '未填写工作学习方式'\n work_study['工作状态'] = '未填写工作学习方式'\n work_study['调动工作可能性'] = '未填写工作学习方式'\n work_study['事业与家庭'] = '未填写工作学习方式'\n work_study['海外工作可能性'] = '未填写工作学习方式'\n work_study['毕业院校'] = '未填写工作学习方式'\n work_study['专业类型'] = '未填写工作学习方式'\n work_study['语言能力'] = '未填写工作学习方式'\n item['person_id_study'] = person_id\n item['position'] = work_study['职业职位']\n item['company'] = work_study['公司行业']\n item['company_type'] = work_study['公司类型']\n item['welfare'] = work_study['福利待遇']\n item['working'] = work_study['工作状态']\n item['transfer_work'] = work_study['调动工作可能性']\n item['work_family'] = work_study['事业与家庭']\n item['overseas_job'] = work_study['海外工作可能性']\n item['university'] = work_study['毕业院校']\n item['major'] = work_study['专业类型']\n item['language'] = work_study['语言能力']\n print('工作学习', work_study)\n \"\"\"\n 婚姻观念\n \"\"\"\n marriage = find_mate[5].find_elements_by_xpath(\n 'div[@class=\"js_box\"]/ul[@class=\"js_list fn-clear\"]')\n marriage_family = {}\n for i in marriage:\n if i.text:\n temp = parse(i.text)\n if isinstance(temp, dict):\n marriage_family.update(parse(i.text))\n else:\n marriage_family['籍贯'] = '未填写婚姻观念'\n marriage_family['户口'] = '未填写婚姻观念'\n marriage_family['国籍'] = '未填写婚姻观念'\n marriage_family['个性待征'] = '未填写婚姻观念'\n marriage_family['幽默感'] = '未填写婚姻观念'\n marriage_family['脾气'] = '未填写婚姻观念'\n marriage_family['对待感情'] = '未填写婚姻观念'\n marriage_family['是否要小孩'] = '未填写婚姻观念'\n marriage_family['何时结婚'] = '未填写婚姻观念'\n 
marriage_family['是否能接受异地恋'] = '未填写婚姻观念'\n marriage_family['理想婚姻'] = '未填写婚姻观念'\n marriage_family['愿与对方父母同住'] = '未填写婚姻观念'\n marriage_family['家中排行'] = '未填写婚姻观念'\n marriage_family['父母情况'] = '未填写婚姻观念'\n marriage_family['兄弟姐妹'] = '未填写婚姻观念'\n marriage_family['父母经济情况'] = '未填写婚姻观念'\n marriage_family['父母医保情况'] = '未填写婚姻观念'\n marriage_family['父母的工作'] = '未填写婚姻观念'\n item['person_id_marriage'] = person_id\n item['address_marriage'] = marriage_family['籍贯']\n item['registered_residence'] = marriage_family['户口']\n item['nationality'] = marriage_family['国籍']\n item['personality'] = marriage_family['个性待征']\n item['humor'] = marriage_family['幽默感']\n item['temper'] = marriage_family['脾气']\n item['feelings'] = marriage_family['对待感情']\n item['want_child'] = marriage_family['是否要小孩']\n item['when_mary'] = marriage_family['何时结婚']\n item['strange_love'] = marriage_family['是否能接受异地恋']\n item['ideal_marriage'] = marriage_family['理想婚姻']\n item['live_parents'] = marriage_family['愿与对方父母同住']\n item['rankings_home'] = marriage_family['家中排行']\n item['parents_situation'] = marriage_family['父母情况']\n item['brothers'] = marriage_family['兄弟姐妹']\n item['parents_economic'] = marriage_family['父母经济情况']\n item['parents_medical'] = marriage_family['父母医保情况']\n item['parents_working'] = marriage_family['父母的工作']\n print('婚姻观念', marriage_family)\n \"\"\"\n 相片列表\n \"\"\"\n print('相片url', response.url)\n list_images = self.driver.find_elements_by_xpath(\n '/html//div[@id=\"bigImg\"]//a')\n print('相片列表', type(list_images), list_images)\n images = []\n for i in list_images:\n image = i.find_element_by_xpath('img').get_attribute('src')\n images.append(image)\n print('相片地址', image)\n item['img_urls'] = images\n print('执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后执行到了最后')\n yield item\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass jiayuan_data(RedisSpider):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n option.add_argument('--headless')\n option.add_argument('--window-size=1920,1080')\n <assignment token>\n option.add_experimental_option('prefs', prefs)\n try:\n driver = webdriver.Chrome(chrome_options=option)\n except Exception as e:\n driver.close()\n print('spider出现了异常,关闭', str(e))\n driver.get(login_url)\n time.sleep(3)\n driver.find_element_by_id('login_btn').click()\n driver.find_element_by_id('login_email').clear()\n driver.find_element_by_id('login_email').send_keys(USER_NAME)\n driver.find_element_by_id('login_password').clear()\n driver.find_element_by_id('login_password').send_keys(PASSWD)\n driver.find_element_by_id('login_btn').click()\n <assignment token>\n for p in range(1, 173649):\n search_url = (\n 'http://search.jiayuan.com/v2/index.php?key=&sex=f&stc=&sn=default&sv=1&p=%s&pt=173649&ft=off&f=select&mt=d'\n % p)\n start_urls.append(search_url)\n <docstring token>\n\n def start_requests(self):\n for url in self.start_urls:\n yield Request(url=url, callback=self.get_main_info)\n\n def get_main_info(self, response):\n time.sleep(1)\n print('当前的url', response.url)\n print('重新加载url')\n self.driver.get(response.url)\n self.driver.implicitly_wait(3)\n user_list = self.driver.find_elements_by_xpath(\n '/html//ul[@id=\"normal_user_container\"]/li//div[@class=\"user_name\"]/a[@class=\"os_stat\"]'\n )\n if user_list == []:\n print('user_list为空了,解析有问题')\n url_details = []\n for user in user_list:\n main_url_main = user.get_attribute('href')\n print('人员主页url', main_url_main)\n url_details.append(main_url_main)\n print('人员详情url2', len(url_details))\n if url_details != []:\n for url in url_details:\n yield Request(url=url, cookies=self.cookies, callback=self.\n get_details)\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass jiayuan_data(RedisSpider):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n option.add_argument('--headless')\n option.add_argument('--window-size=1920,1080')\n <assignment token>\n option.add_experimental_option('prefs', prefs)\n try:\n driver = webdriver.Chrome(chrome_options=option)\n except Exception as e:\n driver.close()\n print('spider出现了异常,关闭', str(e))\n driver.get(login_url)\n time.sleep(3)\n driver.find_element_by_id('login_btn').click()\n driver.find_element_by_id('login_email').clear()\n driver.find_element_by_id('login_email').send_keys(USER_NAME)\n driver.find_element_by_id('login_password').clear()\n driver.find_element_by_id('login_password').send_keys(PASSWD)\n driver.find_element_by_id('login_btn').click()\n <assignment token>\n for p in range(1, 173649):\n search_url = (\n 'http://search.jiayuan.com/v2/index.php?key=&sex=f&stc=&sn=default&sv=1&p=%s&pt=173649&ft=off&f=select&mt=d'\n % p)\n start_urls.append(search_url)\n <docstring token>\n <function token>\n\n def get_main_info(self, response):\n time.sleep(1)\n print('当前的url', response.url)\n print('重新加载url')\n self.driver.get(response.url)\n self.driver.implicitly_wait(3)\n user_list = self.driver.find_elements_by_xpath(\n '/html//ul[@id=\"normal_user_container\"]/li//div[@class=\"user_name\"]/a[@class=\"os_stat\"]'\n )\n if user_list == []:\n print('user_list为空了,解析有问题')\n url_details = []\n for user in user_list:\n main_url_main = user.get_attribute('href')\n print('人员主页url', main_url_main)\n url_details.append(main_url_main)\n print('人员详情url2', len(url_details))\n if url_details != []:\n for url in url_details:\n yield Request(url=url, cookies=self.cookies, callback=self.\n get_details)\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass jiayuan_data(RedisSpider):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n option.add_argument('--headless')\n option.add_argument('--window-size=1920,1080')\n <assignment token>\n option.add_experimental_option('prefs', prefs)\n try:\n driver = webdriver.Chrome(chrome_options=option)\n except Exception as e:\n driver.close()\n print('spider出现了异常,关闭', str(e))\n driver.get(login_url)\n time.sleep(3)\n driver.find_element_by_id('login_btn').click()\n driver.find_element_by_id('login_email').clear()\n driver.find_element_by_id('login_email').send_keys(USER_NAME)\n driver.find_element_by_id('login_password').clear()\n driver.find_element_by_id('login_password').send_keys(PASSWD)\n driver.find_element_by_id('login_btn').click()\n <assignment token>\n for p in range(1, 173649):\n search_url = (\n 'http://search.jiayuan.com/v2/index.php?key=&sex=f&stc=&sn=default&sv=1&p=%s&pt=173649&ft=off&f=select&mt=d'\n % p)\n start_urls.append(search_url)\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<class token>\n<code token>\n"
] | false |
99,388 |
904f0c408b9ef611c6a1650c507d06e05c7c4627
|
print ("alok kumar mishra")
|
[
"print (\"alok kumar mishra\")\n",
"print('alok kumar mishra')\n",
"<code token>\n"
] | false |
99,389 |
2a8020063c58ad5ae5af32c2062b7b3f5d72e05a
|
import os
from click.testing import CliRunner
from cli.script import cli
def get_graph_code():
return '''
from copy import deepcopy as dc
class StringCopier(object):
def __init__(self):
self.copied_strings = set()
def copy(self):
string1 = 'this'
string2 = dc(string1)
string1.add(string1)
return string2
class DoSomething(object):
def something(self):
copier = StringCopier()
copied_string = copier.copy()
'''
def test_produce_graph():
runner = CliRunner()
with runner.isolated_filesystem():
with open('code.py', 'w') as f:
f.write(get_graph_code())
runner.invoke(cli, ['code.py', '--output', 'code_output'])
assert 'code_output' in os.listdir(os.path.curdir)
def test_file_extension():
runner = CliRunner()
with runner.isolated_filesystem():
with open('code.py', 'w') as f:
f.write(get_graph_code())
runner.invoke(cli, ['code.py', '--output', 'code_output', '--output-format', 'png'])
assert 'code_output' in os.listdir(os.path.curdir)
|
[
"import os\n\nfrom click.testing import CliRunner\n\nfrom cli.script import cli\n\n\ndef get_graph_code():\n return '''\nfrom copy import deepcopy as dc\n\nclass StringCopier(object):\n def __init__(self):\n self.copied_strings = set()\n\n def copy(self):\n string1 = 'this'\n string2 = dc(string1)\n string1.add(string1)\n return string2\n\nclass DoSomething(object):\n def something(self):\n copier = StringCopier()\n copied_string = copier.copy()\n'''\n\n\ndef test_produce_graph():\n runner = CliRunner()\n with runner.isolated_filesystem():\n with open('code.py', 'w') as f:\n f.write(get_graph_code())\n\n runner.invoke(cli, ['code.py', '--output', 'code_output'])\n assert 'code_output' in os.listdir(os.path.curdir)\n\n\ndef test_file_extension():\n runner = CliRunner()\n with runner.isolated_filesystem():\n with open('code.py', 'w') as f:\n f.write(get_graph_code())\n\n runner.invoke(cli, ['code.py', '--output', 'code_output', '--output-format', 'png'])\n assert 'code_output' in os.listdir(os.path.curdir)\n",
"import os\nfrom click.testing import CliRunner\nfrom cli.script import cli\n\n\ndef get_graph_code():\n return \"\"\"\nfrom copy import deepcopy as dc\n\nclass StringCopier(object):\n def __init__(self):\n self.copied_strings = set()\n\n def copy(self):\n string1 = 'this'\n string2 = dc(string1)\n string1.add(string1)\n return string2\n\nclass DoSomething(object):\n def something(self):\n copier = StringCopier()\n copied_string = copier.copy()\n\"\"\"\n\n\ndef test_produce_graph():\n runner = CliRunner()\n with runner.isolated_filesystem():\n with open('code.py', 'w') as f:\n f.write(get_graph_code())\n runner.invoke(cli, ['code.py', '--output', 'code_output'])\n assert 'code_output' in os.listdir(os.path.curdir)\n\n\ndef test_file_extension():\n runner = CliRunner()\n with runner.isolated_filesystem():\n with open('code.py', 'w') as f:\n f.write(get_graph_code())\n runner.invoke(cli, ['code.py', '--output', 'code_output',\n '--output-format', 'png'])\n assert 'code_output' in os.listdir(os.path.curdir)\n",
"<import token>\n\n\ndef get_graph_code():\n return \"\"\"\nfrom copy import deepcopy as dc\n\nclass StringCopier(object):\n def __init__(self):\n self.copied_strings = set()\n\n def copy(self):\n string1 = 'this'\n string2 = dc(string1)\n string1.add(string1)\n return string2\n\nclass DoSomething(object):\n def something(self):\n copier = StringCopier()\n copied_string = copier.copy()\n\"\"\"\n\n\ndef test_produce_graph():\n runner = CliRunner()\n with runner.isolated_filesystem():\n with open('code.py', 'w') as f:\n f.write(get_graph_code())\n runner.invoke(cli, ['code.py', '--output', 'code_output'])\n assert 'code_output' in os.listdir(os.path.curdir)\n\n\ndef test_file_extension():\n runner = CliRunner()\n with runner.isolated_filesystem():\n with open('code.py', 'w') as f:\n f.write(get_graph_code())\n runner.invoke(cli, ['code.py', '--output', 'code_output',\n '--output-format', 'png'])\n assert 'code_output' in os.listdir(os.path.curdir)\n",
"<import token>\n\n\ndef get_graph_code():\n return \"\"\"\nfrom copy import deepcopy as dc\n\nclass StringCopier(object):\n def __init__(self):\n self.copied_strings = set()\n\n def copy(self):\n string1 = 'this'\n string2 = dc(string1)\n string1.add(string1)\n return string2\n\nclass DoSomething(object):\n def something(self):\n copier = StringCopier()\n copied_string = copier.copy()\n\"\"\"\n\n\n<function token>\n\n\ndef test_file_extension():\n runner = CliRunner()\n with runner.isolated_filesystem():\n with open('code.py', 'w') as f:\n f.write(get_graph_code())\n runner.invoke(cli, ['code.py', '--output', 'code_output',\n '--output-format', 'png'])\n assert 'code_output' in os.listdir(os.path.curdir)\n",
"<import token>\n<function token>\n<function token>\n\n\ndef test_file_extension():\n runner = CliRunner()\n with runner.isolated_filesystem():\n with open('code.py', 'w') as f:\n f.write(get_graph_code())\n runner.invoke(cli, ['code.py', '--output', 'code_output',\n '--output-format', 'png'])\n assert 'code_output' in os.listdir(os.path.curdir)\n",
"<import token>\n<function token>\n<function token>\n<function token>\n"
] | false |
99,390 |
454a76544b63ce9f08b4d36b7607d0a19d678440
|
import sys
from multiprocessing import Queue, Manager, Process
from logger import Logger
class BaseProcessor:
_logger = None
_file_handler = None
_process_list = []
_manager = None
_event_queue = None
_task_queue = None
_process_count = 0
_use_verbose_logging = False
def __init__(self, file_handler, process_count, use_verbose_logging):
self._file_handler = file_handler
self._process_count = process_count
self._use_verbose_logging = use_verbose_logging
self._logger = Logger()
self._manager = Manager()
self._event_queue = self._manager.Queue()
self._task_queue = self._manager.Queue()
def _get_process(self, process_id):
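        # Template method: subclasses must override this to construct their worker process.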
raise AttributeError("not supported")
def _run_processes(self, items_to_process, event_handler_func, event_handler_args):
total_to_process = len(items_to_process)
processes = self._initialize_processes()
self._fill_task_queue(items_to_process)
self._process_events(total_to_process, event_handler_func, event_handler_args)
self._stop_processes(processes)
def _initialize_processes(self):
processes = []
for i in range(self._process_count):
process = self._get_process(i)
processes.append(process)
process.start()
return processes
def _fill_task_queue(self, items):
for item in items:
self._task_queue.put(item)
def _process_events(self, total_to_process, event_handler_func, event_handler_args):
num_processed = 0
num_processed_by_process_list = [0] * self._process_count
while True:
self._write_progress_to_console(num_processed, total_to_process, num_processed_by_process_list)
event = None
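            # Poll with a one-second timeout; the bare except swallows queue.Empty so progress keeps refreshing while waiting.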
try:
event = self._event_queue.get(True, 1)
except:
pass
if event is not None:
args_to_use = (event, num_processed_by_process_list, num_processed, total_to_process)
args_to_use += event_handler_args
num_processed = event_handler_func(*args_to_use)
if num_processed >= total_to_process:
break
def _stop_processes(self, processes):
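        # Put one -1 sentinel per worker; workers are expected to exit when they pull it from the task queue.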
for i in range(self._process_count):
self._task_queue.put(-1)
for process in processes:
process.join()
def _write_progress_to_console(self, num_processed, total_to_process, num_processed_by_process_list):
output_str = "Progress: " + str(num_processed) + "/" + str(total_to_process) + " "
for i in range(len(num_processed_by_process_list)):
output_str += ("P" + str(i) + ": " + str(num_processed_by_process_list[i]) + " ")
sys.stdout.write(output_str + "\r")
sys.stdout.flush()
def _log_process_message(self, process_id, message):
if self._use_verbose_logging:
self._logger.print_log("[process: " + str(process_id) + "] " + message)
|
[
"import sys\nfrom multiprocessing import Queue, Manager, Process\nfrom logger import Logger\n\nclass BaseProcessor:\n\n _logger = None\n _file_handler = None\n _process_list = []\n _manager = None\n _event_queue = None\n _task_queue = None\n _process_count = 0\n _use_verbose_logging = False\n\n def __init__(self, file_handler, process_count, use_verbose_logging):\n self._file_handler = file_handler\n self._process_count = process_count\n self._use_verbose_logging = use_verbose_logging\n \n self._logger = Logger()\n self._manager = Manager()\n self._event_queue = self._manager.Queue()\n self._task_queue = self._manager.Queue()\n\n def _get_process(self, process_id):\n raise AttributeError(\"not supported\")\n\n def _run_processes(self, items_to_process, event_handler_func, event_handler_args):\n total_to_process = len(items_to_process)\n\n processes = self._initialize_processes()\n self._fill_task_queue(items_to_process)\n\n self._process_events(total_to_process, event_handler_func, event_handler_args)\n\n self._stop_processes(processes)\n\n def _initialize_processes(self):\n processes = []\n for i in range(self._process_count):\n process = self._get_process(i)\n processes.append(process)\n process.start()\n\n return processes\n\n def _fill_task_queue(self, items):\n for item in items:\n self._task_queue.put(item)\n\n def _process_events(self, total_to_process, event_handler_func, event_handler_args):\n num_processed = 0\n num_processed_by_process_list = [0] * self._process_count\n \n while True:\n self._write_progress_to_console(num_processed, total_to_process, num_processed_by_process_list)\n\n event = None\n try:\n event = self._event_queue.get(True, 1) \n except:\n pass\n\n if event is not None:\n args_to_use = (event, num_processed_by_process_list, num_processed, total_to_process)\n args_to_use += event_handler_args\n\n num_processed = event_handler_func(*args_to_use)\n\n if num_processed >= total_to_process:\n break\n\n def _stop_processes(self, processes):\n for i in range(self._process_count):\n self._task_queue.put(-1)\n\n for process in processes:\n process.join()\n\n def _write_progress_to_console(self, num_processed, total_to_process, num_processed_by_process_list):\n output_str = \"Progress: \" + str(num_processed) + \"/\" + str(total_to_process) + \" \"\n\n for i in range(len(num_processed_by_process_list)):\n output_str += (\"P\" + str(i) + \": \" + str(num_processed_by_process_list[i]) + \" \")\n\n sys.stdout.write(output_str + \"\\r\")\n sys.stdout.flush()\n\n def _log_process_message(self, process_id, message):\n if self._use_verbose_logging:\n self._logger.print_log(\"[process: \" + str(process_id) + \"] \" + message)",
"import sys\nfrom multiprocessing import Queue, Manager, Process\nfrom logger import Logger\n\n\nclass BaseProcessor:\n _logger = None\n _file_handler = None\n _process_list = []\n _manager = None\n _event_queue = None\n _task_queue = None\n _process_count = 0\n _use_verbose_logging = False\n\n def __init__(self, file_handler, process_count, use_verbose_logging):\n self._file_handler = file_handler\n self._process_count = process_count\n self._use_verbose_logging = use_verbose_logging\n self._logger = Logger()\n self._manager = Manager()\n self._event_queue = self._manager.Queue()\n self._task_queue = self._manager.Queue()\n\n def _get_process(self, process_id):\n raise AttributeError('not supported')\n\n def _run_processes(self, items_to_process, event_handler_func,\n event_handler_args):\n total_to_process = len(items_to_process)\n processes = self._initialize_processes()\n self._fill_task_queue(items_to_process)\n self._process_events(total_to_process, event_handler_func,\n event_handler_args)\n self._stop_processes(processes)\n\n def _initialize_processes(self):\n processes = []\n for i in range(self._process_count):\n process = self._get_process(i)\n processes.append(process)\n process.start()\n return processes\n\n def _fill_task_queue(self, items):\n for item in items:\n self._task_queue.put(item)\n\n def _process_events(self, total_to_process, event_handler_func,\n event_handler_args):\n num_processed = 0\n num_processed_by_process_list = [0] * self._process_count\n while True:\n self._write_progress_to_console(num_processed, total_to_process,\n num_processed_by_process_list)\n event = None\n try:\n event = self._event_queue.get(True, 1)\n except:\n pass\n if event is not None:\n args_to_use = (event, num_processed_by_process_list,\n num_processed, total_to_process)\n args_to_use += event_handler_args\n num_processed = event_handler_func(*args_to_use)\n if num_processed >= total_to_process:\n break\n\n def _stop_processes(self, processes):\n for i in range(self._process_count):\n self._task_queue.put(-1)\n for process in processes:\n process.join()\n\n def _write_progress_to_console(self, num_processed, total_to_process,\n num_processed_by_process_list):\n output_str = 'Progress: ' + str(num_processed) + '/' + str(\n total_to_process) + ' '\n for i in range(len(num_processed_by_process_list)):\n output_str += 'P' + str(i) + ': ' + str(\n num_processed_by_process_list[i]) + ' '\n sys.stdout.write(output_str + '\\r')\n sys.stdout.flush()\n\n def _log_process_message(self, process_id, message):\n if self._use_verbose_logging:\n self._logger.print_log('[process: ' + str(process_id) + '] ' +\n message)\n",
"<import token>\n\n\nclass BaseProcessor:\n _logger = None\n _file_handler = None\n _process_list = []\n _manager = None\n _event_queue = None\n _task_queue = None\n _process_count = 0\n _use_verbose_logging = False\n\n def __init__(self, file_handler, process_count, use_verbose_logging):\n self._file_handler = file_handler\n self._process_count = process_count\n self._use_verbose_logging = use_verbose_logging\n self._logger = Logger()\n self._manager = Manager()\n self._event_queue = self._manager.Queue()\n self._task_queue = self._manager.Queue()\n\n def _get_process(self, process_id):\n raise AttributeError('not supported')\n\n def _run_processes(self, items_to_process, event_handler_func,\n event_handler_args):\n total_to_process = len(items_to_process)\n processes = self._initialize_processes()\n self._fill_task_queue(items_to_process)\n self._process_events(total_to_process, event_handler_func,\n event_handler_args)\n self._stop_processes(processes)\n\n def _initialize_processes(self):\n processes = []\n for i in range(self._process_count):\n process = self._get_process(i)\n processes.append(process)\n process.start()\n return processes\n\n def _fill_task_queue(self, items):\n for item in items:\n self._task_queue.put(item)\n\n def _process_events(self, total_to_process, event_handler_func,\n event_handler_args):\n num_processed = 0\n num_processed_by_process_list = [0] * self._process_count\n while True:\n self._write_progress_to_console(num_processed, total_to_process,\n num_processed_by_process_list)\n event = None\n try:\n event = self._event_queue.get(True, 1)\n except:\n pass\n if event is not None:\n args_to_use = (event, num_processed_by_process_list,\n num_processed, total_to_process)\n args_to_use += event_handler_args\n num_processed = event_handler_func(*args_to_use)\n if num_processed >= total_to_process:\n break\n\n def _stop_processes(self, processes):\n for i in range(self._process_count):\n self._task_queue.put(-1)\n for process in processes:\n process.join()\n\n def _write_progress_to_console(self, num_processed, total_to_process,\n num_processed_by_process_list):\n output_str = 'Progress: ' + str(num_processed) + '/' + str(\n total_to_process) + ' '\n for i in range(len(num_processed_by_process_list)):\n output_str += 'P' + str(i) + ': ' + str(\n num_processed_by_process_list[i]) + ' '\n sys.stdout.write(output_str + '\\r')\n sys.stdout.flush()\n\n def _log_process_message(self, process_id, message):\n if self._use_verbose_logging:\n self._logger.print_log('[process: ' + str(process_id) + '] ' +\n message)\n",
"<import token>\n\n\nclass BaseProcessor:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, file_handler, process_count, use_verbose_logging):\n self._file_handler = file_handler\n self._process_count = process_count\n self._use_verbose_logging = use_verbose_logging\n self._logger = Logger()\n self._manager = Manager()\n self._event_queue = self._manager.Queue()\n self._task_queue = self._manager.Queue()\n\n def _get_process(self, process_id):\n raise AttributeError('not supported')\n\n def _run_processes(self, items_to_process, event_handler_func,\n event_handler_args):\n total_to_process = len(items_to_process)\n processes = self._initialize_processes()\n self._fill_task_queue(items_to_process)\n self._process_events(total_to_process, event_handler_func,\n event_handler_args)\n self._stop_processes(processes)\n\n def _initialize_processes(self):\n processes = []\n for i in range(self._process_count):\n process = self._get_process(i)\n processes.append(process)\n process.start()\n return processes\n\n def _fill_task_queue(self, items):\n for item in items:\n self._task_queue.put(item)\n\n def _process_events(self, total_to_process, event_handler_func,\n event_handler_args):\n num_processed = 0\n num_processed_by_process_list = [0] * self._process_count\n while True:\n self._write_progress_to_console(num_processed, total_to_process,\n num_processed_by_process_list)\n event = None\n try:\n event = self._event_queue.get(True, 1)\n except:\n pass\n if event is not None:\n args_to_use = (event, num_processed_by_process_list,\n num_processed, total_to_process)\n args_to_use += event_handler_args\n num_processed = event_handler_func(*args_to_use)\n if num_processed >= total_to_process:\n break\n\n def _stop_processes(self, processes):\n for i in range(self._process_count):\n self._task_queue.put(-1)\n for process in processes:\n process.join()\n\n def _write_progress_to_console(self, num_processed, total_to_process,\n num_processed_by_process_list):\n output_str = 'Progress: ' + str(num_processed) + '/' + str(\n total_to_process) + ' '\n for i in range(len(num_processed_by_process_list)):\n output_str += 'P' + str(i) + ': ' + str(\n num_processed_by_process_list[i]) + ' '\n sys.stdout.write(output_str + '\\r')\n sys.stdout.flush()\n\n def _log_process_message(self, process_id, message):\n if self._use_verbose_logging:\n self._logger.print_log('[process: ' + str(process_id) + '] ' +\n message)\n",
"<import token>\n\n\nclass BaseProcessor:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, file_handler, process_count, use_verbose_logging):\n self._file_handler = file_handler\n self._process_count = process_count\n self._use_verbose_logging = use_verbose_logging\n self._logger = Logger()\n self._manager = Manager()\n self._event_queue = self._manager.Queue()\n self._task_queue = self._manager.Queue()\n\n def _get_process(self, process_id):\n raise AttributeError('not supported')\n <function token>\n\n def _initialize_processes(self):\n processes = []\n for i in range(self._process_count):\n process = self._get_process(i)\n processes.append(process)\n process.start()\n return processes\n\n def _fill_task_queue(self, items):\n for item in items:\n self._task_queue.put(item)\n\n def _process_events(self, total_to_process, event_handler_func,\n event_handler_args):\n num_processed = 0\n num_processed_by_process_list = [0] * self._process_count\n while True:\n self._write_progress_to_console(num_processed, total_to_process,\n num_processed_by_process_list)\n event = None\n try:\n event = self._event_queue.get(True, 1)\n except:\n pass\n if event is not None:\n args_to_use = (event, num_processed_by_process_list,\n num_processed, total_to_process)\n args_to_use += event_handler_args\n num_processed = event_handler_func(*args_to_use)\n if num_processed >= total_to_process:\n break\n\n def _stop_processes(self, processes):\n for i in range(self._process_count):\n self._task_queue.put(-1)\n for process in processes:\n process.join()\n\n def _write_progress_to_console(self, num_processed, total_to_process,\n num_processed_by_process_list):\n output_str = 'Progress: ' + str(num_processed) + '/' + str(\n total_to_process) + ' '\n for i in range(len(num_processed_by_process_list)):\n output_str += 'P' + str(i) + ': ' + str(\n num_processed_by_process_list[i]) + ' '\n sys.stdout.write(output_str + '\\r')\n sys.stdout.flush()\n\n def _log_process_message(self, process_id, message):\n if self._use_verbose_logging:\n self._logger.print_log('[process: ' + str(process_id) + '] ' +\n message)\n",
"<import token>\n\n\nclass BaseProcessor:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, file_handler, process_count, use_verbose_logging):\n self._file_handler = file_handler\n self._process_count = process_count\n self._use_verbose_logging = use_verbose_logging\n self._logger = Logger()\n self._manager = Manager()\n self._event_queue = self._manager.Queue()\n self._task_queue = self._manager.Queue()\n\n def _get_process(self, process_id):\n raise AttributeError('not supported')\n <function token>\n\n def _initialize_processes(self):\n processes = []\n for i in range(self._process_count):\n process = self._get_process(i)\n processes.append(process)\n process.start()\n return processes\n\n def _fill_task_queue(self, items):\n for item in items:\n self._task_queue.put(item)\n\n def _process_events(self, total_to_process, event_handler_func,\n event_handler_args):\n num_processed = 0\n num_processed_by_process_list = [0] * self._process_count\n while True:\n self._write_progress_to_console(num_processed, total_to_process,\n num_processed_by_process_list)\n event = None\n try:\n event = self._event_queue.get(True, 1)\n except:\n pass\n if event is not None:\n args_to_use = (event, num_processed_by_process_list,\n num_processed, total_to_process)\n args_to_use += event_handler_args\n num_processed = event_handler_func(*args_to_use)\n if num_processed >= total_to_process:\n break\n\n def _stop_processes(self, processes):\n for i in range(self._process_count):\n self._task_queue.put(-1)\n for process in processes:\n process.join()\n <function token>\n\n def _log_process_message(self, process_id, message):\n if self._use_verbose_logging:\n self._logger.print_log('[process: ' + str(process_id) + '] ' +\n message)\n",
"<import token>\n\n\nclass BaseProcessor:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, file_handler, process_count, use_verbose_logging):\n self._file_handler = file_handler\n self._process_count = process_count\n self._use_verbose_logging = use_verbose_logging\n self._logger = Logger()\n self._manager = Manager()\n self._event_queue = self._manager.Queue()\n self._task_queue = self._manager.Queue()\n\n def _get_process(self, process_id):\n raise AttributeError('not supported')\n <function token>\n <function token>\n\n def _fill_task_queue(self, items):\n for item in items:\n self._task_queue.put(item)\n\n def _process_events(self, total_to_process, event_handler_func,\n event_handler_args):\n num_processed = 0\n num_processed_by_process_list = [0] * self._process_count\n while True:\n self._write_progress_to_console(num_processed, total_to_process,\n num_processed_by_process_list)\n event = None\n try:\n event = self._event_queue.get(True, 1)\n except:\n pass\n if event is not None:\n args_to_use = (event, num_processed_by_process_list,\n num_processed, total_to_process)\n args_to_use += event_handler_args\n num_processed = event_handler_func(*args_to_use)\n if num_processed >= total_to_process:\n break\n\n def _stop_processes(self, processes):\n for i in range(self._process_count):\n self._task_queue.put(-1)\n for process in processes:\n process.join()\n <function token>\n\n def _log_process_message(self, process_id, message):\n if self._use_verbose_logging:\n self._logger.print_log('[process: ' + str(process_id) + '] ' +\n message)\n",
"<import token>\n\n\nclass BaseProcessor:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, file_handler, process_count, use_verbose_logging):\n self._file_handler = file_handler\n self._process_count = process_count\n self._use_verbose_logging = use_verbose_logging\n self._logger = Logger()\n self._manager = Manager()\n self._event_queue = self._manager.Queue()\n self._task_queue = self._manager.Queue()\n\n def _get_process(self, process_id):\n raise AttributeError('not supported')\n <function token>\n <function token>\n\n def _fill_task_queue(self, items):\n for item in items:\n self._task_queue.put(item)\n <function token>\n\n def _stop_processes(self, processes):\n for i in range(self._process_count):\n self._task_queue.put(-1)\n for process in processes:\n process.join()\n <function token>\n\n def _log_process_message(self, process_id, message):\n if self._use_verbose_logging:\n self._logger.print_log('[process: ' + str(process_id) + '] ' +\n message)\n",
"<import token>\n\n\nclass BaseProcessor:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, file_handler, process_count, use_verbose_logging):\n self._file_handler = file_handler\n self._process_count = process_count\n self._use_verbose_logging = use_verbose_logging\n self._logger = Logger()\n self._manager = Manager()\n self._event_queue = self._manager.Queue()\n self._task_queue = self._manager.Queue()\n\n def _get_process(self, process_id):\n raise AttributeError('not supported')\n <function token>\n <function token>\n\n def _fill_task_queue(self, items):\n for item in items:\n self._task_queue.put(item)\n <function token>\n <function token>\n <function token>\n\n def _log_process_message(self, process_id, message):\n if self._use_verbose_logging:\n self._logger.print_log('[process: ' + str(process_id) + '] ' +\n message)\n",
"<import token>\n\n\nclass BaseProcessor:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, file_handler, process_count, use_verbose_logging):\n self._file_handler = file_handler\n self._process_count = process_count\n self._use_verbose_logging = use_verbose_logging\n self._logger = Logger()\n self._manager = Manager()\n self._event_queue = self._manager.Queue()\n self._task_queue = self._manager.Queue()\n <function token>\n <function token>\n <function token>\n\n def _fill_task_queue(self, items):\n for item in items:\n self._task_queue.put(item)\n <function token>\n <function token>\n <function token>\n\n def _log_process_message(self, process_id, message):\n if self._use_verbose_logging:\n self._logger.print_log('[process: ' + str(process_id) + '] ' +\n message)\n",
"<import token>\n\n\nclass BaseProcessor:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, file_handler, process_count, use_verbose_logging):\n self._file_handler = file_handler\n self._process_count = process_count\n self._use_verbose_logging = use_verbose_logging\n self._logger = Logger()\n self._manager = Manager()\n self._event_queue = self._manager.Queue()\n self._task_queue = self._manager.Queue()\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def _log_process_message(self, process_id, message):\n if self._use_verbose_logging:\n self._logger.print_log('[process: ' + str(process_id) + '] ' +\n message)\n",
"<import token>\n\n\nclass BaseProcessor:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def __init__(self, file_handler, process_count, use_verbose_logging):\n self._file_handler = file_handler\n self._process_count = process_count\n self._use_verbose_logging = use_verbose_logging\n self._logger = Logger()\n self._manager = Manager()\n self._event_queue = self._manager.Queue()\n self._task_queue = self._manager.Queue()\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\nclass BaseProcessor:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
99,391 |
6ba7a8b5132aac7204f72ad1592113d08f54071b
|
from pymongo import MongoClient
from chirpy.config import settings
def connect(**kwargs):
defaults = {
'host': settings.mongo.get('host'),
'port': settings.mongo.get('port')
}
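    # Caller-supplied keyword arguments override the configured host/port defaults.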
defaults.update(kwargs)
client = MongoClient(**defaults)
return client[settings.mongo.database]
if __name__ == '__main__':
db = connect()
db.tweets.count()
|
[
"from pymongo import MongoClient\nfrom chirpy.config import settings\n\ndef connect(**kwargs):\n defaults = {\n 'host': settings.mongo.get('host'),\n 'port': settings.mongo.get('port')\n }\n defaults.update(kwargs)\n\n client = MongoClient(**defaults)\n return client[settings.mongo.database]\n \nif __name__ == '__main__':\n db = connect()\n db.tweets.count()\n",
"from pymongo import MongoClient\nfrom chirpy.config import settings\n\n\ndef connect(**kwargs):\n defaults = {'host': settings.mongo.get('host'), 'port': settings.mongo.\n get('port')}\n defaults.update(kwargs)\n client = MongoClient(**defaults)\n return client[settings.mongo.database]\n\n\nif __name__ == '__main__':\n db = connect()\n db.tweets.count()\n",
"<import token>\n\n\ndef connect(**kwargs):\n defaults = {'host': settings.mongo.get('host'), 'port': settings.mongo.\n get('port')}\n defaults.update(kwargs)\n client = MongoClient(**defaults)\n return client[settings.mongo.database]\n\n\nif __name__ == '__main__':\n db = connect()\n db.tweets.count()\n",
"<import token>\n\n\ndef connect(**kwargs):\n defaults = {'host': settings.mongo.get('host'), 'port': settings.mongo.\n get('port')}\n defaults.update(kwargs)\n client = MongoClient(**defaults)\n return client[settings.mongo.database]\n\n\n<code token>\n",
"<import token>\n<function token>\n<code token>\n"
] | false |
99,392 |
490e3e72f82c96a5627e7efa4015b5484a579d5d
|
from core.forms import AnonymousSubscribeForm, LeadGenerationForm
from django.utils import translation
from django.conf import settings
from directory_constants import urls
def subscribe_form(request):
return {
'subscribe': {
'form': AnonymousSubscribeForm(),
},
}
def lead_generation_form(request):
return {
'lead_generation': {
'form': LeadGenerationForm()
}
}
def html_lang_attribute(request):
return {
'directory_components_html_lang_attribute': translation.get_language()
}
def footer_contact_us_link(request):
if settings.FEATURE_FLAGS.get('INTERNATIONAL_CONTACT_LINK_ON'):
footer_contact_us_link = urls.build_great_url('international/contact/')
else:
footer_contact_us_link = urls.CONTACT_US
return {
'footer_contact_us_link': footer_contact_us_link
}
|
[
"from core.forms import AnonymousSubscribeForm, LeadGenerationForm\nfrom django.utils import translation\nfrom django.conf import settings\nfrom directory_constants import urls\n\n\ndef subscribe_form(request):\n return {\n 'subscribe': {\n 'form': AnonymousSubscribeForm(),\n },\n }\n\n\ndef lead_generation_form(request):\n return {\n 'lead_generation': {\n 'form': LeadGenerationForm()\n }\n }\n\n\ndef html_lang_attribute(request):\n return {\n 'directory_components_html_lang_attribute': translation.get_language()\n }\n\n\ndef footer_contact_us_link(request):\n if settings.FEATURE_FLAGS.get('INTERNATIONAL_CONTACT_LINK_ON'):\n footer_contact_us_link = urls.build_great_url('international/contact/')\n else:\n footer_contact_us_link = urls.CONTACT_US\n\n return {\n 'footer_contact_us_link': footer_contact_us_link\n }\n",
"from core.forms import AnonymousSubscribeForm, LeadGenerationForm\nfrom django.utils import translation\nfrom django.conf import settings\nfrom directory_constants import urls\n\n\ndef subscribe_form(request):\n return {'subscribe': {'form': AnonymousSubscribeForm()}}\n\n\ndef lead_generation_form(request):\n return {'lead_generation': {'form': LeadGenerationForm()}}\n\n\ndef html_lang_attribute(request):\n return {'directory_components_html_lang_attribute': translation.\n get_language()}\n\n\ndef footer_contact_us_link(request):\n if settings.FEATURE_FLAGS.get('INTERNATIONAL_CONTACT_LINK_ON'):\n footer_contact_us_link = urls.build_great_url('international/contact/')\n else:\n footer_contact_us_link = urls.CONTACT_US\n return {'footer_contact_us_link': footer_contact_us_link}\n",
"<import token>\n\n\ndef subscribe_form(request):\n return {'subscribe': {'form': AnonymousSubscribeForm()}}\n\n\ndef lead_generation_form(request):\n return {'lead_generation': {'form': LeadGenerationForm()}}\n\n\ndef html_lang_attribute(request):\n return {'directory_components_html_lang_attribute': translation.\n get_language()}\n\n\ndef footer_contact_us_link(request):\n if settings.FEATURE_FLAGS.get('INTERNATIONAL_CONTACT_LINK_ON'):\n footer_contact_us_link = urls.build_great_url('international/contact/')\n else:\n footer_contact_us_link = urls.CONTACT_US\n return {'footer_contact_us_link': footer_contact_us_link}\n",
"<import token>\n\n\ndef subscribe_form(request):\n return {'subscribe': {'form': AnonymousSubscribeForm()}}\n\n\ndef lead_generation_form(request):\n return {'lead_generation': {'form': LeadGenerationForm()}}\n\n\n<function token>\n\n\ndef footer_contact_us_link(request):\n if settings.FEATURE_FLAGS.get('INTERNATIONAL_CONTACT_LINK_ON'):\n footer_contact_us_link = urls.build_great_url('international/contact/')\n else:\n footer_contact_us_link = urls.CONTACT_US\n return {'footer_contact_us_link': footer_contact_us_link}\n",
"<import token>\n<function token>\n\n\ndef lead_generation_form(request):\n return {'lead_generation': {'form': LeadGenerationForm()}}\n\n\n<function token>\n\n\ndef footer_contact_us_link(request):\n if settings.FEATURE_FLAGS.get('INTERNATIONAL_CONTACT_LINK_ON'):\n footer_contact_us_link = urls.build_great_url('international/contact/')\n else:\n footer_contact_us_link = urls.CONTACT_US\n return {'footer_contact_us_link': footer_contact_us_link}\n",
"<import token>\n<function token>\n\n\ndef lead_generation_form(request):\n return {'lead_generation': {'form': LeadGenerationForm()}}\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
99,393 |
af176a53c5002e247a09537d9e30456a579fc9f1
|
#!/usr/bin/env python
# Copyright (c) 2011 Vincent Batts, Vienna, VA, USA
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import sys
from optparse import OptionParser
import logging
# Setup our path, if this is running from the src directory
fpath = os.path.join(os.path.dirname(os.path.abspath(__file__)),"../lib")
if os.path.exists(fpath):
sys.path.insert(0,fpath)
import pysubsonic
log = logging.getLogger('subsonic')
def parse_args(args):
usage = "usage: %prog [options]"
parser = OptionParser(usage)
parser.add_option("-s","--search",dest="search",default=None,
help="string to search for")
parser.add_option("-D",dest="debug",action="store_true",
default=False,help="debugging")
parser.add_option("-i",dest="indexes",action="store_true",
default=False,help="show indexes")
return parser.parse_args(args)
def init_logging(level = pysubsonic.DEFAULT_LOG_LEVEL):
hndlr = logging.StreamHandler()
log.addHandler(hndlr)
log.setLevel(level)
def has_cmd(cmd):
if os.system('type -p %s 2>/dev/null >/dev/null' % cmd) == 0:
return True
else:
return False
def check_system():
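    # Confirm the external commands this tool relies on ('play', 'wget', 'curl') are available.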
if not has_cmd('play'): return False
if not has_cmd('wget'): return False
if not has_cmd('curl'): return False
return True
if __name__ == '__main__':
    (opts, args) = parse_args(sys.argv[1:])
if opts.debug:
init_logging(logging.DEBUG)
else:
init_logging()
auth = pysubsonic.config.read_config()['auth']
sub = pysubsonic.subsonic.Subsonic(auth['url'], auth['username'], auth['password'])
log.debug( sub.__mkparams__() )
log.debug( sub.getLicense() )
response = sub.getMusicFolders()
# '{"subsonic-response": {\n "license": {\n "date": "2011-05-16T15:18:12",\n "email": "[email protected]",\n "key": "8e2c6485e247b6c2457c8c0bdcaca459",\n "valid": true\n },\n "status": "ok",\n "version": "1.6.0",\n "xmlns": "http://subsonic.org/restapi"\n}}'
log.debug(response)
basedirs = response['subsonic-response']['musicFolders']['musicFolder']
# {u'subsonic-response': {u'musicFolders': {u'musicFolder': [{u'id': 0, u'name': u'Music'}, {u'id': 1, u'name': u'Videos'}]}, u'status': u'ok', u'version': u'1.6.0', u'xmlns': u'http://subsonic.org/restapi'}}
log.debug(basedirs)
if opts.indexes:
response = sub.getIndexes( musicFolderId = '0' )
log.debug(response)
if opts.search:
response = sub.search2( query = opts.search )
log.debug(response)
|
[
"#!/usr/bin/env python\n# Copyright (c) 2011 Vincent Batts, Vienna, VA, USA\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL\n# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n\nimport os\nimport sys\nfrom optparse import OptionParser\nimport logging\n\n# Setup our path, if this is running from the src directory\nfpath = os.path.join(os.path.dirname(os.path.abspath(__file__)),\"../lib\")\nif os.path.exists(fpath):\n sys.path.insert(0,fpath)\n\nimport pysubsonic\n\nlog = logging.getLogger('subsonic')\n\ndef parse_args(args):\n usage = \"usage: %prog [options]\"\n parser = OptionParser(usage)\n parser.add_option(\"-s\",\"--search\",dest=\"search\",default=None,\n help=\"string to search for\")\n parser.add_option(\"-D\",dest=\"debug\",action=\"store_true\",\n default=False,help=\"debugging\")\n parser.add_option(\"-i\",dest=\"indexes\",action=\"store_true\",\n default=False,help=\"show indexes\")\n return parser.parse_args(args)\n\ndef init_logging(level = pysubsonic.DEFAULT_LOG_LEVEL):\n hndlr = logging.StreamHandler()\n log.addHandler(hndlr)\n log.setLevel(level)\n\ndef has_cmd(cmd):\n if os.system('type -p %s 2>/dev/null >/dev/null' % cmd) == 0:\n return True\n else:\n return False\n\ndef check_system():\n if not has_cmd('play'): return False\n if not has_cmd('wget'): return False\n if not has_cmd('curl'): return False\n return True\n\n\nif __name__ == '__main__':\n\n (opts, args) = parse_args(sys.argv[0:])\n if opts.debug:\n init_logging(logging.DEBUG)\n else:\n init_logging()\n\n\n auth = pysubsonic.config.read_config()['auth']\n sub = pysubsonic.subsonic.Subsonic(auth['url'], auth['username'], auth['password'])\n\n log.debug( sub.__mkparams__() )\n\n log.debug( sub.getLicense() )\n\n response = sub.getMusicFolders()\n # '{\"subsonic-response\": {\\n \"license\": {\\n \"date\": \"2011-05-16T15:18:12\",\\n \"email\": \"[email protected]\",\\n \"key\": \"8e2c6485e247b6c2457c8c0bdcaca459\",\\n \"valid\": true\\n },\\n \"status\": \"ok\",\\n \"version\": \"1.6.0\",\\n \"xmlns\": \"http://subsonic.org/restapi\"\\n}}'\n log.debug(response)\n\n basedirs = response['subsonic-response']['musicFolders']['musicFolder']\n # {u'subsonic-response': {u'musicFolders': {u'musicFolder': [{u'id': 0, u'name': u'Music'}, {u'id': 1, u'name': u'Videos'}]}, u'status': u'ok', u'version': u'1.6.0', u'xmlns': u'http://subsonic.org/restapi'}}\n log.debug(basedirs)\n \n if opts.indexes:\n response = sub.getIndexes( musicFolderId = '0' )\n log.debug(response)\n\n if opts.search:\n response = sub.search2( query = opts.search )\n log.debug(response)\n\n",
"import os\nimport sys\nfrom optparse import OptionParser\nimport logging\nfpath = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../lib')\nif os.path.exists(fpath):\n sys.path.insert(0, fpath)\nimport pysubsonic\nlog = logging.getLogger('subsonic')\n\n\ndef parse_args(args):\n usage = 'usage: %prog [options]'\n parser = OptionParser(usage)\n parser.add_option('-s', '--search', dest='search', default=None, help=\n 'string to search for')\n parser.add_option('-D', dest='debug', action='store_true', default=\n False, help='debugging')\n parser.add_option('-i', dest='indexes', action='store_true', default=\n False, help='show indexes')\n return parser.parse_args(args)\n\n\ndef init_logging(level=pysubsonic.DEFAULT_LOG_LEVEL):\n hndlr = logging.StreamHandler()\n log.addHandler(hndlr)\n log.setLevel(level)\n\n\ndef has_cmd(cmd):\n if os.system('type -p %s 2>/dev/null >/dev/null' % cmd) == 0:\n return True\n else:\n return False\n\n\ndef check_system():\n if not has_cmd('play'):\n return False\n if not has_cmd('wget'):\n return False\n if not has_cmd('curl'):\n return False\n return True\n\n\nif __name__ == '__main__':\n opts, args = parse_args(sys.argv[0:])\n if opts.debug:\n init_logging(logging.DEBUG)\n else:\n init_logging()\n auth = pysubsonic.config.read_config()['auth']\n sub = pysubsonic.subsonic.Subsonic(auth['url'], auth['username'], auth[\n 'password'])\n log.debug(sub.__mkparams__())\n log.debug(sub.getLicense())\n response = sub.getMusicFolders()\n log.debug(response)\n basedirs = response['subsonic-response']['musicFolders']['musicFolder']\n log.debug(basedirs)\n if opts.indexes:\n response = sub.getIndexes(musicFolderId='0')\n log.debug(response)\n if opts.search:\n response = sub.search2(query=opts.search)\n log.debug(response)\n",
"<import token>\nfpath = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../lib')\nif os.path.exists(fpath):\n sys.path.insert(0, fpath)\n<import token>\nlog = logging.getLogger('subsonic')\n\n\ndef parse_args(args):\n usage = 'usage: %prog [options]'\n parser = OptionParser(usage)\n parser.add_option('-s', '--search', dest='search', default=None, help=\n 'string to search for')\n parser.add_option('-D', dest='debug', action='store_true', default=\n False, help='debugging')\n parser.add_option('-i', dest='indexes', action='store_true', default=\n False, help='show indexes')\n return parser.parse_args(args)\n\n\ndef init_logging(level=pysubsonic.DEFAULT_LOG_LEVEL):\n hndlr = logging.StreamHandler()\n log.addHandler(hndlr)\n log.setLevel(level)\n\n\ndef has_cmd(cmd):\n if os.system('type -p %s 2>/dev/null >/dev/null' % cmd) == 0:\n return True\n else:\n return False\n\n\ndef check_system():\n if not has_cmd('play'):\n return False\n if not has_cmd('wget'):\n return False\n if not has_cmd('curl'):\n return False\n return True\n\n\nif __name__ == '__main__':\n opts, args = parse_args(sys.argv[0:])\n if opts.debug:\n init_logging(logging.DEBUG)\n else:\n init_logging()\n auth = pysubsonic.config.read_config()['auth']\n sub = pysubsonic.subsonic.Subsonic(auth['url'], auth['username'], auth[\n 'password'])\n log.debug(sub.__mkparams__())\n log.debug(sub.getLicense())\n response = sub.getMusicFolders()\n log.debug(response)\n basedirs = response['subsonic-response']['musicFolders']['musicFolder']\n log.debug(basedirs)\n if opts.indexes:\n response = sub.getIndexes(musicFolderId='0')\n log.debug(response)\n if opts.search:\n response = sub.search2(query=opts.search)\n log.debug(response)\n",
"<import token>\n<assignment token>\nif os.path.exists(fpath):\n sys.path.insert(0, fpath)\n<import token>\n<assignment token>\n\n\ndef parse_args(args):\n usage = 'usage: %prog [options]'\n parser = OptionParser(usage)\n parser.add_option('-s', '--search', dest='search', default=None, help=\n 'string to search for')\n parser.add_option('-D', dest='debug', action='store_true', default=\n False, help='debugging')\n parser.add_option('-i', dest='indexes', action='store_true', default=\n False, help='show indexes')\n return parser.parse_args(args)\n\n\ndef init_logging(level=pysubsonic.DEFAULT_LOG_LEVEL):\n hndlr = logging.StreamHandler()\n log.addHandler(hndlr)\n log.setLevel(level)\n\n\ndef has_cmd(cmd):\n if os.system('type -p %s 2>/dev/null >/dev/null' % cmd) == 0:\n return True\n else:\n return False\n\n\ndef check_system():\n if not has_cmd('play'):\n return False\n if not has_cmd('wget'):\n return False\n if not has_cmd('curl'):\n return False\n return True\n\n\nif __name__ == '__main__':\n opts, args = parse_args(sys.argv[0:])\n if opts.debug:\n init_logging(logging.DEBUG)\n else:\n init_logging()\n auth = pysubsonic.config.read_config()['auth']\n sub = pysubsonic.subsonic.Subsonic(auth['url'], auth['username'], auth[\n 'password'])\n log.debug(sub.__mkparams__())\n log.debug(sub.getLicense())\n response = sub.getMusicFolders()\n log.debug(response)\n basedirs = response['subsonic-response']['musicFolders']['musicFolder']\n log.debug(basedirs)\n if opts.indexes:\n response = sub.getIndexes(musicFolderId='0')\n log.debug(response)\n if opts.search:\n response = sub.search2(query=opts.search)\n log.debug(response)\n",
"<import token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n\n\ndef parse_args(args):\n usage = 'usage: %prog [options]'\n parser = OptionParser(usage)\n parser.add_option('-s', '--search', dest='search', default=None, help=\n 'string to search for')\n parser.add_option('-D', dest='debug', action='store_true', default=\n False, help='debugging')\n parser.add_option('-i', dest='indexes', action='store_true', default=\n False, help='show indexes')\n return parser.parse_args(args)\n\n\ndef init_logging(level=pysubsonic.DEFAULT_LOG_LEVEL):\n hndlr = logging.StreamHandler()\n log.addHandler(hndlr)\n log.setLevel(level)\n\n\ndef has_cmd(cmd):\n if os.system('type -p %s 2>/dev/null >/dev/null' % cmd) == 0:\n return True\n else:\n return False\n\n\ndef check_system():\n if not has_cmd('play'):\n return False\n if not has_cmd('wget'):\n return False\n if not has_cmd('curl'):\n return False\n return True\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n\n\ndef parse_args(args):\n usage = 'usage: %prog [options]'\n parser = OptionParser(usage)\n parser.add_option('-s', '--search', dest='search', default=None, help=\n 'string to search for')\n parser.add_option('-D', dest='debug', action='store_true', default=\n False, help='debugging')\n parser.add_option('-i', dest='indexes', action='store_true', default=\n False, help='show indexes')\n return parser.parse_args(args)\n\n\ndef init_logging(level=pysubsonic.DEFAULT_LOG_LEVEL):\n hndlr = logging.StreamHandler()\n log.addHandler(hndlr)\n log.setLevel(level)\n\n\n<function token>\n\n\ndef check_system():\n if not has_cmd('play'):\n return False\n if not has_cmd('wget'):\n return False\n if not has_cmd('curl'):\n return False\n return True\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n\n\ndef parse_args(args):\n usage = 'usage: %prog [options]'\n parser = OptionParser(usage)\n parser.add_option('-s', '--search', dest='search', default=None, help=\n 'string to search for')\n parser.add_option('-D', dest='debug', action='store_true', default=\n False, help='debugging')\n parser.add_option('-i', dest='indexes', action='store_true', default=\n False, help='show indexes')\n return parser.parse_args(args)\n\n\ndef init_logging(level=pysubsonic.DEFAULT_LOG_LEVEL):\n hndlr = logging.StreamHandler()\n log.addHandler(hndlr)\n log.setLevel(level)\n\n\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n\n\ndef parse_args(args):\n usage = 'usage: %prog [options]'\n parser = OptionParser(usage)\n parser.add_option('-s', '--search', dest='search', default=None, help=\n 'string to search for')\n parser.add_option('-D', dest='debug', action='store_true', default=\n False, help='debugging')\n parser.add_option('-i', dest='indexes', action='store_true', default=\n False, help='show indexes')\n return parser.parse_args(args)\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
99,394 |
4539d89988d16b3b25420b9a1c416209ca8804ff
|
#
# Copyright (c) European Synchrotron Radiation Facility (ESRF)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the 'Software'), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__authors__ = ['O. Svensson']
__license__ = 'MIT'
__date__ = '21/04/2019'
# Corresponding EDNA code:
# https://github.com/olofsvensson/edna-mx
# mxPluginExec/plugins/EDPluginH5ToCBF-v1.1/plugins/EDPluginH5ToCBFv1_1.py
import pathlib
from edna2.tasks.AbstractTask import AbstractTask
from edna2.utils import UtilsImage
from edna2.utils import UtilsConfig
from edna2.utils import UtilsLogging
logger = UtilsLogging.getLogger()
class H5ToCBFTask(AbstractTask):
def getInDataSchema(self):
return {
"type": "object",
"required": ["hdf5File"],
"properties": {
"imageNumber": {"type": "integer"},
"startImageNumber": {"type": "integer"},
"imageNumber": {"type": "integer"},
"hdf5ImageNumber": {"type": "integer"},
"hdf5File": {"type": "string"},
"forcedOutputDirectory": {"type": "string"}
}
}
def getOutDataSchema(self):
return {
"type": "object",
"properties": {
"outputCBFFile": {"type": "string"}
}
}
def run(self, inData):
outData = {}
hdf5File = pathlib.Path(inData['hdf5File'])
directory = hdf5File.parent
prefix = UtilsImage.getPrefix(hdf5File)
        if 'imageNumber' in inData:
commandLine, cbfFile = self.generateCommandsWithImageNumber(
inData, directory, prefix, hdf5File)
outData['outputCBFFile'] = str(cbfFile)
elif 'startImageNumber' in inData and 'endImageNumber' in inData:
commandLine, template = self.generateCommandsWithImageRange(
inData, directory, prefix, hdf5File)
outData['outputCBFFileTemplate'] = template
self.setLogFileName('h5ToCBF.log')
self.runCommandLine('/opt/pxsoft/bin/eiger2cbf ' + commandLine, ignoreErrors=True)
return outData
@classmethod
def generateCommandsWithImageNumber(cls, inData, directory, prefix,
hdf5File):
"""
This method creates a list of commands for the converter
"""
imageNumber = inData['imageNumber']
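        # Default the HDF5 file number to the requested image number when not supplied explicitly.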
if 'hdf5ImageNumber' in inData:
hdf5ImageNumber = inData['hdf5ImageNumber']
else:
hdf5ImageNumber = imageNumber
if 'master.h5' in str(hdf5File):
masterFile = hdf5File
else:
if UtilsConfig.isEMBL():
fileName = '{0}_master.h5'.format(prefix)
else:
fileName = '{0}_{1}_master.h5'.format(prefix, hdf5ImageNumber)
masterFile = directory / fileName
if 'forcedOutputImageNumber' in inData:
cbfFileName = prefix + \
"_%04d" % inData['forcedOutputImageNumber'] + ".cbf"
imageNumberInHdf5File = imageNumber
else:
cbfFileName = prefix + "_%04d" % imageNumber + ".cbf"
imageNumberInHdf5File = 1
if not 'forcedOutputDirectory' in inData:
cbfFile = directory / cbfFileName
else:
forcedOutputDirectory = \
pathlib.Path(inData['forcedOutputDirectory'])
if not forcedOutputDirectory.exists():
forcedOutputDirectory.mkdir(parents=True, mode=0o755)
cbfFile = forcedOutputDirectory / cbfFileName
commandLine = "{0} {1} {2}".format(
masterFile, imageNumberInHdf5File, cbfFile
)
return commandLine, cbfFile
@classmethod
def generateCommandsWithImageRange(cls, inData, directory, prefix, hdf5File):
startImageNumber = inData['startImageNumber']
endImageNumber = inData['endImageNumber']
if 'hdf5ImageNumber' in inData:
hdf5ImageNumber = inData['hdf5ImageNumber']
else:
hdf5ImageNumber = startImageNumber
if 'master.h5' in str(hdf5File):
masterFile = hdf5File
else:
fileName = prefix + "_{0}_master.h5".format(hdf5ImageNumber)
masterFile = directory / fileName
cbfFileNamePrefix = prefix + '_'
if 'forcedOutputDirectory' in inData:
forcedOutputDirectory = \
pathlib.Path(inData['forcedOutputDirectory'])
if not forcedOutputDirectory.exists():
forcedOutputDirectory.mkdir(mode=0o755, parents=True)
cbfFilePath = forcedOutputDirectory / cbfFileNamePrefix
else:
cbfFilePath = directory / cbfFileNamePrefix
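        # The "<start>:<end>" range is passed straight to eiger2cbf; the ###### template below assumes its six-digit image numbering.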
commandLine = "{0} {1}:{2} {3}".format(
masterFile, startImageNumber, endImageNumber, cbfFilePath)
cbfFileTemplate = str(cbfFilePath) + "######.cbf"
return commandLine, cbfFileTemplate
|
[
"#\n# Copyright (c) European Synchrotron Radiation Facility (ESRF)\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of\n# this software and associated documentation files (the 'Software'), to deal in\n# the Software without restriction, including without limitation the rights to\n# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of\n# the Software, and to permit persons to whom the Software is furnished to do so,\n# subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS\n# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR\n# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER\n# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN\n# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n#\n\n__authors__ = ['O. Svensson']\n__license__ = 'MIT'\n__date__ = '21/04/2019'\n\n# Corresponding EDNA code:\n# https://github.com/olofsvensson/edna-mx\n# mxPluginExec/plugins/EDPluginH5ToCBF-v1.1/plugins/EDPluginH5ToCBFv1_1.py\n\nimport pathlib\n\nfrom edna2.tasks.AbstractTask import AbstractTask\n\nfrom edna2.utils import UtilsImage\nfrom edna2.utils import UtilsConfig\nfrom edna2.utils import UtilsLogging\n\nlogger = UtilsLogging.getLogger()\n\n\nclass H5ToCBFTask(AbstractTask):\n\n def getInDataSchema(self):\n return {\n \"type\": \"object\",\n \"required\": [\"hdf5File\"],\n \"properties\": {\n \"imageNumber\": {\"type\": \"integer\"},\n \"startImageNumber\": {\"type\": \"integer\"},\n \"imageNumber\": {\"type\": \"integer\"},\n \"hdf5ImageNumber\": {\"type\": \"integer\"},\n \"hdf5File\": {\"type\": \"string\"},\n \"forcedOutputDirectory\": {\"type\": \"string\"}\n }\n }\n\n def getOutDataSchema(self):\n return {\n \"type\": \"object\",\n \"properties\": {\n \"outputCBFFile\": {\"type\": \"string\"}\n }\n }\n\n def run(self, inData):\n outData = {}\n hdf5File = pathlib.Path(inData['hdf5File'])\n directory = hdf5File.parent\n prefix = UtilsImage.getPrefix(hdf5File)\n if 'imageNumber'in inData:\n commandLine, cbfFile = self.generateCommandsWithImageNumber(\n inData, directory, prefix, hdf5File)\n outData['outputCBFFile'] = str(cbfFile)\n elif 'startImageNumber' in inData and 'endImageNumber' in inData:\n commandLine, template = self.generateCommandsWithImageRange(\n inData, directory, prefix, hdf5File)\n outData['outputCBFFileTemplate'] = template\n self.setLogFileName('h5ToCBF.log')\n self.runCommandLine('/opt/pxsoft/bin/eiger2cbf ' + commandLine, ignoreErrors=True)\n return outData\n\n @classmethod\n def generateCommandsWithImageNumber(cls, inData, directory, prefix,\n hdf5File):\n \"\"\"\n This method creates a list of commands for the converter\n \"\"\"\n imageNumber = inData['imageNumber']\n if 'hdf5ImageNumber' in inData:\n hdf5ImageNumber = inData['hdf5ImageNumber']\n else:\n hdf5ImageNumber = imageNumber\n if 'master.h5' in str(hdf5File):\n masterFile = hdf5File\n else:\n if UtilsConfig.isEMBL():\n fileName = '{0}_master.h5'.format(prefix)\n else:\n fileName = '{0}_{1}_master.h5'.format(prefix, hdf5ImageNumber)\n masterFile = directory / fileName\n if 'forcedOutputImageNumber' in inData:\n cbfFileName = prefix + \\\n \"_%04d\" % 
inData['forcedOutputImageNumber'] + \".cbf\"\n imageNumberInHdf5File = imageNumber\n else:\n cbfFileName = prefix + \"_%04d\" % imageNumber + \".cbf\"\n imageNumberInHdf5File = 1\n if not 'forcedOutputDirectory' in inData:\n cbfFile = directory / cbfFileName\n else:\n forcedOutputDirectory = \\\n pathlib.Path(inData['forcedOutputDirectory'])\n if not forcedOutputDirectory.exists():\n forcedOutputDirectory.mkdir(parents=True, mode=0o755)\n cbfFile = forcedOutputDirectory / cbfFileName\n commandLine = \"{0} {1} {2}\".format(\n masterFile, imageNumberInHdf5File, cbfFile\n )\n return commandLine, cbfFile\n\n @classmethod\n def generateCommandsWithImageRange(cls, inData, directory, prefix, hdf5File):\n startImageNumber = inData['startImageNumber']\n endImageNumber = inData['endImageNumber']\n if 'hdf5ImageNumber' in inData:\n hdf5ImageNumber = inData['hdf5ImageNumber']\n else:\n hdf5ImageNumber = startImageNumber\n if 'master.h5' in str(hdf5File):\n masterFile = hdf5File\n else:\n fileName = prefix + \"_{0}_master.h5\".format(hdf5ImageNumber)\n masterFile = directory / fileName\n cbfFileNamePrefix = prefix + '_'\n if 'forcedOutputDirectory' in inData:\n forcedOutputDirectory = \\\n pathlib.Path(inData['forcedOutputDirectory'])\n if not forcedOutputDirectory.exists():\n forcedOutputDirectory.mkdir(mode=0o755, parents=True)\n cbfFilePath = forcedOutputDirectory / cbfFileNamePrefix\n else:\n cbfFilePath = directory / cbfFileNamePrefix\n commandLine = \"{0} {1}:{2} {3}\".format(\n masterFile, startImageNumber, endImageNumber, cbfFilePath)\n cbfFileTemplate = str(cbfFilePath) + \"######.cbf\"\n return commandLine, cbfFileTemplate\n\n\n",
"__authors__ = ['O. Svensson']\n__license__ = 'MIT'\n__date__ = '21/04/2019'\nimport pathlib\nfrom edna2.tasks.AbstractTask import AbstractTask\nfrom edna2.utils import UtilsImage\nfrom edna2.utils import UtilsConfig\nfrom edna2.utils import UtilsLogging\nlogger = UtilsLogging.getLogger()\n\n\nclass H5ToCBFTask(AbstractTask):\n\n def getInDataSchema(self):\n return {'type': 'object', 'required': ['hdf5File'], 'properties': {\n 'imageNumber': {'type': 'integer'}, 'startImageNumber': {'type':\n 'integer'}, 'imageNumber': {'type': 'integer'},\n 'hdf5ImageNumber': {'type': 'integer'}, 'hdf5File': {'type':\n 'string'}, 'forcedOutputDirectory': {'type': 'string'}}}\n\n def getOutDataSchema(self):\n return {'type': 'object', 'properties': {'outputCBFFile': {'type':\n 'string'}}}\n\n def run(self, inData):\n outData = {}\n hdf5File = pathlib.Path(inData['hdf5File'])\n directory = hdf5File.parent\n prefix = UtilsImage.getPrefix(hdf5File)\n if 'imageNumber' in inData:\n commandLine, cbfFile = self.generateCommandsWithImageNumber(inData,\n directory, prefix, hdf5File)\n outData['outputCBFFile'] = str(cbfFile)\n elif 'startImageNumber' in inData and 'endImageNumber' in inData:\n commandLine, template = self.generateCommandsWithImageRange(inData,\n directory, prefix, hdf5File)\n outData['outputCBFFileTemplate'] = template\n self.setLogFileName('h5ToCBF.log')\n self.runCommandLine('/opt/pxsoft/bin/eiger2cbf ' + commandLine,\n ignoreErrors=True)\n return outData\n\n @classmethod\n def generateCommandsWithImageNumber(cls, inData, directory, prefix,\n hdf5File):\n \"\"\"\n This method creates a list of commands for the converter\n \"\"\"\n imageNumber = inData['imageNumber']\n if 'hdf5ImageNumber' in inData:\n hdf5ImageNumber = inData['hdf5ImageNumber']\n else:\n hdf5ImageNumber = imageNumber\n if 'master.h5' in str(hdf5File):\n masterFile = hdf5File\n else:\n if UtilsConfig.isEMBL():\n fileName = '{0}_master.h5'.format(prefix)\n else:\n fileName = '{0}_{1}_master.h5'.format(prefix, hdf5ImageNumber)\n masterFile = directory / fileName\n if 'forcedOutputImageNumber' in inData:\n cbfFileName = prefix + '_%04d' % inData['forcedOutputImageNumber'\n ] + '.cbf'\n imageNumberInHdf5File = imageNumber\n else:\n cbfFileName = prefix + '_%04d' % imageNumber + '.cbf'\n imageNumberInHdf5File = 1\n if not 'forcedOutputDirectory' in inData:\n cbfFile = directory / cbfFileName\n else:\n forcedOutputDirectory = pathlib.Path(inData[\n 'forcedOutputDirectory'])\n if not forcedOutputDirectory.exists():\n forcedOutputDirectory.mkdir(parents=True, mode=493)\n cbfFile = forcedOutputDirectory / cbfFileName\n commandLine = '{0} {1} {2}'.format(masterFile,\n imageNumberInHdf5File, cbfFile)\n return commandLine, cbfFile\n\n @classmethod\n def generateCommandsWithImageRange(cls, inData, directory, prefix, hdf5File\n ):\n startImageNumber = inData['startImageNumber']\n endImageNumber = inData['endImageNumber']\n if 'hdf5ImageNumber' in inData:\n hdf5ImageNumber = inData['hdf5ImageNumber']\n else:\n hdf5ImageNumber = startImageNumber\n if 'master.h5' in str(hdf5File):\n masterFile = hdf5File\n else:\n fileName = prefix + '_{0}_master.h5'.format(hdf5ImageNumber)\n masterFile = directory / fileName\n cbfFileNamePrefix = prefix + '_'\n if 'forcedOutputDirectory' in inData:\n forcedOutputDirectory = pathlib.Path(inData[\n 'forcedOutputDirectory'])\n if not forcedOutputDirectory.exists():\n forcedOutputDirectory.mkdir(mode=493, parents=True)\n cbfFilePath = forcedOutputDirectory / cbfFileNamePrefix\n else:\n cbfFilePath = directory / 
cbfFileNamePrefix\n commandLine = '{0} {1}:{2} {3}'.format(masterFile, startImageNumber,\n endImageNumber, cbfFilePath)\n cbfFileTemplate = str(cbfFilePath) + '######.cbf'\n return commandLine, cbfFileTemplate\n",
"__authors__ = ['O. Svensson']\n__license__ = 'MIT'\n__date__ = '21/04/2019'\n<import token>\nlogger = UtilsLogging.getLogger()\n\n\nclass H5ToCBFTask(AbstractTask):\n\n def getInDataSchema(self):\n return {'type': 'object', 'required': ['hdf5File'], 'properties': {\n 'imageNumber': {'type': 'integer'}, 'startImageNumber': {'type':\n 'integer'}, 'imageNumber': {'type': 'integer'},\n 'hdf5ImageNumber': {'type': 'integer'}, 'hdf5File': {'type':\n 'string'}, 'forcedOutputDirectory': {'type': 'string'}}}\n\n def getOutDataSchema(self):\n return {'type': 'object', 'properties': {'outputCBFFile': {'type':\n 'string'}}}\n\n def run(self, inData):\n outData = {}\n hdf5File = pathlib.Path(inData['hdf5File'])\n directory = hdf5File.parent\n prefix = UtilsImage.getPrefix(hdf5File)\n if 'imageNumber' in inData:\n commandLine, cbfFile = self.generateCommandsWithImageNumber(inData,\n directory, prefix, hdf5File)\n outData['outputCBFFile'] = str(cbfFile)\n elif 'startImageNumber' in inData and 'endImageNumber' in inData:\n commandLine, template = self.generateCommandsWithImageRange(inData,\n directory, prefix, hdf5File)\n outData['outputCBFFileTemplate'] = template\n self.setLogFileName('h5ToCBF.log')\n self.runCommandLine('/opt/pxsoft/bin/eiger2cbf ' + commandLine,\n ignoreErrors=True)\n return outData\n\n @classmethod\n def generateCommandsWithImageNumber(cls, inData, directory, prefix,\n hdf5File):\n \"\"\"\n This method creates a list of commands for the converter\n \"\"\"\n imageNumber = inData['imageNumber']\n if 'hdf5ImageNumber' in inData:\n hdf5ImageNumber = inData['hdf5ImageNumber']\n else:\n hdf5ImageNumber = imageNumber\n if 'master.h5' in str(hdf5File):\n masterFile = hdf5File\n else:\n if UtilsConfig.isEMBL():\n fileName = '{0}_master.h5'.format(prefix)\n else:\n fileName = '{0}_{1}_master.h5'.format(prefix, hdf5ImageNumber)\n masterFile = directory / fileName\n if 'forcedOutputImageNumber' in inData:\n cbfFileName = prefix + '_%04d' % inData['forcedOutputImageNumber'\n ] + '.cbf'\n imageNumberInHdf5File = imageNumber\n else:\n cbfFileName = prefix + '_%04d' % imageNumber + '.cbf'\n imageNumberInHdf5File = 1\n if not 'forcedOutputDirectory' in inData:\n cbfFile = directory / cbfFileName\n else:\n forcedOutputDirectory = pathlib.Path(inData[\n 'forcedOutputDirectory'])\n if not forcedOutputDirectory.exists():\n forcedOutputDirectory.mkdir(parents=True, mode=493)\n cbfFile = forcedOutputDirectory / cbfFileName\n commandLine = '{0} {1} {2}'.format(masterFile,\n imageNumberInHdf5File, cbfFile)\n return commandLine, cbfFile\n\n @classmethod\n def generateCommandsWithImageRange(cls, inData, directory, prefix, hdf5File\n ):\n startImageNumber = inData['startImageNumber']\n endImageNumber = inData['endImageNumber']\n if 'hdf5ImageNumber' in inData:\n hdf5ImageNumber = inData['hdf5ImageNumber']\n else:\n hdf5ImageNumber = startImageNumber\n if 'master.h5' in str(hdf5File):\n masterFile = hdf5File\n else:\n fileName = prefix + '_{0}_master.h5'.format(hdf5ImageNumber)\n masterFile = directory / fileName\n cbfFileNamePrefix = prefix + '_'\n if 'forcedOutputDirectory' in inData:\n forcedOutputDirectory = pathlib.Path(inData[\n 'forcedOutputDirectory'])\n if not forcedOutputDirectory.exists():\n forcedOutputDirectory.mkdir(mode=493, parents=True)\n cbfFilePath = forcedOutputDirectory / cbfFileNamePrefix\n else:\n cbfFilePath = directory / cbfFileNamePrefix\n commandLine = '{0} {1}:{2} {3}'.format(masterFile, startImageNumber,\n endImageNumber, cbfFilePath)\n cbfFileTemplate = str(cbfFilePath) + 
'######.cbf'\n return commandLine, cbfFileTemplate\n",
"<assignment token>\n<import token>\n<assignment token>\n\n\nclass H5ToCBFTask(AbstractTask):\n\n def getInDataSchema(self):\n return {'type': 'object', 'required': ['hdf5File'], 'properties': {\n 'imageNumber': {'type': 'integer'}, 'startImageNumber': {'type':\n 'integer'}, 'imageNumber': {'type': 'integer'},\n 'hdf5ImageNumber': {'type': 'integer'}, 'hdf5File': {'type':\n 'string'}, 'forcedOutputDirectory': {'type': 'string'}}}\n\n def getOutDataSchema(self):\n return {'type': 'object', 'properties': {'outputCBFFile': {'type':\n 'string'}}}\n\n def run(self, inData):\n outData = {}\n hdf5File = pathlib.Path(inData['hdf5File'])\n directory = hdf5File.parent\n prefix = UtilsImage.getPrefix(hdf5File)\n if 'imageNumber' in inData:\n commandLine, cbfFile = self.generateCommandsWithImageNumber(inData,\n directory, prefix, hdf5File)\n outData['outputCBFFile'] = str(cbfFile)\n elif 'startImageNumber' in inData and 'endImageNumber' in inData:\n commandLine, template = self.generateCommandsWithImageRange(inData,\n directory, prefix, hdf5File)\n outData['outputCBFFileTemplate'] = template\n self.setLogFileName('h5ToCBF.log')\n self.runCommandLine('/opt/pxsoft/bin/eiger2cbf ' + commandLine,\n ignoreErrors=True)\n return outData\n\n @classmethod\n def generateCommandsWithImageNumber(cls, inData, directory, prefix,\n hdf5File):\n \"\"\"\n This method creates a list of commands for the converter\n \"\"\"\n imageNumber = inData['imageNumber']\n if 'hdf5ImageNumber' in inData:\n hdf5ImageNumber = inData['hdf5ImageNumber']\n else:\n hdf5ImageNumber = imageNumber\n if 'master.h5' in str(hdf5File):\n masterFile = hdf5File\n else:\n if UtilsConfig.isEMBL():\n fileName = '{0}_master.h5'.format(prefix)\n else:\n fileName = '{0}_{1}_master.h5'.format(prefix, hdf5ImageNumber)\n masterFile = directory / fileName\n if 'forcedOutputImageNumber' in inData:\n cbfFileName = prefix + '_%04d' % inData['forcedOutputImageNumber'\n ] + '.cbf'\n imageNumberInHdf5File = imageNumber\n else:\n cbfFileName = prefix + '_%04d' % imageNumber + '.cbf'\n imageNumberInHdf5File = 1\n if not 'forcedOutputDirectory' in inData:\n cbfFile = directory / cbfFileName\n else:\n forcedOutputDirectory = pathlib.Path(inData[\n 'forcedOutputDirectory'])\n if not forcedOutputDirectory.exists():\n forcedOutputDirectory.mkdir(parents=True, mode=493)\n cbfFile = forcedOutputDirectory / cbfFileName\n commandLine = '{0} {1} {2}'.format(masterFile,\n imageNumberInHdf5File, cbfFile)\n return commandLine, cbfFile\n\n @classmethod\n def generateCommandsWithImageRange(cls, inData, directory, prefix, hdf5File\n ):\n startImageNumber = inData['startImageNumber']\n endImageNumber = inData['endImageNumber']\n if 'hdf5ImageNumber' in inData:\n hdf5ImageNumber = inData['hdf5ImageNumber']\n else:\n hdf5ImageNumber = startImageNumber\n if 'master.h5' in str(hdf5File):\n masterFile = hdf5File\n else:\n fileName = prefix + '_{0}_master.h5'.format(hdf5ImageNumber)\n masterFile = directory / fileName\n cbfFileNamePrefix = prefix + '_'\n if 'forcedOutputDirectory' in inData:\n forcedOutputDirectory = pathlib.Path(inData[\n 'forcedOutputDirectory'])\n if not forcedOutputDirectory.exists():\n forcedOutputDirectory.mkdir(mode=493, parents=True)\n cbfFilePath = forcedOutputDirectory / cbfFileNamePrefix\n else:\n cbfFilePath = directory / cbfFileNamePrefix\n commandLine = '{0} {1}:{2} {3}'.format(masterFile, startImageNumber,\n endImageNumber, cbfFilePath)\n cbfFileTemplate = str(cbfFilePath) + '######.cbf'\n return commandLine, cbfFileTemplate\n",
"<assignment token>\n<import token>\n<assignment token>\n\n\nclass H5ToCBFTask(AbstractTask):\n <function token>\n\n def getOutDataSchema(self):\n return {'type': 'object', 'properties': {'outputCBFFile': {'type':\n 'string'}}}\n\n def run(self, inData):\n outData = {}\n hdf5File = pathlib.Path(inData['hdf5File'])\n directory = hdf5File.parent\n prefix = UtilsImage.getPrefix(hdf5File)\n if 'imageNumber' in inData:\n commandLine, cbfFile = self.generateCommandsWithImageNumber(inData,\n directory, prefix, hdf5File)\n outData['outputCBFFile'] = str(cbfFile)\n elif 'startImageNumber' in inData and 'endImageNumber' in inData:\n commandLine, template = self.generateCommandsWithImageRange(inData,\n directory, prefix, hdf5File)\n outData['outputCBFFileTemplate'] = template\n self.setLogFileName('h5ToCBF.log')\n self.runCommandLine('/opt/pxsoft/bin/eiger2cbf ' + commandLine,\n ignoreErrors=True)\n return outData\n\n @classmethod\n def generateCommandsWithImageNumber(cls, inData, directory, prefix,\n hdf5File):\n \"\"\"\n This method creates a list of commands for the converter\n \"\"\"\n imageNumber = inData['imageNumber']\n if 'hdf5ImageNumber' in inData:\n hdf5ImageNumber = inData['hdf5ImageNumber']\n else:\n hdf5ImageNumber = imageNumber\n if 'master.h5' in str(hdf5File):\n masterFile = hdf5File\n else:\n if UtilsConfig.isEMBL():\n fileName = '{0}_master.h5'.format(prefix)\n else:\n fileName = '{0}_{1}_master.h5'.format(prefix, hdf5ImageNumber)\n masterFile = directory / fileName\n if 'forcedOutputImageNumber' in inData:\n cbfFileName = prefix + '_%04d' % inData['forcedOutputImageNumber'\n ] + '.cbf'\n imageNumberInHdf5File = imageNumber\n else:\n cbfFileName = prefix + '_%04d' % imageNumber + '.cbf'\n imageNumberInHdf5File = 1\n if not 'forcedOutputDirectory' in inData:\n cbfFile = directory / cbfFileName\n else:\n forcedOutputDirectory = pathlib.Path(inData[\n 'forcedOutputDirectory'])\n if not forcedOutputDirectory.exists():\n forcedOutputDirectory.mkdir(parents=True, mode=493)\n cbfFile = forcedOutputDirectory / cbfFileName\n commandLine = '{0} {1} {2}'.format(masterFile,\n imageNumberInHdf5File, cbfFile)\n return commandLine, cbfFile\n\n @classmethod\n def generateCommandsWithImageRange(cls, inData, directory, prefix, hdf5File\n ):\n startImageNumber = inData['startImageNumber']\n endImageNumber = inData['endImageNumber']\n if 'hdf5ImageNumber' in inData:\n hdf5ImageNumber = inData['hdf5ImageNumber']\n else:\n hdf5ImageNumber = startImageNumber\n if 'master.h5' in str(hdf5File):\n masterFile = hdf5File\n else:\n fileName = prefix + '_{0}_master.h5'.format(hdf5ImageNumber)\n masterFile = directory / fileName\n cbfFileNamePrefix = prefix + '_'\n if 'forcedOutputDirectory' in inData:\n forcedOutputDirectory = pathlib.Path(inData[\n 'forcedOutputDirectory'])\n if not forcedOutputDirectory.exists():\n forcedOutputDirectory.mkdir(mode=493, parents=True)\n cbfFilePath = forcedOutputDirectory / cbfFileNamePrefix\n else:\n cbfFilePath = directory / cbfFileNamePrefix\n commandLine = '{0} {1}:{2} {3}'.format(masterFile, startImageNumber,\n endImageNumber, cbfFilePath)\n cbfFileTemplate = str(cbfFilePath) + '######.cbf'\n return commandLine, cbfFileTemplate\n",
"<assignment token>\n<import token>\n<assignment token>\n\n\nclass H5ToCBFTask(AbstractTask):\n <function token>\n\n def getOutDataSchema(self):\n return {'type': 'object', 'properties': {'outputCBFFile': {'type':\n 'string'}}}\n\n def run(self, inData):\n outData = {}\n hdf5File = pathlib.Path(inData['hdf5File'])\n directory = hdf5File.parent\n prefix = UtilsImage.getPrefix(hdf5File)\n if 'imageNumber' in inData:\n commandLine, cbfFile = self.generateCommandsWithImageNumber(inData,\n directory, prefix, hdf5File)\n outData['outputCBFFile'] = str(cbfFile)\n elif 'startImageNumber' in inData and 'endImageNumber' in inData:\n commandLine, template = self.generateCommandsWithImageRange(inData,\n directory, prefix, hdf5File)\n outData['outputCBFFileTemplate'] = template\n self.setLogFileName('h5ToCBF.log')\n self.runCommandLine('/opt/pxsoft/bin/eiger2cbf ' + commandLine,\n ignoreErrors=True)\n return outData\n\n @classmethod\n def generateCommandsWithImageNumber(cls, inData, directory, prefix,\n hdf5File):\n \"\"\"\n This method creates a list of commands for the converter\n \"\"\"\n imageNumber = inData['imageNumber']\n if 'hdf5ImageNumber' in inData:\n hdf5ImageNumber = inData['hdf5ImageNumber']\n else:\n hdf5ImageNumber = imageNumber\n if 'master.h5' in str(hdf5File):\n masterFile = hdf5File\n else:\n if UtilsConfig.isEMBL():\n fileName = '{0}_master.h5'.format(prefix)\n else:\n fileName = '{0}_{1}_master.h5'.format(prefix, hdf5ImageNumber)\n masterFile = directory / fileName\n if 'forcedOutputImageNumber' in inData:\n cbfFileName = prefix + '_%04d' % inData['forcedOutputImageNumber'\n ] + '.cbf'\n imageNumberInHdf5File = imageNumber\n else:\n cbfFileName = prefix + '_%04d' % imageNumber + '.cbf'\n imageNumberInHdf5File = 1\n if not 'forcedOutputDirectory' in inData:\n cbfFile = directory / cbfFileName\n else:\n forcedOutputDirectory = pathlib.Path(inData[\n 'forcedOutputDirectory'])\n if not forcedOutputDirectory.exists():\n forcedOutputDirectory.mkdir(parents=True, mode=493)\n cbfFile = forcedOutputDirectory / cbfFileName\n commandLine = '{0} {1} {2}'.format(masterFile,\n imageNumberInHdf5File, cbfFile)\n return commandLine, cbfFile\n <function token>\n",
"<assignment token>\n<import token>\n<assignment token>\n\n\nclass H5ToCBFTask(AbstractTask):\n <function token>\n\n def getOutDataSchema(self):\n return {'type': 'object', 'properties': {'outputCBFFile': {'type':\n 'string'}}}\n\n def run(self, inData):\n outData = {}\n hdf5File = pathlib.Path(inData['hdf5File'])\n directory = hdf5File.parent\n prefix = UtilsImage.getPrefix(hdf5File)\n if 'imageNumber' in inData:\n commandLine, cbfFile = self.generateCommandsWithImageNumber(inData,\n directory, prefix, hdf5File)\n outData['outputCBFFile'] = str(cbfFile)\n elif 'startImageNumber' in inData and 'endImageNumber' in inData:\n commandLine, template = self.generateCommandsWithImageRange(inData,\n directory, prefix, hdf5File)\n outData['outputCBFFileTemplate'] = template\n self.setLogFileName('h5ToCBF.log')\n self.runCommandLine('/opt/pxsoft/bin/eiger2cbf ' + commandLine,\n ignoreErrors=True)\n return outData\n <function token>\n <function token>\n",
"<assignment token>\n<import token>\n<assignment token>\n\n\nclass H5ToCBFTask(AbstractTask):\n <function token>\n\n def getOutDataSchema(self):\n return {'type': 'object', 'properties': {'outputCBFFile': {'type':\n 'string'}}}\n <function token>\n <function token>\n <function token>\n",
"<assignment token>\n<import token>\n<assignment token>\n\n\nclass H5ToCBFTask(AbstractTask):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<assignment token>\n<import token>\n<assignment token>\n<class token>\n"
] | false |
99,395 |
eddc8e8cac92279ffe76511c6b57a6a7217c8173
|
# Generated by Django 3.0.2 on 2020-06-23 17:14
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import text_miner.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Document',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('filesize', models.IntegerField()),
('filename', models.CharField(max_length=200)),
('date', models.DateTimeField(auto_now_add=True)),
('document', models.FileField(max_length=200, upload_to=text_miner.models.user_directory_path)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='ScanPdf',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category', models.TextField(max_length=1000)),
('category_value', models.IntegerField()),
('document', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='text_miner.Document')),
],
),
migrations.CreateModel(
name='Results',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('prediction', models.IntegerField()),
('document', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='text_miner.Document')),
],
),
migrations.CreateModel(
name='InformationExtracted',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('information', models.TextField(max_length=100000)),
('result', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='text_miner.Results')),
],
),
]
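
# For reference, the models implied by this migration would look roughly like
# the sketch below. It is reconstructed from the field definitions above, not
# taken from the real text_miner/models.py; the layout produced by
# user_directory_path is only an assumption.
#
#     def user_directory_path(instance, filename):
#         return f'user_{instance.user.id}/{filename}'  # assumed layout
#
#     class Document(models.Model):
#         filesize = models.IntegerField()
#         filename = models.CharField(max_length=200)
#         date = models.DateTimeField(auto_now_add=True)
#         document = models.FileField(max_length=200,
#                                     upload_to=user_directory_path)
#         user = models.ForeignKey(settings.AUTH_USER_MODEL,
#                                  on_delete=models.CASCADE)
#
# ScanPdf, Results and InformationExtracted follow the same pattern, each
# holding the ForeignKey declared for it above.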
|
[
"# Generated by Django 3.0.2 on 2020-06-23 17:14\n\nfrom django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport text_miner.models\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Document',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('filesize', models.IntegerField()),\n ('filename', models.CharField(max_length=200)),\n ('date', models.DateTimeField(auto_now_add=True)),\n ('document', models.FileField(max_length=200, upload_to=text_miner.models.user_directory_path)),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.CreateModel(\n name='ScanPdf',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('category', models.TextField(max_length=1000)),\n ('category_value', models.IntegerField()),\n ('document', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='text_miner.Document')),\n ],\n ),\n migrations.CreateModel(\n name='Results',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('prediction', models.IntegerField()),\n ('document', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='text_miner.Document')),\n ],\n ),\n migrations.CreateModel(\n name='InformationExtracted',\n fields=[\n ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),\n ('information', models.TextField(max_length=100000)),\n ('result', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='text_miner.Results')),\n ],\n ),\n ]\n",
"from django.conf import settings\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport text_miner.models\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='Document', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('filesize', models.IntegerField()), (\n 'filename', models.CharField(max_length=200)), ('date', models.\n DateTimeField(auto_now_add=True)), ('document', models.FileField(\n max_length=200, upload_to=text_miner.models.user_directory_path)),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.\n CASCADE, to=settings.AUTH_USER_MODEL))]), migrations.CreateModel(\n name='ScanPdf', fields=[('id', models.AutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), ('category',\n models.TextField(max_length=1000)), ('category_value', models.\n IntegerField()), ('document', models.ForeignKey(on_delete=django.db\n .models.deletion.CASCADE, to='text_miner.Document'))]), migrations.\n CreateModel(name='Results', fields=[('id', models.AutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), ('prediction', models.IntegerField()), ('document', models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'text_miner.Document'))]), migrations.CreateModel(name=\n 'InformationExtracted', fields=[('id', models.AutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), ('information', models.TextField(max_length=100000)), (\n 'result', models.ForeignKey(on_delete=django.db.models.deletion.\n CASCADE, to='text_miner.Results'))])]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [migrations.swappable_dependency(settings.AUTH_USER_MODEL)]\n operations = [migrations.CreateModel(name='Document', fields=[('id',\n models.AutoField(auto_created=True, primary_key=True, serialize=\n False, verbose_name='ID')), ('filesize', models.IntegerField()), (\n 'filename', models.CharField(max_length=200)), ('date', models.\n DateTimeField(auto_now_add=True)), ('document', models.FileField(\n max_length=200, upload_to=text_miner.models.user_directory_path)),\n ('user', models.ForeignKey(on_delete=django.db.models.deletion.\n CASCADE, to=settings.AUTH_USER_MODEL))]), migrations.CreateModel(\n name='ScanPdf', fields=[('id', models.AutoField(auto_created=True,\n primary_key=True, serialize=False, verbose_name='ID')), ('category',\n models.TextField(max_length=1000)), ('category_value', models.\n IntegerField()), ('document', models.ForeignKey(on_delete=django.db\n .models.deletion.CASCADE, to='text_miner.Document'))]), migrations.\n CreateModel(name='Results', fields=[('id', models.AutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), ('prediction', models.IntegerField()), ('document', models.\n ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=\n 'text_miner.Document'))]), migrations.CreateModel(name=\n 'InformationExtracted', fields=[('id', models.AutoField(\n auto_created=True, primary_key=True, serialize=False, verbose_name=\n 'ID')), ('information', models.TextField(max_length=100000)), (\n 'result', models.ForeignKey(on_delete=django.db.models.deletion.\n CASCADE, to='text_miner.Results'))])]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
99,396 |
9ab41e572c9be2c6e9ca36cfbf982c472baedc3c
|
import os
from os import path
from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtWidgets import QLabel, QAbstractItemView
from ui.transfer_window import Ui_TransferWindow
import requests
import json
import configparser
from static import set_text
from time import sleep
from base import find_transfer, success
from static import generate_unique_number, get_organization
from error_window import ErrorWindow
class TransferWindow(QtWidgets.QMainWindow):
def __init__(self):
super(TransferWindow, self).__init__()
self.setFixedSize(482, 340)
        # Initialize the windows
self.ui_3 = Ui_TransferWindow()
self.ui_7 = ErrorWindow()
self.ui_3.setupUi(self)
        # Folder paths
self.result_dir = path.join(path.dirname(__file__), 'result')
self.img_dir = path.join(path.dirname(__file__), 'img')
self.config_dir = path.join(path.dirname(__file__), 'config')
self.json_dir = path.join(path.dirname(__file__), 'json')
self.log_dir = path.join(path.dirname(__file__), 'log')
        # List of all files in the result folder
self.files = os.listdir(self.result_dir)
        # Open the config file
self.config = configparser.RawConfigParser()
self.config.read(path.join(self.config_dir, 'config.ini'), encoding='utf-8')
        # Use the organization name in the output file name
self.date = ''
self.organization_name = get_organization()
        # Attach a string list model to the listView
self.model = QtCore.QStringListModel(self)
self.ui_3.listView.setModel(self.model)
self.ui_3.listView.setWordWrap(True)
self.ui_3.listView.hide()
        # Disable editing of listView items
self.ui_3.listView.setEditTriggers(QAbstractItemView.NoEditTriggers)
        # Progress bar
self.ui_3.progressBar.hide()
        # Connect the buttons
self.ui_3.pushButton.clicked.connect(self.create_json)
self.ui_3.pushButton_2.clicked.connect(self.close_window)
        # Window icon
self.setWindowIcon(QtGui.QIcon(path.join(self.img_dir, 'gosuslugi_5.png')))
        # Window texts
self.setWindowTitle('Выбор даты для отправки')
set_text(self.ui_3.pushButton, 'Отправить')
self.ui_3.pushButton.setStyleSheet("""
background-color: #b2edbf;
""")
set_text(self.ui_3.pushButton_2, 'Закрыть')
self.ui_3.pushButton_2.setStyleSheet("""
background-color: #f7c8c8;
""")
    # Close the window
def close_window(self):
self.close()
def show_error_window(self, error):
label = self.ui_7.findChildren(QLabel)
for item in label:
item.setText(error)
self.ui_7.show()
def get_date_for_transfer(self):
date = self.ui_3.calendarWidget.selectedDate()
return date.toString('dd-MM-yyyy')
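    # (get_date_for_transfer returns e.g. '23-06-2020'; this string keys the
    # per-day result files used throughout this class)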
    # Read the JSON template
def read_json_template(self):
with open(path.join(self.json_dir, 'template.json'), 'r', encoding='utf-8') as json_file:
json_data = json.load(json_file)
python_json_data = json.loads(json_data)
return python_json_data
def read_json_today(self):
with open(path.join(self.result_dir, f'{self.organization_name}-{self.date}.json'), 'r', encoding='utf-8')\
as json_file:
json_data = json.load(json_file)
python_json_data = json.loads(json_data)
return python_json_data
    # Create and write the JSON file
def write_json(self, data):
if os.path.exists(path.join(self.result_dir, f'{self.organization_name}-{self.date}.json')):
json_list = self.read_json_today()
else:
json_list = self.read_json_template()
with open(path.join(self.result_dir,
f'{self.organization_name}-{self.date}.json'), 'w', encoding='utf-8') as json_file:
if json_list[0]['order']['depart'] != '':
json_list.append(data)
else:
json_list = [data]
            python_json = str(json_list).replace("'", '\"')  # Convert quotes to valid JSON
json.dump(f"{python_json}", json_file, ensure_ascii=False)
def create_json(self):
        # Take the date from the calendar
self.date += self.get_date_for_transfer()
depart_number = ''
laboratory_name = ''
laboratory_ogrn = ''
        # Read the config
for section in self.config.sections():
if self.config.has_section('json_data'):
if self.config.has_option(section, 'depart_number')\
and self.config.has_option(section, 'laboratory_name')\
and self.config.has_option(section, 'laboratory_ogrn'):
depart_number = self.config.get(section, 'depart_number')
laboratory_name = self.config.get(section, 'laboratory_name')
laboratory_ogrn = self.config.get(section, 'laboratory_ogrn')
if os.path.exists(path.join(self.result_dir, f'{self.organization_name}-{self.date}.json')):
python_json_dict = self.read_json_today()
else:
python_json_dict = self.read_json_template()
python_json_dict = python_json_dict[0]
transfer_list = find_transfer(self.date)
if not transfer_list:
python_json_dict['order']['patient']['surname'] = 'В базе'
python_json_dict['order']['patient']['name'] = 'нет пациентов'
python_json_dict['order']['patient']['patronymic'] = 'для отправки'
self.write_json(python_json_dict)
progress = 0
if transfer_list:
self.ui_3.progressBar.show()
for el in range(len(transfer_list)):
unique_number = generate_unique_number()
python_json_dict['order']['number'] = unique_number
python_json_dict['order']['depart'] = depart_number
python_json_dict['order']['laboratoryName'] = laboratory_name
python_json_dict['order']['laboratoryOgrn'] = laboratory_ogrn
python_json_dict['order']['name'] = transfer_list[el][2]
python_json_dict['order']['ogrn'] = transfer_list[el][3]
python_json_dict['order']['orderDate'] = transfer_list[el][4]
python_json_dict['order']['serv'][0]['code'] = transfer_list[el][5]
python_json_dict['order']['serv'][0]['name'] = transfer_list[el][6]
python_json_dict['order']['serv'][0]['testSystem'] = transfer_list[el][7]
python_json_dict['order']['serv'][0]['biomaterDate'] = transfer_list[el][8]
python_json_dict['order']['serv'][0]['readyDate'] = transfer_list[el][9]
python_json_dict['order']['serv'][0]['result'] = transfer_list[el][10][0]
python_json_dict['order']['serv'][0]['type'] = transfer_list[el][11][0]
python_json_dict['order']['serv'][0]['value'] = transfer_list[el][12]
python_json_dict['order']['patient']['surname'] = transfer_list[el][13]
python_json_dict['order']['patient']['name'] = transfer_list[el][14]
python_json_dict['order']['patient']['patronymic'] = transfer_list[el][15]
python_json_dict['order']['patient']['gender'] = transfer_list[el][16]
python_json_dict['order']['patient']['birthday'] = transfer_list[el][17]
python_json_dict['order']['patient']['phone'] = transfer_list[el][18]
python_json_dict['order']['patient']['email'] = transfer_list[el][19]
python_json_dict['order']['patient']['documentType'] = transfer_list[el][20]
python_json_dict['order']['patient']['documentNumber'] = transfer_list[el][22]
python_json_dict['order']['patient']['documentSerNumber'] = transfer_list[el][21]
python_json_dict['order']['patient']['snils'] = transfer_list[el][23]
python_json_dict['order']['patient']['oms'] = transfer_list[el][24]
python_json_dict['order']['patient']['address']['regAddress']['town'] = transfer_list[el][25]
python_json_dict['order']['patient']['address']['regAddress']['house'] = transfer_list[el][26]
python_json_dict['order']['patient']['address']['regAddress']['region'] = transfer_list[el][27]
python_json_dict['order']['patient']['address']['regAddress']['building'] = transfer_list[el][28]
python_json_dict['order']['patient']['address']['regAddress']['district'] = transfer_list[el][29]
python_json_dict['order']['patient']['address']['regAddress']['appartament'] = transfer_list[el][30]
python_json_dict['order']['patient']['address']['regAddress']['streetName'] = transfer_list[el][31]
python_json_dict['order']['patient']['address']['factAddress']['town'] = transfer_list[el][32]
python_json_dict['order']['patient']['address']['factAddress']['house'] = transfer_list[el][33]
python_json_dict['order']['patient']['address']['factAddress']['region'] = transfer_list[el][34]
python_json_dict['order']['patient']['address']['factAddress']['building'] = transfer_list[el][35]
python_json_dict['order']['patient']['address']['factAddress']['district'] = transfer_list[el][36]
python_json_dict['order']['patient']['address']['factAddress']['appartament'] = transfer_list[el][37]
python_json_dict['order']['patient']['address']['factAddress']['streetName'] = transfer_list[el][38]
self.write_json(python_json_dict)
sleep(1)
progress += 100 / len(transfer_list)
                self.ui_3.progressBar.setValue(int(progress))  # setValue() expects an int
self.logging_transfer()
def logging_transfer(self):
        # Open the JSON file
with open(path.join(self.result_dir, f'{self.organization_name}-{self.date}.json'), 'r', encoding='utf-8')\
as read_file:
json_file = json.load(read_file)
python_json = json.loads(json_file)
patient_list = []
for patients_dict in python_json:
surname = f"{patients_dict['order']['patient']['surname']}"
name = f"{patients_dict['order']['patient']['name']}"
patronymic = f"{patients_dict['order']['patient']['patronymic']}"
patient = f'{surname} {name} {patronymic}'
patient_list.append(patient)
transfer_json = self.transfer_data()
status_list = []
        # Write the statuses to the log if the JSON is not empty
if patient_list[0] != 'В базе нет пациентов для отправки':
with open(path.join(self.log_dir, 'console_log.txt'), 'a') as log_file:
log_file.write(f'{str(transfer_json)}\n\n')
transfer_list = find_transfer(self.date)
            # Append errors
for elements in range(len(transfer_list)):
if transfer_json['body'][int(elements)]['status'] == 'error':
                    # Insert the error message at every second position
patient_list.insert(elements * 3 + 1, f"{transfer_json['body'][int(elements)]['message']}")
                    # Insert a separator at every third position
patient_list.insert(elements * 3 + 2, '----------------------------------------------'
'--------------------------')
status_list.append('error')
elif transfer_json['body'][int(elements)]['status'] == 'ok'\
or transfer_json['body'][int(elements)]['status'] == '':
patient_list.insert(elements * 3 + 1, f"Успешно!")
patient_list.insert(elements * 3 + 2, '----------------------------------------------'
'--------------------------')
status_list.append('ok')
for elem in range(len(status_list)):
if status_list[elem] == 'ok':
success(transfer_list[elem][0], 1)
        # Hide the calendar and show the list view
self.ui_3.calendarWidget.hide()
self.ui_3.listView.show()
self.model.setStringList(patient_list)
self.ui_3.pushButton.setEnabled(False)
if os.path.isfile(path.join(self.result_dir, f'{self.organization_name}-{self.date}.json')):
os.remove(path.join(self.result_dir, f'{self.organization_name}-{self.date}.json'))
self.date = ''
    # Fetch a token and send the data to the API
def transfer_data(self):
try:
date = self.get_date_for_transfer()
organization_name = get_organization()
            # Open the JSON file
with open(path.join(self.result_dir, f'{organization_name}-{date}.json'), 'r', encoding='utf-8')\
as read_file:
json_file = json.load(read_file)
depart_number = ''
token = ''
address = ''
            # Read the config
for section in self.config.sections():
if self.config.has_section('json_data'):
if self.config.has_option(section, 'depart_number'):
depart_number = self.config.get(section, 'depart_number')
if self.config.has_section('transfer_data'):
if self.config.has_option(section, 'token') and self.config.has_option(section, 'address'):
token = self.config.get(section, 'token')
address = self.config.get(section, 'address')
login = {'depart number': depart_number,
'token': token
}
            # Obtain a new token
response = requests.post(f'https://{address}/api/v2/order/get-depart-token',
login)
response_json = response.json()
response_token = response_json['body']['token']
            # Send the data to the API
transfer_info = {'depart number': depart_number,
'token': response_token,
'json': json_file}
transfer = requests.post(f'https://{address}/api/v2/order/ext-orders-package',
transfer_info)
transfer_json = transfer.json()
return transfer_json
        # Handle ConnectionError (a subclass of OSError) raised when the
        # Continent AP VPN client is disconnected
except OSError:
self.show_error_window('Нет связи с сервером')
self.close_window()
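
# A minimal launch sketch (PyQt5 requires a QApplication before any widget is
# created; this assumes the ui/, img/, config/, json/ and result/ folders
# referenced in __init__ exist next to this file):
if __name__ == '__main__':
    import sys
    app = QtWidgets.QApplication(sys.argv)
    window = TransferWindow()
    window.show()
    sys.exit(app.exec_())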
|
[
"import os\nfrom os import path\nfrom PyQt5 import QtWidgets, QtCore, QtGui\nfrom PyQt5.QtWidgets import QLabel, QAbstractItemView\nfrom ui.transfer_window import Ui_TransferWindow\nimport requests\nimport json\nimport configparser\nfrom static import set_text\nfrom time import sleep\nfrom base import find_transfer, success\nfrom static import generate_unique_number, get_organization\nfrom error_window import ErrorWindow\n\n\nclass TransferWindow(QtWidgets.QMainWindow):\n def __init__(self):\n super(TransferWindow, self).__init__()\n self.setFixedSize(482, 340)\n # Инициализация окон\n self.ui_3 = Ui_TransferWindow()\n self.ui_7 = ErrorWindow()\n self.ui_3.setupUi(self)\n # Пути до папок\n self.result_dir = path.join(path.dirname(__file__), 'result')\n self.img_dir = path.join(path.dirname(__file__), 'img')\n self.config_dir = path.join(path.dirname(__file__), 'config')\n self.json_dir = path.join(path.dirname(__file__), 'json')\n self.log_dir = path.join(path.dirname(__file__), 'log')\n # Список всех файлов в папке result\n self.files = os.listdir(self.result_dir)\n # Открытие файла конфига\n self.config = configparser.RawConfigParser()\n self.config.read(path.join(self.config_dir, 'config.ini'), encoding='utf-8')\n # Берем имя организации для имени файла\n self.date = ''\n self.organization_name = get_organization()\n # Добавление списка в listView\n self.model = QtCore.QStringListModel(self)\n self.ui_3.listView.setModel(self.model)\n self.ui_3.listView.setWordWrap(True)\n self.ui_3.listView.hide()\n # Запрет редактирования элементов listView\n self.ui_3.listView.setEditTriggers(QAbstractItemView.NoEditTriggers)\n # Прогрессбар\n self.ui_3.progressBar.hide()\n # Подключение кнопок\n self.ui_3.pushButton.clicked.connect(self.create_json)\n self.ui_3.pushButton_2.clicked.connect(self.close_window)\n # Иконка окна\n self.setWindowIcon(QtGui.QIcon(path.join(self.img_dir, 'gosuslugi_5.png')))\n # Текст по окну\n self.setWindowTitle('Выбор даты для отправки')\n set_text(self.ui_3.pushButton, 'Отправить')\n self.ui_3.pushButton.setStyleSheet(\"\"\"\n background-color: #b2edbf;\n \"\"\")\n set_text(self.ui_3.pushButton_2, 'Закрыть')\n self.ui_3.pushButton_2.setStyleSheet(\"\"\"\n background-color: #f7c8c8;\n \"\"\")\n\n # Закрытие окна\n def close_window(self):\n self.close()\n\n def show_error_window(self, error):\n label = self.ui_7.findChildren(QLabel)\n\n for item in label:\n item.setText(error)\n\n self.ui_7.show()\n\n def get_date_for_transfer(self):\n date = self.ui_3.calendarWidget.selectedDate()\n return date.toString('dd-MM-yyyy')\n\n # Чтение json шаблона\n def read_json_template(self):\n with open(path.join(self.json_dir, 'template.json'), 'r', encoding='utf-8') as json_file:\n json_data = json.load(json_file)\n python_json_data = json.loads(json_data)\n\n return python_json_data\n\n def read_json_today(self):\n with open(path.join(self.result_dir, f'{self.organization_name}-{self.date}.json'), 'r', encoding='utf-8')\\\n as json_file:\n json_data = json.load(json_file)\n python_json_data = json.loads(json_data)\n\n return python_json_data\n\n # Создание и запись json файла\n def write_json(self, data):\n if os.path.exists(path.join(self.result_dir, f'{self.organization_name}-{self.date}.json')):\n json_list = self.read_json_today()\n else:\n json_list = self.read_json_template()\n\n with open(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json'), 'w', encoding='utf-8') as json_file:\n if json_list[0]['order']['depart'] != '':\n json_list.append(data)\n else:\n 
json_list = [data]\n python_json = str(json_list).replace(\"'\", '\\\"') # Преобразует ковычки к нужному формату\n\n json.dump(f\"{python_json}\", json_file, ensure_ascii=False)\n\n def create_json(self):\n # Берет дату с календаря\n self.date += self.get_date_for_transfer()\n\n depart_number = ''\n laboratory_name = ''\n laboratory_ogrn = ''\n\n # Чтение конфига\n for section in self.config.sections():\n if self.config.has_section('json_data'):\n if self.config.has_option(section, 'depart_number')\\\n and self.config.has_option(section, 'laboratory_name')\\\n and self.config.has_option(section, 'laboratory_ogrn'):\n depart_number = self.config.get(section, 'depart_number')\n laboratory_name = self.config.get(section, 'laboratory_name')\n laboratory_ogrn = self.config.get(section, 'laboratory_ogrn')\n\n if os.path.exists(path.join(self.result_dir, f'{self.organization_name}-{self.date}.json')):\n python_json_dict = self.read_json_today()\n else:\n python_json_dict = self.read_json_template()\n\n python_json_dict = python_json_dict[0]\n\n transfer_list = find_transfer(self.date)\n\n if not transfer_list:\n python_json_dict['order']['patient']['surname'] = 'В базе'\n python_json_dict['order']['patient']['name'] = 'нет пациентов'\n python_json_dict['order']['patient']['patronymic'] = 'для отправки'\n self.write_json(python_json_dict)\n\n progress = 0\n if transfer_list:\n self.ui_3.progressBar.show()\n\n for el in range(len(transfer_list)):\n unique_number = generate_unique_number()\n\n python_json_dict['order']['number'] = unique_number\n python_json_dict['order']['depart'] = depart_number\n python_json_dict['order']['laboratoryName'] = laboratory_name\n python_json_dict['order']['laboratoryOgrn'] = laboratory_ogrn\n python_json_dict['order']['name'] = transfer_list[el][2]\n python_json_dict['order']['ogrn'] = transfer_list[el][3]\n python_json_dict['order']['orderDate'] = transfer_list[el][4]\n\n python_json_dict['order']['serv'][0]['code'] = transfer_list[el][5]\n python_json_dict['order']['serv'][0]['name'] = transfer_list[el][6]\n python_json_dict['order']['serv'][0]['testSystem'] = transfer_list[el][7]\n python_json_dict['order']['serv'][0]['biomaterDate'] = transfer_list[el][8]\n python_json_dict['order']['serv'][0]['readyDate'] = transfer_list[el][9]\n python_json_dict['order']['serv'][0]['result'] = transfer_list[el][10][0]\n python_json_dict['order']['serv'][0]['type'] = transfer_list[el][11][0]\n python_json_dict['order']['serv'][0]['value'] = transfer_list[el][12]\n\n python_json_dict['order']['patient']['surname'] = transfer_list[el][13]\n python_json_dict['order']['patient']['name'] = transfer_list[el][14]\n python_json_dict['order']['patient']['patronymic'] = transfer_list[el][15]\n python_json_dict['order']['patient']['gender'] = transfer_list[el][16]\n python_json_dict['order']['patient']['birthday'] = transfer_list[el][17]\n python_json_dict['order']['patient']['phone'] = transfer_list[el][18]\n python_json_dict['order']['patient']['email'] = transfer_list[el][19]\n python_json_dict['order']['patient']['documentType'] = transfer_list[el][20]\n python_json_dict['order']['patient']['documentNumber'] = transfer_list[el][22]\n python_json_dict['order']['patient']['documentSerNumber'] = transfer_list[el][21]\n python_json_dict['order']['patient']['snils'] = transfer_list[el][23]\n python_json_dict['order']['patient']['oms'] = transfer_list[el][24]\n\n python_json_dict['order']['patient']['address']['regAddress']['town'] = transfer_list[el][25]\n 
python_json_dict['order']['patient']['address']['regAddress']['house'] = transfer_list[el][26]\n python_json_dict['order']['patient']['address']['regAddress']['region'] = transfer_list[el][27]\n python_json_dict['order']['patient']['address']['regAddress']['building'] = transfer_list[el][28]\n python_json_dict['order']['patient']['address']['regAddress']['district'] = transfer_list[el][29]\n python_json_dict['order']['patient']['address']['regAddress']['appartament'] = transfer_list[el][30]\n python_json_dict['order']['patient']['address']['regAddress']['streetName'] = transfer_list[el][31]\n\n python_json_dict['order']['patient']['address']['factAddress']['town'] = transfer_list[el][32]\n python_json_dict['order']['patient']['address']['factAddress']['house'] = transfer_list[el][33]\n python_json_dict['order']['patient']['address']['factAddress']['region'] = transfer_list[el][34]\n python_json_dict['order']['patient']['address']['factAddress']['building'] = transfer_list[el][35]\n python_json_dict['order']['patient']['address']['factAddress']['district'] = transfer_list[el][36]\n python_json_dict['order']['patient']['address']['factAddress']['appartament'] = transfer_list[el][37]\n python_json_dict['order']['patient']['address']['factAddress']['streetName'] = transfer_list[el][38]\n\n self.write_json(python_json_dict)\n sleep(1)\n\n progress += 100 / len(transfer_list)\n self.ui_3.progressBar.setValue(progress)\n\n self.logging_transfer()\n\n def logging_transfer(self):\n # Открытие json файла\n with open(path.join(self.result_dir, f'{self.organization_name}-{self.date}.json'), 'r', encoding='utf-8')\\\n as read_file:\n json_file = json.load(read_file)\n python_json = json.loads(json_file)\n\n patient_list = []\n\n for patients_dict in python_json:\n surname = f\"{patients_dict['order']['patient']['surname']}\"\n name = f\"{patients_dict['order']['patient']['name']}\"\n patronymic = f\"{patients_dict['order']['patient']['patronymic']}\"\n patient = f'{surname} {name} {patronymic}'\n patient_list.append(patient)\n\n transfer_json = self.transfer_data()\n status_list = []\n\n # Передача статусов в лог, если json не пустой\n if patient_list[0] != 'В базе нет пациентов для отправки':\n with open(path.join(self.log_dir, 'console_log.txt'), 'a') as log_file:\n log_file.write(f'{str(transfer_json)}\\n\\n')\n\n transfer_list = find_transfer(self.date)\n\n # Добавление ошибок\n for elements in range(len(transfer_list)):\n if transfer_json['body'][int(elements)]['status'] == 'error':\n # Вставка элементов в каждый 2\n patient_list.insert(elements * 3 + 1, f\"{transfer_json['body'][int(elements)]['message']}\")\n # Вставка элементов в каждый 3\n patient_list.insert(elements * 3 + 2, '----------------------------------------------'\n '--------------------------')\n status_list.append('error')\n elif transfer_json['body'][int(elements)]['status'] == 'ok'\\\n or transfer_json['body'][int(elements)]['status'] == '':\n patient_list.insert(elements * 3 + 1, f\"Успешно!\")\n patient_list.insert(elements * 3 + 2, '----------------------------------------------'\n '--------------------------')\n status_list.append('ok')\n\n for elem in range(len(status_list)):\n if status_list[elem] == 'ok':\n success(transfer_list[elem][0], 1)\n\n # Скрывает календарь и показывает листвью\n self.ui_3.calendarWidget.hide()\n self.ui_3.listView.show()\n self.model.setStringList(patient_list)\n self.ui_3.pushButton.setEnabled(False)\n\n if os.path.isfile(path.join(self.result_dir, 
f'{self.organization_name}-{self.date}.json')):\n os.remove(path.join(self.result_dir, f'{self.organization_name}-{self.date}.json'))\n\n self.date = ''\n\n # Получение и отправка данных в API\n def transfer_data(self):\n try:\n date = self.get_date_for_transfer()\n organization_name = get_organization()\n # Открытие json файла\n with open(path.join(self.result_dir, f'{organization_name}-{date}.json'), 'r', encoding='utf-8')\\\n as read_file:\n json_file = json.load(read_file)\n\n depart_number = ''\n token = ''\n address = ''\n # Чтение конфига\n for section in self.config.sections():\n if self.config.has_section('json_data'):\n if self.config.has_option(section, 'depart_number'):\n depart_number = self.config.get(section, 'depart_number')\n if self.config.has_section('transfer_data'):\n if self.config.has_option(section, 'token') and self.config.has_option(section, 'address'):\n token = self.config.get(section, 'token')\n address = self.config.get(section, 'address')\n\n login = {'depart number': depart_number,\n 'token': token\n }\n\n # Получение нового токена\n response = requests.post(f'https://{address}/api/v2/order/get-depart-token',\n login)\n\n response_json = response.json()\n response_token = response_json['body']['token']\n\n # Отправка данных в api\n transfer_info = {'depart number': depart_number,\n 'token': response_token,\n 'json': json_file}\n\n transfer = requests.post(f'https://{address}/api/v2/order/ext-orders-package',\n transfer_info)\n transfer_json = transfer.json()\n\n return transfer_json\n # Обработка ConnectionError при отключенном Континент АП\n except OSError:\n self.show_error_window('Нет связи с сервером')\n self.close_window()\n",
"import os\nfrom os import path\nfrom PyQt5 import QtWidgets, QtCore, QtGui\nfrom PyQt5.QtWidgets import QLabel, QAbstractItemView\nfrom ui.transfer_window import Ui_TransferWindow\nimport requests\nimport json\nimport configparser\nfrom static import set_text\nfrom time import sleep\nfrom base import find_transfer, success\nfrom static import generate_unique_number, get_organization\nfrom error_window import ErrorWindow\n\n\nclass TransferWindow(QtWidgets.QMainWindow):\n\n def __init__(self):\n super(TransferWindow, self).__init__()\n self.setFixedSize(482, 340)\n self.ui_3 = Ui_TransferWindow()\n self.ui_7 = ErrorWindow()\n self.ui_3.setupUi(self)\n self.result_dir = path.join(path.dirname(__file__), 'result')\n self.img_dir = path.join(path.dirname(__file__), 'img')\n self.config_dir = path.join(path.dirname(__file__), 'config')\n self.json_dir = path.join(path.dirname(__file__), 'json')\n self.log_dir = path.join(path.dirname(__file__), 'log')\n self.files = os.listdir(self.result_dir)\n self.config = configparser.RawConfigParser()\n self.config.read(path.join(self.config_dir, 'config.ini'), encoding\n ='utf-8')\n self.date = ''\n self.organization_name = get_organization()\n self.model = QtCore.QStringListModel(self)\n self.ui_3.listView.setModel(self.model)\n self.ui_3.listView.setWordWrap(True)\n self.ui_3.listView.hide()\n self.ui_3.listView.setEditTriggers(QAbstractItemView.NoEditTriggers)\n self.ui_3.progressBar.hide()\n self.ui_3.pushButton.clicked.connect(self.create_json)\n self.ui_3.pushButton_2.clicked.connect(self.close_window)\n self.setWindowIcon(QtGui.QIcon(path.join(self.img_dir,\n 'gosuslugi_5.png')))\n self.setWindowTitle('Выбор даты для отправки')\n set_text(self.ui_3.pushButton, 'Отправить')\n self.ui_3.pushButton.setStyleSheet(\n \"\"\"\n background-color: #b2edbf;\n \"\"\"\n )\n set_text(self.ui_3.pushButton_2, 'Закрыть')\n self.ui_3.pushButton_2.setStyleSheet(\n \"\"\"\n background-color: #f7c8c8;\n \"\"\"\n )\n\n def close_window(self):\n self.close()\n\n def show_error_window(self, error):\n label = self.ui_7.findChildren(QLabel)\n for item in label:\n item.setText(error)\n self.ui_7.show()\n\n def get_date_for_transfer(self):\n date = self.ui_3.calendarWidget.selectedDate()\n return date.toString('dd-MM-yyyy')\n\n def read_json_template(self):\n with open(path.join(self.json_dir, 'template.json'), 'r', encoding=\n 'utf-8') as json_file:\n json_data = json.load(json_file)\n python_json_data = json.loads(json_data)\n return python_json_data\n\n def read_json_today(self):\n with open(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json'), 'r', encoding=\n 'utf-8') as json_file:\n json_data = json.load(json_file)\n python_json_data = json.loads(json_data)\n return python_json_data\n\n def write_json(self, data):\n if os.path.exists(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json')):\n json_list = self.read_json_today()\n else:\n json_list = self.read_json_template()\n with open(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json'), 'w', encoding=\n 'utf-8') as json_file:\n if json_list[0]['order']['depart'] != '':\n json_list.append(data)\n else:\n json_list = [data]\n python_json = str(json_list).replace(\"'\", '\"')\n json.dump(f'{python_json}', json_file, ensure_ascii=False)\n\n def create_json(self):\n self.date += self.get_date_for_transfer()\n depart_number = ''\n laboratory_name = ''\n laboratory_ogrn = ''\n for section in self.config.sections():\n if self.config.has_section('json_data'):\n 
if self.config.has_option(section, 'depart_number'\n ) and self.config.has_option(section, 'laboratory_name'\n ) and self.config.has_option(section, 'laboratory_ogrn'):\n depart_number = self.config.get(section, 'depart_number')\n laboratory_name = self.config.get(section,\n 'laboratory_name')\n laboratory_ogrn = self.config.get(section,\n 'laboratory_ogrn')\n if os.path.exists(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json')):\n python_json_dict = self.read_json_today()\n else:\n python_json_dict = self.read_json_template()\n python_json_dict = python_json_dict[0]\n transfer_list = find_transfer(self.date)\n if not transfer_list:\n python_json_dict['order']['patient']['surname'] = 'В базе'\n python_json_dict['order']['patient']['name'] = 'нет пациентов'\n python_json_dict['order']['patient']['patronymic'] = 'для отправки'\n self.write_json(python_json_dict)\n progress = 0\n if transfer_list:\n self.ui_3.progressBar.show()\n for el in range(len(transfer_list)):\n unique_number = generate_unique_number()\n python_json_dict['order']['number'] = unique_number\n python_json_dict['order']['depart'] = depart_number\n python_json_dict['order']['laboratoryName'] = laboratory_name\n python_json_dict['order']['laboratoryOgrn'] = laboratory_ogrn\n python_json_dict['order']['name'] = transfer_list[el][2]\n python_json_dict['order']['ogrn'] = transfer_list[el][3]\n python_json_dict['order']['orderDate'] = transfer_list[el][4]\n python_json_dict['order']['serv'][0]['code'] = transfer_list[el][5]\n python_json_dict['order']['serv'][0]['name'] = transfer_list[el][6]\n python_json_dict['order']['serv'][0]['testSystem'] = transfer_list[\n el][7]\n python_json_dict['order']['serv'][0]['biomaterDate'\n ] = transfer_list[el][8]\n python_json_dict['order']['serv'][0]['readyDate'] = transfer_list[\n el][9]\n python_json_dict['order']['serv'][0]['result'] = transfer_list[el][\n 10][0]\n python_json_dict['order']['serv'][0]['type'] = transfer_list[el][11\n ][0]\n python_json_dict['order']['serv'][0]['value'] = transfer_list[el][\n 12]\n python_json_dict['order']['patient']['surname'] = transfer_list[el\n ][13]\n python_json_dict['order']['patient']['name'] = transfer_list[el][14\n ]\n python_json_dict['order']['patient']['patronymic'] = transfer_list[\n el][15]\n python_json_dict['order']['patient']['gender'] = transfer_list[el][\n 16]\n python_json_dict['order']['patient']['birthday'] = transfer_list[el\n ][17]\n python_json_dict['order']['patient']['phone'] = transfer_list[el][\n 18]\n python_json_dict['order']['patient']['email'] = transfer_list[el][\n 19]\n python_json_dict['order']['patient']['documentType'\n ] = transfer_list[el][20]\n python_json_dict['order']['patient']['documentNumber'\n ] = transfer_list[el][22]\n python_json_dict['order']['patient']['documentSerNumber'\n ] = transfer_list[el][21]\n python_json_dict['order']['patient']['snils'] = transfer_list[el][\n 23]\n python_json_dict['order']['patient']['oms'] = transfer_list[el][24]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'town'] = transfer_list[el][25]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'house'] = transfer_list[el][26]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'region'] = transfer_list[el][27]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'building'] = transfer_list[el][28]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'district'] = transfer_list[el][29]\n 
python_json_dict['order']['patient']['address']['regAddress'][\n 'appartament'] = transfer_list[el][30]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'streetName'] = transfer_list[el][31]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'town'] = transfer_list[el][32]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'house'] = transfer_list[el][33]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'region'] = transfer_list[el][34]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'building'] = transfer_list[el][35]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'district'] = transfer_list[el][36]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'appartament'] = transfer_list[el][37]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'streetName'] = transfer_list[el][38]\n self.write_json(python_json_dict)\n sleep(1)\n progress += 100 / len(transfer_list)\n self.ui_3.progressBar.setValue(progress)\n self.logging_transfer()\n\n def logging_transfer(self):\n with open(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json'), 'r', encoding=\n 'utf-8') as read_file:\n json_file = json.load(read_file)\n python_json = json.loads(json_file)\n patient_list = []\n for patients_dict in python_json:\n surname = f\"{patients_dict['order']['patient']['surname']}\"\n name = f\"{patients_dict['order']['patient']['name']}\"\n patronymic = f\"{patients_dict['order']['patient']['patronymic']}\"\n patient = f'{surname} {name} {patronymic}'\n patient_list.append(patient)\n transfer_json = self.transfer_data()\n status_list = []\n if patient_list[0] != 'В базе нет пациентов для отправки':\n with open(path.join(self.log_dir, 'console_log.txt'), 'a'\n ) as log_file:\n log_file.write(f'{str(transfer_json)}\\n\\n')\n transfer_list = find_transfer(self.date)\n for elements in range(len(transfer_list)):\n if transfer_json['body'][int(elements)]['status'] == 'error':\n patient_list.insert(elements * 3 + 1,\n f\"{transfer_json['body'][int(elements)]['message']}\")\n patient_list.insert(elements * 3 + 2,\n '------------------------------------------------------------------------'\n )\n status_list.append('error')\n elif transfer_json['body'][int(elements)]['status'\n ] == 'ok' or transfer_json['body'][int(elements)]['status'\n ] == '':\n patient_list.insert(elements * 3 + 1, f'Успешно!')\n patient_list.insert(elements * 3 + 2,\n '------------------------------------------------------------------------'\n )\n status_list.append('ok')\n for elem in range(len(status_list)):\n if status_list[elem] == 'ok':\n success(transfer_list[elem][0], 1)\n self.ui_3.calendarWidget.hide()\n self.ui_3.listView.show()\n self.model.setStringList(patient_list)\n self.ui_3.pushButton.setEnabled(False)\n if os.path.isfile(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json')):\n os.remove(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json'))\n self.date = ''\n\n def transfer_data(self):\n try:\n date = self.get_date_for_transfer()\n organization_name = get_organization()\n with open(path.join(self.result_dir,\n f'{organization_name}-{date}.json'), 'r', encoding='utf-8'\n ) as read_file:\n json_file = json.load(read_file)\n depart_number = ''\n token = ''\n address = ''\n for section in self.config.sections():\n if self.config.has_section('json_data'):\n if self.config.has_option(section, 'depart_number'):\n depart_number = 
self.config.get(section,\n 'depart_number')\n if self.config.has_section('transfer_data'):\n if self.config.has_option(section, 'token'\n ) and self.config.has_option(section, 'address'):\n token = self.config.get(section, 'token')\n address = self.config.get(section, 'address')\n login = {'depart number': depart_number, 'token': token}\n response = requests.post(\n f'https://{address}/api/v2/order/get-depart-token', login)\n response_json = response.json()\n response_token = response_json['body']['token']\n transfer_info = {'depart number': depart_number, 'token':\n response_token, 'json': json_file}\n transfer = requests.post(\n f'https://{address}/api/v2/order/ext-orders-package',\n transfer_info)\n transfer_json = transfer.json()\n return transfer_json\n except OSError:\n self.show_error_window('Нет связи с сервером')\n self.close_window()\n",
"<import token>\n\n\nclass TransferWindow(QtWidgets.QMainWindow):\n\n def __init__(self):\n super(TransferWindow, self).__init__()\n self.setFixedSize(482, 340)\n self.ui_3 = Ui_TransferWindow()\n self.ui_7 = ErrorWindow()\n self.ui_3.setupUi(self)\n self.result_dir = path.join(path.dirname(__file__), 'result')\n self.img_dir = path.join(path.dirname(__file__), 'img')\n self.config_dir = path.join(path.dirname(__file__), 'config')\n self.json_dir = path.join(path.dirname(__file__), 'json')\n self.log_dir = path.join(path.dirname(__file__), 'log')\n self.files = os.listdir(self.result_dir)\n self.config = configparser.RawConfigParser()\n self.config.read(path.join(self.config_dir, 'config.ini'), encoding\n ='utf-8')\n self.date = ''\n self.organization_name = get_organization()\n self.model = QtCore.QStringListModel(self)\n self.ui_3.listView.setModel(self.model)\n self.ui_3.listView.setWordWrap(True)\n self.ui_3.listView.hide()\n self.ui_3.listView.setEditTriggers(QAbstractItemView.NoEditTriggers)\n self.ui_3.progressBar.hide()\n self.ui_3.pushButton.clicked.connect(self.create_json)\n self.ui_3.pushButton_2.clicked.connect(self.close_window)\n self.setWindowIcon(QtGui.QIcon(path.join(self.img_dir,\n 'gosuslugi_5.png')))\n self.setWindowTitle('Выбор даты для отправки')\n set_text(self.ui_3.pushButton, 'Отправить')\n self.ui_3.pushButton.setStyleSheet(\n \"\"\"\n background-color: #b2edbf;\n \"\"\"\n )\n set_text(self.ui_3.pushButton_2, 'Закрыть')\n self.ui_3.pushButton_2.setStyleSheet(\n \"\"\"\n background-color: #f7c8c8;\n \"\"\"\n )\n\n def close_window(self):\n self.close()\n\n def show_error_window(self, error):\n label = self.ui_7.findChildren(QLabel)\n for item in label:\n item.setText(error)\n self.ui_7.show()\n\n def get_date_for_transfer(self):\n date = self.ui_3.calendarWidget.selectedDate()\n return date.toString('dd-MM-yyyy')\n\n def read_json_template(self):\n with open(path.join(self.json_dir, 'template.json'), 'r', encoding=\n 'utf-8') as json_file:\n json_data = json.load(json_file)\n python_json_data = json.loads(json_data)\n return python_json_data\n\n def read_json_today(self):\n with open(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json'), 'r', encoding=\n 'utf-8') as json_file:\n json_data = json.load(json_file)\n python_json_data = json.loads(json_data)\n return python_json_data\n\n def write_json(self, data):\n if os.path.exists(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json')):\n json_list = self.read_json_today()\n else:\n json_list = self.read_json_template()\n with open(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json'), 'w', encoding=\n 'utf-8') as json_file:\n if json_list[0]['order']['depart'] != '':\n json_list.append(data)\n else:\n json_list = [data]\n python_json = str(json_list).replace(\"'\", '\"')\n json.dump(f'{python_json}', json_file, ensure_ascii=False)\n\n def create_json(self):\n self.date += self.get_date_for_transfer()\n depart_number = ''\n laboratory_name = ''\n laboratory_ogrn = ''\n for section in self.config.sections():\n if self.config.has_section('json_data'):\n if self.config.has_option(section, 'depart_number'\n ) and self.config.has_option(section, 'laboratory_name'\n ) and self.config.has_option(section, 'laboratory_ogrn'):\n depart_number = self.config.get(section, 'depart_number')\n laboratory_name = self.config.get(section,\n 'laboratory_name')\n laboratory_ogrn = self.config.get(section,\n 'laboratory_ogrn')\n if os.path.exists(path.join(self.result_dir,\n 
f'{self.organization_name}-{self.date}.json')):\n python_json_dict = self.read_json_today()\n else:\n python_json_dict = self.read_json_template()\n python_json_dict = python_json_dict[0]\n transfer_list = find_transfer(self.date)\n if not transfer_list:\n python_json_dict['order']['patient']['surname'] = 'В базе'\n python_json_dict['order']['patient']['name'] = 'нет пациентов'\n python_json_dict['order']['patient']['patronymic'] = 'для отправки'\n self.write_json(python_json_dict)\n progress = 0\n if transfer_list:\n self.ui_3.progressBar.show()\n for el in range(len(transfer_list)):\n unique_number = generate_unique_number()\n python_json_dict['order']['number'] = unique_number\n python_json_dict['order']['depart'] = depart_number\n python_json_dict['order']['laboratoryName'] = laboratory_name\n python_json_dict['order']['laboratoryOgrn'] = laboratory_ogrn\n python_json_dict['order']['name'] = transfer_list[el][2]\n python_json_dict['order']['ogrn'] = transfer_list[el][3]\n python_json_dict['order']['orderDate'] = transfer_list[el][4]\n python_json_dict['order']['serv'][0]['code'] = transfer_list[el][5]\n python_json_dict['order']['serv'][0]['name'] = transfer_list[el][6]\n python_json_dict['order']['serv'][0]['testSystem'] = transfer_list[\n el][7]\n python_json_dict['order']['serv'][0]['biomaterDate'\n ] = transfer_list[el][8]\n python_json_dict['order']['serv'][0]['readyDate'] = transfer_list[\n el][9]\n python_json_dict['order']['serv'][0]['result'] = transfer_list[el][\n 10][0]\n python_json_dict['order']['serv'][0]['type'] = transfer_list[el][11\n ][0]\n python_json_dict['order']['serv'][0]['value'] = transfer_list[el][\n 12]\n python_json_dict['order']['patient']['surname'] = transfer_list[el\n ][13]\n python_json_dict['order']['patient']['name'] = transfer_list[el][14\n ]\n python_json_dict['order']['patient']['patronymic'] = transfer_list[\n el][15]\n python_json_dict['order']['patient']['gender'] = transfer_list[el][\n 16]\n python_json_dict['order']['patient']['birthday'] = transfer_list[el\n ][17]\n python_json_dict['order']['patient']['phone'] = transfer_list[el][\n 18]\n python_json_dict['order']['patient']['email'] = transfer_list[el][\n 19]\n python_json_dict['order']['patient']['documentType'\n ] = transfer_list[el][20]\n python_json_dict['order']['patient']['documentNumber'\n ] = transfer_list[el][22]\n python_json_dict['order']['patient']['documentSerNumber'\n ] = transfer_list[el][21]\n python_json_dict['order']['patient']['snils'] = transfer_list[el][\n 23]\n python_json_dict['order']['patient']['oms'] = transfer_list[el][24]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'town'] = transfer_list[el][25]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'house'] = transfer_list[el][26]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'region'] = transfer_list[el][27]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'building'] = transfer_list[el][28]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'district'] = transfer_list[el][29]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'appartament'] = transfer_list[el][30]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'streetName'] = transfer_list[el][31]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'town'] = transfer_list[el][32]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'house'] = transfer_list[el][33]\n 
python_json_dict['order']['patient']['address']['factAddress'][\n 'region'] = transfer_list[el][34]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'building'] = transfer_list[el][35]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'district'] = transfer_list[el][36]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'appartament'] = transfer_list[el][37]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'streetName'] = transfer_list[el][38]\n self.write_json(python_json_dict)\n sleep(1)\n progress += 100 / len(transfer_list)\n self.ui_3.progressBar.setValue(progress)\n self.logging_transfer()\n\n def logging_transfer(self):\n with open(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json'), 'r', encoding=\n 'utf-8') as read_file:\n json_file = json.load(read_file)\n python_json = json.loads(json_file)\n patient_list = []\n for patients_dict in python_json:\n surname = f\"{patients_dict['order']['patient']['surname']}\"\n name = f\"{patients_dict['order']['patient']['name']}\"\n patronymic = f\"{patients_dict['order']['patient']['patronymic']}\"\n patient = f'{surname} {name} {patronymic}'\n patient_list.append(patient)\n transfer_json = self.transfer_data()\n status_list = []\n if patient_list[0] != 'В базе нет пациентов для отправки':\n with open(path.join(self.log_dir, 'console_log.txt'), 'a'\n ) as log_file:\n log_file.write(f'{str(transfer_json)}\\n\\n')\n transfer_list = find_transfer(self.date)\n for elements in range(len(transfer_list)):\n if transfer_json['body'][int(elements)]['status'] == 'error':\n patient_list.insert(elements * 3 + 1,\n f\"{transfer_json['body'][int(elements)]['message']}\")\n patient_list.insert(elements * 3 + 2,\n '------------------------------------------------------------------------'\n )\n status_list.append('error')\n elif transfer_json['body'][int(elements)]['status'\n ] == 'ok' or transfer_json['body'][int(elements)]['status'\n ] == '':\n patient_list.insert(elements * 3 + 1, f'Успешно!')\n patient_list.insert(elements * 3 + 2,\n '------------------------------------------------------------------------'\n )\n status_list.append('ok')\n for elem in range(len(status_list)):\n if status_list[elem] == 'ok':\n success(transfer_list[elem][0], 1)\n self.ui_3.calendarWidget.hide()\n self.ui_3.listView.show()\n self.model.setStringList(patient_list)\n self.ui_3.pushButton.setEnabled(False)\n if os.path.isfile(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json')):\n os.remove(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json'))\n self.date = ''\n\n def transfer_data(self):\n try:\n date = self.get_date_for_transfer()\n organization_name = get_organization()\n with open(path.join(self.result_dir,\n f'{organization_name}-{date}.json'), 'r', encoding='utf-8'\n ) as read_file:\n json_file = json.load(read_file)\n depart_number = ''\n token = ''\n address = ''\n for section in self.config.sections():\n if self.config.has_section('json_data'):\n if self.config.has_option(section, 'depart_number'):\n depart_number = self.config.get(section,\n 'depart_number')\n if self.config.has_section('transfer_data'):\n if self.config.has_option(section, 'token'\n ) and self.config.has_option(section, 'address'):\n token = self.config.get(section, 'token')\n address = self.config.get(section, 'address')\n login = {'depart number': depart_number, 'token': token}\n response = requests.post(\n f'https://{address}/api/v2/order/get-depart-token', 
login)\n response_json = response.json()\n response_token = response_json['body']['token']\n transfer_info = {'depart number': depart_number, 'token':\n response_token, 'json': json_file}\n transfer = requests.post(\n f'https://{address}/api/v2/order/ext-orders-package',\n transfer_info)\n transfer_json = transfer.json()\n return transfer_json\n except OSError:\n self.show_error_window('Нет связи с сервером')\n self.close_window()\n",
"<import token>\n\n\nclass TransferWindow(QtWidgets.QMainWindow):\n\n def __init__(self):\n super(TransferWindow, self).__init__()\n self.setFixedSize(482, 340)\n self.ui_3 = Ui_TransferWindow()\n self.ui_7 = ErrorWindow()\n self.ui_3.setupUi(self)\n self.result_dir = path.join(path.dirname(__file__), 'result')\n self.img_dir = path.join(path.dirname(__file__), 'img')\n self.config_dir = path.join(path.dirname(__file__), 'config')\n self.json_dir = path.join(path.dirname(__file__), 'json')\n self.log_dir = path.join(path.dirname(__file__), 'log')\n self.files = os.listdir(self.result_dir)\n self.config = configparser.RawConfigParser()\n self.config.read(path.join(self.config_dir, 'config.ini'), encoding\n ='utf-8')\n self.date = ''\n self.organization_name = get_organization()\n self.model = QtCore.QStringListModel(self)\n self.ui_3.listView.setModel(self.model)\n self.ui_3.listView.setWordWrap(True)\n self.ui_3.listView.hide()\n self.ui_3.listView.setEditTriggers(QAbstractItemView.NoEditTriggers)\n self.ui_3.progressBar.hide()\n self.ui_3.pushButton.clicked.connect(self.create_json)\n self.ui_3.pushButton_2.clicked.connect(self.close_window)\n self.setWindowIcon(QtGui.QIcon(path.join(self.img_dir,\n 'gosuslugi_5.png')))\n self.setWindowTitle('Выбор даты для отправки')\n set_text(self.ui_3.pushButton, 'Отправить')\n self.ui_3.pushButton.setStyleSheet(\n \"\"\"\n background-color: #b2edbf;\n \"\"\"\n )\n set_text(self.ui_3.pushButton_2, 'Закрыть')\n self.ui_3.pushButton_2.setStyleSheet(\n \"\"\"\n background-color: #f7c8c8;\n \"\"\"\n )\n\n def close_window(self):\n self.close()\n\n def show_error_window(self, error):\n label = self.ui_7.findChildren(QLabel)\n for item in label:\n item.setText(error)\n self.ui_7.show()\n\n def get_date_for_transfer(self):\n date = self.ui_3.calendarWidget.selectedDate()\n return date.toString('dd-MM-yyyy')\n\n def read_json_template(self):\n with open(path.join(self.json_dir, 'template.json'), 'r', encoding=\n 'utf-8') as json_file:\n json_data = json.load(json_file)\n python_json_data = json.loads(json_data)\n return python_json_data\n\n def read_json_today(self):\n with open(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json'), 'r', encoding=\n 'utf-8') as json_file:\n json_data = json.load(json_file)\n python_json_data = json.loads(json_data)\n return python_json_data\n <function token>\n\n def create_json(self):\n self.date += self.get_date_for_transfer()\n depart_number = ''\n laboratory_name = ''\n laboratory_ogrn = ''\n for section in self.config.sections():\n if self.config.has_section('json_data'):\n if self.config.has_option(section, 'depart_number'\n ) and self.config.has_option(section, 'laboratory_name'\n ) and self.config.has_option(section, 'laboratory_ogrn'):\n depart_number = self.config.get(section, 'depart_number')\n laboratory_name = self.config.get(section,\n 'laboratory_name')\n laboratory_ogrn = self.config.get(section,\n 'laboratory_ogrn')\n if os.path.exists(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json')):\n python_json_dict = self.read_json_today()\n else:\n python_json_dict = self.read_json_template()\n python_json_dict = python_json_dict[0]\n transfer_list = find_transfer(self.date)\n if not transfer_list:\n python_json_dict['order']['patient']['surname'] = 'В базе'\n python_json_dict['order']['patient']['name'] = 'нет пациентов'\n python_json_dict['order']['patient']['patronymic'] = 'для отправки'\n self.write_json(python_json_dict)\n progress = 0\n if transfer_list:\n 
self.ui_3.progressBar.show()\n for el in range(len(transfer_list)):\n unique_number = generate_unique_number()\n python_json_dict['order']['number'] = unique_number\n python_json_dict['order']['depart'] = depart_number\n python_json_dict['order']['laboratoryName'] = laboratory_name\n python_json_dict['order']['laboratoryOgrn'] = laboratory_ogrn\n python_json_dict['order']['name'] = transfer_list[el][2]\n python_json_dict['order']['ogrn'] = transfer_list[el][3]\n python_json_dict['order']['orderDate'] = transfer_list[el][4]\n python_json_dict['order']['serv'][0]['code'] = transfer_list[el][5]\n python_json_dict['order']['serv'][0]['name'] = transfer_list[el][6]\n python_json_dict['order']['serv'][0]['testSystem'] = transfer_list[\n el][7]\n python_json_dict['order']['serv'][0]['biomaterDate'\n ] = transfer_list[el][8]\n python_json_dict['order']['serv'][0]['readyDate'] = transfer_list[\n el][9]\n python_json_dict['order']['serv'][0]['result'] = transfer_list[el][\n 10][0]\n python_json_dict['order']['serv'][0]['type'] = transfer_list[el][11\n ][0]\n python_json_dict['order']['serv'][0]['value'] = transfer_list[el][\n 12]\n python_json_dict['order']['patient']['surname'] = transfer_list[el\n ][13]\n python_json_dict['order']['patient']['name'] = transfer_list[el][14\n ]\n python_json_dict['order']['patient']['patronymic'] = transfer_list[\n el][15]\n python_json_dict['order']['patient']['gender'] = transfer_list[el][\n 16]\n python_json_dict['order']['patient']['birthday'] = transfer_list[el\n ][17]\n python_json_dict['order']['patient']['phone'] = transfer_list[el][\n 18]\n python_json_dict['order']['patient']['email'] = transfer_list[el][\n 19]\n python_json_dict['order']['patient']['documentType'\n ] = transfer_list[el][20]\n python_json_dict['order']['patient']['documentNumber'\n ] = transfer_list[el][22]\n python_json_dict['order']['patient']['documentSerNumber'\n ] = transfer_list[el][21]\n python_json_dict['order']['patient']['snils'] = transfer_list[el][\n 23]\n python_json_dict['order']['patient']['oms'] = transfer_list[el][24]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'town'] = transfer_list[el][25]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'house'] = transfer_list[el][26]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'region'] = transfer_list[el][27]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'building'] = transfer_list[el][28]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'district'] = transfer_list[el][29]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'appartament'] = transfer_list[el][30]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'streetName'] = transfer_list[el][31]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'town'] = transfer_list[el][32]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'house'] = transfer_list[el][33]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'region'] = transfer_list[el][34]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'building'] = transfer_list[el][35]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'district'] = transfer_list[el][36]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'appartament'] = transfer_list[el][37]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'streetName'] = transfer_list[el][38]\n self.write_json(python_json_dict)\n 
sleep(1)\n progress += 100 / len(transfer_list)\n self.ui_3.progressBar.setValue(progress)\n self.logging_transfer()\n\n def logging_transfer(self):\n with open(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json'), 'r', encoding=\n 'utf-8') as read_file:\n json_file = json.load(read_file)\n python_json = json.loads(json_file)\n patient_list = []\n for patients_dict in python_json:\n surname = f\"{patients_dict['order']['patient']['surname']}\"\n name = f\"{patients_dict['order']['patient']['name']}\"\n patronymic = f\"{patients_dict['order']['patient']['patronymic']}\"\n patient = f'{surname} {name} {patronymic}'\n patient_list.append(patient)\n transfer_json = self.transfer_data()\n status_list = []\n if patient_list[0] != 'В базе нет пациентов для отправки':\n with open(path.join(self.log_dir, 'console_log.txt'), 'a'\n ) as log_file:\n log_file.write(f'{str(transfer_json)}\\n\\n')\n transfer_list = find_transfer(self.date)\n for elements in range(len(transfer_list)):\n if transfer_json['body'][int(elements)]['status'] == 'error':\n patient_list.insert(elements * 3 + 1,\n f\"{transfer_json['body'][int(elements)]['message']}\")\n patient_list.insert(elements * 3 + 2,\n '------------------------------------------------------------------------'\n )\n status_list.append('error')\n elif transfer_json['body'][int(elements)]['status'\n ] == 'ok' or transfer_json['body'][int(elements)]['status'\n ] == '':\n patient_list.insert(elements * 3 + 1, f'Успешно!')\n patient_list.insert(elements * 3 + 2,\n '------------------------------------------------------------------------'\n )\n status_list.append('ok')\n for elem in range(len(status_list)):\n if status_list[elem] == 'ok':\n success(transfer_list[elem][0], 1)\n self.ui_3.calendarWidget.hide()\n self.ui_3.listView.show()\n self.model.setStringList(patient_list)\n self.ui_3.pushButton.setEnabled(False)\n if os.path.isfile(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json')):\n os.remove(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json'))\n self.date = ''\n\n def transfer_data(self):\n try:\n date = self.get_date_for_transfer()\n organization_name = get_organization()\n with open(path.join(self.result_dir,\n f'{organization_name}-{date}.json'), 'r', encoding='utf-8'\n ) as read_file:\n json_file = json.load(read_file)\n depart_number = ''\n token = ''\n address = ''\n for section in self.config.sections():\n if self.config.has_section('json_data'):\n if self.config.has_option(section, 'depart_number'):\n depart_number = self.config.get(section,\n 'depart_number')\n if self.config.has_section('transfer_data'):\n if self.config.has_option(section, 'token'\n ) and self.config.has_option(section, 'address'):\n token = self.config.get(section, 'token')\n address = self.config.get(section, 'address')\n login = {'depart number': depart_number, 'token': token}\n response = requests.post(\n f'https://{address}/api/v2/order/get-depart-token', login)\n response_json = response.json()\n response_token = response_json['body']['token']\n transfer_info = {'depart number': depart_number, 'token':\n response_token, 'json': json_file}\n transfer = requests.post(\n f'https://{address}/api/v2/order/ext-orders-package',\n transfer_info)\n transfer_json = transfer.json()\n return transfer_json\n except OSError:\n self.show_error_window('Нет связи с сервером')\n self.close_window()\n",
"<import token>\n\n\nclass TransferWindow(QtWidgets.QMainWindow):\n <function token>\n\n def close_window(self):\n self.close()\n\n def show_error_window(self, error):\n label = self.ui_7.findChildren(QLabel)\n for item in label:\n item.setText(error)\n self.ui_7.show()\n\n def get_date_for_transfer(self):\n date = self.ui_3.calendarWidget.selectedDate()\n return date.toString('dd-MM-yyyy')\n\n def read_json_template(self):\n with open(path.join(self.json_dir, 'template.json'), 'r', encoding=\n 'utf-8') as json_file:\n json_data = json.load(json_file)\n python_json_data = json.loads(json_data)\n return python_json_data\n\n def read_json_today(self):\n with open(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json'), 'r', encoding=\n 'utf-8') as json_file:\n json_data = json.load(json_file)\n python_json_data = json.loads(json_data)\n return python_json_data\n <function token>\n\n def create_json(self):\n self.date += self.get_date_for_transfer()\n depart_number = ''\n laboratory_name = ''\n laboratory_ogrn = ''\n for section in self.config.sections():\n if self.config.has_section('json_data'):\n if self.config.has_option(section, 'depart_number'\n ) and self.config.has_option(section, 'laboratory_name'\n ) and self.config.has_option(section, 'laboratory_ogrn'):\n depart_number = self.config.get(section, 'depart_number')\n laboratory_name = self.config.get(section,\n 'laboratory_name')\n laboratory_ogrn = self.config.get(section,\n 'laboratory_ogrn')\n if os.path.exists(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json')):\n python_json_dict = self.read_json_today()\n else:\n python_json_dict = self.read_json_template()\n python_json_dict = python_json_dict[0]\n transfer_list = find_transfer(self.date)\n if not transfer_list:\n python_json_dict['order']['patient']['surname'] = 'В базе'\n python_json_dict['order']['patient']['name'] = 'нет пациентов'\n python_json_dict['order']['patient']['patronymic'] = 'для отправки'\n self.write_json(python_json_dict)\n progress = 0\n if transfer_list:\n self.ui_3.progressBar.show()\n for el in range(len(transfer_list)):\n unique_number = generate_unique_number()\n python_json_dict['order']['number'] = unique_number\n python_json_dict['order']['depart'] = depart_number\n python_json_dict['order']['laboratoryName'] = laboratory_name\n python_json_dict['order']['laboratoryOgrn'] = laboratory_ogrn\n python_json_dict['order']['name'] = transfer_list[el][2]\n python_json_dict['order']['ogrn'] = transfer_list[el][3]\n python_json_dict['order']['orderDate'] = transfer_list[el][4]\n python_json_dict['order']['serv'][0]['code'] = transfer_list[el][5]\n python_json_dict['order']['serv'][0]['name'] = transfer_list[el][6]\n python_json_dict['order']['serv'][0]['testSystem'] = transfer_list[\n el][7]\n python_json_dict['order']['serv'][0]['biomaterDate'\n ] = transfer_list[el][8]\n python_json_dict['order']['serv'][0]['readyDate'] = transfer_list[\n el][9]\n python_json_dict['order']['serv'][0]['result'] = transfer_list[el][\n 10][0]\n python_json_dict['order']['serv'][0]['type'] = transfer_list[el][11\n ][0]\n python_json_dict['order']['serv'][0]['value'] = transfer_list[el][\n 12]\n python_json_dict['order']['patient']['surname'] = transfer_list[el\n ][13]\n python_json_dict['order']['patient']['name'] = transfer_list[el][14\n ]\n python_json_dict['order']['patient']['patronymic'] = transfer_list[\n el][15]\n python_json_dict['order']['patient']['gender'] = transfer_list[el][\n 16]\n 
python_json_dict['order']['patient']['birthday'] = transfer_list[el\n ][17]\n python_json_dict['order']['patient']['phone'] = transfer_list[el][\n 18]\n python_json_dict['order']['patient']['email'] = transfer_list[el][\n 19]\n python_json_dict['order']['patient']['documentType'\n ] = transfer_list[el][20]\n python_json_dict['order']['patient']['documentNumber'\n ] = transfer_list[el][22]\n python_json_dict['order']['patient']['documentSerNumber'\n ] = transfer_list[el][21]\n python_json_dict['order']['patient']['snils'] = transfer_list[el][\n 23]\n python_json_dict['order']['patient']['oms'] = transfer_list[el][24]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'town'] = transfer_list[el][25]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'house'] = transfer_list[el][26]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'region'] = transfer_list[el][27]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'building'] = transfer_list[el][28]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'district'] = transfer_list[el][29]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'appartament'] = transfer_list[el][30]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'streetName'] = transfer_list[el][31]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'town'] = transfer_list[el][32]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'house'] = transfer_list[el][33]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'region'] = transfer_list[el][34]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'building'] = transfer_list[el][35]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'district'] = transfer_list[el][36]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'appartament'] = transfer_list[el][37]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'streetName'] = transfer_list[el][38]\n self.write_json(python_json_dict)\n sleep(1)\n progress += 100 / len(transfer_list)\n self.ui_3.progressBar.setValue(progress)\n self.logging_transfer()\n\n def logging_transfer(self):\n with open(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json'), 'r', encoding=\n 'utf-8') as read_file:\n json_file = json.load(read_file)\n python_json = json.loads(json_file)\n patient_list = []\n for patients_dict in python_json:\n surname = f\"{patients_dict['order']['patient']['surname']}\"\n name = f\"{patients_dict['order']['patient']['name']}\"\n patronymic = f\"{patients_dict['order']['patient']['patronymic']}\"\n patient = f'{surname} {name} {patronymic}'\n patient_list.append(patient)\n transfer_json = self.transfer_data()\n status_list = []\n if patient_list[0] != 'В базе нет пациентов для отправки':\n with open(path.join(self.log_dir, 'console_log.txt'), 'a'\n ) as log_file:\n log_file.write(f'{str(transfer_json)}\\n\\n')\n transfer_list = find_transfer(self.date)\n for elements in range(len(transfer_list)):\n if transfer_json['body'][int(elements)]['status'] == 'error':\n patient_list.insert(elements * 3 + 1,\n f\"{transfer_json['body'][int(elements)]['message']}\")\n patient_list.insert(elements * 3 + 2,\n '------------------------------------------------------------------------'\n )\n status_list.append('error')\n elif transfer_json['body'][int(elements)]['status'\n ] == 'ok' or transfer_json['body'][int(elements)]['status'\n ] == '':\n 
patient_list.insert(elements * 3 + 1, f'Успешно!')\n patient_list.insert(elements * 3 + 2,\n '------------------------------------------------------------------------'\n )\n status_list.append('ok')\n for elem in range(len(status_list)):\n if status_list[elem] == 'ok':\n success(transfer_list[elem][0], 1)\n self.ui_3.calendarWidget.hide()\n self.ui_3.listView.show()\n self.model.setStringList(patient_list)\n self.ui_3.pushButton.setEnabled(False)\n if os.path.isfile(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json')):\n os.remove(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json'))\n self.date = ''\n\n def transfer_data(self):\n try:\n date = self.get_date_for_transfer()\n organization_name = get_organization()\n with open(path.join(self.result_dir,\n f'{organization_name}-{date}.json'), 'r', encoding='utf-8'\n ) as read_file:\n json_file = json.load(read_file)\n depart_number = ''\n token = ''\n address = ''\n for section in self.config.sections():\n if self.config.has_section('json_data'):\n if self.config.has_option(section, 'depart_number'):\n depart_number = self.config.get(section,\n 'depart_number')\n if self.config.has_section('transfer_data'):\n if self.config.has_option(section, 'token'\n ) and self.config.has_option(section, 'address'):\n token = self.config.get(section, 'token')\n address = self.config.get(section, 'address')\n login = {'depart number': depart_number, 'token': token}\n response = requests.post(\n f'https://{address}/api/v2/order/get-depart-token', login)\n response_json = response.json()\n response_token = response_json['body']['token']\n transfer_info = {'depart number': depart_number, 'token':\n response_token, 'json': json_file}\n transfer = requests.post(\n f'https://{address}/api/v2/order/ext-orders-package',\n transfer_info)\n transfer_json = transfer.json()\n return transfer_json\n except OSError:\n self.show_error_window('Нет связи с сервером')\n self.close_window()\n",
"<import token>\n\n\nclass TransferWindow(QtWidgets.QMainWindow):\n <function token>\n\n def close_window(self):\n self.close()\n\n def show_error_window(self, error):\n label = self.ui_7.findChildren(QLabel)\n for item in label:\n item.setText(error)\n self.ui_7.show()\n\n def get_date_for_transfer(self):\n date = self.ui_3.calendarWidget.selectedDate()\n return date.toString('dd-MM-yyyy')\n\n def read_json_template(self):\n with open(path.join(self.json_dir, 'template.json'), 'r', encoding=\n 'utf-8') as json_file:\n json_data = json.load(json_file)\n python_json_data = json.loads(json_data)\n return python_json_data\n\n def read_json_today(self):\n with open(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json'), 'r', encoding=\n 'utf-8') as json_file:\n json_data = json.load(json_file)\n python_json_data = json.loads(json_data)\n return python_json_data\n <function token>\n\n def create_json(self):\n self.date += self.get_date_for_transfer()\n depart_number = ''\n laboratory_name = ''\n laboratory_ogrn = ''\n for section in self.config.sections():\n if self.config.has_section('json_data'):\n if self.config.has_option(section, 'depart_number'\n ) and self.config.has_option(section, 'laboratory_name'\n ) and self.config.has_option(section, 'laboratory_ogrn'):\n depart_number = self.config.get(section, 'depart_number')\n laboratory_name = self.config.get(section,\n 'laboratory_name')\n laboratory_ogrn = self.config.get(section,\n 'laboratory_ogrn')\n if os.path.exists(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json')):\n python_json_dict = self.read_json_today()\n else:\n python_json_dict = self.read_json_template()\n python_json_dict = python_json_dict[0]\n transfer_list = find_transfer(self.date)\n if not transfer_list:\n python_json_dict['order']['patient']['surname'] = 'В базе'\n python_json_dict['order']['patient']['name'] = 'нет пациентов'\n python_json_dict['order']['patient']['patronymic'] = 'для отправки'\n self.write_json(python_json_dict)\n progress = 0\n if transfer_list:\n self.ui_3.progressBar.show()\n for el in range(len(transfer_list)):\n unique_number = generate_unique_number()\n python_json_dict['order']['number'] = unique_number\n python_json_dict['order']['depart'] = depart_number\n python_json_dict['order']['laboratoryName'] = laboratory_name\n python_json_dict['order']['laboratoryOgrn'] = laboratory_ogrn\n python_json_dict['order']['name'] = transfer_list[el][2]\n python_json_dict['order']['ogrn'] = transfer_list[el][3]\n python_json_dict['order']['orderDate'] = transfer_list[el][4]\n python_json_dict['order']['serv'][0]['code'] = transfer_list[el][5]\n python_json_dict['order']['serv'][0]['name'] = transfer_list[el][6]\n python_json_dict['order']['serv'][0]['testSystem'] = transfer_list[\n el][7]\n python_json_dict['order']['serv'][0]['biomaterDate'\n ] = transfer_list[el][8]\n python_json_dict['order']['serv'][0]['readyDate'] = transfer_list[\n el][9]\n python_json_dict['order']['serv'][0]['result'] = transfer_list[el][\n 10][0]\n python_json_dict['order']['serv'][0]['type'] = transfer_list[el][11\n ][0]\n python_json_dict['order']['serv'][0]['value'] = transfer_list[el][\n 12]\n python_json_dict['order']['patient']['surname'] = transfer_list[el\n ][13]\n python_json_dict['order']['patient']['name'] = transfer_list[el][14\n ]\n python_json_dict['order']['patient']['patronymic'] = transfer_list[\n el][15]\n python_json_dict['order']['patient']['gender'] = transfer_list[el][\n 16]\n 
python_json_dict['order']['patient']['birthday'] = transfer_list[el\n ][17]\n python_json_dict['order']['patient']['phone'] = transfer_list[el][\n 18]\n python_json_dict['order']['patient']['email'] = transfer_list[el][\n 19]\n python_json_dict['order']['patient']['documentType'\n ] = transfer_list[el][20]\n python_json_dict['order']['patient']['documentNumber'\n ] = transfer_list[el][22]\n python_json_dict['order']['patient']['documentSerNumber'\n ] = transfer_list[el][21]\n python_json_dict['order']['patient']['snils'] = transfer_list[el][\n 23]\n python_json_dict['order']['patient']['oms'] = transfer_list[el][24]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'town'] = transfer_list[el][25]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'house'] = transfer_list[el][26]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'region'] = transfer_list[el][27]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'building'] = transfer_list[el][28]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'district'] = transfer_list[el][29]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'appartament'] = transfer_list[el][30]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'streetName'] = transfer_list[el][31]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'town'] = transfer_list[el][32]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'house'] = transfer_list[el][33]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'region'] = transfer_list[el][34]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'building'] = transfer_list[el][35]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'district'] = transfer_list[el][36]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'appartament'] = transfer_list[el][37]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'streetName'] = transfer_list[el][38]\n self.write_json(python_json_dict)\n sleep(1)\n progress += 100 / len(transfer_list)\n self.ui_3.progressBar.setValue(progress)\n self.logging_transfer()\n <function token>\n\n def transfer_data(self):\n try:\n date = self.get_date_for_transfer()\n organization_name = get_organization()\n with open(path.join(self.result_dir,\n f'{organization_name}-{date}.json'), 'r', encoding='utf-8'\n ) as read_file:\n json_file = json.load(read_file)\n depart_number = ''\n token = ''\n address = ''\n for section in self.config.sections():\n if self.config.has_section('json_data'):\n if self.config.has_option(section, 'depart_number'):\n depart_number = self.config.get(section,\n 'depart_number')\n if self.config.has_section('transfer_data'):\n if self.config.has_option(section, 'token'\n ) and self.config.has_option(section, 'address'):\n token = self.config.get(section, 'token')\n address = self.config.get(section, 'address')\n login = {'depart number': depart_number, 'token': token}\n response = requests.post(\n f'https://{address}/api/v2/order/get-depart-token', login)\n response_json = response.json()\n response_token = response_json['body']['token']\n transfer_info = {'depart number': depart_number, 'token':\n response_token, 'json': json_file}\n transfer = requests.post(\n f'https://{address}/api/v2/order/ext-orders-package',\n transfer_info)\n transfer_json = transfer.json()\n return transfer_json\n except OSError:\n self.show_error_window('Нет связи с сервером')\n 
self.close_window()\n",
"<import token>\n\n\nclass TransferWindow(QtWidgets.QMainWindow):\n <function token>\n\n def close_window(self):\n self.close()\n\n def show_error_window(self, error):\n label = self.ui_7.findChildren(QLabel)\n for item in label:\n item.setText(error)\n self.ui_7.show()\n\n def get_date_for_transfer(self):\n date = self.ui_3.calendarWidget.selectedDate()\n return date.toString('dd-MM-yyyy')\n\n def read_json_template(self):\n with open(path.join(self.json_dir, 'template.json'), 'r', encoding=\n 'utf-8') as json_file:\n json_data = json.load(json_file)\n python_json_data = json.loads(json_data)\n return python_json_data\n\n def read_json_today(self):\n with open(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json'), 'r', encoding=\n 'utf-8') as json_file:\n json_data = json.load(json_file)\n python_json_data = json.loads(json_data)\n return python_json_data\n <function token>\n\n def create_json(self):\n self.date += self.get_date_for_transfer()\n depart_number = ''\n laboratory_name = ''\n laboratory_ogrn = ''\n for section in self.config.sections():\n if self.config.has_section('json_data'):\n if self.config.has_option(section, 'depart_number'\n ) and self.config.has_option(section, 'laboratory_name'\n ) and self.config.has_option(section, 'laboratory_ogrn'):\n depart_number = self.config.get(section, 'depart_number')\n laboratory_name = self.config.get(section,\n 'laboratory_name')\n laboratory_ogrn = self.config.get(section,\n 'laboratory_ogrn')\n if os.path.exists(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json')):\n python_json_dict = self.read_json_today()\n else:\n python_json_dict = self.read_json_template()\n python_json_dict = python_json_dict[0]\n transfer_list = find_transfer(self.date)\n if not transfer_list:\n python_json_dict['order']['patient']['surname'] = 'В базе'\n python_json_dict['order']['patient']['name'] = 'нет пациентов'\n python_json_dict['order']['patient']['patronymic'] = 'для отправки'\n self.write_json(python_json_dict)\n progress = 0\n if transfer_list:\n self.ui_3.progressBar.show()\n for el in range(len(transfer_list)):\n unique_number = generate_unique_number()\n python_json_dict['order']['number'] = unique_number\n python_json_dict['order']['depart'] = depart_number\n python_json_dict['order']['laboratoryName'] = laboratory_name\n python_json_dict['order']['laboratoryOgrn'] = laboratory_ogrn\n python_json_dict['order']['name'] = transfer_list[el][2]\n python_json_dict['order']['ogrn'] = transfer_list[el][3]\n python_json_dict['order']['orderDate'] = transfer_list[el][4]\n python_json_dict['order']['serv'][0]['code'] = transfer_list[el][5]\n python_json_dict['order']['serv'][0]['name'] = transfer_list[el][6]\n python_json_dict['order']['serv'][0]['testSystem'] = transfer_list[\n el][7]\n python_json_dict['order']['serv'][0]['biomaterDate'\n ] = transfer_list[el][8]\n python_json_dict['order']['serv'][0]['readyDate'] = transfer_list[\n el][9]\n python_json_dict['order']['serv'][0]['result'] = transfer_list[el][\n 10][0]\n python_json_dict['order']['serv'][0]['type'] = transfer_list[el][11\n ][0]\n python_json_dict['order']['serv'][0]['value'] = transfer_list[el][\n 12]\n python_json_dict['order']['patient']['surname'] = transfer_list[el\n ][13]\n python_json_dict['order']['patient']['name'] = transfer_list[el][14\n ]\n python_json_dict['order']['patient']['patronymic'] = transfer_list[\n el][15]\n python_json_dict['order']['patient']['gender'] = transfer_list[el][\n 16]\n 
python_json_dict['order']['patient']['birthday'] = transfer_list[el\n ][17]\n python_json_dict['order']['patient']['phone'] = transfer_list[el][\n 18]\n python_json_dict['order']['patient']['email'] = transfer_list[el][\n 19]\n python_json_dict['order']['patient']['documentType'\n ] = transfer_list[el][20]\n python_json_dict['order']['patient']['documentNumber'\n ] = transfer_list[el][22]\n python_json_dict['order']['patient']['documentSerNumber'\n ] = transfer_list[el][21]\n python_json_dict['order']['patient']['snils'] = transfer_list[el][\n 23]\n python_json_dict['order']['patient']['oms'] = transfer_list[el][24]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'town'] = transfer_list[el][25]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'house'] = transfer_list[el][26]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'region'] = transfer_list[el][27]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'building'] = transfer_list[el][28]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'district'] = transfer_list[el][29]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'appartament'] = transfer_list[el][30]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'streetName'] = transfer_list[el][31]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'town'] = transfer_list[el][32]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'house'] = transfer_list[el][33]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'region'] = transfer_list[el][34]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'building'] = transfer_list[el][35]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'district'] = transfer_list[el][36]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'appartament'] = transfer_list[el][37]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'streetName'] = transfer_list[el][38]\n self.write_json(python_json_dict)\n sleep(1)\n progress += 100 / len(transfer_list)\n self.ui_3.progressBar.setValue(progress)\n self.logging_transfer()\n <function token>\n <function token>\n",
"<import token>\n\n\nclass TransferWindow(QtWidgets.QMainWindow):\n <function token>\n\n def close_window(self):\n self.close()\n\n def show_error_window(self, error):\n label = self.ui_7.findChildren(QLabel)\n for item in label:\n item.setText(error)\n self.ui_7.show()\n\n def get_date_for_transfer(self):\n date = self.ui_3.calendarWidget.selectedDate()\n return date.toString('dd-MM-yyyy')\n\n def read_json_template(self):\n with open(path.join(self.json_dir, 'template.json'), 'r', encoding=\n 'utf-8') as json_file:\n json_data = json.load(json_file)\n python_json_data = json.loads(json_data)\n return python_json_data\n <function token>\n <function token>\n\n def create_json(self):\n self.date += self.get_date_for_transfer()\n depart_number = ''\n laboratory_name = ''\n laboratory_ogrn = ''\n for section in self.config.sections():\n if self.config.has_section('json_data'):\n if self.config.has_option(section, 'depart_number'\n ) and self.config.has_option(section, 'laboratory_name'\n ) and self.config.has_option(section, 'laboratory_ogrn'):\n depart_number = self.config.get(section, 'depart_number')\n laboratory_name = self.config.get(section,\n 'laboratory_name')\n laboratory_ogrn = self.config.get(section,\n 'laboratory_ogrn')\n if os.path.exists(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json')):\n python_json_dict = self.read_json_today()\n else:\n python_json_dict = self.read_json_template()\n python_json_dict = python_json_dict[0]\n transfer_list = find_transfer(self.date)\n if not transfer_list:\n python_json_dict['order']['patient']['surname'] = 'В базе'\n python_json_dict['order']['patient']['name'] = 'нет пациентов'\n python_json_dict['order']['patient']['patronymic'] = 'для отправки'\n self.write_json(python_json_dict)\n progress = 0\n if transfer_list:\n self.ui_3.progressBar.show()\n for el in range(len(transfer_list)):\n unique_number = generate_unique_number()\n python_json_dict['order']['number'] = unique_number\n python_json_dict['order']['depart'] = depart_number\n python_json_dict['order']['laboratoryName'] = laboratory_name\n python_json_dict['order']['laboratoryOgrn'] = laboratory_ogrn\n python_json_dict['order']['name'] = transfer_list[el][2]\n python_json_dict['order']['ogrn'] = transfer_list[el][3]\n python_json_dict['order']['orderDate'] = transfer_list[el][4]\n python_json_dict['order']['serv'][0]['code'] = transfer_list[el][5]\n python_json_dict['order']['serv'][0]['name'] = transfer_list[el][6]\n python_json_dict['order']['serv'][0]['testSystem'] = transfer_list[\n el][7]\n python_json_dict['order']['serv'][0]['biomaterDate'\n ] = transfer_list[el][8]\n python_json_dict['order']['serv'][0]['readyDate'] = transfer_list[\n el][9]\n python_json_dict['order']['serv'][0]['result'] = transfer_list[el][\n 10][0]\n python_json_dict['order']['serv'][0]['type'] = transfer_list[el][11\n ][0]\n python_json_dict['order']['serv'][0]['value'] = transfer_list[el][\n 12]\n python_json_dict['order']['patient']['surname'] = transfer_list[el\n ][13]\n python_json_dict['order']['patient']['name'] = transfer_list[el][14\n ]\n python_json_dict['order']['patient']['patronymic'] = transfer_list[\n el][15]\n python_json_dict['order']['patient']['gender'] = transfer_list[el][\n 16]\n python_json_dict['order']['patient']['birthday'] = transfer_list[el\n ][17]\n python_json_dict['order']['patient']['phone'] = transfer_list[el][\n 18]\n python_json_dict['order']['patient']['email'] = transfer_list[el][\n 19]\n python_json_dict['order']['patient']['documentType'\n ] 
= transfer_list[el][20]\n python_json_dict['order']['patient']['documentNumber'\n ] = transfer_list[el][22]\n python_json_dict['order']['patient']['documentSerNumber'\n ] = transfer_list[el][21]\n python_json_dict['order']['patient']['snils'] = transfer_list[el][\n 23]\n python_json_dict['order']['patient']['oms'] = transfer_list[el][24]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'town'] = transfer_list[el][25]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'house'] = transfer_list[el][26]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'region'] = transfer_list[el][27]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'building'] = transfer_list[el][28]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'district'] = transfer_list[el][29]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'appartament'] = transfer_list[el][30]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'streetName'] = transfer_list[el][31]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'town'] = transfer_list[el][32]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'house'] = transfer_list[el][33]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'region'] = transfer_list[el][34]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'building'] = transfer_list[el][35]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'district'] = transfer_list[el][36]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'appartament'] = transfer_list[el][37]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'streetName'] = transfer_list[el][38]\n self.write_json(python_json_dict)\n sleep(1)\n progress += 100 / len(transfer_list)\n self.ui_3.progressBar.setValue(progress)\n self.logging_transfer()\n <function token>\n <function token>\n",
"<import token>\n\n\nclass TransferWindow(QtWidgets.QMainWindow):\n <function token>\n <function token>\n\n def show_error_window(self, error):\n label = self.ui_7.findChildren(QLabel)\n for item in label:\n item.setText(error)\n self.ui_7.show()\n\n def get_date_for_transfer(self):\n date = self.ui_3.calendarWidget.selectedDate()\n return date.toString('dd-MM-yyyy')\n\n def read_json_template(self):\n with open(path.join(self.json_dir, 'template.json'), 'r', encoding=\n 'utf-8') as json_file:\n json_data = json.load(json_file)\n python_json_data = json.loads(json_data)\n return python_json_data\n <function token>\n <function token>\n\n def create_json(self):\n self.date += self.get_date_for_transfer()\n depart_number = ''\n laboratory_name = ''\n laboratory_ogrn = ''\n for section in self.config.sections():\n if self.config.has_section('json_data'):\n if self.config.has_option(section, 'depart_number'\n ) and self.config.has_option(section, 'laboratory_name'\n ) and self.config.has_option(section, 'laboratory_ogrn'):\n depart_number = self.config.get(section, 'depart_number')\n laboratory_name = self.config.get(section,\n 'laboratory_name')\n laboratory_ogrn = self.config.get(section,\n 'laboratory_ogrn')\n if os.path.exists(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json')):\n python_json_dict = self.read_json_today()\n else:\n python_json_dict = self.read_json_template()\n python_json_dict = python_json_dict[0]\n transfer_list = find_transfer(self.date)\n if not transfer_list:\n python_json_dict['order']['patient']['surname'] = 'В базе'\n python_json_dict['order']['patient']['name'] = 'нет пациентов'\n python_json_dict['order']['patient']['patronymic'] = 'для отправки'\n self.write_json(python_json_dict)\n progress = 0\n if transfer_list:\n self.ui_3.progressBar.show()\n for el in range(len(transfer_list)):\n unique_number = generate_unique_number()\n python_json_dict['order']['number'] = unique_number\n python_json_dict['order']['depart'] = depart_number\n python_json_dict['order']['laboratoryName'] = laboratory_name\n python_json_dict['order']['laboratoryOgrn'] = laboratory_ogrn\n python_json_dict['order']['name'] = transfer_list[el][2]\n python_json_dict['order']['ogrn'] = transfer_list[el][3]\n python_json_dict['order']['orderDate'] = transfer_list[el][4]\n python_json_dict['order']['serv'][0]['code'] = transfer_list[el][5]\n python_json_dict['order']['serv'][0]['name'] = transfer_list[el][6]\n python_json_dict['order']['serv'][0]['testSystem'] = transfer_list[\n el][7]\n python_json_dict['order']['serv'][0]['biomaterDate'\n ] = transfer_list[el][8]\n python_json_dict['order']['serv'][0]['readyDate'] = transfer_list[\n el][9]\n python_json_dict['order']['serv'][0]['result'] = transfer_list[el][\n 10][0]\n python_json_dict['order']['serv'][0]['type'] = transfer_list[el][11\n ][0]\n python_json_dict['order']['serv'][0]['value'] = transfer_list[el][\n 12]\n python_json_dict['order']['patient']['surname'] = transfer_list[el\n ][13]\n python_json_dict['order']['patient']['name'] = transfer_list[el][14\n ]\n python_json_dict['order']['patient']['patronymic'] = transfer_list[\n el][15]\n python_json_dict['order']['patient']['gender'] = transfer_list[el][\n 16]\n python_json_dict['order']['patient']['birthday'] = transfer_list[el\n ][17]\n python_json_dict['order']['patient']['phone'] = transfer_list[el][\n 18]\n python_json_dict['order']['patient']['email'] = transfer_list[el][\n 19]\n python_json_dict['order']['patient']['documentType'\n ] = 
transfer_list[el][20]\n python_json_dict['order']['patient']['documentNumber'\n ] = transfer_list[el][22]\n python_json_dict['order']['patient']['documentSerNumber'\n ] = transfer_list[el][21]\n python_json_dict['order']['patient']['snils'] = transfer_list[el][\n 23]\n python_json_dict['order']['patient']['oms'] = transfer_list[el][24]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'town'] = transfer_list[el][25]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'house'] = transfer_list[el][26]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'region'] = transfer_list[el][27]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'building'] = transfer_list[el][28]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'district'] = transfer_list[el][29]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'appartament'] = transfer_list[el][30]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'streetName'] = transfer_list[el][31]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'town'] = transfer_list[el][32]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'house'] = transfer_list[el][33]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'region'] = transfer_list[el][34]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'building'] = transfer_list[el][35]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'district'] = transfer_list[el][36]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'appartament'] = transfer_list[el][37]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'streetName'] = transfer_list[el][38]\n self.write_json(python_json_dict)\n sleep(1)\n progress += 100 / len(transfer_list)\n self.ui_3.progressBar.setValue(progress)\n self.logging_transfer()\n <function token>\n <function token>\n",
"<import token>\n\n\nclass TransferWindow(QtWidgets.QMainWindow):\n <function token>\n <function token>\n\n def show_error_window(self, error):\n label = self.ui_7.findChildren(QLabel)\n for item in label:\n item.setText(error)\n self.ui_7.show()\n\n def get_date_for_transfer(self):\n date = self.ui_3.calendarWidget.selectedDate()\n return date.toString('dd-MM-yyyy')\n <function token>\n <function token>\n <function token>\n\n def create_json(self):\n self.date += self.get_date_for_transfer()\n depart_number = ''\n laboratory_name = ''\n laboratory_ogrn = ''\n for section in self.config.sections():\n if self.config.has_section('json_data'):\n if self.config.has_option(section, 'depart_number'\n ) and self.config.has_option(section, 'laboratory_name'\n ) and self.config.has_option(section, 'laboratory_ogrn'):\n depart_number = self.config.get(section, 'depart_number')\n laboratory_name = self.config.get(section,\n 'laboratory_name')\n laboratory_ogrn = self.config.get(section,\n 'laboratory_ogrn')\n if os.path.exists(path.join(self.result_dir,\n f'{self.organization_name}-{self.date}.json')):\n python_json_dict = self.read_json_today()\n else:\n python_json_dict = self.read_json_template()\n python_json_dict = python_json_dict[0]\n transfer_list = find_transfer(self.date)\n if not transfer_list:\n python_json_dict['order']['patient']['surname'] = 'В базе'\n python_json_dict['order']['patient']['name'] = 'нет пациентов'\n python_json_dict['order']['patient']['patronymic'] = 'для отправки'\n self.write_json(python_json_dict)\n progress = 0\n if transfer_list:\n self.ui_3.progressBar.show()\n for el in range(len(transfer_list)):\n unique_number = generate_unique_number()\n python_json_dict['order']['number'] = unique_number\n python_json_dict['order']['depart'] = depart_number\n python_json_dict['order']['laboratoryName'] = laboratory_name\n python_json_dict['order']['laboratoryOgrn'] = laboratory_ogrn\n python_json_dict['order']['name'] = transfer_list[el][2]\n python_json_dict['order']['ogrn'] = transfer_list[el][3]\n python_json_dict['order']['orderDate'] = transfer_list[el][4]\n python_json_dict['order']['serv'][0]['code'] = transfer_list[el][5]\n python_json_dict['order']['serv'][0]['name'] = transfer_list[el][6]\n python_json_dict['order']['serv'][0]['testSystem'] = transfer_list[\n el][7]\n python_json_dict['order']['serv'][0]['biomaterDate'\n ] = transfer_list[el][8]\n python_json_dict['order']['serv'][0]['readyDate'] = transfer_list[\n el][9]\n python_json_dict['order']['serv'][0]['result'] = transfer_list[el][\n 10][0]\n python_json_dict['order']['serv'][0]['type'] = transfer_list[el][11\n ][0]\n python_json_dict['order']['serv'][0]['value'] = transfer_list[el][\n 12]\n python_json_dict['order']['patient']['surname'] = transfer_list[el\n ][13]\n python_json_dict['order']['patient']['name'] = transfer_list[el][14\n ]\n python_json_dict['order']['patient']['patronymic'] = transfer_list[\n el][15]\n python_json_dict['order']['patient']['gender'] = transfer_list[el][\n 16]\n python_json_dict['order']['patient']['birthday'] = transfer_list[el\n ][17]\n python_json_dict['order']['patient']['phone'] = transfer_list[el][\n 18]\n python_json_dict['order']['patient']['email'] = transfer_list[el][\n 19]\n python_json_dict['order']['patient']['documentType'\n ] = transfer_list[el][20]\n python_json_dict['order']['patient']['documentNumber'\n ] = transfer_list[el][22]\n python_json_dict['order']['patient']['documentSerNumber'\n ] = transfer_list[el][21]\n 
python_json_dict['order']['patient']['snils'] = transfer_list[el][\n 23]\n python_json_dict['order']['patient']['oms'] = transfer_list[el][24]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'town'] = transfer_list[el][25]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'house'] = transfer_list[el][26]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'region'] = transfer_list[el][27]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'building'] = transfer_list[el][28]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'district'] = transfer_list[el][29]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'appartament'] = transfer_list[el][30]\n python_json_dict['order']['patient']['address']['regAddress'][\n 'streetName'] = transfer_list[el][31]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'town'] = transfer_list[el][32]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'house'] = transfer_list[el][33]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'region'] = transfer_list[el][34]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'building'] = transfer_list[el][35]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'district'] = transfer_list[el][36]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'appartament'] = transfer_list[el][37]\n python_json_dict['order']['patient']['address']['factAddress'][\n 'streetName'] = transfer_list[el][38]\n self.write_json(python_json_dict)\n sleep(1)\n progress += 100 / len(transfer_list)\n self.ui_3.progressBar.setValue(progress)\n self.logging_transfer()\n <function token>\n <function token>\n",
"<import token>\n\n\nclass TransferWindow(QtWidgets.QMainWindow):\n <function token>\n <function token>\n\n def show_error_window(self, error):\n label = self.ui_7.findChildren(QLabel)\n for item in label:\n item.setText(error)\n self.ui_7.show()\n\n def get_date_for_transfer(self):\n date = self.ui_3.calendarWidget.selectedDate()\n return date.toString('dd-MM-yyyy')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\nclass TransferWindow(QtWidgets.QMainWindow):\n <function token>\n <function token>\n <function token>\n\n def get_date_for_transfer(self):\n date = self.ui_3.calendarWidget.selectedDate()\n return date.toString('dd-MM-yyyy')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n\n\nclass TransferWindow(QtWidgets.QMainWindow):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
99,397 |
1bd5231c9cdf59fe2e2900d2bdd454d979000fcb
|
import komand
from .schema import UnisolateMachineInput, UnisolateMachineOutput
# Custom imports below


class UnisolateMachine(komand.Action):

    def __init__(self):
        super(self.__class__, self).__init__(
            name='unisolate_machine',
            description='Restore network connectivity to a machine',
            input=UnisolateMachineInput(),
            output=UnisolateMachineOutput())

    def run(self, params={}):
        self.logger.info("Running...")

        machine_id = params.get("machine_id")
        comment = params.get("comment")

        self.logger.info("Attempting to unisolate machine id: " + machine_id)
        response = self.connection.unisolate_machine(machine_id, comment)
        return {"machine_isolation_response": komand.helper.clean(response)}

    def test(self):
        self.connection.test()
        payload = self.connection.fake_isolation_response()
        return {"machine_isolation_response": payload}
|
[
"import komand\nfrom .schema import UnisolateMachineInput, UnisolateMachineOutput\n# Custom imports below\n\n\nclass UnisolateMachine(komand.Action):\n\n def __init__(self):\n super(self.__class__, self).__init__(\n name='unisolate_machine',\n description='Restore network connectivity to a machine',\n input=UnisolateMachineInput(),\n output=UnisolateMachineOutput())\n\n def run(self, params={}):\n self.logger.info(\"Running...\")\n\n machine_id = params.get(\"machine_id\")\n comment = params.get(\"comment\")\n\n self.logger.info(\"Attempting to unisolate machine id: \" + machine_id)\n response = self.connection.unisolate_machine(machine_id, comment)\n return {\"machine_isolation_response\": komand.helper.clean(response)}\n\n def test(self):\n self.connection.test()\n payload = self.connection.fake_isolation_response()\n return {\"machine_isolation_response\": payload}\n",
"import komand\nfrom .schema import UnisolateMachineInput, UnisolateMachineOutput\n\n\nclass UnisolateMachine(komand.Action):\n\n def __init__(self):\n super(self.__class__, self).__init__(name='unisolate_machine',\n description='Restore network connectivity to a machine', input=\n UnisolateMachineInput(), output=UnisolateMachineOutput())\n\n def run(self, params={}):\n self.logger.info('Running...')\n machine_id = params.get('machine_id')\n comment = params.get('comment')\n self.logger.info('Attempting to unisolate machine id: ' + machine_id)\n response = self.connection.unisolate_machine(machine_id, comment)\n return {'machine_isolation_response': komand.helper.clean(response)}\n\n def test(self):\n self.connection.test()\n payload = self.connection.fake_isolation_response()\n return {'machine_isolation_response': payload}\n",
"<import token>\n\n\nclass UnisolateMachine(komand.Action):\n\n def __init__(self):\n super(self.__class__, self).__init__(name='unisolate_machine',\n description='Restore network connectivity to a machine', input=\n UnisolateMachineInput(), output=UnisolateMachineOutput())\n\n def run(self, params={}):\n self.logger.info('Running...')\n machine_id = params.get('machine_id')\n comment = params.get('comment')\n self.logger.info('Attempting to unisolate machine id: ' + machine_id)\n response = self.connection.unisolate_machine(machine_id, comment)\n return {'machine_isolation_response': komand.helper.clean(response)}\n\n def test(self):\n self.connection.test()\n payload = self.connection.fake_isolation_response()\n return {'machine_isolation_response': payload}\n",
"<import token>\n\n\nclass UnisolateMachine(komand.Action):\n\n def __init__(self):\n super(self.__class__, self).__init__(name='unisolate_machine',\n description='Restore network connectivity to a machine', input=\n UnisolateMachineInput(), output=UnisolateMachineOutput())\n\n def run(self, params={}):\n self.logger.info('Running...')\n machine_id = params.get('machine_id')\n comment = params.get('comment')\n self.logger.info('Attempting to unisolate machine id: ' + machine_id)\n response = self.connection.unisolate_machine(machine_id, comment)\n return {'machine_isolation_response': komand.helper.clean(response)}\n <function token>\n",
"<import token>\n\n\nclass UnisolateMachine(komand.Action):\n\n def __init__(self):\n super(self.__class__, self).__init__(name='unisolate_machine',\n description='Restore network connectivity to a machine', input=\n UnisolateMachineInput(), output=UnisolateMachineOutput())\n <function token>\n <function token>\n",
"<import token>\n\n\nclass UnisolateMachine(komand.Action):\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
99,398 |
616dd09300f1a4220e1fa1826debf297b5db543a
|
import re
from random import randrange
from fixture.contact import Contact

def test_phones_on_homepage(app):
    contact_from_home_page = app.contact.get_contact_list()[0]
    contact_from_edit_page = app.contact.get_contact_info_from_edit_page(0)
    assert contact_from_home_page.lastname == clear(contact_from_edit_page.lastname)
    assert contact_from_home_page.firstname == clear(contact_from_edit_page.firstname)
    assert contact_from_home_page.address == contact_from_edit_page.address
    assert contact_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(contact_from_edit_page)
    assert contact_from_home_page.all_email_from_home_page == merge_email_like_on_home_page(contact_from_edit_page)

# def test_phones_on_view_homepage(app):
#     contact_from_view_page = app.contact.get_contact_from_view_page(0)
#     contact_from_edit_page = app.contact.get_contact_info_from_edit_page(0)
#     assert contact_from_view_page.home == contact_from_edit_page.home
#     assert contact_from_view_page.mobile == contact_from_edit_page.mobile
#     assert contact_from_view_page.work == contact_from_edit_page.work

# Compare the contact list on the home page with the list from the database
def test_phones_on_homepage_check_db(app, db):
    contact_from_home_page = app.contact.get_contact_list()
    contact_from_db = db.get_contact_list()
    assert len(contact_from_home_page) == len(contact_from_db)
    assert sorted(contact_from_home_page, key=Contact.id_or_max) == sorted(contact_from_db, key=Contact.id_or_max)

def clear(s):
    return re.sub("[() -]", "", s)

def merge_phones_like_on_home_page(contact):
    return "\n".join(filter(lambda x: x != "",
                            map(lambda x: clear(x),
                                filter(lambda x: x is not None,
                                       [contact.home, contact.mobile, contact.work]))))

def merge_email_like_on_home_page(contact):
    return "\n".join(filter(lambda x: x != "",
                            map(lambda x: clear(x),
                                filter(lambda x: x is not None,
                                       [contact.email, contact.email2, contact.email3]))))
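# Illustrative example of the helpers above (phone value is made up):
#   clear("+7 (926) 123-45-67") -> "+79261234567"
# merge_phones_like_on_home_page then joins the cleaned, non-empty phones with
# "\n", which is exactly how the home page renders its combined phone column.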
|
[
"import re\nfrom random import randrange\nfrom fixture.contact import Contact\n\ndef test_phones_on_homepage(app):\n contact_from_home_page = app.contact.get_contact_list()[0]\n contact_from_edit_page = app.contact.get_contact_info_from_edit_page(0)\n assert contact_from_home_page.lastname == clear(contact_from_edit_page.lastname)\n assert contact_from_home_page.firstname == clear(contact_from_edit_page.firstname)\n assert contact_from_home_page.address == contact_from_edit_page.address\n assert contact_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(contact_from_edit_page)\n assert contact_from_home_page.all_email_from_home_page == merge_email_like_on_home_page(contact_from_edit_page)\n\n# def test_phones_on_view_homepage(app):\n# contact_from_view_page = app.contact.get_contact_from_view_page(0)\n# contact_from_edit_page = app.contact.get_contact_info_from_edit_page(0)\n# assert contact_from_view_page.home == contact_from_edit_page.home\n# assert contact_from_view_page.mobile == contact_from_edit_page.mobile\n# assert contact_from_view_page.work == contact_from_edit_page.work\n\n#сравнение списка контактов с главной страницы со списком из ДБ\ndef test_phones_on_homepage_check_db(app, db):\n contact_from_home_page = app.contact.get_contact_list()\n contact_from_db = db.get_contact_list()\n assert len(contact_from_home_page) == len(contact_from_db)\n assert sorted(contact_from_home_page, key=Contact.id_or_max) == sorted(contact_from_db, key=Contact.id_or_max)\n\ndef clear(s):\n return re.sub(\"[() -]\", \"\", s)\n\ndef merge_phones_like_on_home_page(contact):\n return \"\\n\".join(filter(lambda x: x!=\"\",\n map(lambda x: clear(x),\n filter(lambda x: x is not None,\n [contact.home, contact.mobile, contact.work]))))\n\ndef merge_email_like_on_home_page(contact):\n return \"\\n\".join(filter(lambda x: x!=\"\",\n map(lambda x: clear(x),\n filter(lambda x: x is not None,\n [contact.email, contact.email2, contact.email3]))))\n\n\n\n\n\n\n",
"import re\nfrom random import randrange\nfrom fixture.contact import Contact\n\n\ndef test_phones_on_homepage(app):\n contact_from_home_page = app.contact.get_contact_list()[0]\n contact_from_edit_page = app.contact.get_contact_info_from_edit_page(0)\n assert contact_from_home_page.lastname == clear(contact_from_edit_page.\n lastname)\n assert contact_from_home_page.firstname == clear(contact_from_edit_page\n .firstname)\n assert contact_from_home_page.address == contact_from_edit_page.address\n assert contact_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(\n contact_from_edit_page)\n assert contact_from_home_page.all_email_from_home_page == merge_email_like_on_home_page(\n contact_from_edit_page)\n\n\ndef test_phones_on_homepage_check_db(app, db):\n contact_from_home_page = app.contact.get_contact_list()\n contact_from_db = db.get_contact_list()\n assert len(contact_from_home_page) == len(contact_from_db)\n assert sorted(contact_from_home_page, key=Contact.id_or_max) == sorted(\n contact_from_db, key=Contact.id_or_max)\n\n\ndef clear(s):\n return re.sub('[() -]', '', s)\n\n\ndef merge_phones_like_on_home_page(contact):\n return '\\n'.join(filter(lambda x: x != '', map(lambda x: clear(x),\n filter(lambda x: x is not None, [contact.home, contact.mobile,\n contact.work]))))\n\n\ndef merge_email_like_on_home_page(contact):\n return '\\n'.join(filter(lambda x: x != '', map(lambda x: clear(x),\n filter(lambda x: x is not None, [contact.email, contact.email2,\n contact.email3]))))\n",
"<import token>\n\n\ndef test_phones_on_homepage(app):\n contact_from_home_page = app.contact.get_contact_list()[0]\n contact_from_edit_page = app.contact.get_contact_info_from_edit_page(0)\n assert contact_from_home_page.lastname == clear(contact_from_edit_page.\n lastname)\n assert contact_from_home_page.firstname == clear(contact_from_edit_page\n .firstname)\n assert contact_from_home_page.address == contact_from_edit_page.address\n assert contact_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(\n contact_from_edit_page)\n assert contact_from_home_page.all_email_from_home_page == merge_email_like_on_home_page(\n contact_from_edit_page)\n\n\ndef test_phones_on_homepage_check_db(app, db):\n contact_from_home_page = app.contact.get_contact_list()\n contact_from_db = db.get_contact_list()\n assert len(contact_from_home_page) == len(contact_from_db)\n assert sorted(contact_from_home_page, key=Contact.id_or_max) == sorted(\n contact_from_db, key=Contact.id_or_max)\n\n\ndef clear(s):\n return re.sub('[() -]', '', s)\n\n\ndef merge_phones_like_on_home_page(contact):\n return '\\n'.join(filter(lambda x: x != '', map(lambda x: clear(x),\n filter(lambda x: x is not None, [contact.home, contact.mobile,\n contact.work]))))\n\n\ndef merge_email_like_on_home_page(contact):\n return '\\n'.join(filter(lambda x: x != '', map(lambda x: clear(x),\n filter(lambda x: x is not None, [contact.email, contact.email2,\n contact.email3]))))\n",
"<import token>\n\n\ndef test_phones_on_homepage(app):\n contact_from_home_page = app.contact.get_contact_list()[0]\n contact_from_edit_page = app.contact.get_contact_info_from_edit_page(0)\n assert contact_from_home_page.lastname == clear(contact_from_edit_page.\n lastname)\n assert contact_from_home_page.firstname == clear(contact_from_edit_page\n .firstname)\n assert contact_from_home_page.address == contact_from_edit_page.address\n assert contact_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(\n contact_from_edit_page)\n assert contact_from_home_page.all_email_from_home_page == merge_email_like_on_home_page(\n contact_from_edit_page)\n\n\ndef test_phones_on_homepage_check_db(app, db):\n contact_from_home_page = app.contact.get_contact_list()\n contact_from_db = db.get_contact_list()\n assert len(contact_from_home_page) == len(contact_from_db)\n assert sorted(contact_from_home_page, key=Contact.id_or_max) == sorted(\n contact_from_db, key=Contact.id_or_max)\n\n\ndef clear(s):\n return re.sub('[() -]', '', s)\n\n\n<function token>\n\n\ndef merge_email_like_on_home_page(contact):\n return '\\n'.join(filter(lambda x: x != '', map(lambda x: clear(x),\n filter(lambda x: x is not None, [contact.email, contact.email2,\n contact.email3]))))\n",
"<import token>\n<function token>\n\n\ndef test_phones_on_homepage_check_db(app, db):\n contact_from_home_page = app.contact.get_contact_list()\n contact_from_db = db.get_contact_list()\n assert len(contact_from_home_page) == len(contact_from_db)\n assert sorted(contact_from_home_page, key=Contact.id_or_max) == sorted(\n contact_from_db, key=Contact.id_or_max)\n\n\ndef clear(s):\n return re.sub('[() -]', '', s)\n\n\n<function token>\n\n\ndef merge_email_like_on_home_page(contact):\n return '\\n'.join(filter(lambda x: x != '', map(lambda x: clear(x),\n filter(lambda x: x is not None, [contact.email, contact.email2,\n contact.email3]))))\n",
"<import token>\n<function token>\n<function token>\n\n\ndef clear(s):\n return re.sub('[() -]', '', s)\n\n\n<function token>\n\n\ndef merge_email_like_on_home_page(contact):\n return '\\n'.join(filter(lambda x: x != '', map(lambda x: clear(x),\n filter(lambda x: x is not None, [contact.email, contact.email2,\n contact.email3]))))\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef merge_email_like_on_home_page(contact):\n return '\\n'.join(filter(lambda x: x != '', map(lambda x: clear(x),\n filter(lambda x: x is not None, [contact.email, contact.email2,\n contact.email3]))))\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
99,399 |
dbea66187cb132299c8a20e98db2dc674d59c3d0
|
# Generated by Django 2.1.5 on 2019-01-10 12:37

import backend.utils
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('posts', '0003_auto_20190110_0604'),
    ]

    operations = [
        migrations.AddField(
            model_name='post',
            name='unique_identifier',
            field=models.CharField(default=backend.utils.id_generator, max_length=8),
        ),
    ]
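# Note: `default` is the callable backend.utils.id_generator itself, not a call
# result, so each new Post saved after this migration gets a freshly generated
# 8-character identifier; rows that already exist when the migration runs share
# the single value Django evaluates once for the schema change.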
|
[
"# Generated by Django 2.1.5 on 2019-01-10 12:37\n\nimport backend.utils\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('posts', '0003_auto_20190110_0604'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='post',\n name='unique_identifier',\n field=models.CharField(default=backend.utils.id_generator, max_length=8),\n ),\n ]\n",
"import backend.utils\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('posts', '0003_auto_20190110_0604')]\n operations = [migrations.AddField(model_name='post', name=\n 'unique_identifier', field=models.CharField(default=backend.utils.\n id_generator, max_length=8))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('posts', '0003_auto_20190110_0604')]\n operations = [migrations.AddField(model_name='post', name=\n 'unique_identifier', field=models.CharField(default=backend.utils.\n id_generator, max_length=8))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |