index (int64, 0-100k) | blob_id (string, length 40) | code (string, 7-7.27M chars) | steps (list, 1-1.25k items) | error (bool, 2 classes)
---|---|---|---|---
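Each row below follows this schema. A minimal sketch of iterating such rows, assuming a Hugging Face-style dataset (the dataset path "user/code-steps" below is hypothetical; substitute the real one):

from datasets import load_dataset

# Hypothetical dataset path for illustration only.
ds = load_dataset("user/code-steps", split="train")
row = ds[900]
print(row["blob_id"])     # 40-character blob hash
print(len(row["steps"]))  # number of progressive abstraction steps
print(row["error"])       # whether processing of this row errored
print(row["code"][:200])  # start of the source snippet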
900 |
502e0f0c6376617dc094fcdd47bea9773d011864
|
def filter_lines(in_filename, in_filename2, out_filename):
    """Collect the FIPS keys (first comma-separated field) from
    in_filename, then write each record from in_filename2 whose FIPS
    key was NOT seen in the first file to out_filename.

    Returns (records read from in_filename2, records written).
    """
    proper_convert = 0
    missing_convert = 0
    fourteen_set = set()  # FIPS keys present in the first file
    with open(in_filename, 'r') as in_f, open(in_filename2, 'r') as in_f2, open(out_filename, 'w') as out_f:
        for line in in_f:
            fips = line.strip().split(",")[0]
            fourteen_set.add(fips)
        for line in in_f2:
            vals = line.strip().split(",")
            fips = vals[0]
            count = vals[1]
            proper_convert += 1
            if fips not in fourteen_set:
                out_f.write(fips + "," + count + "\n")
                missing_convert += 1
    return (proper_convert, missing_convert)
in_filename = "/Users/VamsiG/Music/2014_Data/FCC_Final_Output.csv"
in_filename1 = "/Users/VamsiG/Music/2016_Data/FCC_Final_Output.csv"
out_filename = "/Users/VamsiG/Music/FCC_Overlap_CompleteFips.csv"
counter1, new_vals1 = filter_lines(in_filename, in_filename1, out_filename)
print(counter1)
print(new_vals1)
|
[
"def filter_lines(in_filename, in_filename2,out_filename):\n \"\"\"Read records from in_filename and write records to out_filename if\n the beginning of the line (taken up to the first comma at or after\n position 11) is found in keys (which must be a set of byte strings).\n\n \"\"\"\n proper_convert = 0\n missing_convert = 0\n fourteen_set = set()\n with open(in_filename, 'r') as in_f, open(in_filename2, 'r') as in_f2, open(out_filename, 'w') as out_f:\n for line in in_f:\n vals = line.strip().split(\",\")\n fips = vals[0]\n if(fips not in fourteen_set):\n fourteen_set.add(fips)\n \n for line in in_f2:\n vals = line.strip().split(\",\")\n fips = vals[0]\n count = vals[1]\n proper_convert += 1\n if(fips not in fourteen_set):\n new_line = str(fips)+\",\"+str(count)+\"\\n\"\n out_f.write(new_line)\n missing_convert += 1\n\n return (proper_convert, missing_convert)\n\nin_filename = \"/Users/VamsiG/Music/2014_Data/FCC_Final_Output.csv\"\nin_filename1 = \"/Users/VamsiG/Music/2016_Data/FCC_Final_Output.csv\"\nout_filename= \"/Users/VamsiG/Music/FCC_Overlap_CompleteFips.csv\"\n\ncounter1, new_vals1 = filter_lines(in_filename,in_filename1,out_filename)\nprint(counter1)\nprint(new_vals1)",
"def filter_lines(in_filename, in_filename2, out_filename):\n \"\"\"Read records from in_filename and write records to out_filename if\n the beginning of the line (taken up to the first comma at or after\n position 11) is found in keys (which must be a set of byte strings).\n\n \"\"\"\n proper_convert = 0\n missing_convert = 0\n fourteen_set = set()\n with open(in_filename, 'r') as in_f, open(in_filename2, 'r'\n ) as in_f2, open(out_filename, 'w') as out_f:\n for line in in_f:\n vals = line.strip().split(',')\n fips = vals[0]\n if fips not in fourteen_set:\n fourteen_set.add(fips)\n for line in in_f2:\n vals = line.strip().split(',')\n fips = vals[0]\n count = vals[1]\n proper_convert += 1\n if fips not in fourteen_set:\n new_line = str(fips) + ',' + str(count) + '\\n'\n out_f.write(new_line)\n missing_convert += 1\n return proper_convert, missing_convert\n\n\nin_filename = '/Users/VamsiG/Music/2014_Data/FCC_Final_Output.csv'\nin_filename1 = '/Users/VamsiG/Music/2016_Data/FCC_Final_Output.csv'\nout_filename = '/Users/VamsiG/Music/FCC_Overlap_CompleteFips.csv'\ncounter1, new_vals1 = filter_lines(in_filename, in_filename1, out_filename)\nprint(counter1)\nprint(new_vals1)\n",
"def filter_lines(in_filename, in_filename2, out_filename):\n \"\"\"Read records from in_filename and write records to out_filename if\n the beginning of the line (taken up to the first comma at or after\n position 11) is found in keys (which must be a set of byte strings).\n\n \"\"\"\n proper_convert = 0\n missing_convert = 0\n fourteen_set = set()\n with open(in_filename, 'r') as in_f, open(in_filename2, 'r'\n ) as in_f2, open(out_filename, 'w') as out_f:\n for line in in_f:\n vals = line.strip().split(',')\n fips = vals[0]\n if fips not in fourteen_set:\n fourteen_set.add(fips)\n for line in in_f2:\n vals = line.strip().split(',')\n fips = vals[0]\n count = vals[1]\n proper_convert += 1\n if fips not in fourteen_set:\n new_line = str(fips) + ',' + str(count) + '\\n'\n out_f.write(new_line)\n missing_convert += 1\n return proper_convert, missing_convert\n\n\n<assignment token>\nprint(counter1)\nprint(new_vals1)\n",
"def filter_lines(in_filename, in_filename2, out_filename):\n \"\"\"Read records from in_filename and write records to out_filename if\n the beginning of the line (taken up to the first comma at or after\n position 11) is found in keys (which must be a set of byte strings).\n\n \"\"\"\n proper_convert = 0\n missing_convert = 0\n fourteen_set = set()\n with open(in_filename, 'r') as in_f, open(in_filename2, 'r'\n ) as in_f2, open(out_filename, 'w') as out_f:\n for line in in_f:\n vals = line.strip().split(',')\n fips = vals[0]\n if fips not in fourteen_set:\n fourteen_set.add(fips)\n for line in in_f2:\n vals = line.strip().split(',')\n fips = vals[0]\n count = vals[1]\n proper_convert += 1\n if fips not in fourteen_set:\n new_line = str(fips) + ',' + str(count) + '\\n'\n out_f.write(new_line)\n missing_convert += 1\n return proper_convert, missing_convert\n\n\n<assignment token>\n<code token>\n",
"<function token>\n<assignment token>\n<code token>\n"
] | false |
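A hedged usage sketch for filter_lines above, run against two tiny temporary CSV files (the FIPS values are made up for illustration):

import os
import tempfile

tmp = tempfile.mkdtemp()
a = os.path.join(tmp, "a.csv")
b = os.path.join(tmp, "b.csv")
out = os.path.join(tmp, "out.csv")
with open(a, "w") as f:
    f.write("01001,5\n01003,7\n")  # keys present in the first file
with open(b, "w") as f:
    f.write("01001,9\n99999,2\n")  # 99999 never appears in a.csv
total, missing = filter_lines(a, b, out)
print(total, missing)  # -> 2 1
with open(out) as f:
    print(f.read())    # -> 99999,2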
901 |
a17abd3947a946daf2c453c120f2e79d2ba60778
|
# Contest scoring
# Problem: in a programming contest, 10 judges score each contestant
# from 0 to 100. The final score is the average of the remaining 8
# scores after dropping one highest and one lowest score.
sc_lst = []
i = 1
while len(sc_lst) < 10:
    try:
        sc = int(input('Judge %d, please enter a score: ' % i))
        if 0 <= sc <= 100:
            sc_lst.append(sc)
            i += 1
        else:
            print('Out of range, input ignored')
    except ValueError:
        print('Please enter a number between 0 and 100')
max_sc = max(sc_lst)
min_sc = min(sc_lst)
sc_lst.remove(max_sc)
sc_lst.remove(min_sc)
ave_sc = sum(sc_lst) / len(sc_lst)
print('Dropped highest %d and lowest %d; average is %.2f' % (max_sc, min_sc, ave_sc))
print('end')
|
[
"# 赛场统分\n# 【问题】在编程竞赛中,有10个评委为参赛的选手打分,分数为0 ~ 100分。\n# 选手最后得分为:去掉一个最高分和一个最低分后其余8个分数的平均值。请编写一个程序实现。\n\nsc_lst = []\ni = 1\nwhile len(sc_lst) < 10:\n try:\n sc = int(input('请第%d位评委打分:' % i))\n if sc > 0 and sc < 101:\n sc_lst.append(sc)\n i += 1\n else:\n print('超出范围,输入无效')\n except:\n print('请输入1-100以内的数字')\n\nmax_sc = max(sc_lst)\nmin_sc = min(sc_lst)\nsc_lst.remove(max_sc)\nsc_lst.remove(min_sc)\nave_sc = sum(sc_lst) / len(sc_lst)\nprint('去除最高分%d,最低分%d,平均分为%d' % (max_sc, min_sc, ave_sc))\nprint('end')\n",
"sc_lst = []\ni = 1\nwhile len(sc_lst) < 10:\n try:\n sc = int(input('请第%d位评委打分:' % i))\n if sc > 0 and sc < 101:\n sc_lst.append(sc)\n i += 1\n else:\n print('超出范围,输入无效')\n except:\n print('请输入1-100以内的数字')\nmax_sc = max(sc_lst)\nmin_sc = min(sc_lst)\nsc_lst.remove(max_sc)\nsc_lst.remove(min_sc)\nave_sc = sum(sc_lst) / len(sc_lst)\nprint('去除最高分%d,最低分%d,平均分为%d' % (max_sc, min_sc, ave_sc))\nprint('end')\n",
"<assignment token>\nwhile len(sc_lst) < 10:\n try:\n sc = int(input('请第%d位评委打分:' % i))\n if sc > 0 and sc < 101:\n sc_lst.append(sc)\n i += 1\n else:\n print('超出范围,输入无效')\n except:\n print('请输入1-100以内的数字')\n<assignment token>\nsc_lst.remove(max_sc)\nsc_lst.remove(min_sc)\n<assignment token>\nprint('去除最高分%d,最低分%d,平均分为%d' % (max_sc, min_sc, ave_sc))\nprint('end')\n",
"<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
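The same trimmed-mean rule, sketched non-interactively with ten made-up scores:

scores = [88, 92, 75, 90, 85, 95, 60, 83, 91, 87]
trimmed = sorted(scores)[1:-1]      # drop exactly one lowest and one highest
print(sum(trimmed) / len(trimmed))  # average of the remaining eight -> 86.375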
902 |
04670041dab49f8c2d4a0415030356e7ea92925f
|
from tempfile import mkdtemp
from shutil import rmtree
from os.path import join

MAX_UNCOMPRESSED_SIZE = 100e6  # 100 MB

# Extracts a zipfile into a temporary directory safely: rejects path
# traversal and oversized archives (zip bombs).
class ModelExtractor(object):
    def __init__(self, modelzip):
        self.modelzip = modelzip

    def __enter__(self):
        if not self.__is_model_good():
            raise ValueError('Invalid model zip file')
        obj = self.__get_obj_filename()
        if obj is None:
            raise ValueError('No obj file present in model zip')
        self.path = mkdtemp()
        try:
            self.modelzip.extractall(self.path)
        except Exception as exc:
            # __exit__ never runs if __enter__ raises, so clean up here.
            rmtree(self.path, ignore_errors=True)
            raise ValueError('Error while extracting zip file') from exc
        return {
            'path': self.path,
            'obj': join(self.path, obj)
        }

    def __exit__(self, type, value, tb):
        rmtree(self.path, ignore_errors=True)

    def __is_model_good(self):
        total_size_uncompressed = 0
        for path in self.modelzip.namelist():
            # Reject entries that could escape the extraction directory.
            if '..' in path or path.startswith('/'):
                return False
            info = self.modelzip.getinfo(path)
            total_size_uncompressed += info.file_size
        return total_size_uncompressed < MAX_UNCOMPRESSED_SIZE

    def __get_obj_filename(self):
        for info in self.modelzip.infolist():
            if info.filename.endswith('.obj'):
                return info.filename
        return None
|
[
"from tempfile import mkdtemp\nfrom shutil import rmtree\nfrom os.path import join\nimport os\n\nMAX_UNCOMPRESSED_SIZE = 100e6 # 100MB\n\n# Extracts a zipfile into a directory safely\nclass ModelExtractor(object):\n def __init__(self, modelzip):\n self.modelzip = modelzip \n\n def __enter__(self):\n if not self.__is_model_good():\n raise ValueError('Invalid model zip file')\n\n obj = self.__get_obj_filename()\n if obj is None:\n raise ValueError('No obj file present in model zip')\n\n self.path = mkdtemp()\n\n try:\n self.modelzip.extractall(self.path)\n except:\n raise ValueError('Error while extracting zip file')\n\n return {\n 'path': self.path,\n 'obj': join(self.path, obj)\n }\n\n def __exit__(self, type, value, tb):\n rmtree(self.path, ignore_errors=True)\n\n def __is_model_good(self):\n total_size_uncompressed = 0\n\n for path in self.modelzip.namelist():\n if '..' in path or path.startswith('/'):\n return False\n\n info = self.modelzip.getinfo(path)\n\n uncompressed_size = info.file_size\n total_size_uncompressed += uncompressed_size\n\n return total_size_uncompressed < MAX_UNCOMPRESSED_SIZE\n\n def __get_obj_filename(self):\n for info in self.modelzip.infolist():\n if info.filename.endswith('.obj'):\n return info.filename\n\n return None\n",
"from tempfile import mkdtemp\nfrom shutil import rmtree\nfrom os.path import join\nimport os\nMAX_UNCOMPRESSED_SIZE = 100000000.0\n\n\nclass ModelExtractor(object):\n\n def __init__(self, modelzip):\n self.modelzip = modelzip\n\n def __enter__(self):\n if not self.__is_model_good():\n raise ValueError('Invalid model zip file')\n obj = self.__get_obj_filename()\n if obj is None:\n raise ValueError('No obj file present in model zip')\n self.path = mkdtemp()\n try:\n self.modelzip.extractall(self.path)\n except:\n raise ValueError('Error while extracting zip file')\n return {'path': self.path, 'obj': join(self.path, obj)}\n\n def __exit__(self, type, value, tb):\n rmtree(self.path, ignore_errors=True)\n\n def __is_model_good(self):\n total_size_uncompressed = 0\n for path in self.modelzip.namelist():\n if '..' in path or path.startswith('/'):\n return False\n info = self.modelzip.getinfo(path)\n uncompressed_size = info.file_size\n total_size_uncompressed += uncompressed_size\n return total_size_uncompressed < MAX_UNCOMPRESSED_SIZE\n\n def __get_obj_filename(self):\n for info in self.modelzip.infolist():\n if info.filename.endswith('.obj'):\n return info.filename\n return None\n",
"<import token>\nMAX_UNCOMPRESSED_SIZE = 100000000.0\n\n\nclass ModelExtractor(object):\n\n def __init__(self, modelzip):\n self.modelzip = modelzip\n\n def __enter__(self):\n if not self.__is_model_good():\n raise ValueError('Invalid model zip file')\n obj = self.__get_obj_filename()\n if obj is None:\n raise ValueError('No obj file present in model zip')\n self.path = mkdtemp()\n try:\n self.modelzip.extractall(self.path)\n except:\n raise ValueError('Error while extracting zip file')\n return {'path': self.path, 'obj': join(self.path, obj)}\n\n def __exit__(self, type, value, tb):\n rmtree(self.path, ignore_errors=True)\n\n def __is_model_good(self):\n total_size_uncompressed = 0\n for path in self.modelzip.namelist():\n if '..' in path or path.startswith('/'):\n return False\n info = self.modelzip.getinfo(path)\n uncompressed_size = info.file_size\n total_size_uncompressed += uncompressed_size\n return total_size_uncompressed < MAX_UNCOMPRESSED_SIZE\n\n def __get_obj_filename(self):\n for info in self.modelzip.infolist():\n if info.filename.endswith('.obj'):\n return info.filename\n return None\n",
"<import token>\n<assignment token>\n\n\nclass ModelExtractor(object):\n\n def __init__(self, modelzip):\n self.modelzip = modelzip\n\n def __enter__(self):\n if not self.__is_model_good():\n raise ValueError('Invalid model zip file')\n obj = self.__get_obj_filename()\n if obj is None:\n raise ValueError('No obj file present in model zip')\n self.path = mkdtemp()\n try:\n self.modelzip.extractall(self.path)\n except:\n raise ValueError('Error while extracting zip file')\n return {'path': self.path, 'obj': join(self.path, obj)}\n\n def __exit__(self, type, value, tb):\n rmtree(self.path, ignore_errors=True)\n\n def __is_model_good(self):\n total_size_uncompressed = 0\n for path in self.modelzip.namelist():\n if '..' in path or path.startswith('/'):\n return False\n info = self.modelzip.getinfo(path)\n uncompressed_size = info.file_size\n total_size_uncompressed += uncompressed_size\n return total_size_uncompressed < MAX_UNCOMPRESSED_SIZE\n\n def __get_obj_filename(self):\n for info in self.modelzip.infolist():\n if info.filename.endswith('.obj'):\n return info.filename\n return None\n",
"<import token>\n<assignment token>\n\n\nclass ModelExtractor(object):\n\n def __init__(self, modelzip):\n self.modelzip = modelzip\n <function token>\n\n def __exit__(self, type, value, tb):\n rmtree(self.path, ignore_errors=True)\n\n def __is_model_good(self):\n total_size_uncompressed = 0\n for path in self.modelzip.namelist():\n if '..' in path or path.startswith('/'):\n return False\n info = self.modelzip.getinfo(path)\n uncompressed_size = info.file_size\n total_size_uncompressed += uncompressed_size\n return total_size_uncompressed < MAX_UNCOMPRESSED_SIZE\n\n def __get_obj_filename(self):\n for info in self.modelzip.infolist():\n if info.filename.endswith('.obj'):\n return info.filename\n return None\n",
"<import token>\n<assignment token>\n\n\nclass ModelExtractor(object):\n <function token>\n <function token>\n\n def __exit__(self, type, value, tb):\n rmtree(self.path, ignore_errors=True)\n\n def __is_model_good(self):\n total_size_uncompressed = 0\n for path in self.modelzip.namelist():\n if '..' in path or path.startswith('/'):\n return False\n info = self.modelzip.getinfo(path)\n uncompressed_size = info.file_size\n total_size_uncompressed += uncompressed_size\n return total_size_uncompressed < MAX_UNCOMPRESSED_SIZE\n\n def __get_obj_filename(self):\n for info in self.modelzip.infolist():\n if info.filename.endswith('.obj'):\n return info.filename\n return None\n",
"<import token>\n<assignment token>\n\n\nclass ModelExtractor(object):\n <function token>\n <function token>\n\n def __exit__(self, type, value, tb):\n rmtree(self.path, ignore_errors=True)\n <function token>\n\n def __get_obj_filename(self):\n for info in self.modelzip.infolist():\n if info.filename.endswith('.obj'):\n return info.filename\n return None\n",
"<import token>\n<assignment token>\n\n\nclass ModelExtractor(object):\n <function token>\n <function token>\n\n def __exit__(self, type, value, tb):\n rmtree(self.path, ignore_errors=True)\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n\n\nclass ModelExtractor(object):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n<class token>\n"
] | false |
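A hedged usage sketch for ModelExtractor above; the archive name model.zip is hypothetical and must contain a .obj file:

import zipfile

with zipfile.ZipFile("model.zip") as zf:
    with ModelExtractor(zf) as model:
        print(model["path"])  # temporary directory holding the extracted files
        print(model["obj"])   # full path to the .obj file inside it
# the temporary directory is deleted when the inner block exits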
903 |
cf3b66a635c6549553af738f263b035217e75a7a
|
count = 0

def merge(a, b):
    """Merge two sorted lists, counting inversions in the global count."""
    global count
    c = []
    h = j = 0
    while j < len(a) and h < len(b):
        if a[j] <= b[h]:
            c.append(a[j])
            j += 1
        else:
            # b[h] precedes every remaining element of a; each such
            # pair is an inversion.
            count += len(a) - j
            c.append(b[h])
            h += 1
    if j == len(a):
        c.extend(b[h:])
    else:
        c.extend(a[j:])
    return c

def merge_sort(lists):
    if len(lists) <= 1:
        return lists
    middle = len(lists) // 2
    left = merge_sort(lists[:middle])
    right = merge_sort(lists[middle:])
    return merge(left, right)

if __name__ == '__main__':
    a = [7, 6, 5, 9, 10, 11]
    print(merge_sort(a))
    print(count)
|
[
"count=0\ndef merge(a, b):\n global count\n c = []\n h = j = 0\n while j < len(a) and h < len(b):\n if a[j] <= b[h]:\n c.append(a[j])\n j += 1\n else:\n count+=(len(a[j:]))\n c.append(b[h])\n h += 1\n\n if j == len(a):\n for i in b[h:]:\n c.append(i)\n else:\n for i in a[j:]:\n c.append(i)\n # count += h+1\n\n return c\n\ndef merge_sort(lists):\n if len(lists) <= 1:\n return lists\n middle = len(lists)//2\n left = merge_sort(lists[:middle])\n right = merge_sort(lists[middle:])\n return merge(left, right)\n\n\nif __name__ == '__main__':\n a = [7, 6, 5,9, 10, 11]\n print(merge_sort(a))\n print(count)\n hash(i)",
"count = 0\n\n\ndef merge(a, b):\n global count\n c = []\n h = j = 0\n while j < len(a) and h < len(b):\n if a[j] <= b[h]:\n c.append(a[j])\n j += 1\n else:\n count += len(a[j:])\n c.append(b[h])\n h += 1\n if j == len(a):\n for i in b[h:]:\n c.append(i)\n else:\n for i in a[j:]:\n c.append(i)\n return c\n\n\ndef merge_sort(lists):\n if len(lists) <= 1:\n return lists\n middle = len(lists) // 2\n left = merge_sort(lists[:middle])\n right = merge_sort(lists[middle:])\n return merge(left, right)\n\n\nif __name__ == '__main__':\n a = [7, 6, 5, 9, 10, 11]\n print(merge_sort(a))\n print(count)\n hash(i)\n",
"<assignment token>\n\n\ndef merge(a, b):\n global count\n c = []\n h = j = 0\n while j < len(a) and h < len(b):\n if a[j] <= b[h]:\n c.append(a[j])\n j += 1\n else:\n count += len(a[j:])\n c.append(b[h])\n h += 1\n if j == len(a):\n for i in b[h:]:\n c.append(i)\n else:\n for i in a[j:]:\n c.append(i)\n return c\n\n\ndef merge_sort(lists):\n if len(lists) <= 1:\n return lists\n middle = len(lists) // 2\n left = merge_sort(lists[:middle])\n right = merge_sort(lists[middle:])\n return merge(left, right)\n\n\nif __name__ == '__main__':\n a = [7, 6, 5, 9, 10, 11]\n print(merge_sort(a))\n print(count)\n hash(i)\n",
"<assignment token>\n\n\ndef merge(a, b):\n global count\n c = []\n h = j = 0\n while j < len(a) and h < len(b):\n if a[j] <= b[h]:\n c.append(a[j])\n j += 1\n else:\n count += len(a[j:])\n c.append(b[h])\n h += 1\n if j == len(a):\n for i in b[h:]:\n c.append(i)\n else:\n for i in a[j:]:\n c.append(i)\n return c\n\n\ndef merge_sort(lists):\n if len(lists) <= 1:\n return lists\n middle = len(lists) // 2\n left = merge_sort(lists[:middle])\n right = merge_sort(lists[middle:])\n return merge(left, right)\n\n\n<code token>\n",
"<assignment token>\n\n\ndef merge(a, b):\n global count\n c = []\n h = j = 0\n while j < len(a) and h < len(b):\n if a[j] <= b[h]:\n c.append(a[j])\n j += 1\n else:\n count += len(a[j:])\n c.append(b[h])\n h += 1\n if j == len(a):\n for i in b[h:]:\n c.append(i)\n else:\n for i in a[j:]:\n c.append(i)\n return c\n\n\n<function token>\n<code token>\n",
"<assignment token>\n<function token>\n<function token>\n<code token>\n"
] | false |
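A quick sanity check of the inversion counter against an O(n^2) brute force, using the functions above with a small made-up input:

a = [7, 6, 5, 9, 10, 11]
count = 0  # reset the module-level counter before sorting
merge_sort(a)
brute = sum(1 for i in range(len(a))
            for j in range(i + 1, len(a)) if a[i] > a[j])
print(count, brute)  # both print 3: the inversions (7,6), (7,5), (6,5)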
904 |
d2298ad1e4737b983ba6d1f2fff59750137510b5
|
import json
import os
import uuid
from django.core.files.uploadedfile import SimpleUploadedFile
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from nautobot.dcim.models import Site
from nautobot.extras.choices import JobResultStatusChoices
from nautobot.extras.jobs import get_job, run_job
from nautobot.extras.models import FileAttachment, FileProxy, JobResult
from nautobot.utilities.testing import TestCase
class JobTest(TestCase):
"""
Test basic jobs to ensure importing works.
"""
maxDiff = None
@classmethod
def setUpTestData(cls):
cls.job_content_type = ContentType.objects.get(app_label="extras", model="job")
def test_job_pass(self):
"""
Job test with pass result.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
module = "test_pass"
name = "TestPass"
job_class = get_job(f"local/{module}/{name}")
job_result = JobResult.objects.create(
name=job_class.class_path,
obj_type=self.job_content_type,
user=None,
job_id=uuid.uuid4(),
)
run_job(data={}, request=None, commit=False, job_result_pk=job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_COMPLETED)
def test_job_fail(self):
"""
Job test with fail result.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
module = "test_fail"
name = "TestFail"
job_class = get_job(f"local/{module}/{name}")
job_result = JobResult.objects.create(
name=job_class.class_path,
obj_type=self.job_content_type,
user=None,
job_id=uuid.uuid4(),
)
run_job(data={}, request=None, commit=False, job_result_pk=job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_ERRORED)
def test_field_order(self):
"""
Job test with field order.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
module = "test_field_order"
name = "TestFieldOrder"
job_class = get_job(f"local/{module}/{name}")
form = job_class().as_form()
self.assertHTMLEqual(
form.as_table(),
"""<tr><th><label for="id_var2">Var2:</label></th><td>
<input class="form-control form-control" id="id_var2" name="var2" placeholder="None" required type="text">
<br><span class="helptext">Hello</span></td></tr>
<tr><th><label for="id_var23">Var23:</label></th><td>
<input class="form-control form-control" id="id_var23" name="var23" placeholder="None" required type="text">
<br><span class="helptext">I want to be second</span></td></tr>
<tr><th><label for="id__commit">Commit changes:</label></th><td>
<input checked id="id__commit" name="_commit" placeholder="Commit changes" type="checkbox">
<br><span class="helptext">Commit changes to the database (uncheck for a dry-run)</span></td></tr>""",
)
def test_no_field_order(self):
"""
Job test without field_order.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
module = "test_no_field_order"
name = "TestNoFieldOrder"
job_class = get_job(f"local/{module}/{name}")
form = job_class().as_form()
self.assertHTMLEqual(
form.as_table(),
"""<tr><th><label for="id_var23">Var23:</label></th><td>
<input class="form-control form-control" id="id_var23" name="var23" placeholder="None" required type="text">
<br><span class="helptext">I want to be second</span></td></tr>
<tr><th><label for="id_var2">Var2:</label></th><td>
<input class="form-control form-control" id="id_var2" name="var2" placeholder="None" required type="text">
<br><span class="helptext">Hello</span></td></tr>
<tr><th><label for="id__commit">Commit changes:</label></th><td>
<input checked id="id__commit" name="_commit" placeholder="Commit changes" type="checkbox">
<br><span class="helptext">Commit changes to the database (uncheck for a dry-run)</span></td></tr>""",
)
def test_ready_only_job_pass(self):
"""
Job read only test with pass result.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
module = "test_read_only_pass"
name = "TestReadOnlyPass"
job_class = get_job(f"local/{module}/{name}")
job_result = JobResult.objects.create(
name=job_class.class_path,
obj_type=self.job_content_type,
user=None,
job_id=uuid.uuid4(),
)
run_job(data={}, request=None, commit=False, job_result_pk=job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_COMPLETED)
self.assertEqual(Site.objects.count(), 0) # Ensure DB transaction was aborted
def test_read_only_job_fail(self):
"""
Job read only test with fail result.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
module = "test_read_only_fail"
name = "TestReadOnlyFail"
job_class = get_job(f"local/{module}/{name}")
job_result = JobResult.objects.create(
name=job_class.class_path,
obj_type=self.job_content_type,
user=None,
job_id=uuid.uuid4(),
)
run_job(data={}, request=None, commit=False, job_result_pk=job_result.pk)
job_result.refresh_from_db()
self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_ERRORED)
self.assertEqual(Site.objects.count(), 0) # Ensure DB transaction was aborted
# Also ensure the standard log message about aborting the transaction is *not* present
self.assertNotEqual(
job_result.data["run"]["log"][-1][-1], "Database changes have been reverted due to error."
)
def test_read_only_no_commit_field(self):
"""
Job read only test commit field is not shown.
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
module = "test_read_only_no_commit_field"
name = "TestReadOnlyNoCommitField"
job_class = get_job(f"local/{module}/{name}")
form = job_class().as_form()
self.assertHTMLEqual(
form.as_table(),
"""<tr><th><label for="id_var">Var:</label></th><td>
<input class="form-control form-control" id="id_var" name="var" placeholder="None" required type="text">
<br><span class="helptext">Hello</span><input id="id__commit" name="_commit" type="hidden" value="False"></td></tr>""",
)
def test_ip_address_vars(self):
"""
Test that IPAddress variable fields behave as expected.
This test case exercises the following types for both IPv4 and IPv6:
- IPAddressVar
- IPAddressWithMaskVar
- IPNetworkVar
"""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
module = "test_ipaddress_vars"
name = "TestIPAddresses"
job_class = get_job(f"local/{module}/{name}")
# Fill out the form
form_data = dict(
ipv4_address="1.2.3.4",
ipv4_with_mask="1.2.3.4/32",
ipv4_network="1.2.3.0/24",
ipv6_address="2001:db8::1",
ipv6_with_mask="2001:db8::1/64",
ipv6_network="2001:db8::/64",
)
form = job_class().as_form(form_data)
self.assertTrue(form.is_valid())
# Prepare the job data
job_result = JobResult.objects.create(
name=job_class.class_path,
obj_type=self.job_content_type,
user=None,
job_id=uuid.uuid4(),
)
data = job_class.serialize_data(form.cleaned_data)
# Run the job and extract the job payload data
run_job(data=data, request=None, commit=False, job_result_pk=job_result.pk)
job_result.refresh_from_db()
job_payload = job_result.data["run"]["log"][0][2] # Indexing makes me sad.
job_result_data = json.loads(job_payload)
# Assert stuff
self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_COMPLETED)
self.assertEqual(form_data, job_result_data)
class JobFileUploadTest(TestCase):
"""Test a job that uploads/deletes files."""
@classmethod
def setUpTestData(cls):
cls.file_contents = b"I am content.\n"
cls.dummy_file = SimpleUploadedFile(name="dummy.txt", content=cls.file_contents)
cls.job_content_type = ContentType.objects.get(app_label="extras", model="job")
def setUp(self):
self.dummy_file.seek(0) # Reset cursor so we can read it again.
def test_run_job_pass(self):
"""Test that file upload succeeds; job SUCCEEDS; and files are deleted."""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
job_name = "local/test_file_upload_pass/TestFileUploadPass"
job_class = get_job(job_name)
job_result = JobResult.objects.create(
name=job_class.class_path,
obj_type=self.job_content_type,
user=None,
job_id=uuid.uuid4(),
)
# Serialize the file to FileProxy
data = {"file": self.dummy_file}
form = job_class().as_form(files=data)
self.assertTrue(form.is_valid())
serialized_data = job_class.serialize_data(form.cleaned_data)
# Assert that the file was serialized to a FileProxy
self.assertTrue(isinstance(serialized_data["file"], uuid.UUID))
self.assertEqual(serialized_data["file"], FileProxy.objects.latest().pk)
self.assertEqual(FileProxy.objects.count(), 1)
# Run the job
run_job(data=serialized_data, request=None, commit=False, job_result_pk=job_result.pk)
job_result.refresh_from_db()
# Assert that file contents were correctly read
self.assertEqual(
job_result.data["run"]["log"][0][2], f"File contents: {self.file_contents}" # "File contents: ..."
)
# Assert that FileProxy was cleaned up
self.assertEqual(FileProxy.objects.count(), 0)
def test_run_job_fail(self):
"""Test that file upload succeeds; job FAILS; files deleted."""
with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, "extras/tests/dummy_jobs")):
job_name = "local/test_file_upload_fail/TestFileUploadFail"
job_class = get_job(job_name)
job_result = JobResult.objects.create(
name=job_class.class_path,
obj_type=self.job_content_type,
user=None,
job_id=uuid.uuid4(),
)
# Serialize the file to FileProxy
data = {"file": self.dummy_file}
form = job_class().as_form(files=data)
self.assertTrue(form.is_valid())
serialized_data = job_class.serialize_data(form.cleaned_data)
# Assert that the file was serialized to a FileProxy
self.assertTrue(isinstance(serialized_data["file"], uuid.UUID))
self.assertEqual(serialized_data["file"], FileProxy.objects.latest().pk)
self.assertEqual(FileProxy.objects.count(), 1)
# Run the job
run_job(data=serialized_data, request=None, commit=False, job_result_pk=job_result.pk)
job_result.refresh_from_db()
# Assert that file contents were correctly read
self.assertEqual(
job_result.data["run"]["log"][0][2], f"File contents: {self.file_contents}" # "File contents: ..."
)
# Also ensure the standard log message about aborting the transaction is present
self.assertEqual(job_result.data["run"]["log"][-1][-1], "Database changes have been reverted due to error.")
# Assert that FileProxy was cleaned up
self.assertEqual(FileProxy.objects.count(), 0)
|
[
"import json\nimport os\nimport uuid\n\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom nautobot.dcim.models import Site\nfrom nautobot.extras.choices import JobResultStatusChoices\nfrom nautobot.extras.jobs import get_job, run_job\nfrom nautobot.extras.models import FileAttachment, FileProxy, JobResult\nfrom nautobot.utilities.testing import TestCase\n\n\nclass JobTest(TestCase):\n \"\"\"\n Test basic jobs to ensure importing works.\n \"\"\"\n\n maxDiff = None\n\n @classmethod\n def setUpTestData(cls):\n cls.job_content_type = ContentType.objects.get(app_label=\"extras\", model=\"job\")\n\n def test_job_pass(self):\n \"\"\"\n Job test with pass result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n\n module = \"test_pass\"\n name = \"TestPass\"\n job_class = get_job(f\"local/{module}/{name}\")\n\n job_result = JobResult.objects.create(\n name=job_class.class_path,\n obj_type=self.job_content_type,\n user=None,\n job_id=uuid.uuid4(),\n )\n\n run_job(data={}, request=None, commit=False, job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_COMPLETED)\n\n def test_job_fail(self):\n \"\"\"\n Job test with fail result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n\n module = \"test_fail\"\n name = \"TestFail\"\n job_class = get_job(f\"local/{module}/{name}\")\n job_result = JobResult.objects.create(\n name=job_class.class_path,\n obj_type=self.job_content_type,\n user=None,\n job_id=uuid.uuid4(),\n )\n run_job(data={}, request=None, commit=False, job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_ERRORED)\n\n def test_field_order(self):\n \"\"\"\n Job test with field order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n\n module = \"test_field_order\"\n name = \"TestFieldOrder\"\n job_class = get_job(f\"local/{module}/{name}\")\n\n form = job_class().as_form()\n\n self.assertHTMLEqual(\n form.as_table(),\n \"\"\"<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\",\n )\n\n def test_no_field_order(self):\n \"\"\"\n Job test without field_order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n\n module = \"test_no_field_order\"\n name = \"TestNoFieldOrder\"\n job_class = get_job(f\"local/{module}/{name}\")\n\n form = job_class().as_form()\n\n self.assertHTMLEqual(\n form.as_table(),\n \"\"\"<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" 
placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\",\n )\n\n def test_ready_only_job_pass(self):\n \"\"\"\n Job read only test with pass result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n\n module = \"test_read_only_pass\"\n name = \"TestReadOnlyPass\"\n job_class = get_job(f\"local/{module}/{name}\")\n\n job_result = JobResult.objects.create(\n name=job_class.class_path,\n obj_type=self.job_content_type,\n user=None,\n job_id=uuid.uuid4(),\n )\n\n run_job(data={}, request=None, commit=False, job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_COMPLETED)\n self.assertEqual(Site.objects.count(), 0) # Ensure DB transaction was aborted\n\n def test_read_only_job_fail(self):\n \"\"\"\n Job read only test with fail result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n\n module = \"test_read_only_fail\"\n name = \"TestReadOnlyFail\"\n job_class = get_job(f\"local/{module}/{name}\")\n job_result = JobResult.objects.create(\n name=job_class.class_path,\n obj_type=self.job_content_type,\n user=None,\n job_id=uuid.uuid4(),\n )\n run_job(data={}, request=None, commit=False, job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_ERRORED)\n self.assertEqual(Site.objects.count(), 0) # Ensure DB transaction was aborted\n # Also ensure the standard log message about aborting the transaction is *not* present\n self.assertNotEqual(\n job_result.data[\"run\"][\"log\"][-1][-1], \"Database changes have been reverted due to error.\"\n )\n\n def test_read_only_no_commit_field(self):\n \"\"\"\n Job read only test commit field is not shown.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n\n module = \"test_read_only_no_commit_field\"\n name = \"TestReadOnlyNoCommitField\"\n job_class = get_job(f\"local/{module}/{name}\")\n\n form = job_class().as_form()\n\n self.assertHTMLEqual(\n form.as_table(),\n \"\"\"<tr><th><label for=\"id_var\">Var:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var\" name=\"var\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span><input id=\"id__commit\" name=\"_commit\" type=\"hidden\" value=\"False\"></td></tr>\"\"\",\n )\n\n def test_ip_address_vars(self):\n \"\"\"\n Test that IPAddress variable fields behave as expected.\n\n This test case exercises the following types for both IPv4 and IPv6:\n\n - IPAddressVar\n - IPAddressWithMaskVar\n - IPNetworkVar\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n\n module = \"test_ipaddress_vars\"\n name = \"TestIPAddresses\"\n job_class = get_job(f\"local/{module}/{name}\")\n\n # Fill out the form\n form_data = dict(\n ipv4_address=\"1.2.3.4\",\n 
ipv4_with_mask=\"1.2.3.4/32\",\n ipv4_network=\"1.2.3.0/24\",\n ipv6_address=\"2001:db8::1\",\n ipv6_with_mask=\"2001:db8::1/64\",\n ipv6_network=\"2001:db8::/64\",\n )\n form = job_class().as_form(form_data)\n self.assertTrue(form.is_valid())\n\n # Prepare the job data\n job_result = JobResult.objects.create(\n name=job_class.class_path,\n obj_type=self.job_content_type,\n user=None,\n job_id=uuid.uuid4(),\n )\n data = job_class.serialize_data(form.cleaned_data)\n\n # Run the job and extract the job payload data\n run_job(data=data, request=None, commit=False, job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n job_payload = job_result.data[\"run\"][\"log\"][0][2] # Indexing makes me sad.\n job_result_data = json.loads(job_payload)\n\n # Assert stuff\n self.assertEqual(job_result.status, JobResultStatusChoices.STATUS_COMPLETED)\n self.assertEqual(form_data, job_result_data)\n\n\nclass JobFileUploadTest(TestCase):\n \"\"\"Test a job that uploads/deletes files.\"\"\"\n\n @classmethod\n def setUpTestData(cls):\n cls.file_contents = b\"I am content.\\n\"\n cls.dummy_file = SimpleUploadedFile(name=\"dummy.txt\", content=cls.file_contents)\n cls.job_content_type = ContentType.objects.get(app_label=\"extras\", model=\"job\")\n\n def setUp(self):\n self.dummy_file.seek(0) # Reset cursor so we can read it again.\n\n def test_run_job_pass(self):\n \"\"\"Test that file upload succeeds; job SUCCEEDS; and files are deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n job_name = \"local/test_file_upload_pass/TestFileUploadPass\"\n job_class = get_job(job_name)\n\n job_result = JobResult.objects.create(\n name=job_class.class_path,\n obj_type=self.job_content_type,\n user=None,\n job_id=uuid.uuid4(),\n )\n\n # Serialize the file to FileProxy\n data = {\"file\": self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n\n # Assert that the file was serialized to a FileProxy\n self.assertTrue(isinstance(serialized_data[\"file\"], uuid.UUID))\n self.assertEqual(serialized_data[\"file\"], FileProxy.objects.latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n\n # Run the job\n run_job(data=serialized_data, request=None, commit=False, job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n\n # Assert that file contents were correctly read\n self.assertEqual(\n job_result.data[\"run\"][\"log\"][0][2], f\"File contents: {self.file_contents}\" # \"File contents: ...\"\n )\n\n # Assert that FileProxy was cleaned up\n self.assertEqual(FileProxy.objects.count(), 0)\n\n def test_run_job_fail(self):\n \"\"\"Test that file upload succeeds; job FAILS; files deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR, \"extras/tests/dummy_jobs\")):\n job_name = \"local/test_file_upload_fail/TestFileUploadFail\"\n job_class = get_job(job_name)\n\n job_result = JobResult.objects.create(\n name=job_class.class_path,\n obj_type=self.job_content_type,\n user=None,\n job_id=uuid.uuid4(),\n )\n\n # Serialize the file to FileProxy\n data = {\"file\": self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n\n # Assert that the file was serialized to a FileProxy\n self.assertTrue(isinstance(serialized_data[\"file\"], uuid.UUID))\n self.assertEqual(serialized_data[\"file\"], FileProxy.objects.latest().pk)\n 
self.assertEqual(FileProxy.objects.count(), 1)\n\n # Run the job\n run_job(data=serialized_data, request=None, commit=False, job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n\n # Assert that file contents were correctly read\n self.assertEqual(\n job_result.data[\"run\"][\"log\"][0][2], f\"File contents: {self.file_contents}\" # \"File contents: ...\"\n )\n # Also ensure the standard log message about aborting the transaction is present\n self.assertEqual(job_result.data[\"run\"][\"log\"][-1][-1], \"Database changes have been reverted due to error.\")\n\n # Assert that FileProxy was cleaned up\n self.assertEqual(FileProxy.objects.count(), 0)\n",
"import json\nimport os\nimport uuid\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nfrom django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\nfrom nautobot.dcim.models import Site\nfrom nautobot.extras.choices import JobResultStatusChoices\nfrom nautobot.extras.jobs import get_job, run_job\nfrom nautobot.extras.models import FileAttachment, FileProxy, JobResult\nfrom nautobot.utilities.testing import TestCase\n\n\nclass JobTest(TestCase):\n \"\"\"\n Test basic jobs to ensure importing works.\n \"\"\"\n maxDiff = None\n\n @classmethod\n def setUpTestData(cls):\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def test_job_pass(self):\n \"\"\"\n Job test with pass result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_pass'\n name = 'TestPass'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n\n def test_job_fail(self):\n \"\"\"\n Job test with fail result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_fail'\n name = 'TestFail'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_ERRORED)\n\n def test_field_order(self):\n \"\"\"\n Job test with field order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_field_order'\n name = 'TestFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n\n def test_no_field_order(self):\n \"\"\"\n Job test without field_order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_no_field_order'\n name = 'TestNoFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I 
want to be second</span></td></tr>\n<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n\n def test_ready_only_job_pass(self):\n \"\"\"\n Job read only test with pass result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_pass'\n name = 'TestReadOnlyPass'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n self.assertEqual(Site.objects.count(), 0)\n\n def test_read_only_job_fail(self):\n \"\"\"\n Job read only test with fail result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_fail'\n name = 'TestReadOnlyFail'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_ERRORED)\n self.assertEqual(Site.objects.count(), 0)\n self.assertNotEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n\n def test_read_only_no_commit_field(self):\n \"\"\"\n Job read only test commit field is not shown.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_no_commit_field'\n name = 'TestReadOnlyNoCommitField'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var\">Var:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var\" name=\"var\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span><input id=\"id__commit\" name=\"_commit\" type=\"hidden\" value=\"False\"></td></tr>\"\"\"\n )\n\n def test_ip_address_vars(self):\n \"\"\"\n Test that IPAddress variable fields behave as expected.\n\n This test case exercises the following types for both IPv4 and IPv6:\n\n - IPAddressVar\n - IPAddressWithMaskVar\n - IPNetworkVar\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_ipaddress_vars'\n name = 'TestIPAddresses'\n job_class = get_job(f'local/{module}/{name}')\n form_data = dict(ipv4_address='1.2.3.4', ipv4_with_mask=\n '1.2.3.4/32', ipv4_network='1.2.3.0/24', ipv6_address=\n '2001:db8::1', ipv6_with_mask='2001:db8::1/64',\n ipv6_network='2001:db8::/64')\n form = job_class().as_form(form_data)\n self.assertTrue(form.is_valid())\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, 
job_id=uuid.uuid4())\n data = job_class.serialize_data(form.cleaned_data)\n run_job(data=data, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n job_payload = job_result.data['run']['log'][0][2]\n job_result_data = json.loads(job_payload)\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n self.assertEqual(form_data, job_result_data)\n\n\nclass JobFileUploadTest(TestCase):\n \"\"\"Test a job that uploads/deletes files.\"\"\"\n\n @classmethod\n def setUpTestData(cls):\n cls.file_contents = b'I am content.\\n'\n cls.dummy_file = SimpleUploadedFile(name='dummy.txt', content=cls.\n file_contents)\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def setUp(self):\n self.dummy_file.seek(0)\n\n def test_run_job_pass(self):\n \"\"\"Test that file upload succeeds; job SUCCEEDS; and files are deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_pass/TestFileUploadPass'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(FileProxy.objects.count(), 0)\n\n def test_run_job_fail(self):\n \"\"\"Test that file upload succeeds; job FAILS; files deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_fail/TestFileUploadFail'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n self.assertEqual(FileProxy.objects.count(), 0)\n",
"<import token>\n\n\nclass JobTest(TestCase):\n \"\"\"\n Test basic jobs to ensure importing works.\n \"\"\"\n maxDiff = None\n\n @classmethod\n def setUpTestData(cls):\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def test_job_pass(self):\n \"\"\"\n Job test with pass result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_pass'\n name = 'TestPass'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n\n def test_job_fail(self):\n \"\"\"\n Job test with fail result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_fail'\n name = 'TestFail'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_ERRORED)\n\n def test_field_order(self):\n \"\"\"\n Job test with field order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_field_order'\n name = 'TestFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n\n def test_no_field_order(self):\n \"\"\"\n Job test without field_order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_no_field_order'\n name = 'TestNoFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span 
class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n\n def test_ready_only_job_pass(self):\n \"\"\"\n Job read only test with pass result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_pass'\n name = 'TestReadOnlyPass'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n self.assertEqual(Site.objects.count(), 0)\n\n def test_read_only_job_fail(self):\n \"\"\"\n Job read only test with fail result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_fail'\n name = 'TestReadOnlyFail'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_ERRORED)\n self.assertEqual(Site.objects.count(), 0)\n self.assertNotEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n\n def test_read_only_no_commit_field(self):\n \"\"\"\n Job read only test commit field is not shown.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_no_commit_field'\n name = 'TestReadOnlyNoCommitField'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var\">Var:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var\" name=\"var\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span><input id=\"id__commit\" name=\"_commit\" type=\"hidden\" value=\"False\"></td></tr>\"\"\"\n )\n\n def test_ip_address_vars(self):\n \"\"\"\n Test that IPAddress variable fields behave as expected.\n\n This test case exercises the following types for both IPv4 and IPv6:\n\n - IPAddressVar\n - IPAddressWithMaskVar\n - IPNetworkVar\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_ipaddress_vars'\n name = 'TestIPAddresses'\n job_class = get_job(f'local/{module}/{name}')\n form_data = dict(ipv4_address='1.2.3.4', ipv4_with_mask=\n '1.2.3.4/32', ipv4_network='1.2.3.0/24', ipv6_address=\n '2001:db8::1', ipv6_with_mask='2001:db8::1/64',\n ipv6_network='2001:db8::/64')\n form = job_class().as_form(form_data)\n self.assertTrue(form.is_valid())\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = job_class.serialize_data(form.cleaned_data)\n run_job(data=data, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n job_payload = job_result.data['run']['log'][0][2]\n job_result_data = json.loads(job_payload)\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n self.assertEqual(form_data, job_result_data)\n\n\nclass 
JobFileUploadTest(TestCase):\n \"\"\"Test a job that uploads/deletes files.\"\"\"\n\n @classmethod\n def setUpTestData(cls):\n cls.file_contents = b'I am content.\\n'\n cls.dummy_file = SimpleUploadedFile(name='dummy.txt', content=cls.\n file_contents)\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def setUp(self):\n self.dummy_file.seek(0)\n\n def test_run_job_pass(self):\n \"\"\"Test that file upload succeeds; job SUCCEEDS; and files are deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_pass/TestFileUploadPass'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(FileProxy.objects.count(), 0)\n\n def test_run_job_fail(self):\n \"\"\"Test that file upload succeeds; job FAILS; files deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_fail/TestFileUploadFail'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n self.assertEqual(FileProxy.objects.count(), 0)\n",
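Each test above resolves a dummy module under JOBS_ROOT via get_job('local/<module>/<ClassName>'). For orientation, a minimal passing dummy job consistent with these tests might look like the sketch below; it assumes Nautobot's Job base class from nautobot.extras.jobs and its run(self, data, commit) hook, and the module body is illustrative rather than the repository's actual test_pass.py.

from nautobot.extras.jobs import Job


class TestPass(Job):
    """Minimal job body: completing run() without raising is enough."""

    def run(self, data, commit):
        # No exception is raised, so run_job() should leave the JobResult in
        # JobResultStatusChoices.STATUS_COMPLETED, as test_job_pass asserts.
        pass

The failing counterpart (test_fail/TestFail) would simply raise inside run(), driving the result to STATUS_ERRORED.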
"<import token>\n\n\nclass JobTest(TestCase):\n <docstring token>\n maxDiff = None\n\n @classmethod\n def setUpTestData(cls):\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def test_job_pass(self):\n \"\"\"\n Job test with pass result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_pass'\n name = 'TestPass'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n\n def test_job_fail(self):\n \"\"\"\n Job test with fail result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_fail'\n name = 'TestFail'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_ERRORED)\n\n def test_field_order(self):\n \"\"\"\n Job test with field order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_field_order'\n name = 'TestFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n\n def test_no_field_order(self):\n \"\"\"\n Job test without field_order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_no_field_order'\n name = 'TestNoFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database 
(uncheck for a dry-run)</span></td></tr>\"\"\"\n                )\n\n    def test_read_only_job_pass(self):\n        \"\"\"\n        Job read only test with pass result.\n        \"\"\"\n        with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n            'extras/tests/dummy_jobs')):\n            module = 'test_read_only_pass'\n            name = 'TestReadOnlyPass'\n            job_class = get_job(f'local/{module}/{name}')\n            job_result = JobResult.objects.create(name=job_class.class_path,\n                obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n            run_job(data={}, request=None, commit=False, job_result_pk=\n                job_result.pk)\n            job_result.refresh_from_db()\n            self.assertEqual(job_result.status, JobResultStatusChoices.\n                STATUS_COMPLETED)\n            self.assertEqual(Site.objects.count(), 0)\n\n    def test_read_only_job_fail(self):\n        \"\"\"\n        Job read only test with fail result.\n        \"\"\"\n        with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n            'extras/tests/dummy_jobs')):\n            module = 'test_read_only_fail'\n            name = 'TestReadOnlyFail'\n            job_class = get_job(f'local/{module}/{name}')\n            job_result = JobResult.objects.create(name=job_class.class_path,\n                obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n            run_job(data={}, request=None, commit=False, job_result_pk=\n                job_result.pk)\n            job_result.refresh_from_db()\n            self.assertEqual(job_result.status, JobResultStatusChoices.\n                STATUS_ERRORED)\n            self.assertEqual(Site.objects.count(), 0)\n            self.assertNotEqual(job_result.data['run']['log'][-1][-1],\n                'Database changes have been reverted due to error.')\n\n    def test_read_only_no_commit_field(self):\n        \"\"\"\n        Job read only test commit field is not shown.\n        \"\"\"\n        with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n            'extras/tests/dummy_jobs')):\n            module = 'test_read_only_no_commit_field'\n            name = 'TestReadOnlyNoCommitField'\n            job_class = get_job(f'local/{module}/{name}')\n            form = job_class().as_form()\n            self.assertHTMLEqual(form.as_table(),\n                \"\"\"<tr><th><label for=\"id_var\">Var:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var\" name=\"var\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span><input id=\"id__commit\" name=\"_commit\" type=\"hidden\" value=\"False\"></td></tr>\"\"\"\n                )\n\n    def test_ip_address_vars(self):\n        \"\"\"\n        Test that IPAddress variable fields behave as expected.\n\n        This test case exercises the following types for both IPv4 and IPv6:\n\n        - IPAddressVar\n        - IPAddressWithMaskVar\n        - IPNetworkVar\n        \"\"\"\n        with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n            'extras/tests/dummy_jobs')):\n            module = 'test_ipaddress_vars'\n            name = 'TestIPAddresses'\n            job_class = get_job(f'local/{module}/{name}')\n            form_data = dict(ipv4_address='1.2.3.4', ipv4_with_mask=\n                '1.2.3.4/32', ipv4_network='1.2.3.0/24', ipv6_address=\n                '2001:db8::1', ipv6_with_mask='2001:db8::1/64',\n                ipv6_network='2001:db8::/64')\n            form = job_class().as_form(form_data)\n            self.assertTrue(form.is_valid())\n            job_result = JobResult.objects.create(name=job_class.class_path,\n                obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n            data = job_class.serialize_data(form.cleaned_data)\n            run_job(data=data, request=None, commit=False, job_result_pk=\n                job_result.pk)\n            job_result.refresh_from_db()\n            job_payload = job_result.data['run']['log'][0][2]\n            job_result_data = json.loads(job_payload)\n            self.assertEqual(job_result.status, JobResultStatusChoices.\n                STATUS_COMPLETED)\n            self.assertEqual(form_data, job_result_data)\n\n\nclass JobFileUploadTest(TestCase):\n    \"\"\"Test a job that uploads/deletes 
files.\"\"\"\n\n @classmethod\n def setUpTestData(cls):\n cls.file_contents = b'I am content.\\n'\n cls.dummy_file = SimpleUploadedFile(name='dummy.txt', content=cls.\n file_contents)\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def setUp(self):\n self.dummy_file.seek(0)\n\n def test_run_job_pass(self):\n \"\"\"Test that file upload succeeds; job SUCCEEDS; and files are deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_pass/TestFileUploadPass'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(FileProxy.objects.count(), 0)\n\n def test_run_job_fail(self):\n \"\"\"Test that file upload succeeds; job FAILS; files deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_fail/TestFileUploadFail'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n self.assertEqual(FileProxy.objects.count(), 0)\n",
"<import token>\n\n\nclass JobTest(TestCase):\n <docstring token>\n <assignment token>\n\n @classmethod\n def setUpTestData(cls):\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def test_job_pass(self):\n \"\"\"\n Job test with pass result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_pass'\n name = 'TestPass'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n\n def test_job_fail(self):\n \"\"\"\n Job test with fail result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_fail'\n name = 'TestFail'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_ERRORED)\n\n def test_field_order(self):\n \"\"\"\n Job test with field order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_field_order'\n name = 'TestFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n\n def test_no_field_order(self):\n \"\"\"\n Job test without field_order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_no_field_order'\n name = 'TestNoFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the 
database (uncheck for a dry-run)</span></td></tr>\"\"\"\n                )\n\n    def test_read_only_job_pass(self):\n        \"\"\"\n        Job read only test with pass result.\n        \"\"\"\n        with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n            'extras/tests/dummy_jobs')):\n            module = 'test_read_only_pass'\n            name = 'TestReadOnlyPass'\n            job_class = get_job(f'local/{module}/{name}')\n            job_result = JobResult.objects.create(name=job_class.class_path,\n                obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n            run_job(data={}, request=None, commit=False, job_result_pk=\n                job_result.pk)\n            job_result.refresh_from_db()\n            self.assertEqual(job_result.status, JobResultStatusChoices.\n                STATUS_COMPLETED)\n            self.assertEqual(Site.objects.count(), 0)\n\n    def test_read_only_job_fail(self):\n        \"\"\"\n        Job read only test with fail result.\n        \"\"\"\n        with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n            'extras/tests/dummy_jobs')):\n            module = 'test_read_only_fail'\n            name = 'TestReadOnlyFail'\n            job_class = get_job(f'local/{module}/{name}')\n            job_result = JobResult.objects.create(name=job_class.class_path,\n                obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n            run_job(data={}, request=None, commit=False, job_result_pk=\n                job_result.pk)\n            job_result.refresh_from_db()\n            self.assertEqual(job_result.status, JobResultStatusChoices.\n                STATUS_ERRORED)\n            self.assertEqual(Site.objects.count(), 0)\n            self.assertNotEqual(job_result.data['run']['log'][-1][-1],\n                'Database changes have been reverted due to error.')\n\n    def test_read_only_no_commit_field(self):\n        \"\"\"\n        Job read only test commit field is not shown.\n        \"\"\"\n        with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n            'extras/tests/dummy_jobs')):\n            module = 'test_read_only_no_commit_field'\n            name = 'TestReadOnlyNoCommitField'\n            job_class = get_job(f'local/{module}/{name}')\n            form = job_class().as_form()\n            self.assertHTMLEqual(form.as_table(),\n                \"\"\"<tr><th><label for=\"id_var\">Var:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var\" name=\"var\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span><input id=\"id__commit\" name=\"_commit\" type=\"hidden\" value=\"False\"></td></tr>\"\"\"\n                )\n\n    def test_ip_address_vars(self):\n        \"\"\"\n        Test that IPAddress variable fields behave as expected.\n\n        This test case exercises the following types for both IPv4 and IPv6:\n\n        - IPAddressVar\n        - IPAddressWithMaskVar\n        - IPNetworkVar\n        \"\"\"\n        with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n            'extras/tests/dummy_jobs')):\n            module = 'test_ipaddress_vars'\n            name = 'TestIPAddresses'\n            job_class = get_job(f'local/{module}/{name}')\n            form_data = dict(ipv4_address='1.2.3.4', ipv4_with_mask=\n                '1.2.3.4/32', ipv4_network='1.2.3.0/24', ipv6_address=\n                '2001:db8::1', ipv6_with_mask='2001:db8::1/64',\n                ipv6_network='2001:db8::/64')\n            form = job_class().as_form(form_data)\n            self.assertTrue(form.is_valid())\n            job_result = JobResult.objects.create(name=job_class.class_path,\n                obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n            data = job_class.serialize_data(form.cleaned_data)\n            run_job(data=data, request=None, commit=False, job_result_pk=\n                job_result.pk)\n            job_result.refresh_from_db()\n            job_payload = job_result.data['run']['log'][0][2]\n            job_result_data = json.loads(job_payload)\n            self.assertEqual(job_result.status, JobResultStatusChoices.\n                STATUS_COMPLETED)\n            self.assertEqual(form_data, job_result_data)\n\n\nclass JobFileUploadTest(TestCase):\n    \"\"\"Test a job that uploads/deletes 
files.\"\"\"\n\n @classmethod\n def setUpTestData(cls):\n cls.file_contents = b'I am content.\\n'\n cls.dummy_file = SimpleUploadedFile(name='dummy.txt', content=cls.\n file_contents)\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def setUp(self):\n self.dummy_file.seek(0)\n\n def test_run_job_pass(self):\n \"\"\"Test that file upload succeeds; job SUCCEEDS; and files are deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_pass/TestFileUploadPass'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(FileProxy.objects.count(), 0)\n\n def test_run_job_fail(self):\n \"\"\"Test that file upload succeeds; job FAILS; files deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_fail/TestFileUploadFail'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n self.assertEqual(FileProxy.objects.count(), 0)\n",
"<import token>\n\n\nclass JobTest(TestCase):\n <docstring token>\n <assignment token>\n\n @classmethod\n def setUpTestData(cls):\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n <function token>\n\n def test_job_fail(self):\n \"\"\"\n Job test with fail result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_fail'\n name = 'TestFail'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_ERRORED)\n\n def test_field_order(self):\n \"\"\"\n Job test with field order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_field_order'\n name = 'TestFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n\n def test_no_field_order(self):\n \"\"\"\n Job test without field_order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_no_field_order'\n name = 'TestNoFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n\n def test_ready_only_job_pass(self):\n \"\"\"\n Job read only test with pass result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_pass'\n name = 'TestReadOnlyPass'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n 
job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n self.assertEqual(Site.objects.count(), 0)\n\n def test_read_only_job_fail(self):\n \"\"\"\n Job read only test with fail result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_fail'\n name = 'TestReadOnlyFail'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_ERRORED)\n self.assertEqual(Site.objects.count(), 0)\n self.assertNotEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n\n def test_read_only_no_commit_field(self):\n \"\"\"\n Job read only test commit field is not shown.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_no_commit_field'\n name = 'TestReadOnlyNoCommitField'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var\">Var:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var\" name=\"var\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span><input id=\"id__commit\" name=\"_commit\" type=\"hidden\" value=\"False\"></td></tr>\"\"\"\n )\n\n def test_ip_address_vars(self):\n \"\"\"\n Test that IPAddress variable fields behave as expected.\n\n This test case exercises the following types for both IPv4 and IPv6:\n\n - IPAddressVar\n - IPAddressWithMaskVar\n - IPNetworkVar\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_ipaddress_vars'\n name = 'TestIPAddresses'\n job_class = get_job(f'local/{module}/{name}')\n form_data = dict(ipv4_address='1.2.3.4', ipv4_with_mask=\n '1.2.3.4/32', ipv4_network='1.2.3.0/24', ipv6_address=\n '2001:db8::1', ipv6_with_mask='2001:db8::1/64',\n ipv6_network='2001:db8::/64')\n form = job_class().as_form(form_data)\n self.assertTrue(form.is_valid())\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = job_class.serialize_data(form.cleaned_data)\n run_job(data=data, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n job_payload = job_result.data['run']['log'][0][2]\n job_result_data = json.loads(job_payload)\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n self.assertEqual(form_data, job_result_data)\n\n\nclass JobFileUploadTest(TestCase):\n \"\"\"Test a job that uploads/deletes files.\"\"\"\n\n @classmethod\n def setUpTestData(cls):\n cls.file_contents = b'I am content.\\n'\n cls.dummy_file = SimpleUploadedFile(name='dummy.txt', content=cls.\n file_contents)\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def setUp(self):\n self.dummy_file.seek(0)\n\n def test_run_job_pass(self):\n \"\"\"Test that file upload succeeds; job SUCCEEDS; and files are deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 
'local/test_file_upload_pass/TestFileUploadPass'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(FileProxy.objects.count(), 0)\n\n def test_run_job_fail(self):\n \"\"\"Test that file upload succeeds; job FAILS; files deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_fail/TestFileUploadFail'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n self.assertEqual(FileProxy.objects.count(), 0)\n",
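The two form-rendering tests pin down field ordering: without any override the fields render in declaration order (var23 before var2), while test_field_order expects var2 first. A dummy module matching that expected markup might look like this sketch; StringVar and the Meta.field_order attribute are Nautobot job conventions, but the exact declarations here are inferred from the asserted HTML:

from nautobot.extras.jobs import Job, StringVar


class TestFieldOrder(Job):
    # Declared in the "wrong" order on purpose...
    var23 = StringVar(description="I want to be second")
    var2 = StringVar(description="Hello")

    class Meta:
        # ...then reordered so var2 renders first, as the HTML assertion expects.
        field_order = ["var2", "var23"]

    def run(self, data, commit):
        pass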
"<import token>\n\n\nclass JobTest(TestCase):\n <docstring token>\n <assignment token>\n\n @classmethod\n def setUpTestData(cls):\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n <function token>\n\n def test_job_fail(self):\n \"\"\"\n Job test with fail result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_fail'\n name = 'TestFail'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_ERRORED)\n\n def test_field_order(self):\n \"\"\"\n Job test with field order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_field_order'\n name = 'TestFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n\n def test_no_field_order(self):\n \"\"\"\n Job test without field_order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_no_field_order'\n name = 'TestNoFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n\n def test_ready_only_job_pass(self):\n \"\"\"\n Job read only test with pass result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_pass'\n name = 'TestReadOnlyPass'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n 
job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_COMPLETED)\n self.assertEqual(Site.objects.count(), 0)\n\n def test_read_only_job_fail(self):\n \"\"\"\n Job read only test with fail result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_fail'\n name = 'TestReadOnlyFail'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_ERRORED)\n self.assertEqual(Site.objects.count(), 0)\n self.assertNotEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n\n def test_read_only_no_commit_field(self):\n \"\"\"\n Job read only test commit field is not shown.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_no_commit_field'\n name = 'TestReadOnlyNoCommitField'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var\">Var:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var\" name=\"var\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span><input id=\"id__commit\" name=\"_commit\" type=\"hidden\" value=\"False\"></td></tr>\"\"\"\n )\n <function token>\n\n\nclass JobFileUploadTest(TestCase):\n \"\"\"Test a job that uploads/deletes files.\"\"\"\n\n @classmethod\n def setUpTestData(cls):\n cls.file_contents = b'I am content.\\n'\n cls.dummy_file = SimpleUploadedFile(name='dummy.txt', content=cls.\n file_contents)\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def setUp(self):\n self.dummy_file.seek(0)\n\n def test_run_job_pass(self):\n \"\"\"Test that file upload succeeds; job SUCCEEDS; and files are deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_pass/TestFileUploadPass'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(FileProxy.objects.count(), 0)\n\n def test_run_job_fail(self):\n \"\"\"Test that file upload succeeds; job FAILS; files deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_fail/TestFileUploadFail'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, 
job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n self.assertEqual(FileProxy.objects.count(), 0)\n",
"<import token>\n\n\nclass JobTest(TestCase):\n <docstring token>\n <assignment token>\n\n @classmethod\n def setUpTestData(cls):\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n <function token>\n\n def test_job_fail(self):\n \"\"\"\n Job test with fail result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_fail'\n name = 'TestFail'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_ERRORED)\n\n def test_field_order(self):\n \"\"\"\n Job test with field order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_field_order'\n name = 'TestFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n\n def test_no_field_order(self):\n \"\"\"\n Job test without field_order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_no_field_order'\n name = 'TestNoFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n <function token>\n\n def test_read_only_job_fail(self):\n \"\"\"\n Job read only test with fail result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_fail'\n name = 'TestReadOnlyFail'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n 
job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_ERRORED)\n self.assertEqual(Site.objects.count(), 0)\n self.assertNotEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n\n def test_read_only_no_commit_field(self):\n \"\"\"\n Job read only test commit field is not shown.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_no_commit_field'\n name = 'TestReadOnlyNoCommitField'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var\">Var:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var\" name=\"var\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span><input id=\"id__commit\" name=\"_commit\" type=\"hidden\" value=\"False\"></td></tr>\"\"\"\n )\n <function token>\n\n\nclass JobFileUploadTest(TestCase):\n \"\"\"Test a job that uploads/deletes files.\"\"\"\n\n @classmethod\n def setUpTestData(cls):\n cls.file_contents = b'I am content.\\n'\n cls.dummy_file = SimpleUploadedFile(name='dummy.txt', content=cls.\n file_contents)\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def setUp(self):\n self.dummy_file.seek(0)\n\n def test_run_job_pass(self):\n \"\"\"Test that file upload succeeds; job SUCCEEDS; and files are deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_pass/TestFileUploadPass'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(FileProxy.objects.count(), 0)\n\n def test_run_job_fail(self):\n \"\"\"Test that file upload succeeds; job FAILS; files deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_fail/TestFileUploadFail'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n 
self.assertEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n self.assertEqual(FileProxy.objects.count(), 0)\n",
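The hidden <input name="_commit" value="False"> asserted in test_read_only_no_commit_field is what a read-only job renders in place of the usual commit checkbox. Below is a sketch of such a dummy module, assuming the read_only flag lives on the job's Meta class:

from nautobot.extras.jobs import Job, StringVar


class TestReadOnlyNoCommitField(Job):
    var = StringVar(description="Hello")

    class Meta:
        # A read-only job hides the _commit checkbox and pins commit to False,
        # which also explains why the read-only tests assert that no Site
        # objects were created and no revert message was logged.
        read_only = True

    def run(self, data, commit):
        pass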
"<import token>\n\n\nclass JobTest(TestCase):\n <docstring token>\n <assignment token>\n\n @classmethod\n def setUpTestData(cls):\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n <function token>\n\n def test_job_fail(self):\n \"\"\"\n Job test with fail result.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_fail'\n name = 'TestFail'\n job_class = get_job(f'local/{module}/{name}')\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n run_job(data={}, request=None, commit=False, job_result_pk=\n job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.status, JobResultStatusChoices.\n STATUS_ERRORED)\n\n def test_field_order(self):\n \"\"\"\n Job test with field order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_field_order'\n name = 'TestFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n\n def test_no_field_order(self):\n \"\"\"\n Job test without field_order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_no_field_order'\n name = 'TestNoFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n <function token>\n <function token>\n\n def test_read_only_no_commit_field(self):\n \"\"\"\n Job read only test commit field is not shown.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_no_commit_field'\n name = 'TestReadOnlyNoCommitField'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var\">Var:</label></th><td>\n<input 
class=\"form-control form-control\" id=\"id_var\" name=\"var\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span><input id=\"id__commit\" name=\"_commit\" type=\"hidden\" value=\"False\"></td></tr>\"\"\"\n )\n <function token>\n\n\nclass JobFileUploadTest(TestCase):\n \"\"\"Test a job that uploads/deletes files.\"\"\"\n\n @classmethod\n def setUpTestData(cls):\n cls.file_contents = b'I am content.\\n'\n cls.dummy_file = SimpleUploadedFile(name='dummy.txt', content=cls.\n file_contents)\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def setUp(self):\n self.dummy_file.seek(0)\n\n def test_run_job_pass(self):\n \"\"\"Test that file upload succeeds; job SUCCEEDS; and files are deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_pass/TestFileUploadPass'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(FileProxy.objects.count(), 0)\n\n def test_run_job_fail(self):\n \"\"\"Test that file upload succeeds; job FAILS; files deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_fail/TestFileUploadFail'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n self.assertEqual(FileProxy.objects.count(), 0)\n",
"<import token>\n\n\nclass JobTest(TestCase):\n <docstring token>\n <assignment token>\n\n @classmethod\n def setUpTestData(cls):\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n <function token>\n <function token>\n\n def test_field_order(self):\n \"\"\"\n Job test with field order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_field_order'\n name = 'TestFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n\n def test_no_field_order(self):\n \"\"\"\n Job test without field_order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_no_field_order'\n name = 'TestNoFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n <function token>\n <function token>\n\n def test_read_only_no_commit_field(self):\n \"\"\"\n Job read only test commit field is not shown.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_no_commit_field'\n name = 'TestReadOnlyNoCommitField'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var\">Var:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var\" name=\"var\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span><input id=\"id__commit\" name=\"_commit\" type=\"hidden\" value=\"False\"></td></tr>\"\"\"\n )\n <function token>\n\n\nclass JobFileUploadTest(TestCase):\n \"\"\"Test a job that uploads/deletes files.\"\"\"\n\n @classmethod\n def setUpTestData(cls):\n cls.file_contents = b'I am content.\\n'\n cls.dummy_file = SimpleUploadedFile(name='dummy.txt', content=cls.\n file_contents)\n cls.job_content_type = 
ContentType.objects.get(app_label='extras',\n model='job')\n\n def setUp(self):\n self.dummy_file.seek(0)\n\n def test_run_job_pass(self):\n \"\"\"Test that file upload succeeds; job SUCCEEDS; and files are deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_pass/TestFileUploadPass'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(FileProxy.objects.count(), 0)\n\n def test_run_job_fail(self):\n \"\"\"Test that file upload succeeds; job FAILS; files deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_fail/TestFileUploadFail'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n self.assertEqual(FileProxy.objects.count(), 0)\n",
"<import token>\n\n\nclass JobTest(TestCase):\n <docstring token>\n <assignment token>\n\n @classmethod\n def setUpTestData(cls):\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n <function token>\n <function token>\n <function token>\n\n def test_no_field_order(self):\n \"\"\"\n Job test without field_order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_no_field_order'\n name = 'TestNoFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n <function token>\n <function token>\n\n def test_read_only_no_commit_field(self):\n \"\"\"\n Job read only test commit field is not shown.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_no_commit_field'\n name = 'TestReadOnlyNoCommitField'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var\">Var:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var\" name=\"var\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span><input id=\"id__commit\" name=\"_commit\" type=\"hidden\" value=\"False\"></td></tr>\"\"\"\n )\n <function token>\n\n\nclass JobFileUploadTest(TestCase):\n \"\"\"Test a job that uploads/deletes files.\"\"\"\n\n @classmethod\n def setUpTestData(cls):\n cls.file_contents = b'I am content.\\n'\n cls.dummy_file = SimpleUploadedFile(name='dummy.txt', content=cls.\n file_contents)\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def setUp(self):\n self.dummy_file.seek(0)\n\n def test_run_job_pass(self):\n \"\"\"Test that file upload succeeds; job SUCCEEDS; and files are deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_pass/TestFileUploadPass'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File 
contents: {self.file_contents}')\n self.assertEqual(FileProxy.objects.count(), 0)\n\n def test_run_job_fail(self):\n \"\"\"Test that file upload succeeds; job FAILS; files deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_fail/TestFileUploadFail'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n self.assertEqual(FileProxy.objects.count(), 0)\n",
"<import token>\n\n\nclass JobTest(TestCase):\n <docstring token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_no_field_order(self):\n \"\"\"\n Job test without field_order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_no_field_order'\n name = 'TestNoFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n <function token>\n <function token>\n\n def test_read_only_no_commit_field(self):\n \"\"\"\n Job read only test commit field is not shown.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_read_only_no_commit_field'\n name = 'TestReadOnlyNoCommitField'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var\">Var:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var\" name=\"var\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span><input id=\"id__commit\" name=\"_commit\" type=\"hidden\" value=\"False\"></td></tr>\"\"\"\n )\n <function token>\n\n\nclass JobFileUploadTest(TestCase):\n \"\"\"Test a job that uploads/deletes files.\"\"\"\n\n @classmethod\n def setUpTestData(cls):\n cls.file_contents = b'I am content.\\n'\n cls.dummy_file = SimpleUploadedFile(name='dummy.txt', content=cls.\n file_contents)\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def setUp(self):\n self.dummy_file.seek(0)\n\n def test_run_job_pass(self):\n \"\"\"Test that file upload succeeds; job SUCCEEDS; and files are deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_pass/TestFileUploadPass'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(FileProxy.objects.count(), 0)\n\n def 
test_run_job_fail(self):\n \"\"\"Test that file upload succeeds; job FAILS; files deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_fail/TestFileUploadFail'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n self.assertEqual(FileProxy.objects.count(), 0)\n",
"<import token>\n\n\nclass JobTest(TestCase):\n <docstring token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_no_field_order(self):\n \"\"\"\n Job test without field_order.\n \"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n module = 'test_no_field_order'\n name = 'TestNoFieldOrder'\n job_class = get_job(f'local/{module}/{name}')\n form = job_class().as_form()\n self.assertHTMLEqual(form.as_table(),\n \"\"\"<tr><th><label for=\"id_var23\">Var23:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var23\" name=\"var23\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">I want to be second</span></td></tr>\n<tr><th><label for=\"id_var2\">Var2:</label></th><td>\n<input class=\"form-control form-control\" id=\"id_var2\" name=\"var2\" placeholder=\"None\" required type=\"text\">\n<br><span class=\"helptext\">Hello</span></td></tr>\n<tr><th><label for=\"id__commit\">Commit changes:</label></th><td>\n<input checked id=\"id__commit\" name=\"_commit\" placeholder=\"Commit changes\" type=\"checkbox\">\n<br><span class=\"helptext\">Commit changes to the database (uncheck for a dry-run)</span></td></tr>\"\"\"\n )\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass JobFileUploadTest(TestCase):\n \"\"\"Test a job that uploads/deletes files.\"\"\"\n\n @classmethod\n def setUpTestData(cls):\n cls.file_contents = b'I am content.\\n'\n cls.dummy_file = SimpleUploadedFile(name='dummy.txt', content=cls.\n file_contents)\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def setUp(self):\n self.dummy_file.seek(0)\n\n def test_run_job_pass(self):\n \"\"\"Test that file upload succeeds; job SUCCEEDS; and files are deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_pass/TestFileUploadPass'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(FileProxy.objects.count(), 0)\n\n def test_run_job_fail(self):\n \"\"\"Test that file upload succeeds; job FAILS; files deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_fail/TestFileUploadFail'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], 
FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n self.assertEqual(FileProxy.objects.count(), 0)\n",
"<import token>\n\n\nclass JobTest(TestCase):\n <docstring token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass JobFileUploadTest(TestCase):\n \"\"\"Test a job that uploads/deletes files.\"\"\"\n\n @classmethod\n def setUpTestData(cls):\n cls.file_contents = b'I am content.\\n'\n cls.dummy_file = SimpleUploadedFile(name='dummy.txt', content=cls.\n file_contents)\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def setUp(self):\n self.dummy_file.seek(0)\n\n def test_run_job_pass(self):\n \"\"\"Test that file upload succeeds; job SUCCEEDS; and files are deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_pass/TestFileUploadPass'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(FileProxy.objects.count(), 0)\n\n def test_run_job_fail(self):\n \"\"\"Test that file upload succeeds; job FAILS; files deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_fail/TestFileUploadFail'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n self.assertEqual(FileProxy.objects.count(), 0)\n",
"<import token>\n<class token>\n\n\nclass JobFileUploadTest(TestCase):\n \"\"\"Test a job that uploads/deletes files.\"\"\"\n\n @classmethod\n def setUpTestData(cls):\n cls.file_contents = b'I am content.\\n'\n cls.dummy_file = SimpleUploadedFile(name='dummy.txt', content=cls.\n file_contents)\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def setUp(self):\n self.dummy_file.seek(0)\n\n def test_run_job_pass(self):\n \"\"\"Test that file upload succeeds; job SUCCEEDS; and files are deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_pass/TestFileUploadPass'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(FileProxy.objects.count(), 0)\n\n def test_run_job_fail(self):\n \"\"\"Test that file upload succeeds; job FAILS; files deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_fail/TestFileUploadFail'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n self.assertEqual(FileProxy.objects.count(), 0)\n",
"<import token>\n<class token>\n\n\nclass JobFileUploadTest(TestCase):\n <docstring token>\n\n @classmethod\n def setUpTestData(cls):\n cls.file_contents = b'I am content.\\n'\n cls.dummy_file = SimpleUploadedFile(name='dummy.txt', content=cls.\n file_contents)\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def setUp(self):\n self.dummy_file.seek(0)\n\n def test_run_job_pass(self):\n \"\"\"Test that file upload succeeds; job SUCCEEDS; and files are deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_pass/TestFileUploadPass'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(FileProxy.objects.count(), 0)\n\n def test_run_job_fail(self):\n \"\"\"Test that file upload succeeds; job FAILS; files deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_fail/TestFileUploadFail'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n self.assertEqual(FileProxy.objects.count(), 0)\n",
"<import token>\n<class token>\n\n\nclass JobFileUploadTest(TestCase):\n <docstring token>\n\n @classmethod\n def setUpTestData(cls):\n cls.file_contents = b'I am content.\\n'\n cls.dummy_file = SimpleUploadedFile(name='dummy.txt', content=cls.\n file_contents)\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def setUp(self):\n self.dummy_file.seek(0)\n <function token>\n\n def test_run_job_fail(self):\n \"\"\"Test that file upload succeeds; job FAILS; files deleted.\"\"\"\n with self.settings(JOBS_ROOT=os.path.join(settings.BASE_DIR,\n 'extras/tests/dummy_jobs')):\n job_name = 'local/test_file_upload_fail/TestFileUploadFail'\n job_class = get_job(job_name)\n job_result = JobResult.objects.create(name=job_class.class_path,\n obj_type=self.job_content_type, user=None, job_id=uuid.uuid4())\n data = {'file': self.dummy_file}\n form = job_class().as_form(files=data)\n self.assertTrue(form.is_valid())\n serialized_data = job_class.serialize_data(form.cleaned_data)\n self.assertTrue(isinstance(serialized_data['file'], uuid.UUID))\n self.assertEqual(serialized_data['file'], FileProxy.objects.\n latest().pk)\n self.assertEqual(FileProxy.objects.count(), 1)\n run_job(data=serialized_data, request=None, commit=False,\n job_result_pk=job_result.pk)\n job_result.refresh_from_db()\n self.assertEqual(job_result.data['run']['log'][0][2],\n f'File contents: {self.file_contents}')\n self.assertEqual(job_result.data['run']['log'][-1][-1],\n 'Database changes have been reverted due to error.')\n self.assertEqual(FileProxy.objects.count(), 0)\n",
"<import token>\n<class token>\n\n\nclass JobFileUploadTest(TestCase):\n <docstring token>\n\n @classmethod\n def setUpTestData(cls):\n cls.file_contents = b'I am content.\\n'\n cls.dummy_file = SimpleUploadedFile(name='dummy.txt', content=cls.\n file_contents)\n cls.job_content_type = ContentType.objects.get(app_label='extras',\n model='job')\n\n def setUp(self):\n self.dummy_file.seek(0)\n <function token>\n <function token>\n",
"<import token>\n<class token>\n\n\nclass JobFileUploadTest(TestCase):\n <docstring token>\n <function token>\n\n def setUp(self):\n self.dummy_file.seek(0)\n <function token>\n <function token>\n",
"<import token>\n<class token>\n\n\nclass JobFileUploadTest(TestCase):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n<class token>\n"
] | false |
905 |
48f2cc5b6d53c7317ad882947cabbc367cda0fb7
|
import random
import numpy as np
import pandas as pd
def linear_combination_plus_error(X, num_dependent_cols=5, parameter_mean=0, parameter_std=1, error_mean=0, error_std=1):
"""
Generate a column that is a random linear combination of
X1, X2 and X3 plus some random error
"""
length = X.shape[0]
param = np.random.normal(loc=parameter_mean,
scale=parameter_std,
size=(num_dependent_cols,))
error = np.random.normal(loc=error_mean,
scale=error_std,
size=(length,))
result = np.zeros(length,)
for i in range(num_dependent_cols):
result += param[i] * X[:, i]
return result + error
np.random.seed(472)
num_data = 10100
num_independent_cols = 3
X = np.zeros((num_data, 1001))
# Generate 3 principal components
for i in range(num_independent_cols):
X[:, i] = np.random.normal(np.random.uniform(-5, 5),
np.random.uniform(1, 5), size=(num_data,))
# Generate other columns
for i in range(3, 1000):
X[:, i] = linear_combination_plus_error(X, num_dependent_cols=num_independent_cols, parameter_std=2, error_std=1)
# Randomly shuffle the 1000 feature columns
col_nums = list(range(1000))
np.random.shuffle(col_nums)
X[:, list(range(1000))] = X[:, col_nums]
# Randomly generate Y
X[:, 1000] = linear_combination_plus_error(X, num_dependent_cols=num_independent_cols, parameter_mean=5, parameter_std=2)
X[:, 1000] += abs(min(X[:, 1000])) + 5
# Take only three digits after decimal point
X = np.floor(X * 1000) / 1000
# Split the data into 2 files
X1 = X[:10000, :]
X2 = X[10000:, :]
X1_df = pd.DataFrame(X1)
X1_df.to_csv("./sensors1.csv", header=False, index=False)
X2_df = pd.DataFrame(X2)
X2_df.to_csv("./sensors2.csv", header=False, index=False)
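Since every feature column is a noisy linear combination of the three base columns, the generated matrix should be close to rank 3. A minimal sanity-check sketch (an illustrative addition, not part of the original script; it assumes sensors1.csv was written by the code above) using numpy's SVD:

import numpy as np
import pandas as pd

# Load the generated file; the last column is the target, the first 1000 are features.
data = pd.read_csv("sensors1.csv", header=None).values
features = data[:, :1000]
# For a rank-3-plus-noise matrix the singular values should drop sharply after the third.
centered = features - features.mean(axis=0)
singular_values = np.linalg.svd(centered, compute_uv=False)
print(singular_values[:6])  # expect three large values followed by a noise floor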
|
[
"import random\nimport numpy as np\nimport pandas as pd\n\ndef linear_combination_plus_error(X, num_dependent_cols=5, parameter_mean=0, parameter_std=1, error_mean=0, error_std=1):\n \"\"\"\n Generate a column that is a random linear combination of\n X1, X2 and X3 plus some random error\n \"\"\"\n length = X.shape[0]\n param = np.random.normal(loc=parameter_mean,\n scale=parameter_std,\n size=(num_dependent_cols,))\n error = np.random.normal(loc=error_mean,\n scale=error_std,\n size=(length,))\n result = np.zeros(length,)\n for i in range(num_dependent_cols):\n result += param[i] * X[:, i]\n return result + error\n \n\nnp.random.seed(472)\nnum_data = 10100\nnum_independent_cols = 3\n\nX = np.zeros((num_data, 1001))\n\n# Generate 3 principal components\nfor i in range(num_independent_cols):\n X[:, i] = np.random.normal(np.random.uniform(-5, 5), \n np.random.uniform(1, 5), size=(num_data,))\n\n\n# Generate other columns\nfor i in range(3, 1000):\n X[:, i] = linear_combination_plus_error(X, num_dependent_cols=num_independent_cols, parameter_std=2, error_std=1)\n\n# Randomly suffle the 1000 feature columns\ncol_nums = list(range(1000))\nnp.random.shuffle(col_nums)\nX[:, list(range(1000))] = X[:, col_nums]\n\n# Randomly generate Y\nX[:, 1000] = linear_combination_plus_error(X, num_dependent_cols=num_independent_cols, parameter_mean=5, parameter_std=2)\nX[:, 1000] += abs(min(X[:, 1000])) + 5\n\n\n# Take only three digits after decimal point\nX = np.floor(X * 1000) / 1000\n\n\n# Split the data into 2 files\nX1 = X[:10000, :]\nX2 = X[10000:, :]\nX1_df = pd.DataFrame(X1)\nX1_df.to_csv(\"./sensors1.csv\", header=None, index=None)\n\nX2_df = pd.DataFrame(X2)\nX2_df.to_csv(\"./sensors2.csv\", header=None, index=None)\n\n\n\n",
"import random\nimport numpy as np\nimport pandas as pd\n\n\ndef linear_combination_plus_error(X, num_dependent_cols=5, parameter_mean=0,\n parameter_std=1, error_mean=0, error_std=1):\n \"\"\"\n Generate a column that is a random linear combination of\n X1, X2 and X3 plus some random error\n \"\"\"\n length = X.shape[0]\n param = np.random.normal(loc=parameter_mean, scale=parameter_std, size=\n (num_dependent_cols,))\n error = np.random.normal(loc=error_mean, scale=error_std, size=(length,))\n result = np.zeros(length)\n for i in range(num_dependent_cols):\n result += param[i] * X[:, i]\n return result + error\n\n\nnp.random.seed(472)\nnum_data = 10100\nnum_independent_cols = 3\nX = np.zeros((num_data, 1001))\nfor i in range(num_independent_cols):\n X[:, i] = np.random.normal(np.random.uniform(-5, 5), np.random.uniform(\n 1, 5), size=(num_data,))\nfor i in range(3, 1000):\n X[:, i] = linear_combination_plus_error(X, num_dependent_cols=\n num_independent_cols, parameter_std=2, error_std=1)\ncol_nums = list(range(1000))\nnp.random.shuffle(col_nums)\nX[:, list(range(1000))] = X[:, col_nums]\nX[:, 1000] = linear_combination_plus_error(X, num_dependent_cols=\n num_independent_cols, parameter_mean=5, parameter_std=2)\nX[:, 1000] += abs(min(X[:, 1000])) + 5\nX = np.floor(X * 1000) / 1000\nX1 = X[:10000, :]\nX2 = X[10000:, :]\nX1_df = pd.DataFrame(X1)\nX1_df.to_csv('./sensors1.csv', header=None, index=None)\nX2_df = pd.DataFrame(X2)\nX2_df.to_csv('./sensors2.csv', header=None, index=None)\n",
"<import token>\n\n\ndef linear_combination_plus_error(X, num_dependent_cols=5, parameter_mean=0,\n parameter_std=1, error_mean=0, error_std=1):\n \"\"\"\n Generate a column that is a random linear combination of\n X1, X2 and X3 plus some random error\n \"\"\"\n length = X.shape[0]\n param = np.random.normal(loc=parameter_mean, scale=parameter_std, size=\n (num_dependent_cols,))\n error = np.random.normal(loc=error_mean, scale=error_std, size=(length,))\n result = np.zeros(length)\n for i in range(num_dependent_cols):\n result += param[i] * X[:, i]\n return result + error\n\n\nnp.random.seed(472)\nnum_data = 10100\nnum_independent_cols = 3\nX = np.zeros((num_data, 1001))\nfor i in range(num_independent_cols):\n X[:, i] = np.random.normal(np.random.uniform(-5, 5), np.random.uniform(\n 1, 5), size=(num_data,))\nfor i in range(3, 1000):\n X[:, i] = linear_combination_plus_error(X, num_dependent_cols=\n num_independent_cols, parameter_std=2, error_std=1)\ncol_nums = list(range(1000))\nnp.random.shuffle(col_nums)\nX[:, list(range(1000))] = X[:, col_nums]\nX[:, 1000] = linear_combination_plus_error(X, num_dependent_cols=\n num_independent_cols, parameter_mean=5, parameter_std=2)\nX[:, 1000] += abs(min(X[:, 1000])) + 5\nX = np.floor(X * 1000) / 1000\nX1 = X[:10000, :]\nX2 = X[10000:, :]\nX1_df = pd.DataFrame(X1)\nX1_df.to_csv('./sensors1.csv', header=None, index=None)\nX2_df = pd.DataFrame(X2)\nX2_df.to_csv('./sensors2.csv', header=None, index=None)\n",
"<import token>\n\n\ndef linear_combination_plus_error(X, num_dependent_cols=5, parameter_mean=0,\n parameter_std=1, error_mean=0, error_std=1):\n \"\"\"\n Generate a column that is a random linear combination of\n X1, X2 and X3 plus some random error\n \"\"\"\n length = X.shape[0]\n param = np.random.normal(loc=parameter_mean, scale=parameter_std, size=\n (num_dependent_cols,))\n error = np.random.normal(loc=error_mean, scale=error_std, size=(length,))\n result = np.zeros(length)\n for i in range(num_dependent_cols):\n result += param[i] * X[:, i]\n return result + error\n\n\nnp.random.seed(472)\n<assignment token>\nfor i in range(num_independent_cols):\n X[:, i] = np.random.normal(np.random.uniform(-5, 5), np.random.uniform(\n 1, 5), size=(num_data,))\nfor i in range(3, 1000):\n X[:, i] = linear_combination_plus_error(X, num_dependent_cols=\n num_independent_cols, parameter_std=2, error_std=1)\n<assignment token>\nnp.random.shuffle(col_nums)\n<assignment token>\nX[:, 1000] += abs(min(X[:, 1000])) + 5\n<assignment token>\nX1_df.to_csv('./sensors1.csv', header=None, index=None)\n<assignment token>\nX2_df.to_csv('./sensors2.csv', header=None, index=None)\n",
"<import token>\n\n\ndef linear_combination_plus_error(X, num_dependent_cols=5, parameter_mean=0,\n parameter_std=1, error_mean=0, error_std=1):\n \"\"\"\n Generate a column that is a random linear combination of\n X1, X2 and X3 plus some random error\n \"\"\"\n length = X.shape[0]\n param = np.random.normal(loc=parameter_mean, scale=parameter_std, size=\n (num_dependent_cols,))\n error = np.random.normal(loc=error_mean, scale=error_std, size=(length,))\n result = np.zeros(length)\n for i in range(num_dependent_cols):\n result += param[i] * X[:, i]\n return result + error\n\n\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
906 |
6ae529a5e5658ba409ec3e7284d8b2911c60dd00
|
import os
from linkedin_scraper import get_jobs
chrome_driver_path = os.path.join(os.path.abspath(os.getcwd()), 'chromedriver')
df = get_jobs('Data Scientist', 40, False, chrome_driver_path)
df.to_csv('linkedin_jobs.csv', index=False)
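A short sketch of how the exported file might be inspected afterwards (an illustrative addition; the actual column names depend on what get_jobs returns):

import pandas as pd

# Reload the CSV written above and take a quick look at the scraped postings.
jobs = pd.read_csv('linkedin_jobs.csv')
print(jobs.shape)   # (number of postings, number of columns)
print(jobs.head())  # first few rows; columns come from get_jobs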
|
[
"import os\nfrom linkedin_scraper import get_jobs\n\nchrome_driver_path = os.path.join(os.path.abspath(os.getcwd()), 'chromedriver')\n\ndf = get_jobs('Data Scientist', 40, False, chrome_driver_path)\n\ndf.to_csv('linkedin_jobs.csv', index= False)\n\n\n",
"import os\nfrom linkedin_scraper import get_jobs\nchrome_driver_path = os.path.join(os.path.abspath(os.getcwd()), 'chromedriver')\ndf = get_jobs('Data Scientist', 40, False, chrome_driver_path)\ndf.to_csv('linkedin_jobs.csv', index=False)\n",
"<import token>\nchrome_driver_path = os.path.join(os.path.abspath(os.getcwd()), 'chromedriver')\ndf = get_jobs('Data Scientist', 40, False, chrome_driver_path)\ndf.to_csv('linkedin_jobs.csv', index=False)\n",
"<import token>\n<assignment token>\ndf.to_csv('linkedin_jobs.csv', index=False)\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
907 |
d268f8d563aac28852457f6f130b2fb4ea6269a2
|
import nltk
from nltk import bigrams
from lm import *
# Exercise 1:
# create an LM instance to compute perplexity over news and adventure
m = LM()
# Fetch news and adventure for later use
news = nltk.corpus.brown.sents(categories='news')
adventure = nltk.corpus.brown.sents(categories='adventure')
# initial parameters
perpNews = 0.0
perpAdventure = 0.0
# compute the perplexity:
perpNews = m.perplexity(news)
perpAdventure = m.perplexity(adventure)
# print the perplexity values.
print("Perpleksitet til news: %.2f" % perpNews)
print("Perpleksitet til adventure: %.2f" % perpAdventure)
""" Oppgave 1 - evaluering av spraakmodeller
$ python oblig2b_steinrr.py
Perpleksitet til news: 72.69
Perpleksitet til adventure: 117.41
Perpleksiteten tiil adventure er hoeyeere fordi klassifikatoren vi benytter i LM er ikke trent paa dette korpuset.
Perpleksiteten til news ville ha veart lavere hvis klassifikatoren vi benytter hadde bare veart trent paa news.
Men dette er ikke bra pga da ville perpleksiteten til adventure veare enda hoyere enn den er naa.
"""
zippy = m.zipfity(news)
for sekvens in zippy:
print("Ord: %4s Antall: %4d Sekvens: %.4f " %(sekvens[0], sekvens[1], sekvens[2]))
""" Oppgave 2 - Zipfianske distribusjon
Ord: the Antall: 6386 Sekvens: 6386.0000
Ord: , Antall: 5188 Sekvens: 2594.0000
Ord: . Antall: 4030 Sekvens: 1343.3333
Ord: of Antall: 2861 Sekvens: 715.2500
Ord: and Antall: 2186 Sekvens: 437.2000
Ord: to Antall: 2144 Sekvens: 357.3333
Ord: a Antall: 2130 Sekvens: 304.2857
Ord: in Antall: 2020 Sekvens: 252.5000
Ord: for Antall: 969 Sekvens: 107.6667
Ord: that Antall: 829 Sekvens: 82.9000
"""
brown_tagged_sents = nltk.corpus.brown.tagged_sents(categories='adventure')
adventure = [[w.lower() for w in line] for line in nltk.corpus.brown.sents(categories='adventure')]
#m.regularTagger(adventure)
checkTaggStandardAdv = m.analyseRegularTagger('adventure')
checkTaggStandardFic = m.analyseRegularTagger('fiction')
checkTaggModifiedAdv = m.analyseRegularTagger('adventure', 'modified')
checkTaggModifiedFic = m.analyseRegularTagger('fiction', 'modified')
print("Standard vs modifisert tagging ved hjelp av reguleart uttrykk")
print("Med corpus: 'adventure'")
print(" Standard: %4.2f Modifisert: %4.2f " %(checkTaggStandardFic, checkTaggModifiedAdv))
print("Med corpus: 'fiction'")
print(" Standard: %4.2f Modifisert: %4.2f " %(checkTaggStandardFic, checkTaggModifiedFic))
infile = open("test_setninger.txt")
tekst = []
for line in infile:
words = line.split(" ")
tekst.append(words)
infile.close()
# make every word lowercase:
tekst = [[w.lower() for w in line] for line in tekst]
taggerTekst = m.regularTagger(tekst, 'modified')
for sentence in taggerTekst:
for taggs in sentence:
print(taggs)
""" Oppgave 3 - Ordklassetagging med regulære uttrykk
Standard vs modifisert tagging ved hjelp av reguleart uttrykk
Med corpus: 'adventure'
Standard: 0.18 Modifisert: 0.41
Med corpus: 'fiction'
Standard: 0.18 Modifisert: 0.40
...
..
... prints the tags, which are copied into test_setninger_m_taggs.txt
..
Comments on further improvements:
1. said should have been categorized as a verb: VBD
2. he should be a pronoun
3. had should be a verb form of have
updated regular expressions:
1 and 3: (r'(.*ed|.*id|had)$', 'VBD')
2. rules for pronouns have not been added at all, so this is something
that could be added
"""
|
[
"import nltk\nfrom nltk import bigrams\nfrom lm import *\n\n# Oppgave 1:\n# opretter LM klasse til aa perpleksitere news og adventure\nm = LM()\n\n# Henter news og adventure for videre bruk\nnews=nltk.corpus.brown.sents(categories='news')\nadventure=nltk.corpus.brown.sents(categories='adventure')\n\n# initial parametre\nperpNews = 0.0\nperpAdventure = 0.0\n\n# beregner perplexitet:\nperpNews = m.perplexity(news)\nperpAdventure = m.perplexity(adventure)\n\n# printer ut perplexitet.\nprint(\"Perpleksitet til news: %.2f\" %perpNews)\nprint(\"Perpleksitet til adventure: %.2f\" %perpAdventure)\n\n\n\"\"\" Oppgave 1 - evaluering av spraakmodeller\n\n$ python oblig2b_steinrr.py\nPerpleksitet til news: 72.69\nPerpleksitet til adventure: 117.41\n\n\nPerpleksiteten tiil adventure er hoeyeere fordi klassifikatoren vi benytter i LM er ikke trent paa dette korpuset.\nPerpleksiteten til news ville ha veart lavere hvis klassifikatoren vi benytter hadde bare veart trent paa news.\nMen dette er ikke bra pga da ville perpleksiteten til adventure veare enda hoyere enn den er naa.\n\n\"\"\"\n\nzippy = m.zipfity(news)\n\nfor sekvens in zippy:\n print(\"Ord: %4s Antall: %4d Sekvens: %.4f \" %(sekvens[0], sekvens[1], sekvens[2]))\n\n\"\"\" Oppgave 2 - Zipfianske distribusjon\n \nOrd: the Antall: 6386 Sekvens: 6386.0000 \nOrd: , Antall: 5188 Sekvens: 2594.0000 \nOrd: . Antall: 4030 Sekvens: 1343.3333 \nOrd: of Antall: 2861 Sekvens: 715.2500 \nOrd: and Antall: 2186 Sekvens: 437.2000 \nOrd: to Antall: 2144 Sekvens: 357.3333 \nOrd: a Antall: 2130 Sekvens: 304.2857 \nOrd: in Antall: 2020 Sekvens: 252.5000 \nOrd: for Antall: 969 Sekvens: 107.6667 \nOrd: that Antall: 829 Sekvens: 82.9000 \n\n\"\"\"\n\nbrown_tagged_sents = nltk.corpus.brown.tagged_sents(categories='adventure')\nadventure = [[w.lower() for w in line] for line in nltk.corpus.brown.sents(categories='adventure')]\n\n#m.regularTagger(adventure)\ncheckTaggStandardAdv = m.analyseRegularTagger('adventure')\ncheckTaggStandardFic = m.analyseRegularTagger('fiction')\ncheckTaggModifiedAdv = m.analyseRegularTagger('adventure', 'modified')\ncheckTaggModifiedFic = m.analyseRegularTagger('fiction', 'modified')\n\nprint(\"Standard vs modifisert tagging ved hjelp av reguleart uttrykk\")\nprint(\"Med corpus: 'adventure'\")\nprint(\" Standard: %4.2f Modifisert: %4.2f \" %(checkTaggStandardFic, checkTaggModifiedAdv))\nprint(\"Med corpus: 'fiction'\")\nprint(\" Standard: %4.2f Modifisert: %4.2f \" %(checkTaggStandardFic, checkTaggModifiedFic))\n\ninfile = open(\"test_setninger.txt\")\ntekst = []\n\nfor line in infile:\n words = line.split(\" \")\n tekst.append(words)\ninfile.close()\n\n# fikser at alle ord har smaa bokstaver:\ntekst = [[w.lower() for w in line] for line in tekst]\n\ntaggerTekst = m.regularTagger(tekst, 'modified')\n\nfor sentence in taggerTekst:\n for taggs in sentence:\n print(taggs)\n\n\"\"\" Oppgave 3 - Ordklassetagging med regulære uttrykk\nStandard vs modifisert tagging ved hjelp av reguleart uttrykk\nMed corpus: 'adventure'\n Standard: 0.18 Modifisert: 0.41 \nMed corpus: 'fiction'\n Standard: 0.18 Modifisert: 0.40 \n...\n..\n... skriver ut tagger som blir kopiert inn til test_setninger_m_taggs.txt\n..\n\nKommentarer for ytterligere forbedrelser:\n1. said skulle ha veart kattegorisert som verb: VBD\n2. he burde veare et pronom\n3. had burde veare et verb til have\n\noppdatere reguleare utrykk:\n1 og 3: (r'(.*ed|.*id|had)$', 'VBD')\n\n2. regler for pronoum har jeg ikke lagt inn i det hele tatt saa dette er noe som\nkan tilfoeres\n\"\"\"\n",
"import nltk\nfrom nltk import bigrams\nfrom lm import *\nm = LM()\nnews = nltk.corpus.brown.sents(categories='news')\nadventure = nltk.corpus.brown.sents(categories='adventure')\nperpNews = 0.0\nperpAdventure = 0.0\nperpNews = m.perplexity(news)\nperpAdventure = m.perplexity(adventure)\nprint('Perpleksitet til news: %.2f' % perpNews)\nprint('Perpleksitet til adventure: %.2f' % perpAdventure)\n<docstring token>\nzippy = m.zipfity(news)\nfor sekvens in zippy:\n print('Ord: %4s Antall: %4d Sekvens: %.4f ' % (sekvens[0], sekvens[1],\n sekvens[2]))\n<docstring token>\nbrown_tagged_sents = nltk.corpus.brown.tagged_sents(categories='adventure')\nadventure = [[w.lower() for w in line] for line in nltk.corpus.brown.sents(\n categories='adventure')]\ncheckTaggStandardAdv = m.analyseRegularTagger('adventure')\ncheckTaggStandardFic = m.analyseRegularTagger('fiction')\ncheckTaggModifiedAdv = m.analyseRegularTagger('adventure', 'modified')\ncheckTaggModifiedFic = m.analyseRegularTagger('fiction', 'modified')\nprint('Standard vs modifisert tagging ved hjelp av reguleart uttrykk')\nprint(\"Med corpus: 'adventure'\")\nprint(' Standard: %4.2f Modifisert: %4.2f ' % (checkTaggStandardFic,\n checkTaggModifiedAdv))\nprint(\"Med corpus: 'fiction'\")\nprint(' Standard: %4.2f Modifisert: %4.2f ' % (checkTaggStandardFic,\n checkTaggModifiedFic))\ninfile = open('test_setninger.txt')\ntekst = []\nfor line in infile:\n words = line.split(' ')\n tekst.append(words)\ninfile.close()\ntekst = [[w.lower() for w in line] for line in tekst]\ntaggerTekst = m.regularTagger(tekst, 'modified')\nfor sentence in taggerTekst:\n for taggs in sentence:\n print(taggs)\n<docstring token>\n",
"<import token>\nm = LM()\nnews = nltk.corpus.brown.sents(categories='news')\nadventure = nltk.corpus.brown.sents(categories='adventure')\nperpNews = 0.0\nperpAdventure = 0.0\nperpNews = m.perplexity(news)\nperpAdventure = m.perplexity(adventure)\nprint('Perpleksitet til news: %.2f' % perpNews)\nprint('Perpleksitet til adventure: %.2f' % perpAdventure)\n<docstring token>\nzippy = m.zipfity(news)\nfor sekvens in zippy:\n print('Ord: %4s Antall: %4d Sekvens: %.4f ' % (sekvens[0], sekvens[1],\n sekvens[2]))\n<docstring token>\nbrown_tagged_sents = nltk.corpus.brown.tagged_sents(categories='adventure')\nadventure = [[w.lower() for w in line] for line in nltk.corpus.brown.sents(\n categories='adventure')]\ncheckTaggStandardAdv = m.analyseRegularTagger('adventure')\ncheckTaggStandardFic = m.analyseRegularTagger('fiction')\ncheckTaggModifiedAdv = m.analyseRegularTagger('adventure', 'modified')\ncheckTaggModifiedFic = m.analyseRegularTagger('fiction', 'modified')\nprint('Standard vs modifisert tagging ved hjelp av reguleart uttrykk')\nprint(\"Med corpus: 'adventure'\")\nprint(' Standard: %4.2f Modifisert: %4.2f ' % (checkTaggStandardFic,\n checkTaggModifiedAdv))\nprint(\"Med corpus: 'fiction'\")\nprint(' Standard: %4.2f Modifisert: %4.2f ' % (checkTaggStandardFic,\n checkTaggModifiedFic))\ninfile = open('test_setninger.txt')\ntekst = []\nfor line in infile:\n words = line.split(' ')\n tekst.append(words)\ninfile.close()\ntekst = [[w.lower() for w in line] for line in tekst]\ntaggerTekst = m.regularTagger(tekst, 'modified')\nfor sentence in taggerTekst:\n for taggs in sentence:\n print(taggs)\n<docstring token>\n",
"<import token>\n<assignment token>\nprint('Perpleksitet til news: %.2f' % perpNews)\nprint('Perpleksitet til adventure: %.2f' % perpAdventure)\n<docstring token>\n<assignment token>\nfor sekvens in zippy:\n print('Ord: %4s Antall: %4d Sekvens: %.4f ' % (sekvens[0], sekvens[1],\n sekvens[2]))\n<docstring token>\n<assignment token>\nprint('Standard vs modifisert tagging ved hjelp av reguleart uttrykk')\nprint(\"Med corpus: 'adventure'\")\nprint(' Standard: %4.2f Modifisert: %4.2f ' % (checkTaggStandardFic,\n checkTaggModifiedAdv))\nprint(\"Med corpus: 'fiction'\")\nprint(' Standard: %4.2f Modifisert: %4.2f ' % (checkTaggStandardFic,\n checkTaggModifiedFic))\n<assignment token>\nfor line in infile:\n words = line.split(' ')\n tekst.append(words)\ninfile.close()\n<assignment token>\nfor sentence in taggerTekst:\n for taggs in sentence:\n print(taggs)\n<docstring token>\n",
"<import token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<code token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<docstring token>\n"
] | false |
908 |
2f489a87e40bea979000dd429cc4cb0150ff4c3b
|
from flask import escape
import pandas as pd
import json
import requests
with open('result.csv', newline='') as f:
df = pd.read_csv(f)
def get_level_diff(word, only_common=False):
if only_common:
word_df = df[(df['word']==word) & (df['common']==1)]
else:
word_df = df[df['word']==word]
return (word_df.values[0][3], word_df.values[0][8]) if len(word_df) > 0 else (None, None)
# order words based on either level or frequency.
def order_words(words, by=0, reverse=False, only_common=False):
    if by not in {0, 1}:
        raise Exception("by is either 0 (by level), 1 (by frequency)")
    if by == 1:
        reverse = not reverse
word_results = []
for word in words:
level, freq = get_level_diff(word, only_common=only_common)
        if level is not None:
            if by == 0:
                word_results.append((word, level))
            else:
                word_results.append((word, freq))
    word_results.sort(key=lambda x: x[1], reverse=reverse)
return word_results
def translate_words(words, target):
key = "AIzaSyCmB0XTpv7PBLGllUBGyTVZ8syJJz2rL-w"
words_string = ""
for word in words:
words_string += "&q="
words_string += word
url = f"https://translation.googleapis.com/language/translate/v2?target={target}&key={key}{words_string}"
res = json.loads(requests.get(url).content)['data']['translations']
return [s['translatedText'] for s in res]
def hello_http(request):
request_args = request.args
#'words', 'lang-from', 'lang-to', 'by', 'reverse'
if request_args and 'words' in request_args:
words = json.loads(request_args['words'])
if isinstance(words, list) and len(words) > 0:
target = request_args.get('target', 'es')
by_str = request_args.get('by', 'level')
by = 1 if by_str == 'freq' else 0
reverse = request_args.get('reverse', 'false') == 'true'
only_common = request_args.get('only-common', 'false') == 'true'
results = order_words(words, by=by, reverse=reverse, only_common=only_common)
translated = translate_words([result[0] for result in results], target)
return json.dumps([[results[i][0], results[i][1], translated[i]] for i in range(len(results))])
else:
return "not list"
else:
return "error"
|
[
"from flask import escape\nimport pandas as pd\nimport json\nimport requests\n\nwith open('result.csv', newline='') as f:\n df = pd.read_csv(f)\n\ndef get_level_diff(word, only_common=False):\n if only_common:\n word_df = df[(df['word']==word) & (df['common']==1)]\n else:\n word_df = df[df['word']==word]\n return (word_df.values[0][3], word_df.values[0][8]) if len(word_df) > 0 else (None, None)\n\n# order words based on either level or frequency. \ndef order_words(words, by=0, reverse=False, only_common=False):\n if (by not in {0, 1}): raise Exception(\"by is either 0 (by level), 1 (by frequency)\")\n if (by == 1): reverse = not reverse\n \n word_results = []\n for word in words:\n level, freq = get_level_diff(word, only_common=only_common)\n if level != None:\n if by == 0:\n word_results.append((word, level))\n else:\n word_results.append((word, freq))\n word_results.sort(key=lambda x : x[1], reverse=reverse)\n return word_results\n\ndef translate_words(words, target):\n key = \"AIzaSyCmB0XTpv7PBLGllUBGyTVZ8syJJz2rL-w\"\n words_string = \"\"\n for word in words:\n words_string += \"&q=\"\n words_string += word\n url = f\"https://translation.googleapis.com/language/translate/v2?target={target}&key={key}{words_string}\"\n res = json.loads(requests.get(url).content)['data']['translations']\n return [s['translatedText'] for s in res]\n\ndef hello_http(request):\n request_args = request.args\n\n #'words', 'lang-from', 'lang-to', 'by', 'reverse'\n\n if request_args and 'words' in request_args:\n words = json.loads(request_args['words'])\n if isinstance(words, list) and len(words) > 0:\n target = request_args.get('target', 'es')\n by_str = request_args.get('by', 'level')\n by = 1 if by_str == 'freq' else 0\n reverse = request_args.get('reverse', 'false') == 'true'\n only_common = request_args.get('only-common', 'false') == 'true'\n \n results = order_words(words, by=by, reverse=reverse, only_common=only_common)\n translated = translate_words([result[0] for result in results], target)\n return json.dumps([[results[i][0], results[i][1], translated[i]] for i in range(len(results))])\n else:\n return \"not list\"\n else:\n return \"error\"",
"from flask import escape\nimport pandas as pd\nimport json\nimport requests\nwith open('result.csv', newline='') as f:\n df = pd.read_csv(f)\n\n\ndef get_level_diff(word, only_common=False):\n if only_common:\n word_df = df[(df['word'] == word) & (df['common'] == 1)]\n else:\n word_df = df[df['word'] == word]\n return (word_df.values[0][3], word_df.values[0][8]) if len(word_df\n ) > 0 else (None, None)\n\n\ndef order_words(words, by=0, reverse=False, only_common=False):\n if by not in {0, 1}:\n raise Exception('by is either 0 (by level), 1 (by frequency)')\n if by == 1:\n reverse = not reverse\n word_results = []\n for word in words:\n level, freq = get_level_diff(word, only_common=only_common)\n if level != None:\n if by == 0:\n word_results.append((word, level))\n else:\n word_results.append((word, freq))\n word_results.sort(key=lambda x: x[1], reverse=reverse)\n return word_results\n\n\ndef translate_words(words, target):\n key = 'AIzaSyCmB0XTpv7PBLGllUBGyTVZ8syJJz2rL-w'\n words_string = ''\n for word in words:\n words_string += '&q='\n words_string += word\n url = (\n f'https://translation.googleapis.com/language/translate/v2?target={target}&key={key}{words_string}'\n )\n res = json.loads(requests.get(url).content)['data']['translations']\n return [s['translatedText'] for s in res]\n\n\ndef hello_http(request):\n request_args = request.args\n if request_args and 'words' in request_args:\n words = json.loads(request_args['words'])\n if isinstance(words, list) and len(words) > 0:\n target = request_args.get('target', 'es')\n by_str = request_args.get('by', 'level')\n by = 1 if by_str == 'freq' else 0\n reverse = request_args.get('reverse', 'false') == 'true'\n only_common = request_args.get('only-common', 'false') == 'true'\n results = order_words(words, by=by, reverse=reverse,\n only_common=only_common)\n translated = translate_words([result[0] for result in results],\n target)\n return json.dumps([[results[i][0], results[i][1], translated[i]\n ] for i in range(len(results))])\n else:\n return 'not list'\n else:\n return 'error'\n",
"<import token>\nwith open('result.csv', newline='') as f:\n df = pd.read_csv(f)\n\n\ndef get_level_diff(word, only_common=False):\n if only_common:\n word_df = df[(df['word'] == word) & (df['common'] == 1)]\n else:\n word_df = df[df['word'] == word]\n return (word_df.values[0][3], word_df.values[0][8]) if len(word_df\n ) > 0 else (None, None)\n\n\ndef order_words(words, by=0, reverse=False, only_common=False):\n if by not in {0, 1}:\n raise Exception('by is either 0 (by level), 1 (by frequency)')\n if by == 1:\n reverse = not reverse\n word_results = []\n for word in words:\n level, freq = get_level_diff(word, only_common=only_common)\n if level != None:\n if by == 0:\n word_results.append((word, level))\n else:\n word_results.append((word, freq))\n word_results.sort(key=lambda x: x[1], reverse=reverse)\n return word_results\n\n\ndef translate_words(words, target):\n key = 'AIzaSyCmB0XTpv7PBLGllUBGyTVZ8syJJz2rL-w'\n words_string = ''\n for word in words:\n words_string += '&q='\n words_string += word\n url = (\n f'https://translation.googleapis.com/language/translate/v2?target={target}&key={key}{words_string}'\n )\n res = json.loads(requests.get(url).content)['data']['translations']\n return [s['translatedText'] for s in res]\n\n\ndef hello_http(request):\n request_args = request.args\n if request_args and 'words' in request_args:\n words = json.loads(request_args['words'])\n if isinstance(words, list) and len(words) > 0:\n target = request_args.get('target', 'es')\n by_str = request_args.get('by', 'level')\n by = 1 if by_str == 'freq' else 0\n reverse = request_args.get('reverse', 'false') == 'true'\n only_common = request_args.get('only-common', 'false') == 'true'\n results = order_words(words, by=by, reverse=reverse,\n only_common=only_common)\n translated = translate_words([result[0] for result in results],\n target)\n return json.dumps([[results[i][0], results[i][1], translated[i]\n ] for i in range(len(results))])\n else:\n return 'not list'\n else:\n return 'error'\n",
"<import token>\n<code token>\n\n\ndef get_level_diff(word, only_common=False):\n if only_common:\n word_df = df[(df['word'] == word) & (df['common'] == 1)]\n else:\n word_df = df[df['word'] == word]\n return (word_df.values[0][3], word_df.values[0][8]) if len(word_df\n ) > 0 else (None, None)\n\n\ndef order_words(words, by=0, reverse=False, only_common=False):\n if by not in {0, 1}:\n raise Exception('by is either 0 (by level), 1 (by frequency)')\n if by == 1:\n reverse = not reverse\n word_results = []\n for word in words:\n level, freq = get_level_diff(word, only_common=only_common)\n if level != None:\n if by == 0:\n word_results.append((word, level))\n else:\n word_results.append((word, freq))\n word_results.sort(key=lambda x: x[1], reverse=reverse)\n return word_results\n\n\ndef translate_words(words, target):\n key = 'AIzaSyCmB0XTpv7PBLGllUBGyTVZ8syJJz2rL-w'\n words_string = ''\n for word in words:\n words_string += '&q='\n words_string += word\n url = (\n f'https://translation.googleapis.com/language/translate/v2?target={target}&key={key}{words_string}'\n )\n res = json.loads(requests.get(url).content)['data']['translations']\n return [s['translatedText'] for s in res]\n\n\ndef hello_http(request):\n request_args = request.args\n if request_args and 'words' in request_args:\n words = json.loads(request_args['words'])\n if isinstance(words, list) and len(words) > 0:\n target = request_args.get('target', 'es')\n by_str = request_args.get('by', 'level')\n by = 1 if by_str == 'freq' else 0\n reverse = request_args.get('reverse', 'false') == 'true'\n only_common = request_args.get('only-common', 'false') == 'true'\n results = order_words(words, by=by, reverse=reverse,\n only_common=only_common)\n translated = translate_words([result[0] for result in results],\n target)\n return json.dumps([[results[i][0], results[i][1], translated[i]\n ] for i in range(len(results))])\n else:\n return 'not list'\n else:\n return 'error'\n",
"<import token>\n<code token>\n\n\ndef get_level_diff(word, only_common=False):\n if only_common:\n word_df = df[(df['word'] == word) & (df['common'] == 1)]\n else:\n word_df = df[df['word'] == word]\n return (word_df.values[0][3], word_df.values[0][8]) if len(word_df\n ) > 0 else (None, None)\n\n\ndef order_words(words, by=0, reverse=False, only_common=False):\n if by not in {0, 1}:\n raise Exception('by is either 0 (by level), 1 (by frequency)')\n if by == 1:\n reverse = not reverse\n word_results = []\n for word in words:\n level, freq = get_level_diff(word, only_common=only_common)\n if level != None:\n if by == 0:\n word_results.append((word, level))\n else:\n word_results.append((word, freq))\n word_results.sort(key=lambda x: x[1], reverse=reverse)\n return word_results\n\n\n<function token>\n\n\ndef hello_http(request):\n request_args = request.args\n if request_args and 'words' in request_args:\n words = json.loads(request_args['words'])\n if isinstance(words, list) and len(words) > 0:\n target = request_args.get('target', 'es')\n by_str = request_args.get('by', 'level')\n by = 1 if by_str == 'freq' else 0\n reverse = request_args.get('reverse', 'false') == 'true'\n only_common = request_args.get('only-common', 'false') == 'true'\n results = order_words(words, by=by, reverse=reverse,\n only_common=only_common)\n translated = translate_words([result[0] for result in results],\n target)\n return json.dumps([[results[i][0], results[i][1], translated[i]\n ] for i in range(len(results))])\n else:\n return 'not list'\n else:\n return 'error'\n",
"<import token>\n<code token>\n\n\ndef get_level_diff(word, only_common=False):\n if only_common:\n word_df = df[(df['word'] == word) & (df['common'] == 1)]\n else:\n word_df = df[df['word'] == word]\n return (word_df.values[0][3], word_df.values[0][8]) if len(word_df\n ) > 0 else (None, None)\n\n\n<function token>\n<function token>\n\n\ndef hello_http(request):\n request_args = request.args\n if request_args and 'words' in request_args:\n words = json.loads(request_args['words'])\n if isinstance(words, list) and len(words) > 0:\n target = request_args.get('target', 'es')\n by_str = request_args.get('by', 'level')\n by = 1 if by_str == 'freq' else 0\n reverse = request_args.get('reverse', 'false') == 'true'\n only_common = request_args.get('only-common', 'false') == 'true'\n results = order_words(words, by=by, reverse=reverse,\n only_common=only_common)\n translated = translate_words([result[0] for result in results],\n target)\n return json.dumps([[results[i][0], results[i][1], translated[i]\n ] for i in range(len(results))])\n else:\n return 'not list'\n else:\n return 'error'\n",
"<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n\n\ndef hello_http(request):\n request_args = request.args\n if request_args and 'words' in request_args:\n words = json.loads(request_args['words'])\n if isinstance(words, list) and len(words) > 0:\n target = request_args.get('target', 'es')\n by_str = request_args.get('by', 'level')\n by = 1 if by_str == 'freq' else 0\n reverse = request_args.get('reverse', 'false') == 'true'\n only_common = request_args.get('only-common', 'false') == 'true'\n results = order_words(words, by=by, reverse=reverse,\n only_common=only_common)\n translated = translate_words([result[0] for result in results],\n target)\n return json.dumps([[results[i][0], results[i][1], translated[i]\n ] for i in range(len(results))])\n else:\n return 'not list'\n else:\n return 'error'\n",
"<import token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
909 |
7a65a5522db97a7a113a412883b640feede5bcee
|
from layout import UIDump
import Tkinter
from Tkinter import *
from ScriptGenerator import ScriptGen
class Divide_and_Conquer():
def __init__(self, XY):
self.XY = XY
self.user_val = 'None'
self.flag = 'green'
print self.XY
def bounds_Compare(self, bounds, filename):
""" Compares the bounds with Master XY and generates the Script fro given Element. """
# removed "android.widget.Spinner", "android.widget.ExpandableListView" from reqlist, it's interfering with the view.
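        # Assumption: each bounds entry is laid out as [x_min, x_max, y_min, y_max]; the range checks below rely on that order.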
reqlist = ["android.widget.EditText",
"android.widget.Button", "android.widget.CheckBox", "android.widget.RadioButton", "android.widget.TextView", "android.widget.RelativeLayout",
"android.widget.ImageView", "android.app.Dialogue", "android.view.View"]
ignore_list = [None,'','None']
collection = []
logs = []
count = 0
len_bounds = len(bounds)
for i in bounds:
print '\n ---------------------------------------------- \n'
# print "for every bound block" ----> DEBUG < -----
if int(bounds[count][2]) <= self.XY[1] <= int(bounds[count][3]):
if int(bounds[count][0]) <= self.XY[0] <= int(bounds[count][1]):
# print "current X_Y : ", str(self.XY)
# print "current bounds : ", str(UIDump.bounds[count])
# print "unique id : ", str(UIDump.check_unique_id[count])
# print "resource id : ", str(UIDump.check_resource_id[count])
# print "current text : ", str(UIDump.check_text[count])
# print "in range block" ----> DEBUG < -----
if UIDump.elements[count] in reqlist:
# print "in reqlist block" ----> DEBUG < -----
if UIDump.elements[count] == reqlist[0]:
# print "EditText block" ----> DEBUG < -----
window = Tkinter.Tk()
                            window.resizable(width=False, height=False)
window.geometry("200x80")
l1=Label(window,width=30,text="Enter Text to Type: ")
l1.pack()
self.entry_id = StringVar()
e1 = Entry(window, width=30,textvariable=self.entry_id)
e1.pack()
                            def on_ok(args=None):
self.user_val = e1.get()
window.destroy()
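                                # Prefer the resource id, then the unique id, then the visible text; fall back to raw co-ordinates.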
if self.resource_id not in ignore_list:
ScriptGen(filename).script("vc.findViewByIdOrRaise('{id}').setText('{text}')\n".format(id=self.resource_id, text=self.user_val))
ScriptGen(filename).log("#LOG({classname}): Cleared and Typed : '{text}' on id : '{id}'\n".format(classname =self.classname,text=self.user_val, id=self.resource_id))
elif self.unique_id not in ignore_list:
ScriptGen(filename).script("vc.findViewByIdOrRaise('{id}').setText('{text}')\n".format(id=self.unique_id, text=self.user_val))
ScriptGen(filename).log("#LOG({classname}): Cleared and Typed : '{text}'\n".format(classname =self.classname,text=self.user_val))
elif UIDump.check_text[count] not in ignore_list:
ScriptGen(filename).script("vc.findViewWithTextOrRaise('{id_text}').setText('{text}')\n".format(id_text=UIDump.check_text[count], text=self.user_val))
ScriptGen(filename).log("#LOG({classname}): Cleared and Typed : '{text}' on Element with text : '{id_text}'\n".format(classname =self.classname,id_text=UIDump.check_text[count], text=self.user_val))
else :
ScriptGen(filename).script("device.touchDip({X},{Y},0)\n".format(X=int(self.XY[0]), Y=int(self.XY[1])))
ScriptGen(filename).log("#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\n".format(classname ="Vulnerable",X=int(self.XY[0]), Y=int(self.XY[1])))
def framedestroy():
window.destroy()
self.unique_id = UIDump.check_unique_id[count]
self.resource_id = UIDump.check_resource_id[count]
self.classname = UIDump.check_className[count]
                            b1 = Button(window, text="Ok", width=10, command=on_ok)
b1.pack(side=LEFT)
b1.place(x=10,y=50)
b2=Button(window, text = "Cancel", width=10, command = framedestroy)
b2.pack(side=RIGHT)
b2.place(x=110,y=50)
                            window.bind('<Return>', on_ok)
window.mainloop()
self.flag = 'red'
break
elif UIDump.elements[count] in reqlist[1:4]:
# print "Button block" ----> DEBUG < -----
self.unique_id = UIDump.check_unique_id[count]
self.resource_id = UIDump.check_resource_id[count]
self.classname = UIDump.check_className[count]
if UIDump.check_text[count] not in ignore_list:
log_ = "#LOG({classname}): Clicked on element with text : '{id}'\n".format(classname =self.classname,id=UIDump.check_text[count])
line = "vc.findViewWithTextOrRaise('{id}').touch()\n\tvc.sleep(3)\n".format(id=UIDump.check_text[count])
if line not in collection:
collection.append(line)
logs.append(log_)
break
elif self.resource_id not in ignore_list:
log_ = "#LOG({classname}): Clicked on : '{id}'\n".format(classname =self.classname,id=self.resource_id)
line = "vc.findViewByIdOrRaise('{id}').touch()\n\tvc.sleep(3)\n".format(id=self.resource_id)
if line not in collection:
collection.append(line)
logs.append(log_)
break
elif self.unique_id not in ignore_list:
log_ = "#LOG({classname}): Clicked on : '{id}'\n".format(classname =self.classname,id=self.unique_id)
line = "vc.findViewByIdOrRaise('{id_text}').touch()\n\tvc.sleep(3)\n".format(id_text=self.unique_id)
if line not in collection:
collection.append(line)
logs.append(log_)
break
else :
log_ = "#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\n".format(classname =self.classname,X=int(self.XY[0]), Y=int(self.XY[1]))
line = "device.touchDip({X},{Y},0)\n\tvc.sleep(3)\n".format(X=int(self.XY[0]), Y=int(self.XY[1]))
if line not in collection:
collection.append(line)
logs.append(log_)
break
elif UIDump.elements[count] in reqlist[4:]:
# print "remaining views block" ----> DEBUG < -----
self.unique_id = UIDump.check_unique_id[count]
self.resource_id = UIDump.check_resource_id[count]
self.classname = UIDump.check_className[count]
if UIDump.check_text[count] not in ignore_list:
log_ = "#LOG({classname}): Clicked on element with Text : '{id}'\n".format(classname =self.classname,id=UIDump.check_text[count])
line = "vc.findViewWithTextOrRaise('{id}').touch()\n".format(id=UIDump.check_text[count])
if line not in collection:
collection.append(line)
logs.append(log_)
elif self.resource_id not in ignore_list:
log_ = "#LOG({classname}): Clicked on : '{id}'\n".format(classname =self.classname,id=self.resource_id)
line = "vc.findViewByIdOrRaise('{id}').touch()\n".format(id=self.resource_id)
if line not in collection:
collection.append(line)
logs.append(log_)
elif self.unique_id not in ignore_list:
log_ = "#LOG({classname}): Clicked on : '{id}'\n".format(classname =self.classname,id=self.unique_id)
line = "vc.findViewByIdOrRaise('{id_text}').touch()\n".format(id_text=self.unique_id)
if line not in collection:
collection.append(line)
logs.append(log_)
else :
log_ = "#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\n".format(classname ='Vulnerable',X=int(self.XY[0]), Y=int(self.XY[1]))
line = "device.touchDip({X},{Y},0)\n\tvc.sleep(3)\n".format(X=int(self.XY[0]), Y=int(self.XY[1]))
if line not in collection:
collection.append(line)
logs.append(log_)
else:
# print "not in imp view block" ----> DEBUG < -----
log_ = "#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\n".format(classname ='Vulnerable',X=int(self.XY[0]), Y=int(self.XY[1]))
line = "device.touchDip({X},{Y},0)\n\tvc.sleep(3)\n".format(X=int(self.XY[0]), Y=int(self.XY[1]))
if line not in collection:
collection.append(line)
logs.append(log_)
break
elif UIDump.elements[count] in ["android.widget.FrameLayout"]:
# print "FrameLayout block" ----> DEBUG < -----
log_ = "#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\n".format(classname ='Vulnerable',X=int(self.XY[0]), Y=int(self.XY[1]))
line = "device.touchDip({X},{Y},0)\n\tvc.sleep(3)\n".format(X=int(self.XY[0]), Y=int(self.XY[1]))
if line not in collection:
collection.append(line)
logs.append(log_)
count += 1
else :
# print "nothing matches block" ----> DEBUG < -----
log_ = "#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\n".format(classname ='Vulnerable',X=int(self.XY[0]), Y=int(self.XY[1]))
line = "device.touchDip({X},{Y},0)\n\tvc.sleep(3)\n".format(X=int(self.XY[0]), Y=int(self.XY[1]))
if line not in collection:
collection.append(line)
logs.append(log_)
print collection
print logs
# ----> DEBUG < -----
if self.flag == 'green':
ScriptGen(filename).script(collection[-1])
ScriptGen(filename).log(logs[-1])
else:
pass
def main():
    # Placeholder driver: the tap co-ordinates (XY), the parsed bounds list and the
    # output script filename must be supplied by the caller; they are undefined here.
    Divide_and_Conquer(XY).bounds_Compare(bounds, filename)
if __name__ == '__main__':
main()
|
[
"from layout import UIDump\nimport Tkinter \nfrom Tkinter import *\nfrom ScriptGenerator import ScriptGen\n\nclass Divide_and_Conquer():\n\n\tdef __init__(self, XY):\n\t\tself.XY = XY\n\t\tself.user_val = 'None'\n\t\tself.flag = 'green'\n\n\t\tprint self.XY\n\t\n\tdef bounds_Compare(self, bounds, filename):\n\t\t\"\"\" Compares the bounds with Master XY and generates the Script fro given Element. \"\"\"\n\n\t\t# removed \"android.widget.Spinner\", \"android.widget.ExpandableListView\" from reqlist, it's interfering with the view.\n \t\t\n \t\treqlist = [\"android.widget.EditText\",\n \t\t\"android.widget.Button\", \"android.widget.CheckBox\", \"android.widget.RadioButton\", \"android.widget.TextView\", \"android.widget.RelativeLayout\",\n \t\t\"android.widget.ImageView\", \"android.app.Dialogue\", \"android.view.View\"]\n\n \t\tignore_list = [None,'','None']\n\t\t\n\t\tcollection = []\n\t\tlogs = []\n\n\t\tcount = 0\n\t\tlen_bounds = len(bounds)\n\t\t\n\t\tfor i in bounds:\n\t\t\tprint '\\n ---------------------------------------------- \\n'\n\t\t\t# print \"for every bound block\" ----> DEBUG < -----\n\t\t\tif int(bounds[count][2]) <= self.XY[1] <= int(bounds[count][3]):\n\t\t\t\tif int(bounds[count][0]) <= self.XY[0] <= int(bounds[count][1]):\n\t\t\t\t\t\n\t\t\t\t\t# print \"current X_Y : \", str(self.XY)\n\t\t\t\t\t# print \"current bounds : \", str(UIDump.bounds[count])\n\t\t\t\t\t# print \"unique id : \", str(UIDump.check_unique_id[count])\n\t\t\t\t\t# print \"resource id : \", str(UIDump.check_resource_id[count])\n\t\t\t\t\t# print \"current text : \", str(UIDump.check_text[count])\n\n\t\t\t\t\t# print \"in range block\" ----> DEBUG < -----\n\t\t\t\t\t\t\n\t\t\t\t\tif UIDump.elements[count] in reqlist:\n\t\t\t\t\t\t# print \"in reqlist block\" ----> DEBUG < -----\n\t\t\t\t\t\t\n\t\t\t\t\t\tif UIDump.elements[count] == reqlist[0]:\n\t\t\t\t\t\t\t# print \"EditText block\" ----> DEBUG < -----\n\n\t\t\t\t\t\t\twindow = Tkinter.Tk()\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\twindow.resizable(width=False,height=False);\n\t\t\t\t\t\t\twindow.geometry(\"200x80\")\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tl1=Label(window,width=30,text=\"Enter Text to Type: \")\n\t\t\t\t\t\t\tl1.pack()\n\n\t\t\t\t\t\t\tself.entry_id = StringVar() \n\t\t\t\t\t\t\te1 = Entry(window, width=30,textvariable=self.entry_id)\n\t\t\t\t\t\t\te1.pack()\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tdef input(args= None):\n\t\t\t\t\t\t\t\tself.user_val = e1.get()\n\t\t\t\t\t\t\t\twindow.destroy()\t\t\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tif self.resource_id not in ignore_list:\n\t\t\t\t\t\t\t\t\tScriptGen(filename).script(\"vc.findViewByIdOrRaise('{id}').setText('{text}')\\n\".format(id=self.resource_id, text=self.user_val))\n\t\t\t\t\t\t\t\t\tScriptGen(filename).log(\"#LOG({classname}): Cleared and Typed : '{text}' on id : '{id}'\\n\".format(classname =self.classname,text=self.user_val, id=self.resource_id))\n\t\t\t\t\t\t\t\t\t\n\n\t\t\t\t\t\t\t\telif self.unique_id not in ignore_list:\n\t\t\t\t\t\t\t\t\tScriptGen(filename).script(\"vc.findViewByIdOrRaise('{id}').setText('{text}')\\n\".format(id=self.unique_id, text=self.user_val))\n\t\t\t\t\t\t\t\t\tScriptGen(filename).log(\"#LOG({classname}): Cleared and Typed : '{text}'\\n\".format(classname =self.classname,text=self.user_val))\n\t\t\t\t\t\t\t\t\t\t\t\n\n\t\t\t\t\t\t\t\telif UIDump.check_text[count] not in ignore_list:\n\t\t\t\t\t\t\t\t\tScriptGen(filename).script(\"vc.findViewWithTextOrRaise('{id_text}').setText('{text}')\\n\".format(id_text=UIDump.check_text[count], 
text=self.user_val))\n\t\t\t\t\t\t\t\t\tScriptGen(filename).log(\"#LOG({classname}): Cleared and Typed : '{text}' on Element with text : '{id_text}'\\n\".format(classname =self.classname,id_text=UIDump.check_text[count], text=self.user_val))\n\t\t\t\t\t\t\t\t\n\n\t\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\t\tScriptGen(filename).script(\"device.touchDip({X},{Y},0)\\n\".format(X=int(self.XY[0]), Y=int(self.XY[1])))\n\t\t\t\t\t\t\t\t\tScriptGen(filename).log(\"#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\\n\".format(classname =\"Vulnerable\",X=int(self.XY[0]), Y=int(self.XY[1])))\n\n\t\t\t\t\t\t\tdef framedestroy():\n\t\t\t\t\t\t\t\twindow.destroy()\n\n\t\t\t\t\t\t\tself.unique_id = UIDump.check_unique_id[count]\n\t\t\t\t\t\t\tself.resource_id = UIDump.check_resource_id[count]\n\t\t\t\t\t\t\tself.classname = UIDump.check_className[count]\n\n\t\t\t\t\t\t\tb1=Button(window,text=\"Ok\",width=10, command = input)\n\t\t\t\t\t\t\tb1.pack(side=LEFT)\n\t\t\t\t\t\t\tb1.place(x=10,y=50)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tb2=Button(window, text = \"Cancel\", width=10, command = framedestroy)\n\t\t\t\t\t\t\tb2.pack(side=RIGHT)\n\t\t\t\t\t\t\tb2.place(x=110,y=50)\n\n\t\t\t\t\t\t\twindow.bind('<Return>', input)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\twindow.mainloop()\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tself.flag = 'red'\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\n\t\t\t\t\t\telif UIDump.elements[count] in reqlist[1:4]:\n\t\t\t\t\t\t\t# print \"Button block\" ----> DEBUG < -----\n\n\t\t\t\t\t\t\tself.unique_id = UIDump.check_unique_id[count]\n\t\t\t\t\t\t\tself.resource_id = UIDump.check_resource_id[count]\n\t\t\t\t\t\t\tself.classname = UIDump.check_className[count]\n\n\t\t\t\t\t\t\tif UIDump.check_text[count] not in ignore_list:\n\t\t\t\t\t\t\t\tlog_ = \"#LOG({classname}): Clicked on element with text : '{id}'\\n\".format(classname =self.classname,id=UIDump.check_text[count])\n\t\t\t\t\t\t\t\tline = \"vc.findViewWithTextOrRaise('{id}').touch()\\n\\tvc.sleep(3)\\n\".format(id=UIDump.check_text[count])\n\t\t\t\t\t\t\t\tif line not in collection: \n\t\t\t\t\t\t\t\t\tcollection.append(line)\n\t\t\t\t\t\t\t\t\tlogs.append(log_)\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\t\telif self.resource_id not in ignore_list:\n\t\t\t\t\t\t\t\tlog_ = \"#LOG({classname}): Clicked on : '{id}'\\n\".format(classname =self.classname,id=self.resource_id)\n\t\t\t\t\t\t\t\tline = \"vc.findViewByIdOrRaise('{id}').touch()\\n\\tvc.sleep(3)\\n\".format(id=self.resource_id)\n\t\t\t\t\t\t\t\tif line not in collection: \n\t\t\t\t\t\t\t\t\tcollection.append(line)\n\t\t\t\t\t\t\t\t\tlogs.append(log_)\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\t\telif self.unique_id not in ignore_list:\n\t\t\t\t\t\t\t\tlog_ = \"#LOG({classname}): Clicked on : '{id}'\\n\".format(classname =self.classname,id=self.unique_id)\n\t\t\t\t\t\t\t\tline = \"vc.findViewByIdOrRaise('{id_text}').touch()\\n\\tvc.sleep(3)\\n\".format(id_text=self.unique_id)\n\t\t\t\t\t\t\t\tif line not in collection: \n\t\t\t\t\t\t\t\t\tcollection.append(line)\n\t\t\t\t\t\t\t\t\tlogs.append(log_)\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\tlog_ = \"#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\\n\".format(classname =self.classname,X=int(self.XY[0]), Y=int(self.XY[1]))\n\t\t\t\t\t\t\t\tline = \"device.touchDip({X},{Y},0)\\n\\tvc.sleep(3)\\n\".format(X=int(self.XY[0]), Y=int(self.XY[1]))\n\t\t\t\t\t\t\t\tif line not in collection: 
\n\t\t\t\t\t\t\t\t\tcollection.append(line)\n\t\t\t\t\t\t\t\t\tlogs.append(log_)\n\t\t\t\t\t\t\t\tbreak\n\n\t\t\t\t\t\telif UIDump.elements[count] in reqlist[4:]:\n\t\t\t\t\t\t\t# print \"remaining views block\" ----> DEBUG < -----\n\n\t\t\t\t\t\t\tself.unique_id = UIDump.check_unique_id[count]\n\t\t\t\t\t\t\tself.resource_id = UIDump.check_resource_id[count]\n\t\t\t\t\t\t\tself.classname = UIDump.check_className[count]\n\n\t\t\t\t\t\t\tif UIDump.check_text[count] not in ignore_list:\n\t\t\t\t\t\t\t\tlog_ = \"#LOG({classname}): Clicked on element with Text : '{id}'\\n\".format(classname =self.classname,id=UIDump.check_text[count])\n\t\t\t\t\t\t\t\tline = \"vc.findViewWithTextOrRaise('{id}').touch()\\n\".format(id=UIDump.check_text[count])\n\t\t\t\t\t\t\t\tif line not in collection: \n\t\t\t\t\t\t\t\t\tcollection.append(line)\n\t\t\t\t\t\t\t\t\tlogs.append(log_)\n\n\t\t\t\t\t\t\telif self.resource_id not in ignore_list:\n\t\t\t\t\t\t\t\tlog_ = \"#LOG({classname}): Clicked on : '{id}'\\n\".format(classname =self.classname,id=self.resource_id)\n\t\t\t\t\t\t\t\tline = \"vc.findViewByIdOrRaise('{id}').touch()\\n\".format(id=self.resource_id)\n\t\t\t\t\t\t\t\tif line not in collection: \n\t\t\t\t\t\t\t\t\tcollection.append(line)\n\t\t\t\t\t\t\t\t\tlogs.append(log_)\n\n\t\t\t\t\t\t\telif self.unique_id not in ignore_list:\n\t\t\t\t\t\t\t\tlog_ = \"#LOG({classname}): Clicked on : '{id}'\\n\".format(classname =self.classname,id=self.unique_id)\n\t\t\t\t\t\t\t\tline = \"vc.findViewByIdOrRaise('{id_text}').touch()\\n\".format(id_text=self.unique_id)\n\t\t\t\t\t\t\t\tif line not in collection: \n\t\t\t\t\t\t\t\t\tcollection.append(line)\n\t\t\t\t\t\t\t\t\tlogs.append(log_)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\telse :\n\t\t\t\t\t\t\t\tlog_ = \"#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\\n\".format(classname ='Vulnerable',X=int(self.XY[0]), Y=int(self.XY[1]))\n\t\t\t\t\t\t\t\tline = \"device.touchDip({X},{Y},0)\\n\\tvc.sleep(3)\\n\".format(X=int(self.XY[0]), Y=int(self.XY[1]))\n\t\t\t\t\t\t\t\tif line not in collection: \n\t\t\t\t\t\t\t\t\tcollection.append(line)\n\t\t\t\t\t\t\t\t\tlogs.append(log_)\n\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t# print \"not in imp view block\" ----> DEBUG < -----\n\t\t\t\t\t\t\tlog_ = \"#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\\n\".format(classname ='Vulnerable',X=int(self.XY[0]), Y=int(self.XY[1]))\n\t\t\t\t\t\t\tline = \"device.touchDip({X},{Y},0)\\n\\tvc.sleep(3)\\n\".format(X=int(self.XY[0]), Y=int(self.XY[1]))\n\t\t\t\t\t\t\tif line not in collection: \n\t\t\t\t\t\t\t\tcollection.append(line)\n\t\t\t\t\t\t\t\tlogs.append(log_)\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\telif UIDump.elements[count] in [\"android.widget.FrameLayout\"]:\n\t\t\t\t\t\t# print \"FrameLayout block\" ----> DEBUG < -----\n\t\t\t\t\t\tlog_ = \"#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\\n\".format(classname ='Vulnerable',X=int(self.XY[0]), Y=int(self.XY[1]))\n\t\t\t\t\t\tline = \"device.touchDip({X},{Y},0)\\n\\tvc.sleep(3)\\n\".format(X=int(self.XY[0]), Y=int(self.XY[1]))\n\t\t\t\t\t\tif line not in collection: \n\t\t\t\t\t\t\tcollection.append(line)\n\t\t\t\t\t\t\tlogs.append(log_)\n\t\t\t\t\n\t\t\tcount += 1\n\n\t\telse :\n\t\t\t# print \"nothing matches block\" ----> DEBUG < -----\n\t\t\tlog_ = \"#LOG({classname}): Vulnerable/Unstable field on co-ordinates ({X},{Y})\\n\".format(classname ='Vulnerable',X=int(self.XY[0]), Y=int(self.XY[1]))\n\t\t\tline = 
\"device.touchDip({X},{Y},0)\\n\\tvc.sleep(3)\\n\".format(X=int(self.XY[0]), Y=int(self.XY[1]))\n\t\t\tif line not in collection: \n\t\t\t\tcollection.append(line)\n\t\t\t\tlogs.append(log_)\n\n\t\tprint collection\n\t\tprint logs\n\t\t# ----> DEBUG < -----\n\t\t\n\t\tif self.flag == 'green':\n\t\t\tScriptGen(filename).script(collection[-1])\n\t\t\tScriptGen(filename).log(logs[-1])\n\t\telse:\n\t\t\tpass\n\ndef main():\n\tDivide_and_Conquer().bounds_Compare(bounds)\n\nif __name__ == '__main__':\n\tmain()\t"
] | true |
910 |
7ce679d5b889493f278de6deca6ec6bdb7acd3f5
|
#Author: Abeer Rafiq
#Modified: 11/23/2019 3:00pm
#Importing Packages
import socket, sys, time, json, sqlite3
import RPi.GPIO as GPIO
from datetime import datetime, date
#Creating a global server class
class GlobalServer:
#The constructor
def __init__(self, port, room_ip_addrs,
app_ip_addrs):
#Setting port
self.__port = int(port)
#Setting socket to receive
self.__soc_recv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
recv_address = ('', self.__port)
self.__soc_recv.bind(recv_address)
#Setting socket/addresses to send to the room rpi and app
self.__soc_send = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.__room_addrs = (room_ip_addrs, self.__port)
self.__app_addrs = (app_ip_addrs, self.__port)
#Setting up led blinking
self.__receiveLED = 14
self.__sendLED = 15
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(self.__receiveLED, GPIO.OUT)
GPIO.setup(self.__sendLED, GPIO.OUT)
        #Setting up string for acknowledgements (valid JSON so receivers can json.loads it)
        self.__ackstr = '{"opcode":"0"}'
#Setting database connections
dbpath = '/home/pi/Documents/Team_Project/dataBases/plantNursery_DB.db'
        self.__dbconnect = sqlite3.connect(dbpath)
        self.__dbconnect.row_factory = sqlite3.Row
self.__cursor = self.__dbconnect.cursor()
#Setting up default threshold variables
self.__defaultThresholdValue = 80
self.__defaultLessGreaterThan = "<"
self.__lightThreshold = self.__defaultThresholdValue
self.__lightLessGreaterThan = self.__defaultLessGreaterThan
self.__soilMoistureThreshold = self.__defaultThresholdValue
self.__soilMoistureLessGreaterThan = self.__defaultLessGreaterThan
self.__roomHumidityThreshold = self.__defaultThresholdValue
self.__roomHumidityLessGreaterThan = self.__defaultLessGreaterThan
self.__roomTemperatureThreshold = self.__defaultThresholdValue
self.__roomTemperatureLessGreaterThan = self.__defaultLessGreaterThan
self.__currentLight = 0
self.__currentSoilMoisture = 0
self.__currentWaterDistance = 0
self.__currentRoomHumidity = 0
self.__currentRoomTemperature = 0
self.__waterPumpDuration = 2
#Setting timeout/end time values
self.__ack_timeout = 1
self.__ack_endTime = 4
print("\nGlobal Server Initialized")
#To blink a pin once
def blink(self, pin):
GPIO.output(pin,GPIO.HIGH)
time.sleep(1)
GPIO.output(pin,GPIO.LOW)
return
#Receives/returns buffer and sends ack
def receive(self):
#Receiving
print("\nWaiting to receive on port %d ... " % self.__port)
buf, address = self.__soc_recv.recvfrom(self.__port)
if(len(buf) > 0):
#Blink receive Led
self.blink(self.__receiveLED)
print ("Received %s bytes from '%s': %s " % (len(buf), address[0], buf))
#Sending ack
self.__soc_send.sendto(self.__ackstr, (address[0], self.__port))
#Blink send Led
self.blink(self.__sendLED)
print ("Sent %s to %s" % (self.__ackstr, (address[0], self.__port)))
#Give time for the ack sent to be acknowledged
time.sleep(self.__ack_endTime)
return buf
else:
return False
#To insert data into the database
def insertDBData(self, mySQL):
#Try inserting data to database table
try:
#Insert data
self.__cursor.execute(mySQL)
            self.__dbconnect.commit()
        except sqlite3.Error as e:
#If error, exit program
print ('\nDatabase Error %s:' % e.args[0])
self.__soc_recv.shutdown(1)
self.__soc_send.shutdown(1)
self.__cursor.close()
sys.exit(1)
return
#To add default threshold entries into the db
def setDefaultThresholds(self, potID):
potID = str(potID)
tdate = str(date.today())
ttime = str(datetime.now().strftime("%H:%M:%S"))
#Insert default thresholds into db
mySQL = "INSERT INTO userThresholds VALUES ('" + potID + "', 'light', '" + \
str(self.__defaultThresholdValue) + "', '" + self.__defaultLessGreaterThan + \
"', '" + tdate + "', '" + ttime + "')"
self.insertDBData(mySQL)
mySQL = "INSERT INTO userThresholds VALUES ('" + potID + "', 'soilMoisture', '" + \
str(self.__defaultThresholdValue) + "', '" + self.__defaultLessGreaterThan + \
"', '" + tdate + "', '" + ttime + "')"
self.insertDBData(mySQL)
mySQL = "INSERT INTO userThresholds VALUES ('" + potID + "', 'roomTemperature', '" + \
str(self.__defaultThresholdValue) + "', '" + self.__defaultLessGreaterThan + \
"', '" + tdate + "', '" + ttime + "')"
self.insertDBData(mySQL)
mySQL = "INSERT INTO userThresholds VALUES ('" + potID + "', 'roomHumidity', '" + \
str(self.__defaultThresholdValue) + "', '" + self.__defaultLessGreaterThan + \
"', '" + tdate + "', '" + ttime + "')"
self.insertDBData(mySQL)
print("\nSet Default Thresholds")
return
#To add user requested threshold entries into the db
def updateUserThresholdsTable(self, threshold):
potID = str(threshold.get("potID"))
lessGreaterThan = str(threshold.get("lessGreaterThan"))
thresholdValue = float(str(threshold.get("thresholdValue")))
sensorType = str(threshold.get("sensorType"))
tdate = str(date.today())
ttime = str(datetime.now().strftime("%H:%M:%S"))
#Insert thresholds into db
mySQL = "INSERT INTO userThresholds VALUES ('" + potID + "', '" + sensorType + "', '" + str(thresholdValue) + \
"', '" + lessGreaterThan + "', '" + str(tdate) + "', '" + str(ttime) + "')"
self.insertDBData(mySQL)
#Reassign global server's instance threshold variables
if sensorType == "light":
self.__lightThreshold = thresholdValue
self.__lightLessGreaterThan = lessGreaterThan
elif sensorType == "soilMoisture":
self.__soilMoistureThreshold = thresholdValue
self.__soilMoistureLessGreaterThan = lessGreaterThan
elif sensorType == "roomTemperature":
self.__roomHumidityThreshold = thresholdValue
self.__roomHumidityLessGreaterThan = lessGreaterThan
elif sensorType == "roomHumidity":
self.__roomTemperatureThreshold = thresholdValue
self.__roomTemperatureLessGreaterThan = lessGreaterThan
print("\nSet User Requested Thresholds")
return
#To update user data in userPlantsTable
def updateUserPlantsTable(self, userInfo):
potID = str(userInfo.get('potID'))
roomID = str(userInfo.get('roomID'))
ownerID = str(userInfo.get('ownerID'))
#Inserting user data into db
mySQL = "INSERT INTO userPlants VALUES ('" + potID + "', '" + roomID + "', '" + ownerID + "')"
self.insertDBData(mySQL)
print("\nUpdated User Data")
return
#To update notes in userNotesTable
def updateUserNotesTable(self, userNotes):
potID = str(userNotes.get('potID'))
notes = str(userNotes.get('notes'))
tdate = str(date.today())
ttime = str(datetime.now().strftime("%H:%M:%S"))
#Inserting notes into db
mySQL = "INSERT INTO userNotes VALUES ('" + potID + "', '" + notes + "', '" + tdate + "', '" + ttime + "')"
self.insertDBData(mySQL)
print("\nUpdated Notes Data")
return
#To update pot data in db
    def updatePotTable(self, sensorInfo, tdate, ttime):
potID = sensorInfo.get('potID')
self.__currentWaterDistance = sensorInfo.get('waterDistance')
self.__currentLight = sensorInfo.get('light')
self.__currentSoilMoisture = sensorInfo.get('soilMoisture')
#Inserting pot data into db
mySQL = "INSERT INTO potData VALUES ('" + str(potID) + "', '" + str(self.__currentLight)+ "', '" + \
str(self.__currentSoilMoisture) + "', '" + str(self.__currentWaterDistance) + "', '" + \
tdate + "', '" + ttime + "')"
self.insertDBData(mySQL)
print("\nUpdated Pot Data")
return
#To update room data in db
    def updateRoomTable(self, sensorInfo, tdate, ttime):
self.__currentRoomTemperature = round(sensorInfo.get('temperature'), 2)
self.__currentRoomHumidity = round(sensorInfo.get('humidity'), 2)
roomID = sensorInfo.get('roomID')
#Inserting room data into db
mySQL = "insert into roomData values ('" + str(roomID) + "', '" + str(self.__currentRoomTemperature) + \
"', '" + str(self.__currentRoomHumidity) + "' , '" + tdate + "', '" + ttime + "')"
self.insertDBData(mySQL)
print("\nUpdated Room Data")
return
#To compare current sensor data to threshold values
def checkUserThresholds(self):
        #Notification json strings; the one-hot sensorArray flags which sensor crossed its threshold
lightNotfn = '{"opcode" : "D", "sensorArray" : "1, 0, 0, 0, 0, 0, 0, 0, 0, 0"}'
roomHumidityNotfn = '{"opcode" : "D", "sensorArray" : "0, 1, 0, 0, 0, 0, 0, 0, 0, 0"}'
roomTemperatureNotfn = '{"opcode" : "D", "sensorArray" : "0, 0, 1, 0, 0, 0, 0, 0, 0, 0"}'
soilMoistureNotfn = '{"opcode" : "D", "sensorArray" : "0, 0, 0, 1, 0, 0, 0, 0, 0, 0"}'
        #Tuples of sensor data, grouped so each sensor can be compared the same way
light = (self.__currentLight, self.__lightThreshold, self.__lightLessGreaterThan, lightNotfn)
soilMoisture = (self.__currentSoilMoisture, self.__soilMoistureThreshold, \
self.__soilMoistureLessGreaterThan, soilMoistureNotfn, self.__waterPumpDuration)
roomHumidity = (self.__currentRoomHumidity, self.__roomHumidityThreshold, \
self.__roomHumidityLessGreaterThan, roomHumidityNotfn)
roomTemperature = (self.__currentRoomTemperature, self.__roomTemperatureThreshold, \
self.__roomTemperatureLessGreaterThan, roomTemperatureNotfn)
#Combined tuples for sensors
sensorArr = [light, roomHumidity, roomTemperature, soilMoisture]
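        #Tuple layout: (current value, threshold, comparator, notification JSON[, pump duration]); only soilMoisture has the fifth entry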
#For each sensor compare current sensor value with threshold value
for sensor in sensorArr:
if sensor[2] == ">":
if sensor[0] > sensor[1]:
#Threshold is met, notify user
notifyApp(sensor[3])
if(len(sensor) == 4):
#Soil moisture's threshold is met, then start water pump, notify user
startPumpStr = '{"opcode" : "4", "pumpDuration" : "' + str(sensor[4]) + '"}'
startWaterPump(startPumpStr)
notifyApp(startPumpStr)
elif sensor[2] == "<":
if sensor[0] < sensor[1]:
#Threshold is met, notify user
notifyApp(sensor[3])
if(length(sensor) == 4):
#Soil moisture's threshold is met, then start water pump, notify user
startPumpStr = '{"opcode" : "4", "pumpDuration" : "' + str(sensor[4]) + '"}'
startWaterPump(startPumpStr)
notifyApp(startPumpStr)
print("\Thresholds Compared")
return
#Send room rpi msg to start water pump
def startWaterPump(self, startPump):
if (self.send_Room_Msg(startPump) == False):
#If no ack received, send msg again
print("\nStart Water Pump sent again to server")
self.startWaterPump(startPump)
return
#To send msgs to the room and wait for ack
def send_Room_Msg(self, message):
self.__soc_send.sendto(message, self.__room_addrs)
#Blink send LED
self.blink(self.__sendLED)
print("\Message sent to Room: " + message)
#Should be receiving an ack so timeout if no ack received
soc_recv.settimeout(self.__ack_timeout)
startTime = time.time()
endTime = self.__ack_endTime
while (True):
#If less than a endTime amount of time
if time.time() < (startTime + endTime):
try:
#Try Receving otherwise timeout and retry
print("Waiting for Acknowledgement . . .")
                    buf, address = self.__soc_recv.recvfrom(self.__port)
except socket.timeout:
print("Receiving is Timed Out")
#Restart while loop (Retry)
continue
try:
#If buf is received, try to load it
buf = json.loads(buf)
if not len(buf):
#No ack received, retry
continue
else:
if (buf.get("opcode") == "0"):
                        #Ack received!
print("Acknowledgement Received")
return True
else:
#No ack received, retry
continue
except (ValueError, KeyError, TypeError):
#Ack not received, try again
continue
else:
#Failed to receive ack within a endTime amount of time
return False
return
    #To send notification msgs to the app
def notifyApp(self, message):
if (self.send_App_Msg(message) == False):
#If no ack received, send msg again
print("\nNotification sent again to server")
self.notifyApp(message)
return
#To send msgs to the app and wait for ack
def send_App_Msg(self, message):
self.__soc_send.sendto(message, self.__app_addrs)
#Blink send LED
self.blink(self.__sendLED)
print("\nNotifcation sent to App: " + message)
#Should be receiving an ack so timeout if no ack received
        self.__soc_recv.settimeout(self.__ack_timeout)
startTime = time.time()
endTime = self.__ack_endTime
while (True):
#If less than a endTime amount of time
if time.time() < (startTime + endTime):
try:
#Try Receving otherwise timeout and retry
print("Waiting for Acknowledgement . . .")
                    buf, address = self.__soc_recv.recvfrom(self.__port)
except socket.timeout:
print("Receiving is Timed Out")
#Restart while loop (Retry)
continue
try:
#If buf is received, try to load it
buf = json.loads(buf)
if not len(buf):
#No ack received, retry
continue
else:
if (buf.get("opcode") == "0"):
                        #Ack received!
print("Acknowledgement Received")
return True
else:
#No ack received, retry
continue
except (ValueError, KeyError, TypeError):
#Ack not received, try again
continue
else:
#Failed to receive ack within a endTime amount of time
return False
return
#To get requested stats from the db
def get_stats(self, rowNumbers, sensors):
#Try retrieving data from the database
try:
#Retrieve Data
sensors = sensors.replace('"',"").replace("'","").replace('[',"").replace(']',"")
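            #Note: building SQL by string concatenation is vulnerable to injection; parameterized queries would be safer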
mysql = """SELECT """ + sensors + """, tdate, ttime FROM (
SELECT * FROM userPlants a
INNER JOIN potData b
ON a.potID = b.potID
INNER JOIN roomData c
ON a.roomID = c.roomID AND b.tdate = c.tdate AND b.ttime = c.ttime
ORDER BY c.tdate DESC, c.ttime DESC LIMIT """ + str(rowNumbers) + """)"""
myresult = self.__cursor.execute(mysql).fetchall()
        except sqlite3.Error as e:
            #If error, exit program
            print ('\nDatabase Error %s:' % e.args[0])
sys.exit(1)
#Convert data into json format
stats = json.dumps( [dict(i) for i in myresult] )
print("\nData Retreived from DB")
return stats
#To send the stats with the corresponding opcode
def send_stats(self, rowNumbers, sensors):
if rowNumbers == '0':
#0 means to send app just one most recent row of data (opcode E)
            oneRow = self.get_stats(1, sensors)
            stats = '{"opcode" : "E", "statsArray" : "' + str(oneRow) + '"}'
        else:
            #Otherwise send multiple recent rows of data (opcode 6)
            manyRows = self.get_stats(rowNumbers, sensors)
            stats = '{"opcode" : "6", "statsArray" : "' + str(manyRows) + '"}'
        #Send stats to App
        #If ack received return
        if (self.send_App_Msg(stats) == True):
print("\nStats sent to app")
else:
#If no ack received, try sending again
print("\nStats sent again to app (notify again)")
self.send_stats(rowNumbers, sensors)
return
#Main function which receives json data and invokes methods based on opcode received
def main():
#Create GlobalServer object (port, room_ip_addrs, app_ip_addrs)
globalServer = GlobalServer(1000, '192.168.1.47',
'192.168.137.102')
while True:
message = globalServer.receive()
if (message == False):
#If length of buffer is <1
continue
else:
message = json.loads(message)
#User wants to update notes table
if (message.get('opcode') == "1"):
globalServer.updateUserNotesTable(message)
#User wants to add a pot with a room and owner
if (message.get('opcode') == "2"):
globalServer.updateUserPlantsTable(message)
#Set default thresholds for that potID
globalServer.setDefaultThresholds(message.get("potID"))
#If user wants to set thresholds to requested ones
if (message.get('opcode') == "3"):
globalServer.updateUserThresholdsTable(message)
#If user wants to view stats
if (message.get('opcode') == "5"):
rowNumbers = message.get("rowNumbers")
sensors = message.get("sensorType")
globalServer.send_stats(rowNumbers, sensors)
#If an error has occured in the room rpi or arduino
if (message.get('opcode') == "D"):
globalServer.notifyApp(str(message))
#If room rpi sent all sensory data, update tables, compare values to thresholds as well
if (message.get('opcode') == "9"):
tdate = str(date.today())
ttime = str(datetime.now().strftime("%H:%M:%S"))
globalServer.updateRoomTable(message, tdate, ttime)
globalServer.updatePotTable(message, tdate, ttime)
globalServer.checkUserThresholds()
    #Unreachable as written (the while loop never breaks); socket/cursor teardown
    #would belong in a close() method on GlobalServer rather than here
return
if __name__== "__main__":
main()
|
[
"#Author: Abeer Rafiq\n#Modified: 11/23/2019 3:00pm\n\n#Importing Packages\nimport socket, sys, time, json, sqlite3\nimport RPi.GPIO as GPIO\nfrom datetime import datetime, date\n\n#Creating a global server class\nclass GlobalServer:\n #The constructor\n def __init__(self, port, room_ip_addrs,\n app_ip_addrs):\n #Setting port\n self.__port = int(port)\n #Setting socket to receive\n self.__soc_recv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n recv_address = ('', self.__port)\n self.__soc_recv.bind(recv_address)\n #Setting socket/addresses to send to the room rpi and app\n self.__soc_send = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self.__room_addrs = (room_ip_addrs, self.__port)\n self.__app_addrs = (app_ip_addrs, self.__port)\n #Setting up led blinking\n self.__receiveLED = 14\n self.__sendLED = 15\n GPIO.setmode(GPIO.BCM)\n GPIO.setwarnings(False)\n GPIO.setup(self.__receiveLED, GPIO.OUT)\n GPIO.setup(self.__sendLED, GPIO.OUT)\n #Setting up string for acknowldegements\n self.__ackstr = \"{'opcode':'0'}\"\n #Setting database connections\n dbpath = '/home/pi/Documents/Team_Project/dataBases/plantNursery_DB.db'\n self.__dbconnect = sqlite3.connect(dbpath); \n self.__dbconnect.row_factory = sqlite3.Row;\n self.__cursor = self.__dbconnect.cursor() \n #Setting up default threshold variables\n self.__defaultThresholdValue = 80\n self.__defaultLessGreaterThan = \"<\"\n self.__lightThreshold = self.__defaultThresholdValue\n self.__lightLessGreaterThan = self.__defaultLessGreaterThan\n self.__soilMoistureThreshold = self.__defaultThresholdValue\n self.__soilMoistureLessGreaterThan = self.__defaultLessGreaterThan\n self.__roomHumidityThreshold = self.__defaultThresholdValue\n self.__roomHumidityLessGreaterThan = self.__defaultLessGreaterThan\n self.__roomTemperatureThreshold = self.__defaultThresholdValue\n self.__roomTemperatureLessGreaterThan = self.__defaultLessGreaterThan\n self.__currentLight = 0\n self.__currentSoilMoisture = 0\n self.__currentWaterDistance = 0\n self.__currentRoomHumidity = 0\n self.__currentRoomTemperature = 0\n self.__waterPumpDuration = 2\n #Setting timeout/end time values\n self.__ack_timeout = 1\n self.__ack_endTime = 4\n print(\"\\nGlobal Server Initialized\")\n \n #To blink a pin once\n def blink(self, pin):\n GPIO.output(pin,GPIO.HIGH)\n time.sleep(1)\n GPIO.output(pin,GPIO.LOW)\n return\n \n #Receives/returns buffer and sends ack \n def receive(self):\n #Receiving\n print(\"\\nWaiting to receive on port %d ... 
\" % self.__port)\n buf, address = self.__soc_recv.recvfrom(self.__port)\n if(len(buf) > 0):\n #Blink receive Led\n self.blink(self.__receiveLED)\n print (\"Received %s bytes from '%s': %s \" % (len(buf), address[0], buf))\n #Sending ack\n self.__soc_send.sendto(self.__ackstr, (address[0], self.__port))\n #Blink send Led\n self.blink(self.__sendLED)\n print (\"Sent %s to %s\" % (self.__ackstr, (address[0], self.__port)))\n #Give time for the ack sent to be acknowledged\n time.sleep(self.__ack_endTime)\n return buf\n else:\n return False\n \n #To insert data into the database\n def insertDBData(self, mySQL):\n #Try inserting data to database table\n try:\n #Insert data\n self.__cursor.execute(mySQL)\n self.__dbconnect.commit();\n except sqlite3.Error, e:\n #If error, exit program \n print ('\\nDatabase Error %s:' % e.args[0])\n self.__soc_recv.shutdown(1)\n self.__soc_send.shutdown(1)\n self.__cursor.close()\n sys.exit(1)\n return\n \n #To add default threshold entries into the db\n def setDefaultThresholds(self, potID):\n potID = str(potID)\n tdate = str(date.today())\n ttime = str(datetime.now().strftime(\"%H:%M:%S\"))\n #Insert default thresholds into db\n mySQL = \"INSERT INTO userThresholds VALUES ('\" + potID + \"', 'light', '\" + \\\n str(self.__defaultThresholdValue) + \"', '\" + self.__defaultLessGreaterThan + \\\n \"', '\" + tdate + \"', '\" + ttime + \"')\" \n self.insertDBData(mySQL)\n mySQL = \"INSERT INTO userThresholds VALUES ('\" + potID + \"', 'soilMoisture', '\" + \\\n str(self.__defaultThresholdValue) + \"', '\" + self.__defaultLessGreaterThan + \\\n \"', '\" + tdate + \"', '\" + ttime + \"')\" \n self.insertDBData(mySQL)\n mySQL = \"INSERT INTO userThresholds VALUES ('\" + potID + \"', 'roomTemperature', '\" + \\\n str(self.__defaultThresholdValue) + \"', '\" + self.__defaultLessGreaterThan + \\\n \"', '\" + tdate + \"', '\" + ttime + \"')\" \n self.insertDBData(mySQL)\n mySQL = \"INSERT INTO userThresholds VALUES ('\" + potID + \"', 'roomHumidity', '\" + \\\n str(self.__defaultThresholdValue) + \"', '\" + self.__defaultLessGreaterThan + \\\n \"', '\" + tdate + \"', '\" + ttime + \"')\" \n self.insertDBData(mySQL)\n print(\"\\nSet Default Thresholds\")\n return\n \n #To add user requested threshold entries into the db\n def updateUserThresholdsTable(self, threshold):\n potID = str(threshold.get(\"potID\"))\n lessGreaterThan = str(threshold.get(\"lessGreaterThan\"))\n thresholdValue = float(str(threshold.get(\"thresholdValue\")))\n sensorType = str(threshold.get(\"sensorType\"))\n tdate = str(date.today())\n ttime = str(datetime.now().strftime(\"%H:%M:%S\"))\n #Insert thresholds into db\n mySQL = \"INSERT INTO userThresholds VALUES ('\" + potID + \"', '\" + sensorType + \"', '\" + str(thresholdValue) + \\\n \"', '\" + lessGreaterThan + \"', '\" + str(tdate) + \"', '\" + str(ttime) + \"')\" \n self.insertDBData(mySQL)\n #Reassign global server's instance threshold variables\n if sensorType == \"light\":\n self.__lightThreshold = thresholdValue\n self.__lightLessGreaterThan = lessGreaterThan\n elif sensorType == \"soilMoisture\":\n self.__soilMoistureThreshold = thresholdValue \n self.__soilMoistureLessGreaterThan = lessGreaterThan\n elif sensorType == \"roomTemperature\":\n self.__roomHumidityThreshold = thresholdValue\n self.__roomHumidityLessGreaterThan = lessGreaterThan\n elif sensorType == \"roomHumidity\":\n self.__roomTemperatureThreshold = thresholdValue\n self.__roomTemperatureLessGreaterThan = lessGreaterThan\n print(\"\\nSet User Requested Thresholds\")\n 
return\n\n #To update user data in userPlantsTable\n def updateUserPlantsTable(self, userInfo):\n potID = str(userInfo.get('potID'))\n roomID = str(userInfo.get('roomID'))\n ownerID = str(userInfo.get('ownerID'))\n #Inserting user data into db\n mySQL = \"INSERT INTO userPlants VALUES ('\" + potID + \"', '\" + roomID + \"', '\" + ownerID + \"')\" \n self.insertDBData(mySQL)\n print(\"\\nUpdated User Data\")\n return\n \n #To update notes in userNotesTable\n def updateUserNotesTable(self, userNotes):\n potID = str(userNotes.get('potID'))\n notes = str(userNotes.get('notes'))\n tdate = str(date.today())\n ttime = str(datetime.now().strftime(\"%H:%M:%S\"))\n #Inserting notes into db\n mySQL = \"INSERT INTO userNotes VALUES ('\" + potID + \"', '\" + notes + \"', '\" + tdate + \"', '\" + ttime + \"')\"\n self.insertDBData(mySQL)\n print(\"\\nUpdated Notes Data\")\n return\n \n #To update pot data in db\n def updatePotTable(self, sensorInfo, tdate, time):\n potID = sensorInfo.get('potID')\n self.__currentWaterDistance = sensorInfo.get('waterDistance')\n self.__currentLight = sensorInfo.get('light')\n self.__currentSoilMoisture = sensorInfo.get('soilMoisture')\n #Inserting pot data into db\n mySQL = \"INSERT INTO potData VALUES ('\" + str(potID) + \"', '\" + str(self.__currentLight)+ \"', '\" + \\\n str(self.__currentSoilMoisture) + \"', '\" + str(self.__currentWaterDistance) + \"', '\" + \\\n tdate + \"', '\" + ttime + \"')\" \n self.insertDBData(mySQL)\n print(\"\\nUpdated Pot Data\")\n return\n \n #To update room data in db\n def updateRoomTable(self, sensorInfo,tdate, time):\n self.__currentRoomTemperature = round(sensorInfo.get('temperature'), 2)\n self.__currentRoomHumidity = round(sensorInfo.get('humidity'), 2)\n roomID = sensorInfo.get('roomID')\n #Inserting room data into db\n mySQL = \"insert into roomData values ('\" + str(roomID) + \"', '\" + str(self.__currentRoomTemperature) + \\\n \"', '\" + str(self.__currentRoomHumidity) + \"' , '\" + tdate + \"', '\" + ttime + \"')\" \n self.insertDBData(mySQL)\n print(\"\\nUpdated Room Data\")\n return\n\n\n #To compare current sensor data to threshold values\n def checkUserThresholds(self):\n #Notification json #Should be receiving an ack so timeout if no ack receivedstrings\n lightNotfn = '{\"opcode\" : \"D\", \"sensorArray\" : \"1, 0, 0, 0, 0, 0, 0, 0, 0, 0\"}' \n roomHumidityNotfn = '{\"opcode\" : \"D\", \"sensorArray\" : \"0, 1, 0, 0, 0, 0, 0, 0, 0, 0\"}'\n roomTemperatureNotfn = '{\"opcode\" : \"D\", \"sensorArray\" : \"0, 0, 1, 0, 0, 0, 0, 0, 0, 0\"}'\n soilMoistureNotfn = '{\"opcode\" : \"D\", \"sensorArray\" : \"0, 0, 0, 1, 0, 0, 0, 0, 0, 0\"}'\n #Tuples of sensor data to easily neatly\n light = (self.__currentLight, self.__lightThreshold, self.__lightLessGreaterThan, lightNotfn)\n soilMoisture = (self.__currentSoilMoisture, self.__soilMoistureThreshold, \\\n self.__soilMoistureLessGreaterThan, soilMoistureNotfn, self.__waterPumpDuration)\n roomHumidity = (self.__currentRoomHumidity, self.__roomHumidityThreshold, \\\n self.__roomHumidityLessGreaterThan, roomHumidityNotfn)\n roomTemperature = (self.__currentRoomTemperature, self.__roomTemperatureThreshold, \\\n self.__roomTemperatureLessGreaterThan, roomTemperatureNotfn)\n #Combined tuples for sensors\n sensorArr = [light, roomHumidity, roomTemperature, soilMoisture]\n #For each sensor compare current sensor value with threshold value\n for sensor in sensorArr:\n if sensor[2] == \">\":\n if sensor[0] > sensor[1]:\n #Threshold is met, notify user\n notifyApp(sensor[3])\n if(len(sensor) 
== 4):\n #Soil moisture's threshold is met, then start water pump, notify user\n startPumpStr = '{\"opcode\" : \"4\", \"pumpDuration\" : \"' + str(sensor[4]) + '\"}'\n startWaterPump(startPumpStr) \n notifyApp(startPumpStr) \n elif sensor[2] == \"<\":\n if sensor[0] < sensor[1]:\n #Threshold is met, notify user\n notifyApp(sensor[3])\n if(length(sensor) == 4):\n #Soil moisture's threshold is met, then start water pump, notify user\n startPumpStr = '{\"opcode\" : \"4\", \"pumpDuration\" : \"' + str(sensor[4]) + '\"}'\n startWaterPump(startPumpStr) \n notifyApp(startPumpStr) \n print(\"\\Thresholds Compared\")\n return\n \n #Send room rpi msg to start water pump\n def startWaterPump(self, startPump):\n if (self.send_Room_Msg(startPump) == False):\n #If no ack received, send msg again\n print(\"\\nStart Water Pump sent again to server\")\n self.startWaterPump(startPump)\n return\n \n #To send msgs to the room and wait for ack\n def send_Room_Msg(self, message):\n self.__soc_send.sendto(message, self.__room_addrs)\n #Blink send LED\n self.blink(self.__sendLED)\n print(\"\\Message sent to Room: \" + message)\n #Should be receiving an ack so timeout if no ack received\n soc_recv.settimeout(self.__ack_timeout)\n startTime = time.time()\n endTime = self.__ack_endTime\n while (True):\n #If less than a endTime amount of time\n if time.time() < (startTime + endTime):\n try:\n #Try Receving otherwise timeout and retry\n print(\"Waiting for Acknowledgement . . .\")\n buf, address = soc_recv.recvfrom(self.__port)\n except socket.timeout:\n print(\"Receiving is Timed Out\")\n #Restart while loop (Retry)\n continue\n try:\n #If buf is received, try to load it\n buf = json.loads(buf)\n if not len(buf):\n #No ack received, retry\n continue\n else:\n if (buf.get(\"opcode\") == \"0\"):\n #Ack recevied!\n print(\"Acknowledgement Received\")\n return True\n else:\n #No ack received, retry\n continue\n except (ValueError, KeyError, TypeError):\n #Ack not received, try again\n continue\n else:\n #Failed to receive ack within a endTime amount of time\n return False\n return\n \n #To notifcations msgs to the app\n def notifyApp(self, message):\n if (self.send_App_Msg(message) == False):\n #If no ack received, send msg again\n print(\"\\nNotification sent again to server\")\n self.notifyApp(message)\n return\n \n #To send msgs to the app and wait for ack\n def send_App_Msg(self, message):\n self.__soc_send.sendto(message, self.__app_addrs)\n #Blink send LED\n self.blink(self.__sendLED)\n print(\"\\nNotifcation sent to App: \" + message)\n #Should be receiving an ack so timeout if no ack received\n soc_recv.settimeout(self.__ack_timeout)\n startTime = time.time()\n endTime = self.__ack_endTime\n while (True):\n #If less than a endTime amount of time\n if time.time() < (startTime + endTime):\n try:\n #Try Receving otherwise timeout and retry\n print(\"Waiting for Acknowledgement . . 
.\")\n buf, address = soc_recv.recvfrom(self.__port)\n except socket.timeout:\n print(\"Receiving is Timed Out\")\n #Restart while loop (Retry)\n continue\n try:\n #If buf is received, try to load it\n buf = json.loads(buf)\n if not len(buf):\n #No ack received, retry\n continue\n else:\n if (buf.get(\"opcode\") == \"0\"):\n #Ack recevied!\n print(\"Acknowledgement Received\")\n return True\n else:\n #No ack received, retry\n continue\n except (ValueError, KeyError, TypeError):\n #Ack not received, try again\n continue\n else:\n #Failed to receive ack within a endTime amount of time\n return False\n return\n \n #To get requested stats from the db\n def get_stats(self, rowNumbers, sensors):\n #Try retrieving data from the database\n try:\n #Retrieve Data\n sensors = sensors.replace('\"',\"\").replace(\"'\",\"\").replace('[',\"\").replace(']',\"\")\n mysql = \"\"\"SELECT \"\"\" + sensors + \"\"\", tdate, ttime FROM (\n SELECT * FROM userPlants a\n INNER JOIN potData b\n ON a.potID = b.potID \n INNER JOIN roomData c \n ON a.roomID = c.roomID AND b.tdate = c.tdate AND b.ttime = c.ttime\n ORDER BY c.tdate DESC, c.ttime DESC LIMIT \"\"\" + str(rowNumbers) + \"\"\")\"\"\"\n myresult = self.__cursor.execute(mysql).fetchall()\n except sqlite3.Error, e:\n #If error, exit program \n print '\\nDatabase Error %s:' % e.args[0]\n sys.exit(1)\n #Convert data into json format\n stats = json.dumps( [dict(i) for i in myresult] )\n print(\"\\nData Retreived from DB\")\n return stats\n \n #To send the stats with the corresponding opcode\n def send_stats(self, rowNumbers, sensors):\n if rowNumbers == '0':\n #0 means to send app just one most recent row of data (opcode E)\n oneRow = globalServer.get_stats(1, sensors)\n stats = '{\"opcode\" : \"E\", \"statsArray\" : \"' + str(oneRow) + '\"}'\n else:\n #Otherwise send mutiple recent rows of data (opcode 6)\n manyRows = globalServer.get_stats(rowNumbers, sensors)\n stats = '{\"opcode\" : \"6\", \"statsArray\" : \"' + str(manyRows) + '\"}'\n #Send stats to App\n #If ack received return\n if (self.send_notifyApp(error) == True):\n print(\"\\nStats sent to app\")\n else:\n #If no ack received, try sending again\n print(\"\\nStats sent again to app (notify again)\")\n self.send_stats(rowNumbers, sensors)\n return\n\n#Main function which receives json data and invokes methods based on opcode received\ndef main():\n #Create GlobalServer object (port, room_ip_addrs, app_ip_addrs)\n globalServer = GlobalServer(1000, '192.168.1.47',\n '192.168.137.102')\n while True:\n message = globalServer.receive()\n if (message == False):\n #If length of buffer is <1\n continue\n else:\n message = json.loads(message)\n #User wants to update notes table\n if (message.get('opcode') == \"1\"):\n globalServer.updateUserNotesTable(message)\n #User wants to add a pot with a room and owner\n if (message.get('opcode') == \"2\"): \n globalServer.updateUserPlantsTable(message)\n #Set default thresholds for that potID\n globalServer.setDefaultThresholds(message.get(\"potID\"))\n #If user wants to set thresholds to requested ones\n if (message.get('opcode') == \"3\"): \n globalServer.updateUserThresholdsTable(message)\n #If user wants to view stats\n if (message.get('opcode') == \"5\"):\n rowNumbers = message.get(\"rowNumbers\")\n sensors = message.get(\"sensorType\")\n globalServer.send_stats(rowNumbers, sensors)\n #If an error has occured in the room rpi or arduino\n if (message.get('opcode') == \"D\"): \n globalServer.notifyApp(str(message))\n #If room rpi sent all sensory data, update tables, 
compare values to thresholds as well\n if (message.get('opcode') == \"9\"): \n tdate = str(date.today())\n ttime = str(datetime.now().strftime(\"%H:%M:%S\"))\n globalServer.updateRoomTable(message, tdate, ttime)\n globalServer.updatePotTable(message, tdate, ttime) \n globalServer.checkUserThresholds() \n self.__soc_recv.shutdown(1)\n self.__soc_send.shutdown(1)\n self.__cursor.close()\n return\n \nif __name__== \"__main__\":\n main()\n"
] | true |
911 |
08a5a903d3757f8821554aa3649ec2ac2b2995a5
|
/Users/tanzy/anaconda3/lib/python3.6/_dummy_thread.py
|
[
"/Users/tanzy/anaconda3/lib/python3.6/_dummy_thread.py"
] | true |
912 |
09d31df9c76975377b44470e1f2ba4a5c4b7bbde
|
import sys
import logging
import copy
import socket
from . import game_map
class GameUnix:
"""
:ivar map: Current map representation
:ivar initial_map: The initial version of the map before game starts
"""
def _send_string(self, s):
"""
Send data to the game. Call :function:`done_sending` once finished.
:param str s: String to send
:return: nothing
"""
self.sfile.write(s)
def _done_sending(self):
"""
Finish sending commands to the game.
:return: nothing
"""
self.sfile.write('\n')
self.sfile.flush()
def _get_string(self):
"""
Read input from the game.
:return: The input read from the Halite engine
:rtype: str
"""
result = self.sfile.readline().rstrip('\n')
return result
def send_command_queue(self, command_queue):
"""
Issue the given list of commands.
:param list[str] command_queue: List of commands to send the Halite engine
:return: nothing
"""
for command in command_queue:
self._send_string(command)
self._done_sending()
@staticmethod
def _set_up_logging(tag, name):
"""
Set up and truncate the log
:param tag: The user tag (used for naming the log)
:param name: The bot name (used for naming the log)
:return: nothing
"""
log_file = "{}_{}.log".format(tag, name)
logging.basicConfig(filename=log_file, level=logging.DEBUG, filemode='w')
logging.info("Initialized bot {}".format(name))
def __init__(self, name, socket_path="/dev/shm/bot.sock"):
"""
Initialize the bot with the given name.
:param name: The name of the bot.
"""
self.s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
connected = False
while not connected:
try:
self.s.connect(socket_path)
connected = True
except Exception:
pass # Do nothing, just try again
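                # Busy-wait until the engine creates the socket; a short sleep here would reduce CPU usage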
self.sfile = self.s.makefile('rw')
self._name = name
self._send_name = False
tag = int(self._get_string())
GameUnix._set_up_logging(tag, name)
width, height = [int(x) for x in self._get_string().strip().split()]
self.map = game_map.Map(tag, width, height)
self.update_map()
self.initial_map = copy.deepcopy(self.map)
self._send_name = True
self.done = False
def update_map(self):
"""
Parse the map given by the engine.
:return: new parsed map
:rtype: game_map.Map
"""
if self._send_name:
self._send_string(self._name)
self._done_sending()
self._send_name = False
logging.info("---NEW TURN---")
recv = self._get_string()
if recv == "":
self.close()
self.done = True
return self.map # last step map
self.map._parse(recv)
return self.map
def close(self):
self.sfile.close()
self.s.close()
class GameStdIO:
"""
:ivar map: Current map representation
:ivar initial_map: The initial version of the map before game starts
"""
def _send_string(self, s):
"""
Send data to the game. Call :function:`done_sending` once finished.
:param str s: String to send
:return: nothing
"""
sys.stdout.write(s)
def _done_sending(self):
"""
Finish sending commands to the game.
:return: nothing
"""
sys.stdout.write('\n')
sys.stdout.flush()
def _get_string(self):
"""
Read input from the game.
:return: The input read from the Halite engine
:rtype: str
"""
result = sys.stdin.readline().rstrip('\n')
return result
def send_command_queue(self, command_queue):
"""
Issue the given list of commands.
:param list[str] command_queue: List of commands to send the Halite engine
:return: nothing
"""
for command in command_queue:
self._send_string(command)
self._done_sending()
@staticmethod
def _set_up_logging(tag, name):
"""
Set up and truncate the log
:param tag: The user tag (used for naming the log)
:param name: The bot name (used for naming the log)
:return: nothing
"""
log_file = "{}_{}.log".format(tag, name)
logging.basicConfig(filename=log_file, level=logging.DEBUG, filemode='w')
logging.info("Initialized bot {}".format(name))
def __init__(self, name):
"""
Initialize the bot with the given name.
:param name: The name of the bot.
"""
self._name = name
self._send_name = False
tag = int(self._get_string())
GameStdIO._set_up_logging(tag, name)
width, height = [int(x) for x in self._get_string().strip().split()]
self.map = game_map.Map(tag, width, height)
self.update_map()
self.initial_map = copy.deepcopy(self.map)
self._send_name = True
self.done = False
def update_map(self):
"""
Parse the map given by the engine.
:return: new parsed map
:rtype: game_map.Map
"""
if self._send_name:
self._send_string(self._name)
self._done_sending()
self._send_name = False
logging.info("---NEW TURN---")
recv = self._get_string()
if recv == "":
self.close()
self.done = True
return self.map # last step map
self.map._parse(recv)
return self.map
def close(self):
pass
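
# Usage sketch (illustrative addition, not part of the original module): both
# classes expose the same handshake and turn loop, so a minimal bot looks the
# same over either transport; the bot name and the empty command list below
# are placeholders.
#
#     game = GameStdIO("SettlerBot")         # handshake: tag, map size, name
#     while not game.done:
#         current_map = game.update_map()    # blocks until the engine's next turn
#         game.send_command_queue([])        # a real bot emits move/dock commands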
|
[
"import sys\nimport logging\nimport copy\nimport socket\n\nfrom . import game_map\n\nclass GameUnix:\n \"\"\"\n :ivar map: Current map representation\n :ivar initial_map: The initial version of the map before game starts\n \"\"\"\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n self.sfile.write(s)\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n self.sfile.write('\\n')\n self.sfile.flush()\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = self.sfile.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = \"{}_{}.log\".format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG, filemode='w')\n logging.info(\"Initialized bot {}\".format(name))\n\n def __init__(self, name, socket_path=\"/dev/shm/bot.sock\"):\n \"\"\"\n Initialize the bot with the given name.\n\n :param name: The name of the bot.\n \"\"\"\n self.s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n connected = False\n while not connected:\n try:\n self.s.connect(socket_path)\n connected = True\n except Exception:\n pass # Do nothing, just try again\n self.sfile = self.s.makefile('rw')\n\n self._name = name\n self._send_name = False\n tag = int(self._get_string())\n GameUnix._set_up_logging(tag, name)\n width, height = [int(x) for x in self._get_string().strip().split()]\n self.map = game_map.Map(tag, width, height)\n self.update_map()\n self.initial_map = copy.deepcopy(self.map)\n self._send_name = True\n\n self.done = False\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info(\"---NEW TURN---\")\n recv = self._get_string()\n\n if recv == \"\":\n self.close()\n self.done = True\n return self.map # last step map\n\n self.map._parse(recv)\n return self.map\n \n def close(self):\n self.sfile.close()\n self.s.close()\n\nclass GameStdIO:\n \"\"\"\n :ivar map: Current map representation\n :ivar initial_map: The initial version of the map before game starts\n \"\"\"\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. 
Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n sys.stdout.write(s)\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = sys.stdin.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = \"{}_{}.log\".format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG, filemode='w')\n logging.info(\"Initialized bot {}\".format(name))\n\n def __init__(self, name):\n \"\"\"\n Initialize the bot with the given name.\n\n :param name: The name of the bot.\n \"\"\"\n\n self._name = name\n self._send_name = False\n tag = int(self._get_string())\n GameStdIO._set_up_logging(tag, name)\n width, height = [int(x) for x in self._get_string().strip().split()]\n self.map = game_map.Map(tag, width, height)\n self.update_map()\n self.initial_map = copy.deepcopy(self.map)\n self._send_name = True\n\n self.done = False\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info(\"---NEW TURN---\")\n recv = self._get_string()\n\n if recv == \"\":\n self.close()\n self.done = True\n return self.map # last step map\n\n self.map._parse(recv)\n return self.map\n \n def close(self):\n pass",
"import sys\nimport logging\nimport copy\nimport socket\nfrom . import game_map\n\n\nclass GameUnix:\n \"\"\"\n :ivar map: Current map representation\n :ivar initial_map: The initial version of the map before game starts\n \"\"\"\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n self.sfile.write(s)\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n self.sfile.write('\\n')\n self.sfile.flush()\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = self.sfile.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n\n def __init__(self, name, socket_path='/dev/shm/bot.sock'):\n \"\"\"\n Initialize the bot with the given name.\n\n :param name: The name of the bot.\n \"\"\"\n self.s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n connected = False\n while not connected:\n try:\n self.s.connect(socket_path)\n connected = True\n except Exception:\n pass\n self.sfile = self.s.makefile('rw')\n self._name = name\n self._send_name = False\n tag = int(self._get_string())\n GameUnix._set_up_logging(tag, name)\n width, height = [int(x) for x in self._get_string().strip().split()]\n self.map = game_map.Map(tag, width, height)\n self.update_map()\n self.initial_map = copy.deepcopy(self.map)\n self._send_name = True\n self.done = False\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n\n def close(self):\n self.sfile.close()\n self.s.close()\n\n\nclass GameStdIO:\n \"\"\"\n :ivar map: Current map representation\n :ivar initial_map: The initial version of the map before game starts\n \"\"\"\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. 
Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n sys.stdout.write(s)\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = sys.stdin.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n\n def __init__(self, name):\n \"\"\"\n Initialize the bot with the given name.\n\n :param name: The name of the bot.\n \"\"\"\n self._name = name\n self._send_name = False\n tag = int(self._get_string())\n GameStdIO._set_up_logging(tag, name)\n width, height = [int(x) for x in self._get_string().strip().split()]\n self.map = game_map.Map(tag, width, height)\n self.update_map()\n self.initial_map = copy.deepcopy(self.map)\n self._send_name = True\n self.done = False\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n\n def close(self):\n pass\n",
"<import token>\n\n\nclass GameUnix:\n \"\"\"\n :ivar map: Current map representation\n :ivar initial_map: The initial version of the map before game starts\n \"\"\"\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n self.sfile.write(s)\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n self.sfile.write('\\n')\n self.sfile.flush()\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = self.sfile.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n\n def __init__(self, name, socket_path='/dev/shm/bot.sock'):\n \"\"\"\n Initialize the bot with the given name.\n\n :param name: The name of the bot.\n \"\"\"\n self.s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n connected = False\n while not connected:\n try:\n self.s.connect(socket_path)\n connected = True\n except Exception:\n pass\n self.sfile = self.s.makefile('rw')\n self._name = name\n self._send_name = False\n tag = int(self._get_string())\n GameUnix._set_up_logging(tag, name)\n width, height = [int(x) for x in self._get_string().strip().split()]\n self.map = game_map.Map(tag, width, height)\n self.update_map()\n self.initial_map = copy.deepcopy(self.map)\n self._send_name = True\n self.done = False\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n\n def close(self):\n self.sfile.close()\n self.s.close()\n\n\nclass GameStdIO:\n \"\"\"\n :ivar map: Current map representation\n :ivar initial_map: The initial version of the map before game starts\n \"\"\"\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. 
Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n sys.stdout.write(s)\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = sys.stdin.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n\n def __init__(self, name):\n \"\"\"\n Initialize the bot with the given name.\n\n :param name: The name of the bot.\n \"\"\"\n self._name = name\n self._send_name = False\n tag = int(self._get_string())\n GameStdIO._set_up_logging(tag, name)\n width, height = [int(x) for x in self._get_string().strip().split()]\n self.map = game_map.Map(tag, width, height)\n self.update_map()\n self.initial_map = copy.deepcopy(self.map)\n self._send_name = True\n self.done = False\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n\n def close(self):\n pass\n",
"<import token>\n\n\nclass GameUnix:\n <docstring token>\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n self.sfile.write(s)\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n self.sfile.write('\\n')\n self.sfile.flush()\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = self.sfile.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n\n def __init__(self, name, socket_path='/dev/shm/bot.sock'):\n \"\"\"\n Initialize the bot with the given name.\n\n :param name: The name of the bot.\n \"\"\"\n self.s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n connected = False\n while not connected:\n try:\n self.s.connect(socket_path)\n connected = True\n except Exception:\n pass\n self.sfile = self.s.makefile('rw')\n self._name = name\n self._send_name = False\n tag = int(self._get_string())\n GameUnix._set_up_logging(tag, name)\n width, height = [int(x) for x in self._get_string().strip().split()]\n self.map = game_map.Map(tag, width, height)\n self.update_map()\n self.initial_map = copy.deepcopy(self.map)\n self._send_name = True\n self.done = False\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n\n def close(self):\n self.sfile.close()\n self.s.close()\n\n\nclass GameStdIO:\n \"\"\"\n :ivar map: Current map representation\n :ivar initial_map: The initial version of the map before game starts\n \"\"\"\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. 
Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n sys.stdout.write(s)\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = sys.stdin.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n\n def __init__(self, name):\n \"\"\"\n Initialize the bot with the given name.\n\n :param name: The name of the bot.\n \"\"\"\n self._name = name\n self._send_name = False\n tag = int(self._get_string())\n GameStdIO._set_up_logging(tag, name)\n width, height = [int(x) for x in self._get_string().strip().split()]\n self.map = game_map.Map(tag, width, height)\n self.update_map()\n self.initial_map = copy.deepcopy(self.map)\n self._send_name = True\n self.done = False\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n\n def close(self):\n pass\n",
"<import token>\n\n\nclass GameUnix:\n <docstring token>\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n self.sfile.write(s)\n <function token>\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = self.sfile.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n\n def __init__(self, name, socket_path='/dev/shm/bot.sock'):\n \"\"\"\n Initialize the bot with the given name.\n\n :param name: The name of the bot.\n \"\"\"\n self.s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n connected = False\n while not connected:\n try:\n self.s.connect(socket_path)\n connected = True\n except Exception:\n pass\n self.sfile = self.s.makefile('rw')\n self._name = name\n self._send_name = False\n tag = int(self._get_string())\n GameUnix._set_up_logging(tag, name)\n width, height = [int(x) for x in self._get_string().strip().split()]\n self.map = game_map.Map(tag, width, height)\n self.update_map()\n self.initial_map = copy.deepcopy(self.map)\n self._send_name = True\n self.done = False\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n\n def close(self):\n self.sfile.close()\n self.s.close()\n\n\nclass GameStdIO:\n \"\"\"\n :ivar map: Current map representation\n :ivar initial_map: The initial version of the map before game starts\n \"\"\"\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. 
Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n sys.stdout.write(s)\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = sys.stdin.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n\n def __init__(self, name):\n \"\"\"\n Initialize the bot with the given name.\n\n :param name: The name of the bot.\n \"\"\"\n self._name = name\n self._send_name = False\n tag = int(self._get_string())\n GameStdIO._set_up_logging(tag, name)\n width, height = [int(x) for x in self._get_string().strip().split()]\n self.map = game_map.Map(tag, width, height)\n self.update_map()\n self.initial_map = copy.deepcopy(self.map)\n self._send_name = True\n self.done = False\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n\n def close(self):\n pass\n",
"<import token>\n\n\nclass GameUnix:\n <docstring token>\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n self.sfile.write(s)\n <function token>\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = self.sfile.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n\n def __init__(self, name, socket_path='/dev/shm/bot.sock'):\n \"\"\"\n Initialize the bot with the given name.\n\n :param name: The name of the bot.\n \"\"\"\n self.s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n connected = False\n while not connected:\n try:\n self.s.connect(socket_path)\n connected = True\n except Exception:\n pass\n self.sfile = self.s.makefile('rw')\n self._name = name\n self._send_name = False\n tag = int(self._get_string())\n GameUnix._set_up_logging(tag, name)\n width, height = [int(x) for x in self._get_string().strip().split()]\n self.map = game_map.Map(tag, width, height)\n self.update_map()\n self.initial_map = copy.deepcopy(self.map)\n self._send_name = True\n self.done = False\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n <function token>\n\n\nclass GameStdIO:\n \"\"\"\n :ivar map: Current map representation\n :ivar initial_map: The initial version of the map before game starts\n \"\"\"\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. 
Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n sys.stdout.write(s)\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = sys.stdin.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n\n def __init__(self, name):\n \"\"\"\n Initialize the bot with the given name.\n\n :param name: The name of the bot.\n \"\"\"\n self._name = name\n self._send_name = False\n tag = int(self._get_string())\n GameStdIO._set_up_logging(tag, name)\n width, height = [int(x) for x in self._get_string().strip().split()]\n self.map = game_map.Map(tag, width, height)\n self.update_map()\n self.initial_map = copy.deepcopy(self.map)\n self._send_name = True\n self.done = False\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n\n def close(self):\n pass\n",
"<import token>\n\n\nclass GameUnix:\n <docstring token>\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n self.sfile.write(s)\n <function token>\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = self.sfile.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n <function token>\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n <function token>\n\n\nclass GameStdIO:\n \"\"\"\n :ivar map: Current map representation\n :ivar initial_map: The initial version of the map before game starts\n \"\"\"\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. 
Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n sys.stdout.write(s)\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = sys.stdin.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n\n def __init__(self, name):\n \"\"\"\n Initialize the bot with the given name.\n\n :param name: The name of the bot.\n \"\"\"\n self._name = name\n self._send_name = False\n tag = int(self._get_string())\n GameStdIO._set_up_logging(tag, name)\n width, height = [int(x) for x in self._get_string().strip().split()]\n self.map = game_map.Map(tag, width, height)\n self.update_map()\n self.initial_map = copy.deepcopy(self.map)\n self._send_name = True\n self.done = False\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n\n def close(self):\n pass\n",
"<import token>\n\n\nclass GameUnix:\n <docstring token>\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n self.sfile.write(s)\n <function token>\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = self.sfile.readline().rstrip('\\n')\n return result\n <function token>\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n <function token>\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n <function token>\n\n\nclass GameStdIO:\n \"\"\"\n :ivar map: Current map representation\n :ivar initial_map: The initial version of the map before game starts\n \"\"\"\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n sys.stdout.write(s)\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = sys.stdin.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n\n def __init__(self, name):\n \"\"\"\n Initialize the bot with the given name.\n\n :param name: The name of the bot.\n \"\"\"\n self._name = name\n self._send_name = False\n tag = int(self._get_string())\n GameStdIO._set_up_logging(tag, name)\n width, height = [int(x) for x in self._get_string().strip().split()]\n self.map = game_map.Map(tag, width, height)\n self.update_map()\n self.initial_map = copy.deepcopy(self.map)\n self._send_name = True\n self.done = False\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n 
self.map._parse(recv)\n return self.map\n\n def close(self):\n pass\n",
"<import token>\n\n\nclass GameUnix:\n <docstring token>\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n self.sfile.write(s)\n <function token>\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = self.sfile.readline().rstrip('\\n')\n return result\n <function token>\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n <function token>\n <function token>\n <function token>\n\n\nclass GameStdIO:\n \"\"\"\n :ivar map: Current map representation\n :ivar initial_map: The initial version of the map before game starts\n \"\"\"\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n sys.stdout.write(s)\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = sys.stdin.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n\n def __init__(self, name):\n \"\"\"\n Initialize the bot with the given name.\n\n :param name: The name of the bot.\n \"\"\"\n self._name = name\n self._send_name = False\n tag = int(self._get_string())\n GameStdIO._set_up_logging(tag, name)\n width, height = [int(x) for x in self._get_string().strip().split()]\n self.map = game_map.Map(tag, width, height)\n self.update_map()\n self.initial_map = copy.deepcopy(self.map)\n self._send_name = True\n self.done = False\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n\n def close(self):\n pass\n",
"<import token>\n\n\nclass GameUnix:\n <docstring token>\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n self.sfile.write(s)\n <function token>\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = self.sfile.readline().rstrip('\\n')\n return result\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass GameStdIO:\n \"\"\"\n :ivar map: Current map representation\n :ivar initial_map: The initial version of the map before game starts\n \"\"\"\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n sys.stdout.write(s)\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = sys.stdin.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n\n def __init__(self, name):\n \"\"\"\n Initialize the bot with the given name.\n\n :param name: The name of the bot.\n \"\"\"\n self._name = name\n self._send_name = False\n tag = int(self._get_string())\n GameStdIO._set_up_logging(tag, name)\n width, height = [int(x) for x in self._get_string().strip().split()]\n self.map = game_map.Map(tag, width, height)\n self.update_map()\n self.initial_map = copy.deepcopy(self.map)\n self._send_name = True\n self.done = False\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n\n def close(self):\n pass\n",
"<import token>\n\n\nclass GameUnix:\n <docstring token>\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n self.sfile.write(s)\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass GameStdIO:\n \"\"\"\n :ivar map: Current map representation\n :ivar initial_map: The initial version of the map before game starts\n \"\"\"\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n sys.stdout.write(s)\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = sys.stdin.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n\n def __init__(self, name):\n \"\"\"\n Initialize the bot with the given name.\n\n :param name: The name of the bot.\n \"\"\"\n self._name = name\n self._send_name = False\n tag = int(self._get_string())\n GameStdIO._set_up_logging(tag, name)\n width, height = [int(x) for x in self._get_string().strip().split()]\n self.map = game_map.Map(tag, width, height)\n self.update_map()\n self.initial_map = copy.deepcopy(self.map)\n self._send_name = True\n self.done = False\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n\n def close(self):\n pass\n",
"<import token>\n\n\nclass GameUnix:\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass GameStdIO:\n \"\"\"\n :ivar map: Current map representation\n :ivar initial_map: The initial version of the map before game starts\n \"\"\"\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n sys.stdout.write(s)\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = sys.stdin.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n\n def __init__(self, name):\n \"\"\"\n Initialize the bot with the given name.\n\n :param name: The name of the bot.\n \"\"\"\n self._name = name\n self._send_name = False\n tag = int(self._get_string())\n GameStdIO._set_up_logging(tag, name)\n width, height = [int(x) for x in self._get_string().strip().split()]\n self.map = game_map.Map(tag, width, height)\n self.update_map()\n self.initial_map = copy.deepcopy(self.map)\n self._send_name = True\n self.done = False\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n\n def close(self):\n pass\n",
"<import token>\n<class token>\n\n\nclass GameStdIO:\n \"\"\"\n :ivar map: Current map representation\n :ivar initial_map: The initial version of the map before game starts\n \"\"\"\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n sys.stdout.write(s)\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = sys.stdin.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n\n def __init__(self, name):\n \"\"\"\n Initialize the bot with the given name.\n\n :param name: The name of the bot.\n \"\"\"\n self._name = name\n self._send_name = False\n tag = int(self._get_string())\n GameStdIO._set_up_logging(tag, name)\n width, height = [int(x) for x in self._get_string().strip().split()]\n self.map = game_map.Map(tag, width, height)\n self.update_map()\n self.initial_map = copy.deepcopy(self.map)\n self._send_name = True\n self.done = False\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n\n def close(self):\n pass\n",
"<import token>\n<class token>\n\n\nclass GameStdIO:\n <docstring token>\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n sys.stdout.write(s)\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = sys.stdin.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n\n def __init__(self, name):\n \"\"\"\n Initialize the bot with the given name.\n\n :param name: The name of the bot.\n \"\"\"\n self._name = name\n self._send_name = False\n tag = int(self._get_string())\n GameStdIO._set_up_logging(tag, name)\n width, height = [int(x) for x in self._get_string().strip().split()]\n self.map = game_map.Map(tag, width, height)\n self.update_map()\n self.initial_map = copy.deepcopy(self.map)\n self._send_name = True\n self.done = False\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n\n def close(self):\n pass\n",
"<import token>\n<class token>\n\n\nclass GameStdIO:\n <docstring token>\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n sys.stdout.write(s)\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = sys.stdin.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n <function token>\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n\n def close(self):\n pass\n",
"<import token>\n<class token>\n\n\nclass GameStdIO:\n <docstring token>\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n sys.stdout.write(s)\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n def _get_string(self):\n \"\"\"\n Read input from the game.\n\n :return: The input read from the Halite engine\n :rtype: str\n \"\"\"\n result = sys.stdin.readline().rstrip('\\n')\n return result\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n <function token>\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n <function token>\n",
"<import token>\n<class token>\n\n\nclass GameStdIO:\n <docstring token>\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n sys.stdout.write(s)\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n sys.stdout.write('\\n')\n sys.stdout.flush()\n <function token>\n\n def send_command_queue(self, command_queue):\n \"\"\"\n Issue the given list of commands.\n\n :param list[str] command_queue: List of commands to send the Halite engine\n :return: nothing\n \"\"\"\n for command in command_queue:\n self._send_string(command)\n self._done_sending()\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n <function token>\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n <function token>\n",
"<import token>\n<class token>\n\n\nclass GameStdIO:\n <docstring token>\n\n def _send_string(self, s):\n \"\"\"\n Send data to the game. Call :function:`done_sending` once finished.\n\n :param str s: String to send\n :return: nothing\n \"\"\"\n sys.stdout.write(s)\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n sys.stdout.write('\\n')\n sys.stdout.flush()\n <function token>\n <function token>\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n <function token>\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n <function token>\n",
"<import token>\n<class token>\n\n\nclass GameStdIO:\n <docstring token>\n <function token>\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n sys.stdout.write('\\n')\n sys.stdout.flush()\n <function token>\n <function token>\n\n @staticmethod\n def _set_up_logging(tag, name):\n \"\"\"\n Set up and truncate the log\n\n :param tag: The user tag (used for naming the log)\n :param name: The bot name (used for naming the log)\n :return: nothing\n \"\"\"\n log_file = '{}_{}.log'.format(tag, name)\n logging.basicConfig(filename=log_file, level=logging.DEBUG,\n filemode='w')\n logging.info('Initialized bot {}'.format(name))\n <function token>\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n <function token>\n",
"<import token>\n<class token>\n\n\nclass GameStdIO:\n <docstring token>\n <function token>\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n sys.stdout.write('\\n')\n sys.stdout.flush()\n <function token>\n <function token>\n <function token>\n <function token>\n\n def update_map(self):\n \"\"\"\n Parse the map given by the engine.\n\n :return: new parsed map\n :rtype: game_map.Map\n \"\"\"\n if self._send_name:\n self._send_string(self._name)\n self._done_sending()\n self._send_name = False\n logging.info('---NEW TURN---')\n recv = self._get_string()\n if recv == '':\n self.close()\n self.done = True\n return self.map\n self.map._parse(recv)\n return self.map\n <function token>\n",
"<import token>\n<class token>\n\n\nclass GameStdIO:\n <docstring token>\n <function token>\n\n def _done_sending(self):\n \"\"\"\n Finish sending commands to the game.\n\n :return: nothing\n \"\"\"\n sys.stdout.write('\\n')\n sys.stdout.flush()\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n\n\nclass GameStdIO:\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n<class token>\n"
] | false |
913 |
891588327046e26acb9a691fa8bb9a99420712d6
|
from django.conf.urls import url, include
from django.contrib import admin
from rest_framework_swagger.views import get_swagger_view
schema_view = get_swagger_view(title='Pastebin API')
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^doc_u/', schema_view),
    url(r'^', include('o.urls')),
url(r'^api/', include('restapi.urls', namespace='res')),
]
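
# Note (version assumption): include(..., namespace='res') requires restapi.urls
# to define app_name on Django >= 2.0, and django.conf.urls.url was removed in
# Django 4.0, so this urlconf targets the url()-era releases.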
|
[
"from django.conf.urls import url, include\nfrom django.contrib import admin\n\nfrom rest_framework_swagger.views import get_swagger_view\nschema_view = get_swagger_view(title='Pastebin API')\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^doc_u/', schema_view),\n url(r'^', include('o.urls', )),\n url(r'^api/', include('restapi.urls', namespace='res')),\n\n]\n",
"from django.conf.urls import url, include\nfrom django.contrib import admin\nfrom rest_framework_swagger.views import get_swagger_view\nschema_view = get_swagger_view(title='Pastebin API')\nurlpatterns = [url('^admin/', admin.site.urls), url('^doc_u/', schema_view),\n url('^', include('o.urls')), url('^api/', include('restapi.urls',\n namespace='res'))]\n",
"<import token>\nschema_view = get_swagger_view(title='Pastebin API')\nurlpatterns = [url('^admin/', admin.site.urls), url('^doc_u/', schema_view),\n url('^', include('o.urls')), url('^api/', include('restapi.urls',\n namespace='res'))]\n",
"<import token>\n<assignment token>\n"
] | false |
914 |
a6f3c51d4115a6e0d6f01aa75bf5e6e367840d43
|
from typing import (Any, Callable, Dict, List, Optional, Set, Tuple, Type,
Union, overload)
from pccm.stubs import EnumClassValue, EnumValue
from cumm.tensorview import Tensor
class ConvMainUnitTest:
@staticmethod
def implicit_gemm(input: Tensor, weight: Tensor, output: Tensor, padding: List[int], stride: List[int], dilation: List[int], ndim: int, iter_algo_: int, op_type_: int, i_ltype_: int, w_ltype_: int, o_ltype_: int, ts: Tuple[int, int, int], wts: Tuple[int, int, int], num_stage: int, dacc: int, dcomp: int, algo: str, tensorop: List[int], i_interleave: int = 1, w_interleave: int = 1, o_interleave: int = 1, alpha: float = 1, beta: float = 0, split_k_slices: int = 1, workspace: Tensor = Tensor(), mask_sparse: bool = False, increment_k_first: bool = False, mask: Tensor = Tensor(), mask_argsort: Tensor = Tensor(), indices: Tensor = Tensor(), mask_output: Tensor = Tensor()) -> None:
"""
Args:
input:
weight:
output:
padding:
stride:
dilation:
ndim:
iter_algo_:
op_type_:
i_ltype_:
w_ltype_:
o_ltype_:
ts:
wts:
num_stage:
dacc:
dcomp:
algo:
tensorop:
i_interleave:
w_interleave:
o_interleave:
alpha:
beta:
split_k_slices:
workspace:
mask_sparse:
increment_k_first:
mask:
mask_argsort:
indices:
mask_output:
"""
...
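
# Note: this is a .pyi-style stub -- the `...` body carries no implementation.
# The real implicit_gemm is implemented natively (the pccm/cumm imports point
# to generated bindings); the stub only exposes the fully typed signature
# (tile sizes ts/wts, split-K slicing, optional sparse masks).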
|
[
"from typing import (Any, Callable, Dict, List, Optional, Set, Tuple, Type,\n Union, overload)\n\nfrom pccm.stubs import EnumClassValue, EnumValue\n\nfrom cumm.tensorview import Tensor\n\nclass ConvMainUnitTest:\n @staticmethod\n def implicit_gemm(input: Tensor, weight: Tensor, output: Tensor, padding: List[int], stride: List[int], dilation: List[int], ndim: int, iter_algo_: int, op_type_: int, i_ltype_: int, w_ltype_: int, o_ltype_: int, ts: Tuple[int, int, int], wts: Tuple[int, int, int], num_stage: int, dacc: int, dcomp: int, algo: str, tensorop: List[int], i_interleave: int = 1, w_interleave: int = 1, o_interleave: int = 1, alpha: float = 1, beta: float = 0, split_k_slices: int = 1, workspace: Tensor = Tensor(), mask_sparse: bool = False, increment_k_first: bool = False, mask: Tensor = Tensor(), mask_argsort: Tensor = Tensor(), indices: Tensor = Tensor(), mask_output: Tensor = Tensor()) -> None: \n \"\"\"\n Args:\n input: \n weight: \n output: \n padding: \n stride: \n dilation: \n ndim: \n iter_algo_: \n op_type_: \n i_ltype_: \n w_ltype_: \n o_ltype_: \n ts: \n wts: \n num_stage: \n dacc: \n dcomp: \n algo: \n tensorop: \n i_interleave: \n w_interleave: \n o_interleave: \n alpha: \n beta: \n split_k_slices: \n workspace: \n mask_sparse: \n increment_k_first: \n mask: \n mask_argsort: \n indices: \n mask_output: \n \"\"\"\n ...",
"from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union, overload\nfrom pccm.stubs import EnumClassValue, EnumValue\nfrom cumm.tensorview import Tensor\n\n\nclass ConvMainUnitTest:\n\n @staticmethod\n def implicit_gemm(input: Tensor, weight: Tensor, output: Tensor,\n padding: List[int], stride: List[int], dilation: List[int], ndim:\n int, iter_algo_: int, op_type_: int, i_ltype_: int, w_ltype_: int,\n o_ltype_: int, ts: Tuple[int, int, int], wts: Tuple[int, int, int],\n num_stage: int, dacc: int, dcomp: int, algo: str, tensorop: List[\n int], i_interleave: int=1, w_interleave: int=1, o_interleave: int=1,\n alpha: float=1, beta: float=0, split_k_slices: int=1, workspace:\n Tensor=Tensor(), mask_sparse: bool=False, increment_k_first: bool=\n False, mask: Tensor=Tensor(), mask_argsort: Tensor=Tensor(),\n indices: Tensor=Tensor(), mask_output: Tensor=Tensor()) ->None:\n \"\"\"\n Args:\n input: \n weight: \n output: \n padding: \n stride: \n dilation: \n ndim: \n iter_algo_: \n op_type_: \n i_ltype_: \n w_ltype_: \n o_ltype_: \n ts: \n wts: \n num_stage: \n dacc: \n dcomp: \n algo: \n tensorop: \n i_interleave: \n w_interleave: \n o_interleave: \n alpha: \n beta: \n split_k_slices: \n workspace: \n mask_sparse: \n increment_k_first: \n mask: \n mask_argsort: \n indices: \n mask_output: \n \"\"\"\n ...\n",
"<import token>\n\n\nclass ConvMainUnitTest:\n\n @staticmethod\n def implicit_gemm(input: Tensor, weight: Tensor, output: Tensor,\n padding: List[int], stride: List[int], dilation: List[int], ndim:\n int, iter_algo_: int, op_type_: int, i_ltype_: int, w_ltype_: int,\n o_ltype_: int, ts: Tuple[int, int, int], wts: Tuple[int, int, int],\n num_stage: int, dacc: int, dcomp: int, algo: str, tensorop: List[\n int], i_interleave: int=1, w_interleave: int=1, o_interleave: int=1,\n alpha: float=1, beta: float=0, split_k_slices: int=1, workspace:\n Tensor=Tensor(), mask_sparse: bool=False, increment_k_first: bool=\n False, mask: Tensor=Tensor(), mask_argsort: Tensor=Tensor(),\n indices: Tensor=Tensor(), mask_output: Tensor=Tensor()) ->None:\n \"\"\"\n Args:\n input: \n weight: \n output: \n padding: \n stride: \n dilation: \n ndim: \n iter_algo_: \n op_type_: \n i_ltype_: \n w_ltype_: \n o_ltype_: \n ts: \n wts: \n num_stage: \n dacc: \n dcomp: \n algo: \n tensorop: \n i_interleave: \n w_interleave: \n o_interleave: \n alpha: \n beta: \n split_k_slices: \n workspace: \n mask_sparse: \n increment_k_first: \n mask: \n mask_argsort: \n indices: \n mask_output: \n \"\"\"\n ...\n",
"<import token>\n\n\nclass ConvMainUnitTest:\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
915 |
0dd361239d85ed485594ac0f5e7e2168f0684544
|
import pytest
import gadget
@pytest.mark.parametrize('invalid_line', [
'beginningGDG::',
'beginning::end',
'nothing',
])
def test_parse_invalid_line(invalid_line):
assert gadget.parse_log_line(invalid_line) is None
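
# These cases pin down the failure contract only: gadget.parse_log_line is
# expected to return None, rather than raise, for lines it cannot parse.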
|
[
"import pytest\nimport gadget\n\[email protected]('invalid_line', [\n 'beginningGDG::',\n 'beginning::end',\n 'nothing',\n])\ndef test_parse_invalid_line(invalid_line):\n assert gadget.parse_log_line(invalid_line) is None\n",
"import pytest\nimport gadget\n\n\[email protected]('invalid_line', ['beginningGDG::',\n 'beginning::end', 'nothing'])\ndef test_parse_invalid_line(invalid_line):\n assert gadget.parse_log_line(invalid_line) is None\n",
"<import token>\n\n\[email protected]('invalid_line', ['beginningGDG::',\n 'beginning::end', 'nothing'])\ndef test_parse_invalid_line(invalid_line):\n assert gadget.parse_log_line(invalid_line) is None\n",
"<import token>\n<function token>\n"
] | false |
916 |
7de19a85a6a05bd2972b11571d5f05219c6beb1a
|
import os
import shutil
# root_path = '../from_1691'
root_path = 'C:/Users/koyou/Desktop/test'
# Since mistakes are possible, dry_run decides whether to only log the plan
# or to actually carry it out.
# dry_run = True
dry_run = False
def move_directory(input_directory_path, output_directory_path):
print("moving %s to %s" % (input_directory_path, output_directory_path))
if not dry_run:
shutil.move(input_directory_path, output_directory_path)
#
# main
#
print("Root dir is %s" % root_path)
for level1 in os.listdir(root_path): # level1 == test1
level1_path = os.path.join(root_path, level1)
if os.path.isdir(level1_path):
        # Print the directory name so progress can be followed
print("> %s" % level1)
for level2 in os.listdir(level1_path): # level2 == test1-1
level2_path = os.path.join(level1_path, level2)
if os.path.isdir(level2_path):
                # print the level2 name
print(">> %s" % level2)
move_directory(level2_path, root_path)
        # Delete the level1 directory (and anything still left inside it)
print("Deleting %s" % level1_path)
if not dry_run:
shutil.rmtree(level1_path)
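
# Effect sketch: a tree like root/test1/test1-1 is flattened to root/test1-1,
# after which root/test1 (and any files still left inside it) is removed.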
|
[
"import os\nimport shutil\n\n# root_path = '../from_1691'\nroot_path = 'C:/Users/koyou/Desktop/test'\n\n# 실수할 수도 있으므로 dry_run 을 설정해서 로그만 찍을 것인지\n# 실제 작동도 진행할 것인지 결정한다.\n# dry_run = True\ndry_run = False\n\ndef move_directory(input_directory_path, output_directory_path):\n print(\"moving %s to %s\" % (input_directory_path, output_directory_path))\n if not dry_run:\n shutil.move(input_directory_path, output_directory_path)\n\n\n#\n# main\n#\nprint(\"Root dir is %s\" % root_path)\n\nfor level1 in os.listdir(root_path): # level1 == test1\n level1_path = os.path.join(root_path, level1)\n if os.path.isdir(level1_path):\n # 디렉토리 이름을 출력해줘야 진행상황 알 수 있음\n print(\"> %s\" % level1)\n\n for level2 in os.listdir(level1_path): # level2 == test1-1\n level2_path = os.path.join(level1_path, level2)\n if os.path.isdir(level2_path):\n # level2 이름 출력\n print(\">> %s\" % level2)\n\n move_directory(level2_path, root_path)\n\n # 2. deleting dir\n print(\"Deleting %s\" % level1_path)\n if not dry_run:\n shutil.rmtree(level1_path)\n",
"import os\nimport shutil\nroot_path = 'C:/Users/koyou/Desktop/test'\ndry_run = False\n\n\ndef move_directory(input_directory_path, output_directory_path):\n print('moving %s to %s' % (input_directory_path, output_directory_path))\n if not dry_run:\n shutil.move(input_directory_path, output_directory_path)\n\n\nprint('Root dir is %s' % root_path)\nfor level1 in os.listdir(root_path):\n level1_path = os.path.join(root_path, level1)\n if os.path.isdir(level1_path):\n print('> %s' % level1)\n for level2 in os.listdir(level1_path):\n level2_path = os.path.join(level1_path, level2)\n if os.path.isdir(level2_path):\n print('>> %s' % level2)\n move_directory(level2_path, root_path)\n print('Deleting %s' % level1_path)\n if not dry_run:\n shutil.rmtree(level1_path)\n",
"<import token>\nroot_path = 'C:/Users/koyou/Desktop/test'\ndry_run = False\n\n\ndef move_directory(input_directory_path, output_directory_path):\n print('moving %s to %s' % (input_directory_path, output_directory_path))\n if not dry_run:\n shutil.move(input_directory_path, output_directory_path)\n\n\nprint('Root dir is %s' % root_path)\nfor level1 in os.listdir(root_path):\n level1_path = os.path.join(root_path, level1)\n if os.path.isdir(level1_path):\n print('> %s' % level1)\n for level2 in os.listdir(level1_path):\n level2_path = os.path.join(level1_path, level2)\n if os.path.isdir(level2_path):\n print('>> %s' % level2)\n move_directory(level2_path, root_path)\n print('Deleting %s' % level1_path)\n if not dry_run:\n shutil.rmtree(level1_path)\n",
"<import token>\n<assignment token>\n\n\ndef move_directory(input_directory_path, output_directory_path):\n print('moving %s to %s' % (input_directory_path, output_directory_path))\n if not dry_run:\n shutil.move(input_directory_path, output_directory_path)\n\n\nprint('Root dir is %s' % root_path)\nfor level1 in os.listdir(root_path):\n level1_path = os.path.join(root_path, level1)\n if os.path.isdir(level1_path):\n print('> %s' % level1)\n for level2 in os.listdir(level1_path):\n level2_path = os.path.join(level1_path, level2)\n if os.path.isdir(level2_path):\n print('>> %s' % level2)\n move_directory(level2_path, root_path)\n print('Deleting %s' % level1_path)\n if not dry_run:\n shutil.rmtree(level1_path)\n",
"<import token>\n<assignment token>\n\n\ndef move_directory(input_directory_path, output_directory_path):\n print('moving %s to %s' % (input_directory_path, output_directory_path))\n if not dry_run:\n shutil.move(input_directory_path, output_directory_path)\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<function token>\n<code token>\n"
] | false |
917 |
c85d7e799a652e82bfaf58e1e8bfa9c4606a8ecb
|
import ast
import datetime
from pathlib import Path
from typing import Any, Dict
import yaml
from .lemmatizer import LemmatizerPymorphy2, Preprocessor
def get_config(path_to_config: str) -> Dict[str, Any]:
"""Get config.
Args:
path_to_config (str): Path to config.
Returns:
Dict[str, Any]: Config.
"""
with open(path_to_config, mode="r") as fp:
config = yaml.safe_load(fp)
# backward compatibility
if "experiment_name" not in config:
config["experiment_name"] = "model"
config["path_to_save_folder"] = (
Path(config["path_to_save_folder"])
/ f"{config['experiment_name']}_{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}"
)
config["path_to_config"] = path_to_config
config["path_to_save_model"] = config["path_to_save_folder"] / "model.joblib"
config["path_to_save_logfile"] = config["path_to_save_folder"] / "logging.txt"
config["path_to_save_target_names_mapping"] = (
config["path_to_save_folder"] / "target_names.json"
)
# tf-idf
if ("tf-idf" not in config) or (config["tf-idf"] is None):
config["tf-idf"] = {}
if "ngram_range" in config["tf-idf"]:
config["tf-idf"]["ngram_range"] = ast.literal_eval(
config["tf-idf"]["ngram_range"]
)
if "preprocessing" in config: # backward compatibility
lemmatization = config["preprocessing"]["lemmatization"]
if lemmatization:
if lemmatization == "pymorphy2":
lemmatizer = LemmatizerPymorphy2()
preprocessor = Preprocessor(lemmatizer)
config["tf-idf"]["preprocessor"] = preprocessor
else:
raise KeyError(
f"Unknown lemmatizer {lemmatization}. Available lemmatizers: none, pymorphy2."
)
# logreg
if ("logreg" not in config) or (config["logreg"] is None):
config["logreg"] = {}
return config
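

# For illustration only: a minimal config that get_config above would accept.
# The keys mirror the ones read in the function; the concrete values (paths,
# the logreg parameter) are assumptions, not defaults from the source.
EXAMPLE_CONFIG_YAML = """
experiment_name: model
path_to_save_folder: models
tf-idf:
  ngram_range: "(1, 2)"        # parsed with ast.literal_eval above
preprocessing:
  lemmatization: pymorphy2     # the only lemmatizer handled above
logreg:
  C: 1.0                       # hypothetical classifier keyword argument
"""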
|
[
"import ast\nimport datetime\nfrom pathlib import Path\nfrom typing import Any, Dict\n\nimport yaml\n\nfrom .lemmatizer import LemmatizerPymorphy2, Preprocessor\n\n\ndef get_config(path_to_config: str) -> Dict[str, Any]:\n \"\"\"Get config.\n\n Args:\n path_to_config (str): Path to config.\n\n Returns:\n Dict[str, Any]: Config.\n \"\"\"\n\n with open(path_to_config, mode=\"r\") as fp:\n config = yaml.safe_load(fp)\n\n # backward compatibility\n if \"experiment_name\" not in config:\n config[\"experiment_name\"] = \"model\"\n\n config[\"path_to_save_folder\"] = (\n Path(config[\"path_to_save_folder\"])\n / f\"{config['experiment_name']}_{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}\"\n )\n\n config[\"path_to_config\"] = path_to_config\n config[\"path_to_save_model\"] = config[\"path_to_save_folder\"] / \"model.joblib\"\n config[\"path_to_save_logfile\"] = config[\"path_to_save_folder\"] / \"logging.txt\"\n config[\"path_to_save_target_names_mapping\"] = (\n config[\"path_to_save_folder\"] / \"target_names.json\"\n )\n\n # tf-idf\n if (\"tf-idf\" not in config) or (config[\"tf-idf\"] is None):\n config[\"tf-idf\"] = {}\n if \"ngram_range\" in config[\"tf-idf\"]:\n config[\"tf-idf\"][\"ngram_range\"] = ast.literal_eval(\n config[\"tf-idf\"][\"ngram_range\"]\n )\n\n if \"preprocessing\" in config: # backward compatibility\n lemmatization = config[\"preprocessing\"][\"lemmatization\"]\n\n if lemmatization:\n if lemmatization == \"pymorphy2\":\n lemmatizer = LemmatizerPymorphy2()\n preprocessor = Preprocessor(lemmatizer)\n\n config[\"tf-idf\"][\"preprocessor\"] = preprocessor\n\n else:\n raise KeyError(\n f\"Unknown lemmatizer {lemmatization}. Available lemmatizers: none, pymorphy2.\"\n )\n\n # logreg\n if (\"logreg\" not in config) or (config[\"logreg\"] is None):\n config[\"logreg\"] = {}\n\n return config\n",
"import ast\nimport datetime\nfrom pathlib import Path\nfrom typing import Any, Dict\nimport yaml\nfrom .lemmatizer import LemmatizerPymorphy2, Preprocessor\n\n\ndef get_config(path_to_config: str) ->Dict[str, Any]:\n \"\"\"Get config.\n\n Args:\n path_to_config (str): Path to config.\n\n Returns:\n Dict[str, Any]: Config.\n \"\"\"\n with open(path_to_config, mode='r') as fp:\n config = yaml.safe_load(fp)\n if 'experiment_name' not in config:\n config['experiment_name'] = 'model'\n config['path_to_save_folder'] = (Path(config['path_to_save_folder']) /\n f\"{config['experiment_name']}_{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}\"\n )\n config['path_to_config'] = path_to_config\n config['path_to_save_model'] = config['path_to_save_folder'\n ] / 'model.joblib'\n config['path_to_save_logfile'] = config['path_to_save_folder'\n ] / 'logging.txt'\n config['path_to_save_target_names_mapping'] = config['path_to_save_folder'\n ] / 'target_names.json'\n if 'tf-idf' not in config or config['tf-idf'] is None:\n config['tf-idf'] = {}\n if 'ngram_range' in config['tf-idf']:\n config['tf-idf']['ngram_range'] = ast.literal_eval(config['tf-idf']\n ['ngram_range'])\n if 'preprocessing' in config:\n lemmatization = config['preprocessing']['lemmatization']\n if lemmatization:\n if lemmatization == 'pymorphy2':\n lemmatizer = LemmatizerPymorphy2()\n preprocessor = Preprocessor(lemmatizer)\n config['tf-idf']['preprocessor'] = preprocessor\n else:\n raise KeyError(\n f'Unknown lemmatizer {lemmatization}. Available lemmatizers: none, pymorphy2.'\n )\n if 'logreg' not in config or config['logreg'] is None:\n config['logreg'] = {}\n return config\n",
"<import token>\n\n\ndef get_config(path_to_config: str) ->Dict[str, Any]:\n \"\"\"Get config.\n\n Args:\n path_to_config (str): Path to config.\n\n Returns:\n Dict[str, Any]: Config.\n \"\"\"\n with open(path_to_config, mode='r') as fp:\n config = yaml.safe_load(fp)\n if 'experiment_name' not in config:\n config['experiment_name'] = 'model'\n config['path_to_save_folder'] = (Path(config['path_to_save_folder']) /\n f\"{config['experiment_name']}_{datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}\"\n )\n config['path_to_config'] = path_to_config\n config['path_to_save_model'] = config['path_to_save_folder'\n ] / 'model.joblib'\n config['path_to_save_logfile'] = config['path_to_save_folder'\n ] / 'logging.txt'\n config['path_to_save_target_names_mapping'] = config['path_to_save_folder'\n ] / 'target_names.json'\n if 'tf-idf' not in config or config['tf-idf'] is None:\n config['tf-idf'] = {}\n if 'ngram_range' in config['tf-idf']:\n config['tf-idf']['ngram_range'] = ast.literal_eval(config['tf-idf']\n ['ngram_range'])\n if 'preprocessing' in config:\n lemmatization = config['preprocessing']['lemmatization']\n if lemmatization:\n if lemmatization == 'pymorphy2':\n lemmatizer = LemmatizerPymorphy2()\n preprocessor = Preprocessor(lemmatizer)\n config['tf-idf']['preprocessor'] = preprocessor\n else:\n raise KeyError(\n f'Unknown lemmatizer {lemmatization}. Available lemmatizers: none, pymorphy2.'\n )\n if 'logreg' not in config or config['logreg'] is None:\n config['logreg'] = {}\n return config\n",
"<import token>\n<function token>\n"
] | false |
918 |
73ff1444b5ab1469b616fe449ee6ab93acbbf85a
|
import time
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtSql import *
from PyQt5.QtWidgets import *
from qgis.core import (QgsDataSourceUri, QgsFeature, QgsGeometry,
                       QgsGraduatedSymbolRenderer, QgsLineSymbol, QgsProject,
                       QgsRendererRange, QgsSymbol, QgsVectorLayer)
import qgis.utils
from shapely import wkb
print(__name__)
# Function definition
def TicTocGenerator():
# Generator that returns time differences
ti = 0 # initial time
tf = time.time() # final time
while True:
ti = tf
tf = time.time()
yield tf - ti # returns the time difference
def toc(tempBool=True):
# Prints the time difference yielded by generator instance TicToc
tempTimeInterval = next(TicToc)
if tempBool:
print("Elapsed time: %f seconds.\n" % tempTimeInterval)
def tic():
# Records a time in TicToc, marks the beginning of a time interval
toc(False)
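
# Minimal usage sketch of the tic/toc pair above (TicToc, the shared generator
# instance, is created near the bottom of this file):
#
#     tic()   # mark the start of an interval
#     ...     # timed work
#     toc()   # prints "Elapsed time: ... seconds."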
def removeRoutesLayers():
    layers = QgsProject.instance().mapLayers()
    keep = {"model_graph", "emme_zones", "labels", "OpenStreetMap",
            "all_results", "Centroider", "dijk_result_table", "ata_lid",
            "result_table"}
    for layer_id, layer in layers.items():
        if str(layer.name()) not in keep:
            QgsProject.instance().removeMapLayer(layer.id())
# Prints a route set based on what's in result_table.
def printRoutes():
i = 1
    # (add WHERE rejoin_link=0 to the queries below to skip rejoin links when printing)
query = db.exec_("SELECT MAX(did) FROM result_table")
query.next()
print(query.value(0))
nr_routes = query.value(0)
lid_list_q = db.exec_("SELECT result_table.lid, foo.count FROM result_table INNER JOIN (SELECT count(*), lid \
FROM result_table WHERE not did=(-1) group by lid) as foo ON(result_table.lid = foo.lid) WHERE not did=(-1) \
group by result_table.lid, foo.count")
lid_list = []
lid_count = []
while lid_list_q.next():
lid_list.append(lid_list_q.value(0))
lid_count.append(lid_list_q.value(1))
    # Source: https://sashat.me/2017/01/11/list-of-20-simple-distinct-colors/
color_list = [QColor.fromRgb(128, 0, 0), QColor.fromRgb(170, 10, 40), QColor.fromRgb(128, 128, 0),
QColor.fromRgb(0, 128, 128), QColor.fromRgb(0, 0, 128), QColor.fromRgb(0, 0, 0),
QColor.fromRgb(230, 25, 75), QColor.fromRgb(245, 130, 48), QColor.fromRgb(255, 255, 25),
QColor.fromRgb(210, 245, 60), QColor.fromRgb(60, 180, 75), QColor.fromRgb(70, 240, 240),
QColor.fromRgb(0, 130, 200), QColor.fromRgb(145, 30, 180), QColor.fromRgb(240, 50, 230),
QColor.fromRgb(128, 128, 128), QColor.fromRgb(250, 190, 190), QColor.fromRgb(255, 215, 180),
QColor.fromRgb(255, 250, 200), QColor.fromRgb(170, 255, 195)]
bad_color = ['Maroon', 'Magenta', 'Olive', 'Orange', 'Navy', 'Black', 'Red', 'Teal', 'Blue', 'Lime', 'Cyan', 'Green'
, 'Brown', 'Purple', 'Yellow', 'Grey', 'Pink', 'Apricot', 'Beige', 'Mint', 'Lavender']
lid_c = []
# while lid_query.next():
while i <= nr_routes:
        dummy_q = db.exec_(
            "SELECT did, lid, ST_astext(geom) as geom FROM result_table "
            "WHERE not did=(-1) and result_table.did = " + str(i))
layert = QgsVectorLayer("MultiLineString?crs=epsg:3006", " route " + str(i), "memory")
QgsProject.instance().addMapLayer(layert)
featurelist = []
while dummy_q.next():
lid = dummy_q.value(1)
seg = QgsFeature()
j = 0
while j < len(lid_list):
if lid == lid_list[j]:
lid_nr = j
j += 1
# print("lid nr is:"+str(lid_nr)+ " lid is :"+str(lid_list[lid_nr])+" lid count is:"+str(lid_count[lid_nr]))
nr_included = 0
dummy = 0
j = 0
while j < len(lid_c):
if lid == lid_c[j]:
nr_included += 1
j += 1
# if dummy < nr_included:
# dummy = nr_included
lid_c.append(lid)
if lid_count[lid_nr] == 1:
offset = 0
else:
if lid_count[lid_nr] % 2 == 0:
# Even
off = (-lid_count[lid_nr] / 2) + nr_included
if off == 0:
offset = ((-lid_count[lid_nr] / 2) + nr_included + 1) * 200
else:
                        # offset in map units; the 200 scale matches the off == 0 case above
                        offset = ((-lid_count[lid_nr] / 2) + nr_included) * 200
else:
# Odd
print("odd value is :", (-lid_count[lid_nr] / 2) + nr_included)
print("odd value rounded is :", int((-lid_count[lid_nr] / 2) + nr_included))
                    offset = int(((-lid_count[lid_nr] / 2) + nr_included) * 200)
                    print("odd ", offset)
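            # Worked example (scale 200): three routes sharing a lid
            # (lid_count == 3) get offsets int(-1.5 * 200) = -300,
            # int(-0.5 * 200) = -100 and int(0.5 * 200) = 100.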
seg.setGeometry(QgsGeometry.fromWkt(dummy_q.value(2)).offsetCurve(offset, 1, 1, 2.0))
featurelist.append(seg)
symbol = QgsLineSymbol.createSimple({'color': bad_color[i], 'width': '0.4'})
renderers = layert.renderer()
renderers.setSymbol(symbol.clone())
qgis.utils.iface.layerTreeView().refreshLayerSymbology(layert.id())
single_symbol_renderer = layert.renderer()
symbol = single_symbol_renderer.symbol()
symbol.setWidth(0.8)
layert.dataProvider().addFeatures(featurelist)
layert.triggerRepaint()
i += 1
print("route nr", i - 1)
print("nr included max ", dummy)
# Start node
start_q = db.exec_("SELECT lid, ST_astext(ST_PointN(the_geom,1)) AS start \
FROM (SELECT lid, (ST_Dump(geom)).geom As the_geom \
FROM result_table WHERE did=1 and path_seq=1) As foo")
start_q.next()
layer = QgsVectorLayer('Point?crs=epsg:3006', 'Start', 'memory')
# Set the provider to accept the data source
prov = layer.dataProvider()
# Add a new feature and assign the geometry
feat = QgsFeature()
feat.setGeometry(QgsGeometry.fromWkt(start_q.value(1)))
prov.addFeatures([feat])
# Update extent of the layer
layer.updateExtents()
# Add the layer to the Layers panel
QgsProject.instance().addMapLayer(layer)
single_symbol_renderer = layer.renderer()
symbol1 = single_symbol_renderer.symbol()
symbol1.setColor(QColor.fromRgb(0, 225, 0))
symbol1.setSize(3)
# more efficient than refreshing the whole canvas, which requires a redraw of ALL layers
layer.triggerRepaint()
# update legend for layer
qgis.utils.iface.layerTreeView().refreshLayerSymbology(layer.id())
# End Node
end_q = db.exec_("SELECT lid, ST_astext(ST_PointN(the_geom,-1)) AS start FROM (SELECT lid, (ST_Dump(geom)).geom As the_geom \
FROM result_table WHERE path_seq = (SELECT max(path_seq) FROM result_table WHERE did=1) and did=1) AS foo")
end_q.next()
layere = QgsVectorLayer('Point?crs=epsg:3006', 'End', 'memory')
# Set the provider to accept the data source
prov = layere.dataProvider()
# Add a new feature and assign the geometry
feat = QgsFeature()
feat.setGeometry(QgsGeometry.fromWkt(end_q.value(1)))
prov.addFeatures([feat])
# Update extent of the layer
layere.updateExtents()
# Add the layer to the Layers panel
QgsProject.instance().addMapLayer(layere)
single_symbol_renderer = layere.renderer()
symbol = single_symbol_renderer.symbol()
symbol.setColor(QColor.fromRgb(255, 0, 0))
symbol.setSize(3)
layere.triggerRepaint()
qgis.utils.iface.layerTreeView().refreshLayerSymbology(layere.id())
def printRoutesRejoin():
i = 1
    # (the queries below already filter on rejoin_link=0)
query = db.exec_("SELECT MAX(did) FROM result_table")
query.next()
nr_routes = query.value(0)
    # Source: https://sashat.me/2017/01/11/list-of-20-simple-distinct-colors/
color_list = [QColor.fromRgb(128, 0, 0), QColor.fromRgb(170, 10, 40), QColor.fromRgb(128, 128, 0),
QColor.fromRgb(0, 128, 128), QColor.fromRgb(0, 0, 128), QColor.fromRgb(0, 0, 0),
QColor.fromRgb(230, 25, 75), QColor.fromRgb(245, 130, 48), QColor.fromRgb(255, 255, 25),
QColor.fromRgb(210, 245, 60), QColor.fromRgb(60, 180, 75), QColor.fromRgb(70, 240, 240),
QColor.fromRgb(0, 130, 200), QColor.fromRgb(145, 30, 180), QColor.fromRgb(240, 50, 230),
QColor.fromRgb(128, 128, 128), QColor.fromRgb(250, 190, 190), QColor.fromRgb(255, 215, 180),
QColor.fromRgb(255, 250, 200), QColor.fromRgb(170, 255, 195)]
bad_color = ['Maroon', 'Magenta', 'Olive', 'Orange', 'Navy', 'Black', 'Red', 'Teal', 'Blue', 'Lime', 'Cyan', 'Green'
, 'Brown', 'Purple', 'Yellow', 'Grey', 'Pink', 'Apricot', 'Beige', 'Mint', 'Lavender']
while i <= nr_routes:
# Routes without offset
sqlcall = "(select lid, did, geom from result_table where lid in (select lid from result_table group by lid having \
count(*) = 1) and did =" + str(i) + " and rejoin_link=0 group by lid, did, geom ORDER BY lid, did)"
uri.setDataSource("", sqlcall, "geom", "", "lid")
layert = QgsVectorLayer(uri.uri(), " route " + str(i), "postgres")
QgsProject.instance().addMapLayer(layert)
symbol = QgsLineSymbol.createSimple({'color': bad_color[i],
'width': '0.6',
'offset': '0'})
renderers = layert.renderer()
renderers.setSymbol(symbol.clone())
qgis.utils.iface.layerTreeView().refreshLayerSymbology(layert.id())
# single_symbol_renderer = layert.renderer()
# symbol = single_symbol_renderer.symbol()
# symbol.setWidth(0.8)
# Routes in need of offset
sqlcall = "(select lid, did, geom from result_table where lid in (select lid from result_table \
group by lid having count(*) > 1) and did=" + str(
i) + " and rejoin_link=0 group by lid, did, geom ORDER BY lid, did)"
uri.setDataSource("", sqlcall, "geom", "", "lid")
layert = QgsVectorLayer(uri.uri(), " route " + str(i), "postgres")
QgsProject.instance().addMapLayer(layert)
if i == 1:
offset = 0
else:
offset = i - i * 0.7
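            # equivalently offset = 0.3 * i: each later route is drawn
            # 0.3 units further from the shared geometry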
print("i is " + str(i) + " and offset is:" + str(offset))
symbol = QgsLineSymbol.createSimple({'color': bad_color[i],
'width': '0.4',
'offset': str(offset)})
renderers = layert.renderer()
renderers.setSymbol(symbol.clone())
qgis.utils.iface.layerTreeView().refreshLayerSymbology(layert.id())
# single_symbol_renderer = layert.renderer()
# symbol = single_symbol_renderer.symbol()
# symbol.setWidth(0.8)
#
# if i < len(color_list):
# symbol.setColor(color_list[i])
# layert.triggerRepaint()
# qgis.utils.iface.layerTreeView().refreshLayerSymbology(layert.id())
#
i = i + 1
# Start node
start_q = db.exec_("SELECT lid, ST_astext(ST_PointN(the_geom,1)) AS start \
FROM (SELECT lid, (ST_Dump(geom)).geom As the_geom \
FROM result_table WHERE did=1 and path_seq=1) As foo")
start_q.next()
layer = QgsVectorLayer('Point?crs=epsg:3006', 'Start', 'memory')
# Set the provider to accept the data source
prov = layer.dataProvider()
# Add a new feature and assign the geometry
feat = QgsFeature()
feat.setGeometry(QgsGeometry.fromWkt(start_q.value(1)))
prov.addFeatures([feat])
# Update extent of the layer
layer.updateExtents()
# Add the layer to the Layers panel
QgsProject.instance().addMapLayer(layer)
single_symbol_renderer = layer.renderer()
symbol = single_symbol_renderer.symbol()
symbol.setColor(QColor.fromRgb(0, 225, 0))
symbol.setSize(3)
# more efficient than refreshing the whole canvas, which requires a redraw of ALL layers
layer.triggerRepaint()
# update legend for layer
qgis.utils.iface.layerTreeView().refreshLayerSymbology(layer.id())
# End Node
end_q = db.exec_("SELECT lid, ST_astext(ST_PointN(the_geom,-1)) AS start FROM (SELECT lid, (ST_Dump(geom)).geom As the_geom \
FROM result_table WHERE path_seq = (SELECT max(path_seq) FROM result_table WHERE did=1) and did=1) AS foo")
end_q.next()
layere = QgsVectorLayer('Point?crs=epsg:3006', 'END', 'memory')
# Set the provider to accept the data source
prov = layere.dataProvider()
# Add a new feature and assign the geometry
feat = QgsFeature()
feat.setGeometry(QgsGeometry.fromWkt(end_q.value(1)))
prov.addFeatures([feat])
# Update extent of the layer
layere.updateExtents()
# Add the layer to the Layers panel
QgsProject.instance().addMapLayer(layere)
single_symbol_renderer = layere.renderer()
symbol = single_symbol_renderer.symbol()
symbol.setColor(QColor.fromRgb(255, 0, 0))
symbol.setSize(3)
    layere.triggerRepaint()
qgis.utils.iface.layerTreeView().refreshLayerSymbology(layere.id())
# needed from the database: start_list, end_list, lids
def print_selected_pairs():
# Removes layers not specified in removeRoutesLayers
removeRoutesLayers()
# Get list and removed lids
lids = []
temp_query1 = db.exec_("SELECT * FROM removed_lids")
while temp_query1.next():
lids.append(temp_query1.value(0))
temp_query2 = db.exec_("SELECT DISTINCT start_zone AS start_zones, end_zone AS end_zones FROM all_results")
start_list = []
end_list = []
while temp_query2.next():
start_list.append(temp_query2.value(0))
end_list.append(temp_query2.value(1))
    # First create the db tables necessary for visualizing the OD pairs in start_list and end_list
# Create OD_lines table
db.exec_("DROP table if exists OD_lines")
db.exec_("SELECT ST_MakeLine(ST_Centroid(geom) ORDER BY id) AS geom into od_lines "
"FROM emme_zones where id = " + str(start_list[0]) + " OR id = " + str(end_list[0]) + "")
# Create emme_result table
db.exec_("DROP table if exists emme_results")
db.exec_("SELECT 0.0 as alt_route_cost,* INTO emme_results FROM emme_zones")
i = 0
while i < len(start_list):
if i > 0:
db.exec_("INSERT INTO OD_lines(geom) SELECT ST_MakeLine(ST_Centroid(geom) ORDER BY id) "
"AS geom FROM emme_zones where id = " + str(start_list[i]) + " OR id = " + str(end_list[i]) + "")
result_test = odEffect(start_list[i], end_list[i], lids)
print("Result of " + str(i) + " is: " + str(result_test))
        db.exec_(
            "UPDATE emme_results SET alt_route_cost = " + str(result_test) +
            " WHERE id = '" + str(start_list[i]) + "' OR id = '" + str(end_list[i]) + "';")
i += 1
db.exec_("ALTER TABLE OD_lines ADD COLUMN id SERIAL PRIMARY KEY;")
sqlcall = "(SELECT * FROM emme_results)"
uri.setDataSource("", sqlcall, "geom", "", "id")
layer = QgsVectorLayer(uri.uri(), "result_deterioration ", "postgres")
QgsProject.instance().addMapLayer(layer)
values = (
('Not affected', -3, -3, QColor.fromRgb(0, 0, 200)),
('No route', -2, -2, QColor.fromRgb(0, 225, 200)),
('No route that is not affected', -1, -1, QColor.fromRgb(255, 0, 0)),
('Not searched', 0, 0, QColor.fromRgb(255, 255, 255)),
('Alternative route: 1-10 % deterioration', 0, 1.1, QColor.fromRgb(102, 255, 102)),
('Alternative route: 10-100 % deterioration', 1.1, 1000, QColor.fromRgb(255, 255, 0)),
)
# create a category for each item in values
ranges = []
for label, lower, upper, color in values:
symbol = QgsSymbol.defaultSymbol(layer.geometryType())
symbol.setColor(QColor(color))
rng = QgsRendererRange(lower, upper, symbol, label)
ranges.append(rng)
## create the renderer and assign it to a layer
expression = 'alt_route_cost' # field name
layer.setRenderer(QgsGraduatedSymbolRenderer(expression, ranges))
# iface.mapCanvas().refresh()
# Print lines from od_lines
sqlcall = "(SELECT * FROM od_lines )"
uri.setDataSource("", sqlcall, "geom", "", "id")
layert = QgsVectorLayer(uri.uri(), " OD_pairs ", "postgres")
QgsProject.instance().addMapLayer(layert)
# fetched from the database: the zone list and removed_lids
def allToAll():
# Removes layers not specified in removeRoutesLayers
removeRoutesLayers()
# Get list and removed lids
removed_lids = []
temp_query1 = db.exec_("SELECT * FROM removed_lids")
while temp_query1.next():
removed_lids.append(temp_query1.value(0))
    temp_query2 = db.exec_("SELECT DISTINCT start_zone AS start_zones FROM all_results")
    zones = []
    while temp_query2.next():
        zones.append(temp_query2.value(0))
removed_lid_string = "( lid = " + str(removed_lids[0])
i = 1
while i < len(removed_lids):
removed_lid_string += " or lid =" + str(removed_lids[i])
i += 1
removed_lid_string += ")"
    # This query builds a table of every route that passes through a removed lid
    db.exec_("DROP TABLE IF EXISTS temp_test")
db.exec_(
" select * into temp_test from all_results f where exists(select 1 from all_results l where " + removed_lid_string + " and"
" (f.start_zone = l.start_zone and f.end_zone = l.end_zone and f.did = l.did))")
    # Build a new layer with summary statistics for each zone
# Create emme_result table
db.exec_("DROP table if exists emme_results")
db.exec_(
"SELECT 0 as nr_non_affected, 0 as nr_no_routes, 0 as nr_all_routes_affected, 0.0 as mean_deterioration, 0 as nr_pairs,* INTO emme_results FROM emme_zones")
i = 0
    while i < len(zones):
        result = analysis_multiple_zones(zones[i], zones, removed_lids)
        db.exec_("UPDATE emme_results SET nr_non_affected = " + str(result[0]) + " , nr_no_routes = " +
                 str(result[1]) + " , nr_all_routes_affected = " + str(result[2]) + " , mean_deterioration = " +
                 str(result[3]) + " , nr_pairs = " + str(result[4]) + " WHERE id = " +
                 str(zones[i]) + ";")
i += 1
############################ Create layer for mean deterioration
sqlcall = "(SELECT * FROM emme_results)"
uri.setDataSource("", sqlcall, "geom", "", "id")
layer = QgsVectorLayer(uri.uri(), "mean_deterioration ", "postgres")
QgsProject.instance().addMapLayer(layer)
values = (
('Not searched', 0, 0, QColor.fromRgb(255, 255, 255)),
('No deterioration', -1, -1, QColor.fromRgb(153, 204, 255)),
('Mean deterioration 1-20% ', 0, 1.2, QColor.fromRgb(102, 255, 102)),
('Mean deterioration 20-30% ', 1.2, 1.3, QColor.fromRgb(255, 255, 153)),
('Mean deterioration 30-50% ', 1.3, 1.5, QColor.fromRgb(255, 178, 102)),
('Mean deterioration 50-100% ', 1.5, 100, QColor.fromRgb(255, 102, 102)),
)
# create a category for each item in values
ranges = []
for label, lower, upper, color in values:
symbol = QgsSymbol.defaultSymbol(layer.geometryType())
symbol.setColor(QColor(color))
rng = QgsRendererRange(lower, upper, symbol, label)
ranges.append(rng)
## create the renderer and assign it to a layer
expression = 'mean_deterioration' # field name
layer.setRenderer(QgsGraduatedSymbolRenderer(expression, ranges))
############################ Create layer for nr_affected OD-pairs
sqlcall = "(select CASE WHEN nr_pairs > 0 THEN cast((nr_pairs - nr_non_affected) as float)/nr_pairs " \
"ELSE 100 END as prop_affected,* from emme_results)"
uri.setDataSource("", sqlcall, "geom", "", "id")
layer = QgsVectorLayer(uri.uri(), "prop_affected ", "postgres")
QgsProject.instance().addMapLayer(layer)
values = (
('Not searched', 1, 100, QColor.fromRgb(255, 255, 255)),
('0% affected pairs', 0, 0, QColor.fromRgb(153, 204, 255)),
('1-20% affected pairs', 0, 0.2, QColor.fromRgb(102, 255, 102)),
('20-30% affected pairs', 0.2, 0.3, QColor.fromRgb(255, 255, 153)),
('30-50% affected pairs', 0.3, 0.5, QColor.fromRgb(255, 178, 102)),
('50-100% affected pairs', 0.5, 1, QColor.fromRgb(255, 102, 102)),
)
# create a category for each item in values
ranges = []
for label, lower, upper, color in values:
symbol = QgsSymbol.defaultSymbol(layer.geometryType())
symbol.setColor(QColor(color))
rng = QgsRendererRange(lower, upper, symbol, label)
ranges.append(rng)
## create the renderer and assign it to a layer
expression = 'prop_affected' # field name
layer.setRenderer(QgsGraduatedSymbolRenderer(expression, ranges))
def odEffect(start, end, lids):
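    """Compare an OD pair's optimal route with its best unaffected alternative.

    Return codes (mirroring the branches below):
      -3  the optimal route avoids every removed link (pair not affected)
      -2  the route set for this OD pair is empty
      -1  every route in the set uses a removed link
      otherwise cost_alt / cost_opt, the relative cost of the best detour.
    """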
start_zone = start
end_zone = end
removed_lid_string = "( lid = " + str(lids[0])
i = 1
while i < len(lids):
removed_lid_string += " or lid =" + str(lids[i])
i += 1
removed_lid_string += ")"
# Finding best, non-affected alternative route
    query1 = db.exec_("SELECT MIN(did) FROM all_results WHERE"
                      " start_zone = " + str(start_zone) + " AND end_zone = " + str(end_zone) +
                      " AND did NOT IN (select did from all_results where start_zone = " + str(start_zone) +
                      " AND end_zone = " + str(end_zone) + " AND " + removed_lid_string + ")")
query1.next()
id_alt = str(query1.value(0))
# print("id_alt är: "+ id_alt)
if id_alt == "NULL":
        # Either every route in the set is affected or the route set is empty
        query = db.exec_(
            "SELECT MIN(did) FROM all_results where start_zone = " + str(start_zone) +
            " AND end_zone = " + str(end_zone))
query.next()
if query.value(0):
# There is no route that is not affected
return -1
else:
            # There are no routes with that start and end zone
            return -2
elif id_alt == "1":
# print("Zon påverkas inte")
return -3
else:
# print("Zon påverkas och bästa id är:" + id_alt)
# Fetching cost of the optimal route and the alternative
query2 = db.exec_("SELECT sum(link_cost) from all_results where "
" (start_zone = " + str(start_zone) + " AND end_zone = " + str(end_zone) + ") AND "
"(did = 1 OR did = " + str(
id_alt) + ") group by did")
query2.next()
# Best cost
cost_opt = str(query2.value(0))
# Alternative cost
query2.next()
cost_alt = str(query2.value(0))
# Proportion of extra cost of alternative route in relation to opt route
# print("cost_opt = " + cost_opt + " and cost_alt = " + cost_alt)
return float(cost_alt) / float(cost_opt)
def analysis_multiple_zones(start_node, zones, lids):
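    """Run odEffect from start_node to every other zone in zones.

    Returns [nr_not_affected, nr_without_routes, nr_all_routes_affected,
    mean detour ratio over pairs with a detour (-1 if there were none),
    number of OD pairs checked].
    """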
count3 = 0
count2 = 0
count1 = 0
count_detour = 0
sum_detour = 0
i = 0
    while i < len(zones):
        if start_node != zones[i]:
            result_test = odEffect(start_node, zones[i], lids)
if result_test == -3:
count3 += 1
elif result_test == -2:
count2 += 1
elif result_test == -1:
count1 += 1
else:
count_detour += 1
sum_detour += result_test
i = i + 1
if count_detour != 0:
mean_detour = sum_detour / count_detour
else:
mean_detour = -1
return [count3, count2, count1, mean_detour, i - 1]
# End of function definition
# Initialize TicToc function.
TicToc = TicTocGenerator()
# DATABASE CONNECTION ------------------------------------------------------
uri = QgsDataSourceUri()
# set host name, port, database name, username and password
uri.setConnection("localhost", "5432", "exjobb", "postgres", "password123")
print(uri.uri())
db = QSqlDatabase.addDatabase('QPSQL')
if db.isValid():
print("QPSQL db is valid")
db.setHostName(uri.host())
db.setDatabaseName(uri.database())
db.setPort(int(uri.port()))
db.setUserName(uri.username())
db.setPassword(uri.password())
# open (create) the connection
if db.open():
print("Opened %s" % uri.uri())
else:
err = db.lastError()
print(err.driverText())
# DATABASE CONNECTION COMPLETE ---------------------------------------------
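# Note: db.isValid() above only confirms Qt loaded the QPSQL driver plugin;
# whether the host and credentials actually work is what db.open() checks.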
def main():
tic()
    if db.isValid():
removeRoutesLayers()
# Create layer for one route set (run routeSetGeneration before).
# printRoutes()
# printRoutesRejoin()
# Creates new visualisation layer for selected pairs (run selectedODResultTable before).
print_selected_pairs()
# All to all visualisation for all pairs in list (run AllToAllResultTable before).
# allToAll()
toc()
if __name__ == "__main__" or __name__ == "__console__":
main()
db.close()
|
[
"import time\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtSql import *\nfrom PyQt5.QtWidgets import *\nfrom qgis.core import QgsFeature, QgsGeometry, QgsProject\nfrom shapely import wkb\n\nprint(__name__)\n\n\n# Function definition\n\ndef TicTocGenerator():\n # Generator that returns time differences\n ti = 0 # initial time\n tf = time.time() # final time\n while True:\n ti = tf\n tf = time.time()\n yield tf - ti # returns the time difference\n\n\ndef toc(tempBool=True):\n # Prints the time difference yielded by generator instance TicToc\n tempTimeInterval = next(TicToc)\n if tempBool:\n print(\"Elapsed time: %f seconds.\\n\" % tempTimeInterval)\n\n\ndef tic():\n # Records a time in TicToc, marks the beginning of a time interval\n toc(False)\n\n\ndef removeRoutesLayers():\n layers = QgsProject.instance().mapLayers()\n\n for layer_id, layer in layers.items():\n if str(layer.name()) != \"model_graph\" and str(layer.name()) != \"emme_zones\" and str(layer.name()) != \"labels\" \\\n and str(layer.name()) != \"OpenStreetMap\" and str(layer.name()) != \"all_results\" and str(\n layer.name()) != \"Centroider\" and str(layer.name()) != \"dijk_result_table\" and str(\n layer.name()) != \"ata_lid\" and str(layer.name()) != \"result_table\":\n QgsProject.instance().removeMapLayer(layer.id())\n\n\n# Prints a route set based on whats in result_table.\ndef printRoutes():\n i = 1\n # WHERE rejoin_link=0 insert into to print\n query = db.exec_(\"SELECT MAX(did) FROM result_table\")\n query.next()\n print(query.value(0))\n nr_routes = query.value(0)\n lid_list_q = db.exec_(\"SELECT result_table.lid, foo.count FROM result_table INNER JOIN (SELECT count(*), lid \\\n FROM result_table WHERE not did=(-1) group by lid) as foo ON(result_table.lid = foo.lid) WHERE not did=(-1) \\\n group by result_table.lid, foo.count\")\n lid_list = []\n lid_count = []\n while lid_list_q.next():\n lid_list.append(lid_list_q.value(0))\n lid_count.append(lid_list_q.value(1))\n\n # Källa https://sashat.me/2017/01/11/list-of-20-simple-distinct-colors/\n color_list = [QColor.fromRgb(128, 0, 0), QColor.fromRgb(170, 10, 40), QColor.fromRgb(128, 128, 0),\n QColor.fromRgb(0, 128, 128), QColor.fromRgb(0, 0, 128), QColor.fromRgb(0, 0, 0),\n QColor.fromRgb(230, 25, 75), QColor.fromRgb(245, 130, 48), QColor.fromRgb(255, 255, 25),\n QColor.fromRgb(210, 245, 60), QColor.fromRgb(60, 180, 75), QColor.fromRgb(70, 240, 240),\n QColor.fromRgb(0, 130, 200), QColor.fromRgb(145, 30, 180), QColor.fromRgb(240, 50, 230),\n QColor.fromRgb(128, 128, 128), QColor.fromRgb(250, 190, 190), QColor.fromRgb(255, 215, 180),\n QColor.fromRgb(255, 250, 200), QColor.fromRgb(170, 255, 195)]\n bad_color = ['Maroon', 'Magenta', 'Olive', 'Orange', 'Navy', 'Black', 'Red', 'Teal', 'Blue', 'Lime', 'Cyan', 'Green'\n , 'Brown', 'Purple', 'Yellow', 'Grey', 'Pink', 'Apricot', 'Beige', 'Mint', 'Lavender']\n\n lid_c = []\n # while lid_query.next():\n while i <= nr_routes:\n\n dummy_q = db.exec_(\n \"SELECT did, lid, ST_astext(geom) as geom FROM result_table WHERE not did=(-1) and result_table.did =\" + str(\n i))\n layert = QgsVectorLayer(\"MultiLineString?crs=epsg:3006\", \" route \" + str(i), \"memory\")\n QgsProject.instance().addMapLayer(layert)\n\n featurelist = []\n while dummy_q.next():\n lid = dummy_q.value(1)\n seg = QgsFeature()\n j = 0\n while j < len(lid_list):\n if lid == lid_list[j]:\n lid_nr = j\n j += 1\n # print(\"lid nr is:\"+str(lid_nr)+ \" lid is :\"+str(lid_list[lid_nr])+\" lid count is:\"+str(lid_count[lid_nr]))\n nr_included = 0\n 
dummy = 0\n j = 0\n while j < len(lid_c):\n if lid == lid_c[j]:\n nr_included += 1\n j += 1\n # if dummy < nr_included:\n # dummy = nr_included\n lid_c.append(lid)\n if lid_count[lid_nr] == 1:\n offset = 0\n else:\n if lid_count[lid_nr] % 2 == 0:\n # Even\n off = (-lid_count[lid_nr] / 2) + nr_included\n if off == 0:\n offset = ((-lid_count[lid_nr] / 2) + nr_included + 1) * 200\n else:\n<<<<<<< HEAD\n offset = ((-lid_count[lid_nr]/2) + nr_included)*200\n=======\n offset = ((-lid_count[lid_nr] / 2) + nr_included) * 80\n>>>>>>> b69948db6665ed5f30d2925c9356500bdac0da03\n\n else:\n # Odd\n print(\"odd value is :\", (-lid_count[lid_nr] / 2) + nr_included)\n print(\"odd value rounded is :\", int((-lid_count[lid_nr] / 2) + nr_included))\n<<<<<<< HEAD\n offset = int(((-lid_count[lid_nr]/2) + nr_included)*200)\n print(\"odd \",offset)\n=======\n offset = int(((-lid_count[lid_nr] / 2) + nr_included) * 80)\n print(\"odd \", offset)\n>>>>>>> b69948db6665ed5f30d2925c9356500bdac0da03\n\n seg.setGeometry(QgsGeometry.fromWkt(dummy_q.value(2)).offsetCurve(offset, 1, 1, 2.0))\n featurelist.append(seg)\n\n symbol = QgsLineSymbol.createSimple({'color': bad_color[i], 'width': '0.4'})\n renderers = layert.renderer()\n renderers.setSymbol(symbol.clone())\n qgis.utils.iface.layerTreeView().refreshLayerSymbology(layert.id())\n single_symbol_renderer = layert.renderer()\n symbol = single_symbol_renderer.symbol()\n symbol.setWidth(0.8)\n\n layert.dataProvider().addFeatures(featurelist)\n layert.triggerRepaint()\n i += 1\n print(\"route nr\", i - 1)\n print(\"nr included max \", dummy)\n\n # Start node\n start_q = db.exec_(\"SELECT lid, ST_astext(ST_PointN(the_geom,1)) AS start \\\n FROM (SELECT lid, (ST_Dump(geom)).geom As the_geom \\\n FROM result_table WHERE did=1 and path_seq=1) As foo\")\n start_q.next()\n layer = QgsVectorLayer('Point?crs=epsg:3006', 'Start', 'memory')\n # Set the provider to accept the data source\n prov = layer.dataProvider()\n # Add a new feature and assign the geometry\n feat = QgsFeature()\n feat.setGeometry(QgsGeometry.fromWkt(start_q.value(1)))\n prov.addFeatures([feat])\n # Update extent of the layer\n layer.updateExtents()\n # Add the layer to the Layers panel\n QgsProject.instance().addMapLayer(layer)\n single_symbol_renderer = layer.renderer()\n symbol1 = single_symbol_renderer.symbol()\n symbol1.setColor(QColor.fromRgb(0, 225, 0))\n symbol1.setSize(3)\n # more efficient than refreshing the whole canvas, which requires a redraw of ALL layers\n layer.triggerRepaint()\n # update legend for layer\n qgis.utils.iface.layerTreeView().refreshLayerSymbology(layer.id())\n\n # End Node\n end_q = db.exec_(\"SELECT lid, ST_astext(ST_PointN(the_geom,-1)) AS start FROM (SELECT lid, (ST_Dump(geom)).geom As the_geom \\\n FROM result_table WHERE path_seq = (SELECT max(path_seq) FROM result_table WHERE did=1) and did=1) AS foo\")\n end_q.next()\n layere = QgsVectorLayer('Point?crs=epsg:3006', 'End', 'memory')\n # Set the provider to accept the data source\n prov = layere.dataProvider()\n # Add a new feature and assign the geometry\n feat = QgsFeature()\n feat.setGeometry(QgsGeometry.fromWkt(end_q.value(1)))\n prov.addFeatures([feat])\n # Update extent of the layer\n layere.updateExtents()\n # Add the layer to the Layers panel\n QgsProject.instance().addMapLayer(layere)\n single_symbol_renderer = layere.renderer()\n symbol = single_symbol_renderer.symbol()\n symbol.setColor(QColor.fromRgb(255, 0, 0))\n symbol.setSize(3)\n layere.triggerRepaint()\n 
qgis.utils.iface.layerTreeView().refreshLayerSymbology(layere.id())\n\n\ndef printRoutesRejoin():\n i = 1\n # WHERE rejoin_link=0 insert into to print\n query = db.exec_(\"SELECT MAX(did) FROM result_table\")\n query.next()\n nr_routes = query.value(0)\n\n # Källa https://sashat.me/2017/01/11/list-of-20-simple-distinct-colors/\n color_list = [QColor.fromRgb(128, 0, 0), QColor.fromRgb(170, 10, 40), QColor.fromRgb(128, 128, 0),\n QColor.fromRgb(0, 128, 128), QColor.fromRgb(0, 0, 128), QColor.fromRgb(0, 0, 0),\n QColor.fromRgb(230, 25, 75), QColor.fromRgb(245, 130, 48), QColor.fromRgb(255, 255, 25),\n QColor.fromRgb(210, 245, 60), QColor.fromRgb(60, 180, 75), QColor.fromRgb(70, 240, 240),\n QColor.fromRgb(0, 130, 200), QColor.fromRgb(145, 30, 180), QColor.fromRgb(240, 50, 230),\n QColor.fromRgb(128, 128, 128), QColor.fromRgb(250, 190, 190), QColor.fromRgb(255, 215, 180),\n QColor.fromRgb(255, 250, 200), QColor.fromRgb(170, 255, 195)]\n bad_color = ['Maroon', 'Magenta', 'Olive', 'Orange', 'Navy', 'Black', 'Red', 'Teal', 'Blue', 'Lime', 'Cyan', 'Green'\n , 'Brown', 'Purple', 'Yellow', 'Grey', 'Pink', 'Apricot', 'Beige', 'Mint', 'Lavender']\n\n while i <= nr_routes:\n # Routes without offset\n sqlcall = \"(select lid, did, geom from result_table where lid in (select lid from result_table group by lid having \\\n count(*) = 1) and did =\" + str(i) + \" and rejoin_link=0 group by lid, did, geom ORDER BY lid, did)\"\n uri.setDataSource(\"\", sqlcall, \"geom\", \"\", \"lid\")\n layert = QgsVectorLayer(uri.uri(), \" route \" + str(i), \"postgres\")\n QgsProject.instance().addMapLayer(layert)\n symbol = QgsLineSymbol.createSimple({'color': bad_color[i],\n 'width': '0.6',\n 'offset': '0'})\n renderers = layert.renderer()\n renderers.setSymbol(symbol.clone())\n qgis.utils.iface.layerTreeView().refreshLayerSymbology(layert.id())\n # single_symbol_renderer = layert.renderer()\n # symbol = single_symbol_renderer.symbol()\n # symbol.setWidth(0.8)\n\n # Routes in need of offset\n sqlcall = \"(select lid, did, geom from result_table where lid in (select lid from result_table \\\n group by lid having count(*) > 1) and did=\" + str(\n i) + \" and rejoin_link=0 group by lid, did, geom ORDER BY lid, did)\"\n uri.setDataSource(\"\", sqlcall, \"geom\", \"\", \"lid\")\n layert = QgsVectorLayer(uri.uri(), \" route \" + str(i), \"postgres\")\n QgsProject.instance().addMapLayer(layert)\n if i == 1:\n offset = 0\n else:\n offset = i - i * 0.7\n print(\"i is \" + str(i) + \" and offset is:\" + str(offset))\n symbol = QgsLineSymbol.createSimple({'color': bad_color[i],\n 'width': '0.4',\n 'offset': str(offset)})\n renderers = layert.renderer()\n renderers.setSymbol(symbol.clone())\n qgis.utils.iface.layerTreeView().refreshLayerSymbology(layert.id())\n\n # single_symbol_renderer = layert.renderer()\n # symbol = single_symbol_renderer.symbol()\n # symbol.setWidth(0.8)\n\n #\n # if i < len(color_list):\n # symbol.setColor(color_list[i])\n # layert.triggerRepaint()\n # qgis.utils.iface.layerTreeView().refreshLayerSymbology(layert.id())\n #\n i = i + 1\n\n # Start node\n start_q = db.exec_(\"SELECT lid, ST_astext(ST_PointN(the_geom,1)) AS start \\\n FROM (SELECT lid, (ST_Dump(geom)).geom As the_geom \\\n FROM result_table WHERE did=1 and path_seq=1) As foo\")\n start_q.next()\n layer = QgsVectorLayer('Point?crs=epsg:3006', 'Start', 'memory')\n\n # Set the provider to accept the data source\n prov = layer.dataProvider()\n\n # Add a new feature and assign the geometry\n feat = QgsFeature()\n 
feat.setGeometry(QgsGeometry.fromWkt(start_q.value(1)))\n prov.addFeatures([feat])\n\n # Update extent of the layer\n layer.updateExtents()\n\n # Add the layer to the Layers panel\n QgsProject.instance().addMapLayer(layer)\n\n single_symbol_renderer = layer.renderer()\n symbol = single_symbol_renderer.symbol()\n symbol.setColor(QColor.fromRgb(0, 225, 0))\n symbol.setSize(3)\n # more efficient than refreshing the whole canvas, which requires a redraw of ALL layers\n layer.triggerRepaint()\n # update legend for layer\n qgis.utils.iface.layerTreeView().refreshLayerSymbology(layer.id())\n\n # End Node\n end_q = db.exec_(\"SELECT lid, ST_astext(ST_PointN(the_geom,-1)) AS start FROM (SELECT lid, (ST_Dump(geom)).geom As the_geom \\\n FROM result_table WHERE path_seq = (SELECT max(path_seq) FROM result_table WHERE did=1) and did=1) AS foo\")\n end_q.next()\n layere = QgsVectorLayer('Point?crs=epsg:3006', 'END', 'memory')\n\n # Set the provider to accept the data source\n prov = layere.dataProvider()\n\n # Add a new feature and assign the geometry\n feat = QgsFeature()\n feat.setGeometry(QgsGeometry.fromWkt(end_q.value(1)))\n prov.addFeatures([feat])\n\n # Update extent of the layer\n layere.updateExtents()\n\n # Add the layer to the Layers panel\n QgsProject.instance().addMapLayer(layere)\n\n single_symbol_renderer = layere.renderer()\n symbol = single_symbol_renderer.symbol()\n symbol.setColor(QColor.fromRgb(255, 0, 0))\n symbol.setSize(3)\n\n layer.triggerRepaint()\n qgis.utils.iface.layerTreeView().refreshLayerSymbology(layere.id())\n\n\n# det jag behöver få från databasen start_list, end_list, lids\ndef print_selected_pairs():\n # Removes layers not specified in removeRoutesLayers\n removeRoutesLayers()\n\n # Get list and removed lids\n lids = []\n temp_query1 = db.exec_(\"SELECT * FROM removed_lids\")\n\n while temp_query1.next():\n lids.append(temp_query1.value(0))\n\n temp_query2 = db.exec_(\"SELECT DISTINCT start_zone AS start_zones, end_zone AS end_zones FROM all_results\")\n start_list = []\n end_list = []\n\n while temp_query2.next():\n start_list.append(temp_query2.value(0))\n end_list.append(temp_query2.value(1))\n\n # first it creates neccessary db-tables for visualization of the OD-pairs in star_list and end_list\n # Create OD_lines table\n db.exec_(\"DROP table if exists OD_lines\")\n db.exec_(\"SELECT ST_MakeLine(ST_Centroid(geom) ORDER BY id) AS geom into od_lines \"\n \"FROM emme_zones where id = \" + str(start_list[0]) + \" OR id = \" + str(end_list[0]) + \"\")\n\n # Create emme_result table\n db.exec_(\"DROP table if exists emme_results\")\n db.exec_(\"SELECT 0.0 as alt_route_cost,* INTO emme_results FROM emme_zones\")\n\n i = 0\n while i < len(start_list):\n if i > 0:\n db.exec_(\"INSERT INTO OD_lines(geom) SELECT ST_MakeLine(ST_Centroid(geom) ORDER BY id) \"\n \"AS geom FROM emme_zones where id = \" + str(start_list[i]) + \" OR id = \" + str(end_list[i]) + \"\")\n\n result_test = odEffect(start_list[i], end_list[i], lids)\n print(\"Result of \" + str(i) + \" is: \" + str(result_test))\n db.exec_(\n \"UPDATE emme_results SET alt_route_cost = \" + str(result_test) + \" WHERE id = '\" + str(start_list[i]) + \"'\"\n \" OR id = '\" + str(\n end_list[i]) + \"';\")\n\n i += 1\n\n db.exec_(\"ALTER TABLE OD_lines ADD COLUMN id SERIAL PRIMARY KEY;\")\n\n sqlcall = \"(SELECT * FROM emme_results)\"\n uri.setDataSource(\"\", sqlcall, \"geom\", \"\", \"id\")\n layer = QgsVectorLayer(uri.uri(), \"result_deterioration \", \"postgres\")\n QgsProject.instance().addMapLayer(layer)\n\n values = 
(\n ('Not affected', -3, -3, QColor.fromRgb(0, 0, 200)),\n ('No route', -2, -2, QColor.fromRgb(0, 225, 200)),\n ('No route that is not affected', -1, -1, QColor.fromRgb(255, 0, 0)),\n ('Not searched', 0, 0, QColor.fromRgb(255, 255, 255)),\n ('Alternative route: 1-10 % deterioration', 0, 1.1, QColor.fromRgb(102, 255, 102)),\n ('Alternative route: 10-100 % deterioration', 1.1, 1000, QColor.fromRgb(255, 255, 0)),\n )\n\n # create a category for each item in values\n ranges = []\n for label, lower, upper, color in values:\n symbol = QgsSymbol.defaultSymbol(layer.geometryType())\n symbol.setColor(QColor(color))\n rng = QgsRendererRange(lower, upper, symbol, label)\n ranges.append(rng)\n\n ## create the renderer and assign it to a layer\n expression = 'alt_route_cost' # field name\n layer.setRenderer(QgsGraduatedSymbolRenderer(expression, ranges))\n # iface.mapCanvas().refresh()\n\n # Print lines from od_lines\n sqlcall = \"(SELECT * FROM od_lines )\"\n uri.setDataSource(\"\", sqlcall, \"geom\", \"\", \"id\")\n layert = QgsVectorLayer(uri.uri(), \" OD_pairs \", \"postgres\")\n QgsProject.instance().addMapLayer(layert)\n\n\n# Ska hämtas från databasen list,removed_lids\ndef allToAll():\n # Removes layers not specified in removeRoutesLayers\n removeRoutesLayers()\n\n # Get list and removed lids\n removed_lids = []\n temp_query1 = db.exec_(\"SELECT * FROM removed_lids\")\n\n while temp_query1.next():\n removed_lids.append(temp_query1.value(0))\n\n temp_query2 = db.exec_(\"SELECT DISTINCT start_zone AS start_zones FROM all_results\")\n\n list = []\n while temp_query2.next():\n list.append(temp_query2.value(0))\n\n removed_lid_string = \"( lid = \" + str(removed_lids[0])\n i = 1\n while i < len(removed_lids):\n removed_lid_string += \" or lid =\" + str(removed_lids[i])\n i += 1\n removed_lid_string += \")\"\n\n # Queryn skapar tabell för alla länkar som går igenom removed_lid\n db.exec_(\"DROP TABLE IF EXIST temp_test\")\n db.exec_(\n \" select * into temp_test from all_results f where exists(select 1 from all_results l where \" + removed_lid_string + \" and\"\n \" (f.start_zone = l.start_zone and f.end_zone = l.end_zone and f.did = l.did))\")\n\n # Här vill jag skapa nytt lager som visar intressanta saker för varje zon\n # Create emme_result table\n db.exec_(\"DROP table if exists emme_results\")\n db.exec_(\n \"SELECT 0 as nr_non_affected, 0 as nr_no_routes, 0 as nr_all_routes_affected, 0.0 as mean_deterioration, 0 as nr_pairs,* INTO emme_results FROM emme_zones\")\n\n i = 0\n while i < len(list):\n result = analysis_multiple_zones(list[i], list, removed_lids)\n db.exec_(\"UPDATE emme_results SET nr_non_affected = \" + str(result[0]) + \" , nr_no_routes = \" +\n str(result[1]) + \" , nr_all_routes_affected = \" + str(result[2]) + \" , mean_deterioration = \" +\n str(result[3]) + \" , nr_pairs = \" + str(result[4]) + \" WHERE id = \" +\n str(list[i]) + \";\")\n i += 1\n\n ############################ Create layer for mean deterioration\n sqlcall = \"(SELECT * FROM emme_results)\"\n uri.setDataSource(\"\", sqlcall, \"geom\", \"\", \"id\")\n\n layer = QgsVectorLayer(uri.uri(), \"mean_deterioration \", \"postgres\")\n QgsProject.instance().addMapLayer(layer)\n\n values = (\n ('Not searched', 0, 0, QColor.fromRgb(255, 255, 255)),\n ('No deterioration', -1, -1, QColor.fromRgb(153, 204, 255)),\n ('Mean deterioration 1-20% ', 0, 1.2, QColor.fromRgb(102, 255, 102)),\n ('Mean deterioration 20-30% ', 1.2, 1.3, QColor.fromRgb(255, 255, 153)),\n ('Mean deterioration 30-50% ', 1.3, 1.5, QColor.fromRgb(255, 178, 
102)),\n ('Mean deterioration 50-100% ', 1.5, 100, QColor.fromRgb(255, 102, 102)),\n )\n\n # create a category for each item in values\n ranges = []\n for label, lower, upper, color in values:\n symbol = QgsSymbol.defaultSymbol(layer.geometryType())\n symbol.setColor(QColor(color))\n rng = QgsRendererRange(lower, upper, symbol, label)\n ranges.append(rng)\n\n ## create the renderer and assign it to a layer\n expression = 'mean_deterioration' # field name\n layer.setRenderer(QgsGraduatedSymbolRenderer(expression, ranges))\n\n ############################ Create layer for nr_affected OD-pairs\n sqlcall = \"(select CASE WHEN nr_pairs > 0 THEN cast((nr_pairs - nr_non_affected) as float)/nr_pairs \" \\\n \"ELSE 100 END as prop_affected,* from emme_results)\"\n uri.setDataSource(\"\", sqlcall, \"geom\", \"\", \"id\")\n\n layer = QgsVectorLayer(uri.uri(), \"prop_affected \", \"postgres\")\n QgsProject.instance().addMapLayer(layer)\n\n values = (\n ('Not searched', 1, 100, QColor.fromRgb(255, 255, 255)),\n ('0% affected pairs', 0, 0, QColor.fromRgb(153, 204, 255)),\n ('1-20% affected pairs', 0, 0.2, QColor.fromRgb(102, 255, 102)),\n ('20-30% affected pairs', 0.2, 0.3, QColor.fromRgb(255, 255, 153)),\n ('30-50% affected pairs', 0.3, 0.5, QColor.fromRgb(255, 178, 102)),\n ('50-100% affected pairs', 0.5, 1, QColor.fromRgb(255, 102, 102)),\n )\n\n # create a category for each item in values\n ranges = []\n for label, lower, upper, color in values:\n symbol = QgsSymbol.defaultSymbol(layer.geometryType())\n symbol.setColor(QColor(color))\n rng = QgsRendererRange(lower, upper, symbol, label)\n ranges.append(rng)\n\n ## create the renderer and assign it to a layer\n expression = 'prop_affected' # field name\n layer.setRenderer(QgsGraduatedSymbolRenderer(expression, ranges))\n\n\ndef odEffect(start, end, lids):\n start_zone = start\n end_zone = end\n\n removed_lid_string = \"( lid = \" + str(lids[0])\n i = 1\n while i < len(lids):\n removed_lid_string += \" or lid =\" + str(lids[i])\n i += 1\n removed_lid_string += \")\"\n\n # Finding best, non-affected alternative route\n query1 = db.exec_(\"SELECT MIN(did) FROM all_results WHERE\"\n \" start_zone = \" + str(start_zone) + \" AND end_zone = \" + str(end_zone) + \" AND \"\n \" did NOT IN (select did from all_results where start_zone = \" + str(\n start_zone) + \" AND end_zone = \" + str(end_zone) + \" AND \" + removed_lid_string + \")\")\n\n query1.next()\n id_alt = str(query1.value(0))\n # print(\"id_alt är: \"+ id_alt)\n\n if id_alt == \"NULL\":\n # Either there's only one route in the route set or the route set is empty\n query = db.exec_(\n \"SELECT MIN(did) FROM all_results where start_zone = \" + str(start_zone) + \" AND end_zone = \" + str(\n end_zone) + \"\")\n query.next()\n\n if query.value(0):\n # There is no route that is not affected\n return -1\n else:\n # There is no routes with that start and end zone\n return -2;\n\n elif id_alt == \"1\":\n # print(\"Zon påverkas inte\")\n return -3\n else:\n # print(\"Zon påverkas och bästa id är:\" + id_alt)\n\n # Fetching cost of the optimal route and the alternative\n query2 = db.exec_(\"SELECT sum(link_cost) from all_results where \"\n \" (start_zone = \" + str(start_zone) + \" AND end_zone = \" + str(end_zone) + \") AND \"\n \"(did = 1 OR did = \" + str(\n id_alt) + \") group by did\")\n query2.next()\n # Best cost\n cost_opt = str(query2.value(0))\n\n # Alternative cost\n query2.next()\n cost_alt = str(query2.value(0))\n\n # Proportion of extra cost of alternative route in relation to opt route\n # 
print(\"cost_opt = \" + cost_opt + \" and cost_alt = \" + cost_alt)\n return float(cost_alt) / float(cost_opt)\n\n\ndef analysis_multiple_zones(start_node, list, lids):\n count3 = 0\n count2 = 0\n count1 = 0\n count_detour = 0\n sum_detour = 0\n\n i = 0\n while i < len(list):\n if start_node != list[i]:\n result_test = odEffect(start_node, list[i], lids)\n\n if result_test == -3:\n count3 += 1\n elif result_test == -2:\n count2 += 1\n elif result_test == -1:\n count1 += 1\n else:\n count_detour += 1\n sum_detour += result_test\n i = i + 1\n\n if count_detour != 0:\n mean_detour = sum_detour / count_detour\n else:\n mean_detour = -1\n return [count3, count2, count1, mean_detour, i - 1]\n\n\n# End of function definition\n\n\n# Initialize TicToc function.\nTicToc = TicTocGenerator()\n\n# DATABASE CONNECTION ------------------------------------------------------\nuri = QgsDataSourceUri()\n# set host name, port, database name, username and password\nuri.setConnection(\"localhost\", \"5432\", \"exjobb\", \"postgres\", \"password123\")\nprint(uri.uri())\ndb = QSqlDatabase.addDatabase('QPSQL')\n\nif db.isValid():\n print(\"QPSQL db is valid\")\n db.setHostName(uri.host())\n db.setDatabaseName(uri.database())\n db.setPort(int(uri.port()))\n db.setUserName(uri.username())\n db.setPassword(uri.password())\n # open (create) the connection\n if db.open():\n print(\"Opened %s\" % uri.uri())\n else:\n err = db.lastError()\n print(err.driverText())\n\n\n# DATABASE CONNECTION COMPLETE ---------------------------------------------\n\ndef main():\n tic()\n if db.isValid:\n removeRoutesLayers()\n\n # Create layer for one route set (run routeSetGeneration before).\n # printRoutes()\n # printRoutesRejoin()\n\n # Creates new visualisation layer for selected pairs (run selectedODResultTable before).\n print_selected_pairs()\n\n # All to all visualisation for all pairs in list (run AllToAllResultTable before).\n # allToAll()\n\n toc()\n\n\nif __name__ == \"__main__\" or __name__ == \"__console__\":\n main()\ndb.close()\n\n"
] | true |
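The record above builds QGIS graduated renderers by hand from (label, lower, upper, color) tuples. A minimal sketch of that pattern, assuming it runs inside the QGIS 3 Python console with a vector layer already loaded; the layer name 'emme_results' and field name 'alt_route_cost' are taken from the record but are otherwise illustrative:

# Sketch: graduated renderer from range tuples, mirroring the ranges
# loop in the record above. Runs only inside a QGIS 3 Python console.
from qgis.core import (QgsProject, QgsSymbol, QgsRendererRange,
                       QgsGraduatedSymbolRenderer)
from qgis.PyQt.QtGui import QColor

layer = QgsProject.instance().mapLayersByName('emme_results')[0]  # assumed layer
values = (
    ('low',  0.0, 1.0,    QColor(102, 255, 102)),
    ('high', 1.0, 1000.0, QColor(255, 102, 102)),
)
ranges = []
for label, lower, upper, color in values:
    symbol = QgsSymbol.defaultSymbol(layer.geometryType())
    symbol.setColor(color)
    ranges.append(QgsRendererRange(lower, upper, symbol, label))
layer.setRenderer(QgsGraduatedSymbolRenderer('alt_route_cost', ranges))
layer.triggerRepaint()  # cheaper than refreshing the whole canvas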
919 |
4d9575c178b672815bb561116689b9b0721cb5ba
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 25 15:14:15 2020
@author: luisa
"""
horast = int(input("Horas Trabajadas: "+"\n\t\t"))
tarifa = int(input("Tarifa por hora: "+"\n\t\t"))
descu = int(input("Descuentos: "+"\n\t\t"))
resp0 = horast - descu
resp1 = (resp0 * tarifa)/2
resp2 = (horast * tarifa) + resp1
resp3 = resp2 - descu
resp4 = horast * tarifa
if horast >= 41:
print("Valor a Pagar: ", resp3)
elif horast <= 40:
print("Valor a Pagar: ", resp4)
|
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Jun 25 15:14:15 2020\r\n\r\n@author: luisa\r\n\"\"\"\r\n\r\n\r\nhorast = int(input(\"Horas Trabajadas: \"+\"\\n\\t\\t\"))\r\ntarifa = int(input(\"Tarifa por hora: \"+\"\\n\\t\\t\"))\r\ndescu = int(input(\"Descuentos: \"+\"\\n\\t\\t\"))\r\nresp0 = horast - descu\r\nresp1 = (resp0 * tarifa)/2\r\nresp2 = (horast * tarifa) + resp1\r\nresp3 = resp2 - descu\r\nresp4 = horast * tarifa\r\nif horast >= 41:\r\n print(\"Valor a Pagar: \", resp3)\r\nelif horast <= 40:\r\n print(\"Valor a Pagar: \", resp4)\r\n",
"<docstring token>\nhorast = int(input('Horas Trabajadas: ' + '\\n\\t\\t'))\ntarifa = int(input('Tarifa por hora: ' + '\\n\\t\\t'))\ndescu = int(input('Descuentos: ' + '\\n\\t\\t'))\nresp0 = horast - descu\nresp1 = resp0 * tarifa / 2\nresp2 = horast * tarifa + resp1\nresp3 = resp2 - descu\nresp4 = horast * tarifa\nif horast >= 41:\n print('Valor a Pagar: ', resp3)\nelif horast <= 40:\n print('Valor a Pagar: ', resp4)\n",
"<docstring token>\n<assignment token>\nif horast >= 41:\n print('Valor a Pagar: ', resp3)\nelif horast <= 40:\n print('Valor a Pagar: ', resp4)\n",
"<docstring token>\n<assignment token>\n<code token>\n"
] | false |
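The pay computation above applies the 50% surcharge to all hours (minus deductions) and subtracts deductions twice. A hedged sketch of a more conventional overtime rule, assuming the intent is time-and-a-half only for hours beyond 40; the 40-hour threshold and single deduction are assumptions, not from the source:

# Sketch: overtime at 1.5x for hours past 40, deductions applied once.
def weekly_pay(hours, rate, deductions):
    base = min(hours, 40) * rate                 # regular hours at the normal rate
    overtime = max(hours - 40, 0) * rate * 1.5   # hours past 40 at time-and-a-half
    return base + overtime - deductions          # deductions applied once, at the end

print(weekly_pay(45, 10, 20))  # 400 + 75 - 20 = 455.0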
920 |
8479c70fed36dc6f1e6094c832fb22d8c2e53e3a
|
import os
import time
from datetime import datetime, timedelta
from git import Repo
class CommitAnalyzer:
    """
    Takes the path of the repo.
    """
    def __init__(self, repo_path):
        self.repo_path = repo_path
        self.repo = Repo(self.repo_path)
        assert not self.repo.bare  # a bare repo has no working tree to analyze

    def get_conflict_commits(self):
        """Return merge commits whose message mentions a .java conflict."""
        conflict_commits = []
        current_date = datetime.now()  # only needed by the optional date filter below
        for commit in self.repo.iter_commits('master'):
            parents = commit.parents
            # merge commits have more than one parent
            if len(parents) > 1 and "conflict" in commit.message.lower() and ".java" in commit.message.lower():
                # optionally restrict to the last 5 days:
                # if datetime.fromtimestamp(commit.committed_date) >= current_date - timedelta(5):
                conflict_commits.append(commit)
        return conflict_commits

# run the script inside a cloned repo
commit_analyzer = CommitAnalyzer(os.getcwd())
for commit in commit_analyzer.get_conflict_commits():
    print(commit, time.asctime(time.gmtime(commit.committed_date)))
|
[
"import os\nimport time\nfrom datetime import datetime, timedelta\nfrom git import Repo\n\nclass CommitAnalyzer():\n\n\t\"\"\"\n\tTakes path of the repo\n\t\"\"\"\n\tdef __init__(self, repo_path):\n\t\tself.repo_path = repo_path\n\t\tself.repo = Repo(self.repo_path)\n\t\tassert not self.repo.bare\n\n\tdef get_conflict_commits(self):\n\t\tconflict_commits = []\n\t\tcurrent_date = datetime.now()\n\t\tfor commit in self.repo.iter_commits('master'):\n\t\t\tparents = commit.parents\n\t\t\tif len(parents) > 1 and \"conflict\" in commit.message.lower() and \".java\" in commit.message.lower():\n\t\t\t\t#if datetime.fromtimestamp(commit.committed_date) >= current_date - timedelta(5):\n\t\t\t\tconflict_commits.append(commit)\n\n\t\treturn conflict_commits\n\n#run script in cloned repo\ncommit_analyzer = CommitAnalyzer(os.getcwd())\nfor commit in commit_analyzer.get_conflict_commits():\n\tprint (commit, time.asctime(time.gmtime(commit.committed_date)))",
"import os\nimport time\nfrom datetime import datetime, timedelta\nfrom git import Repo\n\n\nclass CommitAnalyzer:\n \"\"\"\n\tTakes path of the repo\n\t\"\"\"\n\n def __init__(self, repo_path):\n self.repo_path = repo_path\n self.repo = Repo(self.repo_path)\n assert not self.repo.bare\n\n def get_conflict_commits(self):\n conflict_commits = []\n current_date = datetime.now()\n for commit in self.repo.iter_commits('master'):\n parents = commit.parents\n if len(parents) > 1 and 'conflict' in commit.message.lower(\n ) and '.java' in commit.message.lower():\n conflict_commits.append(commit)\n return conflict_commits\n\n\ncommit_analyzer = CommitAnalyzer(os.getcwd())\nfor commit in commit_analyzer.get_conflict_commits():\n print(commit, time.asctime(time.gmtime(commit.committed_date)))\n",
"<import token>\n\n\nclass CommitAnalyzer:\n \"\"\"\n\tTakes path of the repo\n\t\"\"\"\n\n def __init__(self, repo_path):\n self.repo_path = repo_path\n self.repo = Repo(self.repo_path)\n assert not self.repo.bare\n\n def get_conflict_commits(self):\n conflict_commits = []\n current_date = datetime.now()\n for commit in self.repo.iter_commits('master'):\n parents = commit.parents\n if len(parents) > 1 and 'conflict' in commit.message.lower(\n ) and '.java' in commit.message.lower():\n conflict_commits.append(commit)\n return conflict_commits\n\n\ncommit_analyzer = CommitAnalyzer(os.getcwd())\nfor commit in commit_analyzer.get_conflict_commits():\n print(commit, time.asctime(time.gmtime(commit.committed_date)))\n",
"<import token>\n\n\nclass CommitAnalyzer:\n \"\"\"\n\tTakes path of the repo\n\t\"\"\"\n\n def __init__(self, repo_path):\n self.repo_path = repo_path\n self.repo = Repo(self.repo_path)\n assert not self.repo.bare\n\n def get_conflict_commits(self):\n conflict_commits = []\n current_date = datetime.now()\n for commit in self.repo.iter_commits('master'):\n parents = commit.parents\n if len(parents) > 1 and 'conflict' in commit.message.lower(\n ) and '.java' in commit.message.lower():\n conflict_commits.append(commit)\n return conflict_commits\n\n\n<assignment token>\nfor commit in commit_analyzer.get_conflict_commits():\n print(commit, time.asctime(time.gmtime(commit.committed_date)))\n",
"<import token>\n\n\nclass CommitAnalyzer:\n \"\"\"\n\tTakes path of the repo\n\t\"\"\"\n\n def __init__(self, repo_path):\n self.repo_path = repo_path\n self.repo = Repo(self.repo_path)\n assert not self.repo.bare\n\n def get_conflict_commits(self):\n conflict_commits = []\n current_date = datetime.now()\n for commit in self.repo.iter_commits('master'):\n parents = commit.parents\n if len(parents) > 1 and 'conflict' in commit.message.lower(\n ) and '.java' in commit.message.lower():\n conflict_commits.append(commit)\n return conflict_commits\n\n\n<assignment token>\n<code token>\n",
"<import token>\n\n\nclass CommitAnalyzer:\n <docstring token>\n\n def __init__(self, repo_path):\n self.repo_path = repo_path\n self.repo = Repo(self.repo_path)\n assert not self.repo.bare\n\n def get_conflict_commits(self):\n conflict_commits = []\n current_date = datetime.now()\n for commit in self.repo.iter_commits('master'):\n parents = commit.parents\n if len(parents) > 1 and 'conflict' in commit.message.lower(\n ) and '.java' in commit.message.lower():\n conflict_commits.append(commit)\n return conflict_commits\n\n\n<assignment token>\n<code token>\n",
"<import token>\n\n\nclass CommitAnalyzer:\n <docstring token>\n\n def __init__(self, repo_path):\n self.repo_path = repo_path\n self.repo = Repo(self.repo_path)\n assert not self.repo.bare\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<import token>\n\n\nclass CommitAnalyzer:\n <docstring token>\n <function token>\n <function token>\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<class token>\n<assignment token>\n<code token>\n"
] | false |
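The commented-out filter in the record above suggests limiting results to recent commits. A minimal sketch of that date filter enabled, assuming GitPython is installed and the current directory is a clone with a 'master' branch; the 5-day window mirrors the timedelta(5) in the original comment:

# Sketch: merge commits from the last 5 days via GitPython.
from datetime import datetime, timedelta
from git import Repo

repo = Repo('.')                      # assumes the CWD is a cloned repo
cutoff = datetime.now() - timedelta(days=5)
recent_merges = [
    c for c in repo.iter_commits('master')
    if len(c.parents) > 1                                   # merge commits only
    and datetime.fromtimestamp(c.committed_date) >= cutoff  # within the window
]
print(len(recent_merges))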
921 |
a41d00c86d0bdab1bced77c275e56c3569af4f4e
|
from django.apps import AppConfig
from django.conf import settings
import importlib
import importlib.util
class RestAdminAppConfig(AppConfig):
name = 'libraries.django_rest_admin'
verbose_name = 'Rest Admin'
loaded = False
def ready(self):
autodiscover()
def autodiscover():
"""
Automatic discovering of rest_admin.py file inside apps.
similar to what Django admin does.
"""
from .register import rest_admin
if not RestAdminAppConfig.loaded:
for app in settings.INSTALLED_APPS:
# For each app, we need to look for an rest_admin.py inside that app's
# package. We can't use os.path here -- recall that modules may be
# imported different ways (think zip files) -- so we need to get
# the app's __path__ and look for rest_admin.py on that path.
# Step 1: find out the app's __path__ Import errors here will (and
# should) bubble up, but a missing __path__ (which is legal, but weird)
# fails silently -- apps that do weird things with __path__ might
# need to roll their own rest_admin registration.
try:
app_path = importlib.import_module(app).__path__
except AttributeError:
continue
            # Step 2: check whether the app actually ships a rest_admin
            # module. importlib.find_loader is deprecated (and removed in
            # Python 3.12); importlib.util.find_spec on the dotted module
            # name is the modern replacement and simply returns None when
            # the submodule does not exist. It is safe here because the
            # app itself was already imported successfully in Step 1.
            if importlib.util.find_spec('%s.rest_admin' % app) is None:
                continue
# Step 3: import the app's admin file. If this has errors we want them
# to bubble up.
importlib.import_module("%s.rest_admin" % app)
# autodiscover was successful, reset loading flag.
RestAdminAppConfig.loaded = True
|
[
"from django.apps import AppConfig\nfrom django.conf import settings\nimport importlib\nimport importlib.util\n\n\nclass RestAdminAppConfig(AppConfig):\n name = 'libraries.django_rest_admin'\n verbose_name = 'Rest Admin'\n loaded = False\n\n def ready(self):\n autodiscover()\n\n\ndef autodiscover():\n \"\"\"\n Automatic discovering of rest_admin.py file inside apps.\n similar to what Django admin does. \n \"\"\"\n from .register import rest_admin\n\n if not RestAdminAppConfig.loaded:\n for app in settings.INSTALLED_APPS:\n # For each app, we need to look for an rest_admin.py inside that app's\n # package. We can't use os.path here -- recall that modules may be\n # imported different ways (think zip files) -- so we need to get\n # the app's __path__ and look for rest_admin.py on that path.\n\n # Step 1: find out the app's __path__ Import errors here will (and\n # should) bubble up, but a missing __path__ (which is legal, but weird)\n # fails silently -- apps that do weird things with __path__ might\n # need to roll their own rest_admin registration.\n try:\n app_path = importlib.import_module(app).__path__\n except AttributeError:\n continue\n\n # Step 2: use imp.find_module to find the app's rest_admin.py. For some\n # reason imp.find_module raises ImportError if the app can't be found\n # but doesn't actually try to import the module. So skip this app if\n # its rest_admin.py doesn't exist\n # try:\n # importlib.util.find_spec('rest_admin', app_path)\n # # imp.find_module('rest_admin', app_path)\n # except ImportError:\n # continue\n #\n if not importlib.find_loader('rest_admin', app_path):\n continue\n\n # Step 3: import the app's admin file. If this has errors we want them\n # to bubble up.\n importlib.import_module(\"%s.rest_admin\" % app)\n\n # autodiscover was successful, reset loading flag.\n RestAdminAppConfig.loaded = True\n",
"from django.apps import AppConfig\nfrom django.conf import settings\nimport importlib\nimport importlib.util\n\n\nclass RestAdminAppConfig(AppConfig):\n name = 'libraries.django_rest_admin'\n verbose_name = 'Rest Admin'\n loaded = False\n\n def ready(self):\n autodiscover()\n\n\ndef autodiscover():\n \"\"\"\n Automatic discovering of rest_admin.py file inside apps.\n similar to what Django admin does. \n \"\"\"\n from .register import rest_admin\n if not RestAdminAppConfig.loaded:\n for app in settings.INSTALLED_APPS:\n try:\n app_path = importlib.import_module(app).__path__\n except AttributeError:\n continue\n if not importlib.find_loader('rest_admin', app_path):\n continue\n importlib.import_module('%s.rest_admin' % app)\n RestAdminAppConfig.loaded = True\n",
"<import token>\n\n\nclass RestAdminAppConfig(AppConfig):\n name = 'libraries.django_rest_admin'\n verbose_name = 'Rest Admin'\n loaded = False\n\n def ready(self):\n autodiscover()\n\n\ndef autodiscover():\n \"\"\"\n Automatic discovering of rest_admin.py file inside apps.\n similar to what Django admin does. \n \"\"\"\n from .register import rest_admin\n if not RestAdminAppConfig.loaded:\n for app in settings.INSTALLED_APPS:\n try:\n app_path = importlib.import_module(app).__path__\n except AttributeError:\n continue\n if not importlib.find_loader('rest_admin', app_path):\n continue\n importlib.import_module('%s.rest_admin' % app)\n RestAdminAppConfig.loaded = True\n",
"<import token>\n\n\nclass RestAdminAppConfig(AppConfig):\n name = 'libraries.django_rest_admin'\n verbose_name = 'Rest Admin'\n loaded = False\n\n def ready(self):\n autodiscover()\n\n\n<function token>\n",
"<import token>\n\n\nclass RestAdminAppConfig(AppConfig):\n <assignment token>\n <assignment token>\n <assignment token>\n\n def ready(self):\n autodiscover()\n\n\n<function token>\n",
"<import token>\n\n\nclass RestAdminAppConfig(AppConfig):\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\n<function token>\n",
"<import token>\n<class token>\n<function token>\n"
] | false |
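A minimal, self-contained sketch of the submodule-discovery check from the record above, using stdlib packages as stand-ins for Django apps (json.decoder exists, collections.decoder does not); importlib.util.find_spec returns None for the missing one instead of raising:

# Sketch: skip apps that lack the expected submodule.
import importlib
import importlib.util

for app in ('json', 'collections'):          # stdlib stand-ins for Django apps
    spec = importlib.util.find_spec(f'{app}.decoder')
    if spec is None:
        continue                             # no such submodule; skip silently
    importlib.import_module(f'{app}.decoder')
    print(f'loaded {app}.decoder')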
922 |
e0435b0b34fc011e7330ab8882865131f7f78882
|
import pytest
import responses
from auctioneer import constants, controllers, entities
from common.http import UnExpectedResult
def test_keywordbid_rule_init(kwb_rule, account):
assert kwb_rule.get_max_bid_display() == kwb_rule.max_bid * 1_000_000
assert kwb_rule.get_bid_increase_percentage_display() == kwb_rule.bid_increase_percentage / 100
assert kwb_rule.get_target_bid_diff_display() == kwb_rule.target_bid_diff / 100
assert kwb_rule.account is account
    assert kwb_rule.target_values == [1, 2, 3]
assert kwb_rule.get_target_type_display() in map(lambda t: t[1], constants.KEYWORD_BID_TARGET_TYPES)
def test_make_keywordbid_rule(kwb_rule):
kw_bid_rule = controllers.keyword_bid_rule.get_keywordbid_rule(kwb_rule.id)
assert kwb_rule == kw_bid_rule
assert kw_bid_rule.account == kwb_rule.account
not_found_kwb_rule = controllers.keyword_bid_rule.get_keywordbid_rule(0)
assert not_found_kwb_rule is None
def test_map_keywordbid_rule(kwb_rule, account):
kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)
assert isinstance(kwb_ent, entities.KeywordBidRule)
assert kwb_ent.account == account.id
for f in kwb_rule._meta.fields:
        if f.name in ('id', 'title'):
            continue
model_attr = getattr(kwb_rule, f.name)
ent_attr = getattr(kwb_ent, f.name)
if not hasattr(model_attr, 'pk'):
try:
assert ent_attr == getattr(kwb_rule, f'get_{f.name}_display')()
except AttributeError:
assert ent_attr == model_attr
else:
assert ent_attr == model_attr.id
def test_get_keyword_bids(yd_gateway, keyword_bids):
url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'
data = keyword_bids
with responses.RequestsMock() as mock:
mock.add(method='POST', url=url, status=200, json=data)
mock.add(method='POST', url=url, status=404)
mock.add(method='POST', url=url, status=200, json=data)
kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway, selection_criteria={"CampaignIds": []})
assert next(kwb).keyword_id == 13102117581
assert next(kwb).keyword_id == 13102117582
kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway, selection_criteria={"CampaignIds": []})
with pytest.raises(UnExpectedResult):
next(kwb)
kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway, selection_criteria={"CampaignIds": []})
assert type(next(kwb).as_dict()) is dict
def test_set_keyword_bids(yd_gateway, keyword_bids, keyword_bids_w_warnings):
url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'
kwb = controllers.keyword_bids.map_keyword_bids(keyword_bids['result']['KeywordBids'])
with responses.RequestsMock() as mock:
mock.add(method='POST', url=url, status=200, json=keyword_bids_w_warnings)
response = controllers.keyword_bids.set_keyword_bids(yd_gateway, kwb)
assert len(list(response)) == 1514
def test_calculate_keyword_bids(yd_gateway, kwb_rule, keyword_bids, keyword_bids_w_warnings):
url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'
kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)
with responses.RequestsMock() as mock:
mock.add(method='POST', url=url, status=200, json=keyword_bids)
mock.add(method='POST', url=url, status=200, json=keyword_bids_w_warnings)
        mock.add(method='POST', url=url, status=200, json={'error': {'error_code': 0, 'error_message': 'oops!'}})
result = controllers.keyword_bids.calculate_keyword_bids(yd_gateway, kwb_ent,
selection_criteria={"CampaignIds": []})
assert len(result) == 1514
with pytest.raises(UnExpectedResult):
controllers.keyword_bids.calculate_keyword_bids(yd_gateway, kwb_ent,
selection_criteria={"CampaignIds": []})
|
[
"import pytest\nimport responses\n\nfrom auctioneer import constants, controllers, entities\nfrom common.http import UnExpectedResult\n\n\ndef test_keywordbid_rule_init(kwb_rule, account):\n assert kwb_rule.get_max_bid_display() == kwb_rule.max_bid * 1_000_000\n assert kwb_rule.get_bid_increase_percentage_display() == kwb_rule.bid_increase_percentage / 100\n assert kwb_rule.get_target_bid_diff_display() == kwb_rule.target_bid_diff / 100\n assert kwb_rule.account is account\n assert kwb_rule.target_values == [1,2,3]\n assert kwb_rule.get_target_type_display() in map(lambda t: t[1], constants.KEYWORD_BID_TARGET_TYPES)\n\n\ndef test_make_keywordbid_rule(kwb_rule):\n kw_bid_rule = controllers.keyword_bid_rule.get_keywordbid_rule(kwb_rule.id)\n assert kwb_rule == kw_bid_rule\n assert kw_bid_rule.account == kwb_rule.account\n not_found_kwb_rule = controllers.keyword_bid_rule.get_keywordbid_rule(0)\n assert not_found_kwb_rule is None\n\n\ndef test_map_keywordbid_rule(kwb_rule, account):\n kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)\n assert isinstance(kwb_ent, entities.KeywordBidRule)\n assert kwb_ent.account == account.id\n for f in kwb_rule._meta.fields:\n if f.name in ('id', 'title') :\n continue\n model_attr = getattr(kwb_rule, f.name)\n ent_attr = getattr(kwb_ent, f.name)\n if not hasattr(model_attr, 'pk'):\n try:\n assert ent_attr == getattr(kwb_rule, f'get_{f.name}_display')()\n except AttributeError:\n assert ent_attr == model_attr\n else:\n assert ent_attr == model_attr.id\n\n\ndef test_get_keyword_bids(yd_gateway, keyword_bids):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n data = keyword_bids\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=data)\n mock.add(method='POST', url=url, status=404)\n mock.add(method='POST', url=url, status=200, json=data)\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway, selection_criteria={\"CampaignIds\": []})\n assert next(kwb).keyword_id == 13102117581\n assert next(kwb).keyword_id == 13102117582\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway, selection_criteria={\"CampaignIds\": []})\n with pytest.raises(UnExpectedResult):\n next(kwb)\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway, selection_criteria={\"CampaignIds\": []})\n assert type(next(kwb).as_dict()) is dict\n\n\ndef test_set_keyword_bids(yd_gateway, keyword_bids, keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb = controllers.keyword_bids.map_keyword_bids(keyword_bids['result']['KeywordBids'])\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=keyword_bids_w_warnings)\n response = controllers.keyword_bids.set_keyword_bids(yd_gateway, kwb)\n assert len(list(response)) == 1514\n\n\ndef test_calculate_keyword_bids(yd_gateway, kwb_rule, keyword_bids, keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=keyword_bids)\n mock.add(method='POST', url=url, status=200, json=keyword_bids_w_warnings)\n mock.add(method='POST', url=url, status=200, json={'error': {'error_code': 0000, 'error_message': 'oops!'}})\n result = controllers.keyword_bids.calculate_keyword_bids(yd_gateway, kwb_ent,\n selection_criteria={\"CampaignIds\": []})\n assert len(result) == 1514\n with 
pytest.raises(UnExpectedResult):\n controllers.keyword_bids.calculate_keyword_bids(yd_gateway, kwb_ent,\n selection_criteria={\"CampaignIds\": []})\n\n",
"import pytest\nimport responses\nfrom auctioneer import constants, controllers, entities\nfrom common.http import UnExpectedResult\n\n\ndef test_keywordbid_rule_init(kwb_rule, account):\n assert kwb_rule.get_max_bid_display() == kwb_rule.max_bid * 1000000\n assert kwb_rule.get_bid_increase_percentage_display(\n ) == kwb_rule.bid_increase_percentage / 100\n assert kwb_rule.get_target_bid_diff_display(\n ) == kwb_rule.target_bid_diff / 100\n assert kwb_rule.account is account\n assert kwb_rule.target_values == [1, 2, 3]\n assert kwb_rule.get_target_type_display() in map(lambda t: t[1],\n constants.KEYWORD_BID_TARGET_TYPES)\n\n\ndef test_make_keywordbid_rule(kwb_rule):\n kw_bid_rule = controllers.keyword_bid_rule.get_keywordbid_rule(kwb_rule.id)\n assert kwb_rule == kw_bid_rule\n assert kw_bid_rule.account == kwb_rule.account\n not_found_kwb_rule = controllers.keyword_bid_rule.get_keywordbid_rule(0)\n assert not_found_kwb_rule is None\n\n\ndef test_map_keywordbid_rule(kwb_rule, account):\n kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)\n assert isinstance(kwb_ent, entities.KeywordBidRule)\n assert kwb_ent.account == account.id\n for f in kwb_rule._meta.fields:\n if f.name in ('id', 'title'):\n continue\n model_attr = getattr(kwb_rule, f.name)\n ent_attr = getattr(kwb_ent, f.name)\n if not hasattr(model_attr, 'pk'):\n try:\n assert ent_attr == getattr(kwb_rule, f'get_{f.name}_display')()\n except AttributeError:\n assert ent_attr == model_attr\n else:\n assert ent_attr == model_attr.id\n\n\ndef test_get_keyword_bids(yd_gateway, keyword_bids):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n data = keyword_bids\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=data)\n mock.add(method='POST', url=url, status=404)\n mock.add(method='POST', url=url, status=200, json=data)\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,\n selection_criteria={'CampaignIds': []})\n assert next(kwb).keyword_id == 13102117581\n assert next(kwb).keyword_id == 13102117582\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,\n selection_criteria={'CampaignIds': []})\n with pytest.raises(UnExpectedResult):\n next(kwb)\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,\n selection_criteria={'CampaignIds': []})\n assert type(next(kwb).as_dict()) is dict\n\n\ndef test_set_keyword_bids(yd_gateway, keyword_bids, keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb = controllers.keyword_bids.map_keyword_bids(keyword_bids['result'][\n 'KeywordBids'])\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=\n keyword_bids_w_warnings)\n response = controllers.keyword_bids.set_keyword_bids(yd_gateway, kwb)\n assert len(list(response)) == 1514\n\n\ndef test_calculate_keyword_bids(yd_gateway, kwb_rule, keyword_bids,\n keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=keyword_bids)\n mock.add(method='POST', url=url, status=200, json=\n keyword_bids_w_warnings)\n mock.add(method='POST', url=url, status=200, json={'error': {\n 'error_code': 0, 'error_message': 'oops!'}})\n result = controllers.keyword_bids.calculate_keyword_bids(yd_gateway,\n kwb_ent, selection_criteria={'CampaignIds': []})\n assert len(result) 
== 1514\n with pytest.raises(UnExpectedResult):\n controllers.keyword_bids.calculate_keyword_bids(yd_gateway,\n kwb_ent, selection_criteria={'CampaignIds': []})\n",
"<import token>\n\n\ndef test_keywordbid_rule_init(kwb_rule, account):\n assert kwb_rule.get_max_bid_display() == kwb_rule.max_bid * 1000000\n assert kwb_rule.get_bid_increase_percentage_display(\n ) == kwb_rule.bid_increase_percentage / 100\n assert kwb_rule.get_target_bid_diff_display(\n ) == kwb_rule.target_bid_diff / 100\n assert kwb_rule.account is account\n assert kwb_rule.target_values == [1, 2, 3]\n assert kwb_rule.get_target_type_display() in map(lambda t: t[1],\n constants.KEYWORD_BID_TARGET_TYPES)\n\n\ndef test_make_keywordbid_rule(kwb_rule):\n kw_bid_rule = controllers.keyword_bid_rule.get_keywordbid_rule(kwb_rule.id)\n assert kwb_rule == kw_bid_rule\n assert kw_bid_rule.account == kwb_rule.account\n not_found_kwb_rule = controllers.keyword_bid_rule.get_keywordbid_rule(0)\n assert not_found_kwb_rule is None\n\n\ndef test_map_keywordbid_rule(kwb_rule, account):\n kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)\n assert isinstance(kwb_ent, entities.KeywordBidRule)\n assert kwb_ent.account == account.id\n for f in kwb_rule._meta.fields:\n if f.name in ('id', 'title'):\n continue\n model_attr = getattr(kwb_rule, f.name)\n ent_attr = getattr(kwb_ent, f.name)\n if not hasattr(model_attr, 'pk'):\n try:\n assert ent_attr == getattr(kwb_rule, f'get_{f.name}_display')()\n except AttributeError:\n assert ent_attr == model_attr\n else:\n assert ent_attr == model_attr.id\n\n\ndef test_get_keyword_bids(yd_gateway, keyword_bids):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n data = keyword_bids\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=data)\n mock.add(method='POST', url=url, status=404)\n mock.add(method='POST', url=url, status=200, json=data)\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,\n selection_criteria={'CampaignIds': []})\n assert next(kwb).keyword_id == 13102117581\n assert next(kwb).keyword_id == 13102117582\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,\n selection_criteria={'CampaignIds': []})\n with pytest.raises(UnExpectedResult):\n next(kwb)\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,\n selection_criteria={'CampaignIds': []})\n assert type(next(kwb).as_dict()) is dict\n\n\ndef test_set_keyword_bids(yd_gateway, keyword_bids, keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb = controllers.keyword_bids.map_keyword_bids(keyword_bids['result'][\n 'KeywordBids'])\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=\n keyword_bids_w_warnings)\n response = controllers.keyword_bids.set_keyword_bids(yd_gateway, kwb)\n assert len(list(response)) == 1514\n\n\ndef test_calculate_keyword_bids(yd_gateway, kwb_rule, keyword_bids,\n keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=keyword_bids)\n mock.add(method='POST', url=url, status=200, json=\n keyword_bids_w_warnings)\n mock.add(method='POST', url=url, status=200, json={'error': {\n 'error_code': 0, 'error_message': 'oops!'}})\n result = controllers.keyword_bids.calculate_keyword_bids(yd_gateway,\n kwb_ent, selection_criteria={'CampaignIds': []})\n assert len(result) == 1514\n with pytest.raises(UnExpectedResult):\n controllers.keyword_bids.calculate_keyword_bids(yd_gateway,\n 
kwb_ent, selection_criteria={'CampaignIds': []})\n",
"<import token>\n\n\ndef test_keywordbid_rule_init(kwb_rule, account):\n assert kwb_rule.get_max_bid_display() == kwb_rule.max_bid * 1000000\n assert kwb_rule.get_bid_increase_percentage_display(\n ) == kwb_rule.bid_increase_percentage / 100\n assert kwb_rule.get_target_bid_diff_display(\n ) == kwb_rule.target_bid_diff / 100\n assert kwb_rule.account is account\n assert kwb_rule.target_values == [1, 2, 3]\n assert kwb_rule.get_target_type_display() in map(lambda t: t[1],\n constants.KEYWORD_BID_TARGET_TYPES)\n\n\ndef test_make_keywordbid_rule(kwb_rule):\n kw_bid_rule = controllers.keyword_bid_rule.get_keywordbid_rule(kwb_rule.id)\n assert kwb_rule == kw_bid_rule\n assert kw_bid_rule.account == kwb_rule.account\n not_found_kwb_rule = controllers.keyword_bid_rule.get_keywordbid_rule(0)\n assert not_found_kwb_rule is None\n\n\n<function token>\n\n\ndef test_get_keyword_bids(yd_gateway, keyword_bids):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n data = keyword_bids\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=data)\n mock.add(method='POST', url=url, status=404)\n mock.add(method='POST', url=url, status=200, json=data)\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,\n selection_criteria={'CampaignIds': []})\n assert next(kwb).keyword_id == 13102117581\n assert next(kwb).keyword_id == 13102117582\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,\n selection_criteria={'CampaignIds': []})\n with pytest.raises(UnExpectedResult):\n next(kwb)\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,\n selection_criteria={'CampaignIds': []})\n assert type(next(kwb).as_dict()) is dict\n\n\ndef test_set_keyword_bids(yd_gateway, keyword_bids, keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb = controllers.keyword_bids.map_keyword_bids(keyword_bids['result'][\n 'KeywordBids'])\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=\n keyword_bids_w_warnings)\n response = controllers.keyword_bids.set_keyword_bids(yd_gateway, kwb)\n assert len(list(response)) == 1514\n\n\ndef test_calculate_keyword_bids(yd_gateway, kwb_rule, keyword_bids,\n keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=keyword_bids)\n mock.add(method='POST', url=url, status=200, json=\n keyword_bids_w_warnings)\n mock.add(method='POST', url=url, status=200, json={'error': {\n 'error_code': 0, 'error_message': 'oops!'}})\n result = controllers.keyword_bids.calculate_keyword_bids(yd_gateway,\n kwb_ent, selection_criteria={'CampaignIds': []})\n assert len(result) == 1514\n with pytest.raises(UnExpectedResult):\n controllers.keyword_bids.calculate_keyword_bids(yd_gateway,\n kwb_ent, selection_criteria={'CampaignIds': []})\n",
"<import token>\n<function token>\n\n\ndef test_make_keywordbid_rule(kwb_rule):\n kw_bid_rule = controllers.keyword_bid_rule.get_keywordbid_rule(kwb_rule.id)\n assert kwb_rule == kw_bid_rule\n assert kw_bid_rule.account == kwb_rule.account\n not_found_kwb_rule = controllers.keyword_bid_rule.get_keywordbid_rule(0)\n assert not_found_kwb_rule is None\n\n\n<function token>\n\n\ndef test_get_keyword_bids(yd_gateway, keyword_bids):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n data = keyword_bids\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=data)\n mock.add(method='POST', url=url, status=404)\n mock.add(method='POST', url=url, status=200, json=data)\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,\n selection_criteria={'CampaignIds': []})\n assert next(kwb).keyword_id == 13102117581\n assert next(kwb).keyword_id == 13102117582\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,\n selection_criteria={'CampaignIds': []})\n with pytest.raises(UnExpectedResult):\n next(kwb)\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,\n selection_criteria={'CampaignIds': []})\n assert type(next(kwb).as_dict()) is dict\n\n\ndef test_set_keyword_bids(yd_gateway, keyword_bids, keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb = controllers.keyword_bids.map_keyword_bids(keyword_bids['result'][\n 'KeywordBids'])\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=\n keyword_bids_w_warnings)\n response = controllers.keyword_bids.set_keyword_bids(yd_gateway, kwb)\n assert len(list(response)) == 1514\n\n\ndef test_calculate_keyword_bids(yd_gateway, kwb_rule, keyword_bids,\n keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb_ent = controllers.keyword_bid_rule.map_keyword_bid_rule(kwb_rule)\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=keyword_bids)\n mock.add(method='POST', url=url, status=200, json=\n keyword_bids_w_warnings)\n mock.add(method='POST', url=url, status=200, json={'error': {\n 'error_code': 0, 'error_message': 'oops!'}})\n result = controllers.keyword_bids.calculate_keyword_bids(yd_gateway,\n kwb_ent, selection_criteria={'CampaignIds': []})\n assert len(result) == 1514\n with pytest.raises(UnExpectedResult):\n controllers.keyword_bids.calculate_keyword_bids(yd_gateway,\n kwb_ent, selection_criteria={'CampaignIds': []})\n",
"<import token>\n<function token>\n\n\ndef test_make_keywordbid_rule(kwb_rule):\n kw_bid_rule = controllers.keyword_bid_rule.get_keywordbid_rule(kwb_rule.id)\n assert kwb_rule == kw_bid_rule\n assert kw_bid_rule.account == kwb_rule.account\n not_found_kwb_rule = controllers.keyword_bid_rule.get_keywordbid_rule(0)\n assert not_found_kwb_rule is None\n\n\n<function token>\n\n\ndef test_get_keyword_bids(yd_gateway, keyword_bids):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n data = keyword_bids\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=data)\n mock.add(method='POST', url=url, status=404)\n mock.add(method='POST', url=url, status=200, json=data)\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,\n selection_criteria={'CampaignIds': []})\n assert next(kwb).keyword_id == 13102117581\n assert next(kwb).keyword_id == 13102117582\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,\n selection_criteria={'CampaignIds': []})\n with pytest.raises(UnExpectedResult):\n next(kwb)\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,\n selection_criteria={'CampaignIds': []})\n assert type(next(kwb).as_dict()) is dict\n\n\ndef test_set_keyword_bids(yd_gateway, keyword_bids, keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb = controllers.keyword_bids.map_keyword_bids(keyword_bids['result'][\n 'KeywordBids'])\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=\n keyword_bids_w_warnings)\n response = controllers.keyword_bids.set_keyword_bids(yd_gateway, kwb)\n assert len(list(response)) == 1514\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_get_keyword_bids(yd_gateway, keyword_bids):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n data = keyword_bids\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=data)\n mock.add(method='POST', url=url, status=404)\n mock.add(method='POST', url=url, status=200, json=data)\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,\n selection_criteria={'CampaignIds': []})\n assert next(kwb).keyword_id == 13102117581\n assert next(kwb).keyword_id == 13102117582\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,\n selection_criteria={'CampaignIds': []})\n with pytest.raises(UnExpectedResult):\n next(kwb)\n kwb = controllers.keyword_bids.get_keyword_bids(yd_gateway,\n selection_criteria={'CampaignIds': []})\n assert type(next(kwb).as_dict()) is dict\n\n\ndef test_set_keyword_bids(yd_gateway, keyword_bids, keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb = controllers.keyword_bids.map_keyword_bids(keyword_bids['result'][\n 'KeywordBids'])\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=\n keyword_bids_w_warnings)\n response = controllers.keyword_bids.set_keyword_bids(yd_gateway, kwb)\n assert len(list(response)) == 1514\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef test_set_keyword_bids(yd_gateway, keyword_bids, keyword_bids_w_warnings):\n url = f'{yd_gateway.get_api_url()}/{yd_gateway.endpoints.KEYWORD_BIDS}'\n kwb = controllers.keyword_bids.map_keyword_bids(keyword_bids['result'][\n 'KeywordBids'])\n with responses.RequestsMock() as mock:\n mock.add(method='POST', url=url, status=200, json=\n keyword_bids_w_warnings)\n response = controllers.keyword_bids.set_keyword_bids(yd_gateway, kwb)\n assert len(list(response)) == 1514\n\n\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
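A minimal, self-contained sketch of the `responses` mocking pattern used in the tests above, assuming the `responses` and `requests` packages are installed; the URL and payload are illustrative, not the gateway's real endpoint:

# Sketch: intercept a POST with RequestsMock and assert on the canned JSON.
import requests
import responses

with responses.RequestsMock() as mock:
    mock.add(method='POST', url='https://api.example.com/bids',
             status=200, json={'result': {'KeywordBids': []}})
    r = requests.post('https://api.example.com/bids',
                      json={'SelectionCriteria': {}})
    assert r.status_code == 200
    assert r.json()['result']['KeywordBids'] == []

On exit, RequestsMock also asserts that every registered response was actually requested, which is why the original tests queue one mock per expected call.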
923 |
66904cbe3e57d9cc1ee385cd8a4c1ba3767626bd
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# sphinx_gallery_thumbnail_number = 3
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import NullFormatter # useful for `logit` scale
import matplotlib.ticker as ticker
import matplotlib as mpl
mpl.style.use('classic')
# Data for plotting
chi2=np.loadtxt(r'Lam0/buffer/chi2.dat')
chi4=np.loadtxt(r'Lam0/buffer/chi4.dat')
# Create figure
fig=plt.figure(figsize=(9, 3.5))
ax1=fig.add_subplot(121)
ax1.plot(chi2,color='r',linestyle='--',linewidth=2,markersize=5,label=r'$\chi^B_2$')
ax1.axis([0,300,-0.05,0.2])
ax1.set_xlabel(r'$T\,[\mathrm{MeV}]$', fontsize=15, color='black')  # raw string: '\m' is an invalid escape otherwise
ax1.set_ylabel(r'$\chi_2$', fontsize=15, color='black')
for label in ax1.xaxis.get_ticklabels():
label.set_fontsize(10)
for label in ax1.yaxis.get_ticklabels():
label.set_fontsize(10)
# Plot two
ax2=fig.add_subplot(122)
ax2.plot(chi4,color='k',linestyle='-',linewidth=2,markersize=5,label=r'$\chi^B_4$')
ax2.axis([0,300,-0.15,0.2])
ax2.set_xlabel(r'$T\,[\mathrm{MeV}]$', fontsize=15, color='black')  # raw string, as above
ax2.set_ylabel(r'$\chi_4$', fontsize=15, color='black')
ax2.legend(loc=0,fontsize=7.3,frameon=False,shadow=True,handlelength=3.,borderpad=0.5,borderaxespad=1)
for label in ax2.xaxis.get_ticklabels():
label.set_fontsize(10)
for label in ax2.yaxis.get_ticklabels():
label.set_fontsize(10)
fig.subplots_adjust(top=0.9, bottom=0.15, left=0.1, right=0.95, hspace=0.35,
wspace=0.2)
fig.savefig("chi.pdf")
#plt.show()
|
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# sphinx_gallery_thumbnail_number = 3\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.ticker import NullFormatter # useful for `logit` scale\nimport matplotlib.ticker as ticker\nimport matplotlib as mpl\n\nmpl.style.use('classic')\n\n\n# Data for plotting\n\n\nchi2=np.loadtxt(r'Lam0/buffer/chi2.dat')\nchi4=np.loadtxt(r'Lam0/buffer/chi4.dat')\n\n\n# Create figure\nfig=plt.figure(figsize=(9, 3.5))\nax1=fig.add_subplot(121)\n\nax1.plot(chi2,color='r',linestyle='--',linewidth=2,markersize=5,label=r'$\\chi^B_2$')\n\n\nax1.axis([0,300,-0.05,0.2])\n\nax1.set_xlabel('$T\\,[\\mathrm{MeV}]$', fontsize=15, color='black')\nax1.set_ylabel(r'$\\chi_2$', fontsize=15, color='black')\n\n\n\nfor label in ax1.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax1.yaxis.get_ticklabels():\n label.set_fontsize(10)\n\n\n# Plot two\nax2=fig.add_subplot(122)\n\nax2.plot(chi4,color='k',linestyle='-',linewidth=2,markersize=5,label=r'$\\chi^B_4$')\n\nax2.axis([0,300,-0.15,0.2])\n\nax2.set_xlabel('$T\\,[\\mathrm{MeV}]$', fontsize=15, color='black')\nax2.set_ylabel(r'$\\chi_4$', fontsize=15, color='black')\nax2.legend(loc=0,fontsize=7.3,frameon=False,shadow=True,handlelength=3.,borderpad=0.5,borderaxespad=1)\n\nfor label in ax2.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax2.yaxis.get_ticklabels():\n label.set_fontsize(10)\n\n\n\nfig.subplots_adjust(top=0.9, bottom=0.15, left=0.1, right=0.95, hspace=0.35,\n wspace=0.2)\n \n\nfig.savefig(\"chi.pdf\")\n\n#plt.show()\n",
"import matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.ticker import NullFormatter\nimport matplotlib.ticker as ticker\nimport matplotlib as mpl\nmpl.style.use('classic')\nchi2 = np.loadtxt('Lam0/buffer/chi2.dat')\nchi4 = np.loadtxt('Lam0/buffer/chi4.dat')\nfig = plt.figure(figsize=(9, 3.5))\nax1 = fig.add_subplot(121)\nax1.plot(chi2, color='r', linestyle='--', linewidth=2, markersize=5, label=\n '$\\\\chi^B_2$')\nax1.axis([0, 300, -0.05, 0.2])\nax1.set_xlabel('$T\\\\,[\\\\mathrm{MeV}]$', fontsize=15, color='black')\nax1.set_ylabel('$\\\\chi_2$', fontsize=15, color='black')\nfor label in ax1.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax1.yaxis.get_ticklabels():\n label.set_fontsize(10)\nax2 = fig.add_subplot(122)\nax2.plot(chi4, color='k', linestyle='-', linewidth=2, markersize=5, label=\n '$\\\\chi^B_4$')\nax2.axis([0, 300, -0.15, 0.2])\nax2.set_xlabel('$T\\\\,[\\\\mathrm{MeV}]$', fontsize=15, color='black')\nax2.set_ylabel('$\\\\chi_4$', fontsize=15, color='black')\nax2.legend(loc=0, fontsize=7.3, frameon=False, shadow=True, handlelength=\n 3.0, borderpad=0.5, borderaxespad=1)\nfor label in ax2.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax2.yaxis.get_ticklabels():\n label.set_fontsize(10)\nfig.subplots_adjust(top=0.9, bottom=0.15, left=0.1, right=0.95, hspace=0.35,\n wspace=0.2)\nfig.savefig('chi.pdf')\n",
"<import token>\nmpl.style.use('classic')\nchi2 = np.loadtxt('Lam0/buffer/chi2.dat')\nchi4 = np.loadtxt('Lam0/buffer/chi4.dat')\nfig = plt.figure(figsize=(9, 3.5))\nax1 = fig.add_subplot(121)\nax1.plot(chi2, color='r', linestyle='--', linewidth=2, markersize=5, label=\n '$\\\\chi^B_2$')\nax1.axis([0, 300, -0.05, 0.2])\nax1.set_xlabel('$T\\\\,[\\\\mathrm{MeV}]$', fontsize=15, color='black')\nax1.set_ylabel('$\\\\chi_2$', fontsize=15, color='black')\nfor label in ax1.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax1.yaxis.get_ticklabels():\n label.set_fontsize(10)\nax2 = fig.add_subplot(122)\nax2.plot(chi4, color='k', linestyle='-', linewidth=2, markersize=5, label=\n '$\\\\chi^B_4$')\nax2.axis([0, 300, -0.15, 0.2])\nax2.set_xlabel('$T\\\\,[\\\\mathrm{MeV}]$', fontsize=15, color='black')\nax2.set_ylabel('$\\\\chi_4$', fontsize=15, color='black')\nax2.legend(loc=0, fontsize=7.3, frameon=False, shadow=True, handlelength=\n 3.0, borderpad=0.5, borderaxespad=1)\nfor label in ax2.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax2.yaxis.get_ticklabels():\n label.set_fontsize(10)\nfig.subplots_adjust(top=0.9, bottom=0.15, left=0.1, right=0.95, hspace=0.35,\n wspace=0.2)\nfig.savefig('chi.pdf')\n",
"<import token>\nmpl.style.use('classic')\n<assignment token>\nax1.plot(chi2, color='r', linestyle='--', linewidth=2, markersize=5, label=\n '$\\\\chi^B_2$')\nax1.axis([0, 300, -0.05, 0.2])\nax1.set_xlabel('$T\\\\,[\\\\mathrm{MeV}]$', fontsize=15, color='black')\nax1.set_ylabel('$\\\\chi_2$', fontsize=15, color='black')\nfor label in ax1.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax1.yaxis.get_ticklabels():\n label.set_fontsize(10)\n<assignment token>\nax2.plot(chi4, color='k', linestyle='-', linewidth=2, markersize=5, label=\n '$\\\\chi^B_4$')\nax2.axis([0, 300, -0.15, 0.2])\nax2.set_xlabel('$T\\\\,[\\\\mathrm{MeV}]$', fontsize=15, color='black')\nax2.set_ylabel('$\\\\chi_4$', fontsize=15, color='black')\nax2.legend(loc=0, fontsize=7.3, frameon=False, shadow=True, handlelength=\n 3.0, borderpad=0.5, borderaxespad=1)\nfor label in ax2.xaxis.get_ticklabels():\n label.set_fontsize(10)\nfor label in ax2.yaxis.get_ticklabels():\n label.set_fontsize(10)\nfig.subplots_adjust(top=0.9, bottom=0.15, left=0.1, right=0.95, hspace=0.35,\n wspace=0.2)\nfig.savefig('chi.pdf')\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
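A minimal, runnable sketch of the two-panel layout used above, with synthetic data in place of the chi2/chi4 files (which are not available here) and a headless backend; the file name is illustrative:

# Sketch: two side-by-side axes with TeX labels, saved to PDF.
import matplotlib
matplotlib.use('Agg')                 # render without a display
import matplotlib.pyplot as plt
import numpy as np

x = np.linspace(0, 300, 301)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 3.5))
ax1.plot(x, np.exp(-x / 100), 'r--', label=r'$\chi^B_2$')
ax2.plot(x, np.exp(-x / 150), 'k-',  label=r'$\chi^B_4$')
for ax in (ax1, ax2):
    ax.set_xlabel(r'$T\,[\mathrm{MeV}]$')   # raw string keeps the TeX escapes intact
    ax.legend(loc=0, frameon=False)
fig.savefig('chi_sketch.pdf')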
924 |
a01ca49c3fa8ea76de2880c1b04bf15ccd341edd
|
# coding=UTF-8
"""
View for managing accounts
"""
from django.contrib import messages
from django.core.paginator import Paginator
from django.http import Http404, HttpResponse
from django.shortcuts import redirect
from django import forms
from athena.core import render_to_response
from athena.users.models import User
from athena.users import must_be_admin
def klist(**kwargs):
kwargs.update({
'teachers': [x for x in User.objects.filter(status=1) if not x.is_demo()],
'admins': User.objects.filter(status=2),
})
return kwargs
@must_be_admin
def list(request):
return render_to_response('radmin/manage_accounts_list.html', request, **klist())
@must_be_admin
def account(request, account_id):
try:
acc = User.objects.get(id=int(account_id))
    except (User.DoesNotExist, ValueError):
raise Http404
class AccountBaseForm(forms.ModelForm):
class Meta:
model = User
fields = ['name', 'surname', 'number']
widgets = {
'name': forms.TextInput(),
'surname': forms.TextInput(),
}
if request.method == 'POST':
form = AccountBaseForm(request.POST, instance=acc)
if form.is_valid():
form.save()
messages.add_message(request, messages.SUCCESS, u'Zapisano.')
else:
form = AccountBaseForm(instance=acc)
if acc.status != 0:
return render_to_response('radmin/manage_accounts_acc.html', request, **klist(
account=acc,
selected_user_id=acc.id,
form=form))
else:
return render_to_response('radmin/manage_accounts_students_acc.html', request,
account=acc,
selected_user_id=acc.id,
form=form,
page=Paginator(User.objects.filter(status=0).order_by('surname', 'name'), 30).page(1))
@must_be_admin
def reset_pwd(request, account_id):
if request.method != 'POST':
return HttpResponse(status=400)
try:
acc = User.objects.get(id=int(account_id))
    except (User.DoesNotExist, ValueError):
raise Http404
from random import choice
randompass = ''.join([choice('1234567890qwertyupasdfghjklzxcvbnm') for i in range(7)])
acc.set_password(randompass)
messages.add_message(request, messages.SUCCESS, u'Nowe hasło to %s' % (randompass, ))
return redirect('/admin/accounts/%s/' % (acc.id, ))
@must_be_admin
def su(request, account_id):
"""Login as this user"""
if request.method != 'POST':
return HttpResponse(status=400)
try:
acc = User.objects.get(id=int(account_id))
    except (User.DoesNotExist, ValueError):
raise Http404
request.logout()
request.login(acc.login)
messages.add_message(request, messages.SUCCESS, u'Zalogowano jako %s' % (acc.login, ))
return redirect('/')
@must_be_admin
def delete(request, account_id):
if request.method != 'POST':
return HttpResponse(status=400)
try:
acc = User.objects.get(id=int(account_id))
    except (User.DoesNotExist, ValueError):
raise Http404
if acc.login in ('[email protected]', '[email protected]', '[email protected]'):
messages.add_message(request, messages.ERROR, u'Nie można usunąć konta wbudowanego')
return redirect('/admin/accounts/%s/' % (acc.id, ))
    if acc.status == 1:
        # This is a teacher. All of its tests and groups should be
        # reparented to [email protected] before deletion.
        pass
messages.add_message(request, messages.SUCCESS, u'Konto "%s %s" usunięte.' % (acc.name, acc.surname))
acc.delete()
return redirect('/admin/accounts/')
@must_be_admin
def create(request):
class NewAccountForm(forms.Form):
        _CHOICE = ((1, 'Nauczyciel'), (2, 'Administrator'))
login = forms.EmailField(label=u'E-mail')
name = forms.CharField(label=u'Imię', required=False)
surname = forms.CharField(label=u'Nazwisko', required=False)
status = forms.ChoiceField(choices=_CHOICE, initial=1, label=u'Typ')
if request.method == 'POST':
form = NewAccountForm(request.POST)
if form.is_valid():
# grab a random password
from random import choice
randompass = ''.join([choice('1234567890qwertyupasdfghjklzxcvbnm') for i in range(7)])
u = User(login=form.cleaned_data['login'],
name=form.cleaned_data['name'],
surname=form.cleaned_data['surname'],
status=form.cleaned_data['status'])
u.save()
u.set_password(randompass)
messages.add_message(request, messages.SUCCESS, u'Konto stworzone. Nowe hasło to %s' % (randompass, ))
return redirect('/admin/accounts/%s/' % (u.id, ))
else:
form = NewAccountForm()
return render_to_response('radmin/manage_accounts_add.html', request, **klist(
selected_user_id='create',
form=form))
@must_be_admin
def view_students(request, page='1'):
page = int(page)
students = User.objects.filter(status=0).order_by('surname', 'name')
students = [x for x in students if not x.is_demo()]
p = Paginator(students, 30)
cpage = p.page(page)
return render_to_response('radmin/manage_accounts_students_list.html', request,
page=cpage)
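# Defensive sketch (an assumption, not called above): Paginator.page() raises
# EmptyPage/PageNotAnInteger on out-of-range input, so a clamping helper like
# this could guard view_students() and the student branch of account().
def _safe_page(paginator, number):
    from django.core.paginator import EmptyPage, PageNotAnInteger
    try:
        return paginator.page(number)
    except PageNotAnInteger:
        return paginator.page(1)
    except EmptyPage:
        return paginator.page(paginator.num_pages)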
|
[
"# coding=UTF-8\n\"\"\"\nView for managing accounts\n\"\"\"\n\nfrom django.contrib import messages\nfrom django.http import Http404, HttpResponse\nfrom django.shortcuts import redirect\nfrom django import forms\nfrom athena.core import render_to_response\nfrom athena.users.models import User\nfrom athena.users import must_be_admin\n\n\ndef klist(**kwargs):\n kwargs.update({\n 'teachers': [x for x in User.objects.filter(status=1) if not x.is_demo()],\n 'admins': User.objects.filter(status=2),\n })\n return kwargs\n\n\n@must_be_admin\ndef list(request):\n return render_to_response('radmin/manage_accounts_list.html', request, **klist())\n\n@must_be_admin\ndef account(request, account_id):\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n\n class AccountBaseForm(forms.ModelForm):\n class Meta:\n model = User\n fields = ['name', 'surname', 'number']\n widgets = {\n 'name': forms.TextInput(),\n 'surname': forms.TextInput(),\n }\n\n if request.method == 'POST':\n form = AccountBaseForm(request.POST, instance=acc)\n\n if form.is_valid():\n form.save()\n messages.add_message(request, messages.SUCCESS, u'Zapisano.')\n\n else:\n form = AccountBaseForm(instance=acc)\n\n if acc.status != 0:\n return render_to_response('radmin/manage_accounts_acc.html', request, **klist(\n account=acc,\n selected_user_id=acc.id,\n form=form))\n else:\n return render_to_response('radmin/manage_accounts_students_acc.html', request,\n account=acc,\n selected_user_id=acc.id,\n form=form,\n page=Paginator(User.objects.filter(status=0).order_by('surname', 'name'), 30).page(1))\n\n\n@must_be_admin\ndef reset_pwd(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n\n from random import choice\n randompass = ''.join([choice('1234567890qwertyupasdfghjklzxcvbnm') for i in range(7)])\n\n acc.set_password(randompass)\n\n messages.add_message(request, messages.SUCCESS, u'Nowe hasło to %s' % (randompass, ))\n\n return redirect('/admin/accounts/%s/' % (acc.id, ))\n\n\n@must_be_admin\ndef su(request, account_id):\n \"\"\"Login as this user\"\"\"\n if request.method != 'POST':\n return HttpResponse(status=400)\n\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n\n request.logout()\n request.login(acc.login)\n\n messages.add_message(request, messages.SUCCESS, u'Zalogowano jako %s' % (acc.login, ))\n\n return redirect('/')\n\n@must_be_admin\ndef delete(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n\n if acc.login in ('[email protected]', '[email protected]', '[email protected]'):\n messages.add_message(request, messages.ERROR, u'Nie można usunąć konta wbudowanego')\n return redirect('/admin/accounts/%s/' % (acc.id, ))\n\n if acc.status == 1:\n # This is a teacher. You should reparent all of it's tests\n # and groups to user to [email protected]\n pass\n\n messages.add_message(request, messages.SUCCESS, u'Konto \"%s %s\" usunięte.' 
% (acc.name, acc.surname))\n\n acc.delete()\n\n return redirect('/admin/accounts/')\n\n\n@must_be_admin\ndef create(request):\n\n class NewAccountForm(forms.Form):\n _CHOICE = ((1, 'Nauczyciel'), (2, 'Adminstrator'))\n login = forms.EmailField(label=u'E-mail')\n name = forms.CharField(label=u'Imię', required=False) \n surname = forms.CharField(label=u'Nazwisko', required=False)\n status = forms.ChoiceField(choices=_CHOICE, initial=1, label=u'Typ')\n\n if request.method == 'POST':\n form = NewAccountForm(request.POST)\n\n if form.is_valid():\n\n # grab a random password\n from random import choice\n randompass = ''.join([choice('1234567890qwertyupasdfghjklzxcvbnm') for i in range(7)])\n\n u = User(login=form.cleaned_data['login'],\n name=form.cleaned_data['name'],\n surname=form.cleaned_data['surname'],\n status=form.cleaned_data['status'])\n u.save()\n u.set_password(randompass)\n\n messages.add_message(request, messages.SUCCESS, u'Konto stworzone. Nowe hasło to %s' % (randompass, ))\n\n return redirect('/admin/accounts/%s/' % (u.id, ))\n\n else:\n form = NewAccountForm()\n\n return render_to_response('radmin/manage_accounts_add.html', request, **klist(\n selected_user_id='create',\n form=form))\n\nfrom django.core.paginator import Paginator\n\n@must_be_admin\ndef view_students(request, page='1'):\n page = int(page)\n students = User.objects.filter(status=0).order_by('surname', 'name')\n students = [x for x in students if not x.is_demo()]\n p = Paginator(students, 30)\n\n cpage = p.page(page)\n\n return render_to_response('radmin/manage_accounts_students_list.html', request,\n page=cpage)",
"<docstring token>\nfrom django.contrib import messages\nfrom django.http import Http404, HttpResponse\nfrom django.shortcuts import redirect\nfrom django import forms\nfrom athena.core import render_to_response\nfrom athena.users.models import User\nfrom athena.users import must_be_admin\n\n\ndef klist(**kwargs):\n kwargs.update({'teachers': [x for x in User.objects.filter(status=1) if\n not x.is_demo()], 'admins': User.objects.filter(status=2)})\n return kwargs\n\n\n@must_be_admin\ndef list(request):\n return render_to_response('radmin/manage_accounts_list.html', request,\n **klist())\n\n\n@must_be_admin\ndef account(request, account_id):\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n\n\n class AccountBaseForm(forms.ModelForm):\n\n\n class Meta:\n model = User\n fields = ['name', 'surname', 'number']\n widgets = {'name': forms.TextInput(), 'surname': forms.TextInput()}\n if request.method == 'POST':\n form = AccountBaseForm(request.POST, instance=acc)\n if form.is_valid():\n form.save()\n messages.add_message(request, messages.SUCCESS, u'Zapisano.')\n else:\n form = AccountBaseForm(instance=acc)\n if acc.status != 0:\n return render_to_response('radmin/manage_accounts_acc.html',\n request, **klist(account=acc, selected_user_id=acc.id, form=form))\n else:\n return render_to_response('radmin/manage_accounts_students_acc.html',\n request, account=acc, selected_user_id=acc.id, form=form, page=\n Paginator(User.objects.filter(status=0).order_by('surname',\n 'name'), 30).page(1))\n\n\n@must_be_admin\ndef reset_pwd(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n from random import choice\n randompass = ''.join([choice('1234567890qwertyupasdfghjklzxcvbnm') for\n i in range(7)])\n acc.set_password(randompass)\n messages.add_message(request, messages.SUCCESS, u'Nowe hasło to %s' % (\n randompass,))\n return redirect('/admin/accounts/%s/' % (acc.id,))\n\n\n@must_be_admin\ndef su(request, account_id):\n \"\"\"Login as this user\"\"\"\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n request.logout()\n request.login(acc.login)\n messages.add_message(request, messages.SUCCESS, u'Zalogowano jako %s' %\n (acc.login,))\n return redirect('/')\n\n\n@must_be_admin\ndef delete(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n if acc.login in ('[email protected]', '[email protected]',\n '[email protected]'):\n messages.add_message(request, messages.ERROR,\n u'Nie można usunąć konta wbudowanego')\n return redirect('/admin/accounts/%s/' % (acc.id,))\n if acc.status == 1:\n pass\n messages.add_message(request, messages.SUCCESS, \n u'Konto \"%s %s\" usunięte.' 
% (acc.name, acc.surname))\n acc.delete()\n return redirect('/admin/accounts/')\n\n\n@must_be_admin\ndef create(request):\n\n\n class NewAccountForm(forms.Form):\n _CHOICE = (1, 'Nauczyciel'), (2, 'Adminstrator')\n login = forms.EmailField(label=u'E-mail')\n name = forms.CharField(label=u'Imię', required=False)\n surname = forms.CharField(label=u'Nazwisko', required=False)\n status = forms.ChoiceField(choices=_CHOICE, initial=1, label=u'Typ')\n if request.method == 'POST':\n form = NewAccountForm(request.POST)\n if form.is_valid():\n from random import choice\n randompass = ''.join([choice(\n '1234567890qwertyupasdfghjklzxcvbnm') for i in range(7)])\n u = User(login=form.cleaned_data['login'], name=form.\n cleaned_data['name'], surname=form.cleaned_data['surname'],\n status=form.cleaned_data['status'])\n u.save()\n u.set_password(randompass)\n messages.add_message(request, messages.SUCCESS, \n u'Konto stworzone. Nowe hasło to %s' % (randompass,))\n return redirect('/admin/accounts/%s/' % (u.id,))\n else:\n form = NewAccountForm()\n return render_to_response('radmin/manage_accounts_add.html', request,\n **klist(selected_user_id='create', form=form))\n\n\nfrom django.core.paginator import Paginator\n\n\n@must_be_admin\ndef view_students(request, page='1'):\n page = int(page)\n students = User.objects.filter(status=0).order_by('surname', 'name')\n students = [x for x in students if not x.is_demo()]\n p = Paginator(students, 30)\n cpage = p.page(page)\n return render_to_response('radmin/manage_accounts_students_list.html',\n request, page=cpage)\n",
"<docstring token>\n<import token>\n\n\ndef klist(**kwargs):\n kwargs.update({'teachers': [x for x in User.objects.filter(status=1) if\n not x.is_demo()], 'admins': User.objects.filter(status=2)})\n return kwargs\n\n\n@must_be_admin\ndef list(request):\n return render_to_response('radmin/manage_accounts_list.html', request,\n **klist())\n\n\n@must_be_admin\ndef account(request, account_id):\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n\n\n class AccountBaseForm(forms.ModelForm):\n\n\n class Meta:\n model = User\n fields = ['name', 'surname', 'number']\n widgets = {'name': forms.TextInput(), 'surname': forms.TextInput()}\n if request.method == 'POST':\n form = AccountBaseForm(request.POST, instance=acc)\n if form.is_valid():\n form.save()\n messages.add_message(request, messages.SUCCESS, u'Zapisano.')\n else:\n form = AccountBaseForm(instance=acc)\n if acc.status != 0:\n return render_to_response('radmin/manage_accounts_acc.html',\n request, **klist(account=acc, selected_user_id=acc.id, form=form))\n else:\n return render_to_response('radmin/manage_accounts_students_acc.html',\n request, account=acc, selected_user_id=acc.id, form=form, page=\n Paginator(User.objects.filter(status=0).order_by('surname',\n 'name'), 30).page(1))\n\n\n@must_be_admin\ndef reset_pwd(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n from random import choice\n randompass = ''.join([choice('1234567890qwertyupasdfghjklzxcvbnm') for\n i in range(7)])\n acc.set_password(randompass)\n messages.add_message(request, messages.SUCCESS, u'Nowe hasło to %s' % (\n randompass,))\n return redirect('/admin/accounts/%s/' % (acc.id,))\n\n\n@must_be_admin\ndef su(request, account_id):\n \"\"\"Login as this user\"\"\"\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n request.logout()\n request.login(acc.login)\n messages.add_message(request, messages.SUCCESS, u'Zalogowano jako %s' %\n (acc.login,))\n return redirect('/')\n\n\n@must_be_admin\ndef delete(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n if acc.login in ('[email protected]', '[email protected]',\n '[email protected]'):\n messages.add_message(request, messages.ERROR,\n u'Nie można usunąć konta wbudowanego')\n return redirect('/admin/accounts/%s/' % (acc.id,))\n if acc.status == 1:\n pass\n messages.add_message(request, messages.SUCCESS, \n u'Konto \"%s %s\" usunięte.' 
% (acc.name, acc.surname))\n acc.delete()\n return redirect('/admin/accounts/')\n\n\n@must_be_admin\ndef create(request):\n\n\n class NewAccountForm(forms.Form):\n _CHOICE = (1, 'Nauczyciel'), (2, 'Adminstrator')\n login = forms.EmailField(label=u'E-mail')\n name = forms.CharField(label=u'Imię', required=False)\n surname = forms.CharField(label=u'Nazwisko', required=False)\n status = forms.ChoiceField(choices=_CHOICE, initial=1, label=u'Typ')\n if request.method == 'POST':\n form = NewAccountForm(request.POST)\n if form.is_valid():\n from random import choice\n randompass = ''.join([choice(\n '1234567890qwertyupasdfghjklzxcvbnm') for i in range(7)])\n u = User(login=form.cleaned_data['login'], name=form.\n cleaned_data['name'], surname=form.cleaned_data['surname'],\n status=form.cleaned_data['status'])\n u.save()\n u.set_password(randompass)\n messages.add_message(request, messages.SUCCESS, \n u'Konto stworzone. Nowe hasło to %s' % (randompass,))\n return redirect('/admin/accounts/%s/' % (u.id,))\n else:\n form = NewAccountForm()\n return render_to_response('radmin/manage_accounts_add.html', request,\n **klist(selected_user_id='create', form=form))\n\n\n<import token>\n\n\n@must_be_admin\ndef view_students(request, page='1'):\n page = int(page)\n students = User.objects.filter(status=0).order_by('surname', 'name')\n students = [x for x in students if not x.is_demo()]\n p = Paginator(students, 30)\n cpage = p.page(page)\n return render_to_response('radmin/manage_accounts_students_list.html',\n request, page=cpage)\n",
"<docstring token>\n<import token>\n\n\ndef klist(**kwargs):\n kwargs.update({'teachers': [x for x in User.objects.filter(status=1) if\n not x.is_demo()], 'admins': User.objects.filter(status=2)})\n return kwargs\n\n\n@must_be_admin\ndef list(request):\n return render_to_response('radmin/manage_accounts_list.html', request,\n **klist())\n\n\n@must_be_admin\ndef account(request, account_id):\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n\n\n class AccountBaseForm(forms.ModelForm):\n\n\n class Meta:\n model = User\n fields = ['name', 'surname', 'number']\n widgets = {'name': forms.TextInput(), 'surname': forms.TextInput()}\n if request.method == 'POST':\n form = AccountBaseForm(request.POST, instance=acc)\n if form.is_valid():\n form.save()\n messages.add_message(request, messages.SUCCESS, u'Zapisano.')\n else:\n form = AccountBaseForm(instance=acc)\n if acc.status != 0:\n return render_to_response('radmin/manage_accounts_acc.html',\n request, **klist(account=acc, selected_user_id=acc.id, form=form))\n else:\n return render_to_response('radmin/manage_accounts_students_acc.html',\n request, account=acc, selected_user_id=acc.id, form=form, page=\n Paginator(User.objects.filter(status=0).order_by('surname',\n 'name'), 30).page(1))\n\n\n@must_be_admin\ndef reset_pwd(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n from random import choice\n randompass = ''.join([choice('1234567890qwertyupasdfghjklzxcvbnm') for\n i in range(7)])\n acc.set_password(randompass)\n messages.add_message(request, messages.SUCCESS, u'Nowe hasło to %s' % (\n randompass,))\n return redirect('/admin/accounts/%s/' % (acc.id,))\n\n\n@must_be_admin\ndef su(request, account_id):\n \"\"\"Login as this user\"\"\"\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n request.logout()\n request.login(acc.login)\n messages.add_message(request, messages.SUCCESS, u'Zalogowano jako %s' %\n (acc.login,))\n return redirect('/')\n\n\n@must_be_admin\ndef delete(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n if acc.login in ('[email protected]', '[email protected]',\n '[email protected]'):\n messages.add_message(request, messages.ERROR,\n u'Nie można usunąć konta wbudowanego')\n return redirect('/admin/accounts/%s/' % (acc.id,))\n if acc.status == 1:\n pass\n messages.add_message(request, messages.SUCCESS, \n u'Konto \"%s %s\" usunięte.' 
% (acc.name, acc.surname))\n acc.delete()\n return redirect('/admin/accounts/')\n\n\n@must_be_admin\ndef create(request):\n\n\n class NewAccountForm(forms.Form):\n _CHOICE = (1, 'Nauczyciel'), (2, 'Adminstrator')\n login = forms.EmailField(label=u'E-mail')\n name = forms.CharField(label=u'Imię', required=False)\n surname = forms.CharField(label=u'Nazwisko', required=False)\n status = forms.ChoiceField(choices=_CHOICE, initial=1, label=u'Typ')\n if request.method == 'POST':\n form = NewAccountForm(request.POST)\n if form.is_valid():\n from random import choice\n randompass = ''.join([choice(\n '1234567890qwertyupasdfghjklzxcvbnm') for i in range(7)])\n u = User(login=form.cleaned_data['login'], name=form.\n cleaned_data['name'], surname=form.cleaned_data['surname'],\n status=form.cleaned_data['status'])\n u.save()\n u.set_password(randompass)\n messages.add_message(request, messages.SUCCESS, \n u'Konto stworzone. Nowe hasło to %s' % (randompass,))\n return redirect('/admin/accounts/%s/' % (u.id,))\n else:\n form = NewAccountForm()\n return render_to_response('radmin/manage_accounts_add.html', request,\n **klist(selected_user_id='create', form=form))\n\n\n<import token>\n<function token>\n",
"<docstring token>\n<import token>\n\n\ndef klist(**kwargs):\n kwargs.update({'teachers': [x for x in User.objects.filter(status=1) if\n not x.is_demo()], 'admins': User.objects.filter(status=2)})\n return kwargs\n\n\n<function token>\n\n\n@must_be_admin\ndef account(request, account_id):\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n\n\n class AccountBaseForm(forms.ModelForm):\n\n\n class Meta:\n model = User\n fields = ['name', 'surname', 'number']\n widgets = {'name': forms.TextInput(), 'surname': forms.TextInput()}\n if request.method == 'POST':\n form = AccountBaseForm(request.POST, instance=acc)\n if form.is_valid():\n form.save()\n messages.add_message(request, messages.SUCCESS, u'Zapisano.')\n else:\n form = AccountBaseForm(instance=acc)\n if acc.status != 0:\n return render_to_response('radmin/manage_accounts_acc.html',\n request, **klist(account=acc, selected_user_id=acc.id, form=form))\n else:\n return render_to_response('radmin/manage_accounts_students_acc.html',\n request, account=acc, selected_user_id=acc.id, form=form, page=\n Paginator(User.objects.filter(status=0).order_by('surname',\n 'name'), 30).page(1))\n\n\n@must_be_admin\ndef reset_pwd(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n from random import choice\n randompass = ''.join([choice('1234567890qwertyupasdfghjklzxcvbnm') for\n i in range(7)])\n acc.set_password(randompass)\n messages.add_message(request, messages.SUCCESS, u'Nowe hasło to %s' % (\n randompass,))\n return redirect('/admin/accounts/%s/' % (acc.id,))\n\n\n@must_be_admin\ndef su(request, account_id):\n \"\"\"Login as this user\"\"\"\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n request.logout()\n request.login(acc.login)\n messages.add_message(request, messages.SUCCESS, u'Zalogowano jako %s' %\n (acc.login,))\n return redirect('/')\n\n\n@must_be_admin\ndef delete(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n if acc.login in ('[email protected]', '[email protected]',\n '[email protected]'):\n messages.add_message(request, messages.ERROR,\n u'Nie można usunąć konta wbudowanego')\n return redirect('/admin/accounts/%s/' % (acc.id,))\n if acc.status == 1:\n pass\n messages.add_message(request, messages.SUCCESS, \n u'Konto \"%s %s\" usunięte.' % (acc.name, acc.surname))\n acc.delete()\n return redirect('/admin/accounts/')\n\n\n@must_be_admin\ndef create(request):\n\n\n class NewAccountForm(forms.Form):\n _CHOICE = (1, 'Nauczyciel'), (2, 'Adminstrator')\n login = forms.EmailField(label=u'E-mail')\n name = forms.CharField(label=u'Imię', required=False)\n surname = forms.CharField(label=u'Nazwisko', required=False)\n status = forms.ChoiceField(choices=_CHOICE, initial=1, label=u'Typ')\n if request.method == 'POST':\n form = NewAccountForm(request.POST)\n if form.is_valid():\n from random import choice\n randompass = ''.join([choice(\n '1234567890qwertyupasdfghjklzxcvbnm') for i in range(7)])\n u = User(login=form.cleaned_data['login'], name=form.\n cleaned_data['name'], surname=form.cleaned_data['surname'],\n status=form.cleaned_data['status'])\n u.save()\n u.set_password(randompass)\n messages.add_message(request, messages.SUCCESS, \n u'Konto stworzone. 
Nowe hasło to %s' % (randompass,))\n return redirect('/admin/accounts/%s/' % (u.id,))\n else:\n form = NewAccountForm()\n return render_to_response('radmin/manage_accounts_add.html', request,\n **klist(selected_user_id='create', form=form))\n\n\n<import token>\n<function token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\n@must_be_admin\ndef account(request, account_id):\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n\n\n class AccountBaseForm(forms.ModelForm):\n\n\n class Meta:\n model = User\n fields = ['name', 'surname', 'number']\n widgets = {'name': forms.TextInput(), 'surname': forms.TextInput()}\n if request.method == 'POST':\n form = AccountBaseForm(request.POST, instance=acc)\n if form.is_valid():\n form.save()\n messages.add_message(request, messages.SUCCESS, u'Zapisano.')\n else:\n form = AccountBaseForm(instance=acc)\n if acc.status != 0:\n return render_to_response('radmin/manage_accounts_acc.html',\n request, **klist(account=acc, selected_user_id=acc.id, form=form))\n else:\n return render_to_response('radmin/manage_accounts_students_acc.html',\n request, account=acc, selected_user_id=acc.id, form=form, page=\n Paginator(User.objects.filter(status=0).order_by('surname',\n 'name'), 30).page(1))\n\n\n@must_be_admin\ndef reset_pwd(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n from random import choice\n randompass = ''.join([choice('1234567890qwertyupasdfghjklzxcvbnm') for\n i in range(7)])\n acc.set_password(randompass)\n messages.add_message(request, messages.SUCCESS, u'Nowe hasło to %s' % (\n randompass,))\n return redirect('/admin/accounts/%s/' % (acc.id,))\n\n\n@must_be_admin\ndef su(request, account_id):\n \"\"\"Login as this user\"\"\"\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n request.logout()\n request.login(acc.login)\n messages.add_message(request, messages.SUCCESS, u'Zalogowano jako %s' %\n (acc.login,))\n return redirect('/')\n\n\n@must_be_admin\ndef delete(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n if acc.login in ('[email protected]', '[email protected]',\n '[email protected]'):\n messages.add_message(request, messages.ERROR,\n u'Nie można usunąć konta wbudowanego')\n return redirect('/admin/accounts/%s/' % (acc.id,))\n if acc.status == 1:\n pass\n messages.add_message(request, messages.SUCCESS, \n u'Konto \"%s %s\" usunięte.' % (acc.name, acc.surname))\n acc.delete()\n return redirect('/admin/accounts/')\n\n\n@must_be_admin\ndef create(request):\n\n\n class NewAccountForm(forms.Form):\n _CHOICE = (1, 'Nauczyciel'), (2, 'Adminstrator')\n login = forms.EmailField(label=u'E-mail')\n name = forms.CharField(label=u'Imię', required=False)\n surname = forms.CharField(label=u'Nazwisko', required=False)\n status = forms.ChoiceField(choices=_CHOICE, initial=1, label=u'Typ')\n if request.method == 'POST':\n form = NewAccountForm(request.POST)\n if form.is_valid():\n from random import choice\n randompass = ''.join([choice(\n '1234567890qwertyupasdfghjklzxcvbnm') for i in range(7)])\n u = User(login=form.cleaned_data['login'], name=form.\n cleaned_data['name'], surname=form.cleaned_data['surname'],\n status=form.cleaned_data['status'])\n u.save()\n u.set_password(randompass)\n messages.add_message(request, messages.SUCCESS, \n u'Konto stworzone. 
Nowe hasło to %s' % (randompass,))\n return redirect('/admin/accounts/%s/' % (u.id,))\n else:\n form = NewAccountForm()\n return render_to_response('radmin/manage_accounts_add.html', request,\n **klist(selected_user_id='create', form=form))\n\n\n<import token>\n<function token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\n@must_be_admin\ndef account(request, account_id):\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n\n\n class AccountBaseForm(forms.ModelForm):\n\n\n class Meta:\n model = User\n fields = ['name', 'surname', 'number']\n widgets = {'name': forms.TextInput(), 'surname': forms.TextInput()}\n if request.method == 'POST':\n form = AccountBaseForm(request.POST, instance=acc)\n if form.is_valid():\n form.save()\n messages.add_message(request, messages.SUCCESS, u'Zapisano.')\n else:\n form = AccountBaseForm(instance=acc)\n if acc.status != 0:\n return render_to_response('radmin/manage_accounts_acc.html',\n request, **klist(account=acc, selected_user_id=acc.id, form=form))\n else:\n return render_to_response('radmin/manage_accounts_students_acc.html',\n request, account=acc, selected_user_id=acc.id, form=form, page=\n Paginator(User.objects.filter(status=0).order_by('surname',\n 'name'), 30).page(1))\n\n\n@must_be_admin\ndef reset_pwd(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n from random import choice\n randompass = ''.join([choice('1234567890qwertyupasdfghjklzxcvbnm') for\n i in range(7)])\n acc.set_password(randompass)\n messages.add_message(request, messages.SUCCESS, u'Nowe hasło to %s' % (\n randompass,))\n return redirect('/admin/accounts/%s/' % (acc.id,))\n\n\n<function token>\n\n\n@must_be_admin\ndef delete(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n if acc.login in ('[email protected]', '[email protected]',\n '[email protected]'):\n messages.add_message(request, messages.ERROR,\n u'Nie można usunąć konta wbudowanego')\n return redirect('/admin/accounts/%s/' % (acc.id,))\n if acc.status == 1:\n pass\n messages.add_message(request, messages.SUCCESS, \n u'Konto \"%s %s\" usunięte.' % (acc.name, acc.surname))\n acc.delete()\n return redirect('/admin/accounts/')\n\n\n@must_be_admin\ndef create(request):\n\n\n class NewAccountForm(forms.Form):\n _CHOICE = (1, 'Nauczyciel'), (2, 'Adminstrator')\n login = forms.EmailField(label=u'E-mail')\n name = forms.CharField(label=u'Imię', required=False)\n surname = forms.CharField(label=u'Nazwisko', required=False)\n status = forms.ChoiceField(choices=_CHOICE, initial=1, label=u'Typ')\n if request.method == 'POST':\n form = NewAccountForm(request.POST)\n if form.is_valid():\n from random import choice\n randompass = ''.join([choice(\n '1234567890qwertyupasdfghjklzxcvbnm') for i in range(7)])\n u = User(login=form.cleaned_data['login'], name=form.\n cleaned_data['name'], surname=form.cleaned_data['surname'],\n status=form.cleaned_data['status'])\n u.save()\n u.set_password(randompass)\n messages.add_message(request, messages.SUCCESS, \n u'Konto stworzone. Nowe hasło to %s' % (randompass,))\n return redirect('/admin/accounts/%s/' % (u.id,))\n else:\n form = NewAccountForm()\n return render_to_response('radmin/manage_accounts_add.html', request,\n **klist(selected_user_id='create', form=form))\n\n\n<import token>\n<function token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\n@must_be_admin\ndef account(request, account_id):\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n\n\n class AccountBaseForm(forms.ModelForm):\n\n\n class Meta:\n model = User\n fields = ['name', 'surname', 'number']\n widgets = {'name': forms.TextInput(), 'surname': forms.TextInput()}\n if request.method == 'POST':\n form = AccountBaseForm(request.POST, instance=acc)\n if form.is_valid():\n form.save()\n messages.add_message(request, messages.SUCCESS, u'Zapisano.')\n else:\n form = AccountBaseForm(instance=acc)\n if acc.status != 0:\n return render_to_response('radmin/manage_accounts_acc.html',\n request, **klist(account=acc, selected_user_id=acc.id, form=form))\n else:\n return render_to_response('radmin/manage_accounts_students_acc.html',\n request, account=acc, selected_user_id=acc.id, form=form, page=\n Paginator(User.objects.filter(status=0).order_by('surname',\n 'name'), 30).page(1))\n\n\n@must_be_admin\ndef reset_pwd(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n from random import choice\n randompass = ''.join([choice('1234567890qwertyupasdfghjklzxcvbnm') for\n i in range(7)])\n acc.set_password(randompass)\n messages.add_message(request, messages.SUCCESS, u'Nowe hasło to %s' % (\n randompass,))\n return redirect('/admin/accounts/%s/' % (acc.id,))\n\n\n<function token>\n\n\n@must_be_admin\ndef delete(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n if acc.login in ('[email protected]', '[email protected]',\n '[email protected]'):\n messages.add_message(request, messages.ERROR,\n u'Nie można usunąć konta wbudowanego')\n return redirect('/admin/accounts/%s/' % (acc.id,))\n if acc.status == 1:\n pass\n messages.add_message(request, messages.SUCCESS, \n u'Konto \"%s %s\" usunięte.' % (acc.name, acc.surname))\n acc.delete()\n return redirect('/admin/accounts/')\n\n\n<function token>\n<import token>\n<function token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\n@must_be_admin\ndef account(request, account_id):\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n\n\n class AccountBaseForm(forms.ModelForm):\n\n\n class Meta:\n model = User\n fields = ['name', 'surname', 'number']\n widgets = {'name': forms.TextInput(), 'surname': forms.TextInput()}\n if request.method == 'POST':\n form = AccountBaseForm(request.POST, instance=acc)\n if form.is_valid():\n form.save()\n messages.add_message(request, messages.SUCCESS, u'Zapisano.')\n else:\n form = AccountBaseForm(instance=acc)\n if acc.status != 0:\n return render_to_response('radmin/manage_accounts_acc.html',\n request, **klist(account=acc, selected_user_id=acc.id, form=form))\n else:\n return render_to_response('radmin/manage_accounts_students_acc.html',\n request, account=acc, selected_user_id=acc.id, form=form, page=\n Paginator(User.objects.filter(status=0).order_by('surname',\n 'name'), 30).page(1))\n\n\n<function token>\n<function token>\n\n\n@must_be_admin\ndef delete(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n if acc.login in ('[email protected]', '[email protected]',\n '[email protected]'):\n messages.add_message(request, messages.ERROR,\n u'Nie można usunąć konta wbudowanego')\n return redirect('/admin/accounts/%s/' % (acc.id,))\n if acc.status == 1:\n pass\n messages.add_message(request, messages.SUCCESS, \n u'Konto \"%s %s\" usunięte.' % (acc.name, acc.surname))\n acc.delete()\n return redirect('/admin/accounts/')\n\n\n<function token>\n<import token>\n<function token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\n@must_be_admin\ndef delete(request, account_id):\n if request.method != 'POST':\n return HttpResponse(status=400)\n try:\n acc = User.objects.get(id=int(account_id))\n except:\n raise Http404\n if acc.login in ('[email protected]', '[email protected]',\n '[email protected]'):\n messages.add_message(request, messages.ERROR,\n u'Nie można usunąć konta wbudowanego')\n return redirect('/admin/accounts/%s/' % (acc.id,))\n if acc.status == 1:\n pass\n messages.add_message(request, messages.SUCCESS, \n u'Konto \"%s %s\" usunięte.' % (acc.name, acc.surname))\n acc.delete()\n return redirect('/admin/accounts/')\n\n\n<function token>\n<import token>\n<function token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<import token>\n<function token>\n"
] | false |
925 |
9adff5da4e26088def9f0e32aa712a1f2b0336ba
|
class Step:
def __init__(self, action):
self.action = action
    def __str__(self) -> str:
        # f-string interpolation already calls str(); {{ }} renders literal braces.
        return f'Step: {{action: {self.action}}}'
    def __repr__(self) -> str:
        return self.__str__()
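# Illustrative usage (not in the original file; 'jump' is an arbitrary action):
if __name__ == '__main__':
    print(Step('jump'))  # -> Step: {action: jump}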
|
[
"class Step:\n def __init__(self, action):\n self.action = action\n\n def __str__(self) -> str:\n return f'Step: {{action: {self.action.__str__()}}}'\n\n def __repr__(self) -> str:\n return f'Step: {{action: {self.action.__str__()}}}'\n\n",
"class Step:\n\n def __init__(self, action):\n self.action = action\n\n def __str__(self) ->str:\n return f'Step: {{action: {self.action.__str__()}}}'\n\n def __repr__(self) ->str:\n return f'Step: {{action: {self.action.__str__()}}}'\n",
"class Step:\n <function token>\n\n def __str__(self) ->str:\n return f'Step: {{action: {self.action.__str__()}}}'\n\n def __repr__(self) ->str:\n return f'Step: {{action: {self.action.__str__()}}}'\n",
"class Step:\n <function token>\n\n def __str__(self) ->str:\n return f'Step: {{action: {self.action.__str__()}}}'\n <function token>\n",
"class Step:\n <function token>\n <function token>\n <function token>\n",
"<class token>\n"
] | false |
926 |
668a8005f2f66190d588fb9289293d73a608f767
|
# -*- coding: utf-8 -*-
import random
import re
import os
WORDS = []
def handle(text, mic, profile):
    """Acknowledge the request, then kill the remote omxplayer process."""
    messages1 = ["Naturally Sir ", "Of course Sir ", "I'll get right at it"]
    final = random.choice(messages1)
    mic.say(final)
    # Build "ssh pi@<ip> pkill omxplayer" from the Pi address in the profile.
    command = "ssh pi@" + profile['piip'] + " pkill omxplayer"
    os.system(command)
    mic.say("The music process has successfully been killed")
def isValid(text):
    """
    Returns True if the input asks to kill or stop the alarm, clock or music.
    Arguments:
    text -- user-input, typically transcribed speech
    """
    return bool(re.search(r'\b((kill|stop) the (alarm|clock|music))\b', text, re.IGNORECASE))
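# Illustrative self-check of the trigger matcher (not part of the module):
if __name__ == '__main__':
    assert isValid('Please stop the music')
    assert not isValid('play the music')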
|
[
"# -*- coding: utf-8-*-\nimport random\nimport re\nfrom datetime import datetime, time\nfrom phue import Bridge\nimport os\nimport glob\n\nWORDS = []\n\n\ndef handle(text, mic, profile):\n \n\n\tmessages1 = [\"Naturally Sir \",\"Of course Sir \",\"I'll get right at it\"]\n\tfinal = random.choice(messages1)\n\tmic.say(final)\n\tcommand = \"ssh pi@\"\n\tip = profile['piip']\n\tcommand += ip\n\tcommand += \" pkill omxplayer\"\n\tos.system(command)\n\tmic.say(\"The music process has successfully been killed\")\n\n\n\ndef isValid(text):\n \"\"\"\n Returns True if the input is related to the meaning of life.\n\n Arguments:\n text -- user-input, typically transcribed speech\n \"\"\"\n return bool(re.search(r'\\b((kill|stop) the (alarm|clock|music))\\b', text, re.IGNORECASE))\n\n\n",
"import random\nimport re\nfrom datetime import datetime, time\nfrom phue import Bridge\nimport os\nimport glob\nWORDS = []\n\n\ndef handle(text, mic, profile):\n messages1 = ['Naturally Sir ', 'Of course Sir ', \"I'll get right at it\"]\n final = random.choice(messages1)\n mic.say(final)\n command = 'ssh pi@'\n ip = profile['piip']\n command += ip\n command += ' pkill omxplayer'\n os.system(command)\n mic.say('The music process has successfully been killed')\n\n\ndef isValid(text):\n \"\"\"\n Returns True if the input is related to the meaning of life.\n\n Arguments:\n text -- user-input, typically transcribed speech\n \"\"\"\n return bool(re.search('\\\\b((kill|stop) the (alarm|clock|music))\\\\b',\n text, re.IGNORECASE))\n",
"<import token>\nWORDS = []\n\n\ndef handle(text, mic, profile):\n messages1 = ['Naturally Sir ', 'Of course Sir ', \"I'll get right at it\"]\n final = random.choice(messages1)\n mic.say(final)\n command = 'ssh pi@'\n ip = profile['piip']\n command += ip\n command += ' pkill omxplayer'\n os.system(command)\n mic.say('The music process has successfully been killed')\n\n\ndef isValid(text):\n \"\"\"\n Returns True if the input is related to the meaning of life.\n\n Arguments:\n text -- user-input, typically transcribed speech\n \"\"\"\n return bool(re.search('\\\\b((kill|stop) the (alarm|clock|music))\\\\b',\n text, re.IGNORECASE))\n",
"<import token>\n<assignment token>\n\n\ndef handle(text, mic, profile):\n messages1 = ['Naturally Sir ', 'Of course Sir ', \"I'll get right at it\"]\n final = random.choice(messages1)\n mic.say(final)\n command = 'ssh pi@'\n ip = profile['piip']\n command += ip\n command += ' pkill omxplayer'\n os.system(command)\n mic.say('The music process has successfully been killed')\n\n\ndef isValid(text):\n \"\"\"\n Returns True if the input is related to the meaning of life.\n\n Arguments:\n text -- user-input, typically transcribed speech\n \"\"\"\n return bool(re.search('\\\\b((kill|stop) the (alarm|clock|music))\\\\b',\n text, re.IGNORECASE))\n",
"<import token>\n<assignment token>\n<function token>\n\n\ndef isValid(text):\n \"\"\"\n Returns True if the input is related to the meaning of life.\n\n Arguments:\n text -- user-input, typically transcribed speech\n \"\"\"\n return bool(re.search('\\\\b((kill|stop) the (alarm|clock|music))\\\\b',\n text, re.IGNORECASE))\n",
"<import token>\n<assignment token>\n<function token>\n<function token>\n"
] | false |
927 |
2257f73a290dfd428a874e963c26e51f1c1f1efa
|
# -*- coding: utf-8 -*-
"""The app module, containing the app factory function."""
from flask import Flask, render_template
from flask_cors import CORS
from flask_misaka import Misaka
from flask_mailman import Mail
from flask_talisman import Talisman
from werkzeug.middleware.proxy_fix import ProxyFix
from micawber.providers import bootstrap_basic
from whitenoise import WhiteNoise
from pytz import timezone
from urllib.parse import quote_plus
from dribdat import commands, public, admin
from dribdat.assets import assets # noqa: I005
from dribdat.sso import get_auth_blueprint
from dribdat.extensions import (
hashing,
cache,
db,
login_manager,
migrate,
)
from dribdat.settings import ProdConfig # noqa: I005
from dribdat.utils import timesince
from dribdat.onebox import make_oembedplus
def init_app(config_object=ProdConfig):
"""Define an application factory.
See: http://flask.pocoo.org/docs/patterns/appfactories/
:param config_object: The configuration object to use.
"""
app = Flask(__name__)
app.config.from_object(config_object)
# Set up cross-site access to the API
if app.config['SERVER_CORS']:
CORS(app, resources={r"/api/*": {"origins": "*"}})
app.config['CORS_HEADERS'] = 'Content-Type'
# Set up using an external proxy/static server
if app.config['SERVER_PROXY']:
app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)
else:
# Internally optimize static file hosting
app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')
for static in ('css', 'img', 'js', 'public'):
app.wsgi_app.add_files('dribdat/static/' + static)
register_extensions(app)
register_blueprints(app)
register_oauthhandlers(app)
register_errorhandlers(app)
register_filters(app)
register_loggers(app)
register_shellcontext(app)
register_commands(app)
register_caching(app)
return app
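# How the factory is typically consumed -- a sketch under assumptions: the
# module path and runner below are illustrative, not taken from dribdat:
#   from dribdat.app import init_app
#   application = init_app()  # then serve via a WSGI server such as gunicorn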
def register_extensions(app):
"""Register Flask extensions."""
assets.init_app(app)
hashing.init_app(app)
cache.init_app(app)
db.init_app(app)
login_manager.init_app(app)
migrate.init_app(app, db)
init_mailman(app)
init_talisman(app)
return None
def init_mailman(app):
"""Initialize mailer support."""
if 'MAIL_SERVER' in app.config and app.config['MAIL_SERVER']:
if not app.config['MAIL_DEFAULT_SENDER']:
            app.logger.warning('MAIL_DEFAULT_SENDER is required to send email')
else:
mail = Mail()
mail.init_app(app)
def init_talisman(app):
"""Initialize Talisman support."""
if 'SERVER_SSL' in app.config and app.config['SERVER_SSL']:
Talisman(app,
content_security_policy=app.config['CSP_DIRECTIVES'],
frame_options_allow_from='*')
def register_blueprints(app):
"""Register Flask blueprints."""
app.register_blueprint(public.views.blueprint)
app.register_blueprint(public.project.blueprint)
app.register_blueprint(public.auth.blueprint)
app.register_blueprint(public.api.blueprint)
app.register_blueprint(admin.views.blueprint)
return None
def register_oauthhandlers(app):
"""Set up OAuth handlers based on configuration."""
blueprint = get_auth_blueprint(app)
if blueprint is not None:
app.register_blueprint(blueprint, url_prefix="/oauth")
def register_errorhandlers(app):
"""Register error handlers."""
    def render_error(error):
        """Render error template."""
        # If an HTTPException, pull the `code` attribute; default to 500
        error_code = getattr(error, 'code', 500)
        return render_template('{0}.html'.format(error_code)), error_code
for errcode in [401, 404, 500]:
app.errorhandler(errcode)(render_error)
return None
def register_shellcontext(app):
"""Register shell context objects."""
def shell_context():
"""Shell context objects."""
from dribdat.user.models import User
return {
'db': db,
'User': User}
app.shell_context_processor(shell_context)
def register_commands(app):
"""Register Click commands."""
app.cli.add_command(commands.lint)
app.cli.add_command(commands.clean)
app.cli.add_command(commands.urls)
def register_filters(app):
    """Register filters for templates."""
    # Conversion of Markdown to HTML
    Misaka(app, autolink=True, fenced_code=True,
           strikethrough=True, tables=True)
# Registration of handlers for micawber
app.oembed_providers = bootstrap_basic()
@app.template_filter()
def onebox(value):
return make_oembedplus(
value, app.oembed_providers, maxwidth=600, maxheight=400
)
# Timezone helper
app.tz = timezone(app.config['TIME_ZONE'])
    # Lambda filter for safely URL-quoting image_url values
    app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&=')
# Custom filters
@app.template_filter()
def since_date(value):
return timesince(value)
@app.template_filter()
def until_date(value):
return timesince(value, default="now!", until=True)
    @app.template_filter()
    def format_date(value, format='%d.%m.%Y'):
        if value is None:
            return ''
        return value.strftime(format)
    @app.template_filter()
    def format_datetime(value, format='%d.%m.%Y %H:%M'):
        if value is None:
            return ''
        return value.strftime(format)
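    # Hedged usage note: once registered, these filters are available in Jinja
    # templates, e.g. {{ value|since_date }} or {{ value|format_date }}; the
    # variable name is illustrative, not taken from dribdat's templates.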
def register_loggers(app):
"""Initialize and configure logging."""
if 'DEBUG' in app.config and not app.config['DEBUG']:
import logging
stream_handler = logging.StreamHandler()
app.logger.addHandler(stream_handler)
app.logger.setLevel(logging.INFO)
def register_caching(app):
    """Prevent cached responses in debug."""
    if 'DEBUG' in app.config and app.config['DEBUG']:
        @app.after_request
        def after_request(response):
            response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate, public, max-age=0"
            # Header values should be strings, not ints.
            response.headers["Expires"] = "0"
            response.headers["Pragma"] = "no-cache"
            return response
|
[
"# -*- coding: utf-8 -*-\n\"\"\"The app module, containing the app factory function.\"\"\"\n\nfrom flask import Flask, render_template\nfrom flask_cors import CORS\nfrom flask_misaka import Misaka\nfrom flask_mailman import Mail\nfrom flask_talisman import Talisman\nfrom werkzeug.middleware.proxy_fix import ProxyFix\nfrom micawber.providers import bootstrap_basic\nfrom whitenoise import WhiteNoise\nfrom pytz import timezone\nfrom urllib.parse import quote_plus\nfrom dribdat import commands, public, admin\nfrom dribdat.assets import assets # noqa: I005\nfrom dribdat.sso import get_auth_blueprint\nfrom dribdat.extensions import (\n hashing,\n cache,\n db,\n login_manager,\n migrate,\n)\nfrom dribdat.settings import ProdConfig # noqa: I005\nfrom dribdat.utils import timesince\nfrom dribdat.onebox import make_oembedplus\n\n\ndef init_app(config_object=ProdConfig):\n \"\"\"Define an application factory.\n\n See: http://flask.pocoo.org/docs/patterns/appfactories/\n\n :param config_object: The configuration object to use.\n \"\"\"\n app = Flask(__name__)\n app.config.from_object(config_object)\n\n # Set up cross-site access to the API\n if app.config['SERVER_CORS']:\n CORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}})\n app.config['CORS_HEADERS'] = 'Content-Type'\n\n # Set up using an external proxy/static server\n if app.config['SERVER_PROXY']:\n app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)\n else:\n # Internally optimize static file hosting\n app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')\n for static in ('css', 'img', 'js', 'public'):\n app.wsgi_app.add_files('dribdat/static/' + static)\n\n register_extensions(app)\n register_blueprints(app)\n register_oauthhandlers(app)\n register_errorhandlers(app)\n register_filters(app)\n register_loggers(app)\n register_shellcontext(app)\n register_commands(app)\n register_caching(app)\n return app\n\n\ndef register_extensions(app):\n \"\"\"Register Flask extensions.\"\"\"\n assets.init_app(app)\n hashing.init_app(app)\n cache.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n migrate.init_app(app, db)\n init_mailman(app)\n init_talisman(app)\n return None\n\n\ndef init_mailman(app):\n \"\"\"Initialize mailer support.\"\"\"\n if 'MAIL_SERVER' in app.config and app.config['MAIL_SERVER']:\n if not app.config['MAIL_DEFAULT_SENDER']:\n app.logger.warn('MAIL_DEFAULT_SENDER is required to send email')\n else:\n mail = Mail()\n mail.init_app(app)\n\n\ndef init_talisman(app):\n \"\"\"Initialize Talisman support.\"\"\"\n if 'SERVER_SSL' in app.config and app.config['SERVER_SSL']:\n Talisman(app,\n content_security_policy=app.config['CSP_DIRECTIVES'],\n frame_options_allow_from='*')\n\n\ndef register_blueprints(app):\n \"\"\"Register Flask blueprints.\"\"\"\n app.register_blueprint(public.views.blueprint)\n app.register_blueprint(public.project.blueprint)\n app.register_blueprint(public.auth.blueprint)\n app.register_blueprint(public.api.blueprint)\n app.register_blueprint(admin.views.blueprint)\n return None\n\n\ndef register_oauthhandlers(app):\n \"\"\"Set up OAuth handlers based on configuration.\"\"\"\n blueprint = get_auth_blueprint(app)\n if blueprint is not None:\n app.register_blueprint(blueprint, url_prefix=\"/oauth\")\n\n\ndef register_errorhandlers(app):\n \"\"\"Register error handlers.\"\"\"\n def render_error(error):\n \"\"\"Render error template.\"\"\"\n # If a HTTPException, pull the `code` attribute; default to 500\n error_code = getattr(error, 'code', 500)\n return 
render_template('{0}.html'.format(error_code)), error_code\n for errcode in [401, 404, 500]:\n app.errorhandler(errcode)(render_error)\n return None\n\n\ndef register_shellcontext(app):\n \"\"\"Register shell context objects.\"\"\"\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n from dribdat.user.models import User\n return {\n 'db': db,\n 'User': User}\n\n app.shell_context_processor(shell_context)\n\n\ndef register_commands(app):\n \"\"\"Register Click commands.\"\"\"\n app.cli.add_command(commands.lint)\n app.cli.add_command(commands.clean)\n app.cli.add_command(commands.urls)\n\n\ndef register_filters(app):\n \"\"\"Register filters for templates.\"\"\"\n #\n # Conversion of Markdown to HTML\n Misaka(app, autolink=True, fenced_code=True,\n strikethrough=True, tables=True)\n\n # Registration of handlers for micawber\n app.oembed_providers = bootstrap_basic()\n\n @app.template_filter()\n def onebox(value):\n return make_oembedplus(\n value, app.oembed_providers, maxwidth=600, maxheight=400\n )\n\n # Timezone helper\n app.tz = timezone(app.config['TIME_ZONE'])\n\n # Lambda filters for safe image_url's\n app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&=')\n\n # Custom filters\n @app.template_filter()\n def since_date(value):\n return timesince(value)\n\n @app.template_filter()\n def until_date(value):\n return timesince(value, default=\"now!\", until=True)\n\n @app.template_filter()\n def format_date(value, format='%d.%m.%Y'):\n if value is None: return ''\n return value.strftime(format)\n\n @app.template_filter()\n def format_datetime(value, format='%d.%m.%Y %H:%M'):\n if value is None: return ''\n return value.strftime(format)\n\n\ndef register_loggers(app):\n \"\"\"Initialize and configure logging.\"\"\"\n if 'DEBUG' in app.config and not app.config['DEBUG']:\n import logging\n stream_handler = logging.StreamHandler()\n app.logger.addHandler(stream_handler)\n app.logger.setLevel(logging.INFO)\n\n\ndef register_caching(app):\n \"\"\"Prevent cached responses in debug.\"\"\"\n if 'DEBUG' in app.config and app.config['DEBUG']:\n @app.after_request\n def after_request(response):\n response.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate, public, max-age=0\"\n response.headers[\"Expires\"] = 0\n response.headers[\"Pragma\"] = \"no-cache\"\n return response\n",
"<docstring token>\nfrom flask import Flask, render_template\nfrom flask_cors import CORS\nfrom flask_misaka import Misaka\nfrom flask_mailman import Mail\nfrom flask_talisman import Talisman\nfrom werkzeug.middleware.proxy_fix import ProxyFix\nfrom micawber.providers import bootstrap_basic\nfrom whitenoise import WhiteNoise\nfrom pytz import timezone\nfrom urllib.parse import quote_plus\nfrom dribdat import commands, public, admin\nfrom dribdat.assets import assets\nfrom dribdat.sso import get_auth_blueprint\nfrom dribdat.extensions import hashing, cache, db, login_manager, migrate\nfrom dribdat.settings import ProdConfig\nfrom dribdat.utils import timesince\nfrom dribdat.onebox import make_oembedplus\n\n\ndef init_app(config_object=ProdConfig):\n \"\"\"Define an application factory.\n\n See: http://flask.pocoo.org/docs/patterns/appfactories/\n\n :param config_object: The configuration object to use.\n \"\"\"\n app = Flask(__name__)\n app.config.from_object(config_object)\n if app.config['SERVER_CORS']:\n CORS(app, resources={'/api/*': {'origins': '*'}})\n app.config['CORS_HEADERS'] = 'Content-Type'\n if app.config['SERVER_PROXY']:\n app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)\n else:\n app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')\n for static in ('css', 'img', 'js', 'public'):\n app.wsgi_app.add_files('dribdat/static/' + static)\n register_extensions(app)\n register_blueprints(app)\n register_oauthhandlers(app)\n register_errorhandlers(app)\n register_filters(app)\n register_loggers(app)\n register_shellcontext(app)\n register_commands(app)\n register_caching(app)\n return app\n\n\ndef register_extensions(app):\n \"\"\"Register Flask extensions.\"\"\"\n assets.init_app(app)\n hashing.init_app(app)\n cache.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n migrate.init_app(app, db)\n init_mailman(app)\n init_talisman(app)\n return None\n\n\ndef init_mailman(app):\n \"\"\"Initialize mailer support.\"\"\"\n if 'MAIL_SERVER' in app.config and app.config['MAIL_SERVER']:\n if not app.config['MAIL_DEFAULT_SENDER']:\n app.logger.warn('MAIL_DEFAULT_SENDER is required to send email')\n else:\n mail = Mail()\n mail.init_app(app)\n\n\ndef init_talisman(app):\n \"\"\"Initialize Talisman support.\"\"\"\n if 'SERVER_SSL' in app.config and app.config['SERVER_SSL']:\n Talisman(app, content_security_policy=app.config['CSP_DIRECTIVES'],\n frame_options_allow_from='*')\n\n\ndef register_blueprints(app):\n \"\"\"Register Flask blueprints.\"\"\"\n app.register_blueprint(public.views.blueprint)\n app.register_blueprint(public.project.blueprint)\n app.register_blueprint(public.auth.blueprint)\n app.register_blueprint(public.api.blueprint)\n app.register_blueprint(admin.views.blueprint)\n return None\n\n\ndef register_oauthhandlers(app):\n \"\"\"Set up OAuth handlers based on configuration.\"\"\"\n blueprint = get_auth_blueprint(app)\n if blueprint is not None:\n app.register_blueprint(blueprint, url_prefix='/oauth')\n\n\ndef register_errorhandlers(app):\n \"\"\"Register error handlers.\"\"\"\n\n def render_error(error):\n \"\"\"Render error template.\"\"\"\n error_code = getattr(error, 'code', 500)\n return render_template('{0}.html'.format(error_code)), error_code\n for errcode in [401, 404, 500]:\n app.errorhandler(errcode)(render_error)\n return None\n\n\ndef register_shellcontext(app):\n \"\"\"Register shell context objects.\"\"\"\n\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n from dribdat.user.models import User\n return {'db': db, 'User': User}\n 
app.shell_context_processor(shell_context)\n\n\ndef register_commands(app):\n \"\"\"Register Click commands.\"\"\"\n app.cli.add_command(commands.lint)\n app.cli.add_command(commands.clean)\n app.cli.add_command(commands.urls)\n\n\ndef register_filters(app):\n \"\"\"Register filters for templates.\"\"\"\n Misaka(app, autolink=True, fenced_code=True, strikethrough=True, tables\n =True)\n app.oembed_providers = bootstrap_basic()\n\n @app.template_filter()\n def onebox(value):\n return make_oembedplus(value, app.oembed_providers, maxwidth=600,\n maxheight=400)\n app.tz = timezone(app.config['TIME_ZONE'])\n app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&='\n )\n\n @app.template_filter()\n def since_date(value):\n return timesince(value)\n\n @app.template_filter()\n def until_date(value):\n return timesince(value, default='now!', until=True)\n\n @app.template_filter()\n def format_date(value, format='%d.%m.%Y'):\n if value is None:\n return ''\n return value.strftime(format)\n\n @app.template_filter()\n def format_datetime(value, format='%d.%m.%Y %H:%M'):\n if value is None:\n return ''\n return value.strftime(format)\n\n\ndef register_loggers(app):\n \"\"\"Initialize and configure logging.\"\"\"\n if 'DEBUG' in app.config and not app.config['DEBUG']:\n import logging\n stream_handler = logging.StreamHandler()\n app.logger.addHandler(stream_handler)\n app.logger.setLevel(logging.INFO)\n\n\ndef register_caching(app):\n \"\"\"Prevent cached responses in debug.\"\"\"\n if 'DEBUG' in app.config and app.config['DEBUG']:\n\n @app.after_request\n def after_request(response):\n response.headers['Cache-Control'\n ] = 'no-cache, no-store, must-revalidate, public, max-age=0'\n response.headers['Expires'] = 0\n response.headers['Pragma'] = 'no-cache'\n return response\n",
"<docstring token>\n<import token>\n\n\ndef init_app(config_object=ProdConfig):\n \"\"\"Define an application factory.\n\n See: http://flask.pocoo.org/docs/patterns/appfactories/\n\n :param config_object: The configuration object to use.\n \"\"\"\n app = Flask(__name__)\n app.config.from_object(config_object)\n if app.config['SERVER_CORS']:\n CORS(app, resources={'/api/*': {'origins': '*'}})\n app.config['CORS_HEADERS'] = 'Content-Type'\n if app.config['SERVER_PROXY']:\n app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)\n else:\n app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')\n for static in ('css', 'img', 'js', 'public'):\n app.wsgi_app.add_files('dribdat/static/' + static)\n register_extensions(app)\n register_blueprints(app)\n register_oauthhandlers(app)\n register_errorhandlers(app)\n register_filters(app)\n register_loggers(app)\n register_shellcontext(app)\n register_commands(app)\n register_caching(app)\n return app\n\n\ndef register_extensions(app):\n \"\"\"Register Flask extensions.\"\"\"\n assets.init_app(app)\n hashing.init_app(app)\n cache.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n migrate.init_app(app, db)\n init_mailman(app)\n init_talisman(app)\n return None\n\n\ndef init_mailman(app):\n \"\"\"Initialize mailer support.\"\"\"\n if 'MAIL_SERVER' in app.config and app.config['MAIL_SERVER']:\n if not app.config['MAIL_DEFAULT_SENDER']:\n app.logger.warn('MAIL_DEFAULT_SENDER is required to send email')\n else:\n mail = Mail()\n mail.init_app(app)\n\n\ndef init_talisman(app):\n \"\"\"Initialize Talisman support.\"\"\"\n if 'SERVER_SSL' in app.config and app.config['SERVER_SSL']:\n Talisman(app, content_security_policy=app.config['CSP_DIRECTIVES'],\n frame_options_allow_from='*')\n\n\ndef register_blueprints(app):\n \"\"\"Register Flask blueprints.\"\"\"\n app.register_blueprint(public.views.blueprint)\n app.register_blueprint(public.project.blueprint)\n app.register_blueprint(public.auth.blueprint)\n app.register_blueprint(public.api.blueprint)\n app.register_blueprint(admin.views.blueprint)\n return None\n\n\ndef register_oauthhandlers(app):\n \"\"\"Set up OAuth handlers based on configuration.\"\"\"\n blueprint = get_auth_blueprint(app)\n if blueprint is not None:\n app.register_blueprint(blueprint, url_prefix='/oauth')\n\n\ndef register_errorhandlers(app):\n \"\"\"Register error handlers.\"\"\"\n\n def render_error(error):\n \"\"\"Render error template.\"\"\"\n error_code = getattr(error, 'code', 500)\n return render_template('{0}.html'.format(error_code)), error_code\n for errcode in [401, 404, 500]:\n app.errorhandler(errcode)(render_error)\n return None\n\n\ndef register_shellcontext(app):\n \"\"\"Register shell context objects.\"\"\"\n\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n from dribdat.user.models import User\n return {'db': db, 'User': User}\n app.shell_context_processor(shell_context)\n\n\ndef register_commands(app):\n \"\"\"Register Click commands.\"\"\"\n app.cli.add_command(commands.lint)\n app.cli.add_command(commands.clean)\n app.cli.add_command(commands.urls)\n\n\ndef register_filters(app):\n \"\"\"Register filters for templates.\"\"\"\n Misaka(app, autolink=True, fenced_code=True, strikethrough=True, tables\n =True)\n app.oembed_providers = bootstrap_basic()\n\n @app.template_filter()\n def onebox(value):\n return make_oembedplus(value, app.oembed_providers, maxwidth=600,\n maxheight=400)\n app.tz = timezone(app.config['TIME_ZONE'])\n app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or 
'', ':/?&='\n )\n\n @app.template_filter()\n def since_date(value):\n return timesince(value)\n\n @app.template_filter()\n def until_date(value):\n return timesince(value, default='now!', until=True)\n\n @app.template_filter()\n def format_date(value, format='%d.%m.%Y'):\n if value is None:\n return ''\n return value.strftime(format)\n\n @app.template_filter()\n def format_datetime(value, format='%d.%m.%Y %H:%M'):\n if value is None:\n return ''\n return value.strftime(format)\n\n\ndef register_loggers(app):\n \"\"\"Initialize and configure logging.\"\"\"\n if 'DEBUG' in app.config and not app.config['DEBUG']:\n import logging\n stream_handler = logging.StreamHandler()\n app.logger.addHandler(stream_handler)\n app.logger.setLevel(logging.INFO)\n\n\ndef register_caching(app):\n \"\"\"Prevent cached responses in debug.\"\"\"\n if 'DEBUG' in app.config and app.config['DEBUG']:\n\n @app.after_request\n def after_request(response):\n response.headers['Cache-Control'\n ] = 'no-cache, no-store, must-revalidate, public, max-age=0'\n response.headers['Expires'] = 0\n response.headers['Pragma'] = 'no-cache'\n return response\n",
"<docstring token>\n<import token>\n\n\ndef init_app(config_object=ProdConfig):\n \"\"\"Define an application factory.\n\n See: http://flask.pocoo.org/docs/patterns/appfactories/\n\n :param config_object: The configuration object to use.\n \"\"\"\n app = Flask(__name__)\n app.config.from_object(config_object)\n if app.config['SERVER_CORS']:\n CORS(app, resources={'/api/*': {'origins': '*'}})\n app.config['CORS_HEADERS'] = 'Content-Type'\n if app.config['SERVER_PROXY']:\n app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)\n else:\n app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')\n for static in ('css', 'img', 'js', 'public'):\n app.wsgi_app.add_files('dribdat/static/' + static)\n register_extensions(app)\n register_blueprints(app)\n register_oauthhandlers(app)\n register_errorhandlers(app)\n register_filters(app)\n register_loggers(app)\n register_shellcontext(app)\n register_commands(app)\n register_caching(app)\n return app\n\n\ndef register_extensions(app):\n \"\"\"Register Flask extensions.\"\"\"\n assets.init_app(app)\n hashing.init_app(app)\n cache.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n migrate.init_app(app, db)\n init_mailman(app)\n init_talisman(app)\n return None\n\n\ndef init_mailman(app):\n \"\"\"Initialize mailer support.\"\"\"\n if 'MAIL_SERVER' in app.config and app.config['MAIL_SERVER']:\n if not app.config['MAIL_DEFAULT_SENDER']:\n app.logger.warn('MAIL_DEFAULT_SENDER is required to send email')\n else:\n mail = Mail()\n mail.init_app(app)\n\n\ndef init_talisman(app):\n \"\"\"Initialize Talisman support.\"\"\"\n if 'SERVER_SSL' in app.config and app.config['SERVER_SSL']:\n Talisman(app, content_security_policy=app.config['CSP_DIRECTIVES'],\n frame_options_allow_from='*')\n\n\ndef register_blueprints(app):\n \"\"\"Register Flask blueprints.\"\"\"\n app.register_blueprint(public.views.blueprint)\n app.register_blueprint(public.project.blueprint)\n app.register_blueprint(public.auth.blueprint)\n app.register_blueprint(public.api.blueprint)\n app.register_blueprint(admin.views.blueprint)\n return None\n\n\n<function token>\n\n\ndef register_errorhandlers(app):\n \"\"\"Register error handlers.\"\"\"\n\n def render_error(error):\n \"\"\"Render error template.\"\"\"\n error_code = getattr(error, 'code', 500)\n return render_template('{0}.html'.format(error_code)), error_code\n for errcode in [401, 404, 500]:\n app.errorhandler(errcode)(render_error)\n return None\n\n\ndef register_shellcontext(app):\n \"\"\"Register shell context objects.\"\"\"\n\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n from dribdat.user.models import User\n return {'db': db, 'User': User}\n app.shell_context_processor(shell_context)\n\n\ndef register_commands(app):\n \"\"\"Register Click commands.\"\"\"\n app.cli.add_command(commands.lint)\n app.cli.add_command(commands.clean)\n app.cli.add_command(commands.urls)\n\n\ndef register_filters(app):\n \"\"\"Register filters for templates.\"\"\"\n Misaka(app, autolink=True, fenced_code=True, strikethrough=True, tables\n =True)\n app.oembed_providers = bootstrap_basic()\n\n @app.template_filter()\n def onebox(value):\n return make_oembedplus(value, app.oembed_providers, maxwidth=600,\n maxheight=400)\n app.tz = timezone(app.config['TIME_ZONE'])\n app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&='\n )\n\n @app.template_filter()\n def since_date(value):\n return timesince(value)\n\n @app.template_filter()\n def until_date(value):\n return timesince(value, default='now!', 
until=True)\n\n @app.template_filter()\n def format_date(value, format='%d.%m.%Y'):\n if value is None:\n return ''\n return value.strftime(format)\n\n @app.template_filter()\n def format_datetime(value, format='%d.%m.%Y %H:%M'):\n if value is None:\n return ''\n return value.strftime(format)\n\n\ndef register_loggers(app):\n \"\"\"Initialize and configure logging.\"\"\"\n if 'DEBUG' in app.config and not app.config['DEBUG']:\n import logging\n stream_handler = logging.StreamHandler()\n app.logger.addHandler(stream_handler)\n app.logger.setLevel(logging.INFO)\n\n\ndef register_caching(app):\n \"\"\"Prevent cached responses in debug.\"\"\"\n if 'DEBUG' in app.config and app.config['DEBUG']:\n\n @app.after_request\n def after_request(response):\n response.headers['Cache-Control'\n ] = 'no-cache, no-store, must-revalidate, public, max-age=0'\n response.headers['Expires'] = 0\n response.headers['Pragma'] = 'no-cache'\n return response\n",
"<docstring token>\n<import token>\n\n\ndef init_app(config_object=ProdConfig):\n \"\"\"Define an application factory.\n\n See: http://flask.pocoo.org/docs/patterns/appfactories/\n\n :param config_object: The configuration object to use.\n \"\"\"\n app = Flask(__name__)\n app.config.from_object(config_object)\n if app.config['SERVER_CORS']:\n CORS(app, resources={'/api/*': {'origins': '*'}})\n app.config['CORS_HEADERS'] = 'Content-Type'\n if app.config['SERVER_PROXY']:\n app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)\n else:\n app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')\n for static in ('css', 'img', 'js', 'public'):\n app.wsgi_app.add_files('dribdat/static/' + static)\n register_extensions(app)\n register_blueprints(app)\n register_oauthhandlers(app)\n register_errorhandlers(app)\n register_filters(app)\n register_loggers(app)\n register_shellcontext(app)\n register_commands(app)\n register_caching(app)\n return app\n\n\ndef register_extensions(app):\n \"\"\"Register Flask extensions.\"\"\"\n assets.init_app(app)\n hashing.init_app(app)\n cache.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n migrate.init_app(app, db)\n init_mailman(app)\n init_talisman(app)\n return None\n\n\ndef init_mailman(app):\n \"\"\"Initialize mailer support.\"\"\"\n if 'MAIL_SERVER' in app.config and app.config['MAIL_SERVER']:\n if not app.config['MAIL_DEFAULT_SENDER']:\n app.logger.warn('MAIL_DEFAULT_SENDER is required to send email')\n else:\n mail = Mail()\n mail.init_app(app)\n\n\n<function token>\n\n\ndef register_blueprints(app):\n \"\"\"Register Flask blueprints.\"\"\"\n app.register_blueprint(public.views.blueprint)\n app.register_blueprint(public.project.blueprint)\n app.register_blueprint(public.auth.blueprint)\n app.register_blueprint(public.api.blueprint)\n app.register_blueprint(admin.views.blueprint)\n return None\n\n\n<function token>\n\n\ndef register_errorhandlers(app):\n \"\"\"Register error handlers.\"\"\"\n\n def render_error(error):\n \"\"\"Render error template.\"\"\"\n error_code = getattr(error, 'code', 500)\n return render_template('{0}.html'.format(error_code)), error_code\n for errcode in [401, 404, 500]:\n app.errorhandler(errcode)(render_error)\n return None\n\n\ndef register_shellcontext(app):\n \"\"\"Register shell context objects.\"\"\"\n\n def shell_context():\n \"\"\"Shell context objects.\"\"\"\n from dribdat.user.models import User\n return {'db': db, 'User': User}\n app.shell_context_processor(shell_context)\n\n\ndef register_commands(app):\n \"\"\"Register Click commands.\"\"\"\n app.cli.add_command(commands.lint)\n app.cli.add_command(commands.clean)\n app.cli.add_command(commands.urls)\n\n\ndef register_filters(app):\n \"\"\"Register filters for templates.\"\"\"\n Misaka(app, autolink=True, fenced_code=True, strikethrough=True, tables\n =True)\n app.oembed_providers = bootstrap_basic()\n\n @app.template_filter()\n def onebox(value):\n return make_oembedplus(value, app.oembed_providers, maxwidth=600,\n maxheight=400)\n app.tz = timezone(app.config['TIME_ZONE'])\n app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&='\n )\n\n @app.template_filter()\n def since_date(value):\n return timesince(value)\n\n @app.template_filter()\n def until_date(value):\n return timesince(value, default='now!', until=True)\n\n @app.template_filter()\n def format_date(value, format='%d.%m.%Y'):\n if value is None:\n return ''\n return value.strftime(format)\n\n @app.template_filter()\n def format_datetime(value, format='%d.%m.%Y 
%H:%M'):\n if value is None:\n return ''\n return value.strftime(format)\n\n\ndef register_loggers(app):\n \"\"\"Initialize and configure logging.\"\"\"\n if 'DEBUG' in app.config and not app.config['DEBUG']:\n import logging\n stream_handler = logging.StreamHandler()\n app.logger.addHandler(stream_handler)\n app.logger.setLevel(logging.INFO)\n\n\ndef register_caching(app):\n \"\"\"Prevent cached responses in debug.\"\"\"\n if 'DEBUG' in app.config and app.config['DEBUG']:\n\n @app.after_request\n def after_request(response):\n response.headers['Cache-Control'\n ] = 'no-cache, no-store, must-revalidate, public, max-age=0'\n response.headers['Expires'] = 0\n response.headers['Pragma'] = 'no-cache'\n return response\n",
"<docstring token>\n<import token>\n\n\ndef init_app(config_object=ProdConfig):\n \"\"\"Define an application factory.\n\n See: http://flask.pocoo.org/docs/patterns/appfactories/\n\n :param config_object: The configuration object to use.\n \"\"\"\n app = Flask(__name__)\n app.config.from_object(config_object)\n if app.config['SERVER_CORS']:\n CORS(app, resources={'/api/*': {'origins': '*'}})\n app.config['CORS_HEADERS'] = 'Content-Type'\n if app.config['SERVER_PROXY']:\n app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)\n else:\n app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')\n for static in ('css', 'img', 'js', 'public'):\n app.wsgi_app.add_files('dribdat/static/' + static)\n register_extensions(app)\n register_blueprints(app)\n register_oauthhandlers(app)\n register_errorhandlers(app)\n register_filters(app)\n register_loggers(app)\n register_shellcontext(app)\n register_commands(app)\n register_caching(app)\n return app\n\n\ndef register_extensions(app):\n \"\"\"Register Flask extensions.\"\"\"\n assets.init_app(app)\n hashing.init_app(app)\n cache.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n migrate.init_app(app, db)\n init_mailman(app)\n init_talisman(app)\n return None\n\n\ndef init_mailman(app):\n \"\"\"Initialize mailer support.\"\"\"\n if 'MAIL_SERVER' in app.config and app.config['MAIL_SERVER']:\n if not app.config['MAIL_DEFAULT_SENDER']:\n app.logger.warn('MAIL_DEFAULT_SENDER is required to send email')\n else:\n mail = Mail()\n mail.init_app(app)\n\n\n<function token>\n\n\ndef register_blueprints(app):\n \"\"\"Register Flask blueprints.\"\"\"\n app.register_blueprint(public.views.blueprint)\n app.register_blueprint(public.project.blueprint)\n app.register_blueprint(public.auth.blueprint)\n app.register_blueprint(public.api.blueprint)\n app.register_blueprint(admin.views.blueprint)\n return None\n\n\n<function token>\n\n\ndef register_errorhandlers(app):\n \"\"\"Register error handlers.\"\"\"\n\n def render_error(error):\n \"\"\"Render error template.\"\"\"\n error_code = getattr(error, 'code', 500)\n return render_template('{0}.html'.format(error_code)), error_code\n for errcode in [401, 404, 500]:\n app.errorhandler(errcode)(render_error)\n return None\n\n\n<function token>\n\n\ndef register_commands(app):\n \"\"\"Register Click commands.\"\"\"\n app.cli.add_command(commands.lint)\n app.cli.add_command(commands.clean)\n app.cli.add_command(commands.urls)\n\n\ndef register_filters(app):\n \"\"\"Register filters for templates.\"\"\"\n Misaka(app, autolink=True, fenced_code=True, strikethrough=True, tables\n =True)\n app.oembed_providers = bootstrap_basic()\n\n @app.template_filter()\n def onebox(value):\n return make_oembedplus(value, app.oembed_providers, maxwidth=600,\n maxheight=400)\n app.tz = timezone(app.config['TIME_ZONE'])\n app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&='\n )\n\n @app.template_filter()\n def since_date(value):\n return timesince(value)\n\n @app.template_filter()\n def until_date(value):\n return timesince(value, default='now!', until=True)\n\n @app.template_filter()\n def format_date(value, format='%d.%m.%Y'):\n if value is None:\n return ''\n return value.strftime(format)\n\n @app.template_filter()\n def format_datetime(value, format='%d.%m.%Y %H:%M'):\n if value is None:\n return ''\n return value.strftime(format)\n\n\ndef register_loggers(app):\n \"\"\"Initialize and configure logging.\"\"\"\n if 'DEBUG' in app.config and not app.config['DEBUG']:\n import logging\n stream_handler = 
logging.StreamHandler()\n app.logger.addHandler(stream_handler)\n app.logger.setLevel(logging.INFO)\n\n\ndef register_caching(app):\n \"\"\"Prevent cached responses in debug.\"\"\"\n if 'DEBUG' in app.config and app.config['DEBUG']:\n\n @app.after_request\n def after_request(response):\n response.headers['Cache-Control'\n ] = 'no-cache, no-store, must-revalidate, public, max-age=0'\n response.headers['Expires'] = 0\n response.headers['Pragma'] = 'no-cache'\n return response\n",
"<docstring token>\n<import token>\n\n\ndef init_app(config_object=ProdConfig):\n \"\"\"Define an application factory.\n\n See: http://flask.pocoo.org/docs/patterns/appfactories/\n\n :param config_object: The configuration object to use.\n \"\"\"\n app = Flask(__name__)\n app.config.from_object(config_object)\n if app.config['SERVER_CORS']:\n CORS(app, resources={'/api/*': {'origins': '*'}})\n app.config['CORS_HEADERS'] = 'Content-Type'\n if app.config['SERVER_PROXY']:\n app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)\n else:\n app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')\n for static in ('css', 'img', 'js', 'public'):\n app.wsgi_app.add_files('dribdat/static/' + static)\n register_extensions(app)\n register_blueprints(app)\n register_oauthhandlers(app)\n register_errorhandlers(app)\n register_filters(app)\n register_loggers(app)\n register_shellcontext(app)\n register_commands(app)\n register_caching(app)\n return app\n\n\ndef register_extensions(app):\n \"\"\"Register Flask extensions.\"\"\"\n assets.init_app(app)\n hashing.init_app(app)\n cache.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n migrate.init_app(app, db)\n init_mailman(app)\n init_talisman(app)\n return None\n\n\ndef init_mailman(app):\n \"\"\"Initialize mailer support.\"\"\"\n if 'MAIL_SERVER' in app.config and app.config['MAIL_SERVER']:\n if not app.config['MAIL_DEFAULT_SENDER']:\n app.logger.warn('MAIL_DEFAULT_SENDER is required to send email')\n else:\n mail = Mail()\n mail.init_app(app)\n\n\n<function token>\n\n\ndef register_blueprints(app):\n \"\"\"Register Flask blueprints.\"\"\"\n app.register_blueprint(public.views.blueprint)\n app.register_blueprint(public.project.blueprint)\n app.register_blueprint(public.auth.blueprint)\n app.register_blueprint(public.api.blueprint)\n app.register_blueprint(admin.views.blueprint)\n return None\n\n\n<function token>\n\n\ndef register_errorhandlers(app):\n \"\"\"Register error handlers.\"\"\"\n\n def render_error(error):\n \"\"\"Render error template.\"\"\"\n error_code = getattr(error, 'code', 500)\n return render_template('{0}.html'.format(error_code)), error_code\n for errcode in [401, 404, 500]:\n app.errorhandler(errcode)(render_error)\n return None\n\n\n<function token>\n\n\ndef register_commands(app):\n \"\"\"Register Click commands.\"\"\"\n app.cli.add_command(commands.lint)\n app.cli.add_command(commands.clean)\n app.cli.add_command(commands.urls)\n\n\ndef register_filters(app):\n \"\"\"Register filters for templates.\"\"\"\n Misaka(app, autolink=True, fenced_code=True, strikethrough=True, tables\n =True)\n app.oembed_providers = bootstrap_basic()\n\n @app.template_filter()\n def onebox(value):\n return make_oembedplus(value, app.oembed_providers, maxwidth=600,\n maxheight=400)\n app.tz = timezone(app.config['TIME_ZONE'])\n app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&='\n )\n\n @app.template_filter()\n def since_date(value):\n return timesince(value)\n\n @app.template_filter()\n def until_date(value):\n return timesince(value, default='now!', until=True)\n\n @app.template_filter()\n def format_date(value, format='%d.%m.%Y'):\n if value is None:\n return ''\n return value.strftime(format)\n\n @app.template_filter()\n def format_datetime(value, format='%d.%m.%Y %H:%M'):\n if value is None:\n return ''\n return value.strftime(format)\n\n\ndef register_loggers(app):\n \"\"\"Initialize and configure logging.\"\"\"\n if 'DEBUG' in app.config and not app.config['DEBUG']:\n import logging\n stream_handler = 
logging.StreamHandler()\n app.logger.addHandler(stream_handler)\n app.logger.setLevel(logging.INFO)\n\n\n<function token>\n",
"<docstring token>\n<import token>\n\n\ndef init_app(config_object=ProdConfig):\n \"\"\"Define an application factory.\n\n See: http://flask.pocoo.org/docs/patterns/appfactories/\n\n :param config_object: The configuration object to use.\n \"\"\"\n app = Flask(__name__)\n app.config.from_object(config_object)\n if app.config['SERVER_CORS']:\n CORS(app, resources={'/api/*': {'origins': '*'}})\n app.config['CORS_HEADERS'] = 'Content-Type'\n if app.config['SERVER_PROXY']:\n app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)\n else:\n app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')\n for static in ('css', 'img', 'js', 'public'):\n app.wsgi_app.add_files('dribdat/static/' + static)\n register_extensions(app)\n register_blueprints(app)\n register_oauthhandlers(app)\n register_errorhandlers(app)\n register_filters(app)\n register_loggers(app)\n register_shellcontext(app)\n register_commands(app)\n register_caching(app)\n return app\n\n\ndef register_extensions(app):\n \"\"\"Register Flask extensions.\"\"\"\n assets.init_app(app)\n hashing.init_app(app)\n cache.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n migrate.init_app(app, db)\n init_mailman(app)\n init_talisman(app)\n return None\n\n\ndef init_mailman(app):\n \"\"\"Initialize mailer support.\"\"\"\n if 'MAIL_SERVER' in app.config and app.config['MAIL_SERVER']:\n if not app.config['MAIL_DEFAULT_SENDER']:\n app.logger.warn('MAIL_DEFAULT_SENDER is required to send email')\n else:\n mail = Mail()\n mail.init_app(app)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef register_errorhandlers(app):\n \"\"\"Register error handlers.\"\"\"\n\n def render_error(error):\n \"\"\"Render error template.\"\"\"\n error_code = getattr(error, 'code', 500)\n return render_template('{0}.html'.format(error_code)), error_code\n for errcode in [401, 404, 500]:\n app.errorhandler(errcode)(render_error)\n return None\n\n\n<function token>\n\n\ndef register_commands(app):\n \"\"\"Register Click commands.\"\"\"\n app.cli.add_command(commands.lint)\n app.cli.add_command(commands.clean)\n app.cli.add_command(commands.urls)\n\n\ndef register_filters(app):\n \"\"\"Register filters for templates.\"\"\"\n Misaka(app, autolink=True, fenced_code=True, strikethrough=True, tables\n =True)\n app.oembed_providers = bootstrap_basic()\n\n @app.template_filter()\n def onebox(value):\n return make_oembedplus(value, app.oembed_providers, maxwidth=600,\n maxheight=400)\n app.tz = timezone(app.config['TIME_ZONE'])\n app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&='\n )\n\n @app.template_filter()\n def since_date(value):\n return timesince(value)\n\n @app.template_filter()\n def until_date(value):\n return timesince(value, default='now!', until=True)\n\n @app.template_filter()\n def format_date(value, format='%d.%m.%Y'):\n if value is None:\n return ''\n return value.strftime(format)\n\n @app.template_filter()\n def format_datetime(value, format='%d.%m.%Y %H:%M'):\n if value is None:\n return ''\n return value.strftime(format)\n\n\ndef register_loggers(app):\n \"\"\"Initialize and configure logging.\"\"\"\n if 'DEBUG' in app.config and not app.config['DEBUG']:\n import logging\n stream_handler = logging.StreamHandler()\n app.logger.addHandler(stream_handler)\n app.logger.setLevel(logging.INFO)\n\n\n<function token>\n",
"<docstring token>\n<import token>\n\n\ndef init_app(config_object=ProdConfig):\n \"\"\"Define an application factory.\n\n See: http://flask.pocoo.org/docs/patterns/appfactories/\n\n :param config_object: The configuration object to use.\n \"\"\"\n app = Flask(__name__)\n app.config.from_object(config_object)\n if app.config['SERVER_CORS']:\n CORS(app, resources={'/api/*': {'origins': '*'}})\n app.config['CORS_HEADERS'] = 'Content-Type'\n if app.config['SERVER_PROXY']:\n app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)\n else:\n app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')\n for static in ('css', 'img', 'js', 'public'):\n app.wsgi_app.add_files('dribdat/static/' + static)\n register_extensions(app)\n register_blueprints(app)\n register_oauthhandlers(app)\n register_errorhandlers(app)\n register_filters(app)\n register_loggers(app)\n register_shellcontext(app)\n register_commands(app)\n register_caching(app)\n return app\n\n\ndef register_extensions(app):\n \"\"\"Register Flask extensions.\"\"\"\n assets.init_app(app)\n hashing.init_app(app)\n cache.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n migrate.init_app(app, db)\n init_mailman(app)\n init_talisman(app)\n return None\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef register_errorhandlers(app):\n \"\"\"Register error handlers.\"\"\"\n\n def render_error(error):\n \"\"\"Render error template.\"\"\"\n error_code = getattr(error, 'code', 500)\n return render_template('{0}.html'.format(error_code)), error_code\n for errcode in [401, 404, 500]:\n app.errorhandler(errcode)(render_error)\n return None\n\n\n<function token>\n\n\ndef register_commands(app):\n \"\"\"Register Click commands.\"\"\"\n app.cli.add_command(commands.lint)\n app.cli.add_command(commands.clean)\n app.cli.add_command(commands.urls)\n\n\ndef register_filters(app):\n \"\"\"Register filters for templates.\"\"\"\n Misaka(app, autolink=True, fenced_code=True, strikethrough=True, tables\n =True)\n app.oembed_providers = bootstrap_basic()\n\n @app.template_filter()\n def onebox(value):\n return make_oembedplus(value, app.oembed_providers, maxwidth=600,\n maxheight=400)\n app.tz = timezone(app.config['TIME_ZONE'])\n app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&='\n )\n\n @app.template_filter()\n def since_date(value):\n return timesince(value)\n\n @app.template_filter()\n def until_date(value):\n return timesince(value, default='now!', until=True)\n\n @app.template_filter()\n def format_date(value, format='%d.%m.%Y'):\n if value is None:\n return ''\n return value.strftime(format)\n\n @app.template_filter()\n def format_datetime(value, format='%d.%m.%Y %H:%M'):\n if value is None:\n return ''\n return value.strftime(format)\n\n\ndef register_loggers(app):\n \"\"\"Initialize and configure logging.\"\"\"\n if 'DEBUG' in app.config and not app.config['DEBUG']:\n import logging\n stream_handler = logging.StreamHandler()\n app.logger.addHandler(stream_handler)\n app.logger.setLevel(logging.INFO)\n\n\n<function token>\n",
"<docstring token>\n<import token>\n\n\ndef init_app(config_object=ProdConfig):\n \"\"\"Define an application factory.\n\n See: http://flask.pocoo.org/docs/patterns/appfactories/\n\n :param config_object: The configuration object to use.\n \"\"\"\n app = Flask(__name__)\n app.config.from_object(config_object)\n if app.config['SERVER_CORS']:\n CORS(app, resources={'/api/*': {'origins': '*'}})\n app.config['CORS_HEADERS'] = 'Content-Type'\n if app.config['SERVER_PROXY']:\n app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)\n else:\n app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')\n for static in ('css', 'img', 'js', 'public'):\n app.wsgi_app.add_files('dribdat/static/' + static)\n register_extensions(app)\n register_blueprints(app)\n register_oauthhandlers(app)\n register_errorhandlers(app)\n register_filters(app)\n register_loggers(app)\n register_shellcontext(app)\n register_commands(app)\n register_caching(app)\n return app\n\n\ndef register_extensions(app):\n \"\"\"Register Flask extensions.\"\"\"\n assets.init_app(app)\n hashing.init_app(app)\n cache.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n migrate.init_app(app, db)\n init_mailman(app)\n init_talisman(app)\n return None\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef register_commands(app):\n \"\"\"Register Click commands.\"\"\"\n app.cli.add_command(commands.lint)\n app.cli.add_command(commands.clean)\n app.cli.add_command(commands.urls)\n\n\ndef register_filters(app):\n \"\"\"Register filters for templates.\"\"\"\n Misaka(app, autolink=True, fenced_code=True, strikethrough=True, tables\n =True)\n app.oembed_providers = bootstrap_basic()\n\n @app.template_filter()\n def onebox(value):\n return make_oembedplus(value, app.oembed_providers, maxwidth=600,\n maxheight=400)\n app.tz = timezone(app.config['TIME_ZONE'])\n app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&='\n )\n\n @app.template_filter()\n def since_date(value):\n return timesince(value)\n\n @app.template_filter()\n def until_date(value):\n return timesince(value, default='now!', until=True)\n\n @app.template_filter()\n def format_date(value, format='%d.%m.%Y'):\n if value is None:\n return ''\n return value.strftime(format)\n\n @app.template_filter()\n def format_datetime(value, format='%d.%m.%Y %H:%M'):\n if value is None:\n return ''\n return value.strftime(format)\n\n\ndef register_loggers(app):\n \"\"\"Initialize and configure logging.\"\"\"\n if 'DEBUG' in app.config and not app.config['DEBUG']:\n import logging\n stream_handler = logging.StreamHandler()\n app.logger.addHandler(stream_handler)\n app.logger.setLevel(logging.INFO)\n\n\n<function token>\n",
"<docstring token>\n<import token>\n\n\ndef init_app(config_object=ProdConfig):\n \"\"\"Define an application factory.\n\n See: http://flask.pocoo.org/docs/patterns/appfactories/\n\n :param config_object: The configuration object to use.\n \"\"\"\n app = Flask(__name__)\n app.config.from_object(config_object)\n if app.config['SERVER_CORS']:\n CORS(app, resources={'/api/*': {'origins': '*'}})\n app.config['CORS_HEADERS'] = 'Content-Type'\n if app.config['SERVER_PROXY']:\n app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)\n else:\n app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')\n for static in ('css', 'img', 'js', 'public'):\n app.wsgi_app.add_files('dribdat/static/' + static)\n register_extensions(app)\n register_blueprints(app)\n register_oauthhandlers(app)\n register_errorhandlers(app)\n register_filters(app)\n register_loggers(app)\n register_shellcontext(app)\n register_commands(app)\n register_caching(app)\n return app\n\n\ndef register_extensions(app):\n \"\"\"Register Flask extensions.\"\"\"\n assets.init_app(app)\n hashing.init_app(app)\n cache.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n migrate.init_app(app, db)\n init_mailman(app)\n init_talisman(app)\n return None\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef register_filters(app):\n \"\"\"Register filters for templates.\"\"\"\n Misaka(app, autolink=True, fenced_code=True, strikethrough=True, tables\n =True)\n app.oembed_providers = bootstrap_basic()\n\n @app.template_filter()\n def onebox(value):\n return make_oembedplus(value, app.oembed_providers, maxwidth=600,\n maxheight=400)\n app.tz = timezone(app.config['TIME_ZONE'])\n app.jinja_env.filters['quote_plus'] = lambda u: quote_plus(u or '', ':/?&='\n )\n\n @app.template_filter()\n def since_date(value):\n return timesince(value)\n\n @app.template_filter()\n def until_date(value):\n return timesince(value, default='now!', until=True)\n\n @app.template_filter()\n def format_date(value, format='%d.%m.%Y'):\n if value is None:\n return ''\n return value.strftime(format)\n\n @app.template_filter()\n def format_datetime(value, format='%d.%m.%Y %H:%M'):\n if value is None:\n return ''\n return value.strftime(format)\n\n\ndef register_loggers(app):\n \"\"\"Initialize and configure logging.\"\"\"\n if 'DEBUG' in app.config and not app.config['DEBUG']:\n import logging\n stream_handler = logging.StreamHandler()\n app.logger.addHandler(stream_handler)\n app.logger.setLevel(logging.INFO)\n\n\n<function token>\n",
"<docstring token>\n<import token>\n\n\ndef init_app(config_object=ProdConfig):\n \"\"\"Define an application factory.\n\n See: http://flask.pocoo.org/docs/patterns/appfactories/\n\n :param config_object: The configuration object to use.\n \"\"\"\n app = Flask(__name__)\n app.config.from_object(config_object)\n if app.config['SERVER_CORS']:\n CORS(app, resources={'/api/*': {'origins': '*'}})\n app.config['CORS_HEADERS'] = 'Content-Type'\n if app.config['SERVER_PROXY']:\n app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)\n else:\n app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')\n for static in ('css', 'img', 'js', 'public'):\n app.wsgi_app.add_files('dribdat/static/' + static)\n register_extensions(app)\n register_blueprints(app)\n register_oauthhandlers(app)\n register_errorhandlers(app)\n register_filters(app)\n register_loggers(app)\n register_shellcontext(app)\n register_commands(app)\n register_caching(app)\n return app\n\n\ndef register_extensions(app):\n \"\"\"Register Flask extensions.\"\"\"\n assets.init_app(app)\n hashing.init_app(app)\n cache.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n migrate.init_app(app, db)\n init_mailman(app)\n init_talisman(app)\n return None\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef register_loggers(app):\n \"\"\"Initialize and configure logging.\"\"\"\n if 'DEBUG' in app.config and not app.config['DEBUG']:\n import logging\n stream_handler = logging.StreamHandler()\n app.logger.addHandler(stream_handler)\n app.logger.setLevel(logging.INFO)\n\n\n<function token>\n",
"<docstring token>\n<import token>\n\n\ndef init_app(config_object=ProdConfig):\n \"\"\"Define an application factory.\n\n See: http://flask.pocoo.org/docs/patterns/appfactories/\n\n :param config_object: The configuration object to use.\n \"\"\"\n app = Flask(__name__)\n app.config.from_object(config_object)\n if app.config['SERVER_CORS']:\n CORS(app, resources={'/api/*': {'origins': '*'}})\n app.config['CORS_HEADERS'] = 'Content-Type'\n if app.config['SERVER_PROXY']:\n app.wsgi_app = ProxyFix(app, x_for=1, x_proto=1, x_host=1)\n else:\n app.wsgi_app = WhiteNoise(app.wsgi_app, prefix='static/')\n for static in ('css', 'img', 'js', 'public'):\n app.wsgi_app.add_files('dribdat/static/' + static)\n register_extensions(app)\n register_blueprints(app)\n register_oauthhandlers(app)\n register_errorhandlers(app)\n register_filters(app)\n register_loggers(app)\n register_shellcontext(app)\n register_commands(app)\n register_caching(app)\n return app\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef register_loggers(app):\n \"\"\"Initialize and configure logging.\"\"\"\n if 'DEBUG' in app.config and not app.config['DEBUG']:\n import logging\n stream_handler = logging.StreamHandler()\n app.logger.addHandler(stream_handler)\n app.logger.setLevel(logging.INFO)\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef register_loggers(app):\n \"\"\"Initialize and configure logging.\"\"\"\n if 'DEBUG' in app.config and not app.config['DEBUG']:\n import logging\n stream_handler = logging.StreamHandler()\n app.logger.addHandler(stream_handler)\n app.logger.setLevel(logging.INFO)\n\n\n<function token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
928 |
d39e3a552a7c558d3f5b410e0b228fb7409d732a
|
# -*- coding:utf-8 -*-
"""
Author:xufei
Date:2021/1/21
"""
|
[
"# -*- coding:utf-8 -*-\n\"\"\"\nAuthor:xufei\nDate:2021/1/21\n\"\"\"\n",
"<docstring token>\n"
] | false |
929 |
c18c407476375fb1647fefaedb5d7ea0e0aabe3a
|
import pandas as pd
import numpy as np
import csv
# import nltk
# nltk.download('punkt')  # uncomment on first run to fetch the 'punkt' tokenizer data
from nltk.tokenize import sent_tokenize
csv_file = open("/home/debajit15/train+dev.csv")
pd.set_option('display.max_colwidth', None)
df = pd.read_csv(csv_file, sep=',')
df = df[pd.notnull(df['Aspects'])]  # keep only rows with aspect annotations
#print(df['Opinion_Words'].iloc[0:1])
def train_validate_test_split(df, train_percent=.8, validate_percent=.2, seed=None):
    # Shuffle the rows (reproducibly when a seed is given), then split:
    # the first train_percent of rows form the training set and the
    # remainder the validation set.
    np.random.seed(seed)
    perm = np.random.permutation(df.index)
    shuffled = df.loc[perm]  # apply the random permutation before splitting
    m = len(df.index)
    train_end = int(train_percent * m)
    train = shuffled.iloc[:train_end]
    validate = shuffled.iloc[train_end:]
    return train, validate
trainl, vall = train_validate_test_split(df)
def get(df):
    # Convert each review into sequence-labelling lines of the form
    # "<sentence>####word=TAG word=TAG ...", where TAG is T-POS / T-NEU /
    # T-NEG for aspect tokens (by sentiment) and O for every other token.
col=df[['review_body']]
print(col.head())
aspect=df[['Aspects']]
opinions=df[['Sentiments']]
print(df.shape[0])
now=""
    for o in range(0,df.shape[0]):
        d=col.iloc[o:o+1]
        sd=d.to_string(index=False,header=None)
        sd=sd[1:]  # drop the leading space added by to_string()
        l=sent_tokenize(sd)  # split the review body into sentences
        a=aspect.iloc[o:o+1]
        sa=a.to_string(index=False,header=None)
        asp=sa.split(";")  # one aspect group per sentence
        a=opinions.iloc[o:o+1]
        sa=a.to_string(index=False,header=None)
        senti=sa.split(";")  # one sentiment group per sentence
        # skip reviews where sentence, aspect and sentiment counts disagree
        if(len(asp)!=len(senti) or len(l)!=len(asp) or len(l)!=len(senti)):
            continue
it=0
for i in l:
chks=[x.strip() for x in senti[it].split(",")]
chka=[x.strip() for x in asp[it].split(",")]
g=[]
itr=0
            if(len(chks)!=len(chka)):
                continue  # aspect/sentiment counts disagree for this sentence
            # expand multi-word aspects into single tokens, duplicating
            # the sentiment label for each extra token
            for k in chka:
f=k.split(" ")
num=chks[itr]
if(len(f)>1):
h=0
for x in f:
x=x.strip(' ')
x=x.strip('"')
g+=[x]
if(h<len(f)-1):
chks.insert(itr,'1')
h+=1
else:
g+=f
itr+=1
chka=g
now+=i
now+="####"
            j=i.split(" ")
            itr=0
            # tag every token: aspect tokens get their sentiment label,
            # all other tokens get O
            for word in j:
if itr<len(chka) and word==chka[itr] :
if chks[itr]=='1':
s=word+"=T-POS"
elif chks[itr]=='0':
s=word+"=T-NEU"
else:
s=word+"=T-NEG"
itr+=1
else:
s=word+"=O"
now+=s+" "
now+="\n"
it+=1
return now
train=get(trainl)
val=get(vall)
text_file = open("/home/debajit15/train.txt", "w")
n = text_file.write(train)
text_file.close()
text_file = open("/home/debajit15/dev.txt", "w")
n = text_file.write(val)
text_file.close()
# #print(df[['review_body']])
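
# For orientation, a minimal sketch (an invented example, not taken from the
# dataset) of one line that get() writes to train.txt / dev.txt: the raw
# sentence, then "####", then every token tagged with its label.
#
#   The battery is great.####The=O battery=T-POS is=O great.=O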
|
[
"import pandas as pd\nimport numpy as np\nimport csv\n#import nltk\n#nltk.download('punkt')\nfrom nltk.tokenize import sent_tokenize\ncsv_file=open(\"/home/debajit15/train+dev.csv\")\npd.set_option('display.max_colwidth', None)\ndf=pd.read_csv(csv_file,sep=',');\ndf = df[pd.notnull(df['Aspects'])]\n#print(df['Opinion_Words'].iloc[0:1])\n\ndef train_validate_test_split(df, train_percent=.8, validate_percent=.2, seed=None):\n np.random.seed(seed)\n perm = np.random.permutation(df.index)\n m = len(df.index)\n train_end = int(train_percent * m)\n train = df.iloc[:train_end]\n validate = df.iloc[train_end:]\n return train, validate\n\ntrainl,vall=train_validate_test_split(df)\n\ndef get(df):\n\tcol=df[['review_body']]\n\tprint(col.head())\n\taspect=df[['Aspects']]\n\topinions=df[['Sentiments']]\n\tprint(df.shape[0])\n\tnow=\"\"\n\tfor o in range(0,df.shape[0]):\n\t\td=col.iloc[o:o+1]\n\t\tsd=d.to_string(index=False,header=None)\n\t\tsd=sd[1:]\n\t\tl=sent_tokenize(sd)\n\n\t\ta=aspect.iloc[o:o+1]\n\t\tsa=a.to_string(index=False,header=None)\n\t\tasp=sa.split(\";\")\n\n\t\ta=opinions.iloc[o:o+1]\n\t\tsa=a.to_string(index=False,header=None)\n\t\tsenti=sa.split(\";\")\n\n\t\tif(len(asp)!=len(senti) or len(l)!=len(asp) or len(l)!=len(senti)):\n\t\t\tcontinue\n\t\tit=0\n\t\tfor i in l:\n\t\t\tchks=[x.strip() for x in senti[it].split(\",\")]\n\t\t\tchka=[x.strip() for x in asp[it].split(\",\")]\n\n\t\t\tg=[]\n\t\t\titr=0\n\t\t\tif(len(chks)!=len(chka)):\n\t\t\t\tcontinue\n\t\t\tfor k in chka:\n\t\t\t\tf=k.split(\" \")\n\t\t\t\tnum=chks[itr]\n\t\t\t\tif(len(f)>1):\n\t\t\t\t\th=0\n\t\t\t\t\tfor x in f:\n\t\t\t\t\t\tx=x.strip(' ')\n\t\t\t\t\t\tx=x.strip('\"')\n\t\t\t\t\t\tg+=[x]\n\t\t\t\t\t\tif(h<len(f)-1):\n\t\t\t\t\t\t\tchks.insert(itr,'1')\n\t\t\t\t\t\th+=1\n\t\t\t\telse:\n\t\t\t\t\tg+=f\n\t\t\t\titr+=1\n\t\t\tchka=g\n\t\t\tnow+=i\n\t\t\tnow+=\"####\"\n\t\t\tj=i.split(\" \")\n\t\t\titr=0\n\t\t\tfor word in j:\n\t\t\t\tif itr<len(chka) and word==chka[itr] :\n\t\t\t\t\tif chks[itr]=='1':\n\t\t\t\t\t\ts=word+\"=T-POS\"\n\t\t\t\t\telif chks[itr]=='0':\n\t\t\t\t\t\ts=word+\"=T-NEU\"\n\t\t\t\t\telse:\n\t\t\t\t\t\ts=word+\"=T-NEG\"\n\t\t\t\t\titr+=1\n\t\t\t\telse:\n\t\t\t\t\ts=word+\"=O\"\n\t\t\t\tnow+=s+\" \"\n\t\t\tnow+=\"\\n\"\n\t\t\tit+=1\n\treturn now\n\n\ntrain=get(trainl)\nval=get(vall)\n\ntext_file = open(\"/home/debajit15/train.txt\", \"w\")\nn = text_file.write(train)\ntext_file.close()\ntext_file = open(\"/home/debajit15/dev.txt\", \"w\")\nn = text_file.write(val)\ntext_file.close()\n\n\n# #print(df[['review_body']])\n",
"import pandas as pd\nimport numpy as np\nimport csv\nfrom nltk.tokenize import sent_tokenize\ncsv_file = open('/home/debajit15/train+dev.csv')\npd.set_option('display.max_colwidth', None)\ndf = pd.read_csv(csv_file, sep=',')\ndf = df[pd.notnull(df['Aspects'])]\n\n\ndef train_validate_test_split(df, train_percent=0.8, validate_percent=0.2,\n seed=None):\n np.random.seed(seed)\n perm = np.random.permutation(df.index)\n m = len(df.index)\n train_end = int(train_percent * m)\n train = df.iloc[:train_end]\n validate = df.iloc[train_end:]\n return train, validate\n\n\ntrainl, vall = train_validate_test_split(df)\n\n\ndef get(df):\n col = df[['review_body']]\n print(col.head())\n aspect = df[['Aspects']]\n opinions = df[['Sentiments']]\n print(df.shape[0])\n now = ''\n for o in range(0, df.shape[0]):\n d = col.iloc[o:o + 1]\n sd = d.to_string(index=False, header=None)\n sd = sd[1:]\n l = sent_tokenize(sd)\n a = aspect.iloc[o:o + 1]\n sa = a.to_string(index=False, header=None)\n asp = sa.split(';')\n a = opinions.iloc[o:o + 1]\n sa = a.to_string(index=False, header=None)\n senti = sa.split(';')\n if len(asp) != len(senti) or len(l) != len(asp) or len(l) != len(senti\n ):\n continue\n it = 0\n for i in l:\n chks = [x.strip() for x in senti[it].split(',')]\n chka = [x.strip() for x in asp[it].split(',')]\n g = []\n itr = 0\n if len(chks) != len(chka):\n continue\n for k in chka:\n f = k.split(' ')\n num = chks[itr]\n if len(f) > 1:\n h = 0\n for x in f:\n x = x.strip(' ')\n x = x.strip('\"')\n g += [x]\n if h < len(f) - 1:\n chks.insert(itr, '1')\n h += 1\n else:\n g += f\n itr += 1\n chka = g\n now += i\n now += '####'\n j = i.split(' ')\n itr = 0\n for word in j:\n if itr < len(chka) and word == chka[itr]:\n if chks[itr] == '1':\n s = word + '=T-POS'\n elif chks[itr] == '0':\n s = word + '=T-NEU'\n else:\n s = word + '=T-NEG'\n itr += 1\n else:\n s = word + '=O'\n now += s + ' '\n now += '\\n'\n it += 1\n return now\n\n\ntrain = get(trainl)\nval = get(vall)\ntext_file = open('/home/debajit15/train.txt', 'w')\nn = text_file.write(train)\ntext_file.close()\ntext_file = open('/home/debajit15/dev.txt', 'w')\nn = text_file.write(val)\ntext_file.close()\n",
"<import token>\ncsv_file = open('/home/debajit15/train+dev.csv')\npd.set_option('display.max_colwidth', None)\ndf = pd.read_csv(csv_file, sep=',')\ndf = df[pd.notnull(df['Aspects'])]\n\n\ndef train_validate_test_split(df, train_percent=0.8, validate_percent=0.2,\n seed=None):\n np.random.seed(seed)\n perm = np.random.permutation(df.index)\n m = len(df.index)\n train_end = int(train_percent * m)\n train = df.iloc[:train_end]\n validate = df.iloc[train_end:]\n return train, validate\n\n\ntrainl, vall = train_validate_test_split(df)\n\n\ndef get(df):\n col = df[['review_body']]\n print(col.head())\n aspect = df[['Aspects']]\n opinions = df[['Sentiments']]\n print(df.shape[0])\n now = ''\n for o in range(0, df.shape[0]):\n d = col.iloc[o:o + 1]\n sd = d.to_string(index=False, header=None)\n sd = sd[1:]\n l = sent_tokenize(sd)\n a = aspect.iloc[o:o + 1]\n sa = a.to_string(index=False, header=None)\n asp = sa.split(';')\n a = opinions.iloc[o:o + 1]\n sa = a.to_string(index=False, header=None)\n senti = sa.split(';')\n if len(asp) != len(senti) or len(l) != len(asp) or len(l) != len(senti\n ):\n continue\n it = 0\n for i in l:\n chks = [x.strip() for x in senti[it].split(',')]\n chka = [x.strip() for x in asp[it].split(',')]\n g = []\n itr = 0\n if len(chks) != len(chka):\n continue\n for k in chka:\n f = k.split(' ')\n num = chks[itr]\n if len(f) > 1:\n h = 0\n for x in f:\n x = x.strip(' ')\n x = x.strip('\"')\n g += [x]\n if h < len(f) - 1:\n chks.insert(itr, '1')\n h += 1\n else:\n g += f\n itr += 1\n chka = g\n now += i\n now += '####'\n j = i.split(' ')\n itr = 0\n for word in j:\n if itr < len(chka) and word == chka[itr]:\n if chks[itr] == '1':\n s = word + '=T-POS'\n elif chks[itr] == '0':\n s = word + '=T-NEU'\n else:\n s = word + '=T-NEG'\n itr += 1\n else:\n s = word + '=O'\n now += s + ' '\n now += '\\n'\n it += 1\n return now\n\n\ntrain = get(trainl)\nval = get(vall)\ntext_file = open('/home/debajit15/train.txt', 'w')\nn = text_file.write(train)\ntext_file.close()\ntext_file = open('/home/debajit15/dev.txt', 'w')\nn = text_file.write(val)\ntext_file.close()\n",
"<import token>\n<assignment token>\npd.set_option('display.max_colwidth', None)\n<assignment token>\n\n\ndef train_validate_test_split(df, train_percent=0.8, validate_percent=0.2,\n seed=None):\n np.random.seed(seed)\n perm = np.random.permutation(df.index)\n m = len(df.index)\n train_end = int(train_percent * m)\n train = df.iloc[:train_end]\n validate = df.iloc[train_end:]\n return train, validate\n\n\n<assignment token>\n\n\ndef get(df):\n col = df[['review_body']]\n print(col.head())\n aspect = df[['Aspects']]\n opinions = df[['Sentiments']]\n print(df.shape[0])\n now = ''\n for o in range(0, df.shape[0]):\n d = col.iloc[o:o + 1]\n sd = d.to_string(index=False, header=None)\n sd = sd[1:]\n l = sent_tokenize(sd)\n a = aspect.iloc[o:o + 1]\n sa = a.to_string(index=False, header=None)\n asp = sa.split(';')\n a = opinions.iloc[o:o + 1]\n sa = a.to_string(index=False, header=None)\n senti = sa.split(';')\n if len(asp) != len(senti) or len(l) != len(asp) or len(l) != len(senti\n ):\n continue\n it = 0\n for i in l:\n chks = [x.strip() for x in senti[it].split(',')]\n chka = [x.strip() for x in asp[it].split(',')]\n g = []\n itr = 0\n if len(chks) != len(chka):\n continue\n for k in chka:\n f = k.split(' ')\n num = chks[itr]\n if len(f) > 1:\n h = 0\n for x in f:\n x = x.strip(' ')\n x = x.strip('\"')\n g += [x]\n if h < len(f) - 1:\n chks.insert(itr, '1')\n h += 1\n else:\n g += f\n itr += 1\n chka = g\n now += i\n now += '####'\n j = i.split(' ')\n itr = 0\n for word in j:\n if itr < len(chka) and word == chka[itr]:\n if chks[itr] == '1':\n s = word + '=T-POS'\n elif chks[itr] == '0':\n s = word + '=T-NEU'\n else:\n s = word + '=T-NEG'\n itr += 1\n else:\n s = word + '=O'\n now += s + ' '\n now += '\\n'\n it += 1\n return now\n\n\n<assignment token>\ntext_file.close()\n<assignment token>\ntext_file.close()\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef train_validate_test_split(df, train_percent=0.8, validate_percent=0.2,\n seed=None):\n np.random.seed(seed)\n perm = np.random.permutation(df.index)\n m = len(df.index)\n train_end = int(train_percent * m)\n train = df.iloc[:train_end]\n validate = df.iloc[train_end:]\n return train, validate\n\n\n<assignment token>\n\n\ndef get(df):\n col = df[['review_body']]\n print(col.head())\n aspect = df[['Aspects']]\n opinions = df[['Sentiments']]\n print(df.shape[0])\n now = ''\n for o in range(0, df.shape[0]):\n d = col.iloc[o:o + 1]\n sd = d.to_string(index=False, header=None)\n sd = sd[1:]\n l = sent_tokenize(sd)\n a = aspect.iloc[o:o + 1]\n sa = a.to_string(index=False, header=None)\n asp = sa.split(';')\n a = opinions.iloc[o:o + 1]\n sa = a.to_string(index=False, header=None)\n senti = sa.split(';')\n if len(asp) != len(senti) or len(l) != len(asp) or len(l) != len(senti\n ):\n continue\n it = 0\n for i in l:\n chks = [x.strip() for x in senti[it].split(',')]\n chka = [x.strip() for x in asp[it].split(',')]\n g = []\n itr = 0\n if len(chks) != len(chka):\n continue\n for k in chka:\n f = k.split(' ')\n num = chks[itr]\n if len(f) > 1:\n h = 0\n for x in f:\n x = x.strip(' ')\n x = x.strip('\"')\n g += [x]\n if h < len(f) - 1:\n chks.insert(itr, '1')\n h += 1\n else:\n g += f\n itr += 1\n chka = g\n now += i\n now += '####'\n j = i.split(' ')\n itr = 0\n for word in j:\n if itr < len(chka) and word == chka[itr]:\n if chks[itr] == '1':\n s = word + '=T-POS'\n elif chks[itr] == '0':\n s = word + '=T-NEU'\n else:\n s = word + '=T-NEG'\n itr += 1\n else:\n s = word + '=O'\n now += s + ' '\n now += '\\n'\n it += 1\n return now\n\n\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n\n\ndef train_validate_test_split(df, train_percent=0.8, validate_percent=0.2,\n seed=None):\n np.random.seed(seed)\n perm = np.random.permutation(df.index)\n m = len(df.index)\n train_end = int(train_percent * m)\n train = df.iloc[:train_end]\n validate = df.iloc[train_end:]\n return train, validate\n\n\n<assignment token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<function token>\n<assignment token>\n<function token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
930 |
74b0ccb5193380ce596313d1ac3f898ff1fdd2f3
|
from .mail_utils import send_mail
from .request_utils import get_host_url
|
[
"from .mail_utils import send_mail\nfrom .request_utils import get_host_url\n",
"<import token>\n"
] | false |
931 |
4e1f7fddb6bd3413dd6a8ca21520d309af75c811
|
import sys
import os

sys.path.insert(0, "main")  # make the local "main" directory importable
import main
workspace = os.path.abspath(sys.argv[1])
main.hammer(workspace)
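
# Usage sketch (the script name and path are placeholders, not from the source):
#   python this_script.py /path/to/workspace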
|
[
"import sys\nimport os\nsys.path.insert(0, \"main\")\nimport main\nworkspace = os.path.abspath(sys.argv[1])\nmain.hammer(workspace)\n",
"import sys\nimport os\nsys.path.insert(0, 'main')\nimport main\nworkspace = os.path.abspath(sys.argv[1])\nmain.hammer(workspace)\n",
"<import token>\nsys.path.insert(0, 'main')\n<import token>\nworkspace = os.path.abspath(sys.argv[1])\nmain.hammer(workspace)\n",
"<import token>\nsys.path.insert(0, 'main')\n<import token>\n<assignment token>\nmain.hammer(workspace)\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n<code token>\n"
] | false |
932 |
db1e3a109af2db2c8794a7c9c7dfb0c2ccee5800
|
#!/usr/bin/python3
"""0. How many subs"""
def number_of_subscribers(subreddit):
"""return the number of subscribers from an Reddit API"""
import requests
resInf = requests.get("https://www.reddit.com/r/{}/about.json"
.format(subreddit),
headers={"User-Agent": "My-User-Agent"},
allow_redirects=False)
    if resInf.status_code >= 300:
        # a redirect or error status means the subreddit does not exist
        return 0
return resInf.json().get("data").get("subscribers")
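
# A minimal usage sketch; the subreddit name is an arbitrary example.
if __name__ == "__main__":
    print(number_of_subscribers("programming"))  # prints a count, or 0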
|
[
"#!/usr/bin/python3\n\"\"\"0. How many subs\"\"\"\n\ndef number_of_subscribers(subreddit):\n \"\"\"return the number of subscribers from an Reddit API\"\"\"\n\n import requests\n\n resInf = requests.get(\"https://www.reddit.com/r/{}/about.json\"\n .format(subreddit),\n headers={\"User-Agent\": \"My-User-Agent\"},\n allow_redirects=False)\n if resInf.status_code >= 300:\n return 0\n\n return resInf.json().get(\"data\").get(\"subscribers\")\n",
"<docstring token>\n\n\ndef number_of_subscribers(subreddit):\n \"\"\"return the number of subscribers from an Reddit API\"\"\"\n import requests\n resInf = requests.get('https://www.reddit.com/r/{}/about.json'.format(\n subreddit), headers={'User-Agent': 'My-User-Agent'},\n allow_redirects=False)\n if resInf.status_code >= 300:\n return 0\n return resInf.json().get('data').get('subscribers')\n",
"<docstring token>\n<function token>\n"
] | false |
933 |
d20e41dd7054ff133be264bebf13e4e218710ae5
|
from django.shortcuts import resolve_url as r
from django.test import TestCase
class coreGetHome(TestCase):
    """Tests for the core home view."""

    def setUp(self):
        self.resp = self.client.get(r('core:core_home'))
def test_template_home(self):
self.assertTemplateUsed(self.resp, 'index.html')
def test_200_template_home(self):
self.assertEqual(200, self.resp.status_code)
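
# These tests run under Django's standard test runner, e.g.:
#   python manage.py test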
|
[
"from django.shortcuts import resolve_url as r\nfrom django.test import TestCase\n\n\nclass coreGetHome(TestCase):\n def setUp(self):\n self.resp = self.client.get(r('core:core_home'))\n\n def test_template_home(self):\n self.assertTemplateUsed(self.resp, 'index.html')\n\n def test_200_template_home(self):\n self.assertEqual(200, self.resp.status_code)\n",
"from django.shortcuts import resolve_url as r\nfrom django.test import TestCase\n\n\nclass coreGetHome(TestCase):\n\n def setUp(self):\n self.resp = self.client.get(r('core:core_home'))\n\n def test_template_home(self):\n self.assertTemplateUsed(self.resp, 'index.html')\n\n def test_200_template_home(self):\n self.assertEqual(200, self.resp.status_code)\n",
"<import token>\n\n\nclass coreGetHome(TestCase):\n\n def setUp(self):\n self.resp = self.client.get(r('core:core_home'))\n\n def test_template_home(self):\n self.assertTemplateUsed(self.resp, 'index.html')\n\n def test_200_template_home(self):\n self.assertEqual(200, self.resp.status_code)\n",
"<import token>\n\n\nclass coreGetHome(TestCase):\n\n def setUp(self):\n self.resp = self.client.get(r('core:core_home'))\n <function token>\n\n def test_200_template_home(self):\n self.assertEqual(200, self.resp.status_code)\n",
"<import token>\n\n\nclass coreGetHome(TestCase):\n <function token>\n <function token>\n\n def test_200_template_home(self):\n self.assertEqual(200, self.resp.status_code)\n",
"<import token>\n\n\nclass coreGetHome(TestCase):\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
934 |
ff7a865822a4f8b343ab4cb490c24d6d530b14e1
|
#!/usr/bin/env python
kube_description = """
Compute Server
"""
kube_instruction = """
No instructions yet
"""
#
# Standard geni-lib/portal libraries
#
import geni.portal as portal
import geni.rspec.pg as PG
import geni.rspec.emulab as elab
import geni.rspec.igext as IG
import geni.urn as URN
#
# PhantomNet extensions.
#
import geni.rspec.emulab.pnext as PN
#
# This geni-lib script is designed to run in the PhantomNet Portal.
#
pc = portal.Context()
params = pc.bindParameters()
#
# Give the library a chance to return nice JSON-formatted exception(s) and/or
# warnings; this might sys.exit().
#
pc.verifyParameters()
rspec = PG.Request()
compute = rspec.RawPC("compute")
compute.disk_image = 'urn:publicid:IDN+emulab.net+image+emulab-ops:UBUNTU18-64-STD'
compute.hardware_type = 'd430'
compute.routable_control_ip = True
tour = IG.Tour()
tour.Description(IG.Tour.TEXT, kube_description)
tour.Instructions(IG.Tour.MARKDOWN, kube_instruction)
rspec.addTour(tour)
#
# Print and go!
#
pc.printRequestRSpec(rspec)
|
[
"#!/usr/bin/env python\n\nkube_description= \\\n\"\"\"\nCompute Server\n\"\"\"\nkube_instruction= \\\n\"\"\"\nNot instructions yet\n\"\"\"\n\n#\n# Standard geni-lib/portal libraries\n#\nimport geni.portal as portal\nimport geni.rspec.pg as PG\nimport geni.rspec.emulab as elab\nimport geni.rspec.igext as IG\nimport geni.urn as URN\n\n\n\n#\n# PhantomNet extensions.\n#\nimport geni.rspec.emulab.pnext as PN \n\n#\n# This geni-lib script is designed to run in the PhantomNet Portal.\n#\npc = portal.Context()\n\n\nparams = pc.bindParameters()\n\n#\n# Give the library a chance to return nice JSON-formatted exception(s) and/or\n# warnings; this might sys.exit().\n#\npc.verifyParameters()\n\nrspec = PG.Request()\ncompute = rspec.RawPC(\"compute\")\ncompute.disk_image = 'urn:publicid:IDN+emulab.net+image+emulab-ops:UBUNTU18-64-STD'\ncompute.hardware_type = 'd430'\ncompute.routable_control_ip = True\n\ntour = IG.Tour()\ntour.Description(IG.Tour.TEXT,kube_description)\ntour.Instructions(IG.Tour.MARKDOWN,kube_instruction)\nrspec.addTour(tour)\n\n#\n# Print and go!\n#\npc.printRequestRSpec(rspec)\n",
"kube_description = \"\"\"\nCompute Server\n\"\"\"\nkube_instruction = \"\"\"\nNot instructions yet\n\"\"\"\nimport geni.portal as portal\nimport geni.rspec.pg as PG\nimport geni.rspec.emulab as elab\nimport geni.rspec.igext as IG\nimport geni.urn as URN\nimport geni.rspec.emulab.pnext as PN\npc = portal.Context()\nparams = pc.bindParameters()\npc.verifyParameters()\nrspec = PG.Request()\ncompute = rspec.RawPC('compute')\ncompute.disk_image = (\n 'urn:publicid:IDN+emulab.net+image+emulab-ops:UBUNTU18-64-STD')\ncompute.hardware_type = 'd430'\ncompute.routable_control_ip = True\ntour = IG.Tour()\ntour.Description(IG.Tour.TEXT, kube_description)\ntour.Instructions(IG.Tour.MARKDOWN, kube_instruction)\nrspec.addTour(tour)\npc.printRequestRSpec(rspec)\n",
"kube_description = \"\"\"\nCompute Server\n\"\"\"\nkube_instruction = \"\"\"\nNot instructions yet\n\"\"\"\n<import token>\npc = portal.Context()\nparams = pc.bindParameters()\npc.verifyParameters()\nrspec = PG.Request()\ncompute = rspec.RawPC('compute')\ncompute.disk_image = (\n 'urn:publicid:IDN+emulab.net+image+emulab-ops:UBUNTU18-64-STD')\ncompute.hardware_type = 'd430'\ncompute.routable_control_ip = True\ntour = IG.Tour()\ntour.Description(IG.Tour.TEXT, kube_description)\ntour.Instructions(IG.Tour.MARKDOWN, kube_instruction)\nrspec.addTour(tour)\npc.printRequestRSpec(rspec)\n",
"<assignment token>\n<import token>\n<assignment token>\npc.verifyParameters()\n<assignment token>\ntour.Description(IG.Tour.TEXT, kube_description)\ntour.Instructions(IG.Tour.MARKDOWN, kube_instruction)\nrspec.addTour(tour)\npc.printRequestRSpec(rspec)\n",
"<assignment token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
935 |
a93884757069393b4d96de5ec9c7d815d58a2ea5
|
# coding: utf-8
import logging
import uuid
import json
import xmltodict
import bottle
from bottle import HTTPError
from bottle.ext import sqlalchemy
from database import Base, engine
from database import JdWaybillSendResp, JdWaybillApplyResp
jd = bottle.Bottle(catchall=False)
plugin = sqlalchemy.Plugin(
engine, # SQLAlchemy engine created with create_engine function.
Base.metadata, # SQLAlchemy metadata, required only if create=True.
keyword='db', # Keyword used to inject session database in a route (default 'db').
create=True, # If it is true, execute `metadata.create_all(engine)` when plugin is applied (default False).
commit=True, # If it is true, plugin commit changes after route is executed (default True).
use_kwargs=False
# If it is true and keyword is not defined,
# plugin uses **kwargs argument to inject session database (default False).
)
jd.install(plugin)
@jd.get('/routerjson')
def apply_jd_waybill(db):
query = bottle.request.query
if query['method'] == 'jingdong.etms.waybillcode.get':
jd_code, resp = jd_get_response_normal()
logging.debug('JD response: {} {}'.format(jd_code, resp))
db.add(JdWaybillApplyResp(jd_code, resp))
    else:  # method == 'jingdong.etms.waybillcode.send'
jd_param = json.loads(query['360buy_param_json'])
delivery_id = jd_param['deliveryId']
order_id = jd_param['orderId']
resp = jd_send_response_normal(delivery_id, order_id)
db.add(JdWaybillSendResp(delivery_id, order_id, resp))
logging.debug('JD response: {}'.format(resp))
return resp
@jd.get('/jd_waybill')
def jd_waybill(db):
query = bottle.request.query
jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get('wms_order_code')).first()
    if jd_rsp:
        # return the stored raw response body for this order
        return jd_rsp.body
return HTTPError(404, None)
def jd_get_response_normal():
code = str(uuid.uuid4()).split('-')[-1]
return code, json.dumps({
'jingdong_etms_waybillcode_get_responce':
{'resultInfo':
             {'message': u'成功',  # "success"
'code': 100,
'deliveryIdList': [code]
},
'code': u'0'
}
})
def jd_send_response_normal(delivery_id, order_id):
    # Build a mock "waybill send" response echoing the delivery and order ids.
    return json.dumps({
        "jingdong_etms_waybill_send_responce": {
            "resultInfo": {
                "message": u"成功",
                "deliveryId": delivery_id,
                "code": 100,
                "orderId": order_id
            }
        }
    })
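
# Hedged usage sketch (added for illustration; host/port are assumptions,
# not part of the original module). bottle.Bottle provides run(), so the
# mock JD endpoint can be served locally for manual testing:
if __name__ == '__main__':
    jd.run(host='localhost', port=8080, debug=True)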
|
[
"# coding: utf-8\nimport logging\nimport uuid\nimport json\nimport xmltodict\nimport bottle\nfrom bottle import HTTPError\nfrom bottle.ext import sqlalchemy\nfrom database import Base, engine\nfrom database import JdWaybillSendResp, JdWaybillApplyResp\n\njd = bottle.Bottle(catchall=False)\n\nplugin = sqlalchemy.Plugin(\n engine, # SQLAlchemy engine created with create_engine function.\n Base.metadata, # SQLAlchemy metadata, required only if create=True.\n keyword='db', # Keyword used to inject session database in a route (default 'db').\n create=True, # If it is true, execute `metadata.create_all(engine)` when plugin is applied (default False).\n commit=True, # If it is true, plugin commit changes after route is executed (default True).\n use_kwargs=False\n # If it is true and keyword is not defined,\n # plugin uses **kwargs argument to inject session database (default False).\n)\n\njd.install(plugin)\n\n\[email protected]('/routerjson')\ndef apply_jd_waybill(db):\n query = bottle.request.query\n if query['method'] == 'jingdong.etms.waybillcode.get':\n jd_code, resp = jd_get_response_normal()\n logging.debug('JD response: {} {}'.format(jd_code, resp))\n db.add(JdWaybillApplyResp(jd_code, resp))\n else: # '''jingdong.etms.waybillcode.send'''\n jd_param = json.loads(query['360buy_param_json'])\n delivery_id = jd_param['deliveryId']\n order_id = jd_param['orderId']\n resp = jd_send_response_normal(delivery_id, order_id)\n db.add(JdWaybillSendResp(delivery_id, order_id, resp))\n logging.debug('JD response: {}'.format(resp))\n\n return resp\n\n\[email protected]('/jd_waybill')\ndef jd_waybill(db):\n query = bottle.request.query\n jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get('wms_order_code')).first()\n if jd_rsp:\n # return entities\n return jd_rsp.body\n return HTTPError(404, None)\n\n\ndef jd_get_response_normal():\n code = str(uuid.uuid4()).split('-')[-1]\n return code, json.dumps({\n 'jingdong_etms_waybillcode_get_responce':\n {'resultInfo':\n {'message': u'成功',\n 'code': 100,\n 'deliveryIdList': [code]\n },\n 'code': u'0'\n }\n })\n\n\ndef jd_send_response_normal(deliver_id, order_id):\n return json.dumps({\n \"jingdong_etms_waybill_send_responce\": {\n \"resultInfo\": {\n \"message\": u\"成功\",\n \"deliveryId\": deliver_id,\n \"code\": 100,\n \"orderId\": order_id\n }\n }\n })\n",
"import logging\nimport uuid\nimport json\nimport xmltodict\nimport bottle\nfrom bottle import HTTPError\nfrom bottle.ext import sqlalchemy\nfrom database import Base, engine\nfrom database import JdWaybillSendResp, JdWaybillApplyResp\njd = bottle.Bottle(catchall=False)\nplugin = sqlalchemy.Plugin(engine, Base.metadata, keyword='db', create=True,\n commit=True, use_kwargs=False)\njd.install(plugin)\n\n\[email protected]('/routerjson')\ndef apply_jd_waybill(db):\n query = bottle.request.query\n if query['method'] == 'jingdong.etms.waybillcode.get':\n jd_code, resp = jd_get_response_normal()\n logging.debug('JD response: {} {}'.format(jd_code, resp))\n db.add(JdWaybillApplyResp(jd_code, resp))\n else:\n jd_param = json.loads(query['360buy_param_json'])\n delivery_id = jd_param['deliveryId']\n order_id = jd_param['orderId']\n resp = jd_send_response_normal(delivery_id, order_id)\n db.add(JdWaybillSendResp(delivery_id, order_id, resp))\n logging.debug('JD response: {}'.format(resp))\n return resp\n\n\[email protected]('/jd_waybill')\ndef jd_waybill(db):\n query = bottle.request.query\n jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get\n ('wms_order_code')).first()\n if jd_rsp:\n return jd_rsp.body\n return HTTPError(404, None)\n\n\ndef jd_get_response_normal():\n code = str(uuid.uuid4()).split('-')[-1]\n return code, json.dumps({'jingdong_etms_waybillcode_get_responce': {\n 'resultInfo': {'message': u'成功', 'code': 100, 'deliveryIdList': [\n code]}, 'code': u'0'}})\n\n\ndef jd_send_response_normal(deliver_id, order_id):\n return json.dumps({'jingdong_etms_waybill_send_responce': {'resultInfo':\n {'message': u'成功', 'deliveryId': deliver_id, 'code': 100, 'orderId':\n order_id}}})\n",
"<import token>\njd = bottle.Bottle(catchall=False)\nplugin = sqlalchemy.Plugin(engine, Base.metadata, keyword='db', create=True,\n commit=True, use_kwargs=False)\njd.install(plugin)\n\n\[email protected]('/routerjson')\ndef apply_jd_waybill(db):\n query = bottle.request.query\n if query['method'] == 'jingdong.etms.waybillcode.get':\n jd_code, resp = jd_get_response_normal()\n logging.debug('JD response: {} {}'.format(jd_code, resp))\n db.add(JdWaybillApplyResp(jd_code, resp))\n else:\n jd_param = json.loads(query['360buy_param_json'])\n delivery_id = jd_param['deliveryId']\n order_id = jd_param['orderId']\n resp = jd_send_response_normal(delivery_id, order_id)\n db.add(JdWaybillSendResp(delivery_id, order_id, resp))\n logging.debug('JD response: {}'.format(resp))\n return resp\n\n\[email protected]('/jd_waybill')\ndef jd_waybill(db):\n query = bottle.request.query\n jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get\n ('wms_order_code')).first()\n if jd_rsp:\n return jd_rsp.body\n return HTTPError(404, None)\n\n\ndef jd_get_response_normal():\n code = str(uuid.uuid4()).split('-')[-1]\n return code, json.dumps({'jingdong_etms_waybillcode_get_responce': {\n 'resultInfo': {'message': u'成功', 'code': 100, 'deliveryIdList': [\n code]}, 'code': u'0'}})\n\n\ndef jd_send_response_normal(deliver_id, order_id):\n return json.dumps({'jingdong_etms_waybill_send_responce': {'resultInfo':\n {'message': u'成功', 'deliveryId': deliver_id, 'code': 100, 'orderId':\n order_id}}})\n",
"<import token>\n<assignment token>\njd.install(plugin)\n\n\[email protected]('/routerjson')\ndef apply_jd_waybill(db):\n query = bottle.request.query\n if query['method'] == 'jingdong.etms.waybillcode.get':\n jd_code, resp = jd_get_response_normal()\n logging.debug('JD response: {} {}'.format(jd_code, resp))\n db.add(JdWaybillApplyResp(jd_code, resp))\n else:\n jd_param = json.loads(query['360buy_param_json'])\n delivery_id = jd_param['deliveryId']\n order_id = jd_param['orderId']\n resp = jd_send_response_normal(delivery_id, order_id)\n db.add(JdWaybillSendResp(delivery_id, order_id, resp))\n logging.debug('JD response: {}'.format(resp))\n return resp\n\n\[email protected]('/jd_waybill')\ndef jd_waybill(db):\n query = bottle.request.query\n jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get\n ('wms_order_code')).first()\n if jd_rsp:\n return jd_rsp.body\n return HTTPError(404, None)\n\n\ndef jd_get_response_normal():\n code = str(uuid.uuid4()).split('-')[-1]\n return code, json.dumps({'jingdong_etms_waybillcode_get_responce': {\n 'resultInfo': {'message': u'成功', 'code': 100, 'deliveryIdList': [\n code]}, 'code': u'0'}})\n\n\ndef jd_send_response_normal(deliver_id, order_id):\n return json.dumps({'jingdong_etms_waybill_send_responce': {'resultInfo':\n {'message': u'成功', 'deliveryId': deliver_id, 'code': 100, 'orderId':\n order_id}}})\n",
"<import token>\n<assignment token>\n<code token>\n\n\[email protected]('/routerjson')\ndef apply_jd_waybill(db):\n query = bottle.request.query\n if query['method'] == 'jingdong.etms.waybillcode.get':\n jd_code, resp = jd_get_response_normal()\n logging.debug('JD response: {} {}'.format(jd_code, resp))\n db.add(JdWaybillApplyResp(jd_code, resp))\n else:\n jd_param = json.loads(query['360buy_param_json'])\n delivery_id = jd_param['deliveryId']\n order_id = jd_param['orderId']\n resp = jd_send_response_normal(delivery_id, order_id)\n db.add(JdWaybillSendResp(delivery_id, order_id, resp))\n logging.debug('JD response: {}'.format(resp))\n return resp\n\n\[email protected]('/jd_waybill')\ndef jd_waybill(db):\n query = bottle.request.query\n jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get\n ('wms_order_code')).first()\n if jd_rsp:\n return jd_rsp.body\n return HTTPError(404, None)\n\n\ndef jd_get_response_normal():\n code = str(uuid.uuid4()).split('-')[-1]\n return code, json.dumps({'jingdong_etms_waybillcode_get_responce': {\n 'resultInfo': {'message': u'成功', 'code': 100, 'deliveryIdList': [\n code]}, 'code': u'0'}})\n\n\ndef jd_send_response_normal(deliver_id, order_id):\n return json.dumps({'jingdong_etms_waybill_send_responce': {'resultInfo':\n {'message': u'成功', 'deliveryId': deliver_id, 'code': 100, 'orderId':\n order_id}}})\n",
"<import token>\n<assignment token>\n<code token>\n\n\[email protected]('/routerjson')\ndef apply_jd_waybill(db):\n query = bottle.request.query\n if query['method'] == 'jingdong.etms.waybillcode.get':\n jd_code, resp = jd_get_response_normal()\n logging.debug('JD response: {} {}'.format(jd_code, resp))\n db.add(JdWaybillApplyResp(jd_code, resp))\n else:\n jd_param = json.loads(query['360buy_param_json'])\n delivery_id = jd_param['deliveryId']\n order_id = jd_param['orderId']\n resp = jd_send_response_normal(delivery_id, order_id)\n db.add(JdWaybillSendResp(delivery_id, order_id, resp))\n logging.debug('JD response: {}'.format(resp))\n return resp\n\n\[email protected]('/jd_waybill')\ndef jd_waybill(db):\n query = bottle.request.query\n jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get\n ('wms_order_code')).first()\n if jd_rsp:\n return jd_rsp.body\n return HTTPError(404, None)\n\n\ndef jd_get_response_normal():\n code = str(uuid.uuid4()).split('-')[-1]\n return code, json.dumps({'jingdong_etms_waybillcode_get_responce': {\n 'resultInfo': {'message': u'成功', 'code': 100, 'deliveryIdList': [\n code]}, 'code': u'0'}})\n\n\n<function token>\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n\n\[email protected]('/jd_waybill')\ndef jd_waybill(db):\n query = bottle.request.query\n jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get\n ('wms_order_code')).first()\n if jd_rsp:\n return jd_rsp.body\n return HTTPError(404, None)\n\n\ndef jd_get_response_normal():\n code = str(uuid.uuid4()).split('-')[-1]\n return code, json.dumps({'jingdong_etms_waybillcode_get_responce': {\n 'resultInfo': {'message': u'成功', 'code': 100, 'deliveryIdList': [\n code]}, 'code': u'0'}})\n\n\n<function token>\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n\n\[email protected]('/jd_waybill')\ndef jd_waybill(db):\n query = bottle.request.query\n jd_rsp = db.query(JdWaybillSendResp).filter_by(wms_order_code=query.get\n ('wms_order_code')).first()\n if jd_rsp:\n return jd_rsp.body\n return HTTPError(404, None)\n\n\n<function token>\n<function token>\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
936 |
0cc1aaa182fcf002ff2ae6cbcd6cbb84a08a3bc1
|
# Basic script which sends some requests via the REST API to the test-management-tool.
# Be sure to set up the host and api_token variables before running.
import http.client
host = "localhost:8000"
api_token = "fuukp8LhdxxwoVdtJu5K8LQtpTods8ddLMq66wSUFXGsqJKpmJAa1YyqkHN3"
# Connection
conn = http.client.HTTPConnection(host)
# Create a header of http request
headers = {
'authorization': "Bearer " + api_token,
'content-type': "application/json",
'cache-control': "no-cache",
'postman-token': "44709a5c-ca4a-bbce-4b24-f0632a29bde4"
}
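
# Hedged sketch (illustrative only; the helper name post_json is not part of
# the original script): the POST/getresponse/read cycle below repeats many
# times, and http.client requires each response to be fully read before the
# next request, so a small wrapper would capture one round-trip:
def post_json(path, body):
    conn.request("POST", path, body, headers)
    response = conn.getresponse()
    return response.read()  # drain the response so conn can be reused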
################################################
payload = "{\n \"Name\": \"Create and edit project\"\n}"
conn.request("POST", "/api/v1/testsuites", payload, headers)
###
res = conn.getresponse()
data = res.read()
payload = "{\n \"Name\": \"Create and edit requirement\"\n}"
conn.request("POST", "/api/v1/testsuites", payload, headers)
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 1,\n \"Name\": \"Not selected project\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 1,\n \"Name\": \"Create project\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 1,\n \"Name\": \"Create project without name\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 1,\n \"Name\": \"Check if overview contains project\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 1,\n \"Name\": \"Edit project\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
################################################
###
payload = "{\n \"TestSuite_id\": 2,\n \"Name\": \"Create project\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 2,\n \"Name\": \"Create requirement\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 2,\n \"Name\": \"Create requirement without name\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 2,\n \"Name\": \"Overview contains requirement\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 2,\n \"Name\": \"Edit requirement\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 2,\n \"Name\": \"Cover requirement\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
################################################
payload = "{\n \"Name\": \"Create and edit TestSuites and TestCase\"\n}"
conn.request("POST", "/api/v1/testsuites", payload, headers)
###
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test suite\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test suite without name\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 3,\n \"Name\": \"Check if overview contains suite\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 3,\n \"Name\": \"Edit test suite\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test case without details\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test case with details\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test case without name\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 3,\n \"Name\": \"Check if overview contains case\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 3,\n \"Name\": \"Edit test case\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
################################################
payload = "{\n \"Name\": \"Create test set and run\"\n}"
conn.request("POST", "/api/v1/testsuites", payload, headers)
###
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 4,\n \"Name\": \"Create project\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 4,\n \"Name\": \"Create set\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 4,\n \"Name\": \"Overview contains set\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 4,\n \"Name\": \"Create set without name\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 4,\n \"Name\": \"Create set without tests\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 4,\n \"Name\": \"Edit test set\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 4,\n \"Name\": \"Create test run\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 4,\n \"Name\": \"Overview contains run\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 4,\n \"Name\": \"Execute contains tests\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
################################################
payload = "{\n \"Name\": \"Registration and log test\"\n}"
conn.request("POST", "/api/v1/testsuites", payload, headers)
###
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 5,\n \"Name\": \"Redirect to login page\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 5,\n \"Name\": \"Registration\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 5,\n \"Name\": \"Registrate same user\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
res = conn.getresponse()
data = res.read()
payload = "{\n \"TestSuite_id\": 5,\n \"Name\": \"Log and logout\"\n}"
conn.request("POST", "/api/v1/testcases", payload, headers)
|
[
"# Basic script which send some request via rest api to the test-management-tool.\n# Be sure you setup host and api_token variable\n\nimport http.client\n\nhost = \"localhost:8000\"\napi_token = \"fuukp8LhdxxwoVdtJu5K8LQtpTods8ddLMq66wSUFXGsqJKpmJAa1YyqkHN3\"\n\n# Connection\nconn = http.client.HTTPConnection(host)\n\n# Create a header of http request\nheaders = {\n 'authorization': \"Bearer \" + api_token,\n 'content-type': \"application/json\",\n 'cache-control': \"no-cache\",\n 'postman-token': \"44709a5c-ca4a-bbce-4b24-f0632a29bde4\"\n }\n\n################################################\npayload = \"{\\n \\\"Name\\\": \\\"Create and edit project\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testsuites\", payload, headers)\n###\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"Name\\\": \\\"Create and edit requirement\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testsuites\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 1,\\n \\\"Name\\\": \\\"Not selected project\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 1,\\n \\\"Name\\\": \\\"Create project\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 1,\\n \\\"Name\\\": \\\"Create project without name\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 1,\\n \\\"Name\\\": \\\"Check if overview contains project\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 1,\\n \\\"Name\\\": \\\"Edit project\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\n################################################\n\n###\n\npayload = \"{\\n \\\"TestSuite_id\\\": 2,\\n \\\"Name\\\": \\\"Create project\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 2,\\n \\\"Name\\\": \\\"Create requirement\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 2,\\n \\\"Name\\\": \\\"Create requirement without name\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 2,\\n \\\"Name\\\": \\\"Overview contains requirement\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 2,\\n \\\"Name\\\": \\\"Edit requirement\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 2,\\n \\\"Name\\\": \\\"Cover requirement\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\n################################################\npayload = \"{\\n \\\"Name\\\": \\\"Create and edit TestSuites and TestCase\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testsuites\", payload, 
headers)\n###\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 3,\\n \\\"Name\\\": \\\"Create test suite\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 3,\\n \\\"Name\\\": \\\"Create test suite without name\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 3,\\n \\\"Name\\\": \\\"Check if overview contains suite\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 3,\\n \\\"Name\\\": \\\"Edit test suite\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 3,\\n \\\"Name\\\": \\\"Create test case without details\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 3,\\n \\\"Name\\\": \\\"Create test case with details\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 3,\\n \\\"Name\\\": \\\"Create test case without name\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 3,\\n \\\"Name\\\": \\\"Check if overview contains case\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 3,\\n \\\"Name\\\": \\\"Edit test case\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\n################################################\npayload = \"{\\n \\\"Name\\\": \\\"Create test set and run\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testsuites\", payload, headers)\n###\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 4,\\n \\\"Name\\\": \\\"Create project\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 4,\\n \\\"Name\\\": \\\"Create set\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 4,\\n \\\"Name\\\": \\\"Overview contains set\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 4,\\n \\\"Name\\\": \\\"Create set without name\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 4,\\n \\\"Name\\\": \\\"Create set without tests\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 4,\\n \\\"Name\\\": \\\"Edit test set\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 4,\\n \\\"Name\\\": \\\"Create test 
run\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 4,\\n \\\"Name\\\": \\\"Overview contains run\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 4,\\n \\\"Name\\\": \\\"Execute contains tests\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\n\n################################################\npayload = \"{\\n \\\"Name\\\": \\\"Registration and log test\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testsuites\", payload, headers)\n###\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 5,\\n \\\"Name\\\": \\\"Redirect to login page\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 5,\\n \\\"Name\\\": \\\"Registration\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 5,\\n \\\"Name\\\": \\\"Registrate same user\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n\nres = conn.getresponse()\ndata = res.read()\n\npayload = \"{\\n \\\"TestSuite_id\\\": 5,\\n \\\"Name\\\": \\\"Log and logout\\\"\\n}\"\nconn.request(\"POST\", \"/api/v1/testcases\", payload, headers)\n",
"import http.client\nhost = 'localhost:8000'\napi_token = 'fuukp8LhdxxwoVdtJu5K8LQtpTods8ddLMq66wSUFXGsqJKpmJAa1YyqkHN3'\nconn = http.client.HTTPConnection(host)\nheaders = {'authorization': 'Bearer ' + api_token, 'content-type':\n 'application/json', 'cache-control': 'no-cache', 'postman-token':\n '44709a5c-ca4a-bbce-4b24-f0632a29bde4'}\npayload = \"\"\"{\n \"Name\": \"Create and edit project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testsuites', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"Name\": \"Create and edit requirement\"\n}\"\"\"\nconn.request('POST', '/api/v1/testsuites', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 1,\n \"Name\": \"Not selected project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 1,\n \"Name\": \"Create project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 1,\n \"Name\": \"Create project without name\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 1,\n \"Name\": \"Check if overview contains project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 1,\n \"Name\": \"Edit project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Create project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Create requirement\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Create requirement without name\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Overview contains requirement\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Edit requirement\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Cover requirement\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"Name\": \"Create and edit TestSuites and TestCase\"\n}\"\"\"\nconn.request('POST', '/api/v1/testsuites', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test suite\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test suite without name\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Check if overview contains suite\"\n}\"\"\"\nconn.request('POST', 
'/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Edit test suite\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test case without details\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test case with details\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test case without name\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Check if overview contains case\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Edit test case\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"Name\": \"Create test set and run\"\n}\"\"\"\nconn.request('POST', '/api/v1/testsuites', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Create project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Create set\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Overview contains set\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Create set without name\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Create set without tests\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Edit test set\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Create test run\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Overview contains run\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Execute contains tests\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"Name\": \"Registration and log test\"\n}\"\"\"\nconn.request('POST', '/api/v1/testsuites', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 5,\n \"Name\": \"Redirect to login page\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = 
res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 5,\n \"Name\": \"Registration\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 5,\n \"Name\": \"Registrate same user\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 5,\n \"Name\": \"Log and logout\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\n",
"<import token>\nhost = 'localhost:8000'\napi_token = 'fuukp8LhdxxwoVdtJu5K8LQtpTods8ddLMq66wSUFXGsqJKpmJAa1YyqkHN3'\nconn = http.client.HTTPConnection(host)\nheaders = {'authorization': 'Bearer ' + api_token, 'content-type':\n 'application/json', 'cache-control': 'no-cache', 'postman-token':\n '44709a5c-ca4a-bbce-4b24-f0632a29bde4'}\npayload = \"\"\"{\n \"Name\": \"Create and edit project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testsuites', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"Name\": \"Create and edit requirement\"\n}\"\"\"\nconn.request('POST', '/api/v1/testsuites', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 1,\n \"Name\": \"Not selected project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 1,\n \"Name\": \"Create project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 1,\n \"Name\": \"Create project without name\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 1,\n \"Name\": \"Check if overview contains project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 1,\n \"Name\": \"Edit project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Create project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Create requirement\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Create requirement without name\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Overview contains requirement\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Edit requirement\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 2,\n \"Name\": \"Cover requirement\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"Name\": \"Create and edit TestSuites and TestCase\"\n}\"\"\"\nconn.request('POST', '/api/v1/testsuites', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test suite\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test suite without name\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Check if overview contains suite\"\n}\"\"\"\nconn.request('POST', 
'/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Edit test suite\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test case without details\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test case with details\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Create test case without name\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Check if overview contains case\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 3,\n \"Name\": \"Edit test case\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"Name\": \"Create test set and run\"\n}\"\"\"\nconn.request('POST', '/api/v1/testsuites', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Create project\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Create set\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Overview contains set\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Create set without name\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Create set without tests\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Edit test set\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Create test run\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Overview contains run\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 4,\n \"Name\": \"Execute contains tests\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"Name\": \"Registration and log test\"\n}\"\"\"\nconn.request('POST', '/api/v1/testsuites', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 5,\n \"Name\": \"Redirect to login page\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = 
res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 5,\n \"Name\": \"Registration\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 5,\n \"Name\": \"Registrate same user\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\nres = conn.getresponse()\ndata = res.read()\npayload = \"\"\"{\n \"TestSuite_id\": 5,\n \"Name\": \"Log and logout\"\n}\"\"\"\nconn.request('POST', '/api/v1/testcases', payload, headers)\n",
"<import token>\n<assignment token>\nconn.request('POST', '/api/v1/testsuites', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testsuites', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testsuites', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testsuites', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testsuites', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n<assignment token>\nconn.request('POST', '/api/v1/testcases', payload, headers)\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
937 |
5bd8cee2595215fda6ab523a646cf918e3d84a50
|
from django.urls import path, include
from . import views
from user.views import DetailsChangeView, HomeView, PasswordChangeView, SignUpView, LoginView, SettingsView, LogoutView, CreatePostView, CommentPostView
urlpatterns = [
    path('', HomeView.as_view(), name='HomeView'),
    path('LoginView/', LoginView.as_view(), name='LoginView'),
    path('SignUpView/', SignUpView.as_view(), name='SignUpView'),
    path('SettingsView/', SettingsView.as_view(), name='SettingsView'),
    path('LogoutView/', LogoutView.as_view(), name='LogoutView'),
    path('social_auth/', include('social_django.urls', namespace='social')),
    path('users_list/', views.users_list, name='users_list'),
    path('CreatePostView/', CreatePostView.as_view(), name='CreatePostView'),
    path('like/<int:id>/', views.like, name='like'),
    path('CommentPostView/<int:id>/', CommentPostView.as_view(), name='CommentPostView'),
    path('follow/<int:id>/', views.follow, name='follow'),
    path('followback/<int:id>/', views.followback, name='followback'),
    path('delete_request/<int:id>/', views.delete_request, name='delete_request'),
    path('unfriend/<int:id>/', views.unfriend, name='unfriend'),
    path('friendslist/<int:id>/', views.friendslist, name='friendslist'),
    # path('FollowListView/<int:id>/', FollowListView.as_view(), name='FollowListView'),
    path('PasswordChangeView/', PasswordChangeView.as_view(), name='PasswordChangeView'),
    path('DetailsChangeView/', DetailsChangeView.as_view(), name='DetailsChangeView'),
    path('user_profile_view/<int:id>/', views.user_profile_view, name='user_profile_view'),
    path('start_chat/<int:id>/', views.start_chat, name='start_chat'),
    path('search_function/', views.search_function, name='search_function'),
]
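
# Hedged sketch (assumed project-level urls.py, not part of this file): these
# routes would typically be mounted from the site URLconf, e.g.
#   from django.urls import include, path
#   urlpatterns = [path('', include('user.urls'))]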
|
[
"from django.urls import path,include\nfrom.import views\nfrom user.views import DetailsChangeView, HomeView, PasswordChangeView,SignUpView,LoginView,SettingsView,LogoutView,CreatePostView,CommentPostView,PasswordChangeView\n\nurlpatterns = [\n path('', HomeView.as_view(), name = 'HomeView'),\n path('LoginView/', LoginView.as_view(), name = 'LoginView'),\n path('SignUpView/',SignUpView.as_view(), name = 'SignUpView' ),\n path('SettingsView/', SettingsView.as_view(), name = 'SettingsView'),\n path('LogoutView/', LogoutView.as_view(), name = 'LogoutView'),\n path('social_auth/', include('social_django.urls', namespace = 'social')),\n path('users_list/', views.users_list, name = 'users_list'),\n path('CreatePostView/', CreatePostView.as_view(), name = 'CreatePostView'),\n path('like/<int:id>/', views.like , name = 'like'),\n path('CommentPostView/<int:id>/', CommentPostView.as_view(), name = 'CommentPostView'),\n path('follow/<int:id>/', views.follow , name = 'follow'),\n path('followback/<int:id>/', views.followback, name = 'followback'),\n path('delete_request/<int:id>/',views.delete_request, name = 'delete_request'),\n path('unfriend/<int:id>/', views.unfriend, name = 'unfriend'),\n path('friendslist/<int:id>/',views.friendslist, name = 'friendslist'),\n # path('FollowListView/<int:id>/',FollowListView.as_view(), name = 'FollowListView')\n path('PasswordChangeView/', PasswordChangeView.as_view(), name = 'PasswordChangeView'),\n path('DetailsChangeView/', DetailsChangeView.as_view(), name= 'DetailsChangeView'),\n path('user_profile_view/<int:id>/',views.user_profile_view, name = 'user_profile_view'),\n path('start_chat/<int:id>/', views.start_chat, name= 'start_chat'),\n path('search_function/', views.search_function, name='search_function')\n \n \n]",
"from django.urls import path, include\nfrom . import views\nfrom user.views import DetailsChangeView, HomeView, PasswordChangeView, SignUpView, LoginView, SettingsView, LogoutView, CreatePostView, CommentPostView, PasswordChangeView\nurlpatterns = [path('', HomeView.as_view(), name='HomeView'), path(\n 'LoginView/', LoginView.as_view(), name='LoginView'), path(\n 'SignUpView/', SignUpView.as_view(), name='SignUpView'), path(\n 'SettingsView/', SettingsView.as_view(), name='SettingsView'), path(\n 'LogoutView/', LogoutView.as_view(), name='LogoutView'), path(\n 'social_auth/', include('social_django.urls', namespace='social')),\n path('users_list/', views.users_list, name='users_list'), path(\n 'CreatePostView/', CreatePostView.as_view(), name='CreatePostView'),\n path('like/<int:id>/', views.like, name='like'), path(\n 'CommentPostView/<int:id>/', CommentPostView.as_view(), name=\n 'CommentPostView'), path('follow/<int:id>/', views.follow, name=\n 'follow'), path('followback/<int:id>/', views.followback, name=\n 'followback'), path('delete_request/<int:id>/', views.delete_request,\n name='delete_request'), path('unfriend/<int:id>/', views.unfriend, name\n ='unfriend'), path('friendslist/<int:id>/', views.friendslist, name=\n 'friendslist'), path('PasswordChangeView/', PasswordChangeView.as_view(\n ), name='PasswordChangeView'), path('DetailsChangeView/',\n DetailsChangeView.as_view(), name='DetailsChangeView'), path(\n 'user_profile_view/<int:id>/', views.user_profile_view, name=\n 'user_profile_view'), path('start_chat/<int:id>/', views.start_chat,\n name='start_chat'), path('search_function/', views.search_function,\n name='search_function')]\n",
"<import token>\nurlpatterns = [path('', HomeView.as_view(), name='HomeView'), path(\n 'LoginView/', LoginView.as_view(), name='LoginView'), path(\n 'SignUpView/', SignUpView.as_view(), name='SignUpView'), path(\n 'SettingsView/', SettingsView.as_view(), name='SettingsView'), path(\n 'LogoutView/', LogoutView.as_view(), name='LogoutView'), path(\n 'social_auth/', include('social_django.urls', namespace='social')),\n path('users_list/', views.users_list, name='users_list'), path(\n 'CreatePostView/', CreatePostView.as_view(), name='CreatePostView'),\n path('like/<int:id>/', views.like, name='like'), path(\n 'CommentPostView/<int:id>/', CommentPostView.as_view(), name=\n 'CommentPostView'), path('follow/<int:id>/', views.follow, name=\n 'follow'), path('followback/<int:id>/', views.followback, name=\n 'followback'), path('delete_request/<int:id>/', views.delete_request,\n name='delete_request'), path('unfriend/<int:id>/', views.unfriend, name\n ='unfriend'), path('friendslist/<int:id>/', views.friendslist, name=\n 'friendslist'), path('PasswordChangeView/', PasswordChangeView.as_view(\n ), name='PasswordChangeView'), path('DetailsChangeView/',\n DetailsChangeView.as_view(), name='DetailsChangeView'), path(\n 'user_profile_view/<int:id>/', views.user_profile_view, name=\n 'user_profile_view'), path('start_chat/<int:id>/', views.start_chat,\n name='start_chat'), path('search_function/', views.search_function,\n name='search_function')]\n",
"<import token>\n<assignment token>\n"
] | false |
938 |
18dce1ce683b15201dbb5436cbd4288a0df99c28
|
from const import BORN_KEY, PRESIDENT_KEY, CAPITAL_KEY, PRIME_KEY, MINISTER_KEY, POPULATION_KEY, \
GOVERNMENT_KEY,AREA_KEY, WHO_KEY, IS_KEY, THE_KEY, OF_KEY, WHAT_KEY, WHEN_KEY, WAS_KEY
from geq_queries import capital_of_country_query, area_of_country_query, government_of_country_query, \
population_of_country_query, \
president_of_country_query, prime_minister_of_country_query, prime_minister_born_date_query, \
president_born_date_query, who_query
def get_last_argument(words):
    # Re-join the remaining words and drop the trailing '?' character.
    return ' '.join(words)[:-1]
def parse_who_is(words):
question_number = None
arg = None
if len(words) > 5 and (words[3] == PRIME_KEY or words[3] == PRESIDENT_KEY):
# can be i, ii
if words[3] == PRESIDENT_KEY and words[4] == OF_KEY:
question_number = 1
arg = get_last_argument(words[5:])
elif words[3] == PRIME_KEY and words[4] == MINISTER_KEY and words[5] == OF_KEY:
question_number = 2
arg = get_last_argument(words[6:])
    elif len(words) > 2:
        # ix
        question_number = 9
        arg = get_last_argument(words[2:])
return question_number, arg
def parse_what_is_the(words):
    question_number = None
    arg = None
    # can be iii, iv, v, vi
    if words[3] == POPULATION_KEY:
        # iii
        question_number = 3
        arg = get_last_argument(words[5:])
    elif words[3] == AREA_KEY:
        # iv
        question_number = 4
        arg = get_last_argument(words[5:])
    elif words[3] == GOVERNMENT_KEY:
        # v
        question_number = 5
        arg = get_last_argument(words[5:])
    elif words[3] == CAPITAL_KEY:
        # vi
        question_number = 6
        arg = get_last_argument(words[5:])
    return question_number, arg
def parse_when_was_the(words):
    question_number = None
    arg = None
    # can be vii, viii
    if words[3] == PRESIDENT_KEY and words[4] == OF_KEY and words[-1] == BORN_KEY:
        # vii
        question_number = 7
        arg = get_last_argument(words[5:-1])
    elif words[3] == PRIME_KEY and words[4] == MINISTER_KEY and words[5] == OF_KEY and words[-1] == BORN_KEY:
        # viii
        question_number = 8
        arg = get_last_argument(words[6:-1])
    return question_number, arg
def parse_user_question(string):
question_number = None
arg = None
words = string.lower().split(" ")
    if len(words) < 3 or not words[-1].endswith('?'):
return question_number, arg
if words[0] == WHO_KEY and words[1] == IS_KEY:
# can be only i, ii, ix
question_number, arg = parse_who_is(words)
elif len(words) > 5 and words[0] == WHAT_KEY and words[1] == IS_KEY and words[2] == THE_KEY and words[4] == OF_KEY:
question_number, arg = parse_what_is_the(words)
elif len(words) > 6 and words[0] == WHEN_KEY and words[1] == WAS_KEY and words[2] == THE_KEY:
question_number, arg = parse_when_was_the(words)
return question_number, arg
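
# Illustrative examples (assuming the const keys are the lowercase English
# words, e.g. WHO_KEY == 'who'; the sample questions are assumptions):
#   parse_user_question('Who is the president of France?')  -> (1, 'france')
#   parse_user_question('What is the population of Japan?') -> (3, 'japan')
#   parse_user_question('Who is Obama?')                    -> (9, 'obama')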
def do_request(question, arg):
    # Dispatch the parsed question number to the matching query helper.
    if question == 1:
        print(president_of_country_query(arg))
elif question == 2:
print(prime_minister_of_country_query(arg))
elif question == 3:
print(population_of_country_query(arg))
elif question == 4:
print(area_of_country_query(arg))
elif question == 5:
print(government_of_country_query(arg))
elif question == 6:
print(capital_of_country_query(arg))
elif question == 7:
print(president_born_date_query(arg))
elif question == 8:
print(prime_minister_born_date_query(arg))
elif question == 9:
print(who_query(arg))
else:
print("ERROR")
def start_console(question):
question, arg = parse_user_question(question)
if question is None or arg is None:
print('Invalid question, please enter new question.')
else:
do_request(question, arg)
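# Minimal console-loop sketch (an assumption, not part of the original module):
# feeds stdin lines through start_console until EOF or Ctrl-C, using only the
# functions defined above.
if __name__ == '__main__':
    try:
        while True:
            start_console(input('question> '))
    except (EOFError, KeyboardInterrupt):
        pass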
|
[
"from const import BORN_KEY, PRESIDENT_KEY, CAPITAL_KEY, PRIME_KEY, MINISTER_KEY, POPULATION_KEY, \\\n GOVERNMENT_KEY,AREA_KEY, WHO_KEY, IS_KEY, THE_KEY, OF_KEY, WHAT_KEY, WHEN_KEY, WAS_KEY\n\nfrom geq_queries import capital_of_country_query, area_of_country_query, government_of_country_query, \\\n population_of_country_query, \\\n president_of_country_query, prime_minister_of_country_query, prime_minister_born_date_query, \\\n president_born_date_query, who_query\n\n\ndef get_last_argument(words):\n return ' '.join(words)[:-1]\n\n\ndef parse_who_is(words):\n question_number = None\n arg = None\n if len(words) > 5 and (words[3] == PRIME_KEY or words[3] == PRESIDENT_KEY):\n # can be i, ii\n if words[3] == PRESIDENT_KEY and words[4] == OF_KEY:\n question_number = 1\n arg = get_last_argument(words[5:])\n elif words[3] == PRIME_KEY and words[4] == MINISTER_KEY and words[5] == OF_KEY:\n question_number = 2\n arg = get_last_argument(words[6:])\n elif len(words) > 2:\n question_number = 9\n arg = get_last_argument(words[2:])\n return question_number, arg\n\n\ndef parse_what_is_the(words):\n question_number = None\n arg = None\n # can be iii, iv, v ,vi\n if words[3] == POPULATION_KEY:\n # iii\n question_number = 3\n arg = get_last_argument(words[5:])\n elif words[3] == AREA_KEY:\n # iv\n question_number = 4\n arg = get_last_argument(words[5:])\n elif words[3] == GOVERNMENT_KEY:\n question_number = 5\n arg = get_last_argument(words[5:])\n # v\n elif words[3] == CAPITAL_KEY:\n # vi\n question_number = 6\n arg = get_last_argument(words[5:])\n return question_number, arg\n\n\ndef parse_when_was_the(words):\n question_number = None\n arg = None\n # can be vii, viii\n if words[3] == PRESIDENT_KEY and words[4] == OF_KEY and words[len(words) - 1] == BORN_KEY:\n question_number = 7\n arg = get_last_argument(words[5:len(words) - 1])\n # can be vii\n elif words[3] == PRIME_KEY and words[4] == MINISTER_KEY and words[5] == OF_KEY and words[len(words) - 1] == BORN_KEY:\n question_number = 8\n arg = get_last_argument(words[6:len(words) - 1])\n return question_number, arg\n\n\ndef parse_user_question(string):\n question_number = None\n arg = None\n words = string.lower().split(\" \")\n if len(words) == 0 or len(words) < 3 or words[len(words) - 1][-1] != '?':\n return question_number, arg\n if words[0] == WHO_KEY and words[1] == IS_KEY:\n # can be only i, ii, ix\n question_number, arg = parse_who_is(words)\n elif len(words) > 5 and words[0] == WHAT_KEY and words[1] == IS_KEY and words[2] == THE_KEY and words[4] == OF_KEY:\n question_number, arg = parse_what_is_the(words)\n elif len(words) > 6 and words[0] == WHEN_KEY and words[1] == WAS_KEY and words[2] == THE_KEY:\n question_number, arg = parse_when_was_the(words)\n return question_number, arg\n\n\ndef do_request(question, arg):\n ans = None\n if question == 1:\n print(president_of_country_query(arg))\n elif question == 2:\n print(prime_minister_of_country_query(arg))\n elif question == 3:\n print(population_of_country_query(arg))\n elif question == 4:\n print(area_of_country_query(arg))\n elif question == 5:\n print(government_of_country_query(arg))\n elif question == 6:\n print(capital_of_country_query(arg))\n elif question == 7:\n print(president_born_date_query(arg))\n elif question == 8:\n print(prime_minister_born_date_query(arg))\n elif question == 9:\n print(who_query(arg))\n else:\n print(\"ERROR\")\n\n\ndef start_console(question):\n question, arg = parse_user_question(question)\n if question is None or arg is None:\n print('Invalid question, please 
enter new question.')\n else:\n do_request(question, arg)\n\n",
"from const import BORN_KEY, PRESIDENT_KEY, CAPITAL_KEY, PRIME_KEY, MINISTER_KEY, POPULATION_KEY, GOVERNMENT_KEY, AREA_KEY, WHO_KEY, IS_KEY, THE_KEY, OF_KEY, WHAT_KEY, WHEN_KEY, WAS_KEY\nfrom geq_queries import capital_of_country_query, area_of_country_query, government_of_country_query, population_of_country_query, president_of_country_query, prime_minister_of_country_query, prime_minister_born_date_query, president_born_date_query, who_query\n\n\ndef get_last_argument(words):\n return ' '.join(words)[:-1]\n\n\ndef parse_who_is(words):\n question_number = None\n arg = None\n if len(words) > 5 and (words[3] == PRIME_KEY or words[3] == PRESIDENT_KEY):\n if words[3] == PRESIDENT_KEY and words[4] == OF_KEY:\n question_number = 1\n arg = get_last_argument(words[5:])\n elif words[3] == PRIME_KEY and words[4] == MINISTER_KEY and words[5\n ] == OF_KEY:\n question_number = 2\n arg = get_last_argument(words[6:])\n elif len(words) > 2:\n question_number = 9\n arg = get_last_argument(words[2:])\n return question_number, arg\n\n\ndef parse_what_is_the(words):\n question_number = None\n arg = None\n if words[3] == POPULATION_KEY:\n question_number = 3\n arg = get_last_argument(words[5:])\n elif words[3] == AREA_KEY:\n question_number = 4\n arg = get_last_argument(words[5:])\n elif words[3] == GOVERNMENT_KEY:\n question_number = 5\n arg = get_last_argument(words[5:])\n elif words[3] == CAPITAL_KEY:\n question_number = 6\n arg = get_last_argument(words[5:])\n return question_number, arg\n\n\ndef parse_when_was_the(words):\n question_number = None\n arg = None\n if words[3] == PRESIDENT_KEY and words[4] == OF_KEY and words[len(words\n ) - 1] == BORN_KEY:\n question_number = 7\n arg = get_last_argument(words[5:len(words) - 1])\n elif words[3] == PRIME_KEY and words[4] == MINISTER_KEY and words[5\n ] == OF_KEY and words[len(words) - 1] == BORN_KEY:\n question_number = 8\n arg = get_last_argument(words[6:len(words) - 1])\n return question_number, arg\n\n\ndef parse_user_question(string):\n question_number = None\n arg = None\n words = string.lower().split(' ')\n if len(words) == 0 or len(words) < 3 or words[len(words) - 1][-1] != '?':\n return question_number, arg\n if words[0] == WHO_KEY and words[1] == IS_KEY:\n question_number, arg = parse_who_is(words)\n elif len(words) > 5 and words[0] == WHAT_KEY and words[1\n ] == IS_KEY and words[2] == THE_KEY and words[4] == OF_KEY:\n question_number, arg = parse_what_is_the(words)\n elif len(words) > 6 and words[0] == WHEN_KEY and words[1\n ] == WAS_KEY and words[2] == THE_KEY:\n question_number, arg = parse_when_was_the(words)\n return question_number, arg\n\n\ndef do_request(question, arg):\n ans = None\n if question == 1:\n print(president_of_country_query(arg))\n elif question == 2:\n print(prime_minister_of_country_query(arg))\n elif question == 3:\n print(population_of_country_query(arg))\n elif question == 4:\n print(area_of_country_query(arg))\n elif question == 5:\n print(government_of_country_query(arg))\n elif question == 6:\n print(capital_of_country_query(arg))\n elif question == 7:\n print(president_born_date_query(arg))\n elif question == 8:\n print(prime_minister_born_date_query(arg))\n elif question == 9:\n print(who_query(arg))\n else:\n print('ERROR')\n\n\ndef start_console(question):\n question, arg = parse_user_question(question)\n if question is None or arg is None:\n print('Invalid question, please enter new question.')\n else:\n do_request(question, arg)\n",
"<import token>\n\n\ndef get_last_argument(words):\n return ' '.join(words)[:-1]\n\n\ndef parse_who_is(words):\n question_number = None\n arg = None\n if len(words) > 5 and (words[3] == PRIME_KEY or words[3] == PRESIDENT_KEY):\n if words[3] == PRESIDENT_KEY and words[4] == OF_KEY:\n question_number = 1\n arg = get_last_argument(words[5:])\n elif words[3] == PRIME_KEY and words[4] == MINISTER_KEY and words[5\n ] == OF_KEY:\n question_number = 2\n arg = get_last_argument(words[6:])\n elif len(words) > 2:\n question_number = 9\n arg = get_last_argument(words[2:])\n return question_number, arg\n\n\ndef parse_what_is_the(words):\n question_number = None\n arg = None\n if words[3] == POPULATION_KEY:\n question_number = 3\n arg = get_last_argument(words[5:])\n elif words[3] == AREA_KEY:\n question_number = 4\n arg = get_last_argument(words[5:])\n elif words[3] == GOVERNMENT_KEY:\n question_number = 5\n arg = get_last_argument(words[5:])\n elif words[3] == CAPITAL_KEY:\n question_number = 6\n arg = get_last_argument(words[5:])\n return question_number, arg\n\n\ndef parse_when_was_the(words):\n question_number = None\n arg = None\n if words[3] == PRESIDENT_KEY and words[4] == OF_KEY and words[len(words\n ) - 1] == BORN_KEY:\n question_number = 7\n arg = get_last_argument(words[5:len(words) - 1])\n elif words[3] == PRIME_KEY and words[4] == MINISTER_KEY and words[5\n ] == OF_KEY and words[len(words) - 1] == BORN_KEY:\n question_number = 8\n arg = get_last_argument(words[6:len(words) - 1])\n return question_number, arg\n\n\ndef parse_user_question(string):\n question_number = None\n arg = None\n words = string.lower().split(' ')\n if len(words) == 0 or len(words) < 3 or words[len(words) - 1][-1] != '?':\n return question_number, arg\n if words[0] == WHO_KEY and words[1] == IS_KEY:\n question_number, arg = parse_who_is(words)\n elif len(words) > 5 and words[0] == WHAT_KEY and words[1\n ] == IS_KEY and words[2] == THE_KEY and words[4] == OF_KEY:\n question_number, arg = parse_what_is_the(words)\n elif len(words) > 6 and words[0] == WHEN_KEY and words[1\n ] == WAS_KEY and words[2] == THE_KEY:\n question_number, arg = parse_when_was_the(words)\n return question_number, arg\n\n\ndef do_request(question, arg):\n ans = None\n if question == 1:\n print(president_of_country_query(arg))\n elif question == 2:\n print(prime_minister_of_country_query(arg))\n elif question == 3:\n print(population_of_country_query(arg))\n elif question == 4:\n print(area_of_country_query(arg))\n elif question == 5:\n print(government_of_country_query(arg))\n elif question == 6:\n print(capital_of_country_query(arg))\n elif question == 7:\n print(president_born_date_query(arg))\n elif question == 8:\n print(prime_minister_born_date_query(arg))\n elif question == 9:\n print(who_query(arg))\n else:\n print('ERROR')\n\n\ndef start_console(question):\n question, arg = parse_user_question(question)\n if question is None or arg is None:\n print('Invalid question, please enter new question.')\n else:\n do_request(question, arg)\n",
"<import token>\n\n\ndef get_last_argument(words):\n return ' '.join(words)[:-1]\n\n\ndef parse_who_is(words):\n question_number = None\n arg = None\n if len(words) > 5 and (words[3] == PRIME_KEY or words[3] == PRESIDENT_KEY):\n if words[3] == PRESIDENT_KEY and words[4] == OF_KEY:\n question_number = 1\n arg = get_last_argument(words[5:])\n elif words[3] == PRIME_KEY and words[4] == MINISTER_KEY and words[5\n ] == OF_KEY:\n question_number = 2\n arg = get_last_argument(words[6:])\n elif len(words) > 2:\n question_number = 9\n arg = get_last_argument(words[2:])\n return question_number, arg\n\n\ndef parse_what_is_the(words):\n question_number = None\n arg = None\n if words[3] == POPULATION_KEY:\n question_number = 3\n arg = get_last_argument(words[5:])\n elif words[3] == AREA_KEY:\n question_number = 4\n arg = get_last_argument(words[5:])\n elif words[3] == GOVERNMENT_KEY:\n question_number = 5\n arg = get_last_argument(words[5:])\n elif words[3] == CAPITAL_KEY:\n question_number = 6\n arg = get_last_argument(words[5:])\n return question_number, arg\n\n\ndef parse_when_was_the(words):\n question_number = None\n arg = None\n if words[3] == PRESIDENT_KEY and words[4] == OF_KEY and words[len(words\n ) - 1] == BORN_KEY:\n question_number = 7\n arg = get_last_argument(words[5:len(words) - 1])\n elif words[3] == PRIME_KEY and words[4] == MINISTER_KEY and words[5\n ] == OF_KEY and words[len(words) - 1] == BORN_KEY:\n question_number = 8\n arg = get_last_argument(words[6:len(words) - 1])\n return question_number, arg\n\n\ndef parse_user_question(string):\n question_number = None\n arg = None\n words = string.lower().split(' ')\n if len(words) == 0 or len(words) < 3 or words[len(words) - 1][-1] != '?':\n return question_number, arg\n if words[0] == WHO_KEY and words[1] == IS_KEY:\n question_number, arg = parse_who_is(words)\n elif len(words) > 5 and words[0] == WHAT_KEY and words[1\n ] == IS_KEY and words[2] == THE_KEY and words[4] == OF_KEY:\n question_number, arg = parse_what_is_the(words)\n elif len(words) > 6 and words[0] == WHEN_KEY and words[1\n ] == WAS_KEY and words[2] == THE_KEY:\n question_number, arg = parse_when_was_the(words)\n return question_number, arg\n\n\n<function token>\n\n\ndef start_console(question):\n question, arg = parse_user_question(question)\n if question is None or arg is None:\n print('Invalid question, please enter new question.')\n else:\n do_request(question, arg)\n",
"<import token>\n\n\ndef get_last_argument(words):\n return ' '.join(words)[:-1]\n\n\ndef parse_who_is(words):\n question_number = None\n arg = None\n if len(words) > 5 and (words[3] == PRIME_KEY or words[3] == PRESIDENT_KEY):\n if words[3] == PRESIDENT_KEY and words[4] == OF_KEY:\n question_number = 1\n arg = get_last_argument(words[5:])\n elif words[3] == PRIME_KEY and words[4] == MINISTER_KEY and words[5\n ] == OF_KEY:\n question_number = 2\n arg = get_last_argument(words[6:])\n elif len(words) > 2:\n question_number = 9\n arg = get_last_argument(words[2:])\n return question_number, arg\n\n\ndef parse_what_is_the(words):\n question_number = None\n arg = None\n if words[3] == POPULATION_KEY:\n question_number = 3\n arg = get_last_argument(words[5:])\n elif words[3] == AREA_KEY:\n question_number = 4\n arg = get_last_argument(words[5:])\n elif words[3] == GOVERNMENT_KEY:\n question_number = 5\n arg = get_last_argument(words[5:])\n elif words[3] == CAPITAL_KEY:\n question_number = 6\n arg = get_last_argument(words[5:])\n return question_number, arg\n\n\n<function token>\n\n\ndef parse_user_question(string):\n question_number = None\n arg = None\n words = string.lower().split(' ')\n if len(words) == 0 or len(words) < 3 or words[len(words) - 1][-1] != '?':\n return question_number, arg\n if words[0] == WHO_KEY and words[1] == IS_KEY:\n question_number, arg = parse_who_is(words)\n elif len(words) > 5 and words[0] == WHAT_KEY and words[1\n ] == IS_KEY and words[2] == THE_KEY and words[4] == OF_KEY:\n question_number, arg = parse_what_is_the(words)\n elif len(words) > 6 and words[0] == WHEN_KEY and words[1\n ] == WAS_KEY and words[2] == THE_KEY:\n question_number, arg = parse_when_was_the(words)\n return question_number, arg\n\n\n<function token>\n\n\ndef start_console(question):\n question, arg = parse_user_question(question)\n if question is None or arg is None:\n print('Invalid question, please enter new question.')\n else:\n do_request(question, arg)\n",
"<import token>\n\n\ndef get_last_argument(words):\n return ' '.join(words)[:-1]\n\n\ndef parse_who_is(words):\n question_number = None\n arg = None\n if len(words) > 5 and (words[3] == PRIME_KEY or words[3] == PRESIDENT_KEY):\n if words[3] == PRESIDENT_KEY and words[4] == OF_KEY:\n question_number = 1\n arg = get_last_argument(words[5:])\n elif words[3] == PRIME_KEY and words[4] == MINISTER_KEY and words[5\n ] == OF_KEY:\n question_number = 2\n arg = get_last_argument(words[6:])\n elif len(words) > 2:\n question_number = 9\n arg = get_last_argument(words[2:])\n return question_number, arg\n\n\ndef parse_what_is_the(words):\n question_number = None\n arg = None\n if words[3] == POPULATION_KEY:\n question_number = 3\n arg = get_last_argument(words[5:])\n elif words[3] == AREA_KEY:\n question_number = 4\n arg = get_last_argument(words[5:])\n elif words[3] == GOVERNMENT_KEY:\n question_number = 5\n arg = get_last_argument(words[5:])\n elif words[3] == CAPITAL_KEY:\n question_number = 6\n arg = get_last_argument(words[5:])\n return question_number, arg\n\n\n<function token>\n\n\ndef parse_user_question(string):\n question_number = None\n arg = None\n words = string.lower().split(' ')\n if len(words) == 0 or len(words) < 3 or words[len(words) - 1][-1] != '?':\n return question_number, arg\n if words[0] == WHO_KEY and words[1] == IS_KEY:\n question_number, arg = parse_who_is(words)\n elif len(words) > 5 and words[0] == WHAT_KEY and words[1\n ] == IS_KEY and words[2] == THE_KEY and words[4] == OF_KEY:\n question_number, arg = parse_what_is_the(words)\n elif len(words) > 6 and words[0] == WHEN_KEY and words[1\n ] == WAS_KEY and words[2] == THE_KEY:\n question_number, arg = parse_when_was_the(words)\n return question_number, arg\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\ndef get_last_argument(words):\n return ' '.join(words)[:-1]\n\n\ndef parse_who_is(words):\n question_number = None\n arg = None\n if len(words) > 5 and (words[3] == PRIME_KEY or words[3] == PRESIDENT_KEY):\n if words[3] == PRESIDENT_KEY and words[4] == OF_KEY:\n question_number = 1\n arg = get_last_argument(words[5:])\n elif words[3] == PRIME_KEY and words[4] == MINISTER_KEY and words[5\n ] == OF_KEY:\n question_number = 2\n arg = get_last_argument(words[6:])\n elif len(words) > 2:\n question_number = 9\n arg = get_last_argument(words[2:])\n return question_number, arg\n\n\ndef parse_what_is_the(words):\n question_number = None\n arg = None\n if words[3] == POPULATION_KEY:\n question_number = 3\n arg = get_last_argument(words[5:])\n elif words[3] == AREA_KEY:\n question_number = 4\n arg = get_last_argument(words[5:])\n elif words[3] == GOVERNMENT_KEY:\n question_number = 5\n arg = get_last_argument(words[5:])\n elif words[3] == CAPITAL_KEY:\n question_number = 6\n arg = get_last_argument(words[5:])\n return question_number, arg\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n\n\ndef get_last_argument(words):\n return ' '.join(words)[:-1]\n\n\n<function token>\n\n\ndef parse_what_is_the(words):\n question_number = None\n arg = None\n if words[3] == POPULATION_KEY:\n question_number = 3\n arg = get_last_argument(words[5:])\n elif words[3] == AREA_KEY:\n question_number = 4\n arg = get_last_argument(words[5:])\n elif words[3] == GOVERNMENT_KEY:\n question_number = 5\n arg = get_last_argument(words[5:])\n elif words[3] == CAPITAL_KEY:\n question_number = 6\n arg = get_last_argument(words[5:])\n return question_number, arg\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n\n\ndef parse_what_is_the(words):\n question_number = None\n arg = None\n if words[3] == POPULATION_KEY:\n question_number = 3\n arg = get_last_argument(words[5:])\n elif words[3] == AREA_KEY:\n question_number = 4\n arg = get_last_argument(words[5:])\n elif words[3] == GOVERNMENT_KEY:\n question_number = 5\n arg = get_last_argument(words[5:])\n elif words[3] == CAPITAL_KEY:\n question_number = 6\n arg = get_last_argument(words[5:])\n return question_number, arg\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
939 |
1968923cd923e68dc5ff2148802f18e40a5e6c33
|
'''
Created on Nov 16, 2013
@author: mo
'''
import unittest
from Board import TicTacToe_Board
from ComputerPlayer import ComputerPlayer
from utils import debug_print as d_pr
from main import StartNewGame
class Test(unittest.TestCase):
def setUp(self):
self.the_board = TicTacToe_Board()
def tearDown(self):
pass
    # These may be impossible boards, but they still exercise the win detector.
def test_these_should_win_for_x(self):
self.assertEqual(TicTacToe_Board.IsWinningBoard_static( [ ['x', 'x', 'x'],
['o', 'x', 'o'],
['o', 'x', 'o']]), 'x', "should return x")
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([
['x', 'o', 'o'],
['o', 'x', 'o'],
['x', 'o', 'x']
]) , 'x', 'should return x')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([
['o','x', 'o'],
['x', 'x', 'x'],
['-', '-', '-']
]), 'x', 'should return x'
)
def test_these_should_win_for_o(self):
self.assertEqual(TicTacToe_Board.IsWinningBoard_static( [ ['o', 'x', 'o'],
['o', 'x', 'x'],
['o', 'o', 'x']]), 'o', "should return o")
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([
['x', 'o', '-'],
['o', 'o', 'o'],
['o', 'x', 'x']
]) , 'o', 'should return o')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([
['o','x', 'o'],
['x', 'o', 'x'],
['-', '-', 'o']
]), 'o', 'should return o'
)
def test_these_should_win_for_nobody(self):
self.assertEqual(TicTacToe_Board.IsWinningBoard_static( [ ['x', 'x', '-'],
['o', '-', 'o'],
['o', '-', 'o']]), None, "should return None")
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([
['-', '-', '-'],
['-', '-', '-'],
['x', 'o', 'x']
]) , None, 'should return None')
self.assertEqual(TicTacToe_Board.IsWinningBoard_static([
['o','x', 'o'],
['-', '-', 'x'],
['-', 'o', 'o']
]), None, 'should return None'
)
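    # Convenience sketch (an assumption, not used by the tests above): builds a
    # 3x3 board_array from a 9-character string, rows top to bottom, so e.g.
    # board_from_string('xxxoxooxo') equals [['x','x','x'],['o','x','o'],['o','x','o']].
    @staticmethod
    def board_from_string(cells):
        assert len(cells) == 9, 'expected exactly 9 cells'
        return [list(cells[0:3]), list(cells[3:6]), list(cells[6:9])]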
    def test_make_move(self):
        self.the_board.board_array = [['x', '-', 'x'],
                                      ['o', '-', 'o'],
                                      ['o', 'x', '-']]
        self.the_board.whose_turn = 'o'
        self.the_board.MakeMove([1, 1])
        self.assertEqual(self.the_board.board_array[1][1], 'o', 'should be an o')
        self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')
def test_computer_player_get_outcome(self):
comp_player = ComputerPlayer('x', self.the_board)
self.the_board.human_player_x_or_o = 'o'
self.the_board.c_player_x_or_o = 'x'
self.the_board.board_array = [ ['-', '-', 'x'],
['-', 'o', '-'],
['-', '-', '-']
]
self.the_board.whose_turn = 'x'
move_seq_1 = [ {'player': 'x', 'move' : [0,1] }, {'player': 'o', 'move' : [2,1]}, {'player': 'x', 'move': [0,0]} ]
out=self.the_board.GetOutcomeOfMoveSequence(move_seq_1)
self.assertEqual(out, 'x', 'x should win: outcome should be x')
move_seq_2 = [{'player': 'x', 'move' : [0,1] }, {'player': 'o', 'move' : [2,1]}]
out = self.the_board.GetOutcomeOfMoveSequence(move_seq_2)
self.assertEqual(out, None, 'no one should win: outcome will be None')
move_seq_3 = [ {'player': 'x', 'move' : [0,1] }, {'player': 'o', 'move' : [0,0] }, {'player': 'x', 'move' : [2,1]},
{'player': 'o', 'move' : [2,2] }
]
out = self.the_board.GetOutcomeOfMoveSequence(move_seq_3)
self.assertEqual(out, 'o', 'o should win')
def test_get_winning_moves_for_opponent(self):
comp_player = ComputerPlayer('x', self.the_board)
self.the_board.human_player_x_or_o = 'o'
self.the_board.c_player_x_or_o = 'x'
self.the_board.board_array = [ ['x', '-', 'x'],
['-', 'o', '-'],
['o', 'o', '-']
]
self.the_board.whose_turn = 'x'
winning_moves=self.the_board.GetWinningMovesFor( 'human')
d_pr(winning_moves)
self.assertIn([0,1], winning_moves)
self.assertIn([2,2], winning_moves)
comp_player = ComputerPlayer('o', self.the_board)
self.the_board.human_player_x_or_o = 'x'
self.the_board.c_player_x_or_o = 'o'
self.the_board.board_array = [ ['x', '-', 'x'],
['-', 'o', '-'],
['o', 'o', '-']
]
self.the_board.whose_turn = 'o'
winning_moves=self.the_board.GetWinningMovesFor( 'human')
d_pr(winning_moves)
self.assertIn([0,1], winning_moves)
def test_get_threatening_moves(self):
comp_player = ComputerPlayer('x', self.the_board)
self.the_board.human_player_x_or_o = 'o'
self.the_board.c_player_x_or_o = 'x'
self.the_board.board_array = [ ['-', '-', 'x'],
['-', 'o', '-'],
['o', '-', '-']
]
self.the_board.whose_turn = 'x'
threatening_moves=comp_player.GetThreateningMovesWithoutTraps(self.the_board.GetEmptySquares())
self.assertIn([0,0], threatening_moves)
self.assertIn([2,2], threatening_moves)
d_pr('threats without traps: ' + str(threatening_moves))
self.assertEqual(len(threatening_moves), 2)
self.the_board.human_player_x_or_o = 'o'
self.the_board.c_player_x_or_o = 'x'
self.the_board.board_array = [ ['-', '-', 'o'],
['-', 'x', '-'],
['o', '-', '-']
]
self.the_board.whose_turn = 'x'
threatening_moves=comp_player.GetThreateningMovesWithoutTraps(self.the_board.GetEmptySquares())
self.assertIn([0,1], threatening_moves)
self.assertIn([2,1], threatening_moves)
self.assertIn([1,0], threatening_moves)
self.assertIn([1,2], threatening_moves)
d_pr('threats without traps: ' + str(threatening_moves))
self.assertEqual(len(threatening_moves), 4)
    def test_algorithm_by_playing_large_num_of_random_games(self):
        # NUM_GAMES = 100000 exercises the algorithm thoroughly but takes a
        # long time; keep the count small for routine runs.
        NUM_GAMES = 10
        for _ in range(NUM_GAMES):
            win_result = StartNewGame(UseRandom=True)
            self.assertIn(win_result, ('Computer', 'Tie'))
def test_print(self):
self.the_board.board_array = [ ['-', '-', 'x'],
['-', 'o', '-'],
['x', 'o', '-']]
self.the_board.PrintBoardToConsole()
def test_empty_squares(self):
pass
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
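# The suite can also be run via the unittest CLI (the module name below is an
# assumption about how this file is saved):
#   python -m unittest -v tictactoe_tests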
|
[
"'''\nCreated on Nov 16, 2013\n\n@author: mo\n'''\nimport unittest\nfrom Board import TicTacToe_Board\nfrom ComputerPlayer import ComputerPlayer\nfrom utils import debug_print as d_pr\n\nfrom main import StartNewGame\n\nclass Test(unittest.TestCase):\n\n\n def setUp(self):\n self.the_board = TicTacToe_Board()\n\n \n def tearDown(self):\n pass\n\n #these may be impossible boards, but still it tests the win detector\n \n def test_these_should_win_for_x(self):\n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static( [ ['x', 'x', 'x'], \n ['o', 'x', 'o'], \n ['o', 'x', 'o']]), 'x', \"should return x\")\n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([\n ['x', 'o', 'o'],\n ['o', 'x', 'o'],\n ['x', 'o', 'x']\n \n \n ]) , 'x', 'should return x')\n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([\n ['o','x', 'o'],\n ['x', 'x', 'x'],\n ['-', '-', '-']\n ]), 'x', 'should return x'\n )\n \n \n \n def test_these_should_win_for_o(self):\n \n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static( [ ['o', 'x', 'o'], \n ['o', 'x', 'x'], \n ['o', 'o', 'x']]), 'o', \"should return o\")\n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([\n ['x', 'o', '-'],\n ['o', 'o', 'o'],\n ['o', 'x', 'x']\n \n \n ]) , 'o', 'should return o')\n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([\n ['o','x', 'o'],\n ['x', 'o', 'x'],\n ['-', '-', 'o']\n ]), 'o', 'should return o'\n )\n \n\n\n def test_these_should_win_for_nobody(self):\n \n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static( [ ['x', 'x', '-'], \n ['o', '-', 'o'], \n ['o', '-', 'o']]), None, \"should return None\")\n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([\n ['-', '-', '-'],\n ['-', '-', '-'],\n ['x', 'o', 'x']\n \n \n ]) , None, 'should return None')\n \n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([\n ['o','x', 'o'],\n ['-', '-', 'x'],\n ['-', 'o', 'o']\n ]), None, 'should return None'\n )\n \n def test_make_move(self):\n \n self.the_board.board_array=[ ['x', '-', 'x'],\n ['o', '-', 'o'],\n ['o', 'x', '-']\n ]\n \n self.the_board.whose_turn='o'\n \n self.the_board.MakeMove([1,1])\n \n self.assertEqual(self.the_board.board_array[1][1], 'o', \"should be an o\")\n \n self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')\n \n \n\n def test_computer_player_get_outcome(self):\n \n comp_player = ComputerPlayer('x', self.the_board)\n \n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n \n \n self.the_board.board_array = [ ['-', '-', 'x'],\n ['-', 'o', '-'],\n ['-', '-', '-']\n ]\n self.the_board.whose_turn = 'x'\n \n move_seq_1 = [ {'player': 'x', 'move' : [0,1] }, {'player': 'o', 'move' : [2,1]}, {'player': 'x', 'move': [0,0]} ]\n \n out=self.the_board.GetOutcomeOfMoveSequence(move_seq_1)\n \n self.assertEqual(out, 'x', 'x should win: outcome should be x')\n \n \n move_seq_2 = [{'player': 'x', 'move' : [0,1] }, {'player': 'o', 'move' : [2,1]}]\n \n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_2)\n self.assertEqual(out, None, 'no one should win: outcome will be None')\n\n move_seq_3 = [ {'player': 'x', 'move' : [0,1] }, {'player': 'o', 'move' : [0,0] }, {'player': 'x', 'move' : [2,1]},\n {'player': 'o', 'move' : [2,2] }\n ]\n \n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_3)\n \n self.assertEqual(out, 'o', 'o should win')\n \n \n def test_get_winning_moves_for_opponent(self):\n \n comp_player = ComputerPlayer('x', self.the_board)\n \n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 
'x'\n \n \n self.the_board.board_array = [ ['x', '-', 'x'],\n ['-', 'o', '-'],\n ['o', 'o', '-']\n ]\n self.the_board.whose_turn = 'x'\n \n winning_moves=self.the_board.GetWinningMovesFor( 'human')\n \n d_pr(winning_moves)\n self.assertIn([0,1], winning_moves)\n self.assertIn([2,2], winning_moves)\n \n comp_player = ComputerPlayer('o', self.the_board)\n \n self.the_board.human_player_x_or_o = 'x'\n self.the_board.c_player_x_or_o = 'o'\n \n \n self.the_board.board_array = [ ['x', '-', 'x'],\n ['-', 'o', '-'],\n ['o', 'o', '-']\n ]\n self.the_board.whose_turn = 'o'\n \n winning_moves=self.the_board.GetWinningMovesFor( 'human')\n \n d_pr(winning_moves)\n self.assertIn([0,1], winning_moves)\n \n \n \n def test_get_threatening_moves(self):\n \n comp_player = ComputerPlayer('x', self.the_board)\n \n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n \n \n self.the_board.board_array = [ ['-', '-', 'x'],\n ['-', 'o', '-'],\n ['o', '-', '-']\n ]\n self.the_board.whose_turn = 'x'\n \n threatening_moves=comp_player.GetThreateningMovesWithoutTraps(self.the_board.GetEmptySquares())\n \n \n self.assertIn([0,0], threatening_moves)\n self.assertIn([2,2], threatening_moves)\n \n d_pr('threats without traps: ' + str(threatening_moves))\n \n self.assertEqual(len(threatening_moves), 2)\n \n \n \n \n \n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n \n \n self.the_board.board_array = [ ['-', '-', 'o'],\n ['-', 'x', '-'],\n ['o', '-', '-']\n ]\n self.the_board.whose_turn = 'x'\n \n threatening_moves=comp_player.GetThreateningMovesWithoutTraps(self.the_board.GetEmptySquares())\n \n \n self.assertIn([0,1], threatening_moves)\n self.assertIn([2,1], threatening_moves)\n self.assertIn([1,0], threatening_moves)\n self.assertIn([1,2], threatening_moves)\n \n \n \n d_pr('threats without traps: ' + str(threatening_moves))\n \n self.assertEqual(len(threatening_moves), 4)\n \n \n \n \n def test_algorithm_by_playing_large_num_of_random_games(self):\n \n NUM_GAMES = 10\n #NUM_GAMES=100000 # this works but takes a long time\n NUM_GAMES=10\n \n for i in range(0, NUM_GAMES + 1):\n win_result = StartNewGame(UseRandom=True)\n \n self.assertTrue(win_result == 'Computer' or win_result == 'Tie')\n \n \n def test_print(self):\n \n \n self.the_board.board_array = [ ['-', '-', 'x'],\n ['-', 'o', '-'],\n ['x', 'o', '-']]\n \n self.the_board.PrintBoardToConsole()\n \n \n def test_empty_squares(self):\n pass\n \n\nif __name__ == \"__main__\":\n #import sys;sys.argv = ['', 'Test.testName']\n unittest.main()\n",
"<docstring token>\nimport unittest\nfrom Board import TicTacToe_Board\nfrom ComputerPlayer import ComputerPlayer\nfrom utils import debug_print as d_pr\nfrom main import StartNewGame\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n self.the_board = TicTacToe_Board()\n\n def tearDown(self):\n pass\n\n def test_these_should_win_for_x(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n 'x'], ['o', 'x', 'o'], ['o', 'x', 'o']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n 'o'], ['o', 'x', 'o'], ['x', 'o', 'x']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'x', 'x'], ['-', '-', '-']]), 'x', 'should return x')\n\n def test_these_should_win_for_o(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['o', 'x', 'x'], ['o', 'o', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n '-'], ['o', 'o', 'o'], ['o', 'x', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'o', 'x'], ['-', '-', 'o']]), 'o', 'should return o')\n\n def test_these_should_win_for_nobody(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n '-'], ['o', '-', 'o'], ['o', '-', 'o']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['-', '-',\n '-'], ['-', '-', '-'], ['x', 'o', 'x']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['-', '-', 'x'], ['-', 'o', 'o']]), None,\n 'should return None')\n\n def test_make_move(self):\n self.the_board.board_array = [['x', '-', 'x'], ['o', '-', 'o'], [\n 'o', 'x', '-']]\n self.the_board.whose_turn = 'o'\n self.the_board.MakeMove([1, 1])\n self.assertEqual(self.the_board.board_array[1][1], 'o',\n 'should be an o')\n self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')\n\n def test_computer_player_get_outcome(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n '-', '-', '-']]\n self.the_board.whose_turn = 'x'\n move_seq_1 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',\n 'move': [2, 1]}, {'player': 'x', 'move': [0, 0]}]\n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_1)\n self.assertEqual(out, 'x', 'x should win: outcome should be x')\n move_seq_2 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',\n 'move': [2, 1]}]\n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_2)\n self.assertEqual(out, None, 'no one should win: outcome will be None')\n move_seq_3 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',\n 'move': [0, 0]}, {'player': 'x', 'move': [2, 1]}, {'player':\n 'o', 'move': [2, 2]}]\n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_3)\n self.assertEqual(out, 'o', 'o should win')\n\n def test_get_winning_moves_for_opponent(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'x'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n self.assertIn([2, 2], winning_moves)\n comp_player = ComputerPlayer('o', 
self.the_board)\n self.the_board.human_player_x_or_o = 'x'\n self.the_board.c_player_x_or_o = 'o'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'o'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n\n def test_get_threatening_moves(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 0], threatening_moves)\n self.assertIn([2, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 2)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'o'], ['-', 'x', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 1], threatening_moves)\n self.assertIn([2, 1], threatening_moves)\n self.assertIn([1, 0], threatening_moves)\n self.assertIn([1, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 4)\n\n def test_algorithm_by_playing_large_num_of_random_games(self):\n NUM_GAMES = 10\n NUM_GAMES = 10\n for i in range(0, NUM_GAMES + 1):\n win_result = StartNewGame(UseRandom=True)\n self.assertTrue(win_result == 'Computer' or win_result == 'Tie')\n\n def test_print(self):\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'x', 'o', '-']]\n self.the_board.PrintBoardToConsole()\n\n def test_empty_squares(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<docstring token>\n<import token>\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n self.the_board = TicTacToe_Board()\n\n def tearDown(self):\n pass\n\n def test_these_should_win_for_x(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n 'x'], ['o', 'x', 'o'], ['o', 'x', 'o']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n 'o'], ['o', 'x', 'o'], ['x', 'o', 'x']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'x', 'x'], ['-', '-', '-']]), 'x', 'should return x')\n\n def test_these_should_win_for_o(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['o', 'x', 'x'], ['o', 'o', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n '-'], ['o', 'o', 'o'], ['o', 'x', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'o', 'x'], ['-', '-', 'o']]), 'o', 'should return o')\n\n def test_these_should_win_for_nobody(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n '-'], ['o', '-', 'o'], ['o', '-', 'o']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['-', '-',\n '-'], ['-', '-', '-'], ['x', 'o', 'x']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['-', '-', 'x'], ['-', 'o', 'o']]), None,\n 'should return None')\n\n def test_make_move(self):\n self.the_board.board_array = [['x', '-', 'x'], ['o', '-', 'o'], [\n 'o', 'x', '-']]\n self.the_board.whose_turn = 'o'\n self.the_board.MakeMove([1, 1])\n self.assertEqual(self.the_board.board_array[1][1], 'o',\n 'should be an o')\n self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')\n\n def test_computer_player_get_outcome(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n '-', '-', '-']]\n self.the_board.whose_turn = 'x'\n move_seq_1 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',\n 'move': [2, 1]}, {'player': 'x', 'move': [0, 0]}]\n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_1)\n self.assertEqual(out, 'x', 'x should win: outcome should be x')\n move_seq_2 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',\n 'move': [2, 1]}]\n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_2)\n self.assertEqual(out, None, 'no one should win: outcome will be None')\n move_seq_3 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',\n 'move': [0, 0]}, {'player': 'x', 'move': [2, 1]}, {'player':\n 'o', 'move': [2, 2]}]\n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_3)\n self.assertEqual(out, 'o', 'o should win')\n\n def test_get_winning_moves_for_opponent(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'x'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n self.assertIn([2, 2], winning_moves)\n comp_player = ComputerPlayer('o', self.the_board)\n self.the_board.human_player_x_or_o = 'x'\n self.the_board.c_player_x_or_o = 'o'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 
'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'o'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n\n def test_get_threatening_moves(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 0], threatening_moves)\n self.assertIn([2, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 2)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'o'], ['-', 'x', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 1], threatening_moves)\n self.assertIn([2, 1], threatening_moves)\n self.assertIn([1, 0], threatening_moves)\n self.assertIn([1, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 4)\n\n def test_algorithm_by_playing_large_num_of_random_games(self):\n NUM_GAMES = 10\n NUM_GAMES = 10\n for i in range(0, NUM_GAMES + 1):\n win_result = StartNewGame(UseRandom=True)\n self.assertTrue(win_result == 'Computer' or win_result == 'Tie')\n\n def test_print(self):\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'x', 'o', '-']]\n self.the_board.PrintBoardToConsole()\n\n def test_empty_squares(self):\n pass\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<docstring token>\n<import token>\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n self.the_board = TicTacToe_Board()\n\n def tearDown(self):\n pass\n\n def test_these_should_win_for_x(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n 'x'], ['o', 'x', 'o'], ['o', 'x', 'o']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n 'o'], ['o', 'x', 'o'], ['x', 'o', 'x']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'x', 'x'], ['-', '-', '-']]), 'x', 'should return x')\n\n def test_these_should_win_for_o(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['o', 'x', 'x'], ['o', 'o', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n '-'], ['o', 'o', 'o'], ['o', 'x', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'o', 'x'], ['-', '-', 'o']]), 'o', 'should return o')\n\n def test_these_should_win_for_nobody(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n '-'], ['o', '-', 'o'], ['o', '-', 'o']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['-', '-',\n '-'], ['-', '-', '-'], ['x', 'o', 'x']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['-', '-', 'x'], ['-', 'o', 'o']]), None,\n 'should return None')\n\n def test_make_move(self):\n self.the_board.board_array = [['x', '-', 'x'], ['o', '-', 'o'], [\n 'o', 'x', '-']]\n self.the_board.whose_turn = 'o'\n self.the_board.MakeMove([1, 1])\n self.assertEqual(self.the_board.board_array[1][1], 'o',\n 'should be an o')\n self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')\n\n def test_computer_player_get_outcome(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n '-', '-', '-']]\n self.the_board.whose_turn = 'x'\n move_seq_1 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',\n 'move': [2, 1]}, {'player': 'x', 'move': [0, 0]}]\n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_1)\n self.assertEqual(out, 'x', 'x should win: outcome should be x')\n move_seq_2 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',\n 'move': [2, 1]}]\n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_2)\n self.assertEqual(out, None, 'no one should win: outcome will be None')\n move_seq_3 = [{'player': 'x', 'move': [0, 1]}, {'player': 'o',\n 'move': [0, 0]}, {'player': 'x', 'move': [2, 1]}, {'player':\n 'o', 'move': [2, 2]}]\n out = self.the_board.GetOutcomeOfMoveSequence(move_seq_3)\n self.assertEqual(out, 'o', 'o should win')\n\n def test_get_winning_moves_for_opponent(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'x'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n self.assertIn([2, 2], winning_moves)\n comp_player = ComputerPlayer('o', self.the_board)\n self.the_board.human_player_x_or_o = 'x'\n self.the_board.c_player_x_or_o = 'o'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 
'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'o'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n\n def test_get_threatening_moves(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 0], threatening_moves)\n self.assertIn([2, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 2)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'o'], ['-', 'x', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 1], threatening_moves)\n self.assertIn([2, 1], threatening_moves)\n self.assertIn([1, 0], threatening_moves)\n self.assertIn([1, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 4)\n\n def test_algorithm_by_playing_large_num_of_random_games(self):\n NUM_GAMES = 10\n NUM_GAMES = 10\n for i in range(0, NUM_GAMES + 1):\n win_result = StartNewGame(UseRandom=True)\n self.assertTrue(win_result == 'Computer' or win_result == 'Tie')\n\n def test_print(self):\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'x', 'o', '-']]\n self.the_board.PrintBoardToConsole()\n\n def test_empty_squares(self):\n pass\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n self.the_board = TicTacToe_Board()\n\n def tearDown(self):\n pass\n\n def test_these_should_win_for_x(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n 'x'], ['o', 'x', 'o'], ['o', 'x', 'o']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n 'o'], ['o', 'x', 'o'], ['x', 'o', 'x']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'x', 'x'], ['-', '-', '-']]), 'x', 'should return x')\n\n def test_these_should_win_for_o(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['o', 'x', 'x'], ['o', 'o', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n '-'], ['o', 'o', 'o'], ['o', 'x', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'o', 'x'], ['-', '-', 'o']]), 'o', 'should return o')\n\n def test_these_should_win_for_nobody(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n '-'], ['o', '-', 'o'], ['o', '-', 'o']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['-', '-',\n '-'], ['-', '-', '-'], ['x', 'o', 'x']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['-', '-', 'x'], ['-', 'o', 'o']]), None,\n 'should return None')\n\n def test_make_move(self):\n self.the_board.board_array = [['x', '-', 'x'], ['o', '-', 'o'], [\n 'o', 'x', '-']]\n self.the_board.whose_turn = 'o'\n self.the_board.MakeMove([1, 1])\n self.assertEqual(self.the_board.board_array[1][1], 'o',\n 'should be an o')\n self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')\n <function token>\n\n def test_get_winning_moves_for_opponent(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'x'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n self.assertIn([2, 2], winning_moves)\n comp_player = ComputerPlayer('o', self.the_board)\n self.the_board.human_player_x_or_o = 'x'\n self.the_board.c_player_x_or_o = 'o'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'o'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n\n def test_get_threatening_moves(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 0], threatening_moves)\n self.assertIn([2, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 2)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'o'], ['-', 'x', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = 
comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 1], threatening_moves)\n self.assertIn([2, 1], threatening_moves)\n self.assertIn([1, 0], threatening_moves)\n self.assertIn([1, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 4)\n\n def test_algorithm_by_playing_large_num_of_random_games(self):\n NUM_GAMES = 10\n NUM_GAMES = 10\n for i in range(0, NUM_GAMES + 1):\n win_result = StartNewGame(UseRandom=True)\n self.assertTrue(win_result == 'Computer' or win_result == 'Tie')\n\n def test_print(self):\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'x', 'o', '-']]\n self.the_board.PrintBoardToConsole()\n\n def test_empty_squares(self):\n pass\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n self.the_board = TicTacToe_Board()\n\n def tearDown(self):\n pass\n\n def test_these_should_win_for_x(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n 'x'], ['o', 'x', 'o'], ['o', 'x', 'o']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n 'o'], ['o', 'x', 'o'], ['x', 'o', 'x']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'x', 'x'], ['-', '-', '-']]), 'x', 'should return x')\n\n def test_these_should_win_for_o(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['o', 'x', 'x'], ['o', 'o', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n '-'], ['o', 'o', 'o'], ['o', 'x', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'o', 'x'], ['-', '-', 'o']]), 'o', 'should return o')\n\n def test_these_should_win_for_nobody(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n '-'], ['o', '-', 'o'], ['o', '-', 'o']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['-', '-',\n '-'], ['-', '-', '-'], ['x', 'o', 'x']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['-', '-', 'x'], ['-', 'o', 'o']]), None,\n 'should return None')\n\n def test_make_move(self):\n self.the_board.board_array = [['x', '-', 'x'], ['o', '-', 'o'], [\n 'o', 'x', '-']]\n self.the_board.whose_turn = 'o'\n self.the_board.MakeMove([1, 1])\n self.assertEqual(self.the_board.board_array[1][1], 'o',\n 'should be an o')\n self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')\n <function token>\n\n def test_get_winning_moves_for_opponent(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'x'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n self.assertIn([2, 2], winning_moves)\n comp_player = ComputerPlayer('o', self.the_board)\n self.the_board.human_player_x_or_o = 'x'\n self.the_board.c_player_x_or_o = 'o'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'o'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n\n def test_get_threatening_moves(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 0], threatening_moves)\n self.assertIn([2, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 2)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'o'], ['-', 'x', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = 
comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 1], threatening_moves)\n self.assertIn([2, 1], threatening_moves)\n self.assertIn([1, 0], threatening_moves)\n self.assertIn([1, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 4)\n <function token>\n\n def test_print(self):\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'x', 'o', '-']]\n self.the_board.PrintBoardToConsole()\n\n def test_empty_squares(self):\n pass\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n self.the_board = TicTacToe_Board()\n <function token>\n\n def test_these_should_win_for_x(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n 'x'], ['o', 'x', 'o'], ['o', 'x', 'o']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n 'o'], ['o', 'x', 'o'], ['x', 'o', 'x']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'x', 'x'], ['-', '-', '-']]), 'x', 'should return x')\n\n def test_these_should_win_for_o(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['o', 'x', 'x'], ['o', 'o', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n '-'], ['o', 'o', 'o'], ['o', 'x', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'o', 'x'], ['-', '-', 'o']]), 'o', 'should return o')\n\n def test_these_should_win_for_nobody(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n '-'], ['o', '-', 'o'], ['o', '-', 'o']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['-', '-',\n '-'], ['-', '-', '-'], ['x', 'o', 'x']]), None,\n 'should return None')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['-', '-', 'x'], ['-', 'o', 'o']]), None,\n 'should return None')\n\n def test_make_move(self):\n self.the_board.board_array = [['x', '-', 'x'], ['o', '-', 'o'], [\n 'o', 'x', '-']]\n self.the_board.whose_turn = 'o'\n self.the_board.MakeMove([1, 1])\n self.assertEqual(self.the_board.board_array[1][1], 'o',\n 'should be an o')\n self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')\n <function token>\n\n def test_get_winning_moves_for_opponent(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'x'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n self.assertIn([2, 2], winning_moves)\n comp_player = ComputerPlayer('o', self.the_board)\n self.the_board.human_player_x_or_o = 'x'\n self.the_board.c_player_x_or_o = 'o'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'o'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n\n def test_get_threatening_moves(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 0], threatening_moves)\n self.assertIn([2, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 2)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'o'], ['-', 'x', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = 
comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 1], threatening_moves)\n self.assertIn([2, 1], threatening_moves)\n self.assertIn([1, 0], threatening_moves)\n self.assertIn([1, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 4)\n <function token>\n\n def test_print(self):\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'x', 'o', '-']]\n self.the_board.PrintBoardToConsole()\n\n def test_empty_squares(self):\n pass\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass Test(unittest.TestCase):\n\n def setUp(self):\n self.the_board = TicTacToe_Board()\n <function token>\n\n def test_these_should_win_for_x(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n 'x'], ['o', 'x', 'o'], ['o', 'x', 'o']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n 'o'], ['o', 'x', 'o'], ['x', 'o', 'x']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'x', 'x'], ['-', '-', '-']]), 'x', 'should return x')\n\n def test_these_should_win_for_o(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['o', 'x', 'x'], ['o', 'o', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n '-'], ['o', 'o', 'o'], ['o', 'x', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'o', 'x'], ['-', '-', 'o']]), 'o', 'should return o')\n <function token>\n\n def test_make_move(self):\n self.the_board.board_array = [['x', '-', 'x'], ['o', '-', 'o'], [\n 'o', 'x', '-']]\n self.the_board.whose_turn = 'o'\n self.the_board.MakeMove([1, 1])\n self.assertEqual(self.the_board.board_array[1][1], 'o',\n 'should be an o')\n self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')\n <function token>\n\n def test_get_winning_moves_for_opponent(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'x'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n self.assertIn([2, 2], winning_moves)\n comp_player = ComputerPlayer('o', self.the_board)\n self.the_board.human_player_x_or_o = 'x'\n self.the_board.c_player_x_or_o = 'o'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'o'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n\n def test_get_threatening_moves(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 0], threatening_moves)\n self.assertIn([2, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 2)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'o'], ['-', 'x', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 1], threatening_moves)\n self.assertIn([2, 1], threatening_moves)\n self.assertIn([1, 0], threatening_moves)\n self.assertIn([1, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 4)\n <function token>\n\n def test_print(self):\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], 
[\n 'x', 'o', '-']]\n self.the_board.PrintBoardToConsole()\n\n def test_empty_squares(self):\n pass\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass Test(unittest.TestCase):\n <function token>\n <function token>\n\n def test_these_should_win_for_x(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n 'x'], ['o', 'x', 'o'], ['o', 'x', 'o']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n 'o'], ['o', 'x', 'o'], ['x', 'o', 'x']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'x', 'x'], ['-', '-', '-']]), 'x', 'should return x')\n\n def test_these_should_win_for_o(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['o', 'x', 'x'], ['o', 'o', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n '-'], ['o', 'o', 'o'], ['o', 'x', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'o', 'x'], ['-', '-', 'o']]), 'o', 'should return o')\n <function token>\n\n def test_make_move(self):\n self.the_board.board_array = [['x', '-', 'x'], ['o', '-', 'o'], [\n 'o', 'x', '-']]\n self.the_board.whose_turn = 'o'\n self.the_board.MakeMove([1, 1])\n self.assertEqual(self.the_board.board_array[1][1], 'o',\n 'should be an o')\n self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')\n <function token>\n\n def test_get_winning_moves_for_opponent(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'x'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n self.assertIn([2, 2], winning_moves)\n comp_player = ComputerPlayer('o', self.the_board)\n self.the_board.human_player_x_or_o = 'x'\n self.the_board.c_player_x_or_o = 'o'\n self.the_board.board_array = [['x', '-', 'x'], ['-', 'o', '-'], [\n 'o', 'o', '-']]\n self.the_board.whose_turn = 'o'\n winning_moves = self.the_board.GetWinningMovesFor('human')\n d_pr(winning_moves)\n self.assertIn([0, 1], winning_moves)\n\n def test_get_threatening_moves(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 0], threatening_moves)\n self.assertIn([2, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 2)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'o'], ['-', 'x', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 1], threatening_moves)\n self.assertIn([2, 1], threatening_moves)\n self.assertIn([1, 0], threatening_moves)\n self.assertIn([1, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 4)\n <function token>\n\n def test_print(self):\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'x', 'o', '-']]\n 
self.the_board.PrintBoardToConsole()\n\n def test_empty_squares(self):\n pass\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass Test(unittest.TestCase):\n <function token>\n <function token>\n\n def test_these_should_win_for_x(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n 'x'], ['o', 'x', 'o'], ['o', 'x', 'o']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n 'o'], ['o', 'x', 'o'], ['x', 'o', 'x']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'x', 'x'], ['-', '-', '-']]), 'x', 'should return x')\n\n def test_these_should_win_for_o(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['o', 'x', 'x'], ['o', 'o', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n '-'], ['o', 'o', 'o'], ['o', 'x', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'o', 'x'], ['-', '-', 'o']]), 'o', 'should return o')\n <function token>\n\n def test_make_move(self):\n self.the_board.board_array = [['x', '-', 'x'], ['o', '-', 'o'], [\n 'o', 'x', '-']]\n self.the_board.whose_turn = 'o'\n self.the_board.MakeMove([1, 1])\n self.assertEqual(self.the_board.board_array[1][1], 'o',\n 'should be an o')\n self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')\n <function token>\n <function token>\n\n def test_get_threatening_moves(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 0], threatening_moves)\n self.assertIn([2, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 2)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'o'], ['-', 'x', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 1], threatening_moves)\n self.assertIn([2, 1], threatening_moves)\n self.assertIn([1, 0], threatening_moves)\n self.assertIn([1, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 4)\n <function token>\n\n def test_print(self):\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'x', 'o', '-']]\n self.the_board.PrintBoardToConsole()\n\n def test_empty_squares(self):\n pass\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass Test(unittest.TestCase):\n <function token>\n <function token>\n\n def test_these_should_win_for_x(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n 'x'], ['o', 'x', 'o'], ['o', 'x', 'o']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n 'o'], ['o', 'x', 'o'], ['x', 'o', 'x']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'x', 'x'], ['-', '-', '-']]), 'x', 'should return x')\n\n def test_these_should_win_for_o(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['o', 'x', 'x'], ['o', 'o', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n '-'], ['o', 'o', 'o'], ['o', 'x', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'o', 'x'], ['-', '-', 'o']]), 'o', 'should return o')\n <function token>\n\n def test_make_move(self):\n self.the_board.board_array = [['x', '-', 'x'], ['o', '-', 'o'], [\n 'o', 'x', '-']]\n self.the_board.whose_turn = 'o'\n self.the_board.MakeMove([1, 1])\n self.assertEqual(self.the_board.board_array[1][1], 'o',\n 'should be an o')\n self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')\n <function token>\n <function token>\n\n def test_get_threatening_moves(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 0], threatening_moves)\n self.assertIn([2, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 2)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'o'], ['-', 'x', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 1], threatening_moves)\n self.assertIn([2, 1], threatening_moves)\n self.assertIn([1, 0], threatening_moves)\n self.assertIn([1, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 4)\n <function token>\n\n def test_print(self):\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'x', 'o', '-']]\n self.the_board.PrintBoardToConsole()\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass Test(unittest.TestCase):\n <function token>\n <function token>\n\n def test_these_should_win_for_x(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n 'x'], ['o', 'x', 'o'], ['o', 'x', 'o']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n 'o'], ['o', 'x', 'o'], ['x', 'o', 'x']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'x', 'x'], ['-', '-', '-']]), 'x', 'should return x')\n\n def test_these_should_win_for_o(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['o', 'x', 'x'], ['o', 'o', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n '-'], ['o', 'o', 'o'], ['o', 'x', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'o', 'x'], ['-', '-', 'o']]), 'o', 'should return o')\n <function token>\n\n def test_make_move(self):\n self.the_board.board_array = [['x', '-', 'x'], ['o', '-', 'o'], [\n 'o', 'x', '-']]\n self.the_board.whose_turn = 'o'\n self.the_board.MakeMove([1, 1])\n self.assertEqual(self.the_board.board_array[1][1], 'o',\n 'should be an o')\n self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')\n <function token>\n <function token>\n\n def test_get_threatening_moves(self):\n comp_player = ComputerPlayer('x', self.the_board)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'x'], ['-', 'o', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 0], threatening_moves)\n self.assertIn([2, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 2)\n self.the_board.human_player_x_or_o = 'o'\n self.the_board.c_player_x_or_o = 'x'\n self.the_board.board_array = [['-', '-', 'o'], ['-', 'x', '-'], [\n 'o', '-', '-']]\n self.the_board.whose_turn = 'x'\n threatening_moves = comp_player.GetThreateningMovesWithoutTraps(self\n .the_board.GetEmptySquares())\n self.assertIn([0, 1], threatening_moves)\n self.assertIn([2, 1], threatening_moves)\n self.assertIn([1, 0], threatening_moves)\n self.assertIn([1, 2], threatening_moves)\n d_pr('threats without traps: ' + str(threatening_moves))\n self.assertEqual(len(threatening_moves), 4)\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass Test(unittest.TestCase):\n <function token>\n <function token>\n\n def test_these_should_win_for_x(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n 'x'], ['o', 'x', 'o'], ['o', 'x', 'o']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n 'o'], ['o', 'x', 'o'], ['x', 'o', 'x']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'x', 'x'], ['-', '-', '-']]), 'x', 'should return x')\n\n def test_these_should_win_for_o(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['o', 'x', 'x'], ['o', 'o', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n '-'], ['o', 'o', 'o'], ['o', 'x', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'o', 'x'], ['-', '-', 'o']]), 'o', 'should return o')\n <function token>\n\n def test_make_move(self):\n self.the_board.board_array = [['x', '-', 'x'], ['o', '-', 'o'], [\n 'o', 'x', '-']]\n self.the_board.whose_turn = 'o'\n self.the_board.MakeMove([1, 1])\n self.assertEqual(self.the_board.board_array[1][1], 'o',\n 'should be an o')\n self.assertEqual(self.the_board.whose_turn, 'x', 'turn should change')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass Test(unittest.TestCase):\n <function token>\n <function token>\n\n def test_these_should_win_for_x(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'x',\n 'x'], ['o', 'x', 'o'], ['o', 'x', 'o']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n 'o'], ['o', 'x', 'o'], ['x', 'o', 'x']]), 'x', 'should return x')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'x', 'x'], ['-', '-', '-']]), 'x', 'should return x')\n\n def test_these_should_win_for_o(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['o', 'x', 'x'], ['o', 'o', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n '-'], ['o', 'o', 'o'], ['o', 'x', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'o', 'x'], ['-', '-', 'o']]), 'o', 'should return o')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass Test(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n\n def test_these_should_win_for_o(self):\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['o', 'x', 'x'], ['o', 'o', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['x', 'o',\n '-'], ['o', 'o', 'o'], ['o', 'x', 'x']]), 'o', 'should return o')\n self.assertEqual(TicTacToe_Board.IsWinningBoard_static([['o', 'x',\n 'o'], ['x', 'o', 'x'], ['-', '-', 'o']]), 'o', 'should return o')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\nclass Test(unittest.TestCase):\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<class token>\n<code token>\n"
] | false |
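The assertions in record 939 above pin down the contract of TicTacToe_Board.IsWinningBoard_static: return 'x' or 'o' when that player owns a full row, column, or diagonal, and None otherwise. Below is a minimal sketch that satisfies those assertions; it is a hypothetical reimplementation for illustration, not the repository's actual method.

def is_winning_board(board):
    """Return 'x' or 'o' if that player has three in a row, else None."""
    lines = list(board)                                            # rows
    lines += [[board[r][c] for r in range(3)] for c in range(3)]   # columns
    lines.append([board[i][i] for i in range(3)])                  # main diagonal
    lines.append([board[i][2 - i] for i in range(3)])              # anti-diagonal
    for line in lines:
        if line[0] in ('x', 'o') and line.count(line[0]) == 3:
            return line[0]
    return None

# Spot checks taken from the record's own test cases:
assert is_winning_board([['o', 'x', 'o'], ['x', 'x', 'x'], ['-', '-', '-']]) == 'x'
assert is_winning_board([['o', 'x', 'o'], ['x', 'o', 'x'], ['-', '-', 'o']]) == 'o'
assert is_winning_board([['-', '-', '-'], ['-', '-', '-'], ['x', 'o', 'x']]) is None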
940 |
8e629ee53f11e29aa026763508d13b06f6ced5ba
|
# -*- coding:utf-8 -*-
__author__ = 'yangxin_ryan'
"""
Solutions:
题目要求非递归的中序遍历,
中序遍历的意思其实就是先遍历左孩子、然后是根结点、最后是右孩子。我们按照这个逻辑,应该先循环到root的最左孩子,
然后依次出栈,然后将结果放入结果集合result,然后是根的val,然后右孩子。
"""
from typing import List


class TreeNode(object):
    """Minimal binary-tree node so the snippet runs stand-alone
    (assumed to match the usual LeetCode definition, which the
    original file relies on but does not include)."""

    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class BinaryTreeInorderTraversal(object):
def inorderTraversal(self, root: TreeNode) -> List[int]:
result = list()
inorder_stack = list()
while root or inorder_stack:
if root:
inorder_stack.append(root)
root = root.left
else:
root = inorder_stack.pop()
result.append(root.val)
root = root.right
return result
|
[
"# -*- coding:utf-8 -*-\n__author__ = 'yangxin_ryan'\n\"\"\"\nSolutions:\n题目要求非递归的中序遍历,\n中序遍历的意思其实就是先遍历左孩子、然后是根结点、最后是右孩子。我们按照这个逻辑,应该先循环到root的最左孩子,\n然后依次出栈,然后将结果放入结果集合result,然后是根的val,然后右孩子。\n\"\"\"\n\n\nclass BinaryTreeInorderTraversal(object):\n\n def inorderTraversal(self, root: TreeNode) -> List[int]:\n result = list()\n inorder_stack = list()\n while root or inorder_stack:\n if root:\n inorder_stack.append(root)\n root = root.left\n else:\n root = inorder_stack.pop()\n result.append(root.val)\n root = root.right\n return result\n\n",
"__author__ = 'yangxin_ryan'\n<docstring token>\n\n\nclass BinaryTreeInorderTraversal(object):\n\n def inorderTraversal(self, root: TreeNode) ->List[int]:\n result = list()\n inorder_stack = list()\n while root or inorder_stack:\n if root:\n inorder_stack.append(root)\n root = root.left\n else:\n root = inorder_stack.pop()\n result.append(root.val)\n root = root.right\n return result\n",
"<assignment token>\n<docstring token>\n\n\nclass BinaryTreeInorderTraversal(object):\n\n def inorderTraversal(self, root: TreeNode) ->List[int]:\n result = list()\n inorder_stack = list()\n while root or inorder_stack:\n if root:\n inorder_stack.append(root)\n root = root.left\n else:\n root = inorder_stack.pop()\n result.append(root.val)\n root = root.right\n return result\n",
"<assignment token>\n<docstring token>\n\n\nclass BinaryTreeInorderTraversal(object):\n <function token>\n",
"<assignment token>\n<docstring token>\n<class token>\n"
] | false |
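A quick sanity check for record 940's traversal, using the TreeNode stub added above. The tree below is the classic LeetCode example [1, null, 2, 3], whose inorder sequence is [1, 3, 2]:

# 1 has only a right child 2, and 2 has a left child 3.
root = TreeNode(1)
root.right = TreeNode(2)
root.right.left = TreeNode(3)

print(BinaryTreeInorderTraversal().inorderTraversal(root))  # -> [1, 3, 2]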
941 |
bc837d95ef22bd376f8b095e7aeb1f7d15c0e22e
|
"""Write a program that asks the user to enter a word and then
capitalizes every other letter of that word. So if the user enters "rhinoceros",
the program should print "rHiNoCeRoS"""
word = input("please enter the word\n")
count = 1  # start lowercase so the 2nd, 4th, ... letters come out uppercase ("rHiNoCeRoS")
for char in word:
    if count == 0:
        print(char.upper(), end="")
        count = 1
    else:
        print(char.lower(), end="")
        count = 0
print()  # finish with a newline
|
[
"\"\"\"Write a program that asks the user to enter a word and then\ncapitalizes every other letter of that word. So if the user enters \"rhinoceros\",\nthe program should print \"rHiNoCeRoS\"\"\"\n\nword=str(input(\"please enter the word\\n\"))\ncount=0\nfor char in word:\n if count==0:\n print(char.upper(),end=\"\")\n count=1\n else:\n print(char.lower(),end=\"\")\n count=0\n",
"<docstring token>\nword = str(input('please enter the word\\n'))\ncount = 0\nfor char in word:\n if count == 0:\n print(char.upper(), end='')\n count = 1\n else:\n print(char.lower(), end='')\n count = 0\n",
"<docstring token>\n<assignment token>\nfor char in word:\n if count == 0:\n print(char.upper(), end='')\n count = 1\n else:\n print(char.lower(), end='')\n count = 0\n",
"<docstring token>\n<assignment token>\n<code token>\n"
] | false |
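An equivalent, slightly more idiomatic take on record 941 (a sketch: it builds the whole string with enumerate instead of a manual toggle, then prints once):

word = input("please enter the word\n")
print("".join(c.lower() if i % 2 == 0 else c.upper()
              for i, c in enumerate(word)))  # "rhinoceros" -> "rHiNoCeRoS"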
942 |
da34eb25ec08c8311fa839a0cdcd164eff036a5d
|
import bnn
# download the MNIST test set (IPython shell escapes, run once)
#!wget http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz
#!wget http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz
# unzip
#!gzip -d t10k-images-idx3-ubyte.gz
#!gzip -d t10k-labels-idx1-ubyte.gz
#read labels
print("Reading labels")
labels = []
with open("/home/xilinx/jupyter_notebooks/bnn/t10k-labels-idx1-ubyte","rb") as lbl_file:
#read magic number and number of labels (MSB first) -> MNIST header
magicNum = int.from_bytes(lbl_file.read(4), byteorder="big")
countLbl = int.from_bytes(lbl_file.read(4), byteorder="big")
#now the labels are following byte-wise
for idx in range(countLbl):
labels.append(int.from_bytes(lbl_file.read(1), byteorder="big"))
lbl_file.close()
print("Initiating classifier")
lfcW1A1_classifier = bnn.LfcClassifier(bnn.NETWORK_LFCW1A1,"mnist",bnn.RUNTIME_HW)
print("Testing throughput")
result_W1A1 = lfcW1A1_classifier.classify_mnists("/home/xilinx/jupyter_notebooks/bnn/t10k-images-idx3-ubyte")
|
[
"import bnn\n\n#get\n#!wget http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz\n#!wget http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz\n#unzip\n#!gzip -d t10k-images-idx3-ubyte.gz\n#!gzip -d t10k-labels-idx1-ubyte.gz\n\n#read labels\nprint(\"Reading labels\")\nlabels = []\nwith open(\"/home/xilinx/jupyter_notebooks/bnn/t10k-labels-idx1-ubyte\",\"rb\") as lbl_file:\n #read magic number and number of labels (MSB first) -> MNIST header\n magicNum = int.from_bytes(lbl_file.read(4), byteorder=\"big\")\n countLbl = int.from_bytes(lbl_file.read(4), byteorder=\"big\")\n #now the labels are following byte-wise\n for idx in range(countLbl):\n labels.append(int.from_bytes(lbl_file.read(1), byteorder=\"big\"))\n lbl_file.close()\nprint(\"Initiating classifier\")\nlfcW1A1_classifier = bnn.LfcClassifier(bnn.NETWORK_LFCW1A1,\"mnist\",bnn.RUNTIME_HW)\n\nprint(\"Testing throughput\")\nresult_W1A1 = lfcW1A1_classifier.classify_mnists(\"/home/xilinx/jupyter_notebooks/bnn/t10k-images-idx3-ubyte\")",
"import bnn\nprint('Reading labels')\nlabels = []\nwith open('/home/xilinx/jupyter_notebooks/bnn/t10k-labels-idx1-ubyte', 'rb'\n ) as lbl_file:\n magicNum = int.from_bytes(lbl_file.read(4), byteorder='big')\n countLbl = int.from_bytes(lbl_file.read(4), byteorder='big')\n for idx in range(countLbl):\n labels.append(int.from_bytes(lbl_file.read(1), byteorder='big'))\n lbl_file.close()\nprint('Initiating classifier')\nlfcW1A1_classifier = bnn.LfcClassifier(bnn.NETWORK_LFCW1A1, 'mnist', bnn.\n RUNTIME_HW)\nprint('Testing throughput')\nresult_W1A1 = lfcW1A1_classifier.classify_mnists(\n '/home/xilinx/jupyter_notebooks/bnn/t10k-images-idx3-ubyte')\n",
"<import token>\nprint('Reading labels')\nlabels = []\nwith open('/home/xilinx/jupyter_notebooks/bnn/t10k-labels-idx1-ubyte', 'rb'\n ) as lbl_file:\n magicNum = int.from_bytes(lbl_file.read(4), byteorder='big')\n countLbl = int.from_bytes(lbl_file.read(4), byteorder='big')\n for idx in range(countLbl):\n labels.append(int.from_bytes(lbl_file.read(1), byteorder='big'))\n lbl_file.close()\nprint('Initiating classifier')\nlfcW1A1_classifier = bnn.LfcClassifier(bnn.NETWORK_LFCW1A1, 'mnist', bnn.\n RUNTIME_HW)\nprint('Testing throughput')\nresult_W1A1 = lfcW1A1_classifier.classify_mnists(\n '/home/xilinx/jupyter_notebooks/bnn/t10k-images-idx3-ubyte')\n",
"<import token>\nprint('Reading labels')\n<assignment token>\nwith open('/home/xilinx/jupyter_notebooks/bnn/t10k-labels-idx1-ubyte', 'rb'\n ) as lbl_file:\n magicNum = int.from_bytes(lbl_file.read(4), byteorder='big')\n countLbl = int.from_bytes(lbl_file.read(4), byteorder='big')\n for idx in range(countLbl):\n labels.append(int.from_bytes(lbl_file.read(1), byteorder='big'))\n lbl_file.close()\nprint('Initiating classifier')\n<assignment token>\nprint('Testing throughput')\n<assignment token>\n",
"<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n"
] | false |
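A natural follow-up to record 942 is to score the hardware run against the labels that were just parsed. This sketch assumes classify_mnists returns one predicted digit per test image, which the record itself does not show:

# Compare predictions with the ground truth read from the label file.
correct = sum(1 for pred, truth in zip(result_W1A1, labels) if pred == truth)
print("Accuracy: {:.2%}".format(correct / len(labels)))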
943 |
04b5df5cfd052390f057c6f13b2e21d27bac6449
|
"""
This example shows how to communicate with a SH05 (shutter) connected to a KSC101 (KCube Solenoid).
"""
# this "if" statement is used so that Sphinx does not execute this script when the docs are being built
if __name__ == '__main__':
import os
import time
from msl.equipment import EquipmentRecord, ConnectionRecord, Backend
from msl.equipment.resources.thorlabs import MotionControl
# ensure that the Kinesis folder is available on PATH
os.environ['PATH'] += os.pathsep + 'C:/Program Files/Thorlabs/Kinesis'
# rather than reading the EquipmentRecord from a database we can create it manually
record = EquipmentRecord(
manufacturer='Thorlabs',
model='KSC101',
serial='68000297', # update the serial number for your KSC101
connection=ConnectionRecord(
backend=Backend.MSL,
address='SDK::Thorlabs.MotionControl.KCube.Solenoid.dll',
),
)
def is_open():
return shutter.get_operating_state() == 1
# avoid the FT_DeviceNotFound error
MotionControl.build_device_list()
# connect to the KCube Solenoid
shutter = record.connect()
print('Connected to {}'.format(shutter))
# start polling at 200 ms
shutter.start_polling(200)
# set the operating mode to SC_OperatingModes.SC_Manual
shutter.set_operating_mode('Manual')
for i in range(5):
# set the operating state to SC_OperatingStates.SC_Active
print('Opening the shutter...')
shutter.set_operating_state('Active')
while not is_open():
time.sleep(0.05)
print(' Is the shutter open? {}'.format(is_open()))
time.sleep(1)
# set the operating state to SC_OperatingStates.SC_Inactive
print('Closing the shutter...')
shutter.set_operating_state('Inactive')
while is_open():
time.sleep(0.05)
print(' Is the shutter open? {}'.format(is_open()))
time.sleep(1)
# stop polling and close the connection
shutter.stop_polling()
shutter.disconnect()
|
[
"\"\"\"\nThis example shows how to communicate with a SH05 (shutter) connected to a KSC101 (KCube Solenoid).\n\"\"\"\n\n# this \"if\" statement is used so that Sphinx does not execute this script when the docs are being built\nif __name__ == '__main__':\n import os\n import time\n\n from msl.equipment import EquipmentRecord, ConnectionRecord, Backend\n from msl.equipment.resources.thorlabs import MotionControl\n\n # ensure that the Kinesis folder is available on PATH\n os.environ['PATH'] += os.pathsep + 'C:/Program Files/Thorlabs/Kinesis'\n\n # rather than reading the EquipmentRecord from a database we can create it manually\n record = EquipmentRecord(\n manufacturer='Thorlabs',\n model='KSC101',\n serial='68000297', # update the serial number for your KSC101\n connection=ConnectionRecord(\n backend=Backend.MSL,\n address='SDK::Thorlabs.MotionControl.KCube.Solenoid.dll',\n ),\n )\n\n def is_open():\n return shutter.get_operating_state() == 1\n\n # avoid the FT_DeviceNotFound error\n MotionControl.build_device_list()\n\n # connect to the KCube Solenoid\n shutter = record.connect()\n print('Connected to {}'.format(shutter))\n\n # start polling at 200 ms\n shutter.start_polling(200)\n\n # set the operating mode to SC_OperatingModes.SC_Manual\n shutter.set_operating_mode('Manual')\n\n for i in range(5):\n\n # set the operating state to SC_OperatingStates.SC_Active\n print('Opening the shutter...')\n shutter.set_operating_state('Active')\n while not is_open():\n time.sleep(0.05)\n print(' Is the shutter open? {}'.format(is_open()))\n\n time.sleep(1)\n\n # set the operating state to SC_OperatingStates.SC_Inactive\n print('Closing the shutter...')\n shutter.set_operating_state('Inactive')\n while is_open():\n time.sleep(0.05)\n print(' Is the shutter open? {}'.format(is_open()))\n\n time.sleep(1)\n\n # stop polling and close the connection\n shutter.stop_polling()\n shutter.disconnect()\n",
"<docstring token>\nif __name__ == '__main__':\n import os\n import time\n from msl.equipment import EquipmentRecord, ConnectionRecord, Backend\n from msl.equipment.resources.thorlabs import MotionControl\n os.environ['PATH'] += os.pathsep + 'C:/Program Files/Thorlabs/Kinesis'\n record = EquipmentRecord(manufacturer='Thorlabs', model='KSC101',\n serial='68000297', connection=ConnectionRecord(backend=Backend.MSL,\n address='SDK::Thorlabs.MotionControl.KCube.Solenoid.dll'))\n\n def is_open():\n return shutter.get_operating_state() == 1\n MotionControl.build_device_list()\n shutter = record.connect()\n print('Connected to {}'.format(shutter))\n shutter.start_polling(200)\n shutter.set_operating_mode('Manual')\n for i in range(5):\n print('Opening the shutter...')\n shutter.set_operating_state('Active')\n while not is_open():\n time.sleep(0.05)\n print(' Is the shutter open? {}'.format(is_open()))\n time.sleep(1)\n print('Closing the shutter...')\n shutter.set_operating_state('Inactive')\n while is_open():\n time.sleep(0.05)\n print(' Is the shutter open? {}'.format(is_open()))\n time.sleep(1)\n shutter.stop_polling()\n shutter.disconnect()\n",
"<docstring token>\n<code token>\n"
] | false |
944 |
11f29508d52e856f4751a5dc8911a1f1c9832374
|
def test(d_iter):
from cqlengine import columns
from cqlengine.models import Model
from cqlengine.query import ModelQuerySet
from cqlengine import connection
from cqlengine.management import sync_table
from urllib2 import urlopen, Request
from pyspark.sql import SQLContext
import json
from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement
import operator
from sets import Set
CASSANDRA_KEYSPACE = "playground"
class table3_timeline(Model):
link_id = columns.Text(primary_key=True)
counts = columns.Integer()
time = columns.Integer(primary_key=True, partition_key=False)
class table3_comments(Model):
link_id = columns.Text()
author = columns.Text()
body = columns.Text()
created_utc = columns.Text()
parent_id = columns.Text()
subreddit = columns.Text()
subreddit_id = columns.Text()
name = columns.Text(primary_key=True)
score = columns.Integer(index = True)
class table3_links(Model):
link_id = columns.Text(primary_key=True)
title = columns.Text()
permalink = columns.Text()
subreddit = columns.Text()
subreddit_id = columns.Text()
selftext = columns.Text()
created = columns.Integer()
score = columns.Integer()
url = columns.Text()
top_comment = columns.Text()
top_score = columns.Integer()
connection.setup(['172.31.6.150'], CASSANDRA_KEYSPACE)
cluster = Cluster(['54.193.123.92'])
session = cluster.connect(CASSANDRA_KEYSPACE)
sync_table(table3_links)
sync_table(table3_comments)
sync_table(table3_timeline)
for d in d_iter:
table3_comments.create(**d)
input = {}
createdtime = 0
obj = table3_links.objects(link_id=d['link_id'])
cql = "SELECT top_score, created FROM table3_links WHERE link_id='"+d['link_id']+"'"
stmt = session.execute(cql)
current = []
for repo in stmt:
current.append(repo)
if len(current) > 0:
createdtime = current[0][1]
if int(current[0][0]) < int(d['score']):
obj.update(top_comment = d['name'])
obj.update(top_score = d['score'])
else:
source = "http://www.reddit.com/by_id/"+d['link_id']+"/.json"
request = Request(source)
response = urlopen(request)
data = json.loads(response.read())
input['title'] = data['data']['children'][0]['data']['title']
input['permalink'] = data['data']['children'][0]['data']['permalink']
input['subreddit'] = data['data']['children'][0]['data']['subreddit']
input['selftext'] = data['data']['children'][0]['data']['selftext']
input['subreddit_id'] = data['data']['children'][0]['data']['subreddit_id']
input['created'] = int(data['data']['children'][0]['data']['created'])
createdtime = input['created']
input['url'] = data['data']['children'][0]['data']['url']
input['score'] = data['data']['children'][0]['data']['score']
table3_links.create( link_id = d['link_id'],
title = input['title'],
permalink = input['permalink'],
subreddit = input['subreddit'],
selftext = input['selftext'],
subreddit_id = input['subreddit_id'],
created = input['created'],
url = input['url'],
score = input['score'],
top_comment = d['name'],
top_score = d['score'])
table3_timeline.create(link_id=d['link_id'], time=0, counts=0)
timegap = int(abs(int(d['created_utc']) - createdtime)/3600) # one hour
cql2 = "SELECT counts FROM table3_timeline WHERE link_id='"+d['link_id']+"' AND time=" + str(timegap)
stmt = session.execute(cql2)
count_tmp = []
for rep in stmt:
count_tmp.append(rep)
if len(count_tmp) > 0:
timeslot = table3_timeline.objects(link_id=d['link_id'], time=timegap)
timeslot.update(counts=(count_tmp[0][0]+1))
else:
table3_timeline.create(link_id=d['link_id'], time=timegap, counts=1)
sync_table(table3_links)
sync_table(table3_comments)
sync_table(table3_timeline)
# NOTE: assumes a live SparkContext `sc` (e.g. the pyspark shell); the
# SQLContext must exist before the JSON input can be read.
from pyspark.sql import SQLContext
sqlContext = SQLContext(sc)
df = sqlContext.read.json("s3n://yy-data/testJSON.json")
# alternative input: s3n://reddit-comments/2007/RC_2007-10
rdd = df.map(lambda x: {"link_id": x.link_id,
"author": x.author,
"body": x.body,
"created_utc": x.created_utc,
"parent_id": x.parent_id,
"subreddit": x.subreddit,
"subreddit_id": x.subreddit_id,
"name": x.name,
"score": x.score})
test([])  # presumably run once on the driver so sync_table creates the Cassandra tables before workers write
rdd.foreachPartition(test)
|
[
"def test(d_iter):\n from cqlengine import columns\n from cqlengine.models import Model\n from cqlengine.query import ModelQuerySet\n from cqlengine import connection\n from cqlengine.management import sync_table\n from urllib2 import urlopen, Request\n from pyspark.sql import SQLContext\n import json\n from cassandra.cluster import Cluster\n from cassandra.query import SimpleStatement\n import operator\n from sets import Set\n\n CASSANDRA_KEYSPACE = \"playground\"\n class table3_timeline(Model):\n link_id = columns.Text(primary_key=True)\n counts = columns.Integer()\n time = columns.Integer(primary_key=True, partition_key=False)\n class table3_comments(Model):\n link_id = columns.Text()\n author = columns.Text()\n body = columns.Text()\n created_utc = columns.Text()\n parent_id = columns.Text()\n subreddit = columns.Text()\n subreddit_id = columns.Text()\n name = columns.Text(primary_key=True)\n score = columns.Integer(index = True)\n class table3_links(Model):\n link_id = columns.Text(primary_key=True)\n title = columns.Text()\n permalink = columns.Text()\n subreddit = columns.Text()\n subreddit_id = columns.Text()\n selftext = columns.Text()\n created = columns.Integer()\n score = columns.Integer()\n url = columns.Text()\n top_comment = columns.Text()\n top_score = columns.Integer()\n connection.setup(['172.31.6.150'], CASSANDRA_KEYSPACE)\n cluster = Cluster(['54.193.123.92'])\n session = cluster.connect(CASSANDRA_KEYSPACE)\n sync_table(table3_links)\n sync_table(table3_comments)\n sync_table(table3_timeline)\n for d in d_iter:\n table3_comments.create(**d)\n input = {}\n createdtime = 0\n obj = table3_links.objects(link_id=d['link_id'])\n cql = \"SELECT top_score, created FROM table3_links WHERE link_id='\"+d['link_id']+\"'\"\n stmt = session.execute(cql)\n current = []\n for repo in stmt:\n current.append(repo)\n if len(current) > 0:\n createdtime = current[0][1]\n if int(current[0][0]) < int(d['score']):\n obj.update(top_comment = d['name'])\n obj.update(top_score = d['score'])\n else:\n source = \"http://www.reddit.com/by_id/\"+d['link_id']+\"/.json\"\n request = Request(source)\n response = urlopen(request)\n data = json.loads(response.read())\n input['title'] = data['data']['children'][0]['data']['title']\n input['permalink'] = data['data']['children'][0]['data']['permalink']\n input['subreddit'] = data['data']['children'][0]['data']['subreddit']\n input['selftext'] = data['data']['children'][0]['data']['selftext']\n input['subreddit_id'] = data['data']['children'][0]['data']['subreddit_id'] \n input['created'] = int(data['data']['children'][0]['data']['created'])\n createdtime = input['created']\n input['url'] = data['data']['children'][0]['data']['url']\n input['score'] = data['data']['children'][0]['data']['score']\n table3_links.create( link_id = d['link_id'],\n title = input['title'],\n permalink = input['permalink'],\n subreddit = input['subreddit'],\n selftext = input['selftext'],\n subreddit_id = input['subreddit_id'],\n created = input['created'],\n url = input['url'],\n score = input['score'],\n top_comment = d['name'],\n top_score = d['score'])\n table3_timeline.create(link_id=d['link_id'], time=0, counts=0)\n timegap = int(abs(int(d['created_utc']) - createdtime)/3600) # one hour\n cql2 = \"SELECT counts FROM table3_timeline WHERE link_id='\"+d['link_id']+\"' AND time=\" + str(timegap)\n stmt = session.execute(cql2)\n count_tmp = []\n for rep in stmt:\n count_tmp.append(rep)\n if len(count_tmp) > 0:\n timeslot = table3_timeline.objects(link_id=d['link_id'], 
time=timegap)\n timeslot.update(counts=(count_tmp[0][0]+1))\n else:\n table3_timeline.create(link_id=d['link_id'], time=timegap, counts=1)\n sync_table(table3_links)\n sync_table(table3_comments)\n sync_table(table3_timeline)\n\ndf = sqlContext.read.json(\"s3n://yy-data/testJSON.json\")\n# s3n://reddit-comments/2007/RC_2007-10\nrdd = df.map(lambda x: {\"link_id\": x.link_id, \n \"author\": x.author,\n \"body\": x.body,\n \"created_utc\": x.created_utc,\n \"parent_id\": x.parent_id,\n \"subreddit\": x.subreddit,\n \"subreddit_id\": x.subreddit_id,\n \"name\": x.name,\n \"score\": x.score})\ntest([])\nrdd.foreachPartition(test)",
"def test(d_iter):\n from cqlengine import columns\n from cqlengine.models import Model\n from cqlengine.query import ModelQuerySet\n from cqlengine import connection\n from cqlengine.management import sync_table\n from urllib2 import urlopen, Request\n from pyspark.sql import SQLContext\n import json\n from cassandra.cluster import Cluster\n from cassandra.query import SimpleStatement\n import operator\n from sets import Set\n CASSANDRA_KEYSPACE = 'playground'\n\n\n class table3_timeline(Model):\n link_id = columns.Text(primary_key=True)\n counts = columns.Integer()\n time = columns.Integer(primary_key=True, partition_key=False)\n\n\n class table3_comments(Model):\n link_id = columns.Text()\n author = columns.Text()\n body = columns.Text()\n created_utc = columns.Text()\n parent_id = columns.Text()\n subreddit = columns.Text()\n subreddit_id = columns.Text()\n name = columns.Text(primary_key=True)\n score = columns.Integer(index=True)\n\n\n class table3_links(Model):\n link_id = columns.Text(primary_key=True)\n title = columns.Text()\n permalink = columns.Text()\n subreddit = columns.Text()\n subreddit_id = columns.Text()\n selftext = columns.Text()\n created = columns.Integer()\n score = columns.Integer()\n url = columns.Text()\n top_comment = columns.Text()\n top_score = columns.Integer()\n connection.setup(['172.31.6.150'], CASSANDRA_KEYSPACE)\n cluster = Cluster(['54.193.123.92'])\n session = cluster.connect(CASSANDRA_KEYSPACE)\n sync_table(table3_links)\n sync_table(table3_comments)\n sync_table(table3_timeline)\n for d in d_iter:\n table3_comments.create(**d)\n input = {}\n createdtime = 0\n obj = table3_links.objects(link_id=d['link_id'])\n cql = (\n \"SELECT top_score, created FROM table3_links WHERE link_id='\" +\n d['link_id'] + \"'\")\n stmt = session.execute(cql)\n current = []\n for repo in stmt:\n current.append(repo)\n if len(current) > 0:\n createdtime = current[0][1]\n if int(current[0][0]) < int(d['score']):\n obj.update(top_comment=d['name'])\n obj.update(top_score=d['score'])\n else:\n source = 'http://www.reddit.com/by_id/' + d['link_id'] + '/.json'\n request = Request(source)\n response = urlopen(request)\n data = json.loads(response.read())\n input['title'] = data['data']['children'][0]['data']['title']\n input['permalink'] = data['data']['children'][0]['data'][\n 'permalink']\n input['subreddit'] = data['data']['children'][0]['data'][\n 'subreddit']\n input['selftext'] = data['data']['children'][0]['data']['selftext']\n input['subreddit_id'] = data['data']['children'][0]['data'][\n 'subreddit_id']\n input['created'] = int(data['data']['children'][0]['data'][\n 'created'])\n createdtime = input['created']\n input['url'] = data['data']['children'][0]['data']['url']\n input['score'] = data['data']['children'][0]['data']['score']\n table3_links.create(link_id=d['link_id'], title=input['title'],\n permalink=input['permalink'], subreddit=input['subreddit'],\n selftext=input['selftext'], subreddit_id=input[\n 'subreddit_id'], created=input['created'], url=input['url'],\n score=input['score'], top_comment=d['name'], top_score=d[\n 'score'])\n table3_timeline.create(link_id=d['link_id'], time=0, counts=0)\n timegap = int(abs(int(d['created_utc']) - createdtime) / 3600)\n cql2 = \"SELECT counts FROM table3_timeline WHERE link_id='\" + d[\n 'link_id'] + \"' AND time=\" + str(timegap)\n stmt = session.execute(cql2)\n count_tmp = []\n for rep in stmt:\n count_tmp.append(rep)\n if len(count_tmp) > 0:\n timeslot = table3_timeline.objects(link_id=d['link_id'], time=\n timegap)\n 
timeslot.update(counts=count_tmp[0][0] + 1)\n else:\n table3_timeline.create(link_id=d['link_id'], time=timegap, counts=1\n )\n sync_table(table3_links)\n sync_table(table3_comments)\n sync_table(table3_timeline)\n\n\ndf = sqlContext.read.json('s3n://yy-data/testJSON.json')\nrdd = df.map(lambda x: {'link_id': x.link_id, 'author': x.author, 'body': x\n .body, 'created_utc': x.created_utc, 'parent_id': x.parent_id,\n 'subreddit': x.subreddit, 'subreddit_id': x.subreddit_id, 'name': x.\n name, 'score': x.score})\ntest([])\nrdd.foreachPartition(test)\n",
"def test(d_iter):\n from cqlengine import columns\n from cqlengine.models import Model\n from cqlengine.query import ModelQuerySet\n from cqlengine import connection\n from cqlengine.management import sync_table\n from urllib2 import urlopen, Request\n from pyspark.sql import SQLContext\n import json\n from cassandra.cluster import Cluster\n from cassandra.query import SimpleStatement\n import operator\n from sets import Set\n CASSANDRA_KEYSPACE = 'playground'\n\n\n class table3_timeline(Model):\n link_id = columns.Text(primary_key=True)\n counts = columns.Integer()\n time = columns.Integer(primary_key=True, partition_key=False)\n\n\n class table3_comments(Model):\n link_id = columns.Text()\n author = columns.Text()\n body = columns.Text()\n created_utc = columns.Text()\n parent_id = columns.Text()\n subreddit = columns.Text()\n subreddit_id = columns.Text()\n name = columns.Text(primary_key=True)\n score = columns.Integer(index=True)\n\n\n class table3_links(Model):\n link_id = columns.Text(primary_key=True)\n title = columns.Text()\n permalink = columns.Text()\n subreddit = columns.Text()\n subreddit_id = columns.Text()\n selftext = columns.Text()\n created = columns.Integer()\n score = columns.Integer()\n url = columns.Text()\n top_comment = columns.Text()\n top_score = columns.Integer()\n connection.setup(['172.31.6.150'], CASSANDRA_KEYSPACE)\n cluster = Cluster(['54.193.123.92'])\n session = cluster.connect(CASSANDRA_KEYSPACE)\n sync_table(table3_links)\n sync_table(table3_comments)\n sync_table(table3_timeline)\n for d in d_iter:\n table3_comments.create(**d)\n input = {}\n createdtime = 0\n obj = table3_links.objects(link_id=d['link_id'])\n cql = (\n \"SELECT top_score, created FROM table3_links WHERE link_id='\" +\n d['link_id'] + \"'\")\n stmt = session.execute(cql)\n current = []\n for repo in stmt:\n current.append(repo)\n if len(current) > 0:\n createdtime = current[0][1]\n if int(current[0][0]) < int(d['score']):\n obj.update(top_comment=d['name'])\n obj.update(top_score=d['score'])\n else:\n source = 'http://www.reddit.com/by_id/' + d['link_id'] + '/.json'\n request = Request(source)\n response = urlopen(request)\n data = json.loads(response.read())\n input['title'] = data['data']['children'][0]['data']['title']\n input['permalink'] = data['data']['children'][0]['data'][\n 'permalink']\n input['subreddit'] = data['data']['children'][0]['data'][\n 'subreddit']\n input['selftext'] = data['data']['children'][0]['data']['selftext']\n input['subreddit_id'] = data['data']['children'][0]['data'][\n 'subreddit_id']\n input['created'] = int(data['data']['children'][0]['data'][\n 'created'])\n createdtime = input['created']\n input['url'] = data['data']['children'][0]['data']['url']\n input['score'] = data['data']['children'][0]['data']['score']\n table3_links.create(link_id=d['link_id'], title=input['title'],\n permalink=input['permalink'], subreddit=input['subreddit'],\n selftext=input['selftext'], subreddit_id=input[\n 'subreddit_id'], created=input['created'], url=input['url'],\n score=input['score'], top_comment=d['name'], top_score=d[\n 'score'])\n table3_timeline.create(link_id=d['link_id'], time=0, counts=0)\n timegap = int(abs(int(d['created_utc']) - createdtime) / 3600)\n cql2 = \"SELECT counts FROM table3_timeline WHERE link_id='\" + d[\n 'link_id'] + \"' AND time=\" + str(timegap)\n stmt = session.execute(cql2)\n count_tmp = []\n for rep in stmt:\n count_tmp.append(rep)\n if len(count_tmp) > 0:\n timeslot = table3_timeline.objects(link_id=d['link_id'], time=\n timegap)\n 
timeslot.update(counts=count_tmp[0][0] + 1)\n else:\n table3_timeline.create(link_id=d['link_id'], time=timegap, counts=1\n )\n sync_table(table3_links)\n sync_table(table3_comments)\n sync_table(table3_timeline)\n\n\n<assignment token>\ntest([])\nrdd.foreachPartition(test)\n",
"def test(d_iter):\n from cqlengine import columns\n from cqlengine.models import Model\n from cqlengine.query import ModelQuerySet\n from cqlengine import connection\n from cqlengine.management import sync_table\n from urllib2 import urlopen, Request\n from pyspark.sql import SQLContext\n import json\n from cassandra.cluster import Cluster\n from cassandra.query import SimpleStatement\n import operator\n from sets import Set\n CASSANDRA_KEYSPACE = 'playground'\n\n\n class table3_timeline(Model):\n link_id = columns.Text(primary_key=True)\n counts = columns.Integer()\n time = columns.Integer(primary_key=True, partition_key=False)\n\n\n class table3_comments(Model):\n link_id = columns.Text()\n author = columns.Text()\n body = columns.Text()\n created_utc = columns.Text()\n parent_id = columns.Text()\n subreddit = columns.Text()\n subreddit_id = columns.Text()\n name = columns.Text(primary_key=True)\n score = columns.Integer(index=True)\n\n\n class table3_links(Model):\n link_id = columns.Text(primary_key=True)\n title = columns.Text()\n permalink = columns.Text()\n subreddit = columns.Text()\n subreddit_id = columns.Text()\n selftext = columns.Text()\n created = columns.Integer()\n score = columns.Integer()\n url = columns.Text()\n top_comment = columns.Text()\n top_score = columns.Integer()\n connection.setup(['172.31.6.150'], CASSANDRA_KEYSPACE)\n cluster = Cluster(['54.193.123.92'])\n session = cluster.connect(CASSANDRA_KEYSPACE)\n sync_table(table3_links)\n sync_table(table3_comments)\n sync_table(table3_timeline)\n for d in d_iter:\n table3_comments.create(**d)\n input = {}\n createdtime = 0\n obj = table3_links.objects(link_id=d['link_id'])\n cql = (\n \"SELECT top_score, created FROM table3_links WHERE link_id='\" +\n d['link_id'] + \"'\")\n stmt = session.execute(cql)\n current = []\n for repo in stmt:\n current.append(repo)\n if len(current) > 0:\n createdtime = current[0][1]\n if int(current[0][0]) < int(d['score']):\n obj.update(top_comment=d['name'])\n obj.update(top_score=d['score'])\n else:\n source = 'http://www.reddit.com/by_id/' + d['link_id'] + '/.json'\n request = Request(source)\n response = urlopen(request)\n data = json.loads(response.read())\n input['title'] = data['data']['children'][0]['data']['title']\n input['permalink'] = data['data']['children'][0]['data'][\n 'permalink']\n input['subreddit'] = data['data']['children'][0]['data'][\n 'subreddit']\n input['selftext'] = data['data']['children'][0]['data']['selftext']\n input['subreddit_id'] = data['data']['children'][0]['data'][\n 'subreddit_id']\n input['created'] = int(data['data']['children'][0]['data'][\n 'created'])\n createdtime = input['created']\n input['url'] = data['data']['children'][0]['data']['url']\n input['score'] = data['data']['children'][0]['data']['score']\n table3_links.create(link_id=d['link_id'], title=input['title'],\n permalink=input['permalink'], subreddit=input['subreddit'],\n selftext=input['selftext'], subreddit_id=input[\n 'subreddit_id'], created=input['created'], url=input['url'],\n score=input['score'], top_comment=d['name'], top_score=d[\n 'score'])\n table3_timeline.create(link_id=d['link_id'], time=0, counts=0)\n timegap = int(abs(int(d['created_utc']) - createdtime) / 3600)\n cql2 = \"SELECT counts FROM table3_timeline WHERE link_id='\" + d[\n 'link_id'] + \"' AND time=\" + str(timegap)\n stmt = session.execute(cql2)\n count_tmp = []\n for rep in stmt:\n count_tmp.append(rep)\n if len(count_tmp) > 0:\n timeslot = table3_timeline.objects(link_id=d['link_id'], time=\n timegap)\n 
timeslot.update(counts=count_tmp[0][0] + 1)\n else:\n table3_timeline.create(link_id=d['link_id'], time=timegap, counts=1\n )\n sync_table(table3_links)\n sync_table(table3_comments)\n sync_table(table3_timeline)\n\n\n<assignment token>\n<code token>\n",
"<function token>\n<assignment token>\n<code token>\n"
] | false |
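Record 944 buckets each comment into whole hours since its link was created (the timegap computation). Factored out as a pure function, the arithmetic becomes trivially testable; this sketch mirrors the exact expression in the record:

def hour_bucket(comment_utc, link_created_utc):
    """Whole hours between a comment and its link's creation time."""
    return int(abs(int(comment_utc) - int(link_created_utc)) / 3600)

assert hour_bucket(1193875200, 1193871600) == 1   # exactly one hour apart
assert hour_bucket(1193875199, 1193871600) == 0   # 59 min 59 s still lands in bucket 0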
945 |
71cdddfdd7c1327a8a77808dbdd0ff98d827231f
|
from flask.ext.restful import Resource, abort
from flask_login import current_user, login_required
from peewee import DoesNotExist
from redash.authentication.org_resolving import current_org
from redash.tasks import record_event
class BaseResource(Resource):
decorators = [login_required]
def __init__(self, *args, **kwargs):
super(BaseResource, self).__init__(*args, **kwargs)
self._user = None
def dispatch_request(self, *args, **kwargs):
kwargs.pop('org_slug', None)
return super(BaseResource, self).dispatch_request(*args, **kwargs)
@property
def current_user(self):
return current_user._get_current_object()
@property
def current_org(self):
return current_org._get_current_object()
def record_event(self, options):
options.update({
'user_id': self.current_user.id,
'org_id': self.current_org.id
})
record_event.delay(options)
def require_fields(req, fields):
for f in fields:
if f not in req:
abort(400)
def get_object_or_404(fn, *args, **kwargs):
try:
return fn(*args, **kwargs)
except DoesNotExist:
abort(404)
|
[
"from flask.ext.restful import Resource, abort\nfrom flask_login import current_user, login_required\nfrom peewee import DoesNotExist\n\nfrom redash.authentication.org_resolving import current_org\nfrom redash.tasks import record_event\n\n\nclass BaseResource(Resource):\n decorators = [login_required]\n\n def __init__(self, *args, **kwargs):\n super(BaseResource, self).__init__(*args, **kwargs)\n self._user = None\n\n def dispatch_request(self, *args, **kwargs):\n kwargs.pop('org_slug', None)\n\n return super(BaseResource, self).dispatch_request(*args, **kwargs)\n\n @property\n def current_user(self):\n return current_user._get_current_object()\n\n @property\n def current_org(self):\n return current_org._get_current_object()\n\n def record_event(self, options):\n options.update({\n 'user_id': self.current_user.id,\n 'org_id': self.current_org.id\n })\n\n record_event.delay(options)\n\n\ndef require_fields(req, fields):\n for f in fields:\n if f not in req:\n abort(400)\n\n\ndef get_object_or_404(fn, *args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except DoesNotExist:\n abort(404)\n",
"from flask.ext.restful import Resource, abort\nfrom flask_login import current_user, login_required\nfrom peewee import DoesNotExist\nfrom redash.authentication.org_resolving import current_org\nfrom redash.tasks import record_event\n\n\nclass BaseResource(Resource):\n decorators = [login_required]\n\n def __init__(self, *args, **kwargs):\n super(BaseResource, self).__init__(*args, **kwargs)\n self._user = None\n\n def dispatch_request(self, *args, **kwargs):\n kwargs.pop('org_slug', None)\n return super(BaseResource, self).dispatch_request(*args, **kwargs)\n\n @property\n def current_user(self):\n return current_user._get_current_object()\n\n @property\n def current_org(self):\n return current_org._get_current_object()\n\n def record_event(self, options):\n options.update({'user_id': self.current_user.id, 'org_id': self.\n current_org.id})\n record_event.delay(options)\n\n\ndef require_fields(req, fields):\n for f in fields:\n if f not in req:\n abort(400)\n\n\ndef get_object_or_404(fn, *args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except DoesNotExist:\n abort(404)\n",
"<import token>\n\n\nclass BaseResource(Resource):\n decorators = [login_required]\n\n def __init__(self, *args, **kwargs):\n super(BaseResource, self).__init__(*args, **kwargs)\n self._user = None\n\n def dispatch_request(self, *args, **kwargs):\n kwargs.pop('org_slug', None)\n return super(BaseResource, self).dispatch_request(*args, **kwargs)\n\n @property\n def current_user(self):\n return current_user._get_current_object()\n\n @property\n def current_org(self):\n return current_org._get_current_object()\n\n def record_event(self, options):\n options.update({'user_id': self.current_user.id, 'org_id': self.\n current_org.id})\n record_event.delay(options)\n\n\ndef require_fields(req, fields):\n for f in fields:\n if f not in req:\n abort(400)\n\n\ndef get_object_or_404(fn, *args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except DoesNotExist:\n abort(404)\n",
"<import token>\n\n\nclass BaseResource(Resource):\n decorators = [login_required]\n\n def __init__(self, *args, **kwargs):\n super(BaseResource, self).__init__(*args, **kwargs)\n self._user = None\n\n def dispatch_request(self, *args, **kwargs):\n kwargs.pop('org_slug', None)\n return super(BaseResource, self).dispatch_request(*args, **kwargs)\n\n @property\n def current_user(self):\n return current_user._get_current_object()\n\n @property\n def current_org(self):\n return current_org._get_current_object()\n\n def record_event(self, options):\n options.update({'user_id': self.current_user.id, 'org_id': self.\n current_org.id})\n record_event.delay(options)\n\n\ndef require_fields(req, fields):\n for f in fields:\n if f not in req:\n abort(400)\n\n\n<function token>\n",
"<import token>\n\n\nclass BaseResource(Resource):\n decorators = [login_required]\n\n def __init__(self, *args, **kwargs):\n super(BaseResource, self).__init__(*args, **kwargs)\n self._user = None\n\n def dispatch_request(self, *args, **kwargs):\n kwargs.pop('org_slug', None)\n return super(BaseResource, self).dispatch_request(*args, **kwargs)\n\n @property\n def current_user(self):\n return current_user._get_current_object()\n\n @property\n def current_org(self):\n return current_org._get_current_object()\n\n def record_event(self, options):\n options.update({'user_id': self.current_user.id, 'org_id': self.\n current_org.id})\n record_event.delay(options)\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\nclass BaseResource(Resource):\n <assignment token>\n\n def __init__(self, *args, **kwargs):\n super(BaseResource, self).__init__(*args, **kwargs)\n self._user = None\n\n def dispatch_request(self, *args, **kwargs):\n kwargs.pop('org_slug', None)\n return super(BaseResource, self).dispatch_request(*args, **kwargs)\n\n @property\n def current_user(self):\n return current_user._get_current_object()\n\n @property\n def current_org(self):\n return current_org._get_current_object()\n\n def record_event(self, options):\n options.update({'user_id': self.current_user.id, 'org_id': self.\n current_org.id})\n record_event.delay(options)\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\nclass BaseResource(Resource):\n <assignment token>\n <function token>\n\n def dispatch_request(self, *args, **kwargs):\n kwargs.pop('org_slug', None)\n return super(BaseResource, self).dispatch_request(*args, **kwargs)\n\n @property\n def current_user(self):\n return current_user._get_current_object()\n\n @property\n def current_org(self):\n return current_org._get_current_object()\n\n def record_event(self, options):\n options.update({'user_id': self.current_user.id, 'org_id': self.\n current_org.id})\n record_event.delay(options)\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\nclass BaseResource(Resource):\n <assignment token>\n <function token>\n\n def dispatch_request(self, *args, **kwargs):\n kwargs.pop('org_slug', None)\n return super(BaseResource, self).dispatch_request(*args, **kwargs)\n\n @property\n def current_user(self):\n return current_user._get_current_object()\n\n @property\n def current_org(self):\n return current_org._get_current_object()\n <function token>\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\nclass BaseResource(Resource):\n <assignment token>\n <function token>\n\n def dispatch_request(self, *args, **kwargs):\n kwargs.pop('org_slug', None)\n return super(BaseResource, self).dispatch_request(*args, **kwargs)\n <function token>\n\n @property\n def current_org(self):\n return current_org._get_current_object()\n <function token>\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\nclass BaseResource(Resource):\n <assignment token>\n <function token>\n <function token>\n <function token>\n\n @property\n def current_org(self):\n return current_org._get_current_object()\n <function token>\n\n\n<function token>\n<function token>\n",
"<import token>\n\n\nclass BaseResource(Resource):\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n",
"<import token>\n<class token>\n<function token>\n<function token>\n"
] | false |
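The get_object_or_404 helper in the record above converts peewee's DoesNotExist into an HTTP 404. A minimal standalone sketch of that pattern, using a hypothetical fake_lookup in place of a real Model.get and a print in place of flask-restful's abort(404):

from peewee import DoesNotExist

def fake_lookup(pk):
    # Hypothetical stand-in for Model.get; always misses.
    raise DoesNotExist('no row with pk=%s' % pk)

def get_object_or_404(fn, *args, **kwargs):
    try:
        return fn(*args, **kwargs)
    except DoesNotExist:
        # In the record this is abort(404); printing keeps the sketch runnable outside a request.
        print('404: object not found')

get_object_or_404(fake_lookup, 42)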
946 |
0b3f16ee9b287c6c77acde674abec9deb4053c83
|
import tensorflow as tf
import numpy as np
def house_model(y_new):
    # Bedrooms vs. price in units of 100k. The 0.100 in the original is
    # assumed to be a typo for 1.00, which puts every point on y = 0.5 + 0.5x.
    xs = np.array([0, 1, 2, 4, 6, 8, 10], dtype=float)
    ys = np.array([0.50, 1.00, 1.50, 2.50, 3.50, 4.50, 5.50], dtype=float)
    # Use tf.keras.layers throughout; mixing standalone keras layers into a
    # tf.keras model fails on current TensorFlow releases.
    model = tf.keras.Sequential([tf.keras.layers.Dense(units=1, input_shape=[1])])
    model.compile(optimizer='sgd', loss='mean_squared_error')
    model.fit(xs, ys, epochs=100)
    return model.predict(y_new)[0]
prediction = house_model([7.0])
print(prediction)
|
[
"import tensorflow as tf\nimport keras\nimport numpy as np\n\ndef house_model(y_new):\n xs = np.array([0, 1, 2, 4, 6, 8, 10], dtype=float) # Your Code Here#\n ys = np.array([0.50, 0.100, 1.50, 2.50, 3.50, 4.50, 5.50], dtype=float) # Your Code Here#\n model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])]) # Your Code Here#\n model.compile(optimizer='sgd', loss='mean_squared_error')\n model.fit(xs,ys, epochs=100)\n return model.predict(y_new)[0]\n\nprediction = house_model([7.0])\nprint(prediction)\n\n",
"import tensorflow as tf\nimport keras\nimport numpy as np\n\n\ndef house_model(y_new):\n xs = np.array([0, 1, 2, 4, 6, 8, 10], dtype=float)\n ys = np.array([0.5, 0.1, 1.5, 2.5, 3.5, 4.5, 5.5], dtype=float)\n model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])\n model.compile(optimizer='sgd', loss='mean_squared_error')\n model.fit(xs, ys, epochs=100)\n return model.predict(y_new)[0]\n\n\nprediction = house_model([7.0])\nprint(prediction)\n",
"<import token>\n\n\ndef house_model(y_new):\n xs = np.array([0, 1, 2, 4, 6, 8, 10], dtype=float)\n ys = np.array([0.5, 0.1, 1.5, 2.5, 3.5, 4.5, 5.5], dtype=float)\n model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])\n model.compile(optimizer='sgd', loss='mean_squared_error')\n model.fit(xs, ys, epochs=100)\n return model.predict(y_new)[0]\n\n\nprediction = house_model([7.0])\nprint(prediction)\n",
"<import token>\n\n\ndef house_model(y_new):\n xs = np.array([0, 1, 2, 4, 6, 8, 10], dtype=float)\n ys = np.array([0.5, 0.1, 1.5, 2.5, 3.5, 4.5, 5.5], dtype=float)\n model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])\n model.compile(optimizer='sgd', loss='mean_squared_error')\n model.fit(xs, ys, epochs=100)\n return model.predict(y_new)[0]\n\n\n<assignment token>\nprint(prediction)\n",
"<import token>\n\n\ndef house_model(y_new):\n xs = np.array([0, 1, 2, 4, 6, 8, 10], dtype=float)\n ys = np.array([0.5, 0.1, 1.5, 2.5, 3.5, 4.5, 5.5], dtype=float)\n model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])\n model.compile(optimizer='sgd', loss='mean_squared_error')\n model.fit(xs, ys, epochs=100)\n return model.predict(y_new)[0]\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
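The training data in this record lies on a straight line (assuming the 0.100 entry is a typo for 1.00), so the single-unit network is just learning slope 0.5 and intercept 0.5. A quick closed-form check with numpy's least-squares fit, independent of TensorFlow:

import numpy as np

xs = np.array([0, 1, 2, 4, 6, 8, 10], dtype=float)
ys = np.array([0.5, 1.0, 1.5, 2.5, 3.5, 4.5, 5.5], dtype=float)

slope, intercept = np.polyfit(xs, ys, deg=1)
print(slope, intercept)         # 0.5 and 0.5: the line the Dense(1) layer should approach
print(slope * 7.0 + intercept)  # 4.0, the value house_model([7.0]) should converge toward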
947 |
1754bce54a47cb78dce3b545d3dce835a4e0e69f
|
#!/usr/bin/env python
# coding: utf-8
import logging
import config
def get_common_logger(name='common', logfile=None):
'''
args: name (str): logger name
          logfile (str): log file; a stream handler (which writes to stderr) is used by default.
return:
logger obj
'''
my_logger = logging.getLogger(name)
my_logger.setLevel(config.LOG_LEVEL)
if logfile:
handler = logging.FileHandler(logfile)
else:
handler = logging.StreamHandler()
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(filename)s - %(funcName)s - %(lineno)s - %(message)s')
handler.setFormatter(formatter)
my_logger.addHandler(handler)
    # Stop propagation so ancestor loggers do not emit duplicate records.
my_logger.propagate = False
return my_logger
COMMON_LOGGER = get_common_logger('common logger')
if __name__ == '__main__':
COMMON_LOGGER.debug('test')
|
[
"#!/usr/bin/env python\n# coding: utf-8\n\nimport logging\n\nimport config\n\n\ndef get_common_logger(name='common', logfile=None):\n '''\n args: name (str): logger name\n logfile (str): log file, use stream handler (stdout) as default.\n return:\n logger obj\n '''\n my_logger = logging.getLogger(name)\n my_logger.setLevel(config.LOG_LEVEL)\n if logfile:\n handler = logging.FileHandler(logfile)\n else:\n handler = logging.StreamHandler()\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(filename)s - %(funcName)s - %(lineno)s - %(message)s')\n handler.setFormatter(formatter)\n my_logger.addHandler(handler)\n # Stop logger propagate, forbiden duplicate log.\n my_logger.propagate = False\n return my_logger\n\n\nCOMMON_LOGGER = get_common_logger('common logger')\n\nif __name__ == '__main__':\n COMMON_LOGGER.debug('test')\n",
"import logging\nimport config\n\n\ndef get_common_logger(name='common', logfile=None):\n \"\"\"\n args: name (str): logger name\n logfile (str): log file, use stream handler (stdout) as default.\n return:\n logger obj\n \"\"\"\n my_logger = logging.getLogger(name)\n my_logger.setLevel(config.LOG_LEVEL)\n if logfile:\n handler = logging.FileHandler(logfile)\n else:\n handler = logging.StreamHandler()\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(filename)s - %(funcName)s - %(lineno)s - %(message)s'\n )\n handler.setFormatter(formatter)\n my_logger.addHandler(handler)\n my_logger.propagate = False\n return my_logger\n\n\nCOMMON_LOGGER = get_common_logger('common logger')\nif __name__ == '__main__':\n COMMON_LOGGER.debug('test')\n",
"<import token>\n\n\ndef get_common_logger(name='common', logfile=None):\n \"\"\"\n args: name (str): logger name\n logfile (str): log file, use stream handler (stdout) as default.\n return:\n logger obj\n \"\"\"\n my_logger = logging.getLogger(name)\n my_logger.setLevel(config.LOG_LEVEL)\n if logfile:\n handler = logging.FileHandler(logfile)\n else:\n handler = logging.StreamHandler()\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(filename)s - %(funcName)s - %(lineno)s - %(message)s'\n )\n handler.setFormatter(formatter)\n my_logger.addHandler(handler)\n my_logger.propagate = False\n return my_logger\n\n\nCOMMON_LOGGER = get_common_logger('common logger')\nif __name__ == '__main__':\n COMMON_LOGGER.debug('test')\n",
"<import token>\n\n\ndef get_common_logger(name='common', logfile=None):\n \"\"\"\n args: name (str): logger name\n logfile (str): log file, use stream handler (stdout) as default.\n return:\n logger obj\n \"\"\"\n my_logger = logging.getLogger(name)\n my_logger.setLevel(config.LOG_LEVEL)\n if logfile:\n handler = logging.FileHandler(logfile)\n else:\n handler = logging.StreamHandler()\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(filename)s - %(funcName)s - %(lineno)s - %(message)s'\n )\n handler.setFormatter(formatter)\n my_logger.addHandler(handler)\n my_logger.propagate = False\n return my_logger\n\n\n<assignment token>\nif __name__ == '__main__':\n COMMON_LOGGER.debug('test')\n",
"<import token>\n\n\ndef get_common_logger(name='common', logfile=None):\n \"\"\"\n args: name (str): logger name\n logfile (str): log file, use stream handler (stdout) as default.\n return:\n logger obj\n \"\"\"\n my_logger = logging.getLogger(name)\n my_logger.setLevel(config.LOG_LEVEL)\n if logfile:\n handler = logging.FileHandler(logfile)\n else:\n handler = logging.StreamHandler()\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(filename)s - %(funcName)s - %(lineno)s - %(message)s'\n )\n handler.setFormatter(formatter)\n my_logger.addHandler(handler)\n my_logger.propagate = False\n return my_logger\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
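The propagate = False line in this record is what prevents double logging: a handler attached to a named logger plus propagation to a root logger that also has a handler would otherwise emit each record twice. A condensed, standalone illustration of the same setup (note that logging.StreamHandler() with no argument writes to stderr, not stdout):

import logging

logging.basicConfig(level=logging.DEBUG)  # gives the root logger a handler

logger = logging.getLogger('common')
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()         # stderr by default
handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(handler)

logger.propagate = False  # comment this out and the message appears twice
logger.debug('emitted by exactly one handler')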
948 |
6affc182f5d3353d46f6e9a21344bc85bf894165
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
# pylint: disable=dangerous-default-value,wrong-import-position,unused-import, import-outside-toplevel
def create_app(settings_override={}):
app = Flask(__name__)
app.config.from_object('zezin.settings.Configuration')
app.config.update(settings_override)
db.init_app(app)
from zezin.views import partners_routes
app.register_blueprint(blueprint=partners_routes)
return app
import zezin.models # isort:skip
|
[
"from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\n\ndb = SQLAlchemy()\n\n\n# pylint: disable=dangerous-default-value,wrong-import-position,unused-import, import-outside-toplevel\ndef create_app(settings_override={}):\n app = Flask(__name__)\n app.config.from_object('zezin.settings.Configuration')\n app.config.update(settings_override)\n\n db.init_app(app)\n\n from zezin.views import partners_routes\n\n app.register_blueprint(blueprint=partners_routes)\n\n return app\n\n\nimport zezin.models # isort:skip\n",
"from flask import Flask\nfrom flask_sqlalchemy import SQLAlchemy\ndb = SQLAlchemy()\n\n\ndef create_app(settings_override={}):\n app = Flask(__name__)\n app.config.from_object('zezin.settings.Configuration')\n app.config.update(settings_override)\n db.init_app(app)\n from zezin.views import partners_routes\n app.register_blueprint(blueprint=partners_routes)\n return app\n\n\nimport zezin.models\n",
"<import token>\ndb = SQLAlchemy()\n\n\ndef create_app(settings_override={}):\n app = Flask(__name__)\n app.config.from_object('zezin.settings.Configuration')\n app.config.update(settings_override)\n db.init_app(app)\n from zezin.views import partners_routes\n app.register_blueprint(blueprint=partners_routes)\n return app\n\n\n<import token>\n",
"<import token>\n<assignment token>\n\n\ndef create_app(settings_override={}):\n app = Flask(__name__)\n app.config.from_object('zezin.settings.Configuration')\n app.config.update(settings_override)\n db.init_app(app)\n from zezin.views import partners_routes\n app.register_blueprint(blueprint=partners_routes)\n return app\n\n\n<import token>\n",
"<import token>\n<assignment token>\n<function token>\n<import token>\n"
] | false |
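The settings_override parameter in create_app exists so tests can inject configuration without editing zezin.settings. A generic sketch of the same pattern with the zezin-specific pieces (db, blueprint, deferred model import) stripped out; it also uses None instead of the mutable {} default that the record silences with a pylint pragma:

from flask import Flask

def make_app(settings_override=None):
    app = Flask(__name__)
    app.config.update(TESTING=False)            # stand-in for the project defaults
    app.config.update(settings_override or {})  # overrides win
    return app

app = make_app({'TESTING': True})
print(app.config['TESTING'])  # True: handy for spinning up isolated test apps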
949 |
9abf2b9b90d18332ede94cf1af778e0dda54330b
|
# Stubs for docutils.parsers.rst.directives.tables (Python 3.6)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
import csv
from docutils.statemachine import StringList
from docutils.nodes import Node, system_message, table, title
from docutils.parsers.rst import Directive
from typing import Any, Callable, Dict, List, Tuple, TypeVar
N_co = TypeVar('N_co', bound=Node, covariant=True)
__docformat__: str
def align(argument: str) -> str: ...
class Table(Directive):
optional_arguments: int = ...
final_argument_whitespace: bool = ...
option_spec: Dict[str, Callable[[str], Any]] = ...
has_content: bool = ...
def make_title(self) -> Tuple[title, List[system_message]]: ...
def process_header_option(self) -> Tuple[List[Node], int]: ...
def check_table_dimensions(self, rows: List[List[N_co]], header_rows: int, stub_columns: int) -> None: ...
def set_table_width(self, table_node: table) -> None: ...
@property
def widths(self) -> str: ...
def get_column_widths(self, max_cols: int) -> List[int]: ...
def extend_short_rows_with_empty_cells(self, columns: int, parts: Tuple[List[N_co], List[N_co]]) -> None: ...
class RSTTable(Table):
def run(self) -> List[Node]: ...
class CSVTable(Table):
option_spec: Dict[str, Callable[[str], Any]] = ...
class DocutilsDialect(csv.Dialect):
delimiter: str = ...
quotechar: str = ...
doublequote: bool = ...
skipinitialspace: bool = ...
strict: bool = ...
lineterminator: str = ...
quoting: Any = ...
escapechar: str = ...
def __init__(self, options: Dict[str, Any]) -> None: ...
class HeaderDialect(csv.Dialect):
delimiter: str = ...
quotechar: str = ...
escapechar: str = ...
doublequote: bool = ...
skipinitialspace: bool = ...
strict: bool = ...
lineterminator: str = ...
quoting: Any = ...
def check_requirements(self) -> None: ...
def run(self) -> List[Node]: ...
def get_csv_data(self) -> Tuple[List[str], str]: ...
decode_from_csv: Callable[[str], str] = ...
encode_for_csv: Callable[[str], str] = ...
def parse_csv_data_into_rows(self, csv_data: List[str], dialect: Any, source: str) -> Tuple[List[Tuple[int, int, int, StringList]], int]: ...
class ListTable(Table):
option_spec: Dict[str, Callable[[str], Any]] = ...
def run(self) -> List[Node]: ...
def check_list_content(self, node: Node) -> Tuple[int, List[int]]: ...
def build_table_from_list(self, table_data: List[List[N_co]], col_widths: List[int], header_rows: int, stub_columns: int) -> table: ...
|
[
"# Stubs for docutils.parsers.rst.directives.tables (Python 3.6)\n#\n# NOTE: This dynamically typed stub was automatically generated by stubgen.\n\nimport csv\nfrom docutils.statemachine import StringList\nfrom docutils.nodes import Node, system_message, table, title\nfrom docutils.parsers.rst import Directive\nfrom typing import Any, Callable, Dict, List, Tuple, TypeVar\n\nN_co = TypeVar('N_co', bound=Node, covariant=True)\n\n__docformat__: str\n\ndef align(argument: str) -> str: ...\n\nclass Table(Directive):\n optional_arguments: int = ...\n final_argument_whitespace: bool = ...\n option_spec: Dict[str, Callable[[str], Any]] = ...\n has_content: bool = ...\n def make_title(self) -> Tuple[title, List[system_message]]: ...\n def process_header_option(self) -> Tuple[List[Node], int]: ...\n def check_table_dimensions(self, rows: List[List[N_co]], header_rows: int, stub_columns: int) -> None: ...\n def set_table_width(self, table_node: table) -> None: ...\n @property\n def widths(self) -> str: ...\n def get_column_widths(self, max_cols: int) -> List[int]: ...\n def extend_short_rows_with_empty_cells(self, columns: int, parts: Tuple[List[N_co], List[N_co]]) -> None: ...\n\nclass RSTTable(Table):\n def run(self) -> List[Node]: ...\n\nclass CSVTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n class DocutilsDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n escapechar: str = ...\n def __init__(self, options: Dict[str, Any]) -> None: ...\n class HeaderDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n escapechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n def check_requirements(self) -> None: ...\n def run(self) -> List[Node]: ...\n def get_csv_data(self) -> Tuple[List[str], str]: ...\n decode_from_csv: Callable[[str], str] = ...\n encode_for_csv: Callable[[str], str] = ...\n def parse_csv_data_into_rows(self, csv_data: List[str], dialect: Any, source: str) -> Tuple[List[Tuple[int, int, int, StringList]], int]: ...\n\nclass ListTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n def run(self) -> List[Node]: ...\n def check_list_content(self, node: Node) -> Tuple[int, List[int]]: ...\n def build_table_from_list(self, table_data: List[List[N_co]], col_widths: List[int], header_rows: int, stub_columns: int) -> table: ...\n",
"import csv\nfrom docutils.statemachine import StringList\nfrom docutils.nodes import Node, system_message, table, title\nfrom docutils.parsers.rst import Directive\nfrom typing import Any, Callable, Dict, List, Tuple, TypeVar\nN_co = TypeVar('N_co', bound=Node, covariant=True)\n__docformat__: str\n\n\ndef align(argument: str) ->str:\n ...\n\n\nclass Table(Directive):\n optional_arguments: int = ...\n final_argument_whitespace: bool = ...\n option_spec: Dict[str, Callable[[str], Any]] = ...\n has_content: bool = ...\n\n def make_title(self) ->Tuple[title, List[system_message]]:\n ...\n\n def process_header_option(self) ->Tuple[List[Node], int]:\n ...\n\n def check_table_dimensions(self, rows: List[List[N_co]], header_rows:\n int, stub_columns: int) ->None:\n ...\n\n def set_table_width(self, table_node: table) ->None:\n ...\n\n @property\n def widths(self) ->str:\n ...\n\n def get_column_widths(self, max_cols: int) ->List[int]:\n ...\n\n def extend_short_rows_with_empty_cells(self, columns: int, parts: Tuple\n [List[N_co], List[N_co]]) ->None:\n ...\n\n\nclass RSTTable(Table):\n\n def run(self) ->List[Node]:\n ...\n\n\nclass CSVTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n\n class DocutilsDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n escapechar: str = ...\n\n def __init__(self, options: Dict[str, Any]) ->None:\n ...\n\n\n class HeaderDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n escapechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n\n def check_requirements(self) ->None:\n ...\n\n def run(self) ->List[Node]:\n ...\n\n def get_csv_data(self) ->Tuple[List[str], str]:\n ...\n decode_from_csv: Callable[[str], str] = ...\n encode_for_csv: Callable[[str], str] = ...\n\n def parse_csv_data_into_rows(self, csv_data: List[str], dialect: Any,\n source: str) ->Tuple[List[Tuple[int, int, int, StringList]], int]:\n ...\n\n\nclass ListTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n def run(self) ->List[Node]:\n ...\n\n def check_list_content(self, node: Node) ->Tuple[int, List[int]]:\n ...\n\n def build_table_from_list(self, table_data: List[List[N_co]],\n col_widths: List[int], header_rows: int, stub_columns: int) ->table:\n ...\n",
"<import token>\nN_co = TypeVar('N_co', bound=Node, covariant=True)\n__docformat__: str\n\n\ndef align(argument: str) ->str:\n ...\n\n\nclass Table(Directive):\n optional_arguments: int = ...\n final_argument_whitespace: bool = ...\n option_spec: Dict[str, Callable[[str], Any]] = ...\n has_content: bool = ...\n\n def make_title(self) ->Tuple[title, List[system_message]]:\n ...\n\n def process_header_option(self) ->Tuple[List[Node], int]:\n ...\n\n def check_table_dimensions(self, rows: List[List[N_co]], header_rows:\n int, stub_columns: int) ->None:\n ...\n\n def set_table_width(self, table_node: table) ->None:\n ...\n\n @property\n def widths(self) ->str:\n ...\n\n def get_column_widths(self, max_cols: int) ->List[int]:\n ...\n\n def extend_short_rows_with_empty_cells(self, columns: int, parts: Tuple\n [List[N_co], List[N_co]]) ->None:\n ...\n\n\nclass RSTTable(Table):\n\n def run(self) ->List[Node]:\n ...\n\n\nclass CSVTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n\n class DocutilsDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n escapechar: str = ...\n\n def __init__(self, options: Dict[str, Any]) ->None:\n ...\n\n\n class HeaderDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n escapechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n\n def check_requirements(self) ->None:\n ...\n\n def run(self) ->List[Node]:\n ...\n\n def get_csv_data(self) ->Tuple[List[str], str]:\n ...\n decode_from_csv: Callable[[str], str] = ...\n encode_for_csv: Callable[[str], str] = ...\n\n def parse_csv_data_into_rows(self, csv_data: List[str], dialect: Any,\n source: str) ->Tuple[List[Tuple[int, int, int, StringList]], int]:\n ...\n\n\nclass ListTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n def run(self) ->List[Node]:\n ...\n\n def check_list_content(self, node: Node) ->Tuple[int, List[int]]:\n ...\n\n def build_table_from_list(self, table_data: List[List[N_co]],\n col_widths: List[int], header_rows: int, stub_columns: int) ->table:\n ...\n",
"<import token>\n<assignment token>\n__docformat__: str\n\n\ndef align(argument: str) ->str:\n ...\n\n\nclass Table(Directive):\n optional_arguments: int = ...\n final_argument_whitespace: bool = ...\n option_spec: Dict[str, Callable[[str], Any]] = ...\n has_content: bool = ...\n\n def make_title(self) ->Tuple[title, List[system_message]]:\n ...\n\n def process_header_option(self) ->Tuple[List[Node], int]:\n ...\n\n def check_table_dimensions(self, rows: List[List[N_co]], header_rows:\n int, stub_columns: int) ->None:\n ...\n\n def set_table_width(self, table_node: table) ->None:\n ...\n\n @property\n def widths(self) ->str:\n ...\n\n def get_column_widths(self, max_cols: int) ->List[int]:\n ...\n\n def extend_short_rows_with_empty_cells(self, columns: int, parts: Tuple\n [List[N_co], List[N_co]]) ->None:\n ...\n\n\nclass RSTTable(Table):\n\n def run(self) ->List[Node]:\n ...\n\n\nclass CSVTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n\n class DocutilsDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n escapechar: str = ...\n\n def __init__(self, options: Dict[str, Any]) ->None:\n ...\n\n\n class HeaderDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n escapechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n\n def check_requirements(self) ->None:\n ...\n\n def run(self) ->List[Node]:\n ...\n\n def get_csv_data(self) ->Tuple[List[str], str]:\n ...\n decode_from_csv: Callable[[str], str] = ...\n encode_for_csv: Callable[[str], str] = ...\n\n def parse_csv_data_into_rows(self, csv_data: List[str], dialect: Any,\n source: str) ->Tuple[List[Tuple[int, int, int, StringList]], int]:\n ...\n\n\nclass ListTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n def run(self) ->List[Node]:\n ...\n\n def check_list_content(self, node: Node) ->Tuple[int, List[int]]:\n ...\n\n def build_table_from_list(self, table_data: List[List[N_co]],\n col_widths: List[int], header_rows: int, stub_columns: int) ->table:\n ...\n",
"<import token>\n<assignment token>\n<code token>\n\n\ndef align(argument: str) ->str:\n ...\n\n\nclass Table(Directive):\n optional_arguments: int = ...\n final_argument_whitespace: bool = ...\n option_spec: Dict[str, Callable[[str], Any]] = ...\n has_content: bool = ...\n\n def make_title(self) ->Tuple[title, List[system_message]]:\n ...\n\n def process_header_option(self) ->Tuple[List[Node], int]:\n ...\n\n def check_table_dimensions(self, rows: List[List[N_co]], header_rows:\n int, stub_columns: int) ->None:\n ...\n\n def set_table_width(self, table_node: table) ->None:\n ...\n\n @property\n def widths(self) ->str:\n ...\n\n def get_column_widths(self, max_cols: int) ->List[int]:\n ...\n\n def extend_short_rows_with_empty_cells(self, columns: int, parts: Tuple\n [List[N_co], List[N_co]]) ->None:\n ...\n\n\nclass RSTTable(Table):\n\n def run(self) ->List[Node]:\n ...\n\n\nclass CSVTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n\n class DocutilsDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n escapechar: str = ...\n\n def __init__(self, options: Dict[str, Any]) ->None:\n ...\n\n\n class HeaderDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n escapechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n\n def check_requirements(self) ->None:\n ...\n\n def run(self) ->List[Node]:\n ...\n\n def get_csv_data(self) ->Tuple[List[str], str]:\n ...\n decode_from_csv: Callable[[str], str] = ...\n encode_for_csv: Callable[[str], str] = ...\n\n def parse_csv_data_into_rows(self, csv_data: List[str], dialect: Any,\n source: str) ->Tuple[List[Tuple[int, int, int, StringList]], int]:\n ...\n\n\nclass ListTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n def run(self) ->List[Node]:\n ...\n\n def check_list_content(self, node: Node) ->Tuple[int, List[int]]:\n ...\n\n def build_table_from_list(self, table_data: List[List[N_co]],\n col_widths: List[int], header_rows: int, stub_columns: int) ->table:\n ...\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass Table(Directive):\n optional_arguments: int = ...\n final_argument_whitespace: bool = ...\n option_spec: Dict[str, Callable[[str], Any]] = ...\n has_content: bool = ...\n\n def make_title(self) ->Tuple[title, List[system_message]]:\n ...\n\n def process_header_option(self) ->Tuple[List[Node], int]:\n ...\n\n def check_table_dimensions(self, rows: List[List[N_co]], header_rows:\n int, stub_columns: int) ->None:\n ...\n\n def set_table_width(self, table_node: table) ->None:\n ...\n\n @property\n def widths(self) ->str:\n ...\n\n def get_column_widths(self, max_cols: int) ->List[int]:\n ...\n\n def extend_short_rows_with_empty_cells(self, columns: int, parts: Tuple\n [List[N_co], List[N_co]]) ->None:\n ...\n\n\nclass RSTTable(Table):\n\n def run(self) ->List[Node]:\n ...\n\n\nclass CSVTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n\n class DocutilsDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n escapechar: str = ...\n\n def __init__(self, options: Dict[str, Any]) ->None:\n ...\n\n\n class HeaderDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n escapechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n\n def check_requirements(self) ->None:\n ...\n\n def run(self) ->List[Node]:\n ...\n\n def get_csv_data(self) ->Tuple[List[str], str]:\n ...\n decode_from_csv: Callable[[str], str] = ...\n encode_for_csv: Callable[[str], str] = ...\n\n def parse_csv_data_into_rows(self, csv_data: List[str], dialect: Any,\n source: str) ->Tuple[List[Tuple[int, int, int, StringList]], int]:\n ...\n\n\nclass ListTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n def run(self) ->List[Node]:\n ...\n\n def check_list_content(self, node: Node) ->Tuple[int, List[int]]:\n ...\n\n def build_table_from_list(self, table_data: List[List[N_co]],\n col_widths: List[int], header_rows: int, stub_columns: int) ->table:\n ...\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass Table(Directive):\n optional_arguments: int = ...\n final_argument_whitespace: bool = ...\n option_spec: Dict[str, Callable[[str], Any]] = ...\n has_content: bool = ...\n\n def make_title(self) ->Tuple[title, List[system_message]]:\n ...\n\n def process_header_option(self) ->Tuple[List[Node], int]:\n ...\n\n def check_table_dimensions(self, rows: List[List[N_co]], header_rows:\n int, stub_columns: int) ->None:\n ...\n\n def set_table_width(self, table_node: table) ->None:\n ...\n <function token>\n\n def get_column_widths(self, max_cols: int) ->List[int]:\n ...\n\n def extend_short_rows_with_empty_cells(self, columns: int, parts: Tuple\n [List[N_co], List[N_co]]) ->None:\n ...\n\n\nclass RSTTable(Table):\n\n def run(self) ->List[Node]:\n ...\n\n\nclass CSVTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n\n class DocutilsDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n escapechar: str = ...\n\n def __init__(self, options: Dict[str, Any]) ->None:\n ...\n\n\n class HeaderDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n escapechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n\n def check_requirements(self) ->None:\n ...\n\n def run(self) ->List[Node]:\n ...\n\n def get_csv_data(self) ->Tuple[List[str], str]:\n ...\n decode_from_csv: Callable[[str], str] = ...\n encode_for_csv: Callable[[str], str] = ...\n\n def parse_csv_data_into_rows(self, csv_data: List[str], dialect: Any,\n source: str) ->Tuple[List[Tuple[int, int, int, StringList]], int]:\n ...\n\n\nclass ListTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n def run(self) ->List[Node]:\n ...\n\n def check_list_content(self, node: Node) ->Tuple[int, List[int]]:\n ...\n\n def build_table_from_list(self, table_data: List[List[N_co]],\n col_widths: List[int], header_rows: int, stub_columns: int) ->table:\n ...\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass Table(Directive):\n optional_arguments: int = ...\n final_argument_whitespace: bool = ...\n option_spec: Dict[str, Callable[[str], Any]] = ...\n has_content: bool = ...\n\n def make_title(self) ->Tuple[title, List[system_message]]:\n ...\n\n def process_header_option(self) ->Tuple[List[Node], int]:\n ...\n\n def check_table_dimensions(self, rows: List[List[N_co]], header_rows:\n int, stub_columns: int) ->None:\n ...\n\n def set_table_width(self, table_node: table) ->None:\n ...\n <function token>\n\n def get_column_widths(self, max_cols: int) ->List[int]:\n ...\n <function token>\n\n\nclass RSTTable(Table):\n\n def run(self) ->List[Node]:\n ...\n\n\nclass CSVTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n\n class DocutilsDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n escapechar: str = ...\n\n def __init__(self, options: Dict[str, Any]) ->None:\n ...\n\n\n class HeaderDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n escapechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n\n def check_requirements(self) ->None:\n ...\n\n def run(self) ->List[Node]:\n ...\n\n def get_csv_data(self) ->Tuple[List[str], str]:\n ...\n decode_from_csv: Callable[[str], str] = ...\n encode_for_csv: Callable[[str], str] = ...\n\n def parse_csv_data_into_rows(self, csv_data: List[str], dialect: Any,\n source: str) ->Tuple[List[Tuple[int, int, int, StringList]], int]:\n ...\n\n\nclass ListTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n def run(self) ->List[Node]:\n ...\n\n def check_list_content(self, node: Node) ->Tuple[int, List[int]]:\n ...\n\n def build_table_from_list(self, table_data: List[List[N_co]],\n col_widths: List[int], header_rows: int, stub_columns: int) ->table:\n ...\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass Table(Directive):\n optional_arguments: int = ...\n final_argument_whitespace: bool = ...\n option_spec: Dict[str, Callable[[str], Any]] = ...\n has_content: bool = ...\n\n def make_title(self) ->Tuple[title, List[system_message]]:\n ...\n\n def process_header_option(self) ->Tuple[List[Node], int]:\n ...\n\n def check_table_dimensions(self, rows: List[List[N_co]], header_rows:\n int, stub_columns: int) ->None:\n ...\n <function token>\n <function token>\n\n def get_column_widths(self, max_cols: int) ->List[int]:\n ...\n <function token>\n\n\nclass RSTTable(Table):\n\n def run(self) ->List[Node]:\n ...\n\n\nclass CSVTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n\n class DocutilsDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n escapechar: str = ...\n\n def __init__(self, options: Dict[str, Any]) ->None:\n ...\n\n\n class HeaderDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n escapechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n\n def check_requirements(self) ->None:\n ...\n\n def run(self) ->List[Node]:\n ...\n\n def get_csv_data(self) ->Tuple[List[str], str]:\n ...\n decode_from_csv: Callable[[str], str] = ...\n encode_for_csv: Callable[[str], str] = ...\n\n def parse_csv_data_into_rows(self, csv_data: List[str], dialect: Any,\n source: str) ->Tuple[List[Tuple[int, int, int, StringList]], int]:\n ...\n\n\nclass ListTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n def run(self) ->List[Node]:\n ...\n\n def check_list_content(self, node: Node) ->Tuple[int, List[int]]:\n ...\n\n def build_table_from_list(self, table_data: List[List[N_co]],\n col_widths: List[int], header_rows: int, stub_columns: int) ->table:\n ...\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass Table(Directive):\n optional_arguments: int = ...\n final_argument_whitespace: bool = ...\n option_spec: Dict[str, Callable[[str], Any]] = ...\n has_content: bool = ...\n\n def make_title(self) ->Tuple[title, List[system_message]]:\n ...\n\n def process_header_option(self) ->Tuple[List[Node], int]:\n ...\n\n def check_table_dimensions(self, rows: List[List[N_co]], header_rows:\n int, stub_columns: int) ->None:\n ...\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass RSTTable(Table):\n\n def run(self) ->List[Node]:\n ...\n\n\nclass CSVTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n\n class DocutilsDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n escapechar: str = ...\n\n def __init__(self, options: Dict[str, Any]) ->None:\n ...\n\n\n class HeaderDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n escapechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n\n def check_requirements(self) ->None:\n ...\n\n def run(self) ->List[Node]:\n ...\n\n def get_csv_data(self) ->Tuple[List[str], str]:\n ...\n decode_from_csv: Callable[[str], str] = ...\n encode_for_csv: Callable[[str], str] = ...\n\n def parse_csv_data_into_rows(self, csv_data: List[str], dialect: Any,\n source: str) ->Tuple[List[Tuple[int, int, int, StringList]], int]:\n ...\n\n\nclass ListTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n def run(self) ->List[Node]:\n ...\n\n def check_list_content(self, node: Node) ->Tuple[int, List[int]]:\n ...\n\n def build_table_from_list(self, table_data: List[List[N_co]],\n col_widths: List[int], header_rows: int, stub_columns: int) ->table:\n ...\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass Table(Directive):\n optional_arguments: int = ...\n final_argument_whitespace: bool = ...\n option_spec: Dict[str, Callable[[str], Any]] = ...\n has_content: bool = ...\n <function token>\n\n def process_header_option(self) ->Tuple[List[Node], int]:\n ...\n\n def check_table_dimensions(self, rows: List[List[N_co]], header_rows:\n int, stub_columns: int) ->None:\n ...\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass RSTTable(Table):\n\n def run(self) ->List[Node]:\n ...\n\n\nclass CSVTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n\n class DocutilsDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n escapechar: str = ...\n\n def __init__(self, options: Dict[str, Any]) ->None:\n ...\n\n\n class HeaderDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n escapechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n\n def check_requirements(self) ->None:\n ...\n\n def run(self) ->List[Node]:\n ...\n\n def get_csv_data(self) ->Tuple[List[str], str]:\n ...\n decode_from_csv: Callable[[str], str] = ...\n encode_for_csv: Callable[[str], str] = ...\n\n def parse_csv_data_into_rows(self, csv_data: List[str], dialect: Any,\n source: str) ->Tuple[List[Tuple[int, int, int, StringList]], int]:\n ...\n\n\nclass ListTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n def run(self) ->List[Node]:\n ...\n\n def check_list_content(self, node: Node) ->Tuple[int, List[int]]:\n ...\n\n def build_table_from_list(self, table_data: List[List[N_co]],\n col_widths: List[int], header_rows: int, stub_columns: int) ->table:\n ...\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass Table(Directive):\n optional_arguments: int = ...\n final_argument_whitespace: bool = ...\n option_spec: Dict[str, Callable[[str], Any]] = ...\n has_content: bool = ...\n <function token>\n\n def process_header_option(self) ->Tuple[List[Node], int]:\n ...\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass RSTTable(Table):\n\n def run(self) ->List[Node]:\n ...\n\n\nclass CSVTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n\n class DocutilsDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n escapechar: str = ...\n\n def __init__(self, options: Dict[str, Any]) ->None:\n ...\n\n\n class HeaderDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n escapechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n\n def check_requirements(self) ->None:\n ...\n\n def run(self) ->List[Node]:\n ...\n\n def get_csv_data(self) ->Tuple[List[str], str]:\n ...\n decode_from_csv: Callable[[str], str] = ...\n encode_for_csv: Callable[[str], str] = ...\n\n def parse_csv_data_into_rows(self, csv_data: List[str], dialect: Any,\n source: str) ->Tuple[List[Tuple[int, int, int, StringList]], int]:\n ...\n\n\nclass ListTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n def run(self) ->List[Node]:\n ...\n\n def check_list_content(self, node: Node) ->Tuple[int, List[int]]:\n ...\n\n def build_table_from_list(self, table_data: List[List[N_co]],\n col_widths: List[int], header_rows: int, stub_columns: int) ->table:\n ...\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n\n\nclass Table(Directive):\n optional_arguments: int = ...\n final_argument_whitespace: bool = ...\n option_spec: Dict[str, Callable[[str], Any]] = ...\n has_content: bool = ...\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass RSTTable(Table):\n\n def run(self) ->List[Node]:\n ...\n\n\nclass CSVTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n\n class DocutilsDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n escapechar: str = ...\n\n def __init__(self, options: Dict[str, Any]) ->None:\n ...\n\n\n class HeaderDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n escapechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n\n def check_requirements(self) ->None:\n ...\n\n def run(self) ->List[Node]:\n ...\n\n def get_csv_data(self) ->Tuple[List[str], str]:\n ...\n decode_from_csv: Callable[[str], str] = ...\n encode_for_csv: Callable[[str], str] = ...\n\n def parse_csv_data_into_rows(self, csv_data: List[str], dialect: Any,\n source: str) ->Tuple[List[Tuple[int, int, int, StringList]], int]:\n ...\n\n\nclass ListTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n def run(self) ->List[Node]:\n ...\n\n def check_list_content(self, node: Node) ->Tuple[int, List[int]]:\n ...\n\n def build_table_from_list(self, table_data: List[List[N_co]],\n col_widths: List[int], header_rows: int, stub_columns: int) ->table:\n ...\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n\n\nclass RSTTable(Table):\n\n def run(self) ->List[Node]:\n ...\n\n\nclass CSVTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n\n class DocutilsDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n escapechar: str = ...\n\n def __init__(self, options: Dict[str, Any]) ->None:\n ...\n\n\n class HeaderDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n escapechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n\n def check_requirements(self) ->None:\n ...\n\n def run(self) ->List[Node]:\n ...\n\n def get_csv_data(self) ->Tuple[List[str], str]:\n ...\n decode_from_csv: Callable[[str], str] = ...\n encode_for_csv: Callable[[str], str] = ...\n\n def parse_csv_data_into_rows(self, csv_data: List[str], dialect: Any,\n source: str) ->Tuple[List[Tuple[int, int, int, StringList]], int]:\n ...\n\n\nclass ListTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n def run(self) ->List[Node]:\n ...\n\n def check_list_content(self, node: Node) ->Tuple[int, List[int]]:\n ...\n\n def build_table_from_list(self, table_data: List[List[N_co]],\n col_widths: List[int], header_rows: int, stub_columns: int) ->table:\n ...\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n\n\nclass RSTTable(Table):\n <function token>\n\n\nclass CSVTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n\n class DocutilsDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n escapechar: str = ...\n\n def __init__(self, options: Dict[str, Any]) ->None:\n ...\n\n\n class HeaderDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n escapechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n\n def check_requirements(self) ->None:\n ...\n\n def run(self) ->List[Node]:\n ...\n\n def get_csv_data(self) ->Tuple[List[str], str]:\n ...\n decode_from_csv: Callable[[str], str] = ...\n encode_for_csv: Callable[[str], str] = ...\n\n def parse_csv_data_into_rows(self, csv_data: List[str], dialect: Any,\n source: str) ->Tuple[List[Tuple[int, int, int, StringList]], int]:\n ...\n\n\nclass ListTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n def run(self) ->List[Node]:\n ...\n\n def check_list_content(self, node: Node) ->Tuple[int, List[int]]:\n ...\n\n def build_table_from_list(self, table_data: List[List[N_co]],\n col_widths: List[int], header_rows: int, stub_columns: int) ->table:\n ...\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n\n\nclass CSVTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n\n class DocutilsDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n escapechar: str = ...\n\n def __init__(self, options: Dict[str, Any]) ->None:\n ...\n\n\n class HeaderDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n escapechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n\n def check_requirements(self) ->None:\n ...\n\n def run(self) ->List[Node]:\n ...\n\n def get_csv_data(self) ->Tuple[List[str], str]:\n ...\n decode_from_csv: Callable[[str], str] = ...\n encode_for_csv: Callable[[str], str] = ...\n\n def parse_csv_data_into_rows(self, csv_data: List[str], dialect: Any,\n source: str) ->Tuple[List[Tuple[int, int, int, StringList]], int]:\n ...\n\n\nclass ListTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n def run(self) ->List[Node]:\n ...\n\n def check_list_content(self, node: Node) ->Tuple[int, List[int]]:\n ...\n\n def build_table_from_list(self, table_data: List[List[N_co]],\n col_widths: List[int], header_rows: int, stub_columns: int) ->table:\n ...\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n\n\nclass CSVTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n\n class DocutilsDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n escapechar: str = ...\n\n def __init__(self, options: Dict[str, Any]) ->None:\n ...\n\n\n class HeaderDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n escapechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n\n def check_requirements(self) ->None:\n ...\n\n def run(self) ->List[Node]:\n ...\n\n def get_csv_data(self) ->Tuple[List[str], str]:\n ...\n decode_from_csv: Callable[[str], str] = ...\n encode_for_csv: Callable[[str], str] = ...\n <function token>\n\n\nclass ListTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n def run(self) ->List[Node]:\n ...\n\n def check_list_content(self, node: Node) ->Tuple[int, List[int]]:\n ...\n\n def build_table_from_list(self, table_data: List[List[N_co]],\n col_widths: List[int], header_rows: int, stub_columns: int) ->table:\n ...\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n\n\nclass CSVTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n\n class DocutilsDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n escapechar: str = ...\n\n def __init__(self, options: Dict[str, Any]) ->None:\n ...\n\n\n class HeaderDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n escapechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n\n def check_requirements(self) ->None:\n ...\n <function token>\n\n def get_csv_data(self) ->Tuple[List[str], str]:\n ...\n decode_from_csv: Callable[[str], str] = ...\n encode_for_csv: Callable[[str], str] = ...\n <function token>\n\n\nclass ListTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n def run(self) ->List[Node]:\n ...\n\n def check_list_content(self, node: Node) ->Tuple[int, List[int]]:\n ...\n\n def build_table_from_list(self, table_data: List[List[N_co]],\n col_widths: List[int], header_rows: int, stub_columns: int) ->table:\n ...\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n\n\nclass CSVTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n\n class DocutilsDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n escapechar: str = ...\n\n def __init__(self, options: Dict[str, Any]) ->None:\n ...\n\n\n class HeaderDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n escapechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n <function token>\n <function token>\n\n def get_csv_data(self) ->Tuple[List[str], str]:\n ...\n decode_from_csv: Callable[[str], str] = ...\n encode_for_csv: Callable[[str], str] = ...\n <function token>\n\n\nclass ListTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n def run(self) ->List[Node]:\n ...\n\n def check_list_content(self, node: Node) ->Tuple[int, List[int]]:\n ...\n\n def build_table_from_list(self, table_data: List[List[N_co]],\n col_widths: List[int], header_rows: int, stub_columns: int) ->table:\n ...\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n\n\nclass CSVTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n\n class DocutilsDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n escapechar: str = ...\n\n def __init__(self, options: Dict[str, Any]) ->None:\n ...\n\n\n class HeaderDialect(csv.Dialect):\n delimiter: str = ...\n quotechar: str = ...\n escapechar: str = ...\n doublequote: bool = ...\n skipinitialspace: bool = ...\n strict: bool = ...\n lineterminator: str = ...\n quoting: Any = ...\n <function token>\n <function token>\n <function token>\n decode_from_csv: Callable[[str], str] = ...\n encode_for_csv: Callable[[str], str] = ...\n <function token>\n\n\nclass ListTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n def run(self) ->List[Node]:\n ...\n\n def check_list_content(self, node: Node) ->Tuple[int, List[int]]:\n ...\n\n def build_table_from_list(self, table_data: List[List[N_co]],\n col_widths: List[int], header_rows: int, stub_columns: int) ->table:\n ...\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass ListTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n def run(self) ->List[Node]:\n ...\n\n def check_list_content(self, node: Node) ->Tuple[int, List[int]]:\n ...\n\n def build_table_from_list(self, table_data: List[List[N_co]],\n col_widths: List[int], header_rows: int, stub_columns: int) ->table:\n ...\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass ListTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n def run(self) ->List[Node]:\n ...\n\n def check_list_content(self, node: Node) ->Tuple[int, List[int]]:\n ...\n <function token>\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass ListTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n\n def run(self) ->List[Node]:\n ...\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass ListTable(Table):\n option_spec: Dict[str, Callable[[str], Any]] = ...\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n"
] | false |
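The nested DocutilsDialect and HeaderDialect stubs above describe csv.Dialect subclasses, and such a subclass can be handed straight to csv.reader. A generic example (the attribute values here are illustrative, not docutils' real ones):

import csv
import io

class DemoDialect(csv.Dialect):
    delimiter = ','
    quotechar = '"'
    doublequote = True
    skipinitialspace = True
    lineterminator = '\n'
    quoting = csv.QUOTE_MINIMAL

print(list(csv.reader(io.StringIO('a, "b,c", d\n'), dialect=DemoDialect)))
# [['a', 'b,c', 'd']]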
950 |
94e8f0532da76c803b23fe2217b07dc8cf285710
|
# -*- coding: utf-8 -*-
"""
Created on Mon May 27 17:38:50 2019
@author: User
"""
import numpy as np
import pandas as pd
dataset = pd.read_csv('University_data.csv')
dataset.info()  # info() prints its own summary and returns None, so wrapping it in print() is redundant
features = dataset.iloc[:, :-1].values
labels = dataset.iloc[:, -1:].values
from sklearn.preprocessing import LabelEncoder
labelencoder = LabelEncoder()
features[:, 0] = labelencoder.fit_transform(features[:, 0])
from sklearn.preprocessing import OneHotEncoder
# categorical_features was removed in scikit-learn 0.22; this call needs an older release.
onehotencoder = OneHotEncoder(categorical_features=[0])
features = onehotencoder.fit_transform(features).toarray()
features = features[:, 1:]  # drop the first dummy column to avoid the dummy-variable trap
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(features, labels)
x = ["Cabrini",337,1.5,2.3,9.0,0]
x = np.array(x).reshape(1,-1)
x[:,0] = labelencoder.transform(x[:,0])
x = onehotencoder.transform(x).toarray()
x = x[:,1:]
regressor.predict(x)
|
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 27 17:38:50 2019\n\n@author: User\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\ndataset = pd.read_csv('University_data.csv') \nprint(dataset.info())\nfeatures = dataset.iloc[:, :-1].values \nlabels = dataset.iloc[:, -1:].values \nfrom sklearn.preprocessing import LabelEncoder\nlabelencoder = LabelEncoder()\nfeatures[:, 0] = labelencoder.fit_transform(features[:, 0])\nfrom sklearn.preprocessing import OneHotEncoder\nonehotencoder = OneHotEncoder(categorical_features = [0])\nfeatures = onehotencoder.fit_transform(features).toarray()\nfeatures = features[:, 1:]\n\n\nfrom sklearn.linear_model import LinearRegression \nregressor = LinearRegression() \nregressor.fit(features, labels)\n\n\n\nx = [\"Cabrini\",337,1.5,2.3,9.0,0]\nx = np.array(x).reshape(1,-1)\nx[:,0] = labelencoder.transform(x[:,0])\nx = onehotencoder.transform(x).toarray()\nx = x[:,1:]\nregressor.predict(x)",
"<docstring token>\nimport numpy as np\nimport pandas as pd\ndataset = pd.read_csv('University_data.csv')\nprint(dataset.info())\nfeatures = dataset.iloc[:, :-1].values\nlabels = dataset.iloc[:, -1:].values\nfrom sklearn.preprocessing import LabelEncoder\nlabelencoder = LabelEncoder()\nfeatures[:, 0] = labelencoder.fit_transform(features[:, 0])\nfrom sklearn.preprocessing import OneHotEncoder\nonehotencoder = OneHotEncoder(categorical_features=[0])\nfeatures = onehotencoder.fit_transform(features).toarray()\nfeatures = features[:, 1:]\nfrom sklearn.linear_model import LinearRegression\nregressor = LinearRegression()\nregressor.fit(features, labels)\nx = ['Cabrini', 337, 1.5, 2.3, 9.0, 0]\nx = np.array(x).reshape(1, -1)\nx[:, 0] = labelencoder.transform(x[:, 0])\nx = onehotencoder.transform(x).toarray()\nx = x[:, 1:]\nregressor.predict(x)\n",
"<docstring token>\n<import token>\ndataset = pd.read_csv('University_data.csv')\nprint(dataset.info())\nfeatures = dataset.iloc[:, :-1].values\nlabels = dataset.iloc[:, -1:].values\n<import token>\nlabelencoder = LabelEncoder()\nfeatures[:, 0] = labelencoder.fit_transform(features[:, 0])\n<import token>\nonehotencoder = OneHotEncoder(categorical_features=[0])\nfeatures = onehotencoder.fit_transform(features).toarray()\nfeatures = features[:, 1:]\n<import token>\nregressor = LinearRegression()\nregressor.fit(features, labels)\nx = ['Cabrini', 337, 1.5, 2.3, 9.0, 0]\nx = np.array(x).reshape(1, -1)\nx[:, 0] = labelencoder.transform(x[:, 0])\nx = onehotencoder.transform(x).toarray()\nx = x[:, 1:]\nregressor.predict(x)\n",
"<docstring token>\n<import token>\n<assignment token>\nprint(dataset.info())\n<assignment token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\nregressor.fit(features, labels)\n<assignment token>\nregressor.predict(x)\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
951 |
f5d353694a719472320f4d6fa28bc9d2cc5a69b0
|
# -*- coding: utf-8 -*-
"""
This is the very first A.I. in this series.
The vision is to develop a 'protocol droid' to talk to, to help with tasks, and with whom to play games.
The droid will be able to translate languages and connect people.
"""
import speech_recognition as sr
import pyttsx3
import pywhatkit
import datetime
import wikipedia
import dadjokes
listener = sr.Recognizer()
engine = pyttsx3.init()
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[1].id)
engine.say("hey, My name is 'lisa, human cyborg relations. Please see the console for what I can do for you.")
#engine.say("hey, .")
engine.runAndWait()
print("I can play videos (Lisa, play....),\n teach (Lisa, teach me about...),\n tell you more (Lisa, tell me more about...),\n tell time (Lisa, what time is it),\n and tell jokes (Lisa, tell me a joke...).")
def talk(text):
    engine.say("heyo " + text)  # speak the text aloud (with a space after the "heyo" prefix)
    engine.runAndWait()
def take_command():
    command = ''  # fall back to an empty command so the return below never raises NameError
    try:
        with sr.Microphone() as source:
            print('listening....')
            voice = listener.listen(source)
            command = listener.recognize_google(voice)
            command = command.lower()
            if 'lisa' in command:
                command = command.replace('lisa', '')
    except Exception:
        print("something went wrong")
    return command
def run_lisa():
command = take_command()
if 'play' in command:
song = command.replace('play','')
talk('hey playing' + song)
print('playing...'+ song)
pywhatkit.playonyt(song)
elif 'time' in command:
#needs a more natural way of expressing time
#i would like mil time
time = datetime.datetime.now().strftime('%H %M')
talk('Right now it is '+time)
elif "teach me about" in command:
info = command.replace('teach me about','')
teach = wikipedia.summary(info,2)
print(teach)
talk(teach)
elif "tell me more about" in command:
info = command.replace('tell me more about','')
teach = wikipedia.summary(info,6)
print(teach)
talk(teach)
elif "joke" in command:
talk(dadjokes.joke())
elif "good one" in command:
talk("yeah thanks! I'll be here all week folks!")
while True:
run_lisa()
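The if/elif chain in run_lisa grows with every new skill; below is a minimal refactor sketch using a prefix-to-handler table that covers the same commands (the SKILLS name and the dispatch helper are illustrative, not part of the original):

# map a command prefix to a handler that receives the rest of the command
SKILLS = {
    'tell me more about': lambda rest: talk(wikipedia.summary(rest, 6)),
    'teach me about': lambda rest: talk(wikipedia.summary(rest, 2)),
    'play': lambda rest: pywhatkit.playonyt(rest),
    'joke': lambda rest: talk(dadjokes.joke()),
}

def dispatch(command):
    for prefix, handler in SKILLS.items():
        if prefix in command:
            handler(command.replace(prefix, '').strip())
            return True
    return False

Ordering matters here: the longer 'tell me more about' prefix is checked before the shorter ones so it is not shadowed.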
|
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nThis is the very first A.I. in this series. \r\nThe vision is to devlop 'protocol droid' to talk to, to help with tasks, and with whom to play games.\r\nThe droid will be able to translate langages and connect ppl.\r\n\r\n\r\n\"\"\"\r\n\r\nimport speech_recognition as sr\r\nimport pyttsx3\r\nimport pywhatkit\r\nimport datetime\r\nimport wikipedia\r\nimport dadjokes\r\n\r\n\r\nlistener = sr.Recognizer()\r\nengine = pyttsx3.init()\r\nvoices = engine.getProperty('voices')\r\nengine.setProperty('voice', voices[1].id)\r\nengine.say(\"hey, My name is 'lisa, human cyborg relations. Please see the console for what I can do for you.\")\r\n#engine.say(\"hey, .\")\r\nengine.runAndWait()\r\nprint(\"I can play videos (Lisa, play....),\\n teach (Lisa, teach me about...),\\n tell you more (Lisa, tell me more about...),\\n tell time (Lisa, what time is it),\\n and tell jokes (Lisa, tell me a joke...).\")\r\n\r\ndef talk(text):\r\n engine.say(\"heyo\"+text)\r\n engine.runAndWait()\r\n\r\n\r\n\r\ndef take_command():\r\n\r\n try:\r\n with sr.Microphone() as source:\r\n print('listening....')\r\n voice = listener.listen(source)\r\n command = listener.recognize_google(voice)\r\n command = command.lower()\r\n if 'lisa' in command:\r\n command = command.replace('lisa','')\r\n \r\n \r\n \r\n \r\n \r\n except:\r\n print(\"something went wrong\")\r\n \r\n \r\n return command\r\ndef run_lisa():\r\n command = take_command()\r\n if 'play' in command:\r\n song = command.replace('play','')\r\n talk('hey playing' + song)\r\n print('playing...'+ song)\r\n pywhatkit.playonyt(song)\r\n elif 'time' in command:\r\n #needs a more natural way of expressing time\r\n #i would like mil time\r\n time = datetime.datetime.now().strftime('%H %M')\r\n talk('Right now it is '+time)\r\n elif \"teach me about\" in command:\r\n info = command.replace('teach me about','')\r\n teach = wikipedia.summary(info,2)\r\n print(teach)\r\n talk(teach)\r\n elif \"tell me more about\" in command:\r\n info = command.replace('tell me more about','')\r\n teach = wikipedia.summary(info,6)\r\n print(teach)\r\n talk(teach)\r\n elif \"joke\" in command:\r\n talk(dadjokes.joke())\r\n elif \"good one\" in command:\r\n talk(\"yeah thanks! I'll be here all week folks!\")\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\nwhile True: \r\n run_lisa()\r\n ",
"<docstring token>\nimport speech_recognition as sr\nimport pyttsx3\nimport pywhatkit\nimport datetime\nimport wikipedia\nimport dadjokes\nlistener = sr.Recognizer()\nengine = pyttsx3.init()\nvoices = engine.getProperty('voices')\nengine.setProperty('voice', voices[1].id)\nengine.say(\n \"hey, My name is 'lisa, human cyborg relations. Please see the console for what I can do for you.\"\n )\nengine.runAndWait()\nprint(\n \"\"\"I can play videos (Lisa, play....),\n teach (Lisa, teach me about...),\n tell you more (Lisa, tell me more about...),\n tell time (Lisa, what time is it),\n and tell jokes (Lisa, tell me a joke...).\"\"\"\n )\n\n\ndef talk(text):\n engine.say('heyo' + text)\n engine.runAndWait()\n\n\ndef take_command():\n try:\n with sr.Microphone() as source:\n print('listening....')\n voice = listener.listen(source)\n command = listener.recognize_google(voice)\n command = command.lower()\n if 'lisa' in command:\n command = command.replace('lisa', '')\n except:\n print('something went wrong')\n return command\n\n\ndef run_lisa():\n command = take_command()\n if 'play' in command:\n song = command.replace('play', '')\n talk('hey playing' + song)\n print('playing...' + song)\n pywhatkit.playonyt(song)\n elif 'time' in command:\n time = datetime.datetime.now().strftime('%H %M')\n talk('Right now it is ' + time)\n elif 'teach me about' in command:\n info = command.replace('teach me about', '')\n teach = wikipedia.summary(info, 2)\n print(teach)\n talk(teach)\n elif 'tell me more about' in command:\n info = command.replace('tell me more about', '')\n teach = wikipedia.summary(info, 6)\n print(teach)\n talk(teach)\n elif 'joke' in command:\n talk(dadjokes.joke())\n elif 'good one' in command:\n talk(\"yeah thanks! I'll be here all week folks!\")\n\n\nwhile True:\n run_lisa()\n",
"<docstring token>\n<import token>\nlistener = sr.Recognizer()\nengine = pyttsx3.init()\nvoices = engine.getProperty('voices')\nengine.setProperty('voice', voices[1].id)\nengine.say(\n \"hey, My name is 'lisa, human cyborg relations. Please see the console for what I can do for you.\"\n )\nengine.runAndWait()\nprint(\n \"\"\"I can play videos (Lisa, play....),\n teach (Lisa, teach me about...),\n tell you more (Lisa, tell me more about...),\n tell time (Lisa, what time is it),\n and tell jokes (Lisa, tell me a joke...).\"\"\"\n )\n\n\ndef talk(text):\n engine.say('heyo' + text)\n engine.runAndWait()\n\n\ndef take_command():\n try:\n with sr.Microphone() as source:\n print('listening....')\n voice = listener.listen(source)\n command = listener.recognize_google(voice)\n command = command.lower()\n if 'lisa' in command:\n command = command.replace('lisa', '')\n except:\n print('something went wrong')\n return command\n\n\ndef run_lisa():\n command = take_command()\n if 'play' in command:\n song = command.replace('play', '')\n talk('hey playing' + song)\n print('playing...' + song)\n pywhatkit.playonyt(song)\n elif 'time' in command:\n time = datetime.datetime.now().strftime('%H %M')\n talk('Right now it is ' + time)\n elif 'teach me about' in command:\n info = command.replace('teach me about', '')\n teach = wikipedia.summary(info, 2)\n print(teach)\n talk(teach)\n elif 'tell me more about' in command:\n info = command.replace('tell me more about', '')\n teach = wikipedia.summary(info, 6)\n print(teach)\n talk(teach)\n elif 'joke' in command:\n talk(dadjokes.joke())\n elif 'good one' in command:\n talk(\"yeah thanks! I'll be here all week folks!\")\n\n\nwhile True:\n run_lisa()\n",
"<docstring token>\n<import token>\n<assignment token>\nengine.setProperty('voice', voices[1].id)\nengine.say(\n \"hey, My name is 'lisa, human cyborg relations. Please see the console for what I can do for you.\"\n )\nengine.runAndWait()\nprint(\n \"\"\"I can play videos (Lisa, play....),\n teach (Lisa, teach me about...),\n tell you more (Lisa, tell me more about...),\n tell time (Lisa, what time is it),\n and tell jokes (Lisa, tell me a joke...).\"\"\"\n )\n\n\ndef talk(text):\n engine.say('heyo' + text)\n engine.runAndWait()\n\n\ndef take_command():\n try:\n with sr.Microphone() as source:\n print('listening....')\n voice = listener.listen(source)\n command = listener.recognize_google(voice)\n command = command.lower()\n if 'lisa' in command:\n command = command.replace('lisa', '')\n except:\n print('something went wrong')\n return command\n\n\ndef run_lisa():\n command = take_command()\n if 'play' in command:\n song = command.replace('play', '')\n talk('hey playing' + song)\n print('playing...' + song)\n pywhatkit.playonyt(song)\n elif 'time' in command:\n time = datetime.datetime.now().strftime('%H %M')\n talk('Right now it is ' + time)\n elif 'teach me about' in command:\n info = command.replace('teach me about', '')\n teach = wikipedia.summary(info, 2)\n print(teach)\n talk(teach)\n elif 'tell me more about' in command:\n info = command.replace('tell me more about', '')\n teach = wikipedia.summary(info, 6)\n print(teach)\n talk(teach)\n elif 'joke' in command:\n talk(dadjokes.joke())\n elif 'good one' in command:\n talk(\"yeah thanks! I'll be here all week folks!\")\n\n\nwhile True:\n run_lisa()\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n\n\ndef talk(text):\n engine.say('heyo' + text)\n engine.runAndWait()\n\n\ndef take_command():\n try:\n with sr.Microphone() as source:\n print('listening....')\n voice = listener.listen(source)\n command = listener.recognize_google(voice)\n command = command.lower()\n if 'lisa' in command:\n command = command.replace('lisa', '')\n except:\n print('something went wrong')\n return command\n\n\ndef run_lisa():\n command = take_command()\n if 'play' in command:\n song = command.replace('play', '')\n talk('hey playing' + song)\n print('playing...' + song)\n pywhatkit.playonyt(song)\n elif 'time' in command:\n time = datetime.datetime.now().strftime('%H %M')\n talk('Right now it is ' + time)\n elif 'teach me about' in command:\n info = command.replace('teach me about', '')\n teach = wikipedia.summary(info, 2)\n print(teach)\n talk(teach)\n elif 'tell me more about' in command:\n info = command.replace('tell me more about', '')\n teach = wikipedia.summary(info, 6)\n print(teach)\n talk(teach)\n elif 'joke' in command:\n talk(dadjokes.joke())\n elif 'good one' in command:\n talk(\"yeah thanks! I'll be here all week folks!\")\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n<function token>\n\n\ndef take_command():\n try:\n with sr.Microphone() as source:\n print('listening....')\n voice = listener.listen(source)\n command = listener.recognize_google(voice)\n command = command.lower()\n if 'lisa' in command:\n command = command.replace('lisa', '')\n except:\n print('something went wrong')\n return command\n\n\ndef run_lisa():\n command = take_command()\n if 'play' in command:\n song = command.replace('play', '')\n talk('hey playing' + song)\n print('playing...' + song)\n pywhatkit.playonyt(song)\n elif 'time' in command:\n time = datetime.datetime.now().strftime('%H %M')\n talk('Right now it is ' + time)\n elif 'teach me about' in command:\n info = command.replace('teach me about', '')\n teach = wikipedia.summary(info, 2)\n print(teach)\n talk(teach)\n elif 'tell me more about' in command:\n info = command.replace('tell me more about', '')\n teach = wikipedia.summary(info, 6)\n print(teach)\n talk(teach)\n elif 'joke' in command:\n talk(dadjokes.joke())\n elif 'good one' in command:\n talk(\"yeah thanks! I'll be here all week folks!\")\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n<function token>\n\n\ndef take_command():\n try:\n with sr.Microphone() as source:\n print('listening....')\n voice = listener.listen(source)\n command = listener.recognize_google(voice)\n command = command.lower()\n if 'lisa' in command:\n command = command.replace('lisa', '')\n except:\n print('something went wrong')\n return command\n\n\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
952 |
27a12a0f5ea6120036b66ee1cdd903da868a037f
|
# coding=utf-8
import base64
from sandcrawler.scraper import ScraperBase, SimpleScraperBase
class Hdmovie14Ag(SimpleScraperBase):
BASE_URL = 'http://www1.solarmovie.net'
OTHER_URLS = ['http://solarmovie.net', 'http://hdmovie14.ag']
    SCRAPER_TYPES = [ScraperBase.SCRAPER_TYPE_OSP]
    LANGUAGE = 'eng'
    MEDIA_TYPES = [ScraperBase.MEDIA_TYPE_FILM, ScraperBase.MEDIA_TYPE_TV]
    URL_TYPES = [ScraperBase.URL_TYPE_SEARCH, ScraperBase.URL_TYPE_LISTING]
def _fetch_search_url(self, search_term, media_type):
return '{base_url}/search-movies/{search_term}.html'.format(base_url=self.BASE_URL, search_term=search_term)
def _fetch_no_results_text(self):
return None
def _fetch_next_button(self, soup):
next_button = soup.find('a', text=u'»')
if next_button:
return next_button.href
return None
    def _parse_search_result_page(self, soup):
        found = False
        for result in soup.select('div.ml-item'):
            link = result.select_one('a')
            self.submit_search_result(
                link_url=link.href,
                link_title=link.text,
                image=self.util.find_image_src_or_none(result, 'img'),
            )
            found = True
        if not found:
            return self.submit_search_no_results()
def _parse_parse_page(self, soup):
index_page_title = self.util.get_page_title(soup)
series_season = series_episode = None
title = soup.select_one('h1')
if title and title.text:
series_season, series_episode = self.util.extract_season_episode(title.text)
        for results in soup.select('div.server_line'):
            try:
                # the player page embeds a base64-encoded snippet that contains the iframe;
                # b64decode replaces decodestring, which was removed in Python 3.9
                movie_link = self.make_soup(base64.b64decode(self.get_soup(results.select_one('a').href).
                                            select_one('div#media-player script').text.split('("')[-1].
                                            split('")')[0])).select_one('iframe')['src']
            except AttributeError:
                movie_link = self.get_soup(results.select_one('a').href).select_one('div#media-player a')['href']
self.submit_parse_result(
index_page_title=index_page_title,
link_url=movie_link,
link_title=movie_link,
series_season=series_season,
series_episode=series_episode,
)
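base64.decodestring, used in the original, was deprecated in Python 3.1 and removed in 3.9; base64.b64decode is the drop-in replacement used above. Here is a standalone sketch of the extraction the try block performs, with a made-up payload:

import base64

script_text = 'player.load("aHR0cDovL2V4YW1wbGUuY29tL3N0cmVhbQ==")'
# grab the quoted argument, then decode it to recover the stream URL
payload = script_text.split('("')[-1].split('")')[0]
print(base64.b64decode(payload).decode())  # http://example.com/stream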
|
[
"# coding=utf-8\nimport base64\nfrom sandcrawler.scraper import ScraperBase, SimpleScraperBase\n\nclass Hdmovie14Ag(SimpleScraperBase):\n BASE_URL = 'http://www1.solarmovie.net'\n OTHER_URLS = ['http://solarmovie.net', 'http://hdmovie14.ag']\n SCRAPER_TYPES = [ ScraperBase.SCRAPER_TYPE_OSP, ]\n LANGUAGE = 'eng'\n MEDIA_TYPES = [ ScraperBase.MEDIA_TYPE_FILM, ScraperBase.MEDIA_TYPE_TV, ]\n\n URL_TYPES = [ScraperBase.URL_TYPE_SEARCH, ScraperBase.URL_TYPE_LISTING, ]\n\n def _fetch_search_url(self, search_term, media_type):\n return '{base_url}/search-movies/{search_term}.html'.format(base_url=self.BASE_URL, search_term=search_term)\n\n def _fetch_no_results_text(self):\n return None\n\n def _fetch_next_button(self, soup):\n next_button = soup.find('a', text=u'»')\n if next_button:\n return next_button.href\n return None\n\n def _parse_search_result_page(self, soup):\n found=0\n for result in soup.select('div.ml-item'):\n link = result.select_one('a')\n self.submit_search_result(\n link_url=link.href,\n link_title=link.text,\n image=self.util.find_image_src_or_none(result, 'img'),\n )\n found=1\n if not found:\n return self.submit_search_no_results()\n\n def _parse_parse_page(self, soup):\n index_page_title = self.util.get_page_title(soup)\n series_season = series_episode = None\n title = soup.select_one('h1')\n if title and title.text:\n series_season, series_episode = self.util.extract_season_episode(title.text)\n for results in soup.select('div.server_line'):\n try:\n movie_link = self.make_soup(base64.decodestring(self.get_soup(results.select_one('a').href).\n select_one('div#media-player script').text.split('(\"')[-1].\n split('\")')[0])).select_one('iframe')['src']\n except AttributeError:\n movie_link = self.get_soup(results.select_one('a').href).select_one('div#media-player a')['href']\n self.submit_parse_result(\n index_page_title=index_page_title,\n link_url=movie_link,\n link_title=movie_link,\n series_season=series_season,\n series_episode=series_episode,\n )\n",
"import base64\nfrom sandcrawler.scraper import ScraperBase, SimpleScraperBase\n\n\nclass Hdmovie14Ag(SimpleScraperBase):\n BASE_URL = 'http://www1.solarmovie.net'\n OTHER_URLS = ['http://solarmovie.net', 'http://hdmovie14.ag']\n SCRAPER_TYPES = [ScraperBase.SCRAPER_TYPE_OSP]\n LANGUAGE = 'eng'\n MEDIA_TYPES = [ScraperBase.MEDIA_TYPE_FILM, ScraperBase.MEDIA_TYPE_TV]\n URL_TYPES = [ScraperBase.URL_TYPE_SEARCH, ScraperBase.URL_TYPE_LISTING]\n\n def _fetch_search_url(self, search_term, media_type):\n return '{base_url}/search-movies/{search_term}.html'.format(base_url\n =self.BASE_URL, search_term=search_term)\n\n def _fetch_no_results_text(self):\n return None\n\n def _fetch_next_button(self, soup):\n next_button = soup.find('a', text=u'»')\n if next_button:\n return next_button.href\n return None\n\n def _parse_search_result_page(self, soup):\n found = 0\n for result in soup.select('div.ml-item'):\n link = result.select_one('a')\n self.submit_search_result(link_url=link.href, link_title=link.\n text, image=self.util.find_image_src_or_none(result, 'img'))\n found = 1\n if not found:\n return self.submit_search_no_results()\n\n def _parse_parse_page(self, soup):\n index_page_title = self.util.get_page_title(soup)\n series_season = series_episode = None\n title = soup.select_one('h1')\n if title and title.text:\n series_season, series_episode = self.util.extract_season_episode(\n title.text)\n for results in soup.select('div.server_line'):\n try:\n movie_link = self.make_soup(base64.decodestring(self.\n get_soup(results.select_one('a').href).select_one(\n 'div#media-player script').text.split('(\"')[-1].split(\n '\")')[0])).select_one('iframe')['src']\n except AttributeError:\n movie_link = self.get_soup(results.select_one('a').href\n ).select_one('div#media-player a')['href']\n self.submit_parse_result(index_page_title=index_page_title,\n link_url=movie_link, link_title=movie_link, series_season=\n series_season, series_episode=series_episode)\n",
"<import token>\n\n\nclass Hdmovie14Ag(SimpleScraperBase):\n BASE_URL = 'http://www1.solarmovie.net'\n OTHER_URLS = ['http://solarmovie.net', 'http://hdmovie14.ag']\n SCRAPER_TYPES = [ScraperBase.SCRAPER_TYPE_OSP]\n LANGUAGE = 'eng'\n MEDIA_TYPES = [ScraperBase.MEDIA_TYPE_FILM, ScraperBase.MEDIA_TYPE_TV]\n URL_TYPES = [ScraperBase.URL_TYPE_SEARCH, ScraperBase.URL_TYPE_LISTING]\n\n def _fetch_search_url(self, search_term, media_type):\n return '{base_url}/search-movies/{search_term}.html'.format(base_url\n =self.BASE_URL, search_term=search_term)\n\n def _fetch_no_results_text(self):\n return None\n\n def _fetch_next_button(self, soup):\n next_button = soup.find('a', text=u'»')\n if next_button:\n return next_button.href\n return None\n\n def _parse_search_result_page(self, soup):\n found = 0\n for result in soup.select('div.ml-item'):\n link = result.select_one('a')\n self.submit_search_result(link_url=link.href, link_title=link.\n text, image=self.util.find_image_src_or_none(result, 'img'))\n found = 1\n if not found:\n return self.submit_search_no_results()\n\n def _parse_parse_page(self, soup):\n index_page_title = self.util.get_page_title(soup)\n series_season = series_episode = None\n title = soup.select_one('h1')\n if title and title.text:\n series_season, series_episode = self.util.extract_season_episode(\n title.text)\n for results in soup.select('div.server_line'):\n try:\n movie_link = self.make_soup(base64.decodestring(self.\n get_soup(results.select_one('a').href).select_one(\n 'div#media-player script').text.split('(\"')[-1].split(\n '\")')[0])).select_one('iframe')['src']\n except AttributeError:\n movie_link = self.get_soup(results.select_one('a').href\n ).select_one('div#media-player a')['href']\n self.submit_parse_result(index_page_title=index_page_title,\n link_url=movie_link, link_title=movie_link, series_season=\n series_season, series_episode=series_episode)\n",
"<import token>\n\n\nclass Hdmovie14Ag(SimpleScraperBase):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def _fetch_search_url(self, search_term, media_type):\n return '{base_url}/search-movies/{search_term}.html'.format(base_url\n =self.BASE_URL, search_term=search_term)\n\n def _fetch_no_results_text(self):\n return None\n\n def _fetch_next_button(self, soup):\n next_button = soup.find('a', text=u'»')\n if next_button:\n return next_button.href\n return None\n\n def _parse_search_result_page(self, soup):\n found = 0\n for result in soup.select('div.ml-item'):\n link = result.select_one('a')\n self.submit_search_result(link_url=link.href, link_title=link.\n text, image=self.util.find_image_src_or_none(result, 'img'))\n found = 1\n if not found:\n return self.submit_search_no_results()\n\n def _parse_parse_page(self, soup):\n index_page_title = self.util.get_page_title(soup)\n series_season = series_episode = None\n title = soup.select_one('h1')\n if title and title.text:\n series_season, series_episode = self.util.extract_season_episode(\n title.text)\n for results in soup.select('div.server_line'):\n try:\n movie_link = self.make_soup(base64.decodestring(self.\n get_soup(results.select_one('a').href).select_one(\n 'div#media-player script').text.split('(\"')[-1].split(\n '\")')[0])).select_one('iframe')['src']\n except AttributeError:\n movie_link = self.get_soup(results.select_one('a').href\n ).select_one('div#media-player a')['href']\n self.submit_parse_result(index_page_title=index_page_title,\n link_url=movie_link, link_title=movie_link, series_season=\n series_season, series_episode=series_episode)\n",
"<import token>\n\n\nclass Hdmovie14Ag(SimpleScraperBase):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def _fetch_no_results_text(self):\n return None\n\n def _fetch_next_button(self, soup):\n next_button = soup.find('a', text=u'»')\n if next_button:\n return next_button.href\n return None\n\n def _parse_search_result_page(self, soup):\n found = 0\n for result in soup.select('div.ml-item'):\n link = result.select_one('a')\n self.submit_search_result(link_url=link.href, link_title=link.\n text, image=self.util.find_image_src_or_none(result, 'img'))\n found = 1\n if not found:\n return self.submit_search_no_results()\n\n def _parse_parse_page(self, soup):\n index_page_title = self.util.get_page_title(soup)\n series_season = series_episode = None\n title = soup.select_one('h1')\n if title and title.text:\n series_season, series_episode = self.util.extract_season_episode(\n title.text)\n for results in soup.select('div.server_line'):\n try:\n movie_link = self.make_soup(base64.decodestring(self.\n get_soup(results.select_one('a').href).select_one(\n 'div#media-player script').text.split('(\"')[-1].split(\n '\")')[0])).select_one('iframe')['src']\n except AttributeError:\n movie_link = self.get_soup(results.select_one('a').href\n ).select_one('div#media-player a')['href']\n self.submit_parse_result(index_page_title=index_page_title,\n link_url=movie_link, link_title=movie_link, series_season=\n series_season, series_episode=series_episode)\n",
"<import token>\n\n\nclass Hdmovie14Ag(SimpleScraperBase):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def _fetch_no_results_text(self):\n return None\n\n def _fetch_next_button(self, soup):\n next_button = soup.find('a', text=u'»')\n if next_button:\n return next_button.href\n return None\n\n def _parse_search_result_page(self, soup):\n found = 0\n for result in soup.select('div.ml-item'):\n link = result.select_one('a')\n self.submit_search_result(link_url=link.href, link_title=link.\n text, image=self.util.find_image_src_or_none(result, 'img'))\n found = 1\n if not found:\n return self.submit_search_no_results()\n <function token>\n",
"<import token>\n\n\nclass Hdmovie14Ag(SimpleScraperBase):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n def _fetch_no_results_text(self):\n return None\n\n def _fetch_next_button(self, soup):\n next_button = soup.find('a', text=u'»')\n if next_button:\n return next_button.href\n return None\n <function token>\n <function token>\n",
"<import token>\n\n\nclass Hdmovie14Ag(SimpleScraperBase):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n\n def _fetch_next_button(self, soup):\n next_button = soup.find('a', text=u'»')\n if next_button:\n return next_button.href\n return None\n <function token>\n <function token>\n",
"<import token>\n\n\nclass Hdmovie14Ag(SimpleScraperBase):\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
953 |
aba2a0a262c14f286c278f21ba42871410c174f0
|
from django.shortcuts import render
from django.shortcuts import redirect

# Create your views here.
from .forms import AddBookForm, UpdateBookForm, BookCreateModelForm, SearchForm, RegistrationForm, SignInForm
from book.models import Books
from django.contrib.auth import authenticate, login, logout
def book_add(request):
    if request.user.is_authenticated:
        context = {}
        if request.method == "GET":
            form = BookCreateModelForm()
            context["form"] = form
            return render(request, "addbook.html", context)
        elif request.method == "POST":
            context = {}
            form = BookCreateModelForm(request.POST)
            if form.is_valid():
                form.save()
                return redirect("index")
            else:
                context["form"] = form  # re-render with the bound form so validation errors show
                return render(request, "addbook.html", context)
    else:
        return redirect('singn')
def get_books(request):
    if request.user.is_authenticated:
        form = SearchForm()
        context = {}
        books = Books.objects.all()
        context["books"] = books
        context['form'] = form
        if request.method == "POST":
            form = SearchForm(request.POST)
            if form.is_valid():
                book_name = form.cleaned_data["book_name"]
                books = Books.objects.filter(book_name__contains=book_name)
                context['books'] = books
                return render(request, "book_list.html", context)
            else:
                context['form'] = form
                return render(request, "book_list.html", context)
        return render(request, "book_list.html", context)
    else:
        return redirect('singn')
def book_details(request, id):
    if request.user.is_authenticated:
        book = Books.objects.get(id=id)
        context = {}
        context["book"] = book
        return render(request, "book_details.html", context)
    else:
        return redirect('singn')


def remove_book(request, id):
    if request.user.is_authenticated:
        book = Books.objects.get(id=id)
        book.delete()
        return redirect("books")
    else:
        return redirect('singn')
def update_book(request, id):
    if request.user.is_authenticated:
        book = Books.objects.get(id=id)
        form = BookCreateModelForm(instance=book)
        context = {}
        context['form'] = form
        if request.method == "POST":
            book = Books.objects.get(id=id)
            form = BookCreateModelForm(instance=book, data=request.POST)
            if form.is_valid():
                form.save()
                return redirect("books")
            else:
                context["form"] = form  # keep the instance-bound form so errors display against the book
                return render(request, "edit.html", context)
        return render(request, "edit.html", context)
    else:
        return redirect('singn')
def create_account(request):
    form = RegistrationForm()
    context = {'form': form}
    if request.method == "POST":
        form = RegistrationForm(request.POST)
        if form.is_valid():
            form.save()
            print("account created")
            return redirect("singn")
        else:
            context["form"] = form
            return render(request, "createaccount.html", context)
    return render(request, "createaccount.html", context)
def singn_in(request):
    form = SignInForm()
    context = {'form': form}
    if request.method == "POST":
        form = SignInForm(request.POST)
        if form.is_valid():
            username = form.cleaned_data["username"]
            password = form.cleaned_data["password"]
            user = authenticate(request, username=username, password=password)
            if user:
                login(request, user)
                return redirect("index")
            else:
                context['form'] = form
                return render(request, "signin.html", context)
    return render(request, "signin.html", context)
def signout(request):
if request.user.is_authenticated:
logout(request)
return redirect("singn")
else:
return redirect('singn')
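Every view above repeats the request.user.is_authenticated guard by hand; Django's login_required decorator performs the same check-and-redirect. A minimal sketch, assuming the sign-in view is wired to the URL name 'singn' used throughout:

from django.contrib.auth.decorators import login_required

@login_required(login_url='singn')
def book_details(request, id):
    # unauthenticated users are redirected to 'singn' before this body runs
    book = Books.objects.get(id=id)
    return render(request, "book_details.html", {"book": book})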
|
[
"from django.shortcuts import render\nfrom django.shortcuts import redirect\n\n\n\n# Create your views here.\nfrom .forms import AddBookForm ,UpdateBookForm,BookCreateModelForm,SearchForm,RegistrationForm,SignInForm\nfrom book.models import Books\nfrom django.contrib.auth import authenticate,login,logout\n\n\n\ndef book_add(request):\n if request.user.is_authenticated:\n context = {}\n if request.method == \"GET\":\n form = BookCreateModelForm()\n context[\"form\"] = form\n return render(request, \"addbook.html\", context)\n elif request.method == \"POST\":\n context = {}\n form = BookCreateModelForm(request.POST)\n \n if form.is_valid():\n form.save()\n # context[\"form\"] = form\n # book_name = form.cleaned_data[\"book_name\"]\n # author= form.cleaned_data[\"author\"]\n # category=form.cleaned_data[\"category\"]\n # prices=form.cleaned_data[\"price\"]\n # copies=form.cleaned_data[\"number_copies\"]\n # print(book_name,author,category,prices,copies)\n # book=Books(book_name=book_name,author=author,category=category,price=prices,copies=copies)\n # book.save()\n return redirect(\"index\")\n else:\n return render(request, \"addbook.html\",context)\n else:\n return redirect('singn')\n\ndef get_books(request):\n if request.user.is_authenticated:\n form=SearchForm()\n context = {}\n books=Books.objects.all()\n context[\"books\"]=books\n context['form']=form\n if request.method==\"POST\":\n form=SearchForm(request.POST)\n if form.is_valid():\n book_name=form.cleaned_data[\"book_name\"]\n books=Books.objects.filter(book_name__contains=book_name)\n context['books']=books\n return render(request,\"book_list.html\",context)\n else:\n context['form']=form\n return render(request, \"book_list.html\", context)\n return render(request, \"book_list.html\", context)\n else:\n return redirect('singn')\n\ndef book_details(request,id):\n if request.user.is_authenticated:\n book=Books.objects.get(id=id)\n context = {}\n context[\"book\"]=book\n return render(request,\"book_details.html\",context)\n else:\n return redirect('singn')\n\n\ndef remove_book(request,id):\n if request.user.is_authenticated:\n book=Books.objects.get(id=id)\n book.delete()\n return redirect(\"books\")\n else:\n return redirect('singn')\ndef update_book(request,id):\n if request.user.is_authenticated:\n book = Books.objects.get(id=id)\n form=BookCreateModelForm(instance=book)\n\n # form=BookCreateModelForm(initial={\n # \"book_name\":book.book_name,\n # \"author\":book.author,\n # \"category\":book.category,\n # \"price\":book.price,\n # \"number_copies\":book.copies})\n context = {}\n context['form']=form\n if request.method==\"POST\":\n book = Books.objects.get(id=id)\n form=BookCreateModelForm(instance=book,data=request.POST)\n if form.is_valid():\n form.save()\n # form=BookCreateModelForm(request.POST)\n #\n # if form.is_valid():\n # book.book_name=form.cleaned_data[\"book_name\"]\n # book.author=form.cleaned_data[\"author\"]\n # book.category=form.cleaned_data[\"category\"]\n # book.price=form.cleaned_data[\"price\"]\n # book.copies=form.cleaned_data[\"number_copies\"]\n # book.save()\n return redirect(\"books\")\n else:\n form=BookCreateModelForm(request.POST)\n context[\"form\"]=form\n print(form)\n return render(request, \"edit.html\", context)\n return render(request,\"edit.html\",context)\n else:\n return redirect('singn')\n\n\ndef create_account(request):\n form=RegistrationForm()\n context={'form':form}\n if request.method==\"POST\":\n form=RegistrationForm(request.POST)\n if form.is_valid():\n form.save()\n print(\"account 
created\")\n return redirect(\"singn\")\n else:\n context[\"form\"]=form\n return render(request, \"createaccount.html\", context)\n\n return render(request,\"createaccount.html\",context)\n\n\ndef singn_in(request):\n form=SignInForm()\n context={'form':form}\n if request.method==\"POST\":\n form=SignInForm(request.POST)\n if form.is_valid():\n username=form.cleaned_data[\"username\"]\n password=form.cleaned_data[\"password\"]\n user=authenticate(request,username=username,password=password)\n if user:\n login(request,user)\n return redirect(\"index\")\n else:\n context['form']=form\n return render(request, \"signin.html\", context)\n\n\n \n return render(request,\"signin.html\",context)\n\n\ndef signout(request):\n if request.user.is_authenticated:\n logout(request)\n return redirect(\"singn\")\n else:\n return redirect('singn')\n\n\n\n\n\n\n\n\n\n\n",
"from django.shortcuts import render\nfrom django.shortcuts import redirect\nfrom .forms import AddBookForm, UpdateBookForm, BookCreateModelForm, SearchForm, RegistrationForm, SignInForm\nfrom book.models import Books\nfrom django.contrib.auth import authenticate, login, logout\n\n\ndef book_add(request):\n if request.user.is_authenticated:\n context = {}\n if request.method == 'GET':\n form = BookCreateModelForm()\n context['form'] = form\n return render(request, 'addbook.html', context)\n elif request.method == 'POST':\n context = {}\n form = BookCreateModelForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('index')\n else:\n return render(request, 'addbook.html', context)\n else:\n return redirect('singn')\n\n\ndef get_books(request):\n if request.user.is_authenticated:\n form = SearchForm()\n context = {}\n books = Books.objects.all()\n context['books'] = books\n context['form'] = form\n if request.method == 'POST':\n form = SearchForm(request.POST)\n if form.is_valid():\n book_name = form.cleaned_data['book_name']\n books = Books.objects.filter(book_name__contains=book_name)\n context['books'] = books\n return render(request, 'book_list.html', context)\n else:\n context['form'] = form\n return render(request, 'book_list.html', context)\n return render(request, 'book_list.html', context)\n else:\n return redirect('singn')\n\n\ndef book_details(request, id):\n if request.user.is_authenticated:\n book = Books.objects.get(id=id)\n context = {}\n context['book'] = book\n return render(request, 'book_details.html', context)\n else:\n return redirect('singn')\n\n\ndef remove_book(request, id):\n if request.user.is_authenticated:\n book = Books.objects.get(id=id)\n book.delete()\n return redirect('books')\n else:\n return redirect('singn')\n\n\ndef update_book(request, id):\n if request.user.is_authenticated:\n book = Books.objects.get(id=id)\n form = BookCreateModelForm(instance=book)\n context = {}\n context['form'] = form\n if request.method == 'POST':\n book = Books.objects.get(id=id)\n form = BookCreateModelForm(instance=book, data=request.POST)\n if form.is_valid():\n form.save()\n return redirect('books')\n else:\n form = BookCreateModelForm(request.POST)\n context['form'] = form\n print(form)\n return render(request, 'edit.html', context)\n return render(request, 'edit.html', context)\n else:\n return redirect('singn')\n\n\ndef create_account(request):\n form = RegistrationForm()\n context = {'form': form}\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n form.save()\n print('account created')\n return redirect('singn')\n else:\n context['form'] = form\n return render(request, 'createaccount.html', context)\n return render(request, 'createaccount.html', context)\n\n\ndef singn_in(request):\n form = SignInForm()\n context = {'form': form}\n if request.method == 'POST':\n form = SignInForm(request.POST)\n if form.is_valid():\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user = authenticate(request, username=username, password=password)\n if user:\n login(request, user)\n return redirect('index')\n else:\n context['form'] = form\n return render(request, 'signin.html', context)\n return render(request, 'signin.html', context)\n\n\ndef signout(request):\n if request.user.is_authenticated:\n logout(request)\n return redirect('singn')\n else:\n return redirect('singn')\n",
"<import token>\n\n\ndef book_add(request):\n if request.user.is_authenticated:\n context = {}\n if request.method == 'GET':\n form = BookCreateModelForm()\n context['form'] = form\n return render(request, 'addbook.html', context)\n elif request.method == 'POST':\n context = {}\n form = BookCreateModelForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('index')\n else:\n return render(request, 'addbook.html', context)\n else:\n return redirect('singn')\n\n\ndef get_books(request):\n if request.user.is_authenticated:\n form = SearchForm()\n context = {}\n books = Books.objects.all()\n context['books'] = books\n context['form'] = form\n if request.method == 'POST':\n form = SearchForm(request.POST)\n if form.is_valid():\n book_name = form.cleaned_data['book_name']\n books = Books.objects.filter(book_name__contains=book_name)\n context['books'] = books\n return render(request, 'book_list.html', context)\n else:\n context['form'] = form\n return render(request, 'book_list.html', context)\n return render(request, 'book_list.html', context)\n else:\n return redirect('singn')\n\n\ndef book_details(request, id):\n if request.user.is_authenticated:\n book = Books.objects.get(id=id)\n context = {}\n context['book'] = book\n return render(request, 'book_details.html', context)\n else:\n return redirect('singn')\n\n\ndef remove_book(request, id):\n if request.user.is_authenticated:\n book = Books.objects.get(id=id)\n book.delete()\n return redirect('books')\n else:\n return redirect('singn')\n\n\ndef update_book(request, id):\n if request.user.is_authenticated:\n book = Books.objects.get(id=id)\n form = BookCreateModelForm(instance=book)\n context = {}\n context['form'] = form\n if request.method == 'POST':\n book = Books.objects.get(id=id)\n form = BookCreateModelForm(instance=book, data=request.POST)\n if form.is_valid():\n form.save()\n return redirect('books')\n else:\n form = BookCreateModelForm(request.POST)\n context['form'] = form\n print(form)\n return render(request, 'edit.html', context)\n return render(request, 'edit.html', context)\n else:\n return redirect('singn')\n\n\ndef create_account(request):\n form = RegistrationForm()\n context = {'form': form}\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n form.save()\n print('account created')\n return redirect('singn')\n else:\n context['form'] = form\n return render(request, 'createaccount.html', context)\n return render(request, 'createaccount.html', context)\n\n\ndef singn_in(request):\n form = SignInForm()\n context = {'form': form}\n if request.method == 'POST':\n form = SignInForm(request.POST)\n if form.is_valid():\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user = authenticate(request, username=username, password=password)\n if user:\n login(request, user)\n return redirect('index')\n else:\n context['form'] = form\n return render(request, 'signin.html', context)\n return render(request, 'signin.html', context)\n\n\ndef signout(request):\n if request.user.is_authenticated:\n logout(request)\n return redirect('singn')\n else:\n return redirect('singn')\n",
"<import token>\n\n\ndef book_add(request):\n if request.user.is_authenticated:\n context = {}\n if request.method == 'GET':\n form = BookCreateModelForm()\n context['form'] = form\n return render(request, 'addbook.html', context)\n elif request.method == 'POST':\n context = {}\n form = BookCreateModelForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('index')\n else:\n return render(request, 'addbook.html', context)\n else:\n return redirect('singn')\n\n\n<function token>\n\n\ndef book_details(request, id):\n if request.user.is_authenticated:\n book = Books.objects.get(id=id)\n context = {}\n context['book'] = book\n return render(request, 'book_details.html', context)\n else:\n return redirect('singn')\n\n\ndef remove_book(request, id):\n if request.user.is_authenticated:\n book = Books.objects.get(id=id)\n book.delete()\n return redirect('books')\n else:\n return redirect('singn')\n\n\ndef update_book(request, id):\n if request.user.is_authenticated:\n book = Books.objects.get(id=id)\n form = BookCreateModelForm(instance=book)\n context = {}\n context['form'] = form\n if request.method == 'POST':\n book = Books.objects.get(id=id)\n form = BookCreateModelForm(instance=book, data=request.POST)\n if form.is_valid():\n form.save()\n return redirect('books')\n else:\n form = BookCreateModelForm(request.POST)\n context['form'] = form\n print(form)\n return render(request, 'edit.html', context)\n return render(request, 'edit.html', context)\n else:\n return redirect('singn')\n\n\ndef create_account(request):\n form = RegistrationForm()\n context = {'form': form}\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n form.save()\n print('account created')\n return redirect('singn')\n else:\n context['form'] = form\n return render(request, 'createaccount.html', context)\n return render(request, 'createaccount.html', context)\n\n\ndef singn_in(request):\n form = SignInForm()\n context = {'form': form}\n if request.method == 'POST':\n form = SignInForm(request.POST)\n if form.is_valid():\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user = authenticate(request, username=username, password=password)\n if user:\n login(request, user)\n return redirect('index')\n else:\n context['form'] = form\n return render(request, 'signin.html', context)\n return render(request, 'signin.html', context)\n\n\ndef signout(request):\n if request.user.is_authenticated:\n logout(request)\n return redirect('singn')\n else:\n return redirect('singn')\n",
"<import token>\n\n\ndef book_add(request):\n if request.user.is_authenticated:\n context = {}\n if request.method == 'GET':\n form = BookCreateModelForm()\n context['form'] = form\n return render(request, 'addbook.html', context)\n elif request.method == 'POST':\n context = {}\n form = BookCreateModelForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('index')\n else:\n return render(request, 'addbook.html', context)\n else:\n return redirect('singn')\n\n\n<function token>\n\n\ndef book_details(request, id):\n if request.user.is_authenticated:\n book = Books.objects.get(id=id)\n context = {}\n context['book'] = book\n return render(request, 'book_details.html', context)\n else:\n return redirect('singn')\n\n\ndef remove_book(request, id):\n if request.user.is_authenticated:\n book = Books.objects.get(id=id)\n book.delete()\n return redirect('books')\n else:\n return redirect('singn')\n\n\n<function token>\n\n\ndef create_account(request):\n form = RegistrationForm()\n context = {'form': form}\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n form.save()\n print('account created')\n return redirect('singn')\n else:\n context['form'] = form\n return render(request, 'createaccount.html', context)\n return render(request, 'createaccount.html', context)\n\n\ndef singn_in(request):\n form = SignInForm()\n context = {'form': form}\n if request.method == 'POST':\n form = SignInForm(request.POST)\n if form.is_valid():\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user = authenticate(request, username=username, password=password)\n if user:\n login(request, user)\n return redirect('index')\n else:\n context['form'] = form\n return render(request, 'signin.html', context)\n return render(request, 'signin.html', context)\n\n\ndef signout(request):\n if request.user.is_authenticated:\n logout(request)\n return redirect('singn')\n else:\n return redirect('singn')\n",
"<import token>\n\n\ndef book_add(request):\n if request.user.is_authenticated:\n context = {}\n if request.method == 'GET':\n form = BookCreateModelForm()\n context['form'] = form\n return render(request, 'addbook.html', context)\n elif request.method == 'POST':\n context = {}\n form = BookCreateModelForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('index')\n else:\n return render(request, 'addbook.html', context)\n else:\n return redirect('singn')\n\n\n<function token>\n<function token>\n\n\ndef remove_book(request, id):\n if request.user.is_authenticated:\n book = Books.objects.get(id=id)\n book.delete()\n return redirect('books')\n else:\n return redirect('singn')\n\n\n<function token>\n\n\ndef create_account(request):\n form = RegistrationForm()\n context = {'form': form}\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n form.save()\n print('account created')\n return redirect('singn')\n else:\n context['form'] = form\n return render(request, 'createaccount.html', context)\n return render(request, 'createaccount.html', context)\n\n\ndef singn_in(request):\n form = SignInForm()\n context = {'form': form}\n if request.method == 'POST':\n form = SignInForm(request.POST)\n if form.is_valid():\n username = form.cleaned_data['username']\n password = form.cleaned_data['password']\n user = authenticate(request, username=username, password=password)\n if user:\n login(request, user)\n return redirect('index')\n else:\n context['form'] = form\n return render(request, 'signin.html', context)\n return render(request, 'signin.html', context)\n\n\ndef signout(request):\n if request.user.is_authenticated:\n logout(request)\n return redirect('singn')\n else:\n return redirect('singn')\n",
"<import token>\n\n\ndef book_add(request):\n if request.user.is_authenticated:\n context = {}\n if request.method == 'GET':\n form = BookCreateModelForm()\n context['form'] = form\n return render(request, 'addbook.html', context)\n elif request.method == 'POST':\n context = {}\n form = BookCreateModelForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('index')\n else:\n return render(request, 'addbook.html', context)\n else:\n return redirect('singn')\n\n\n<function token>\n<function token>\n\n\ndef remove_book(request, id):\n if request.user.is_authenticated:\n book = Books.objects.get(id=id)\n book.delete()\n return redirect('books')\n else:\n return redirect('singn')\n\n\n<function token>\n\n\ndef create_account(request):\n form = RegistrationForm()\n context = {'form': form}\n if request.method == 'POST':\n form = RegistrationForm(request.POST)\n if form.is_valid():\n form.save()\n print('account created')\n return redirect('singn')\n else:\n context['form'] = form\n return render(request, 'createaccount.html', context)\n return render(request, 'createaccount.html', context)\n\n\n<function token>\n\n\ndef signout(request):\n if request.user.is_authenticated:\n logout(request)\n return redirect('singn')\n else:\n return redirect('singn')\n",
"<import token>\n\n\ndef book_add(request):\n if request.user.is_authenticated:\n context = {}\n if request.method == 'GET':\n form = BookCreateModelForm()\n context['form'] = form\n return render(request, 'addbook.html', context)\n elif request.method == 'POST':\n context = {}\n form = BookCreateModelForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('index')\n else:\n return render(request, 'addbook.html', context)\n else:\n return redirect('singn')\n\n\n<function token>\n<function token>\n\n\ndef remove_book(request, id):\n if request.user.is_authenticated:\n book = Books.objects.get(id=id)\n book.delete()\n return redirect('books')\n else:\n return redirect('singn')\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef signout(request):\n if request.user.is_authenticated:\n logout(request)\n return redirect('singn')\n else:\n return redirect('singn')\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef remove_book(request, id):\n if request.user.is_authenticated:\n book = Books.objects.get(id=id)\n book.delete()\n return redirect('books')\n else:\n return redirect('singn')\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef signout(request):\n if request.user.is_authenticated:\n logout(request)\n return redirect('singn')\n else:\n return redirect('singn')\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef signout(request):\n if request.user.is_authenticated:\n logout(request)\n return redirect('singn')\n else:\n return redirect('singn')\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
954 |
d5903698eb8ed6be531b0cc522d4feff6b79da4e
|
import argparse
import pandas as pd
import random
import time
class Deck:
def __init__(self, num_cols, front, back):
self.flashcards = []
self.num_cols = num_cols
self.front = front
self.back = back
class Flashcard:
def __init__(self, deck, front, back, column, row):
self.deck = deck
self.front = front
self.back = back
self.column = column
self.row = row
self.correct = False
def show_front(self):
r = "{}: {}".format(self.deck.front, self.front)
return r
def show_back(self):
return "{}: {}".format(self.deck.back, self.back)
def show_card(self):
return "{}: {}, {}: {}".format(self.deck.front, self.front, self.deck.back, self.back)
def show_reverse(self):
return "{}: {}, {}: {}".format(self.deck.back, self.back, self.deck.front, self.front)
def create_deck(filename, num_cols):
df = pd.read_excel(filename)
front = df.columns.values[0]
back = df.columns.values[1]
deck = Deck(num_cols, front, back)
for i in range(num_cols):
front_column = "{}.{}".format(front, i) if i else front
back_column = "{}.{}".format(back, i) if i else back
for row in range(df[front_column].size):
f = df[front_column][row]
b = df[back_column][row]
if not (pd.isnull(f) or pd.isnull(b)):
fc = Flashcard(deck, f.strip(), b.strip(), i, row)
deck.flashcards.append(fc)
return deck
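# pandas renames duplicate spreadsheet headers by suffixing ".1", ".2", ...,
# which is why the loop above looks up "{front}.{i}" for each repeated front/back column pair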
def get_cards_from_deck(deck, first_letter, start_index, number_of_cards):
flashcards = [fc for fc in deck.flashcards if fc.column == first_letter or first_letter == -1]
return flashcards[start_index:number_of_cards+start_index]
def play_game(deck, mode, first_letter, start_index, number_of_cards):
flashcards = get_cards_from_deck(deck, first_letter, start_index, number_of_cards)
play_cards(mode, deck, flashcards)
def play_cards(mode, deck, cards):
source = deck.front if mode%2 == 0 else deck.back
target = deck.back if mode%2 == 0 else deck.front
if mode >= 2:
random.shuffle(cards)
num_cards = len(cards)
start_time = time.time()
for i, fc in enumerate(cards):
source_word = fc.front if mode%2==0 else fc.back
target_word = fc.back if mode%2==0 else fc.front
quiz(fc, source, source_word, target, target_word, i, num_cards)
print("All Done!")
correct = sum(fc.correct == True for fc in cards)
incorrect = len(cards) - correct
print("Correct: {}".format(correct))
print("Incorrect: {}".format(incorrect))
if (incorrect):
incorrect_cards = [fc for fc in cards if not fc.correct]
print("\n".join([fc.show_card() for fc in incorrect_cards]))
again = input("review incorrect words (y/n): ")
if again == 'y' or again == '1' or again == 'да':
play_cards(mode, deck, incorrect_cards)
else:
finish_time = time.time()
time_diff = time.gmtime(finish_time - start_time)
avg_time = time.gmtime((finish_time - start_time) / num_cards)
print("Total Time: {}".format(time.strftime("%H:%M:%S", time_diff)))
print("Time per card: {}".format(time.strftime("%H:%M:%S", avg_time)))
def quiz(fc, source_language, source_word, target_language, target_word, i, number_of_cards):
    print("Card {}/{}".format(i+1, number_of_cards))
    print("{} word: {}".format(source_language, source_word))
    answer = input("Enter {} translation: ".format(target_language))
    if is_correct(answer, target_word):
        fc.correct = True
        print("Correct!")
    else:
        print("Incorrect! Correct answer was: {}".format(target_word))
        input("Enter {} translation for {}: ".format(target_language, source_word))  # retype the answer to reinforce it
def is_correct(answer, target):
return format_for_comparison(answer) == format_for_comparison(target)
def format_for_comparison(word):
# strip whitespace and lowercase
word = word.strip().lower()
# pop off the declensions from the end
word = word.split('(')
# sort the list of meanings
word[0] = word[0].split(', ')
word[0].sort()
# join the first part back together:
word[0] = ', '.join(word[0])
# now add the declensions back on
word = '('.join(word)
return word
def learn_words(deck, first_letter, start_index, number_of_cards):
flashcards = get_cards_from_deck(deck, first_letter, start_index, number_of_cards)
for i, card in enumerate(flashcards):
print("Card {}/{}".format(i+1, number_of_cards))
input("{}\nPractice: ".format(card.show_card()))
input("{}\nPractice: ".format(card.show_front()))
input("{}\nPractice: ".format(card.show_back()))
print("Done! Review learned words:")
for card in flashcards:
print("{}".format(card.show_card()))
def main(filename, first_letter, start_index, number_of_cards, mode):
    num_cols = 9
    deck = create_deck(filename, num_cols)
    print("Welcome to The Flashcard Learner!")
    # Modes 0/1 quiz front->back / back->front in order, 2/3 do the same in
    # shuffled order, and 4 is learning mode (see the argparse help below).
    print("Okay! Let's play!")
    if mode == 4:
        learn_words(deck, first_letter, start_index, number_of_cards)
    else:
        play_game(deck, mode, first_letter, start_index, number_of_cards)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Learn flashcards")
parser.add_argument("filename", help="name of .xlsx file with vocab", default="RussianVocab.xlsx")
parser.add_argument("category", type=int, help="e.g. which letter are you learning? (-1: all, 0:a, 1:б, 2:в, etc.)")
parser.add_argument("start", type=int, help="start index (lists are 0-indexed)")
parser.add_argument("num", type=int, help="number of cards you'd like to see")
parser.add_argument("mode", type=int)
args = parser.parse_args()
main(args.filename, args.category, args.start, args.num, args.mode)
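# A hypothetical invocation (the script name flashcards.py is an assumption, not
# given in the source): quiz 20 cards of letter category 1, starting at index 0,
# in shuffled front-to-back mode 2:
#
#     python flashcards.py RussianVocab.xlsx 1 0 20 2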
|
[
"import argparse\nimport pandas as pd\nimport random\nimport time\n\nclass Deck:\n\tdef __init__(self, num_cols, front, back):\n\t\tself.flashcards = []\n\t\tself.num_cols = num_cols\n\t\tself.front = front\n\t\tself.back = back\n\nclass Flashcard:\n\tdef __init__(self, deck, front, back, column, row):\n\t\tself.deck = deck\n\t\tself.front = front\n\t\tself.back = back\n\t\tself.column = column\n\t\tself.row = row\n\t\tself.correct = False\n\n\tdef show_front(self):\n\t\tr = \"{}: {}\".format(self.deck.front, self.front)\n\t\treturn r\n\n\tdef show_back(self):\n\t\treturn \"{}: {}\".format(self.deck.back, self.back)\n\n\tdef show_card(self):\n\t\treturn \"{}: {}, {}: {}\".format(self.deck.front, self.front, self.deck.back, self.back)\n\n\tdef show_reverse(self):\n\t\treturn \"{}: {}, {}: {}\".format(self.deck.back, self.back, self.deck.front, self.front)\n\n\ndef create_deck(filename, num_cols):\n\tdf = pd.read_excel(filename)\n\tfront = df.columns.values[0]\n\tback = df.columns.values[1]\n\n\tdeck = Deck(num_cols, front, back)\n\tfor i in range(num_cols):\n\t\tfront_column = \"{}.{}\".format(front, i) if i else front\n\t\tback_column = \"{}.{}\".format(back, i) if i else back\n\t\tfor row in range(df[front_column].size):\n\t\t\tf = df[front_column][row]\n\t\t\tb = df[back_column][row]\n\t\t\tif not (pd.isnull(f) or pd.isnull(b)):\t\n\t\t\t\tfc = Flashcard(deck, f.strip(), b.strip(), i, row)\n\t\t\t\tdeck.flashcards.append(fc)\n\t\n\treturn deck\n\ndef get_cards_from_deck(deck, first_letter, start_index, number_of_cards):\n\tflashcards = [fc for fc in deck.flashcards if fc.column == first_letter or first_letter == -1]\n\treturn flashcards[start_index:number_of_cards+start_index]\n\ndef play_game(deck, mode, first_letter, start_index, number_of_cards):\n\tflashcards = get_cards_from_deck(deck, first_letter, start_index, number_of_cards)\n\tplay_cards(mode, deck, flashcards)\n\ndef play_cards(mode, deck, cards):\n\tsource = deck.front if mode%2 == 0 else deck.back\n\ttarget = deck.back if mode%2 == 0 else deck.front\n\n\tif mode >= 2:\n\t\trandom.shuffle(cards)\n\n\tnum_cards = len(cards)\n\tstart_time = time.time()\n\n\tfor i, fc in enumerate(cards):\n\t\tsource_word = fc.front if mode%2==0 else fc.back\n\t\ttarget_word = fc.back if mode%2==0 else fc.front\n\n\t\tquiz(fc, source, source_word, target, target_word, i, num_cards)\n\n\tprint(\"All Done!\")\n\tcorrect = sum(fc.correct == True for fc in cards)\n\tincorrect = len(cards) - correct\n\tprint(\"Correct: {}\".format(correct))\n\tprint(\"Incorrect: {}\".format(incorrect))\n\n\tif (incorrect):\n\t\tincorrect_cards = [fc for fc in cards if not fc.correct]\n\t\tprint(\"\\n\".join([fc.show_card() for fc in incorrect_cards]))\n\t\tagain = input(\"review incorrect words (y/n): \")\n\t\tif again == 'y' or again == '1' or again == 'да':\n\t\t\tplay_cards(mode, deck, incorrect_cards)\n\telse:\n\t\tfinish_time = time.time()\n\t\ttime_diff = time.gmtime(finish_time - start_time)\n\t\tavg_time = time.gmtime((finish_time - start_time) / num_cards)\n\t\tprint(\"Total Time: {}\".format(time.strftime(\"%H:%M:%S\", time_diff)))\n\t\tprint(\"Time per card: {}\".format(time.strftime(\"%H:%M:%S\", avg_time)))\n\ndef quiz(fc, source_language, source_word, target_language, target_word, i, number_of_cards):\n\t\tprint(\"Card {}/{}\".format(i+1, number_of_cards))\n\t\tprint(\"{} word: {}\".format(source_language, source_word))\n\t\tanswer = input(\"Enter {} translation: \".format(target_language))\n\t\t\n\t\tif is_correct(answer, 
target_word):\n\t\t\tfc.correct = True\n\t\t\tprint(\"Correct!\")\n\t\t\n\t\telse:\n\t\t\tprint(\"Incorrect! Correct answer was: {}\".format(target_word))\n\t\t\tn = input(\"Enter {} translation for {}: \".format(target_language, source_word))\n\n\ndef is_correct(answer, target):\n\treturn format_for_comparison(answer) == format_for_comparison(target)\n\n\ndef format_for_comparison(word):\n\t# strip whitespace and lowercase\n\tword = word.strip().lower()\n\n\t# pop off the declensions from the end\n\tword = word.split('(')\n\n\t# sort the list of meanings\n\tword[0] = word[0].split(', ')\n\tword[0].sort()\n\n\t# join the first part back together:\n\tword[0] = ', '.join(word[0])\n\n\t# now add the declensions back on\n\tword = '('.join(word)\n\t\n\treturn word\n\n\ndef learn_words(deck, first_letter, start_index, number_of_cards):\n\tflashcards = get_cards_from_deck(deck, first_letter, start_index, number_of_cards)\n\tfor i, card in enumerate(flashcards):\n\t\tprint(\"Card {}/{}\".format(i+1, number_of_cards))\n\t\tinput(\"{}\\nPractice: \".format(card.show_card()))\n\t\tinput(\"{}\\nPractice: \".format(card.show_front()))\n\t\tinput(\"{}\\nPractice: \".format(card.show_back()))\n\t\n\tprint(\"Done! Review learned words:\")\n\tfor card in flashcards:\n\t\tprint(\"{}\".format(card.show_card()))\n\ndef main(filename, first_letter, start_index, number_of_cards, mode):\n\tnum_cols = 9\n\tdeck = create_deck(filename, num_cols)\n\tprint(\"Welcome to The Flashcard Learner!\")\n\t# print(\"Available Modes:\")\n\t# print(\"0: Quiz - Given a word in {}, provide {} translation\".format(deck.front.lower(), deck.back.lower()))\n\t# print(\"1: Quiz - Given a word in {}, provide {} translation\".format(deck.back.lower(), deck.front.lower()))\n\t# print(\"2: Mode 0 with cards given in random order\")\n\t# print(\"3: Mode 1 with cards given in random order\")\n\t# print(\"4: Learning - Shown {} and {} side by side, practice typing both\".format(deck.front.lower(), deck.back.lower()))\n\t# mode = int(input(\"Enter mode: \"))\n\t\n\tprint(\"Okay! Let's play!\")\n\tif mode == 4:\n\t\tlearn_words(deck, first_letter, start_index, number_of_cards)\n\telse:\n\t\tplay_game(deck, mode, first_letter, start_index, number_of_cards)\n\nif __name__ == \"__main__\":\n\tparser = argparse.ArgumentParser(description=\"Learn flashcards\")\n\tparser.add_argument(\"filename\", help=\"name of .xlsx file with vocab\", default=\"RussianVocab.xlsx\")\n\tparser.add_argument(\"category\", type=int, help=\"e.g. which letter are you learning? (-1: all, 0:a, 1:б, 2:в, etc.)\")\n\tparser.add_argument(\"start\", type=int, help=\"start index (lists are 0-indexed)\")\n\tparser.add_argument(\"num\", type=int, help=\"number of cards you'd like to see\")\n\tparser.add_argument(\"mode\", type=int)\n\targs = parser.parse_args()\n\tmain(args.filename, args.category, args.start, args.num, args.mode)\n\n",
"import argparse\nimport pandas as pd\nimport random\nimport time\n\n\nclass Deck:\n\n def __init__(self, num_cols, front, back):\n self.flashcards = []\n self.num_cols = num_cols\n self.front = front\n self.back = back\n\n\nclass Flashcard:\n\n def __init__(self, deck, front, back, column, row):\n self.deck = deck\n self.front = front\n self.back = back\n self.column = column\n self.row = row\n self.correct = False\n\n def show_front(self):\n r = '{}: {}'.format(self.deck.front, self.front)\n return r\n\n def show_back(self):\n return '{}: {}'.format(self.deck.back, self.back)\n\n def show_card(self):\n return '{}: {}, {}: {}'.format(self.deck.front, self.front, self.\n deck.back, self.back)\n\n def show_reverse(self):\n return '{}: {}, {}: {}'.format(self.deck.back, self.back, self.deck\n .front, self.front)\n\n\ndef create_deck(filename, num_cols):\n df = pd.read_excel(filename)\n front = df.columns.values[0]\n back = df.columns.values[1]\n deck = Deck(num_cols, front, back)\n for i in range(num_cols):\n front_column = '{}.{}'.format(front, i) if i else front\n back_column = '{}.{}'.format(back, i) if i else back\n for row in range(df[front_column].size):\n f = df[front_column][row]\n b = df[back_column][row]\n if not (pd.isnull(f) or pd.isnull(b)):\n fc = Flashcard(deck, f.strip(), b.strip(), i, row)\n deck.flashcards.append(fc)\n return deck\n\n\ndef get_cards_from_deck(deck, first_letter, start_index, number_of_cards):\n flashcards = [fc for fc in deck.flashcards if fc.column == first_letter or\n first_letter == -1]\n return flashcards[start_index:number_of_cards + start_index]\n\n\ndef play_game(deck, mode, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n play_cards(mode, deck, flashcards)\n\n\ndef play_cards(mode, deck, cards):\n source = deck.front if mode % 2 == 0 else deck.back\n target = deck.back if mode % 2 == 0 else deck.front\n if mode >= 2:\n random.shuffle(cards)\n num_cards = len(cards)\n start_time = time.time()\n for i, fc in enumerate(cards):\n source_word = fc.front if mode % 2 == 0 else fc.back\n target_word = fc.back if mode % 2 == 0 else fc.front\n quiz(fc, source, source_word, target, target_word, i, num_cards)\n print('All Done!')\n correct = sum(fc.correct == True for fc in cards)\n incorrect = len(cards) - correct\n print('Correct: {}'.format(correct))\n print('Incorrect: {}'.format(incorrect))\n if incorrect:\n incorrect_cards = [fc for fc in cards if not fc.correct]\n print('\\n'.join([fc.show_card() for fc in incorrect_cards]))\n again = input('review incorrect words (y/n): ')\n if again == 'y' or again == '1' or again == 'да':\n play_cards(mode, deck, incorrect_cards)\n else:\n finish_time = time.time()\n time_diff = time.gmtime(finish_time - start_time)\n avg_time = time.gmtime((finish_time - start_time) / num_cards)\n print('Total Time: {}'.format(time.strftime('%H:%M:%S', time_diff)))\n print('Time per card: {}'.format(time.strftime('%H:%M:%S', avg_time)))\n\n\ndef quiz(fc, source_language, source_word, target_language, target_word, i,\n number_of_cards):\n print('Card {}/{}'.format(i + 1, number_of_cards))\n print('{} word: {}'.format(source_language, source_word))\n answer = input('Enter {} translation: '.format(target_language))\n if is_correct(answer, target_word):\n fc.correct = True\n print('Correct!')\n else:\n print('Incorrect! 
Correct answer was: {}'.format(target_word))\n n = input('Enter {} translation for {}: '.format(target_language,\n source_word))\n\n\ndef is_correct(answer, target):\n return format_for_comparison(answer) == format_for_comparison(target)\n\n\ndef format_for_comparison(word):\n word = word.strip().lower()\n word = word.split('(')\n word[0] = word[0].split(', ')\n word[0].sort()\n word[0] = ', '.join(word[0])\n word = '('.join(word)\n return word\n\n\ndef learn_words(deck, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n for i, card in enumerate(flashcards):\n print('Card {}/{}'.format(i + 1, number_of_cards))\n input('{}\\nPractice: '.format(card.show_card()))\n input('{}\\nPractice: '.format(card.show_front()))\n input('{}\\nPractice: '.format(card.show_back()))\n print('Done! Review learned words:')\n for card in flashcards:\n print('{}'.format(card.show_card()))\n\n\ndef main(filename, first_letter, start_index, number_of_cards, mode):\n num_cols = 9\n deck = create_deck(filename, num_cols)\n print('Welcome to The Flashcard Learner!')\n print(\"Okay! Let's play!\")\n if mode == 4:\n learn_words(deck, first_letter, start_index, number_of_cards)\n else:\n play_game(deck, mode, first_letter, start_index, number_of_cards)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Learn flashcards')\n parser.add_argument('filename', help='name of .xlsx file with vocab',\n default='RussianVocab.xlsx')\n parser.add_argument('category', type=int, help=\n 'e.g. which letter are you learning? (-1: all, 0:a, 1:б, 2:в, etc.)')\n parser.add_argument('start', type=int, help=\n 'start index (lists are 0-indexed)')\n parser.add_argument('num', type=int, help=\n \"number of cards you'd like to see\")\n parser.add_argument('mode', type=int)\n args = parser.parse_args()\n main(args.filename, args.category, args.start, args.num, args.mode)\n",
"<import token>\n\n\nclass Deck:\n\n def __init__(self, num_cols, front, back):\n self.flashcards = []\n self.num_cols = num_cols\n self.front = front\n self.back = back\n\n\nclass Flashcard:\n\n def __init__(self, deck, front, back, column, row):\n self.deck = deck\n self.front = front\n self.back = back\n self.column = column\n self.row = row\n self.correct = False\n\n def show_front(self):\n r = '{}: {}'.format(self.deck.front, self.front)\n return r\n\n def show_back(self):\n return '{}: {}'.format(self.deck.back, self.back)\n\n def show_card(self):\n return '{}: {}, {}: {}'.format(self.deck.front, self.front, self.\n deck.back, self.back)\n\n def show_reverse(self):\n return '{}: {}, {}: {}'.format(self.deck.back, self.back, self.deck\n .front, self.front)\n\n\ndef create_deck(filename, num_cols):\n df = pd.read_excel(filename)\n front = df.columns.values[0]\n back = df.columns.values[1]\n deck = Deck(num_cols, front, back)\n for i in range(num_cols):\n front_column = '{}.{}'.format(front, i) if i else front\n back_column = '{}.{}'.format(back, i) if i else back\n for row in range(df[front_column].size):\n f = df[front_column][row]\n b = df[back_column][row]\n if not (pd.isnull(f) or pd.isnull(b)):\n fc = Flashcard(deck, f.strip(), b.strip(), i, row)\n deck.flashcards.append(fc)\n return deck\n\n\ndef get_cards_from_deck(deck, first_letter, start_index, number_of_cards):\n flashcards = [fc for fc in deck.flashcards if fc.column == first_letter or\n first_letter == -1]\n return flashcards[start_index:number_of_cards + start_index]\n\n\ndef play_game(deck, mode, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n play_cards(mode, deck, flashcards)\n\n\ndef play_cards(mode, deck, cards):\n source = deck.front if mode % 2 == 0 else deck.back\n target = deck.back if mode % 2 == 0 else deck.front\n if mode >= 2:\n random.shuffle(cards)\n num_cards = len(cards)\n start_time = time.time()\n for i, fc in enumerate(cards):\n source_word = fc.front if mode % 2 == 0 else fc.back\n target_word = fc.back if mode % 2 == 0 else fc.front\n quiz(fc, source, source_word, target, target_word, i, num_cards)\n print('All Done!')\n correct = sum(fc.correct == True for fc in cards)\n incorrect = len(cards) - correct\n print('Correct: {}'.format(correct))\n print('Incorrect: {}'.format(incorrect))\n if incorrect:\n incorrect_cards = [fc for fc in cards if not fc.correct]\n print('\\n'.join([fc.show_card() for fc in incorrect_cards]))\n again = input('review incorrect words (y/n): ')\n if again == 'y' or again == '1' or again == 'да':\n play_cards(mode, deck, incorrect_cards)\n else:\n finish_time = time.time()\n time_diff = time.gmtime(finish_time - start_time)\n avg_time = time.gmtime((finish_time - start_time) / num_cards)\n print('Total Time: {}'.format(time.strftime('%H:%M:%S', time_diff)))\n print('Time per card: {}'.format(time.strftime('%H:%M:%S', avg_time)))\n\n\ndef quiz(fc, source_language, source_word, target_language, target_word, i,\n number_of_cards):\n print('Card {}/{}'.format(i + 1, number_of_cards))\n print('{} word: {}'.format(source_language, source_word))\n answer = input('Enter {} translation: '.format(target_language))\n if is_correct(answer, target_word):\n fc.correct = True\n print('Correct!')\n else:\n print('Incorrect! 
Correct answer was: {}'.format(target_word))\n n = input('Enter {} translation for {}: '.format(target_language,\n source_word))\n\n\ndef is_correct(answer, target):\n return format_for_comparison(answer) == format_for_comparison(target)\n\n\ndef format_for_comparison(word):\n word = word.strip().lower()\n word = word.split('(')\n word[0] = word[0].split(', ')\n word[0].sort()\n word[0] = ', '.join(word[0])\n word = '('.join(word)\n return word\n\n\ndef learn_words(deck, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n for i, card in enumerate(flashcards):\n print('Card {}/{}'.format(i + 1, number_of_cards))\n input('{}\\nPractice: '.format(card.show_card()))\n input('{}\\nPractice: '.format(card.show_front()))\n input('{}\\nPractice: '.format(card.show_back()))\n print('Done! Review learned words:')\n for card in flashcards:\n print('{}'.format(card.show_card()))\n\n\ndef main(filename, first_letter, start_index, number_of_cards, mode):\n num_cols = 9\n deck = create_deck(filename, num_cols)\n print('Welcome to The Flashcard Learner!')\n print(\"Okay! Let's play!\")\n if mode == 4:\n learn_words(deck, first_letter, start_index, number_of_cards)\n else:\n play_game(deck, mode, first_letter, start_index, number_of_cards)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Learn flashcards')\n parser.add_argument('filename', help='name of .xlsx file with vocab',\n default='RussianVocab.xlsx')\n parser.add_argument('category', type=int, help=\n 'e.g. which letter are you learning? (-1: all, 0:a, 1:б, 2:в, etc.)')\n parser.add_argument('start', type=int, help=\n 'start index (lists are 0-indexed)')\n parser.add_argument('num', type=int, help=\n \"number of cards you'd like to see\")\n parser.add_argument('mode', type=int)\n args = parser.parse_args()\n main(args.filename, args.category, args.start, args.num, args.mode)\n",
"<import token>\n\n\nclass Deck:\n\n def __init__(self, num_cols, front, back):\n self.flashcards = []\n self.num_cols = num_cols\n self.front = front\n self.back = back\n\n\nclass Flashcard:\n\n def __init__(self, deck, front, back, column, row):\n self.deck = deck\n self.front = front\n self.back = back\n self.column = column\n self.row = row\n self.correct = False\n\n def show_front(self):\n r = '{}: {}'.format(self.deck.front, self.front)\n return r\n\n def show_back(self):\n return '{}: {}'.format(self.deck.back, self.back)\n\n def show_card(self):\n return '{}: {}, {}: {}'.format(self.deck.front, self.front, self.\n deck.back, self.back)\n\n def show_reverse(self):\n return '{}: {}, {}: {}'.format(self.deck.back, self.back, self.deck\n .front, self.front)\n\n\ndef create_deck(filename, num_cols):\n df = pd.read_excel(filename)\n front = df.columns.values[0]\n back = df.columns.values[1]\n deck = Deck(num_cols, front, back)\n for i in range(num_cols):\n front_column = '{}.{}'.format(front, i) if i else front\n back_column = '{}.{}'.format(back, i) if i else back\n for row in range(df[front_column].size):\n f = df[front_column][row]\n b = df[back_column][row]\n if not (pd.isnull(f) or pd.isnull(b)):\n fc = Flashcard(deck, f.strip(), b.strip(), i, row)\n deck.flashcards.append(fc)\n return deck\n\n\ndef get_cards_from_deck(deck, first_letter, start_index, number_of_cards):\n flashcards = [fc for fc in deck.flashcards if fc.column == first_letter or\n first_letter == -1]\n return flashcards[start_index:number_of_cards + start_index]\n\n\ndef play_game(deck, mode, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n play_cards(mode, deck, flashcards)\n\n\ndef play_cards(mode, deck, cards):\n source = deck.front if mode % 2 == 0 else deck.back\n target = deck.back if mode % 2 == 0 else deck.front\n if mode >= 2:\n random.shuffle(cards)\n num_cards = len(cards)\n start_time = time.time()\n for i, fc in enumerate(cards):\n source_word = fc.front if mode % 2 == 0 else fc.back\n target_word = fc.back if mode % 2 == 0 else fc.front\n quiz(fc, source, source_word, target, target_word, i, num_cards)\n print('All Done!')\n correct = sum(fc.correct == True for fc in cards)\n incorrect = len(cards) - correct\n print('Correct: {}'.format(correct))\n print('Incorrect: {}'.format(incorrect))\n if incorrect:\n incorrect_cards = [fc for fc in cards if not fc.correct]\n print('\\n'.join([fc.show_card() for fc in incorrect_cards]))\n again = input('review incorrect words (y/n): ')\n if again == 'y' or again == '1' or again == 'да':\n play_cards(mode, deck, incorrect_cards)\n else:\n finish_time = time.time()\n time_diff = time.gmtime(finish_time - start_time)\n avg_time = time.gmtime((finish_time - start_time) / num_cards)\n print('Total Time: {}'.format(time.strftime('%H:%M:%S', time_diff)))\n print('Time per card: {}'.format(time.strftime('%H:%M:%S', avg_time)))\n\n\ndef quiz(fc, source_language, source_word, target_language, target_word, i,\n number_of_cards):\n print('Card {}/{}'.format(i + 1, number_of_cards))\n print('{} word: {}'.format(source_language, source_word))\n answer = input('Enter {} translation: '.format(target_language))\n if is_correct(answer, target_word):\n fc.correct = True\n print('Correct!')\n else:\n print('Incorrect! 
Correct answer was: {}'.format(target_word))\n n = input('Enter {} translation for {}: '.format(target_language,\n source_word))\n\n\ndef is_correct(answer, target):\n return format_for_comparison(answer) == format_for_comparison(target)\n\n\ndef format_for_comparison(word):\n word = word.strip().lower()\n word = word.split('(')\n word[0] = word[0].split(', ')\n word[0].sort()\n word[0] = ', '.join(word[0])\n word = '('.join(word)\n return word\n\n\ndef learn_words(deck, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n for i, card in enumerate(flashcards):\n print('Card {}/{}'.format(i + 1, number_of_cards))\n input('{}\\nPractice: '.format(card.show_card()))\n input('{}\\nPractice: '.format(card.show_front()))\n input('{}\\nPractice: '.format(card.show_back()))\n print('Done! Review learned words:')\n for card in flashcards:\n print('{}'.format(card.show_card()))\n\n\ndef main(filename, first_letter, start_index, number_of_cards, mode):\n num_cols = 9\n deck = create_deck(filename, num_cols)\n print('Welcome to The Flashcard Learner!')\n print(\"Okay! Let's play!\")\n if mode == 4:\n learn_words(deck, first_letter, start_index, number_of_cards)\n else:\n play_game(deck, mode, first_letter, start_index, number_of_cards)\n\n\n<code token>\n",
"<import token>\n\n\nclass Deck:\n\n def __init__(self, num_cols, front, back):\n self.flashcards = []\n self.num_cols = num_cols\n self.front = front\n self.back = back\n\n\nclass Flashcard:\n\n def __init__(self, deck, front, back, column, row):\n self.deck = deck\n self.front = front\n self.back = back\n self.column = column\n self.row = row\n self.correct = False\n\n def show_front(self):\n r = '{}: {}'.format(self.deck.front, self.front)\n return r\n\n def show_back(self):\n return '{}: {}'.format(self.deck.back, self.back)\n\n def show_card(self):\n return '{}: {}, {}: {}'.format(self.deck.front, self.front, self.\n deck.back, self.back)\n\n def show_reverse(self):\n return '{}: {}, {}: {}'.format(self.deck.back, self.back, self.deck\n .front, self.front)\n\n\ndef create_deck(filename, num_cols):\n df = pd.read_excel(filename)\n front = df.columns.values[0]\n back = df.columns.values[1]\n deck = Deck(num_cols, front, back)\n for i in range(num_cols):\n front_column = '{}.{}'.format(front, i) if i else front\n back_column = '{}.{}'.format(back, i) if i else back\n for row in range(df[front_column].size):\n f = df[front_column][row]\n b = df[back_column][row]\n if not (pd.isnull(f) or pd.isnull(b)):\n fc = Flashcard(deck, f.strip(), b.strip(), i, row)\n deck.flashcards.append(fc)\n return deck\n\n\ndef get_cards_from_deck(deck, first_letter, start_index, number_of_cards):\n flashcards = [fc for fc in deck.flashcards if fc.column == first_letter or\n first_letter == -1]\n return flashcards[start_index:number_of_cards + start_index]\n\n\ndef play_game(deck, mode, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n play_cards(mode, deck, flashcards)\n\n\ndef play_cards(mode, deck, cards):\n source = deck.front if mode % 2 == 0 else deck.back\n target = deck.back if mode % 2 == 0 else deck.front\n if mode >= 2:\n random.shuffle(cards)\n num_cards = len(cards)\n start_time = time.time()\n for i, fc in enumerate(cards):\n source_word = fc.front if mode % 2 == 0 else fc.back\n target_word = fc.back if mode % 2 == 0 else fc.front\n quiz(fc, source, source_word, target, target_word, i, num_cards)\n print('All Done!')\n correct = sum(fc.correct == True for fc in cards)\n incorrect = len(cards) - correct\n print('Correct: {}'.format(correct))\n print('Incorrect: {}'.format(incorrect))\n if incorrect:\n incorrect_cards = [fc for fc in cards if not fc.correct]\n print('\\n'.join([fc.show_card() for fc in incorrect_cards]))\n again = input('review incorrect words (y/n): ')\n if again == 'y' or again == '1' or again == 'да':\n play_cards(mode, deck, incorrect_cards)\n else:\n finish_time = time.time()\n time_diff = time.gmtime(finish_time - start_time)\n avg_time = time.gmtime((finish_time - start_time) / num_cards)\n print('Total Time: {}'.format(time.strftime('%H:%M:%S', time_diff)))\n print('Time per card: {}'.format(time.strftime('%H:%M:%S', avg_time)))\n\n\ndef quiz(fc, source_language, source_word, target_language, target_word, i,\n number_of_cards):\n print('Card {}/{}'.format(i + 1, number_of_cards))\n print('{} word: {}'.format(source_language, source_word))\n answer = input('Enter {} translation: '.format(target_language))\n if is_correct(answer, target_word):\n fc.correct = True\n print('Correct!')\n else:\n print('Incorrect! 
Correct answer was: {}'.format(target_word))\n n = input('Enter {} translation for {}: '.format(target_language,\n source_word))\n\n\n<function token>\n\n\ndef format_for_comparison(word):\n word = word.strip().lower()\n word = word.split('(')\n word[0] = word[0].split(', ')\n word[0].sort()\n word[0] = ', '.join(word[0])\n word = '('.join(word)\n return word\n\n\ndef learn_words(deck, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n for i, card in enumerate(flashcards):\n print('Card {}/{}'.format(i + 1, number_of_cards))\n input('{}\\nPractice: '.format(card.show_card()))\n input('{}\\nPractice: '.format(card.show_front()))\n input('{}\\nPractice: '.format(card.show_back()))\n print('Done! Review learned words:')\n for card in flashcards:\n print('{}'.format(card.show_card()))\n\n\ndef main(filename, first_letter, start_index, number_of_cards, mode):\n num_cols = 9\n deck = create_deck(filename, num_cols)\n print('Welcome to The Flashcard Learner!')\n print(\"Okay! Let's play!\")\n if mode == 4:\n learn_words(deck, first_letter, start_index, number_of_cards)\n else:\n play_game(deck, mode, first_letter, start_index, number_of_cards)\n\n\n<code token>\n",
"<import token>\n\n\nclass Deck:\n\n def __init__(self, num_cols, front, back):\n self.flashcards = []\n self.num_cols = num_cols\n self.front = front\n self.back = back\n\n\nclass Flashcard:\n\n def __init__(self, deck, front, back, column, row):\n self.deck = deck\n self.front = front\n self.back = back\n self.column = column\n self.row = row\n self.correct = False\n\n def show_front(self):\n r = '{}: {}'.format(self.deck.front, self.front)\n return r\n\n def show_back(self):\n return '{}: {}'.format(self.deck.back, self.back)\n\n def show_card(self):\n return '{}: {}, {}: {}'.format(self.deck.front, self.front, self.\n deck.back, self.back)\n\n def show_reverse(self):\n return '{}: {}, {}: {}'.format(self.deck.back, self.back, self.deck\n .front, self.front)\n\n\n<function token>\n\n\ndef get_cards_from_deck(deck, first_letter, start_index, number_of_cards):\n flashcards = [fc for fc in deck.flashcards if fc.column == first_letter or\n first_letter == -1]\n return flashcards[start_index:number_of_cards + start_index]\n\n\ndef play_game(deck, mode, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n play_cards(mode, deck, flashcards)\n\n\ndef play_cards(mode, deck, cards):\n source = deck.front if mode % 2 == 0 else deck.back\n target = deck.back if mode % 2 == 0 else deck.front\n if mode >= 2:\n random.shuffle(cards)\n num_cards = len(cards)\n start_time = time.time()\n for i, fc in enumerate(cards):\n source_word = fc.front if mode % 2 == 0 else fc.back\n target_word = fc.back if mode % 2 == 0 else fc.front\n quiz(fc, source, source_word, target, target_word, i, num_cards)\n print('All Done!')\n correct = sum(fc.correct == True for fc in cards)\n incorrect = len(cards) - correct\n print('Correct: {}'.format(correct))\n print('Incorrect: {}'.format(incorrect))\n if incorrect:\n incorrect_cards = [fc for fc in cards if not fc.correct]\n print('\\n'.join([fc.show_card() for fc in incorrect_cards]))\n again = input('review incorrect words (y/n): ')\n if again == 'y' or again == '1' or again == 'да':\n play_cards(mode, deck, incorrect_cards)\n else:\n finish_time = time.time()\n time_diff = time.gmtime(finish_time - start_time)\n avg_time = time.gmtime((finish_time - start_time) / num_cards)\n print('Total Time: {}'.format(time.strftime('%H:%M:%S', time_diff)))\n print('Time per card: {}'.format(time.strftime('%H:%M:%S', avg_time)))\n\n\ndef quiz(fc, source_language, source_word, target_language, target_word, i,\n number_of_cards):\n print('Card {}/{}'.format(i + 1, number_of_cards))\n print('{} word: {}'.format(source_language, source_word))\n answer = input('Enter {} translation: '.format(target_language))\n if is_correct(answer, target_word):\n fc.correct = True\n print('Correct!')\n else:\n print('Incorrect! 
Correct answer was: {}'.format(target_word))\n n = input('Enter {} translation for {}: '.format(target_language,\n source_word))\n\n\n<function token>\n\n\ndef format_for_comparison(word):\n word = word.strip().lower()\n word = word.split('(')\n word[0] = word[0].split(', ')\n word[0].sort()\n word[0] = ', '.join(word[0])\n word = '('.join(word)\n return word\n\n\ndef learn_words(deck, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n for i, card in enumerate(flashcards):\n print('Card {}/{}'.format(i + 1, number_of_cards))\n input('{}\\nPractice: '.format(card.show_card()))\n input('{}\\nPractice: '.format(card.show_front()))\n input('{}\\nPractice: '.format(card.show_back()))\n print('Done! Review learned words:')\n for card in flashcards:\n print('{}'.format(card.show_card()))\n\n\ndef main(filename, first_letter, start_index, number_of_cards, mode):\n num_cols = 9\n deck = create_deck(filename, num_cols)\n print('Welcome to The Flashcard Learner!')\n print(\"Okay! Let's play!\")\n if mode == 4:\n learn_words(deck, first_letter, start_index, number_of_cards)\n else:\n play_game(deck, mode, first_letter, start_index, number_of_cards)\n\n\n<code token>\n",
"<import token>\n\n\nclass Deck:\n\n def __init__(self, num_cols, front, back):\n self.flashcards = []\n self.num_cols = num_cols\n self.front = front\n self.back = back\n\n\nclass Flashcard:\n\n def __init__(self, deck, front, back, column, row):\n self.deck = deck\n self.front = front\n self.back = back\n self.column = column\n self.row = row\n self.correct = False\n\n def show_front(self):\n r = '{}: {}'.format(self.deck.front, self.front)\n return r\n\n def show_back(self):\n return '{}: {}'.format(self.deck.back, self.back)\n\n def show_card(self):\n return '{}: {}, {}: {}'.format(self.deck.front, self.front, self.\n deck.back, self.back)\n\n def show_reverse(self):\n return '{}: {}, {}: {}'.format(self.deck.back, self.back, self.deck\n .front, self.front)\n\n\n<function token>\n<function token>\n\n\ndef play_game(deck, mode, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n play_cards(mode, deck, flashcards)\n\n\ndef play_cards(mode, deck, cards):\n source = deck.front if mode % 2 == 0 else deck.back\n target = deck.back if mode % 2 == 0 else deck.front\n if mode >= 2:\n random.shuffle(cards)\n num_cards = len(cards)\n start_time = time.time()\n for i, fc in enumerate(cards):\n source_word = fc.front if mode % 2 == 0 else fc.back\n target_word = fc.back if mode % 2 == 0 else fc.front\n quiz(fc, source, source_word, target, target_word, i, num_cards)\n print('All Done!')\n correct = sum(fc.correct == True for fc in cards)\n incorrect = len(cards) - correct\n print('Correct: {}'.format(correct))\n print('Incorrect: {}'.format(incorrect))\n if incorrect:\n incorrect_cards = [fc for fc in cards if not fc.correct]\n print('\\n'.join([fc.show_card() for fc in incorrect_cards]))\n again = input('review incorrect words (y/n): ')\n if again == 'y' or again == '1' or again == 'да':\n play_cards(mode, deck, incorrect_cards)\n else:\n finish_time = time.time()\n time_diff = time.gmtime(finish_time - start_time)\n avg_time = time.gmtime((finish_time - start_time) / num_cards)\n print('Total Time: {}'.format(time.strftime('%H:%M:%S', time_diff)))\n print('Time per card: {}'.format(time.strftime('%H:%M:%S', avg_time)))\n\n\ndef quiz(fc, source_language, source_word, target_language, target_word, i,\n number_of_cards):\n print('Card {}/{}'.format(i + 1, number_of_cards))\n print('{} word: {}'.format(source_language, source_word))\n answer = input('Enter {} translation: '.format(target_language))\n if is_correct(answer, target_word):\n fc.correct = True\n print('Correct!')\n else:\n print('Incorrect! Correct answer was: {}'.format(target_word))\n n = input('Enter {} translation for {}: '.format(target_language,\n source_word))\n\n\n<function token>\n\n\ndef format_for_comparison(word):\n word = word.strip().lower()\n word = word.split('(')\n word[0] = word[0].split(', ')\n word[0].sort()\n word[0] = ', '.join(word[0])\n word = '('.join(word)\n return word\n\n\ndef learn_words(deck, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n for i, card in enumerate(flashcards):\n print('Card {}/{}'.format(i + 1, number_of_cards))\n input('{}\\nPractice: '.format(card.show_card()))\n input('{}\\nPractice: '.format(card.show_front()))\n input('{}\\nPractice: '.format(card.show_back()))\n print('Done! 
Review learned words:')\n for card in flashcards:\n print('{}'.format(card.show_card()))\n\n\ndef main(filename, first_letter, start_index, number_of_cards, mode):\n num_cols = 9\n deck = create_deck(filename, num_cols)\n print('Welcome to The Flashcard Learner!')\n print(\"Okay! Let's play!\")\n if mode == 4:\n learn_words(deck, first_letter, start_index, number_of_cards)\n else:\n play_game(deck, mode, first_letter, start_index, number_of_cards)\n\n\n<code token>\n",
"<import token>\n\n\nclass Deck:\n\n def __init__(self, num_cols, front, back):\n self.flashcards = []\n self.num_cols = num_cols\n self.front = front\n self.back = back\n\n\nclass Flashcard:\n\n def __init__(self, deck, front, back, column, row):\n self.deck = deck\n self.front = front\n self.back = back\n self.column = column\n self.row = row\n self.correct = False\n\n def show_front(self):\n r = '{}: {}'.format(self.deck.front, self.front)\n return r\n\n def show_back(self):\n return '{}: {}'.format(self.deck.back, self.back)\n\n def show_card(self):\n return '{}: {}, {}: {}'.format(self.deck.front, self.front, self.\n deck.back, self.back)\n\n def show_reverse(self):\n return '{}: {}, {}: {}'.format(self.deck.back, self.back, self.deck\n .front, self.front)\n\n\n<function token>\n<function token>\n\n\ndef play_game(deck, mode, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n play_cards(mode, deck, flashcards)\n\n\ndef play_cards(mode, deck, cards):\n source = deck.front if mode % 2 == 0 else deck.back\n target = deck.back if mode % 2 == 0 else deck.front\n if mode >= 2:\n random.shuffle(cards)\n num_cards = len(cards)\n start_time = time.time()\n for i, fc in enumerate(cards):\n source_word = fc.front if mode % 2 == 0 else fc.back\n target_word = fc.back if mode % 2 == 0 else fc.front\n quiz(fc, source, source_word, target, target_word, i, num_cards)\n print('All Done!')\n correct = sum(fc.correct == True for fc in cards)\n incorrect = len(cards) - correct\n print('Correct: {}'.format(correct))\n print('Incorrect: {}'.format(incorrect))\n if incorrect:\n incorrect_cards = [fc for fc in cards if not fc.correct]\n print('\\n'.join([fc.show_card() for fc in incorrect_cards]))\n again = input('review incorrect words (y/n): ')\n if again == 'y' or again == '1' or again == 'да':\n play_cards(mode, deck, incorrect_cards)\n else:\n finish_time = time.time()\n time_diff = time.gmtime(finish_time - start_time)\n avg_time = time.gmtime((finish_time - start_time) / num_cards)\n print('Total Time: {}'.format(time.strftime('%H:%M:%S', time_diff)))\n print('Time per card: {}'.format(time.strftime('%H:%M:%S', avg_time)))\n\n\ndef quiz(fc, source_language, source_word, target_language, target_word, i,\n number_of_cards):\n print('Card {}/{}'.format(i + 1, number_of_cards))\n print('{} word: {}'.format(source_language, source_word))\n answer = input('Enter {} translation: '.format(target_language))\n if is_correct(answer, target_word):\n fc.correct = True\n print('Correct!')\n else:\n print('Incorrect! Correct answer was: {}'.format(target_word))\n n = input('Enter {} translation for {}: '.format(target_language,\n source_word))\n\n\n<function token>\n\n\ndef format_for_comparison(word):\n word = word.strip().lower()\n word = word.split('(')\n word[0] = word[0].split(', ')\n word[0].sort()\n word[0] = ', '.join(word[0])\n word = '('.join(word)\n return word\n\n\ndef learn_words(deck, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n for i, card in enumerate(flashcards):\n print('Card {}/{}'.format(i + 1, number_of_cards))\n input('{}\\nPractice: '.format(card.show_card()))\n input('{}\\nPractice: '.format(card.show_front()))\n input('{}\\nPractice: '.format(card.show_back()))\n print('Done! Review learned words:')\n for card in flashcards:\n print('{}'.format(card.show_card()))\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass Deck:\n\n def __init__(self, num_cols, front, back):\n self.flashcards = []\n self.num_cols = num_cols\n self.front = front\n self.back = back\n\n\nclass Flashcard:\n\n def __init__(self, deck, front, back, column, row):\n self.deck = deck\n self.front = front\n self.back = back\n self.column = column\n self.row = row\n self.correct = False\n\n def show_front(self):\n r = '{}: {}'.format(self.deck.front, self.front)\n return r\n\n def show_back(self):\n return '{}: {}'.format(self.deck.back, self.back)\n\n def show_card(self):\n return '{}: {}, {}: {}'.format(self.deck.front, self.front, self.\n deck.back, self.back)\n\n def show_reverse(self):\n return '{}: {}, {}: {}'.format(self.deck.back, self.back, self.deck\n .front, self.front)\n\n\n<function token>\n<function token>\n\n\ndef play_game(deck, mode, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n play_cards(mode, deck, flashcards)\n\n\ndef play_cards(mode, deck, cards):\n source = deck.front if mode % 2 == 0 else deck.back\n target = deck.back if mode % 2 == 0 else deck.front\n if mode >= 2:\n random.shuffle(cards)\n num_cards = len(cards)\n start_time = time.time()\n for i, fc in enumerate(cards):\n source_word = fc.front if mode % 2 == 0 else fc.back\n target_word = fc.back if mode % 2 == 0 else fc.front\n quiz(fc, source, source_word, target, target_word, i, num_cards)\n print('All Done!')\n correct = sum(fc.correct == True for fc in cards)\n incorrect = len(cards) - correct\n print('Correct: {}'.format(correct))\n print('Incorrect: {}'.format(incorrect))\n if incorrect:\n incorrect_cards = [fc for fc in cards if not fc.correct]\n print('\\n'.join([fc.show_card() for fc in incorrect_cards]))\n again = input('review incorrect words (y/n): ')\n if again == 'y' or again == '1' or again == 'да':\n play_cards(mode, deck, incorrect_cards)\n else:\n finish_time = time.time()\n time_diff = time.gmtime(finish_time - start_time)\n avg_time = time.gmtime((finish_time - start_time) / num_cards)\n print('Total Time: {}'.format(time.strftime('%H:%M:%S', time_diff)))\n print('Time per card: {}'.format(time.strftime('%H:%M:%S', avg_time)))\n\n\n<function token>\n<function token>\n\n\ndef format_for_comparison(word):\n word = word.strip().lower()\n word = word.split('(')\n word[0] = word[0].split(', ')\n word[0].sort()\n word[0] = ', '.join(word[0])\n word = '('.join(word)\n return word\n\n\ndef learn_words(deck, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n for i, card in enumerate(flashcards):\n print('Card {}/{}'.format(i + 1, number_of_cards))\n input('{}\\nPractice: '.format(card.show_card()))\n input('{}\\nPractice: '.format(card.show_front()))\n input('{}\\nPractice: '.format(card.show_back()))\n print('Done! Review learned words:')\n for card in flashcards:\n print('{}'.format(card.show_card()))\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass Deck:\n\n def __init__(self, num_cols, front, back):\n self.flashcards = []\n self.num_cols = num_cols\n self.front = front\n self.back = back\n\n\nclass Flashcard:\n\n def __init__(self, deck, front, back, column, row):\n self.deck = deck\n self.front = front\n self.back = back\n self.column = column\n self.row = row\n self.correct = False\n\n def show_front(self):\n r = '{}: {}'.format(self.deck.front, self.front)\n return r\n\n def show_back(self):\n return '{}: {}'.format(self.deck.back, self.back)\n\n def show_card(self):\n return '{}: {}, {}: {}'.format(self.deck.front, self.front, self.\n deck.back, self.back)\n\n def show_reverse(self):\n return '{}: {}, {}: {}'.format(self.deck.back, self.back, self.deck\n .front, self.front)\n\n\n<function token>\n<function token>\n\n\ndef play_game(deck, mode, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n play_cards(mode, deck, flashcards)\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef format_for_comparison(word):\n word = word.strip().lower()\n word = word.split('(')\n word[0] = word[0].split(', ')\n word[0].sort()\n word[0] = ', '.join(word[0])\n word = '('.join(word)\n return word\n\n\ndef learn_words(deck, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n for i, card in enumerate(flashcards):\n print('Card {}/{}'.format(i + 1, number_of_cards))\n input('{}\\nPractice: '.format(card.show_card()))\n input('{}\\nPractice: '.format(card.show_front()))\n input('{}\\nPractice: '.format(card.show_back()))\n print('Done! Review learned words:')\n for card in flashcards:\n print('{}'.format(card.show_card()))\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass Deck:\n\n def __init__(self, num_cols, front, back):\n self.flashcards = []\n self.num_cols = num_cols\n self.front = front\n self.back = back\n\n\nclass Flashcard:\n\n def __init__(self, deck, front, back, column, row):\n self.deck = deck\n self.front = front\n self.back = back\n self.column = column\n self.row = row\n self.correct = False\n\n def show_front(self):\n r = '{}: {}'.format(self.deck.front, self.front)\n return r\n\n def show_back(self):\n return '{}: {}'.format(self.deck.back, self.back)\n\n def show_card(self):\n return '{}: {}, {}: {}'.format(self.deck.front, self.front, self.\n deck.back, self.back)\n\n def show_reverse(self):\n return '{}: {}, {}: {}'.format(self.deck.back, self.back, self.deck\n .front, self.front)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef format_for_comparison(word):\n word = word.strip().lower()\n word = word.split('(')\n word[0] = word[0].split(', ')\n word[0].sort()\n word[0] = ', '.join(word[0])\n word = '('.join(word)\n return word\n\n\ndef learn_words(deck, first_letter, start_index, number_of_cards):\n flashcards = get_cards_from_deck(deck, first_letter, start_index,\n number_of_cards)\n for i, card in enumerate(flashcards):\n print('Card {}/{}'.format(i + 1, number_of_cards))\n input('{}\\nPractice: '.format(card.show_card()))\n input('{}\\nPractice: '.format(card.show_front()))\n input('{}\\nPractice: '.format(card.show_back()))\n print('Done! Review learned words:')\n for card in flashcards:\n print('{}'.format(card.show_card()))\n\n\n<function token>\n<code token>\n",
"<import token>\n\n\nclass Deck:\n\n def __init__(self, num_cols, front, back):\n self.flashcards = []\n self.num_cols = num_cols\n self.front = front\n self.back = back\n\n\nclass Flashcard:\n\n def __init__(self, deck, front, back, column, row):\n self.deck = deck\n self.front = front\n self.back = back\n self.column = column\n self.row = row\n self.correct = False\n\n def show_front(self):\n r = '{}: {}'.format(self.deck.front, self.front)\n return r\n\n def show_back(self):\n return '{}: {}'.format(self.deck.back, self.back)\n\n def show_card(self):\n return '{}: {}, {}: {}'.format(self.deck.front, self.front, self.\n deck.back, self.back)\n\n def show_reverse(self):\n return '{}: {}, {}: {}'.format(self.deck.back, self.back, self.deck\n .front, self.front)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef format_for_comparison(word):\n word = word.strip().lower()\n word = word.split('(')\n word[0] = word[0].split(', ')\n word[0].sort()\n word[0] = ', '.join(word[0])\n word = '('.join(word)\n return word\n\n\n<function token>\n<function token>\n<code token>\n",
"<import token>\n\n\nclass Deck:\n\n def __init__(self, num_cols, front, back):\n self.flashcards = []\n self.num_cols = num_cols\n self.front = front\n self.back = back\n\n\nclass Flashcard:\n\n def __init__(self, deck, front, back, column, row):\n self.deck = deck\n self.front = front\n self.back = back\n self.column = column\n self.row = row\n self.correct = False\n\n def show_front(self):\n r = '{}: {}'.format(self.deck.front, self.front)\n return r\n\n def show_back(self):\n return '{}: {}'.format(self.deck.back, self.back)\n\n def show_card(self):\n return '{}: {}, {}: {}'.format(self.deck.front, self.front, self.\n deck.back, self.back)\n\n def show_reverse(self):\n return '{}: {}, {}: {}'.format(self.deck.back, self.back, self.deck\n .front, self.front)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n\n\nclass Deck:\n <function token>\n\n\nclass Flashcard:\n\n def __init__(self, deck, front, back, column, row):\n self.deck = deck\n self.front = front\n self.back = back\n self.column = column\n self.row = row\n self.correct = False\n\n def show_front(self):\n r = '{}: {}'.format(self.deck.front, self.front)\n return r\n\n def show_back(self):\n return '{}: {}'.format(self.deck.back, self.back)\n\n def show_card(self):\n return '{}: {}, {}: {}'.format(self.deck.front, self.front, self.\n deck.back, self.back)\n\n def show_reverse(self):\n return '{}: {}, {}: {}'.format(self.deck.back, self.back, self.deck\n .front, self.front)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<class token>\n\n\nclass Flashcard:\n\n def __init__(self, deck, front, back, column, row):\n self.deck = deck\n self.front = front\n self.back = back\n self.column = column\n self.row = row\n self.correct = False\n\n def show_front(self):\n r = '{}: {}'.format(self.deck.front, self.front)\n return r\n\n def show_back(self):\n return '{}: {}'.format(self.deck.back, self.back)\n\n def show_card(self):\n return '{}: {}, {}: {}'.format(self.deck.front, self.front, self.\n deck.back, self.back)\n\n def show_reverse(self):\n return '{}: {}, {}: {}'.format(self.deck.back, self.back, self.deck\n .front, self.front)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<class token>\n\n\nclass Flashcard:\n\n def __init__(self, deck, front, back, column, row):\n self.deck = deck\n self.front = front\n self.back = back\n self.column = column\n self.row = row\n self.correct = False\n\n def show_front(self):\n r = '{}: {}'.format(self.deck.front, self.front)\n return r\n\n def show_back(self):\n return '{}: {}'.format(self.deck.back, self.back)\n <function token>\n\n def show_reverse(self):\n return '{}: {}, {}: {}'.format(self.deck.back, self.back, self.deck\n .front, self.front)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<class token>\n\n\nclass Flashcard:\n\n def __init__(self, deck, front, back, column, row):\n self.deck = deck\n self.front = front\n self.back = back\n self.column = column\n self.row = row\n self.correct = False\n <function token>\n\n def show_back(self):\n return '{}: {}'.format(self.deck.back, self.back)\n <function token>\n\n def show_reverse(self):\n return '{}: {}, {}: {}'.format(self.deck.back, self.back, self.deck\n .front, self.front)\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<class token>\n\n\nclass Flashcard:\n\n def __init__(self, deck, front, back, column, row):\n self.deck = deck\n self.front = front\n self.back = back\n self.column = column\n self.row = row\n self.correct = False\n <function token>\n\n def show_back(self):\n return '{}: {}'.format(self.deck.back, self.back)\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<class token>\n\n\nclass Flashcard:\n <function token>\n <function token>\n\n def show_back(self):\n return '{}: {}'.format(self.deck.back, self.back)\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<class token>\n\n\nclass Flashcard:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<import token>\n<class token>\n<class token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
955 |
48291ab3deb1ca1ba672d3e642d55635a7270171
|
import serial
from settings import *
class CommunicationController:
    def __init__(self):
        # Open the serial link to the motor board; port and baud rate come from settings.
        self.board = serial.Serial(ROBOT_SERIAL, BAUDRATE, serial.EIGHTBITS, timeout=0)
        self.count = 0
        print("Communication controller")
    def sendCommand(self, right, back, left):
        self.count += 1
        # Periodically flush both serial buffers so stale bytes cannot pile up.
        if self.count >= BUFFER_RESET_BOUND:
            self.board.reset_output_buffer()
            self.board.reset_input_buffer()
            self.count = 0
        # Wire format (order matches the join below):
        # sd:RIGHTWHEEL:LEFTWHEEL:BACKWHEEL\n
        command = ":".join(("sd", str(right), str(left), str(back)))
        if self.board.is_open:
            # pyserial expects bytes on Python 3, so encode before writing.
            self.board.write((command + '\n').encode())
    def throwBall(self, value):
        if self.board.is_open:
            command = ":".join(("d", str(value)))
            print(command)
            self.board.write((command + '\r\n').encode())
            print("Throw")
        else:
            print("No board")
|
[
"import serial\nfrom settings import *\nclass CommunicationController:\n def __init__(self):\n global board\n board = serial.Serial(ROBOT_SERIAL, BAUDRATE, serial.EIGHTBITS, timeout=0)\n self.count = 0\n print(\"Communication controller\")\n\n def sendCommand(self, right, back, left):\n self.count += 1\n if self.count >= BUFFER_RESET_BOUND:\n board.reset_output_buffer()\n board.reset_input_buffer()\n self.count = 0\n #format:\n #sd:BACKWHEEL:RIGHTWHEEL:LEFTWHEEL\\n\n command = \":\".join((\"sd\", str(right), str(left), str(back) ))\n if board.is_open:\n board.write(command + '\\n')\n # print(command)\n\n def throwBall(self, value):\n if board.is_open:\n command = \":\".join((\"d\",str(value)))\n print(command)\n board.write(command + '\\r\\n')\n print(\"Throw\")\n else:\n print(\"No board\")\n",
"import serial\nfrom settings import *\n\n\nclass CommunicationController:\n\n def __init__(self):\n global board\n board = serial.Serial(ROBOT_SERIAL, BAUDRATE, serial.EIGHTBITS,\n timeout=0)\n self.count = 0\n print('Communication controller')\n\n def sendCommand(self, right, back, left):\n self.count += 1\n if self.count >= BUFFER_RESET_BOUND:\n board.reset_output_buffer()\n board.reset_input_buffer()\n self.count = 0\n command = ':'.join(('sd', str(right), str(left), str(back)))\n if board.is_open:\n board.write(command + '\\n')\n\n def throwBall(self, value):\n if board.is_open:\n command = ':'.join(('d', str(value)))\n print(command)\n board.write(command + '\\r\\n')\n print('Throw')\n else:\n print('No board')\n",
"<import token>\n\n\nclass CommunicationController:\n\n def __init__(self):\n global board\n board = serial.Serial(ROBOT_SERIAL, BAUDRATE, serial.EIGHTBITS,\n timeout=0)\n self.count = 0\n print('Communication controller')\n\n def sendCommand(self, right, back, left):\n self.count += 1\n if self.count >= BUFFER_RESET_BOUND:\n board.reset_output_buffer()\n board.reset_input_buffer()\n self.count = 0\n command = ':'.join(('sd', str(right), str(left), str(back)))\n if board.is_open:\n board.write(command + '\\n')\n\n def throwBall(self, value):\n if board.is_open:\n command = ':'.join(('d', str(value)))\n print(command)\n board.write(command + '\\r\\n')\n print('Throw')\n else:\n print('No board')\n",
"<import token>\n\n\nclass CommunicationController:\n\n def __init__(self):\n global board\n board = serial.Serial(ROBOT_SERIAL, BAUDRATE, serial.EIGHTBITS,\n timeout=0)\n self.count = 0\n print('Communication controller')\n <function token>\n\n def throwBall(self, value):\n if board.is_open:\n command = ':'.join(('d', str(value)))\n print(command)\n board.write(command + '\\r\\n')\n print('Throw')\n else:\n print('No board')\n",
"<import token>\n\n\nclass CommunicationController:\n <function token>\n <function token>\n\n def throwBall(self, value):\n if board.is_open:\n command = ':'.join(('d', str(value)))\n print(command)\n board.write(command + '\\r\\n')\n print('Throw')\n else:\n print('No board')\n",
"<import token>\n\n\nclass CommunicationController:\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
956 |
d7b0ff6549d854d21ad1d2d0f5a9e7f75f4ac1d5
|
from django.test import TestCase
from recruitmentapp.apps.core.models import Competence
class CompetenceTest(TestCase):
def setUp(self):
self.competence = Competence.objects.create(name='mining')
self.competence.set_current_language('sv')
self.competence.name = 'gruvarbete'
self.competence.save()
def test_translation(self):
competence = Competence.objects.first()
self.assertEqual(competence.name, 'mining')
competence.set_current_language('sv')
self.assertEqual(competence.name, 'gruvarbete')
def test_translation_fallback(self):
competence = Competence.objects.first()
competence.set_current_language('fi')
self.assertEqual(competence.name, 'mining')
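# The test exercises a translatable model; a sketch of what Competence might
# look like, assuming django-parler (suggested by the set_current_language API)
# with English configured as the fallback language:
#
#     from django.db import models
#     from parler.models import TranslatableModel, TranslatedFields
#
#     class Competence(TranslatableModel):
#         translations = TranslatedFields(
#             name=models.CharField(max_length=255),
#         )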
|
[
"from django.test import TestCase\n\nfrom recruitmentapp.apps.core.models import Competence\n\n\nclass CompetenceTest(TestCase):\n def setUp(self):\n self.competence = Competence.objects.create(name='mining')\n self.competence.set_current_language('sv')\n self.competence.name = 'gruvarbete'\n self.competence.save()\n\n def test_translation(self):\n competence = Competence.objects.first()\n self.assertEqual(competence.name, 'mining')\n competence.set_current_language('sv')\n self.assertEqual(competence.name, 'gruvarbete')\n\n def test_translation_fallback(self):\n competence = Competence.objects.first()\n competence.set_current_language('fi')\n self.assertEqual(competence.name, 'mining')\n",
"from django.test import TestCase\nfrom recruitmentapp.apps.core.models import Competence\n\n\nclass CompetenceTest(TestCase):\n\n def setUp(self):\n self.competence = Competence.objects.create(name='mining')\n self.competence.set_current_language('sv')\n self.competence.name = 'gruvarbete'\n self.competence.save()\n\n def test_translation(self):\n competence = Competence.objects.first()\n self.assertEqual(competence.name, 'mining')\n competence.set_current_language('sv')\n self.assertEqual(competence.name, 'gruvarbete')\n\n def test_translation_fallback(self):\n competence = Competence.objects.first()\n competence.set_current_language('fi')\n self.assertEqual(competence.name, 'mining')\n",
"<import token>\n\n\nclass CompetenceTest(TestCase):\n\n def setUp(self):\n self.competence = Competence.objects.create(name='mining')\n self.competence.set_current_language('sv')\n self.competence.name = 'gruvarbete'\n self.competence.save()\n\n def test_translation(self):\n competence = Competence.objects.first()\n self.assertEqual(competence.name, 'mining')\n competence.set_current_language('sv')\n self.assertEqual(competence.name, 'gruvarbete')\n\n def test_translation_fallback(self):\n competence = Competence.objects.first()\n competence.set_current_language('fi')\n self.assertEqual(competence.name, 'mining')\n",
"<import token>\n\n\nclass CompetenceTest(TestCase):\n\n def setUp(self):\n self.competence = Competence.objects.create(name='mining')\n self.competence.set_current_language('sv')\n self.competence.name = 'gruvarbete'\n self.competence.save()\n\n def test_translation(self):\n competence = Competence.objects.first()\n self.assertEqual(competence.name, 'mining')\n competence.set_current_language('sv')\n self.assertEqual(competence.name, 'gruvarbete')\n <function token>\n",
"<import token>\n\n\nclass CompetenceTest(TestCase):\n\n def setUp(self):\n self.competence = Competence.objects.create(name='mining')\n self.competence.set_current_language('sv')\n self.competence.name = 'gruvarbete'\n self.competence.save()\n <function token>\n <function token>\n",
"<import token>\n\n\nclass CompetenceTest(TestCase):\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
957 |
8457cdde8f8ad069505c7729b8217e5d272be41e
|
from apps.mastermind.core.domain.domain import Color, Game
from apps.mastermind.infrastructure.mongo_persistence.uow import MongoUnitOfWork
from composite_root.container import provide
class GameMother:
async def a_game(
self,
num_slots: int,
num_colors: int,
max_guesses: int,
secret_code: list[Color],
reference: str | None = None,
) -> Game:
async with provide(MongoUnitOfWork) as uow:
game = Game.new(
id=uow.games.next_id(),
num_slots=num_slots,
num_colors=num_colors,
max_guesses=max_guesses,
)
game.secret_code = secret_code
if reference:
game.reference = reference
await uow.games.asave(game)
await uow.commit()
return game
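# A minimal usage sketch for the Object Mother above, assuming pytest-asyncio
# is available and that Color exposes a RED member (both are assumptions,
# not taken from the code above):
import pytest

@pytest.mark.asyncio
async def test_a_game_builds_persisted_game():
    game = await GameMother().a_game(num_slots=4, num_colors=6,
        max_guesses=10, secret_code=[Color.RED] * 4, reference='game-1')
    assert game.reference == 'game-1'
    assert game.secret_code == [Color.RED] * 4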
|
[
"from apps.mastermind.core.domain.domain import Color, Game\nfrom apps.mastermind.infrastructure.mongo_persistence.uow import MongoUnitOfWork\nfrom composite_root.container import provide\n\n\nclass GameMother:\n async def a_game(\n self,\n num_slots: int,\n num_colors: int,\n max_guesses: int,\n secret_code: list[Color],\n reference: str | None = None,\n ) -> Game:\n async with provide(MongoUnitOfWork) as uow:\n game = Game.new(\n id=uow.games.next_id(),\n num_slots=num_slots,\n num_colors=num_colors,\n max_guesses=max_guesses,\n )\n game.secret_code = secret_code\n\n if reference:\n game.reference = reference\n\n await uow.games.asave(game)\n await uow.commit()\n return game\n",
"from apps.mastermind.core.domain.domain import Color, Game\nfrom apps.mastermind.infrastructure.mongo_persistence.uow import MongoUnitOfWork\nfrom composite_root.container import provide\n\n\nclass GameMother:\n\n async def a_game(self, num_slots: int, num_colors: int, max_guesses:\n int, secret_code: list[Color], reference: (str | None)=None) ->Game:\n async with provide(MongoUnitOfWork) as uow:\n game = Game.new(id=uow.games.next_id(), num_slots=num_slots,\n num_colors=num_colors, max_guesses=max_guesses)\n game.secret_code = secret_code\n if reference:\n game.reference = reference\n await uow.games.asave(game)\n await uow.commit()\n return game\n",
"<import token>\n\n\nclass GameMother:\n\n async def a_game(self, num_slots: int, num_colors: int, max_guesses:\n int, secret_code: list[Color], reference: (str | None)=None) ->Game:\n async with provide(MongoUnitOfWork) as uow:\n game = Game.new(id=uow.games.next_id(), num_slots=num_slots,\n num_colors=num_colors, max_guesses=max_guesses)\n game.secret_code = secret_code\n if reference:\n game.reference = reference\n await uow.games.asave(game)\n await uow.commit()\n return game\n",
"<import token>\n<class token>\n"
] | false |
958 |
690e7cc9047b3a445bf330524df52e2b359f1f13
|
AuthorPath = 'data/Author.csv'
PaperPath = 'buff/Paper.TitleCut.csv'
PaperAuthorPath = 'data/PaperAuthor.csv'
AffilListPath = 'buff/AffilList2.csv'
StopwordPath = 'InternalData/en.lst'
|
[
"AuthorPath = 'data/Author.csv'\r\nPaperPath = 'buff/Paper.TitleCut.csv'\r\nPaperAuthorPath = 'data/PaperAuthor.csv'\r\nAffilListPath = 'buff/AffilList2.csv'\r\nStopwordPath='InternalData/en.lst'\r\n\r\n",
"AuthorPath = 'data/Author.csv'\nPaperPath = 'buff/Paper.TitleCut.csv'\nPaperAuthorPath = 'data/PaperAuthor.csv'\nAffilListPath = 'buff/AffilList2.csv'\nStopwordPath = 'InternalData/en.lst'\n",
"<assignment token>\n"
] | false |
959 |
7a9515b1f8cc196eb7551137a1418d5a387e7fd3
|
import pandas as pd
import numpy as np
import difflib as dl
import sys
def get_close(x):
if len(x) == 0:
return ""
return x[0]
list_file = sys.argv[1]
rating_file = sys.argv[2]
output_file = sys.argv[3]
movie_list = open(list_file).read().splitlines()
movie_data = pd.DataFrame({'movie': movie_list})
rating_data = pd.read_csv(rating_file)
rating_data['rating'] = rating_data['rating'].astype(str).astype(float)
rating_data['counts'] = pd.Series(1, index=rating_data.index)
rating_data = rating_data.groupby('title')[['counts', 'rating']].sum().reset_index()
rating_data['average_rating'] = pd.Series(rating_data['rating']/rating_data['counts'], index=rating_data.index)
movie_data['closed'] = pd.Series(movie_data['movie'], index=movie_data.index)
movie_data['closed'] = movie_data['closed'].apply(lambda x: dl.get_close_matches(x, rating_data['title'], n=1))
movie_data['closed'] = movie_data['closed'].apply(get_close)
result = movie_data.set_index('closed').join(rating_data.set_index('title')).reset_index()
result['average_rating'] = result['average_rating'].apply(lambda x: round(x, 2))
result = result.drop(['closed', 'rating', 'counts'], axis=1)
result = result.set_index('movie')
result.to_csv(output_file, sep=',', encoding='utf-8')
|
[
"import pandas as pd\nimport numpy as np\nimport difflib as dl\nimport sys\n\ndef get_close(x):\n\tif len(x) == 0:\n\t\treturn \"\"\n\treturn x[0]\n\nlist_file = sys.argv[1]\nrating_file = sys.argv[2]\noutput_file = sys.argv[3]\n\nmovie_list = open(list_file).read().splitlines()\nmovie_data = pd.DataFrame({'movie': movie_list})\nrating_data = pd.read_csv(rating_file)\nrating_data['rating'] = rating_data['rating'].astype(str).astype(float)\nrating_data['counts'] = pd.Series(1, index=rating_data.index)\nrating_data = rating_data.groupby(['title'])['counts', 'rating'].sum().reset_index()\nrating_data['average_rating'] = pd.Series(rating_data['rating']/rating_data['counts'], index=rating_data.index)\n\nmovie_data['closed'] = pd.Series(movie_data['movie'], index=movie_data.index)\nmovie_data['closed'] = movie_data['closed'].apply(lambda x: dl.get_close_matches(x, rating_data['title'], n=1))\nmovie_data['closed'] = movie_data['closed'].apply(get_close)\n\nresult = movie_data.set_index('closed').join(rating_data.set_index('title')).reset_index()\n\nresult['average_rating'] = result['average_rating'].apply(lambda x: round(x, 2))\nresult = result.drop(['closed', 'rating', 'counts'], axis=1)\nresult = result.set_index('movie')\n\nresult.to_csv(output_file, sep=',', encoding='utf-8')\n",
"import pandas as pd\nimport numpy as np\nimport difflib as dl\nimport sys\n\n\ndef get_close(x):\n if len(x) == 0:\n return ''\n return x[0]\n\n\nlist_file = sys.argv[1]\nrating_file = sys.argv[2]\noutput_file = sys.argv[3]\nmovie_list = open(list_file).read().splitlines()\nmovie_data = pd.DataFrame({'movie': movie_list})\nrating_data = pd.read_csv(rating_file)\nrating_data['rating'] = rating_data['rating'].astype(str).astype(float)\nrating_data['counts'] = pd.Series(1, index=rating_data.index)\nrating_data = rating_data.groupby(['title'])['counts', 'rating'].sum(\n ).reset_index()\nrating_data['average_rating'] = pd.Series(rating_data['rating'] /\n rating_data['counts'], index=rating_data.index)\nmovie_data['closed'] = pd.Series(movie_data['movie'], index=movie_data.index)\nmovie_data['closed'] = movie_data['closed'].apply(lambda x: dl.\n get_close_matches(x, rating_data['title'], n=1))\nmovie_data['closed'] = movie_data['closed'].apply(get_close)\nresult = movie_data.set_index('closed').join(rating_data.set_index('title')\n ).reset_index()\nresult['average_rating'] = result['average_rating'].apply(lambda x: round(x, 2)\n )\nresult = result.drop(['closed', 'rating', 'counts'], axis=1)\nresult = result.set_index('movie')\nresult.to_csv(output_file, sep=',', encoding='utf-8')\n",
"<import token>\n\n\ndef get_close(x):\n if len(x) == 0:\n return ''\n return x[0]\n\n\nlist_file = sys.argv[1]\nrating_file = sys.argv[2]\noutput_file = sys.argv[3]\nmovie_list = open(list_file).read().splitlines()\nmovie_data = pd.DataFrame({'movie': movie_list})\nrating_data = pd.read_csv(rating_file)\nrating_data['rating'] = rating_data['rating'].astype(str).astype(float)\nrating_data['counts'] = pd.Series(1, index=rating_data.index)\nrating_data = rating_data.groupby(['title'])['counts', 'rating'].sum(\n ).reset_index()\nrating_data['average_rating'] = pd.Series(rating_data['rating'] /\n rating_data['counts'], index=rating_data.index)\nmovie_data['closed'] = pd.Series(movie_data['movie'], index=movie_data.index)\nmovie_data['closed'] = movie_data['closed'].apply(lambda x: dl.\n get_close_matches(x, rating_data['title'], n=1))\nmovie_data['closed'] = movie_data['closed'].apply(get_close)\nresult = movie_data.set_index('closed').join(rating_data.set_index('title')\n ).reset_index()\nresult['average_rating'] = result['average_rating'].apply(lambda x: round(x, 2)\n )\nresult = result.drop(['closed', 'rating', 'counts'], axis=1)\nresult = result.set_index('movie')\nresult.to_csv(output_file, sep=',', encoding='utf-8')\n",
"<import token>\n\n\ndef get_close(x):\n if len(x) == 0:\n return ''\n return x[0]\n\n\n<assignment token>\nresult.to_csv(output_file, sep=',', encoding='utf-8')\n",
"<import token>\n\n\ndef get_close(x):\n if len(x) == 0:\n return ''\n return x[0]\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
960 |
19aad7d45416e311530aa2ce3e854cf1f65d18f5
|
import multiprocessing
import time
def foo():
time.sleep(0.1)
if __name__ == '__main__':
    # guard is required with the 'spawn' start method (Windows/macOS)
    p = multiprocessing.Process(target=foo)
    p.start()
    print("process running: ", p, p.is_alive())
    p.terminate()
    print("process running: ", p, p.is_alive())
    p.join()
    print("process running: ", p, p.is_alive())
    print("process exit code:", p.exitcode)
|
[
"import multiprocessing\nimport time\n\n\ndef foo():\n time.sleep(0.1)\n\n\np = multiprocessing.Process(target=foo)\np.start()\nprint(\"process running: \", p, p.is_alive())\np.terminate()\nprint(\"process running: \", p, p.is_alive())\np.join()\nprint(\"process running: \", p, p.is_alive())\nprint(\"process exit code:\", p.exitcode)\n",
"import multiprocessing\nimport time\n\n\ndef foo():\n time.sleep(0.1)\n\n\np = multiprocessing.Process(target=foo)\np.start()\nprint('process running: ', p, p.is_alive())\np.terminate()\nprint('process running: ', p, p.is_alive())\np.join()\nprint('process running: ', p, p.is_alive())\nprint('process exit code:', p.exitcode)\n",
"<import token>\n\n\ndef foo():\n time.sleep(0.1)\n\n\np = multiprocessing.Process(target=foo)\np.start()\nprint('process running: ', p, p.is_alive())\np.terminate()\nprint('process running: ', p, p.is_alive())\np.join()\nprint('process running: ', p, p.is_alive())\nprint('process exit code:', p.exitcode)\n",
"<import token>\n\n\ndef foo():\n time.sleep(0.1)\n\n\n<assignment token>\np.start()\nprint('process running: ', p, p.is_alive())\np.terminate()\nprint('process running: ', p, p.is_alive())\np.join()\nprint('process running: ', p, p.is_alive())\nprint('process exit code:', p.exitcode)\n",
"<import token>\n\n\ndef foo():\n time.sleep(0.1)\n\n\n<assignment token>\n<code token>\n",
"<import token>\n<function token>\n<assignment token>\n<code token>\n"
] | false |
961 |
623bd858923d5f9cc109af586fdda01cd3d5fff3
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import InputRequired, Length, EqualTo, ValidationError
from passlib.hash import pbkdf2_sha256
from models import User
def invalid_credentials(form, field):
''' Username and password checker '''
username_entered = form.username.data
password_entered = field.data
user_object = User.query.filter_by(username=username_entered).first()
if user_object is None:
raise ValidationError("Username or password is incorrect")
elif not pbkdf2_sha256.verify(password_entered, user_object.password):
raise ValidationError("Username or password is incorrect")
class JoinRoomForm(FlaskForm):
room_code = StringField('room_code', validators=[InputRequired(message="Room Code Required")])
username = StringField('username', validators=[InputRequired(message="Username Required")])
submit_button = SubmitField('Join Room')
class CreateRoomForm(FlaskForm):
username = StringField('username', validators=[InputRequired(message="Username Required")])
    numplayers = StringField('numplayers', validators=[InputRequired(message="Number of players required")])
    submit_button = SubmitField('Join Room')
class CreateUsernameForm(FlaskForm):
username = StringField('username', validators=[InputRequired(message="Username Required")])
submit_button = SubmitField('Join Room')
class RegistrationForm(FlaskForm):
''' Registration form '''
username = StringField('username_label', validators=[InputRequired(message="Username required"),
Length(min=4, max=25, message="Username must be between 4 and 25 characters")])
password = PasswordField('password_label', validators=[InputRequired(message="Password required"),
Length(min=4, message="Password must be at least 4 characters")])
confirm_pswd = PasswordField('confirm_pswd_label', validators=[InputRequired(message="Please retype your password"),
EqualTo('password', message="Passwords must match")])
submit_button = SubmitField('Click Here to Start')
def validate_username(self, username):
user_object = User.query.filter_by(username=username.data).first()
if user_object:
raise ValidationError("Username already exists. Select a different username")
class LoginForm(FlaskForm):
''' Login form '''
username = StringField('username_label', validators=[InputRequired(message="Username required")])
password = PasswordField('password_label', validators=[InputRequired(message="Password required"), invalid_credentials])
submit_button = SubmitField('Login')
|
[
"from flask_wtf import FlaskForm\nfrom wtforms import StringField, PasswordField, SubmitField\nfrom wtforms.validators import InputRequired, Length, EqualTo, ValidationError\nfrom passlib.hash import pbkdf2_sha256\nfrom models import User\n\ndef invalid_credentials(form, field):\n\t''' Username and password checker '''\n\n\tusername_entered = form.username.data\n\tpassword_entered = field.data\n\n\tuser_object = User.query.filter_by(username=username_entered).first()\n\tif user_object is None:\n\t\traise ValidationError(\"Username or password is incorrect\")\n\telif not pbkdf2_sha256.verify(password_entered, user_object.password):\n\t\traise ValidationError(\"Username or password is incorrect\")\n\nclass JoinRoomForm(FlaskForm):\n\troom_code = StringField('room_code', validators=[InputRequired(message=\"Room Code Required\")])\n\tusername = StringField('username', validators=[InputRequired(message=\"Username Required\")])\n\t# room_code = StringField('room_code')\n\t# username = StringField('username')\n\tsubmit_button = SubmitField('Join Room')\nclass CreateRoomForm(FlaskForm):\n\tusername = StringField('username', validators=[InputRequired(message=\"Username Required\")])\n\tnumplayers = StringField('numplayers', validators=[InputRequired(message=\"Username Required\")])\n\t# room_code = StringField('room_code')\n\t# username = StringField('username')\n\tsubmit_button = SubmitField('Join Room')\n\nclass CreateUsernameForm(FlaskForm):\n\tusername = StringField('username', validators=[InputRequired(message=\"Username Required\")])\n\t# room_code = StringField('room_code')\n\t# username = StringField('username')\n\tsubmit_button = SubmitField('Join Room')\n\nclass RegistrationForm(FlaskForm):\n\t''' Registration form '''\n\n\tusername = StringField('username_label', validators=[InputRequired(message=\"Username required\"),\n\t\tLength(min=4, max=25, message=\"Username must be between 4 and 25 characters\")])\n\tpassword = PasswordField('password_label', validators=[InputRequired(message=\"Password required\"),\n\t\tLength(min=4, message=\"Password must be at least 4 characters\")])\n\tconfirm_pswd = PasswordField('confirm_pswd_label', validators=[InputRequired(message=\"Please retype your password\"),\n\t\tEqualTo('password', message=\"Passwords must match\")])\n\tsubmit_button = SubmitField('Click Here to Start')\n\n\tdef validate_username(self, username):\n\t\tuser_object = User.query.filter_by(username=username.data).first()\n\t\tif user_object:\n\t\t\traise ValidationError(\"Username already exists. Select a different username\")\n\n\nclass LoginForm(FlaskForm):\n\t''' Login form '''\n\n\tusername = StringField('username_label', validators=[InputRequired(message=\"Username required\")])\n\tpassword = PasswordField('password_label', validators=[InputRequired(message=\"Password required\"), invalid_credentials])\n\tsubmit_button = SubmitField('Login')\n",
"from flask_wtf import FlaskForm\nfrom wtforms import StringField, PasswordField, SubmitField\nfrom wtforms.validators import InputRequired, Length, EqualTo, ValidationError\nfrom passlib.hash import pbkdf2_sha256\nfrom models import User\n\n\ndef invalid_credentials(form, field):\n \"\"\" Username and password checker \"\"\"\n username_entered = form.username.data\n password_entered = field.data\n user_object = User.query.filter_by(username=username_entered).first()\n if user_object is None:\n raise ValidationError('Username or password is incorrect')\n elif not pbkdf2_sha256.verify(password_entered, user_object.password):\n raise ValidationError('Username or password is incorrect')\n\n\nclass JoinRoomForm(FlaskForm):\n room_code = StringField('room_code', validators=[InputRequired(message=\n 'Room Code Required')])\n username = StringField('username', validators=[InputRequired(message=\n 'Username Required')])\n submit_button = SubmitField('Join Room')\n\n\nclass CreateRoomForm(FlaskForm):\n username = StringField('username', validators=[InputRequired(message=\n 'Username Required')])\n numplayers = StringField('numplayers', validators=[InputRequired(\n message='Username Required')])\n submit_button = SubmitField('Join Room')\n\n\nclass CreateUsernameForm(FlaskForm):\n username = StringField('username', validators=[InputRequired(message=\n 'Username Required')])\n submit_button = SubmitField('Join Room')\n\n\nclass RegistrationForm(FlaskForm):\n \"\"\" Registration form \"\"\"\n username = StringField('username_label', validators=[InputRequired(\n message='Username required'), Length(min=4, max=25, message=\n 'Username must be between 4 and 25 characters')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), Length(min=4, message=\n 'Password must be at least 4 characters')])\n confirm_pswd = PasswordField('confirm_pswd_label', validators=[\n InputRequired(message='Please retype your password'), EqualTo(\n 'password', message='Passwords must match')])\n submit_button = SubmitField('Click Here to Start')\n\n def validate_username(self, username):\n user_object = User.query.filter_by(username=username.data).first()\n if user_object:\n raise ValidationError(\n 'Username already exists. Select a different username')\n\n\nclass LoginForm(FlaskForm):\n \"\"\" Login form \"\"\"\n username = StringField('username_label', validators=[InputRequired(\n message='Username required')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), invalid_credentials])\n submit_button = SubmitField('Login')\n",
"<import token>\n\n\ndef invalid_credentials(form, field):\n \"\"\" Username and password checker \"\"\"\n username_entered = form.username.data\n password_entered = field.data\n user_object = User.query.filter_by(username=username_entered).first()\n if user_object is None:\n raise ValidationError('Username or password is incorrect')\n elif not pbkdf2_sha256.verify(password_entered, user_object.password):\n raise ValidationError('Username or password is incorrect')\n\n\nclass JoinRoomForm(FlaskForm):\n room_code = StringField('room_code', validators=[InputRequired(message=\n 'Room Code Required')])\n username = StringField('username', validators=[InputRequired(message=\n 'Username Required')])\n submit_button = SubmitField('Join Room')\n\n\nclass CreateRoomForm(FlaskForm):\n username = StringField('username', validators=[InputRequired(message=\n 'Username Required')])\n numplayers = StringField('numplayers', validators=[InputRequired(\n message='Username Required')])\n submit_button = SubmitField('Join Room')\n\n\nclass CreateUsernameForm(FlaskForm):\n username = StringField('username', validators=[InputRequired(message=\n 'Username Required')])\n submit_button = SubmitField('Join Room')\n\n\nclass RegistrationForm(FlaskForm):\n \"\"\" Registration form \"\"\"\n username = StringField('username_label', validators=[InputRequired(\n message='Username required'), Length(min=4, max=25, message=\n 'Username must be between 4 and 25 characters')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), Length(min=4, message=\n 'Password must be at least 4 characters')])\n confirm_pswd = PasswordField('confirm_pswd_label', validators=[\n InputRequired(message='Please retype your password'), EqualTo(\n 'password', message='Passwords must match')])\n submit_button = SubmitField('Click Here to Start')\n\n def validate_username(self, username):\n user_object = User.query.filter_by(username=username.data).first()\n if user_object:\n raise ValidationError(\n 'Username already exists. Select a different username')\n\n\nclass LoginForm(FlaskForm):\n \"\"\" Login form \"\"\"\n username = StringField('username_label', validators=[InputRequired(\n message='Username required')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), invalid_credentials])\n submit_button = SubmitField('Login')\n",
"<import token>\n<function token>\n\n\nclass JoinRoomForm(FlaskForm):\n room_code = StringField('room_code', validators=[InputRequired(message=\n 'Room Code Required')])\n username = StringField('username', validators=[InputRequired(message=\n 'Username Required')])\n submit_button = SubmitField('Join Room')\n\n\nclass CreateRoomForm(FlaskForm):\n username = StringField('username', validators=[InputRequired(message=\n 'Username Required')])\n numplayers = StringField('numplayers', validators=[InputRequired(\n message='Username Required')])\n submit_button = SubmitField('Join Room')\n\n\nclass CreateUsernameForm(FlaskForm):\n username = StringField('username', validators=[InputRequired(message=\n 'Username Required')])\n submit_button = SubmitField('Join Room')\n\n\nclass RegistrationForm(FlaskForm):\n \"\"\" Registration form \"\"\"\n username = StringField('username_label', validators=[InputRequired(\n message='Username required'), Length(min=4, max=25, message=\n 'Username must be between 4 and 25 characters')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), Length(min=4, message=\n 'Password must be at least 4 characters')])\n confirm_pswd = PasswordField('confirm_pswd_label', validators=[\n InputRequired(message='Please retype your password'), EqualTo(\n 'password', message='Passwords must match')])\n submit_button = SubmitField('Click Here to Start')\n\n def validate_username(self, username):\n user_object = User.query.filter_by(username=username.data).first()\n if user_object:\n raise ValidationError(\n 'Username already exists. Select a different username')\n\n\nclass LoginForm(FlaskForm):\n \"\"\" Login form \"\"\"\n username = StringField('username_label', validators=[InputRequired(\n message='Username required')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), invalid_credentials])\n submit_button = SubmitField('Login')\n",
"<import token>\n<function token>\n\n\nclass JoinRoomForm(FlaskForm):\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass CreateRoomForm(FlaskForm):\n username = StringField('username', validators=[InputRequired(message=\n 'Username Required')])\n numplayers = StringField('numplayers', validators=[InputRequired(\n message='Username Required')])\n submit_button = SubmitField('Join Room')\n\n\nclass CreateUsernameForm(FlaskForm):\n username = StringField('username', validators=[InputRequired(message=\n 'Username Required')])\n submit_button = SubmitField('Join Room')\n\n\nclass RegistrationForm(FlaskForm):\n \"\"\" Registration form \"\"\"\n username = StringField('username_label', validators=[InputRequired(\n message='Username required'), Length(min=4, max=25, message=\n 'Username must be between 4 and 25 characters')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), Length(min=4, message=\n 'Password must be at least 4 characters')])\n confirm_pswd = PasswordField('confirm_pswd_label', validators=[\n InputRequired(message='Please retype your password'), EqualTo(\n 'password', message='Passwords must match')])\n submit_button = SubmitField('Click Here to Start')\n\n def validate_username(self, username):\n user_object = User.query.filter_by(username=username.data).first()\n if user_object:\n raise ValidationError(\n 'Username already exists. Select a different username')\n\n\nclass LoginForm(FlaskForm):\n \"\"\" Login form \"\"\"\n username = StringField('username_label', validators=[InputRequired(\n message='Username required')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), invalid_credentials])\n submit_button = SubmitField('Login')\n",
"<import token>\n<function token>\n<class token>\n\n\nclass CreateRoomForm(FlaskForm):\n username = StringField('username', validators=[InputRequired(message=\n 'Username Required')])\n numplayers = StringField('numplayers', validators=[InputRequired(\n message='Username Required')])\n submit_button = SubmitField('Join Room')\n\n\nclass CreateUsernameForm(FlaskForm):\n username = StringField('username', validators=[InputRequired(message=\n 'Username Required')])\n submit_button = SubmitField('Join Room')\n\n\nclass RegistrationForm(FlaskForm):\n \"\"\" Registration form \"\"\"\n username = StringField('username_label', validators=[InputRequired(\n message='Username required'), Length(min=4, max=25, message=\n 'Username must be between 4 and 25 characters')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), Length(min=4, message=\n 'Password must be at least 4 characters')])\n confirm_pswd = PasswordField('confirm_pswd_label', validators=[\n InputRequired(message='Please retype your password'), EqualTo(\n 'password', message='Passwords must match')])\n submit_button = SubmitField('Click Here to Start')\n\n def validate_username(self, username):\n user_object = User.query.filter_by(username=username.data).first()\n if user_object:\n raise ValidationError(\n 'Username already exists. Select a different username')\n\n\nclass LoginForm(FlaskForm):\n \"\"\" Login form \"\"\"\n username = StringField('username_label', validators=[InputRequired(\n message='Username required')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), invalid_credentials])\n submit_button = SubmitField('Login')\n",
"<import token>\n<function token>\n<class token>\n\n\nclass CreateRoomForm(FlaskForm):\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass CreateUsernameForm(FlaskForm):\n username = StringField('username', validators=[InputRequired(message=\n 'Username Required')])\n submit_button = SubmitField('Join Room')\n\n\nclass RegistrationForm(FlaskForm):\n \"\"\" Registration form \"\"\"\n username = StringField('username_label', validators=[InputRequired(\n message='Username required'), Length(min=4, max=25, message=\n 'Username must be between 4 and 25 characters')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), Length(min=4, message=\n 'Password must be at least 4 characters')])\n confirm_pswd = PasswordField('confirm_pswd_label', validators=[\n InputRequired(message='Please retype your password'), EqualTo(\n 'password', message='Passwords must match')])\n submit_button = SubmitField('Click Here to Start')\n\n def validate_username(self, username):\n user_object = User.query.filter_by(username=username.data).first()\n if user_object:\n raise ValidationError(\n 'Username already exists. Select a different username')\n\n\nclass LoginForm(FlaskForm):\n \"\"\" Login form \"\"\"\n username = StringField('username_label', validators=[InputRequired(\n message='Username required')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), invalid_credentials])\n submit_button = SubmitField('Login')\n",
"<import token>\n<function token>\n<class token>\n<class token>\n\n\nclass CreateUsernameForm(FlaskForm):\n username = StringField('username', validators=[InputRequired(message=\n 'Username Required')])\n submit_button = SubmitField('Join Room')\n\n\nclass RegistrationForm(FlaskForm):\n \"\"\" Registration form \"\"\"\n username = StringField('username_label', validators=[InputRequired(\n message='Username required'), Length(min=4, max=25, message=\n 'Username must be between 4 and 25 characters')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), Length(min=4, message=\n 'Password must be at least 4 characters')])\n confirm_pswd = PasswordField('confirm_pswd_label', validators=[\n InputRequired(message='Please retype your password'), EqualTo(\n 'password', message='Passwords must match')])\n submit_button = SubmitField('Click Here to Start')\n\n def validate_username(self, username):\n user_object = User.query.filter_by(username=username.data).first()\n if user_object:\n raise ValidationError(\n 'Username already exists. Select a different username')\n\n\nclass LoginForm(FlaskForm):\n \"\"\" Login form \"\"\"\n username = StringField('username_label', validators=[InputRequired(\n message='Username required')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), invalid_credentials])\n submit_button = SubmitField('Login')\n",
"<import token>\n<function token>\n<class token>\n<class token>\n\n\nclass CreateUsernameForm(FlaskForm):\n <assignment token>\n <assignment token>\n\n\nclass RegistrationForm(FlaskForm):\n \"\"\" Registration form \"\"\"\n username = StringField('username_label', validators=[InputRequired(\n message='Username required'), Length(min=4, max=25, message=\n 'Username must be between 4 and 25 characters')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), Length(min=4, message=\n 'Password must be at least 4 characters')])\n confirm_pswd = PasswordField('confirm_pswd_label', validators=[\n InputRequired(message='Please retype your password'), EqualTo(\n 'password', message='Passwords must match')])\n submit_button = SubmitField('Click Here to Start')\n\n def validate_username(self, username):\n user_object = User.query.filter_by(username=username.data).first()\n if user_object:\n raise ValidationError(\n 'Username already exists. Select a different username')\n\n\nclass LoginForm(FlaskForm):\n \"\"\" Login form \"\"\"\n username = StringField('username_label', validators=[InputRequired(\n message='Username required')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), invalid_credentials])\n submit_button = SubmitField('Login')\n",
"<import token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass RegistrationForm(FlaskForm):\n \"\"\" Registration form \"\"\"\n username = StringField('username_label', validators=[InputRequired(\n message='Username required'), Length(min=4, max=25, message=\n 'Username must be between 4 and 25 characters')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), Length(min=4, message=\n 'Password must be at least 4 characters')])\n confirm_pswd = PasswordField('confirm_pswd_label', validators=[\n InputRequired(message='Please retype your password'), EqualTo(\n 'password', message='Passwords must match')])\n submit_button = SubmitField('Click Here to Start')\n\n def validate_username(self, username):\n user_object = User.query.filter_by(username=username.data).first()\n if user_object:\n raise ValidationError(\n 'Username already exists. Select a different username')\n\n\nclass LoginForm(FlaskForm):\n \"\"\" Login form \"\"\"\n username = StringField('username_label', validators=[InputRequired(\n message='Username required')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), invalid_credentials])\n submit_button = SubmitField('Login')\n",
"<import token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass RegistrationForm(FlaskForm):\n <docstring token>\n username = StringField('username_label', validators=[InputRequired(\n message='Username required'), Length(min=4, max=25, message=\n 'Username must be between 4 and 25 characters')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), Length(min=4, message=\n 'Password must be at least 4 characters')])\n confirm_pswd = PasswordField('confirm_pswd_label', validators=[\n InputRequired(message='Please retype your password'), EqualTo(\n 'password', message='Passwords must match')])\n submit_button = SubmitField('Click Here to Start')\n\n def validate_username(self, username):\n user_object = User.query.filter_by(username=username.data).first()\n if user_object:\n raise ValidationError(\n 'Username already exists. Select a different username')\n\n\nclass LoginForm(FlaskForm):\n \"\"\" Login form \"\"\"\n username = StringField('username_label', validators=[InputRequired(\n message='Username required')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), invalid_credentials])\n submit_button = SubmitField('Login')\n",
"<import token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass RegistrationForm(FlaskForm):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n def validate_username(self, username):\n user_object = User.query.filter_by(username=username.data).first()\n if user_object:\n raise ValidationError(\n 'Username already exists. Select a different username')\n\n\nclass LoginForm(FlaskForm):\n \"\"\" Login form \"\"\"\n username = StringField('username_label', validators=[InputRequired(\n message='Username required')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), invalid_credentials])\n submit_button = SubmitField('Login')\n",
"<import token>\n<function token>\n<class token>\n<class token>\n<class token>\n\n\nclass RegistrationForm(FlaskForm):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <function token>\n\n\nclass LoginForm(FlaskForm):\n \"\"\" Login form \"\"\"\n username = StringField('username_label', validators=[InputRequired(\n message='Username required')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), invalid_credentials])\n submit_button = SubmitField('Login')\n",
"<import token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass LoginForm(FlaskForm):\n \"\"\" Login form \"\"\"\n username = StringField('username_label', validators=[InputRequired(\n message='Username required')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), invalid_credentials])\n submit_button = SubmitField('Login')\n",
"<import token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass LoginForm(FlaskForm):\n <docstring token>\n username = StringField('username_label', validators=[InputRequired(\n message='Username required')])\n password = PasswordField('password_label', validators=[InputRequired(\n message='Password required'), invalid_credentials])\n submit_button = SubmitField('Login')\n",
"<import token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass LoginForm(FlaskForm):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n",
"<import token>\n<function token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n"
] | false |
962 |
9abc5f18e2eb07afe6bc31d6bd27298350707d1d
|
"""
爱丽丝和鲍勃有不同大小的糖果棒:A[i] 是爱丽丝拥有的第 i 根糖果棒的大小,B[j] 是鲍勃拥有的第 j 根糖果棒的大小。
因为他们是朋友,所以他们想交换一根糖果棒,这样交换后,他们都有相同的糖果总量。(一个人拥有的糖果总量是他们拥有的糖果棒大小的总和。)
返回一个整数数组 ans,其中 ans[0] 是爱丽丝必须交换的糖果棒的大小,ans[1] 是 Bob 必须交换的糖果棒的大小。
如果有多个答案,你可以返回其中任何一个。保证答案存在。
"""
def fairCandySwap(A, B):
sumA, sumB = sum(A), sum(B)
setA, setB = set(A), set(B)
    delta = (sumA - sumB) // 2
for j in setB:
if j + delta in setA:
            return j + delta, j
print(fairCandySwap(A = [1,1], B = [2,2]))
print(fairCandySwap(A = [1,2], B = [2,3]))
print(fairCandySwap(A = [2], B = [1,3]))
print(fairCandySwap(A = [1,2,5], B = [2,4]))
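# Why delta = (sumA - sumB) // 2 works: if Alice gives a bar of size a and
# receives one of size b, the totals match when sumA - a + b == sumB - b + a,
# i.e. a - b == (sumA - sumB) / 2. For each b in setB the loop therefore
# looks for a = b + delta in setA.
assert fairCandySwap(A=[1, 1], B=[2, 2]) == (1, 2)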
|
[
"\"\"\"\n爱丽丝和鲍勃有不同大小的糖果棒:A[i] 是爱丽丝拥有的第 i 根糖果棒的大小,B[j] 是鲍勃拥有的第 j 根糖果棒的大小。\n\n因为他们是朋友,所以他们想交换一根糖果棒,这样交换后,他们都有相同的糖果总量。(一个人拥有的糖果总量是他们拥有的糖果棒大小的总和。)\n\n返回一个整数数组 ans,其中 ans[0] 是爱丽丝必须交换的糖果棒的大小,ans[1] 是 Bob 必须交换的糖果棒的大小。\n\n如果有多个答案,你可以返回其中任何一个。保证答案存在。\n\"\"\"\n\ndef fairCandySwap(A, B):\n sumA, sumB = sum(A), sum(B)\n setA, setB = set(A), set(B)\n delta = (sumA -sumB) // 2\n for j in setB:\n if j + delta in setA:\n return (j+delta, j)\n\nprint(fairCandySwap(A = [1,1], B = [2,2]))\nprint(fairCandySwap(A = [1,2], B = [2,3]))\nprint(fairCandySwap(A = [2], B = [1,3]))\nprint(fairCandySwap(A = [1,2,5], B = [2,4]))\n",
"<docstring token>\n\n\ndef fairCandySwap(A, B):\n sumA, sumB = sum(A), sum(B)\n setA, setB = set(A), set(B)\n delta = (sumA - sumB) // 2\n for j in setB:\n if j + delta in setA:\n return j + delta, j\n\n\nprint(fairCandySwap(A=[1, 1], B=[2, 2]))\nprint(fairCandySwap(A=[1, 2], B=[2, 3]))\nprint(fairCandySwap(A=[2], B=[1, 3]))\nprint(fairCandySwap(A=[1, 2, 5], B=[2, 4]))\n",
"<docstring token>\n\n\ndef fairCandySwap(A, B):\n sumA, sumB = sum(A), sum(B)\n setA, setB = set(A), set(B)\n delta = (sumA - sumB) // 2\n for j in setB:\n if j + delta in setA:\n return j + delta, j\n\n\n<code token>\n",
"<docstring token>\n<function token>\n<code token>\n"
] | false |
963 |
6f698196e9391d73bd99cda0a098a5bf7a3832ff
|
from turtle import *
while True:
n=input("Right or left? ")
if n == 'right':
right(60)
forward(100)
elif n == 'left':
left(60)
forward(100)
|
[
"from turtle import *\nwhile True:\n n=input(\"Right or left? \")\n\n if n == 'right':\n right(60)\n forward(100)\n elif n == 'left':\n left(60)\n forward(100)\n",
"from turtle import *\nwhile True:\n n = input('Right or left? ')\n if n == 'right':\n right(60)\n forward(100)\n elif n == 'left':\n left(60)\n forward(100)\n",
"<import token>\nwhile True:\n n = input('Right or left? ')\n if n == 'right':\n right(60)\n forward(100)\n elif n == 'left':\n left(60)\n forward(100)\n",
"<import token>\n<code token>\n"
] | false |
964 |
b4d412e8b45722a855a16dd64b7bce9b303d0ffe
|
from collections import defaultdict
class Graph:
def __init__(self):
self._graph = defaultdict(list)
self._odd_vertices = []
def add_vertex(self, v):
        if v not in self._graph:
            self._graph[v] = []
def add_edge(self, v1, v2):
self._graph[v1].append(v2)
self._check_odd_vertex(v1)
self._graph[v2].append(v1)
self._check_odd_vertex(v2)
    def _check_odd_vertex(self, v):
        # True when v currently has odd degree
        return len(self._graph[v]) % 2 != 0
    def check_eulerian(self):
        # Eulerian circuit: no vertex of odd degree (graph assumed connected)
        odd_deg = [v for v, edges in self._graph.items() if len(edges) % 2 != 0]
        return len(odd_deg) == 0
    def check_semi_eulerian(self):
        # Eulerian path: zero or exactly two vertices of odd degree
        odd_deg = [v for v, edges in self._graph.items() if len(edges) % 2 != 0]
        return len(odd_deg) in (0, 2)
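# A small smoke test of the class above (illustrative only): a triangle has
# only even degrees, so it is Eulerian; one extra pendant edge leaves exactly
# two odd-degree vertices, which makes the graph semi-Eulerian.
if __name__ == '__main__':
    g = Graph()
    for v1, v2 in [('a', 'b'), ('b', 'c'), ('c', 'a')]:
        g.add_edge(v1, v2)
    print(g.check_eulerian())       # True: every vertex has even degree
    g.add_edge('a', 'd')
    print(g.check_semi_eulerian())  # True: exactly two odd-degree vertices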
|
[
"from collections import defaultdict\n\nclass Graph:\n def __init__(self):\n self._graph = defaultdict(list)\n self._odd_vertices = []\n\n def add_vertex(self, v):\n if not v in self._graph:\n self._graph[v] = list()\n\n def add_edge(self, v1, v2):\n self._graph[v1].append(v2)\n self._check_odd_vertex(v1)\n\n self._graph[v2].append(v1)\n self._check_odd_vertex(v2)\n\n def _check_odd_vertex(self, v):\n return len(self._graph[v]) % 2\n\n def check_eulerian(self):\n odd_deg = {v:e for (e, v) in self._graph.iteritems() if len(v) % 2 != 0}\n return odd_deg or False\n\n def check_semi_eulerian(self):\n odd_deg = {v:e for (e, v) in self._graph.iteritems() if len(v) % 2 != 0}\n return odd_deg in [0, 2] or False\n\n\n\n",
"from collections import defaultdict\n\n\nclass Graph:\n\n def __init__(self):\n self._graph = defaultdict(list)\n self._odd_vertices = []\n\n def add_vertex(self, v):\n if not v in self._graph:\n self._graph[v] = list()\n\n def add_edge(self, v1, v2):\n self._graph[v1].append(v2)\n self._check_odd_vertex(v1)\n self._graph[v2].append(v1)\n self._check_odd_vertex(v2)\n\n def _check_odd_vertex(self, v):\n return len(self._graph[v]) % 2\n\n def check_eulerian(self):\n odd_deg = {v: e for e, v in self._graph.iteritems() if len(v) % 2 != 0}\n return odd_deg or False\n\n def check_semi_eulerian(self):\n odd_deg = {v: e for e, v in self._graph.iteritems() if len(v) % 2 != 0}\n return odd_deg in [0, 2] or False\n",
"<import token>\n\n\nclass Graph:\n\n def __init__(self):\n self._graph = defaultdict(list)\n self._odd_vertices = []\n\n def add_vertex(self, v):\n if not v in self._graph:\n self._graph[v] = list()\n\n def add_edge(self, v1, v2):\n self._graph[v1].append(v2)\n self._check_odd_vertex(v1)\n self._graph[v2].append(v1)\n self._check_odd_vertex(v2)\n\n def _check_odd_vertex(self, v):\n return len(self._graph[v]) % 2\n\n def check_eulerian(self):\n odd_deg = {v: e for e, v in self._graph.iteritems() if len(v) % 2 != 0}\n return odd_deg or False\n\n def check_semi_eulerian(self):\n odd_deg = {v: e for e, v in self._graph.iteritems() if len(v) % 2 != 0}\n return odd_deg in [0, 2] or False\n",
"<import token>\n\n\nclass Graph:\n <function token>\n\n def add_vertex(self, v):\n if not v in self._graph:\n self._graph[v] = list()\n\n def add_edge(self, v1, v2):\n self._graph[v1].append(v2)\n self._check_odd_vertex(v1)\n self._graph[v2].append(v1)\n self._check_odd_vertex(v2)\n\n def _check_odd_vertex(self, v):\n return len(self._graph[v]) % 2\n\n def check_eulerian(self):\n odd_deg = {v: e for e, v in self._graph.iteritems() if len(v) % 2 != 0}\n return odd_deg or False\n\n def check_semi_eulerian(self):\n odd_deg = {v: e for e, v in self._graph.iteritems() if len(v) % 2 != 0}\n return odd_deg in [0, 2] or False\n",
"<import token>\n\n\nclass Graph:\n <function token>\n\n def add_vertex(self, v):\n if not v in self._graph:\n self._graph[v] = list()\n\n def add_edge(self, v1, v2):\n self._graph[v1].append(v2)\n self._check_odd_vertex(v1)\n self._graph[v2].append(v1)\n self._check_odd_vertex(v2)\n <function token>\n\n def check_eulerian(self):\n odd_deg = {v: e for e, v in self._graph.iteritems() if len(v) % 2 != 0}\n return odd_deg or False\n\n def check_semi_eulerian(self):\n odd_deg = {v: e for e, v in self._graph.iteritems() if len(v) % 2 != 0}\n return odd_deg in [0, 2] or False\n",
"<import token>\n\n\nclass Graph:\n <function token>\n <function token>\n\n def add_edge(self, v1, v2):\n self._graph[v1].append(v2)\n self._check_odd_vertex(v1)\n self._graph[v2].append(v1)\n self._check_odd_vertex(v2)\n <function token>\n\n def check_eulerian(self):\n odd_deg = {v: e for e, v in self._graph.iteritems() if len(v) % 2 != 0}\n return odd_deg or False\n\n def check_semi_eulerian(self):\n odd_deg = {v: e for e, v in self._graph.iteritems() if len(v) % 2 != 0}\n return odd_deg in [0, 2] or False\n",
"<import token>\n\n\nclass Graph:\n <function token>\n <function token>\n <function token>\n <function token>\n\n def check_eulerian(self):\n odd_deg = {v: e for e, v in self._graph.iteritems() if len(v) % 2 != 0}\n return odd_deg or False\n\n def check_semi_eulerian(self):\n odd_deg = {v: e for e, v in self._graph.iteritems() if len(v) % 2 != 0}\n return odd_deg in [0, 2] or False\n",
"<import token>\n\n\nclass Graph:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def check_semi_eulerian(self):\n odd_deg = {v: e for e, v in self._graph.iteritems() if len(v) % 2 != 0}\n return odd_deg in [0, 2] or False\n",
"<import token>\n\n\nclass Graph:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
965 |
bab78e8a88f9a26cc13fe0c301f82880cee2b680
|
from django.contrib import admin
from .models import Predictions
@admin.register(Predictions)
class PredictionsAdmin(admin.ModelAdmin):
pass
|
[
"from django.contrib import admin\nfrom .models import Predictions\n\n\[email protected](Predictions)\nclass PredictionsAdmin(admin.ModelAdmin):\n pass\n",
"<import token>\n\n\[email protected](Predictions)\nclass PredictionsAdmin(admin.ModelAdmin):\n pass\n",
"<import token>\n<class token>\n"
] | false |
966 |
fd450b5454b65ed69b411028788c587f9674760c
|
#!/usr/bin/env python
"""
Script that generates the photon efficiency curves and stores them in a root
file.
For the moment only the pT curves for the different eta bins are created
"""
import re
import json
import ROOT as r
r.PyConfig.IgnoreCommandLineOptions = True
import numpy as np
import sympy as sp
from utils.symbolic import func_cov
from utils.graph_utils import get_lower_band, get_upper_band
from common_func import get_name
# Covariance matrix from the fit integrated over the whole eta range, where
# alpha and beta were fixed. This will be used to calculate the correlation
# coefficients between the fitted parameters, which will then be used to get
# the uncertainty bands for the parametrization
COVARIANCE = np.array([
[1.181e-06, 1.545e-06, -4.328e-06, 4.156e-06],
[1.545e-06, 7.215e-06, -1.714e-05, 5.177e-06],
[-4.328e-06, -1.714e-05, 4.228e-05, -1.481e-05],
[4.156e-06, 5.177e-06, -1.481e-05, 1.506e-05],
])
# corr = diag(cov)^{-1/2} * cov * diag(cov)^{-1/2}
CORRELATIONS = np.matmul(
np.matmul(
np.diag(1/np.sqrt(np.diag(COVARIANCE))), COVARIANCE,
), np.diag(1/np.sqrt(np.diag(COVARIANCE)))
)
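# Sanity check (added): a correlation matrix built this way has a unit
# diagonal and stays symmetric, since COVARIANCE itself is symmetric.
assert np.allclose(np.diag(CORRELATIONS), 1.0)
assert np.allclose(CORRELATIONS, CORRELATIONS.T)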
def eff_param_string():
"""
The parametrization of the efficiencies from AN-2015-11 as a string that can
be used in a TF1 constructor.
p0 * (1 - p1 * (Erf(pT + p2) - p1 / alpha * (pT - p3 * (pT^2 - p3 / beta * pT^3))))
"""
return '[0] * (1 - [1] * (TMath::Erf(x[0] + [2]) - [1] / [4] * (x[0] - [3] * (pow(x[0], 2) - [3] / [5] * pow(x[0], 3)))))'
def eff_param():
"""
Get the parametrization as ROOT.TF1
"""
return r.TF1('photon_eff_param', eff_param_string(), 0, 7)
def eff_param_sym():
"""
Get the parametrization as sympy symbolic expression by doing some string
manipulation on the parametrization and then using sympy.sympify
"""
param_str = eff_param_string()
# replace call to ROOTs erf and give x[0] a parseable name
param_str = param_str.replace('TMath::Erf', 'erf').replace('x[0]', 'x')
# convert parameters from [x] notation to px notation
param_str = re.sub(r'\[([0-9])\]', r'p\1', param_str)
# replace pow(x, y) with x**y (pythonic) syntax
param_str = re.sub(r'pow\((.*?)\s*?,\s*?([0-9])\)', r'\1**\2', param_str)
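    # At this point the string reads
    # 'p0 * (1 - p1 * (erf(x + p2) - p1 / p4 * (x - p3 * (x**2 - p3 / p5 * x**3))))'
    # which sympy.sympify can parse directly.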
return sp.sympify(param_str)
def get_corr_subs_values(corr):
"""
Get the dictionary of substitution values for the correlation matrix
"""
subs_dict = {}
n_dim = corr.shape[0]
    for irow in range(0, n_dim):
        for icol in range(irow + 1, n_dim):
            subs_dict['rho_p{}p{}'.format(irow, icol)] = corr[irow, icol]
return subs_dict
def get_cov_func(params, corr):
"""
Get the uncertainty function where only pT is left as a free parameter.
This will return a python function that can be evaluated at any given point
"""
eff = eff_param_sym()
# get the list of free parameters
free_params = []
for sym in eff.free_symbols:
if sym.name in params and params[sym.name][1] != 0:
free_params.append(sym)
# sort the parameters according to their name, such that the correlation
# coefficients actually match
free_params.sort(key=lambda x: int(x.name.replace('p', '')))
cov_eff = func_cov(eff, free_params)
# build up the dictionary of symbol -> value that will be substituted.
# In the end the returned function will only have one free parameter left
    subst_vals = {
        p: v[0] for p, v in params.items()
    }
    subst_vals.update({
        'sigma_' + p: v[1] for p, v in params.items()
    })
subst_vals.update(
get_corr_subs_values(corr)
)
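    # NOTE (added): func_cov presumably builds the first-order error
    # propagation expression
    #   var(f) = sum_ij (df/dp_i) (df/dp_j) * rho_ij * sigma_i * sigma_j,
    # so substituting central values, sigmas and correlations leaves var(f)
    # as a function of pT only.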
# NOTE: here it is assumed that 'x' is the only free parameter left
return sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))
def get_graph_err(params, corr, n_sigma=1.0, n_points=100):
"""
Get the function evaluated at n_points with uncertainties taking into
account correlations between the parameters
"""
# central function
eff_f = eff_param_sym()
    eff_f = eff_f.subs({p: v[0] for p, v in params.items()})
# NOTE: assume that 'x' is the only free parameter left
eff_f = sp.lambdify(sp.symbols('x'), eff_f)
# uncertainty function (as function of pT)
var_f = get_cov_func(params, corr)
x_bins = np.linspace(0.4, 7, n_points + 1)
x_cent = 0.5 * (x_bins[1:] + x_bins[:-1]) # bin centers
x_err = np.diff(x_bins) # "uncertainties" in x
y_cent = np.array([eff_f(x) for x in x_cent])
y_err = np.sqrt(np.array([var_f(x) for x in x_cent])) * n_sigma
return r.TGraphErrors(len(x_cent), x_cent, y_cent, x_err, y_err)
def set_params_errors(func, *params):
"""
Set all the parameters as pairs of value and uncertainty (in the order they)
are in the params list. If uncertainty = 0, the parameter is fixed
"""
central = np.array([p[0] for p in params])
uncer = np.array([p[1] for p in params])
func.SetParameters(central)
func.SetParErrors(uncer)
for idx, err in enumerate(uncer):
if err == 0:
func.FixParameter(idx, func.GetParameter(idx))
def load_params(param_file):
"""
Load the parameter file and return the list of dicts stored in it
"""
with open(param_file, 'r') as pfile:
eff_params = json.load(pfile)
return eff_params
def create_param(params, sigma_shift, uncorrelated):
"""
Create the function from the passed params and give it an appropriate name
"""
    # If the central result is desired, use the exact parametrization as a TF1
if sigma_shift == 0:
func = eff_param()
set_params_errors(func, params["p0"], params["p1"], params["p2"],
params["p3"], params["alpha"], params["beta"])
func.SetName(get_name(params["eta"], 'photon_eff_pt'))
return func
    # else get an approximation by evaluating the function at a given number of
# points and determine the uncertainties at these points, then store the
# points as a TGraph where the y-values are the central + uncertainty values
# at each evaluation point
# NOTE: Since eff_param_sym names alpha and beta p4 and p5 respectively
# (can't use beta in an expression that goes through sympy.sympify), we have
# to clone them here. We can leave the original values in, since they will
# not be picked up by the substitution command
params['p4'] = params['alpha']
params['p5'] = params['beta']
# use the global correlation matrix or an identity matrix if uncorrelated
# parameters are desired
corr = np.identity(4) if uncorrelated else CORRELATIONS
graph = get_graph_err(params, corr, np.abs(sigma_shift), 200)
if sigma_shift < 0:
graph = get_lower_band(graph)
else:
graph = get_upper_band(graph)
graph.SetName(get_name(params['eta'], 'photon_eff_pt'))
return graph
def main(args):
"""Main"""
file_option = 'update' if args.update else 'recreate'
outfile = r.TFile.Open(args.outfile, file_option)
all_params = load_params(args.paramfile)
for params in all_params:
eff = create_param(params, args.sigma, args.uncorrelated)
eff.Write()
outfile.Close()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='script to generate TF1 '
'photon efficiency parametrizations from '
'json file holding the fit parameters')
parser.add_argument('paramfile', help='json file containing the fitted '
'parameters')
parser.add_argument('-o', '--outfile', help='root file into which the TF1 '
'should be stored', default='photon_effs_param.root')
parser.add_argument('-u', '--update', help='update the output file instead '
'of recreating it', default=False, action='store_true')
parser.add_argument('-s', '--sigma', help='Use the central value + [sigma] '
'* uncertainty for each parameter', type=float,
default=0)
parser.add_argument('--uncorrelated', default=False, action='store_true',
help='Assume that the free parameters are uncorrelated '
'instead of using correlation parameters from a global '
'fit')
clargs = parser.parse_args()
main(clargs)
|
[
"#!/usr/bin/env python\n\"\"\"\nScript that generates the photon efficiency curves and stores them in a root\nfile.\n\nFor the moment only the pT curves for the different eta bins are created\n\"\"\"\n\nimport re\nimport json\nimport ROOT as r\nr.PyConfig.IgnoreCommandLineOptions = True\n\nimport numpy as np\nimport sympy as sp\n\nfrom utils.symbolic import func_cov\nfrom utils.graph_utils import get_lower_band, get_upper_band\n\nfrom common_func import get_name\n\n# Covariance matrix from the fit integrated over the whole eta range, where\n# alpha and beta were fixed. This will be used to calculate the correlation\n# coefficients between the fitted parameters, which will then be used to get\n# the uncertainty bands for the parametrization\nCOVARIANCE = np.array([\n [1.181e-06, 1.545e-06, -4.328e-06, 4.156e-06],\n [1.545e-06, 7.215e-06, -1.714e-05, 5.177e-06],\n [-4.328e-06, -1.714e-05, 4.228e-05, -1.481e-05],\n [4.156e-06, 5.177e-06, -1.481e-05, 1.506e-05],\n])\n\n# corr = diag(cov)^{-1/2} * cov * diag(cov)^{-1/2}\nCORRELATIONS = np.matmul(\n np.matmul(\n np.diag(1/np.sqrt(np.diag(COVARIANCE))), COVARIANCE,\n ), np.diag(1/np.sqrt(np.diag(COVARIANCE)))\n)\n\n\ndef eff_param_string():\n \"\"\"\n The parametrization of the efficiencies from AN-2015-11 as a string that can\n be used in a TF1 constructor.\n\n p0 * (1 - p1 * (Erf(pT + p2) - p1 / alpha * (pT - p3 * (pT^2 - p3 / beta * pT^3))))\n \"\"\"\n return '[0] * (1 - [1] * (TMath::Erf(x[0] + [2]) - [1] / [4] * (x[0] - [3] * (pow(x[0], 2) - [3] / [5] * pow(x[0], 3)))))'\n\n\ndef eff_param():\n \"\"\"\n Get the parametrization as ROOT.TF1\n \"\"\"\n return r.TF1('photon_eff_param', eff_param_string(), 0, 7)\n\n\ndef eff_param_sym():\n \"\"\"\n Get the parametrization as sympy symbolic expression by doing some string\n manipulation on the parametrization and then using sympy.sympify\n \"\"\"\n param_str = eff_param_string()\n # replace call to ROOTs erf and give x[0] a parseable name\n param_str = param_str.replace('TMath::Erf', 'erf').replace('x[0]', 'x')\n # convert parameters from [x] notation to px notation\n param_str = re.sub(r'\\[([0-9])\\]', r'p\\1', param_str)\n # replace pow(x, y) with x**y (pythonic) syntax\n param_str = re.sub(r'pow\\((.*?)\\s*?,\\s*?([0-9])\\)', r'\\1**\\2', param_str)\n\n return sp.sympify(param_str)\n\n\ndef get_corr_subs_values(corr):\n \"\"\"\n Get the dictionary of substitution values for the correlation matrix\n \"\"\"\n subs_dict = {}\n n_dim = corr.shape[0]\n for irow in xrange(0, n_dim):\n for icol in xrange(irow + 1, n_dim):\n subs_dict['rho_p{}p{}'.format(irow, icol)] = corr[irow, icol]\n\n return subs_dict\n\n\ndef get_cov_func(params, corr):\n \"\"\"\n Get the uncertainty function where only pT is left as a free parameter.\n\n This will return a python function that can be evaluated at any given point\n \"\"\"\n eff = eff_param_sym()\n # get the list of free parameters\n free_params = []\n for sym in eff.free_symbols:\n if sym.name in params and params[sym.name][1] != 0:\n free_params.append(sym)\n\n # sort the parameters according to their name, such that the correlation\n # coefficients actually match\n free_params.sort(key=lambda x: int(x.name.replace('p', '')))\n\n cov_eff = func_cov(eff, free_params)\n\n # build up the dictionary of symbol -> value that will be substituted.\n # In the end the returned function will only have one free parameter left\n subst_vals = {\n p: v[0] for p, v in params.iteritems()\n }\n subst_vals.update({\n 'sigma_' + p: v[1] for p, v in params.iteritems()\n })\n 
subst_vals.update(\n get_corr_subs_values(corr)\n )\n\n # NOTE: here it is assumed that 'x' is the only free parameter left\n return sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))\n\n\ndef get_graph_err(params, corr, n_sigma=1.0, n_points=100):\n \"\"\"\n Get the function evaluated at n_points with uncertainties taking into\n account correlations between the parameters\n \"\"\"\n # central function\n eff_f = eff_param_sym()\n eff_f = eff_f.subs({p: v[0] for p, v in params.iteritems()})\n # NOTE: assume that 'x' is the only free parameter left\n eff_f = sp.lambdify(sp.symbols('x'), eff_f)\n\n # uncertainty function (as function of pT)\n var_f = get_cov_func(params, corr)\n\n x_bins = np.linspace(0.4, 7, n_points + 1)\n x_cent = 0.5 * (x_bins[1:] + x_bins[:-1]) # bin centers\n x_err = np.diff(x_bins) # \"uncertainties\" in x\n\n y_cent = np.array([eff_f(x) for x in x_cent])\n y_err = np.sqrt(np.array([var_f(x) for x in x_cent])) * n_sigma\n\n return r.TGraphErrors(len(x_cent), x_cent, y_cent, x_err, y_err)\n\n\ndef set_params_errors(func, *params):\n \"\"\"\n Set all the parameters as pairs of value and uncertainty (in the order they)\n are in the params list. If uncertainty = 0, the parameter is fixed\n \"\"\"\n central = np.array([p[0] for p in params])\n uncer = np.array([p[1] for p in params])\n\n func.SetParameters(central)\n func.SetParErrors(uncer)\n\n for idx, err in enumerate(uncer):\n if err == 0:\n func.FixParameter(idx, func.GetParameter(idx))\n\n\ndef load_params(param_file):\n \"\"\"\n Load the parameter file and return the list of dicts stored in it\n \"\"\"\n with open(param_file, 'r') as pfile:\n eff_params = json.load(pfile)\n return eff_params\n\n\ndef create_param(params, sigma_shift, uncorrelated):\n \"\"\"\n Create the function from the passed params and give it an appropriate name\n \"\"\"\n # if the central results are desired. Use the exact parametrization as TF1\n if sigma_shift == 0:\n func = eff_param()\n set_params_errors(func, params[\"p0\"], params[\"p1\"], params[\"p2\"],\n params[\"p3\"], params[\"alpha\"], params[\"beta\"])\n\n func.SetName(get_name(params[\"eta\"], 'photon_eff_pt'))\n return func\n\n # else get an aproximation by evaluating the function at a given number of\n # points and determine the uncertainties at these points, then store the\n # points as a TGraph where the y-values are the central + uncertainty values\n # at each evaluation point\n\n # NOTE: Since eff_param_sym names alpha and beta p4 and p5 respectively\n # (can't use beta in an expression that goes through sympy.sympify), we have\n # to clone them here. 
We can leave the original values in, since they will\n # not be picked up by the substitution command\n params['p4'] = params['alpha']\n params['p5'] = params['beta']\n\n # use the global correlation matrix or an identity matrix if uncorrelated\n # parameters are desired\n corr = np.identity(4) if uncorrelated else CORRELATIONS\n graph = get_graph_err(params, corr, np.abs(sigma_shift), 200)\n\n if sigma_shift < 0:\n graph = get_lower_band(graph)\n else:\n graph = get_upper_band(graph)\n\n graph.SetName(get_name(params['eta'], 'photon_eff_pt'))\n\n return graph\n\n\ndef main(args):\n \"\"\"Main\"\"\"\n file_option = 'update' if args.update else 'recreate'\n outfile = r.TFile.Open(args.outfile, file_option)\n\n all_params = load_params(args.paramfile)\n for params in all_params:\n eff = create_param(params, args.sigma, args.uncorrelated)\n eff.Write()\n\n outfile.Close()\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(description='script to generate TF1 '\n 'photon efficiency parametrizations from '\n 'json file holding the fit parameters')\n parser.add_argument('paramfile', help='json file containing the fitted '\n 'parameters')\n parser.add_argument('-o', '--outfile', help='root file into which the TF1 '\n 'should be stored', default='photon_effs_param.root')\n parser.add_argument('-u', '--update', help='update the output file instead '\n 'of recreating it', default=False, action='store_true')\n parser.add_argument('-s', '--sigma', help='Use the central value + [sigma] '\n '* uncertainty for each parameter', type=float,\n default=0)\n parser.add_argument('--uncorrelated', default=False, action='store_true',\n help='Assume that the free parameters are uncorrelated '\n 'instead of using correlation parameters from a global '\n 'fit')\n\n clargs = parser.parse_args()\n main(clargs)\n",
"<docstring token>\nimport re\nimport json\nimport ROOT as r\nr.PyConfig.IgnoreCommandLineOptions = True\nimport numpy as np\nimport sympy as sp\nfrom utils.symbolic import func_cov\nfrom utils.graph_utils import get_lower_band, get_upper_band\nfrom common_func import get_name\nCOVARIANCE = np.array([[1.181e-06, 1.545e-06, -4.328e-06, 4.156e-06], [\n 1.545e-06, 7.215e-06, -1.714e-05, 5.177e-06], [-4.328e-06, -1.714e-05, \n 4.228e-05, -1.481e-05], [4.156e-06, 5.177e-06, -1.481e-05, 1.506e-05]])\nCORRELATIONS = np.matmul(np.matmul(np.diag(1 / np.sqrt(np.diag(COVARIANCE))\n ), COVARIANCE), np.diag(1 / np.sqrt(np.diag(COVARIANCE))))\n\n\ndef eff_param_string():\n \"\"\"\n The parametrization of the efficiencies from AN-2015-11 as a string that can\n be used in a TF1 constructor.\n\n p0 * (1 - p1 * (Erf(pT + p2) - p1 / alpha * (pT - p3 * (pT^2 - p3 / beta * pT^3))))\n \"\"\"\n return (\n '[0] * (1 - [1] * (TMath::Erf(x[0] + [2]) - [1] / [4] * (x[0] - [3] * (pow(x[0], 2) - [3] / [5] * pow(x[0], 3)))))'\n )\n\n\ndef eff_param():\n \"\"\"\n Get the parametrization as ROOT.TF1\n \"\"\"\n return r.TF1('photon_eff_param', eff_param_string(), 0, 7)\n\n\ndef eff_param_sym():\n \"\"\"\n Get the parametrization as sympy symbolic expression by doing some string\n manipulation on the parametrization and then using sympy.sympify\n \"\"\"\n param_str = eff_param_string()\n param_str = param_str.replace('TMath::Erf', 'erf').replace('x[0]', 'x')\n param_str = re.sub('\\\\[([0-9])\\\\]', 'p\\\\1', param_str)\n param_str = re.sub('pow\\\\((.*?)\\\\s*?,\\\\s*?([0-9])\\\\)', '\\\\1**\\\\2',\n param_str)\n return sp.sympify(param_str)\n\n\ndef get_corr_subs_values(corr):\n \"\"\"\n Get the dictionary of substitution values for the correlation matrix\n \"\"\"\n subs_dict = {}\n n_dim = corr.shape[0]\n for irow in xrange(0, n_dim):\n for icol in xrange(irow + 1, n_dim):\n subs_dict['rho_p{}p{}'.format(irow, icol)] = corr[irow, icol]\n return subs_dict\n\n\ndef get_cov_func(params, corr):\n \"\"\"\n Get the uncertainty function where only pT is left as a free parameter.\n\n This will return a python function that can be evaluated at any given point\n \"\"\"\n eff = eff_param_sym()\n free_params = []\n for sym in eff.free_symbols:\n if sym.name in params and params[sym.name][1] != 0:\n free_params.append(sym)\n free_params.sort(key=lambda x: int(x.name.replace('p', '')))\n cov_eff = func_cov(eff, free_params)\n subst_vals = {p: v[0] for p, v in params.iteritems()}\n subst_vals.update({('sigma_' + p): v[1] for p, v in params.iteritems()})\n subst_vals.update(get_corr_subs_values(corr))\n return sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))\n\n\ndef get_graph_err(params, corr, n_sigma=1.0, n_points=100):\n \"\"\"\n Get the function evaluated at n_points with uncertainties taking into\n account correlations between the parameters\n \"\"\"\n eff_f = eff_param_sym()\n eff_f = eff_f.subs({p: v[0] for p, v in params.iteritems()})\n eff_f = sp.lambdify(sp.symbols('x'), eff_f)\n var_f = get_cov_func(params, corr)\n x_bins = np.linspace(0.4, 7, n_points + 1)\n x_cent = 0.5 * (x_bins[1:] + x_bins[:-1])\n x_err = np.diff(x_bins)\n y_cent = np.array([eff_f(x) for x in x_cent])\n y_err = np.sqrt(np.array([var_f(x) for x in x_cent])) * n_sigma\n return r.TGraphErrors(len(x_cent), x_cent, y_cent, x_err, y_err)\n\n\ndef set_params_errors(func, *params):\n \"\"\"\n Set all the parameters as pairs of value and uncertainty (in the order they)\n are in the params list. 
If uncertainty = 0, the parameter is fixed\n \"\"\"\n central = np.array([p[0] for p in params])\n uncer = np.array([p[1] for p in params])\n func.SetParameters(central)\n func.SetParErrors(uncer)\n for idx, err in enumerate(uncer):\n if err == 0:\n func.FixParameter(idx, func.GetParameter(idx))\n\n\ndef load_params(param_file):\n \"\"\"\n Load the parameter file and return the list of dicts stored in it\n \"\"\"\n with open(param_file, 'r') as pfile:\n eff_params = json.load(pfile)\n return eff_params\n\n\ndef create_param(params, sigma_shift, uncorrelated):\n \"\"\"\n Create the function from the passed params and give it an appropriate name\n \"\"\"\n if sigma_shift == 0:\n func = eff_param()\n set_params_errors(func, params['p0'], params['p1'], params['p2'],\n params['p3'], params['alpha'], params['beta'])\n func.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return func\n params['p4'] = params['alpha']\n params['p5'] = params['beta']\n corr = np.identity(4) if uncorrelated else CORRELATIONS\n graph = get_graph_err(params, corr, np.abs(sigma_shift), 200)\n if sigma_shift < 0:\n graph = get_lower_band(graph)\n else:\n graph = get_upper_band(graph)\n graph.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return graph\n\n\ndef main(args):\n \"\"\"Main\"\"\"\n file_option = 'update' if args.update else 'recreate'\n outfile = r.TFile.Open(args.outfile, file_option)\n all_params = load_params(args.paramfile)\n for params in all_params:\n eff = create_param(params, args.sigma, args.uncorrelated)\n eff.Write()\n outfile.Close()\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(description=\n 'script to generate TF1 photon efficiency parametrizations from json file holding the fit parameters'\n )\n parser.add_argument('paramfile', help=\n 'json file containing the fitted parameters')\n parser.add_argument('-o', '--outfile', help=\n 'root file into which the TF1 should be stored', default=\n 'photon_effs_param.root')\n parser.add_argument('-u', '--update', help=\n 'update the output file instead of recreating it', default=False,\n action='store_true')\n parser.add_argument('-s', '--sigma', help=\n 'Use the central value + [sigma] * uncertainty for each parameter',\n type=float, default=0)\n parser.add_argument('--uncorrelated', default=False, action=\n 'store_true', help=\n 'Assume that the free parameters are uncorrelated instead of using correlation parameters from a global fit'\n )\n clargs = parser.parse_args()\n main(clargs)\n",
"<docstring token>\n<import token>\nr.PyConfig.IgnoreCommandLineOptions = True\n<import token>\nCOVARIANCE = np.array([[1.181e-06, 1.545e-06, -4.328e-06, 4.156e-06], [\n 1.545e-06, 7.215e-06, -1.714e-05, 5.177e-06], [-4.328e-06, -1.714e-05, \n 4.228e-05, -1.481e-05], [4.156e-06, 5.177e-06, -1.481e-05, 1.506e-05]])\nCORRELATIONS = np.matmul(np.matmul(np.diag(1 / np.sqrt(np.diag(COVARIANCE))\n ), COVARIANCE), np.diag(1 / np.sqrt(np.diag(COVARIANCE))))\n\n\ndef eff_param_string():\n \"\"\"\n The parametrization of the efficiencies from AN-2015-11 as a string that can\n be used in a TF1 constructor.\n\n p0 * (1 - p1 * (Erf(pT + p2) - p1 / alpha * (pT - p3 * (pT^2 - p3 / beta * pT^3))))\n \"\"\"\n return (\n '[0] * (1 - [1] * (TMath::Erf(x[0] + [2]) - [1] / [4] * (x[0] - [3] * (pow(x[0], 2) - [3] / [5] * pow(x[0], 3)))))'\n )\n\n\ndef eff_param():\n \"\"\"\n Get the parametrization as ROOT.TF1\n \"\"\"\n return r.TF1('photon_eff_param', eff_param_string(), 0, 7)\n\n\ndef eff_param_sym():\n \"\"\"\n Get the parametrization as sympy symbolic expression by doing some string\n manipulation on the parametrization and then using sympy.sympify\n \"\"\"\n param_str = eff_param_string()\n param_str = param_str.replace('TMath::Erf', 'erf').replace('x[0]', 'x')\n param_str = re.sub('\\\\[([0-9])\\\\]', 'p\\\\1', param_str)\n param_str = re.sub('pow\\\\((.*?)\\\\s*?,\\\\s*?([0-9])\\\\)', '\\\\1**\\\\2',\n param_str)\n return sp.sympify(param_str)\n\n\ndef get_corr_subs_values(corr):\n \"\"\"\n Get the dictionary of substitution values for the correlation matrix\n \"\"\"\n subs_dict = {}\n n_dim = corr.shape[0]\n for irow in xrange(0, n_dim):\n for icol in xrange(irow + 1, n_dim):\n subs_dict['rho_p{}p{}'.format(irow, icol)] = corr[irow, icol]\n return subs_dict\n\n\ndef get_cov_func(params, corr):\n \"\"\"\n Get the uncertainty function where only pT is left as a free parameter.\n\n This will return a python function that can be evaluated at any given point\n \"\"\"\n eff = eff_param_sym()\n free_params = []\n for sym in eff.free_symbols:\n if sym.name in params and params[sym.name][1] != 0:\n free_params.append(sym)\n free_params.sort(key=lambda x: int(x.name.replace('p', '')))\n cov_eff = func_cov(eff, free_params)\n subst_vals = {p: v[0] for p, v in params.iteritems()}\n subst_vals.update({('sigma_' + p): v[1] for p, v in params.iteritems()})\n subst_vals.update(get_corr_subs_values(corr))\n return sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))\n\n\ndef get_graph_err(params, corr, n_sigma=1.0, n_points=100):\n \"\"\"\n Get the function evaluated at n_points with uncertainties taking into\n account correlations between the parameters\n \"\"\"\n eff_f = eff_param_sym()\n eff_f = eff_f.subs({p: v[0] for p, v in params.iteritems()})\n eff_f = sp.lambdify(sp.symbols('x'), eff_f)\n var_f = get_cov_func(params, corr)\n x_bins = np.linspace(0.4, 7, n_points + 1)\n x_cent = 0.5 * (x_bins[1:] + x_bins[:-1])\n x_err = np.diff(x_bins)\n y_cent = np.array([eff_f(x) for x in x_cent])\n y_err = np.sqrt(np.array([var_f(x) for x in x_cent])) * n_sigma\n return r.TGraphErrors(len(x_cent), x_cent, y_cent, x_err, y_err)\n\n\ndef set_params_errors(func, *params):\n \"\"\"\n Set all the parameters as pairs of value and uncertainty (in the order they)\n are in the params list. 
If uncertainty = 0, the parameter is fixed\n \"\"\"\n central = np.array([p[0] for p in params])\n uncer = np.array([p[1] for p in params])\n func.SetParameters(central)\n func.SetParErrors(uncer)\n for idx, err in enumerate(uncer):\n if err == 0:\n func.FixParameter(idx, func.GetParameter(idx))\n\n\ndef load_params(param_file):\n \"\"\"\n Load the parameter file and return the list of dicts stored in it\n \"\"\"\n with open(param_file, 'r') as pfile:\n eff_params = json.load(pfile)\n return eff_params\n\n\ndef create_param(params, sigma_shift, uncorrelated):\n \"\"\"\n Create the function from the passed params and give it an appropriate name\n \"\"\"\n if sigma_shift == 0:\n func = eff_param()\n set_params_errors(func, params['p0'], params['p1'], params['p2'],\n params['p3'], params['alpha'], params['beta'])\n func.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return func\n params['p4'] = params['alpha']\n params['p5'] = params['beta']\n corr = np.identity(4) if uncorrelated else CORRELATIONS\n graph = get_graph_err(params, corr, np.abs(sigma_shift), 200)\n if sigma_shift < 0:\n graph = get_lower_band(graph)\n else:\n graph = get_upper_band(graph)\n graph.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return graph\n\n\ndef main(args):\n \"\"\"Main\"\"\"\n file_option = 'update' if args.update else 'recreate'\n outfile = r.TFile.Open(args.outfile, file_option)\n all_params = load_params(args.paramfile)\n for params in all_params:\n eff = create_param(params, args.sigma, args.uncorrelated)\n eff.Write()\n outfile.Close()\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(description=\n 'script to generate TF1 photon efficiency parametrizations from json file holding the fit parameters'\n )\n parser.add_argument('paramfile', help=\n 'json file containing the fitted parameters')\n parser.add_argument('-o', '--outfile', help=\n 'root file into which the TF1 should be stored', default=\n 'photon_effs_param.root')\n parser.add_argument('-u', '--update', help=\n 'update the output file instead of recreating it', default=False,\n action='store_true')\n parser.add_argument('-s', '--sigma', help=\n 'Use the central value + [sigma] * uncertainty for each parameter',\n type=float, default=0)\n parser.add_argument('--uncorrelated', default=False, action=\n 'store_true', help=\n 'Assume that the free parameters are uncorrelated instead of using correlation parameters from a global fit'\n )\n clargs = parser.parse_args()\n main(clargs)\n",
"<docstring token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\n\n\ndef eff_param_string():\n \"\"\"\n The parametrization of the efficiencies from AN-2015-11 as a string that can\n be used in a TF1 constructor.\n\n p0 * (1 - p1 * (Erf(pT + p2) - p1 / alpha * (pT - p3 * (pT^2 - p3 / beta * pT^3))))\n \"\"\"\n return (\n '[0] * (1 - [1] * (TMath::Erf(x[0] + [2]) - [1] / [4] * (x[0] - [3] * (pow(x[0], 2) - [3] / [5] * pow(x[0], 3)))))'\n )\n\n\ndef eff_param():\n \"\"\"\n Get the parametrization as ROOT.TF1\n \"\"\"\n return r.TF1('photon_eff_param', eff_param_string(), 0, 7)\n\n\ndef eff_param_sym():\n \"\"\"\n Get the parametrization as sympy symbolic expression by doing some string\n manipulation on the parametrization and then using sympy.sympify\n \"\"\"\n param_str = eff_param_string()\n param_str = param_str.replace('TMath::Erf', 'erf').replace('x[0]', 'x')\n param_str = re.sub('\\\\[([0-9])\\\\]', 'p\\\\1', param_str)\n param_str = re.sub('pow\\\\((.*?)\\\\s*?,\\\\s*?([0-9])\\\\)', '\\\\1**\\\\2',\n param_str)\n return sp.sympify(param_str)\n\n\ndef get_corr_subs_values(corr):\n \"\"\"\n Get the dictionary of substitution values for the correlation matrix\n \"\"\"\n subs_dict = {}\n n_dim = corr.shape[0]\n for irow in xrange(0, n_dim):\n for icol in xrange(irow + 1, n_dim):\n subs_dict['rho_p{}p{}'.format(irow, icol)] = corr[irow, icol]\n return subs_dict\n\n\ndef get_cov_func(params, corr):\n \"\"\"\n Get the uncertainty function where only pT is left as a free parameter.\n\n This will return a python function that can be evaluated at any given point\n \"\"\"\n eff = eff_param_sym()\n free_params = []\n for sym in eff.free_symbols:\n if sym.name in params and params[sym.name][1] != 0:\n free_params.append(sym)\n free_params.sort(key=lambda x: int(x.name.replace('p', '')))\n cov_eff = func_cov(eff, free_params)\n subst_vals = {p: v[0] for p, v in params.iteritems()}\n subst_vals.update({('sigma_' + p): v[1] for p, v in params.iteritems()})\n subst_vals.update(get_corr_subs_values(corr))\n return sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))\n\n\ndef get_graph_err(params, corr, n_sigma=1.0, n_points=100):\n \"\"\"\n Get the function evaluated at n_points with uncertainties taking into\n account correlations between the parameters\n \"\"\"\n eff_f = eff_param_sym()\n eff_f = eff_f.subs({p: v[0] for p, v in params.iteritems()})\n eff_f = sp.lambdify(sp.symbols('x'), eff_f)\n var_f = get_cov_func(params, corr)\n x_bins = np.linspace(0.4, 7, n_points + 1)\n x_cent = 0.5 * (x_bins[1:] + x_bins[:-1])\n x_err = np.diff(x_bins)\n y_cent = np.array([eff_f(x) for x in x_cent])\n y_err = np.sqrt(np.array([var_f(x) for x in x_cent])) * n_sigma\n return r.TGraphErrors(len(x_cent), x_cent, y_cent, x_err, y_err)\n\n\ndef set_params_errors(func, *params):\n \"\"\"\n Set all the parameters as pairs of value and uncertainty (in the order they)\n are in the params list. 
If uncertainty = 0, the parameter is fixed\n \"\"\"\n central = np.array([p[0] for p in params])\n uncer = np.array([p[1] for p in params])\n func.SetParameters(central)\n func.SetParErrors(uncer)\n for idx, err in enumerate(uncer):\n if err == 0:\n func.FixParameter(idx, func.GetParameter(idx))\n\n\ndef load_params(param_file):\n \"\"\"\n Load the parameter file and return the list of dicts stored in it\n \"\"\"\n with open(param_file, 'r') as pfile:\n eff_params = json.load(pfile)\n return eff_params\n\n\ndef create_param(params, sigma_shift, uncorrelated):\n \"\"\"\n Create the function from the passed params and give it an appropriate name\n \"\"\"\n if sigma_shift == 0:\n func = eff_param()\n set_params_errors(func, params['p0'], params['p1'], params['p2'],\n params['p3'], params['alpha'], params['beta'])\n func.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return func\n params['p4'] = params['alpha']\n params['p5'] = params['beta']\n corr = np.identity(4) if uncorrelated else CORRELATIONS\n graph = get_graph_err(params, corr, np.abs(sigma_shift), 200)\n if sigma_shift < 0:\n graph = get_lower_band(graph)\n else:\n graph = get_upper_band(graph)\n graph.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return graph\n\n\ndef main(args):\n \"\"\"Main\"\"\"\n file_option = 'update' if args.update else 'recreate'\n outfile = r.TFile.Open(args.outfile, file_option)\n all_params = load_params(args.paramfile)\n for params in all_params:\n eff = create_param(params, args.sigma, args.uncorrelated)\n eff.Write()\n outfile.Close()\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser(description=\n 'script to generate TF1 photon efficiency parametrizations from json file holding the fit parameters'\n )\n parser.add_argument('paramfile', help=\n 'json file containing the fitted parameters')\n parser.add_argument('-o', '--outfile', help=\n 'root file into which the TF1 should be stored', default=\n 'photon_effs_param.root')\n parser.add_argument('-u', '--update', help=\n 'update the output file instead of recreating it', default=False,\n action='store_true')\n parser.add_argument('-s', '--sigma', help=\n 'Use the central value + [sigma] * uncertainty for each parameter',\n type=float, default=0)\n parser.add_argument('--uncorrelated', default=False, action=\n 'store_true', help=\n 'Assume that the free parameters are uncorrelated instead of using correlation parameters from a global fit'\n )\n clargs = parser.parse_args()\n main(clargs)\n",
"<docstring token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\n\n\ndef eff_param_string():\n \"\"\"\n The parametrization of the efficiencies from AN-2015-11 as a string that can\n be used in a TF1 constructor.\n\n p0 * (1 - p1 * (Erf(pT + p2) - p1 / alpha * (pT - p3 * (pT^2 - p3 / beta * pT^3))))\n \"\"\"\n return (\n '[0] * (1 - [1] * (TMath::Erf(x[0] + [2]) - [1] / [4] * (x[0] - [3] * (pow(x[0], 2) - [3] / [5] * pow(x[0], 3)))))'\n )\n\n\ndef eff_param():\n \"\"\"\n Get the parametrization as ROOT.TF1\n \"\"\"\n return r.TF1('photon_eff_param', eff_param_string(), 0, 7)\n\n\ndef eff_param_sym():\n \"\"\"\n Get the parametrization as sympy symbolic expression by doing some string\n manipulation on the parametrization and then using sympy.sympify\n \"\"\"\n param_str = eff_param_string()\n param_str = param_str.replace('TMath::Erf', 'erf').replace('x[0]', 'x')\n param_str = re.sub('\\\\[([0-9])\\\\]', 'p\\\\1', param_str)\n param_str = re.sub('pow\\\\((.*?)\\\\s*?,\\\\s*?([0-9])\\\\)', '\\\\1**\\\\2',\n param_str)\n return sp.sympify(param_str)\n\n\ndef get_corr_subs_values(corr):\n \"\"\"\n Get the dictionary of substitution values for the correlation matrix\n \"\"\"\n subs_dict = {}\n n_dim = corr.shape[0]\n for irow in xrange(0, n_dim):\n for icol in xrange(irow + 1, n_dim):\n subs_dict['rho_p{}p{}'.format(irow, icol)] = corr[irow, icol]\n return subs_dict\n\n\ndef get_cov_func(params, corr):\n \"\"\"\n Get the uncertainty function where only pT is left as a free parameter.\n\n This will return a python function that can be evaluated at any given point\n \"\"\"\n eff = eff_param_sym()\n free_params = []\n for sym in eff.free_symbols:\n if sym.name in params and params[sym.name][1] != 0:\n free_params.append(sym)\n free_params.sort(key=lambda x: int(x.name.replace('p', '')))\n cov_eff = func_cov(eff, free_params)\n subst_vals = {p: v[0] for p, v in params.iteritems()}\n subst_vals.update({('sigma_' + p): v[1] for p, v in params.iteritems()})\n subst_vals.update(get_corr_subs_values(corr))\n return sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))\n\n\ndef get_graph_err(params, corr, n_sigma=1.0, n_points=100):\n \"\"\"\n Get the function evaluated at n_points with uncertainties taking into\n account correlations between the parameters\n \"\"\"\n eff_f = eff_param_sym()\n eff_f = eff_f.subs({p: v[0] for p, v in params.iteritems()})\n eff_f = sp.lambdify(sp.symbols('x'), eff_f)\n var_f = get_cov_func(params, corr)\n x_bins = np.linspace(0.4, 7, n_points + 1)\n x_cent = 0.5 * (x_bins[1:] + x_bins[:-1])\n x_err = np.diff(x_bins)\n y_cent = np.array([eff_f(x) for x in x_cent])\n y_err = np.sqrt(np.array([var_f(x) for x in x_cent])) * n_sigma\n return r.TGraphErrors(len(x_cent), x_cent, y_cent, x_err, y_err)\n\n\ndef set_params_errors(func, *params):\n \"\"\"\n Set all the parameters as pairs of value and uncertainty (in the order they)\n are in the params list. 
If uncertainty = 0, the parameter is fixed\n \"\"\"\n central = np.array([p[0] for p in params])\n uncer = np.array([p[1] for p in params])\n func.SetParameters(central)\n func.SetParErrors(uncer)\n for idx, err in enumerate(uncer):\n if err == 0:\n func.FixParameter(idx, func.GetParameter(idx))\n\n\ndef load_params(param_file):\n \"\"\"\n Load the parameter file and return the list of dicts stored in it\n \"\"\"\n with open(param_file, 'r') as pfile:\n eff_params = json.load(pfile)\n return eff_params\n\n\ndef create_param(params, sigma_shift, uncorrelated):\n \"\"\"\n Create the function from the passed params and give it an appropriate name\n \"\"\"\n if sigma_shift == 0:\n func = eff_param()\n set_params_errors(func, params['p0'], params['p1'], params['p2'],\n params['p3'], params['alpha'], params['beta'])\n func.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return func\n params['p4'] = params['alpha']\n params['p5'] = params['beta']\n corr = np.identity(4) if uncorrelated else CORRELATIONS\n graph = get_graph_err(params, corr, np.abs(sigma_shift), 200)\n if sigma_shift < 0:\n graph = get_lower_band(graph)\n else:\n graph = get_upper_band(graph)\n graph.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return graph\n\n\ndef main(args):\n \"\"\"Main\"\"\"\n file_option = 'update' if args.update else 'recreate'\n outfile = r.TFile.Open(args.outfile, file_option)\n all_params = load_params(args.paramfile)\n for params in all_params:\n eff = create_param(params, args.sigma, args.uncorrelated)\n eff.Write()\n outfile.Close()\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\n\n\ndef eff_param_string():\n \"\"\"\n The parametrization of the efficiencies from AN-2015-11 as a string that can\n be used in a TF1 constructor.\n\n p0 * (1 - p1 * (Erf(pT + p2) - p1 / alpha * (pT - p3 * (pT^2 - p3 / beta * pT^3))))\n \"\"\"\n return (\n '[0] * (1 - [1] * (TMath::Erf(x[0] + [2]) - [1] / [4] * (x[0] - [3] * (pow(x[0], 2) - [3] / [5] * pow(x[0], 3)))))'\n )\n\n\ndef eff_param():\n \"\"\"\n Get the parametrization as ROOT.TF1\n \"\"\"\n return r.TF1('photon_eff_param', eff_param_string(), 0, 7)\n\n\ndef eff_param_sym():\n \"\"\"\n Get the parametrization as sympy symbolic expression by doing some string\n manipulation on the parametrization and then using sympy.sympify\n \"\"\"\n param_str = eff_param_string()\n param_str = param_str.replace('TMath::Erf', 'erf').replace('x[0]', 'x')\n param_str = re.sub('\\\\[([0-9])\\\\]', 'p\\\\1', param_str)\n param_str = re.sub('pow\\\\((.*?)\\\\s*?,\\\\s*?([0-9])\\\\)', '\\\\1**\\\\2',\n param_str)\n return sp.sympify(param_str)\n\n\n<function token>\n\n\ndef get_cov_func(params, corr):\n \"\"\"\n Get the uncertainty function where only pT is left as a free parameter.\n\n This will return a python function that can be evaluated at any given point\n \"\"\"\n eff = eff_param_sym()\n free_params = []\n for sym in eff.free_symbols:\n if sym.name in params and params[sym.name][1] != 0:\n free_params.append(sym)\n free_params.sort(key=lambda x: int(x.name.replace('p', '')))\n cov_eff = func_cov(eff, free_params)\n subst_vals = {p: v[0] for p, v in params.iteritems()}\n subst_vals.update({('sigma_' + p): v[1] for p, v in params.iteritems()})\n subst_vals.update(get_corr_subs_values(corr))\n return sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))\n\n\ndef get_graph_err(params, corr, n_sigma=1.0, n_points=100):\n \"\"\"\n Get the function evaluated at n_points with uncertainties taking into\n account correlations between the parameters\n \"\"\"\n eff_f = eff_param_sym()\n eff_f = eff_f.subs({p: v[0] for p, v in params.iteritems()})\n eff_f = sp.lambdify(sp.symbols('x'), eff_f)\n var_f = get_cov_func(params, corr)\n x_bins = np.linspace(0.4, 7, n_points + 1)\n x_cent = 0.5 * (x_bins[1:] + x_bins[:-1])\n x_err = np.diff(x_bins)\n y_cent = np.array([eff_f(x) for x in x_cent])\n y_err = np.sqrt(np.array([var_f(x) for x in x_cent])) * n_sigma\n return r.TGraphErrors(len(x_cent), x_cent, y_cent, x_err, y_err)\n\n\ndef set_params_errors(func, *params):\n \"\"\"\n Set all the parameters as pairs of value and uncertainty (in the order they)\n are in the params list. 
If uncertainty = 0, the parameter is fixed\n \"\"\"\n central = np.array([p[0] for p in params])\n uncer = np.array([p[1] for p in params])\n func.SetParameters(central)\n func.SetParErrors(uncer)\n for idx, err in enumerate(uncer):\n if err == 0:\n func.FixParameter(idx, func.GetParameter(idx))\n\n\ndef load_params(param_file):\n \"\"\"\n Load the parameter file and return the list of dicts stored in it\n \"\"\"\n with open(param_file, 'r') as pfile:\n eff_params = json.load(pfile)\n return eff_params\n\n\ndef create_param(params, sigma_shift, uncorrelated):\n \"\"\"\n Create the function from the passed params and give it an appropriate name\n \"\"\"\n if sigma_shift == 0:\n func = eff_param()\n set_params_errors(func, params['p0'], params['p1'], params['p2'],\n params['p3'], params['alpha'], params['beta'])\n func.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return func\n params['p4'] = params['alpha']\n params['p5'] = params['beta']\n corr = np.identity(4) if uncorrelated else CORRELATIONS\n graph = get_graph_err(params, corr, np.abs(sigma_shift), 200)\n if sigma_shift < 0:\n graph = get_lower_band(graph)\n else:\n graph = get_upper_band(graph)\n graph.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return graph\n\n\ndef main(args):\n \"\"\"Main\"\"\"\n file_option = 'update' if args.update else 'recreate'\n outfile = r.TFile.Open(args.outfile, file_option)\n all_params = load_params(args.paramfile)\n for params in all_params:\n eff = create_param(params, args.sigma, args.uncorrelated)\n eff.Write()\n outfile.Close()\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\n\n\ndef eff_param_string():\n \"\"\"\n The parametrization of the efficiencies from AN-2015-11 as a string that can\n be used in a TF1 constructor.\n\n p0 * (1 - p1 * (Erf(pT + p2) - p1 / alpha * (pT - p3 * (pT^2 - p3 / beta * pT^3))))\n \"\"\"\n return (\n '[0] * (1 - [1] * (TMath::Erf(x[0] + [2]) - [1] / [4] * (x[0] - [3] * (pow(x[0], 2) - [3] / [5] * pow(x[0], 3)))))'\n )\n\n\ndef eff_param():\n \"\"\"\n Get the parametrization as ROOT.TF1\n \"\"\"\n return r.TF1('photon_eff_param', eff_param_string(), 0, 7)\n\n\ndef eff_param_sym():\n \"\"\"\n Get the parametrization as sympy symbolic expression by doing some string\n manipulation on the parametrization and then using sympy.sympify\n \"\"\"\n param_str = eff_param_string()\n param_str = param_str.replace('TMath::Erf', 'erf').replace('x[0]', 'x')\n param_str = re.sub('\\\\[([0-9])\\\\]', 'p\\\\1', param_str)\n param_str = re.sub('pow\\\\((.*?)\\\\s*?,\\\\s*?([0-9])\\\\)', '\\\\1**\\\\2',\n param_str)\n return sp.sympify(param_str)\n\n\n<function token>\n\n\ndef get_cov_func(params, corr):\n \"\"\"\n Get the uncertainty function where only pT is left as a free parameter.\n\n This will return a python function that can be evaluated at any given point\n \"\"\"\n eff = eff_param_sym()\n free_params = []\n for sym in eff.free_symbols:\n if sym.name in params and params[sym.name][1] != 0:\n free_params.append(sym)\n free_params.sort(key=lambda x: int(x.name.replace('p', '')))\n cov_eff = func_cov(eff, free_params)\n subst_vals = {p: v[0] for p, v in params.iteritems()}\n subst_vals.update({('sigma_' + p): v[1] for p, v in params.iteritems()})\n subst_vals.update(get_corr_subs_values(corr))\n return sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))\n\n\n<function token>\n\n\ndef set_params_errors(func, *params):\n \"\"\"\n Set all the parameters as pairs of value and uncertainty (in the order they)\n are in the params list. 
If uncertainty = 0, the parameter is fixed\n \"\"\"\n central = np.array([p[0] for p in params])\n uncer = np.array([p[1] for p in params])\n func.SetParameters(central)\n func.SetParErrors(uncer)\n for idx, err in enumerate(uncer):\n if err == 0:\n func.FixParameter(idx, func.GetParameter(idx))\n\n\ndef load_params(param_file):\n \"\"\"\n Load the parameter file and return the list of dicts stored in it\n \"\"\"\n with open(param_file, 'r') as pfile:\n eff_params = json.load(pfile)\n return eff_params\n\n\ndef create_param(params, sigma_shift, uncorrelated):\n \"\"\"\n Create the function from the passed params and give it an appropriate name\n \"\"\"\n if sigma_shift == 0:\n func = eff_param()\n set_params_errors(func, params['p0'], params['p1'], params['p2'],\n params['p3'], params['alpha'], params['beta'])\n func.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return func\n params['p4'] = params['alpha']\n params['p5'] = params['beta']\n corr = np.identity(4) if uncorrelated else CORRELATIONS\n graph = get_graph_err(params, corr, np.abs(sigma_shift), 200)\n if sigma_shift < 0:\n graph = get_lower_band(graph)\n else:\n graph = get_upper_band(graph)\n graph.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return graph\n\n\ndef main(args):\n \"\"\"Main\"\"\"\n file_option = 'update' if args.update else 'recreate'\n outfile = r.TFile.Open(args.outfile, file_option)\n all_params = load_params(args.paramfile)\n for params in all_params:\n eff = create_param(params, args.sigma, args.uncorrelated)\n eff.Write()\n outfile.Close()\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\ndef eff_param():\n \"\"\"\n Get the parametrization as ROOT.TF1\n \"\"\"\n return r.TF1('photon_eff_param', eff_param_string(), 0, 7)\n\n\ndef eff_param_sym():\n \"\"\"\n Get the parametrization as sympy symbolic expression by doing some string\n manipulation on the parametrization and then using sympy.sympify\n \"\"\"\n param_str = eff_param_string()\n param_str = param_str.replace('TMath::Erf', 'erf').replace('x[0]', 'x')\n param_str = re.sub('\\\\[([0-9])\\\\]', 'p\\\\1', param_str)\n param_str = re.sub('pow\\\\((.*?)\\\\s*?,\\\\s*?([0-9])\\\\)', '\\\\1**\\\\2',\n param_str)\n return sp.sympify(param_str)\n\n\n<function token>\n\n\ndef get_cov_func(params, corr):\n \"\"\"\n Get the uncertainty function where only pT is left as a free parameter.\n\n This will return a python function that can be evaluated at any given point\n \"\"\"\n eff = eff_param_sym()\n free_params = []\n for sym in eff.free_symbols:\n if sym.name in params and params[sym.name][1] != 0:\n free_params.append(sym)\n free_params.sort(key=lambda x: int(x.name.replace('p', '')))\n cov_eff = func_cov(eff, free_params)\n subst_vals = {p: v[0] for p, v in params.iteritems()}\n subst_vals.update({('sigma_' + p): v[1] for p, v in params.iteritems()})\n subst_vals.update(get_corr_subs_values(corr))\n return sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))\n\n\n<function token>\n\n\ndef set_params_errors(func, *params):\n \"\"\"\n Set all the parameters as pairs of value and uncertainty (in the order they)\n are in the params list. If uncertainty = 0, the parameter is fixed\n \"\"\"\n central = np.array([p[0] for p in params])\n uncer = np.array([p[1] for p in params])\n func.SetParameters(central)\n func.SetParErrors(uncer)\n for idx, err in enumerate(uncer):\n if err == 0:\n func.FixParameter(idx, func.GetParameter(idx))\n\n\ndef load_params(param_file):\n \"\"\"\n Load the parameter file and return the list of dicts stored in it\n \"\"\"\n with open(param_file, 'r') as pfile:\n eff_params = json.load(pfile)\n return eff_params\n\n\ndef create_param(params, sigma_shift, uncorrelated):\n \"\"\"\n Create the function from the passed params and give it an appropriate name\n \"\"\"\n if sigma_shift == 0:\n func = eff_param()\n set_params_errors(func, params['p0'], params['p1'], params['p2'],\n params['p3'], params['alpha'], params['beta'])\n func.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return func\n params['p4'] = params['alpha']\n params['p5'] = params['beta']\n corr = np.identity(4) if uncorrelated else CORRELATIONS\n graph = get_graph_err(params, corr, np.abs(sigma_shift), 200)\n if sigma_shift < 0:\n graph = get_lower_band(graph)\n else:\n graph = get_upper_band(graph)\n graph.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return graph\n\n\ndef main(args):\n \"\"\"Main\"\"\"\n file_option = 'update' if args.update else 'recreate'\n outfile = r.TFile.Open(args.outfile, file_option)\n all_params = load_params(args.paramfile)\n for params in all_params:\n eff = create_param(params, args.sigma, args.uncorrelated)\n eff.Write()\n outfile.Close()\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\ndef eff_param():\n \"\"\"\n Get the parametrization as ROOT.TF1\n \"\"\"\n return r.TF1('photon_eff_param', eff_param_string(), 0, 7)\n\n\ndef eff_param_sym():\n \"\"\"\n Get the parametrization as sympy symbolic expression by doing some string\n manipulation on the parametrization and then using sympy.sympify\n \"\"\"\n param_str = eff_param_string()\n param_str = param_str.replace('TMath::Erf', 'erf').replace('x[0]', 'x')\n param_str = re.sub('\\\\[([0-9])\\\\]', 'p\\\\1', param_str)\n param_str = re.sub('pow\\\\((.*?)\\\\s*?,\\\\s*?([0-9])\\\\)', '\\\\1**\\\\2',\n param_str)\n return sp.sympify(param_str)\n\n\n<function token>\n\n\ndef get_cov_func(params, corr):\n \"\"\"\n Get the uncertainty function where only pT is left as a free parameter.\n\n This will return a python function that can be evaluated at any given point\n \"\"\"\n eff = eff_param_sym()\n free_params = []\n for sym in eff.free_symbols:\n if sym.name in params and params[sym.name][1] != 0:\n free_params.append(sym)\n free_params.sort(key=lambda x: int(x.name.replace('p', '')))\n cov_eff = func_cov(eff, free_params)\n subst_vals = {p: v[0] for p, v in params.iteritems()}\n subst_vals.update({('sigma_' + p): v[1] for p, v in params.iteritems()})\n subst_vals.update(get_corr_subs_values(corr))\n return sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))\n\n\n<function token>\n\n\ndef set_params_errors(func, *params):\n \"\"\"\n Set all the parameters as pairs of value and uncertainty (in the order they)\n are in the params list. If uncertainty = 0, the parameter is fixed\n \"\"\"\n central = np.array([p[0] for p in params])\n uncer = np.array([p[1] for p in params])\n func.SetParameters(central)\n func.SetParErrors(uncer)\n for idx, err in enumerate(uncer):\n if err == 0:\n func.FixParameter(idx, func.GetParameter(idx))\n\n\n<function token>\n\n\ndef create_param(params, sigma_shift, uncorrelated):\n \"\"\"\n Create the function from the passed params and give it an appropriate name\n \"\"\"\n if sigma_shift == 0:\n func = eff_param()\n set_params_errors(func, params['p0'], params['p1'], params['p2'],\n params['p3'], params['alpha'], params['beta'])\n func.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return func\n params['p4'] = params['alpha']\n params['p5'] = params['beta']\n corr = np.identity(4) if uncorrelated else CORRELATIONS\n graph = get_graph_err(params, corr, np.abs(sigma_shift), 200)\n if sigma_shift < 0:\n graph = get_lower_band(graph)\n else:\n graph = get_upper_band(graph)\n graph.SetName(get_name(params['eta'], 'photon_eff_pt'))\n return graph\n\n\ndef main(args):\n \"\"\"Main\"\"\"\n file_option = 'update' if args.update else 'recreate'\n outfile = r.TFile.Open(args.outfile, file_option)\n all_params = load_params(args.paramfile)\n for params in all_params:\n eff = create_param(params, args.sigma, args.uncorrelated)\n eff.Write()\n outfile.Close()\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\ndef eff_param():\n \"\"\"\n Get the parametrization as ROOT.TF1\n \"\"\"\n return r.TF1('photon_eff_param', eff_param_string(), 0, 7)\n\n\ndef eff_param_sym():\n \"\"\"\n Get the parametrization as sympy symbolic expression by doing some string\n manipulation on the parametrization and then using sympy.sympify\n \"\"\"\n param_str = eff_param_string()\n param_str = param_str.replace('TMath::Erf', 'erf').replace('x[0]', 'x')\n param_str = re.sub('\\\\[([0-9])\\\\]', 'p\\\\1', param_str)\n param_str = re.sub('pow\\\\((.*?)\\\\s*?,\\\\s*?([0-9])\\\\)', '\\\\1**\\\\2',\n param_str)\n return sp.sympify(param_str)\n\n\n<function token>\n\n\ndef get_cov_func(params, corr):\n \"\"\"\n Get the uncertainty function where only pT is left as a free parameter.\n\n This will return a python function that can be evaluated at any given point\n \"\"\"\n eff = eff_param_sym()\n free_params = []\n for sym in eff.free_symbols:\n if sym.name in params and params[sym.name][1] != 0:\n free_params.append(sym)\n free_params.sort(key=lambda x: int(x.name.replace('p', '')))\n cov_eff = func_cov(eff, free_params)\n subst_vals = {p: v[0] for p, v in params.iteritems()}\n subst_vals.update({('sigma_' + p): v[1] for p, v in params.iteritems()})\n subst_vals.update(get_corr_subs_values(corr))\n return sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))\n\n\n<function token>\n\n\ndef set_params_errors(func, *params):\n \"\"\"\n Set all the parameters as pairs of value and uncertainty (in the order they)\n are in the params list. If uncertainty = 0, the parameter is fixed\n \"\"\"\n central = np.array([p[0] for p in params])\n uncer = np.array([p[1] for p in params])\n func.SetParameters(central)\n func.SetParErrors(uncer)\n for idx, err in enumerate(uncer):\n if err == 0:\n func.FixParameter(idx, func.GetParameter(idx))\n\n\n<function token>\n<function token>\n\n\ndef main(args):\n \"\"\"Main\"\"\"\n file_option = 'update' if args.update else 'recreate'\n outfile = r.TFile.Open(args.outfile, file_option)\n all_params = load_params(args.paramfile)\n for params in all_params:\n eff = create_param(params, args.sigma, args.uncorrelated)\n eff.Write()\n outfile.Close()\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\ndef eff_param():\n \"\"\"\n Get the parametrization as ROOT.TF1\n \"\"\"\n return r.TF1('photon_eff_param', eff_param_string(), 0, 7)\n\n\n<function token>\n<function token>\n\n\ndef get_cov_func(params, corr):\n \"\"\"\n Get the uncertainty function where only pT is left as a free parameter.\n\n This will return a python function that can be evaluated at any given point\n \"\"\"\n eff = eff_param_sym()\n free_params = []\n for sym in eff.free_symbols:\n if sym.name in params and params[sym.name][1] != 0:\n free_params.append(sym)\n free_params.sort(key=lambda x: int(x.name.replace('p', '')))\n cov_eff = func_cov(eff, free_params)\n subst_vals = {p: v[0] for p, v in params.iteritems()}\n subst_vals.update({('sigma_' + p): v[1] for p, v in params.iteritems()})\n subst_vals.update(get_corr_subs_values(corr))\n return sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))\n\n\n<function token>\n\n\ndef set_params_errors(func, *params):\n \"\"\"\n Set all the parameters as pairs of value and uncertainty (in the order they)\n are in the params list. If uncertainty = 0, the parameter is fixed\n \"\"\"\n central = np.array([p[0] for p in params])\n uncer = np.array([p[1] for p in params])\n func.SetParameters(central)\n func.SetParErrors(uncer)\n for idx, err in enumerate(uncer):\n if err == 0:\n func.FixParameter(idx, func.GetParameter(idx))\n\n\n<function token>\n<function token>\n\n\ndef main(args):\n \"\"\"Main\"\"\"\n file_option = 'update' if args.update else 'recreate'\n outfile = r.TFile.Open(args.outfile, file_option)\n all_params = load_params(args.paramfile)\n for params in all_params:\n eff = create_param(params, args.sigma, args.uncorrelated)\n eff.Write()\n outfile.Close()\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n\n\ndef eff_param():\n \"\"\"\n Get the parametrization as ROOT.TF1\n \"\"\"\n return r.TF1('photon_eff_param', eff_param_string(), 0, 7)\n\n\n<function token>\n<function token>\n\n\ndef get_cov_func(params, corr):\n \"\"\"\n Get the uncertainty function where only pT is left as a free parameter.\n\n This will return a python function that can be evaluated at any given point\n \"\"\"\n eff = eff_param_sym()\n free_params = []\n for sym in eff.free_symbols:\n if sym.name in params and params[sym.name][1] != 0:\n free_params.append(sym)\n free_params.sort(key=lambda x: int(x.name.replace('p', '')))\n cov_eff = func_cov(eff, free_params)\n subst_vals = {p: v[0] for p, v in params.iteritems()}\n subst_vals.update({('sigma_' + p): v[1] for p, v in params.iteritems()})\n subst_vals.update(get_corr_subs_values(corr))\n return sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))\n\n\n<function token>\n\n\ndef set_params_errors(func, *params):\n \"\"\"\n Set all the parameters as pairs of value and uncertainty (in the order they)\n are in the params list. If uncertainty = 0, the parameter is fixed\n \"\"\"\n central = np.array([p[0] for p in params])\n uncer = np.array([p[1] for p in params])\n func.SetParameters(central)\n func.SetParErrors(uncer)\n for idx, err in enumerate(uncer):\n if err == 0:\n func.FixParameter(idx, func.GetParameter(idx))\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef get_cov_func(params, corr):\n \"\"\"\n Get the uncertainty function where only pT is left as a free parameter.\n\n This will return a python function that can be evaluated at any given point\n \"\"\"\n eff = eff_param_sym()\n free_params = []\n for sym in eff.free_symbols:\n if sym.name in params and params[sym.name][1] != 0:\n free_params.append(sym)\n free_params.sort(key=lambda x: int(x.name.replace('p', '')))\n cov_eff = func_cov(eff, free_params)\n subst_vals = {p: v[0] for p, v in params.iteritems()}\n subst_vals.update({('sigma_' + p): v[1] for p, v in params.iteritems()})\n subst_vals.update(get_corr_subs_values(corr))\n return sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))\n\n\n<function token>\n\n\ndef set_params_errors(func, *params):\n \"\"\"\n Set all the parameters as pairs of value and uncertainty (in the order they)\n are in the params list. If uncertainty = 0, the parameter is fixed\n \"\"\"\n central = np.array([p[0] for p in params])\n uncer = np.array([p[1] for p in params])\n func.SetParameters(central)\n func.SetParErrors(uncer)\n for idx, err in enumerate(uncer):\n if err == 0:\n func.FixParameter(idx, func.GetParameter(idx))\n\n\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef get_cov_func(params, corr):\n \"\"\"\n Get the uncertainty function where only pT is left as a free parameter.\n\n This will return a python function that can be evaluated at any given point\n \"\"\"\n eff = eff_param_sym()\n free_params = []\n for sym in eff.free_symbols:\n if sym.name in params and params[sym.name][1] != 0:\n free_params.append(sym)\n free_params.sort(key=lambda x: int(x.name.replace('p', '')))\n cov_eff = func_cov(eff, free_params)\n subst_vals = {p: v[0] for p, v in params.iteritems()}\n subst_vals.update({('sigma_' + p): v[1] for p, v in params.iteritems()})\n subst_vals.update(get_corr_subs_values(corr))\n return sp.lambdify(sp.symbols('x'), cov_eff.subs(subst_vals))\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<import token>\n<assignment token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
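For reference, the string manipulation done by eff_param_sym() in the script above can be checked in isolation; this standalone sketch applies the same three transforms to the TF1 expression string and prints the sympy-parseable result (the sketch itself is an illustration, not part of the original row):
import re
s = '[0] * (1 - [1] * (TMath::Erf(x[0] + [2]) - [1] / [4] * (x[0] - [3] * (pow(x[0], 2) - [3] / [5] * pow(x[0], 3)))))'
s = s.replace('TMath::Erf', 'erf').replace('x[0]', 'x')   # erf name, parseable variable
s = re.sub(r'\[([0-9])\]', r'p\1', s)                     # [n] -> pn parameter names
s = re.sub(r'pow\((.*?)\s*?,\s*?([0-9])\)', r'\1**\2', s) # pow(x, y) -> x**y
print(s)  # p0 * (1 - p1 * (erf(x + p2) - p1 / p4 * (x - p3 * (x**2 - p3 / p5 * x**3))))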
967 |
176ffac7ad47f5c43a24acc664631f8353ec5100
|
import matplotlib.pyplot as plt
import numpy as np
steps = 10
num_tests = 100
res = []
with open('txt.txt', 'r') as f:
data = f.readlines()
line = 0
for i in range(10, 110, 10):
agg = 0
for j in range(num_tests):
agg += int(data[line])
line += 1
res.append(agg/num_tests)
x = list(range(10, 110, steps))
y = res
z = np.polyfit(x, res, 2)
# print(z)
p = np.poly1d(z)
plt.plot(x, y, 'o')
plt.plot(x, p(x), label="Best-fit degree-2 polynomial")
plt.title("#messages vs. #nodes in graph (GHS algo.) (Averaged over 100 runs)")
plt.xlabel("Number of nodes in fully connected graph")
plt.ylabel("Number of messages")
plt.legend()
# plt.show()
plt.savefig("Messages.svg")
plt.clf()
steps = 10
num_tests = 10
res = []
with open('txt2.txt', 'r') as f:
data = f.readlines()
line = 0
for procs in range(1,13):
times = []
for i in range(10, 110, 10):
temp = 0
for num in range(num_tests):
temp += float(data[line].split()[1])
line += 3
times.append(temp/num_tests)
res.append(times)
x = list(range(10, 110, steps))
y = res
# z = np.polyfit(x, res, 2)
# print(z)
# p = np.poly1d(z)
# plt.plot(x, y, 'o')
# plt.plot(x, p(x),label = "Best fit 2 degree polynomial")
plt.title("Time taken vs. number of cores used (Averaged over 10 runs)")
plt.xlabel("Number of nodes in fully connected graph")
plt.ylabel("Time taken (in seconds)")
# for procs in range(1,13):
for procs in [1,2,4,8,12]:
    plt.plot(x, res[procs - 1], label=str(procs) + ' Cores')
plt.legend()
# plt.show()
plt.savefig("Time.svg")
|
[
"import matplotlib.pyplot as plt\nimport numpy as np\n\nsteps = 10\nnum_tests = 100\n\nres = []\n\nwith open('txt.txt', 'r') as f:\n data = f.readlines()\n line = 0\n for i in range(10, 110, 10):\n agg = 0\n for j in range(num_tests):\n agg += int(data[line])\n line += 1\n res.append(agg/num_tests)\n\nx = list(range(10, 110, steps))\ny = res\n\nz = np.polyfit(x, res, 2)\n# print(z)\np = np.poly1d(z)\nplt.plot(x, y, 'o')\nplt.plot(x, p(x),label = \"Best fit 2 degree polynomial\")\n\nplt.title(\"#messages vs. #nodes in graph (GHS algo.) (Averaged over 100 runs)\")\nplt.xlabel(\"Number of nodes in fully connected graph\")\nplt.ylabel(\"Number of messages\")\n\nplt.legend()\n# plt.show()\n\nplt.savefig(\"Messages.svg\")\n\nplt.clf()\nsteps = 10\nnum_tests = 10\n\nres = []\n\nwith open('txt2.txt', 'r') as f:\n data = f.readlines()\n line = 0\n for procs in range(1,13):\n times = []\n for i in range(10, 110, 10):\n temp = 0\n for num in range(num_tests):\n temp += float(data[line].split()[1])\n line += 3\n times.append(temp/num_tests)\n res.append(times)\n\nx = list(range(10, 110, steps))\ny = res\n\n# z = np.polyfit(x, res, 2)\n# print(z)\n# p = np.poly1d(z)\n# plt.plot(x, y, 'o')\n# plt.plot(x, p(x),label = \"Best fit 2 degree polynomial\")\n\nplt.title(\"Time taken vs. number of cores used (Averaged over 10 runs)\")\nplt.xlabel(\"Number of nodes in fully connected graph\")\nplt.ylabel(\"Time taken (in seconds)\")\n\n# for procs in range(1,13):\nfor procs in [1,2,4,8,12]:\n plt.plot(x,res[procs-1],label = str((procs))+' Cores')\n\nplt.legend()\n# plt.show()\n\nplt.savefig(\"Time.svg\")\n",
"import matplotlib.pyplot as plt\nimport numpy as np\nsteps = 10\nnum_tests = 100\nres = []\nwith open('txt.txt', 'r') as f:\n data = f.readlines()\n line = 0\n for i in range(10, 110, 10):\n agg = 0\n for j in range(num_tests):\n agg += int(data[line])\n line += 1\n res.append(agg / num_tests)\nx = list(range(10, 110, steps))\ny = res\nz = np.polyfit(x, res, 2)\np = np.poly1d(z)\nplt.plot(x, y, 'o')\nplt.plot(x, p(x), label='Best fit 2 degree polynomial')\nplt.title('#messages vs. #nodes in graph (GHS algo.) (Averaged over 100 runs)')\nplt.xlabel('Number of nodes in fully connected graph')\nplt.ylabel('Number of messages')\nplt.legend()\nplt.savefig('Messages.svg')\nplt.clf()\nsteps = 10\nnum_tests = 10\nres = []\nwith open('txt2.txt', 'r') as f:\n data = f.readlines()\n line = 0\n for procs in range(1, 13):\n times = []\n for i in range(10, 110, 10):\n temp = 0\n for num in range(num_tests):\n temp += float(data[line].split()[1])\n line += 3\n times.append(temp / num_tests)\n res.append(times)\nx = list(range(10, 110, steps))\ny = res\nplt.title('Time taken vs. number of cores used (Averaged over 10 runs)')\nplt.xlabel('Number of nodes in fully connected graph')\nplt.ylabel('Time taken (in seconds)')\nfor procs in [1, 2, 4, 8, 12]:\n plt.plot(x, res[procs - 1], label=str(procs) + ' Cores')\nplt.legend()\nplt.savefig('Time.svg')\n",
"<import token>\nsteps = 10\nnum_tests = 100\nres = []\nwith open('txt.txt', 'r') as f:\n data = f.readlines()\n line = 0\n for i in range(10, 110, 10):\n agg = 0\n for j in range(num_tests):\n agg += int(data[line])\n line += 1\n res.append(agg / num_tests)\nx = list(range(10, 110, steps))\ny = res\nz = np.polyfit(x, res, 2)\np = np.poly1d(z)\nplt.plot(x, y, 'o')\nplt.plot(x, p(x), label='Best fit 2 degree polynomial')\nplt.title('#messages vs. #nodes in graph (GHS algo.) (Averaged over 100 runs)')\nplt.xlabel('Number of nodes in fully connected graph')\nplt.ylabel('Number of messages')\nplt.legend()\nplt.savefig('Messages.svg')\nplt.clf()\nsteps = 10\nnum_tests = 10\nres = []\nwith open('txt2.txt', 'r') as f:\n data = f.readlines()\n line = 0\n for procs in range(1, 13):\n times = []\n for i in range(10, 110, 10):\n temp = 0\n for num in range(num_tests):\n temp += float(data[line].split()[1])\n line += 3\n times.append(temp / num_tests)\n res.append(times)\nx = list(range(10, 110, steps))\ny = res\nplt.title('Time taken vs. number of cores used (Averaged over 10 runs)')\nplt.xlabel('Number of nodes in fully connected graph')\nplt.ylabel('Time taken (in seconds)')\nfor procs in [1, 2, 4, 8, 12]:\n plt.plot(x, res[procs - 1], label=str(procs) + ' Cores')\nplt.legend()\nplt.savefig('Time.svg')\n",
"<import token>\n<assignment token>\nwith open('txt.txt', 'r') as f:\n data = f.readlines()\n line = 0\n for i in range(10, 110, 10):\n agg = 0\n for j in range(num_tests):\n agg += int(data[line])\n line += 1\n res.append(agg / num_tests)\n<assignment token>\nplt.plot(x, y, 'o')\nplt.plot(x, p(x), label='Best fit 2 degree polynomial')\nplt.title('#messages vs. #nodes in graph (GHS algo.) (Averaged over 100 runs)')\nplt.xlabel('Number of nodes in fully connected graph')\nplt.ylabel('Number of messages')\nplt.legend()\nplt.savefig('Messages.svg')\nplt.clf()\n<assignment token>\nwith open('txt2.txt', 'r') as f:\n data = f.readlines()\n line = 0\n for procs in range(1, 13):\n times = []\n for i in range(10, 110, 10):\n temp = 0\n for num in range(num_tests):\n temp += float(data[line].split()[1])\n line += 3\n times.append(temp / num_tests)\n res.append(times)\n<assignment token>\nplt.title('Time taken vs. number of cores used (Averaged over 10 runs)')\nplt.xlabel('Number of nodes in fully connected graph')\nplt.ylabel('Time taken (in seconds)')\nfor procs in [1, 2, 4, 8, 12]:\n plt.plot(x, res[procs - 1], label=str(procs) + ' Cores')\nplt.legend()\nplt.savefig('Time.svg')\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
968 |
b3d26d01d45c073192d06c8e94c06f7eae267b14
|
old_file = open("new.csv", "r")
new_file = open("new1.csv", "w")
for line in old_file.readlines():
    cleaned_line = line.replace(',', '.')  # replace decimal commas with dots
    new_file.write(cleaned_line)
old_file.close()
new_file.close()
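The same cleanup is safer with context managers, which close both files even if writing fails partway through; a minimal equivalent sketch, assuming the same file names:
with open("new.csv", "r") as old_file, open("new1.csv", "w") as new_file:
    for line in old_file:
        new_file.write(line.replace(',', '.'))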
|
[
"old_file = open(\"new.csv\", \"r\")\nnew_file = open(\"new1,csv\", \"w\")\nfor line in old_file.readlines():\n cleaned_line =line.replace(',','.')\n new_file.write(cleaned_line)\nold_file.close\nnew_file.close",
"old_file = open('new.csv', 'r')\nnew_file = open('new1,csv', 'w')\nfor line in old_file.readlines():\n cleaned_line = line.replace(',', '.')\n new_file.write(cleaned_line)\nold_file.close\nnew_file.close\n",
"<assignment token>\nfor line in old_file.readlines():\n cleaned_line = line.replace(',', '.')\n new_file.write(cleaned_line)\nold_file.close\nnew_file.close\n",
"<assignment token>\n<code token>\n"
] | false |
969 |
aa13278a4686e9bab7948c2f212f87f9bd6eee00
|
import socket
END = bytearray()
END.append(255)
print(END[0])
def recvall(sock):  # Receive all data from the socket
    BUFF_SIZE = 4096  # 4 KiB
    data = b''
    while True:  # receive the data in 4 KiB chunks
        part = sock.recv(BUFF_SIZE)
        data += part
        if len(part) < BUFF_SIZE:
            # zero bytes received or end of data
            break
    return data
def create_dict(data):  # Parse the received code dictionary
    dict = {}
    i = 0
    while True:
        dict[chr(data[i])] = ''
        j = 1
        while data[i + j] != END[0]:  # until FF is found, treat the bytes as the symbol's code
            dict[chr(data[i])] += str(chr(data[i + j]))
            j += 1
        i += 1 + j
        if data[i] == END[0] and data[i + 1] == END[0]:  # three consecutive FF bytes end the dictionary
            break
    return dict
def extract_start(data):  # Find the start of the data segment
    i = 0
    while True:
        if data[i] == END[0] and data[i + 1] == END[0] and data[i + 2] == END[0]:
            return i + 3
        i += 1
def bytes_to_bits(data, begin):  # Convert the bytes to their bit-string representation
    bits = ''
    for i in range(begin, len(data)):
        bits += format(data[i], "08b")
    return bits
def data_to_extract(data, dict):  # Decode the received data to text using the dictionary
    begin = extract_start(data)  # find where the text begins
    print(begin)
    data = bytes_to_bits(data, begin)
    dict = {y: x for x, y in dict.items()}  # swap keys and values in the dictionary
    text = ''
    temp_code = ''
    for i in range(len(data)):  # for each bit
        temp_code += data[i]
        if temp_code in dict:  # check whether the accumulated code appears in the dictionary
            text += dict[temp_code]
            temp_code = ''
    return text
def recieve_data(codedpath, decodedpath, ip, port):
    port = int(port)  # set up the connection using a socket
    sock = socket.socket()
    sock.bind((ip, int(port)))
    sock.listen()
    conn, addr = sock.accept()
    print('Connected:', addr)
    rec_data = recvall(conn)  # receive the data
    rec_dict = create_dict(rec_data)  # build the dictionary from the data
    extracted = data_to_extract(rec_data, rec_dict)  # decode the text using the dictionary
    print("RECEIVED DICTIONARY\n")
    print(rec_dict)
    print(extracted)
    f = open(codedpath, "wb")  # save the received data
    f.write(rec_data)
    f.close()
    f = open(decodedpath, "w")
    f.write(extracted)
    f.close()
    return 0
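# A hedged sketch of the matching sender side (an assumption, not in the
# original record): it serialises a {symbol: bit-string} dictionary in the
# FF-delimited format create_dict()/extract_start() expect, then packs the
# code bits into bytes. The function name encode_data is hypothetical.
def encode_data(text, codes):
    out = bytearray()
    for sym, code in codes.items():
        out.append(ord(sym))        # 1 byte: the symbol itself
        out.extend(code.encode())   # its code as ASCII '0'/'1' bytes
        out.append(255)             # FF ends the dictionary entry
    out.extend([255, 255])          # two more FF: FF FF FF ends the dictionary
    bits = ''.join(codes[ch] for ch in text)
    bits += '0' * (-len(bits) % 8)  # pad to a whole number of bytes
    # NOTE: '0' padding can decode as spurious trailing symbols; a real
    # protocol would also transmit the text length or pick safe padding.
    for i in range(0, len(bits), 8):
        out.append(int(bits[i:i + 8], 2))
    return bytes(out)

# usage sketch: sock.sendall(encode_data('abba', {'a': '0', 'b': '11'}))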
|
[
"import socket\n\nEND = bytearray()\nEND.append(255)\nprint(END[0])\n\n\ndef recvall(sock): # Odbiór danych\n BUFF_SIZE = 4096 # 4 KiB\n data = b''\n while True: # odbieramy dane, pakiety 4KiB\n part = sock.recv(BUFF_SIZE)\n data += part\n if len(part) < BUFF_SIZE:\n # 0 lub koniec danych\n break\n return data\n\n\ndef create_dict(data): # Odczytuje otrzymany słownik\n dict = {}\n i = 0\n while True:\n dict[chr(data[i])] = ''\n j = 1\n while data[i + j] != END[0]: # Dopóki nie znajdzie FF, uznaje bajty za 'kod' slowa\n dict[chr(data[i])] += str(chr(data[i + j]))\n j += 1\n\n i += 1 + j\n if data[i] == END[0] and data[i + 1] == END[0]: # Gdy znajdzie 3x FF, kończy słownik\n break\n return dict\n\n\ndef extract_start(data): # Poszukuje pącztka segmentu danych\n i = 0\n while True:\n if data[i] == END[0] and data[i + 1] == END[0] and data[i + 2] == END[0]:\n return i + 3\n i += 1\n\n\ndef bytes_to_bits(data, begin): # Zamienia bajty na znakowy odpowiednik w bitach\n bits = ''\n for i in range(begin, len(data)):\n bits += format(data[i], \"08b\")\n return bits\n\n\ndef data_to_extract(data, dict): # Otrzymane dane na podstawie slownika odczytuje do tekstu\n begin = extract_start(data) # Szukamy początku tekstu\n print(begin)\n data = bytes_to_bits(data, begin)\n dict = {y: x for x, y in dict.items()} # Zamiana kluczy z wartością w słowniku\n text = ''\n temp_code = ''\n for i in range(len(data)): # Dla kazdego bitu\n temp_code += data[i]\n if temp_code in dict: # Szukamy czy utworzona tymczasowo zmienna nie zawiera się\n # w słowniku\n text += dict[temp_code]\n temp_code = ''\n return text\n\n\ndef recieve_data(codedpath, decodedpath, ip, port):\n port = int(port) #Segment odpowiedzialny za utworzenie połaczenia przy użyciu gniazda\n sock = socket.socket()\n sock.bind((ip, int(port)))\n sock.listen()\n conn, addr = sock.accept()\n print('Połączono:', addr)\n rec_data = recvall(conn) #Odbierz dane\n rec_dict = create_dict(rec_data) #Utwórz słownik z danych\n extracted = data_to_extract(rec_data, rec_dict) #Na podstawie słownika, odkoduj tekst\n\n print(\"ODEBRANY SLOWNIK\\n\")\n print(rec_dict)\n print(extracted)\n\n f = open(codedpath, \"wb\") #Zapis otrzymanych danych\n f.write(rec_data)\n f.close()\n f = open(decodedpath, \"w\")\n f.write(extracted)\n f.close()\n return 0\n",
"import socket\nEND = bytearray()\nEND.append(255)\nprint(END[0])\n\n\ndef recvall(sock):\n BUFF_SIZE = 4096\n data = b''\n while True:\n part = sock.recv(BUFF_SIZE)\n data += part\n if len(part) < BUFF_SIZE:\n break\n return data\n\n\ndef create_dict(data):\n dict = {}\n i = 0\n while True:\n dict[chr(data[i])] = ''\n j = 1\n while data[i + j] != END[0]:\n dict[chr(data[i])] += str(chr(data[i + j]))\n j += 1\n i += 1 + j\n if data[i] == END[0] and data[i + 1] == END[0]:\n break\n return dict\n\n\ndef extract_start(data):\n i = 0\n while True:\n if data[i] == END[0] and data[i + 1] == END[0] and data[i + 2] == END[0\n ]:\n return i + 3\n i += 1\n\n\ndef bytes_to_bits(data, begin):\n bits = ''\n for i in range(begin, len(data)):\n bits += format(data[i], '08b')\n return bits\n\n\ndef data_to_extract(data, dict):\n begin = extract_start(data)\n print(begin)\n data = bytes_to_bits(data, begin)\n dict = {y: x for x, y in dict.items()}\n text = ''\n temp_code = ''\n for i in range(len(data)):\n temp_code += data[i]\n if temp_code in dict:\n text += dict[temp_code]\n temp_code = ''\n return text\n\n\ndef recieve_data(codedpath, decodedpath, ip, port):\n port = int(port)\n sock = socket.socket()\n sock.bind((ip, int(port)))\n sock.listen()\n conn, addr = sock.accept()\n print('Połączono:', addr)\n rec_data = recvall(conn)\n rec_dict = create_dict(rec_data)\n extracted = data_to_extract(rec_data, rec_dict)\n print('ODEBRANY SLOWNIK\\n')\n print(rec_dict)\n print(extracted)\n f = open(codedpath, 'wb')\n f.write(rec_data)\n f.close()\n f = open(decodedpath, 'w')\n f.write(extracted)\n f.close()\n return 0\n",
"<import token>\nEND = bytearray()\nEND.append(255)\nprint(END[0])\n\n\ndef recvall(sock):\n BUFF_SIZE = 4096\n data = b''\n while True:\n part = sock.recv(BUFF_SIZE)\n data += part\n if len(part) < BUFF_SIZE:\n break\n return data\n\n\ndef create_dict(data):\n dict = {}\n i = 0\n while True:\n dict[chr(data[i])] = ''\n j = 1\n while data[i + j] != END[0]:\n dict[chr(data[i])] += str(chr(data[i + j]))\n j += 1\n i += 1 + j\n if data[i] == END[0] and data[i + 1] == END[0]:\n break\n return dict\n\n\ndef extract_start(data):\n i = 0\n while True:\n if data[i] == END[0] and data[i + 1] == END[0] and data[i + 2] == END[0\n ]:\n return i + 3\n i += 1\n\n\ndef bytes_to_bits(data, begin):\n bits = ''\n for i in range(begin, len(data)):\n bits += format(data[i], '08b')\n return bits\n\n\ndef data_to_extract(data, dict):\n begin = extract_start(data)\n print(begin)\n data = bytes_to_bits(data, begin)\n dict = {y: x for x, y in dict.items()}\n text = ''\n temp_code = ''\n for i in range(len(data)):\n temp_code += data[i]\n if temp_code in dict:\n text += dict[temp_code]\n temp_code = ''\n return text\n\n\ndef recieve_data(codedpath, decodedpath, ip, port):\n port = int(port)\n sock = socket.socket()\n sock.bind((ip, int(port)))\n sock.listen()\n conn, addr = sock.accept()\n print('Połączono:', addr)\n rec_data = recvall(conn)\n rec_dict = create_dict(rec_data)\n extracted = data_to_extract(rec_data, rec_dict)\n print('ODEBRANY SLOWNIK\\n')\n print(rec_dict)\n print(extracted)\n f = open(codedpath, 'wb')\n f.write(rec_data)\n f.close()\n f = open(decodedpath, 'w')\n f.write(extracted)\n f.close()\n return 0\n",
"<import token>\n<assignment token>\nEND.append(255)\nprint(END[0])\n\n\ndef recvall(sock):\n BUFF_SIZE = 4096\n data = b''\n while True:\n part = sock.recv(BUFF_SIZE)\n data += part\n if len(part) < BUFF_SIZE:\n break\n return data\n\n\ndef create_dict(data):\n dict = {}\n i = 0\n while True:\n dict[chr(data[i])] = ''\n j = 1\n while data[i + j] != END[0]:\n dict[chr(data[i])] += str(chr(data[i + j]))\n j += 1\n i += 1 + j\n if data[i] == END[0] and data[i + 1] == END[0]:\n break\n return dict\n\n\ndef extract_start(data):\n i = 0\n while True:\n if data[i] == END[0] and data[i + 1] == END[0] and data[i + 2] == END[0\n ]:\n return i + 3\n i += 1\n\n\ndef bytes_to_bits(data, begin):\n bits = ''\n for i in range(begin, len(data)):\n bits += format(data[i], '08b')\n return bits\n\n\ndef data_to_extract(data, dict):\n begin = extract_start(data)\n print(begin)\n data = bytes_to_bits(data, begin)\n dict = {y: x for x, y in dict.items()}\n text = ''\n temp_code = ''\n for i in range(len(data)):\n temp_code += data[i]\n if temp_code in dict:\n text += dict[temp_code]\n temp_code = ''\n return text\n\n\ndef recieve_data(codedpath, decodedpath, ip, port):\n port = int(port)\n sock = socket.socket()\n sock.bind((ip, int(port)))\n sock.listen()\n conn, addr = sock.accept()\n print('Połączono:', addr)\n rec_data = recvall(conn)\n rec_dict = create_dict(rec_data)\n extracted = data_to_extract(rec_data, rec_dict)\n print('ODEBRANY SLOWNIK\\n')\n print(rec_dict)\n print(extracted)\n f = open(codedpath, 'wb')\n f.write(rec_data)\n f.close()\n f = open(decodedpath, 'w')\n f.write(extracted)\n f.close()\n return 0\n",
"<import token>\n<assignment token>\n<code token>\n\n\ndef recvall(sock):\n BUFF_SIZE = 4096\n data = b''\n while True:\n part = sock.recv(BUFF_SIZE)\n data += part\n if len(part) < BUFF_SIZE:\n break\n return data\n\n\ndef create_dict(data):\n dict = {}\n i = 0\n while True:\n dict[chr(data[i])] = ''\n j = 1\n while data[i + j] != END[0]:\n dict[chr(data[i])] += str(chr(data[i + j]))\n j += 1\n i += 1 + j\n if data[i] == END[0] and data[i + 1] == END[0]:\n break\n return dict\n\n\ndef extract_start(data):\n i = 0\n while True:\n if data[i] == END[0] and data[i + 1] == END[0] and data[i + 2] == END[0\n ]:\n return i + 3\n i += 1\n\n\ndef bytes_to_bits(data, begin):\n bits = ''\n for i in range(begin, len(data)):\n bits += format(data[i], '08b')\n return bits\n\n\ndef data_to_extract(data, dict):\n begin = extract_start(data)\n print(begin)\n data = bytes_to_bits(data, begin)\n dict = {y: x for x, y in dict.items()}\n text = ''\n temp_code = ''\n for i in range(len(data)):\n temp_code += data[i]\n if temp_code in dict:\n text += dict[temp_code]\n temp_code = ''\n return text\n\n\ndef recieve_data(codedpath, decodedpath, ip, port):\n port = int(port)\n sock = socket.socket()\n sock.bind((ip, int(port)))\n sock.listen()\n conn, addr = sock.accept()\n print('Połączono:', addr)\n rec_data = recvall(conn)\n rec_dict = create_dict(rec_data)\n extracted = data_to_extract(rec_data, rec_dict)\n print('ODEBRANY SLOWNIK\\n')\n print(rec_dict)\n print(extracted)\n f = open(codedpath, 'wb')\n f.write(rec_data)\n f.close()\n f = open(decodedpath, 'w')\n f.write(extracted)\n f.close()\n return 0\n",
"<import token>\n<assignment token>\n<code token>\n\n\ndef recvall(sock):\n BUFF_SIZE = 4096\n data = b''\n while True:\n part = sock.recv(BUFF_SIZE)\n data += part\n if len(part) < BUFF_SIZE:\n break\n return data\n\n\ndef create_dict(data):\n dict = {}\n i = 0\n while True:\n dict[chr(data[i])] = ''\n j = 1\n while data[i + j] != END[0]:\n dict[chr(data[i])] += str(chr(data[i + j]))\n j += 1\n i += 1 + j\n if data[i] == END[0] and data[i + 1] == END[0]:\n break\n return dict\n\n\ndef extract_start(data):\n i = 0\n while True:\n if data[i] == END[0] and data[i + 1] == END[0] and data[i + 2] == END[0\n ]:\n return i + 3\n i += 1\n\n\n<function token>\n\n\ndef data_to_extract(data, dict):\n begin = extract_start(data)\n print(begin)\n data = bytes_to_bits(data, begin)\n dict = {y: x for x, y in dict.items()}\n text = ''\n temp_code = ''\n for i in range(len(data)):\n temp_code += data[i]\n if temp_code in dict:\n text += dict[temp_code]\n temp_code = ''\n return text\n\n\ndef recieve_data(codedpath, decodedpath, ip, port):\n port = int(port)\n sock = socket.socket()\n sock.bind((ip, int(port)))\n sock.listen()\n conn, addr = sock.accept()\n print('Połączono:', addr)\n rec_data = recvall(conn)\n rec_dict = create_dict(rec_data)\n extracted = data_to_extract(rec_data, rec_dict)\n print('ODEBRANY SLOWNIK\\n')\n print(rec_dict)\n print(extracted)\n f = open(codedpath, 'wb')\n f.write(rec_data)\n f.close()\n f = open(decodedpath, 'w')\n f.write(extracted)\n f.close()\n return 0\n",
"<import token>\n<assignment token>\n<code token>\n\n\ndef recvall(sock):\n BUFF_SIZE = 4096\n data = b''\n while True:\n part = sock.recv(BUFF_SIZE)\n data += part\n if len(part) < BUFF_SIZE:\n break\n return data\n\n\ndef create_dict(data):\n dict = {}\n i = 0\n while True:\n dict[chr(data[i])] = ''\n j = 1\n while data[i + j] != END[0]:\n dict[chr(data[i])] += str(chr(data[i + j]))\n j += 1\n i += 1 + j\n if data[i] == END[0] and data[i + 1] == END[0]:\n break\n return dict\n\n\n<function token>\n<function token>\n\n\ndef data_to_extract(data, dict):\n begin = extract_start(data)\n print(begin)\n data = bytes_to_bits(data, begin)\n dict = {y: x for x, y in dict.items()}\n text = ''\n temp_code = ''\n for i in range(len(data)):\n temp_code += data[i]\n if temp_code in dict:\n text += dict[temp_code]\n temp_code = ''\n return text\n\n\ndef recieve_data(codedpath, decodedpath, ip, port):\n port = int(port)\n sock = socket.socket()\n sock.bind((ip, int(port)))\n sock.listen()\n conn, addr = sock.accept()\n print('Połączono:', addr)\n rec_data = recvall(conn)\n rec_dict = create_dict(rec_data)\n extracted = data_to_extract(rec_data, rec_dict)\n print('ODEBRANY SLOWNIK\\n')\n print(rec_dict)\n print(extracted)\n f = open(codedpath, 'wb')\n f.write(rec_data)\n f.close()\n f = open(decodedpath, 'w')\n f.write(extracted)\n f.close()\n return 0\n",
"<import token>\n<assignment token>\n<code token>\n\n\ndef recvall(sock):\n BUFF_SIZE = 4096\n data = b''\n while True:\n part = sock.recv(BUFF_SIZE)\n data += part\n if len(part) < BUFF_SIZE:\n break\n return data\n\n\n<function token>\n<function token>\n<function token>\n\n\ndef data_to_extract(data, dict):\n begin = extract_start(data)\n print(begin)\n data = bytes_to_bits(data, begin)\n dict = {y: x for x, y in dict.items()}\n text = ''\n temp_code = ''\n for i in range(len(data)):\n temp_code += data[i]\n if temp_code in dict:\n text += dict[temp_code]\n temp_code = ''\n return text\n\n\ndef recieve_data(codedpath, decodedpath, ip, port):\n port = int(port)\n sock = socket.socket()\n sock.bind((ip, int(port)))\n sock.listen()\n conn, addr = sock.accept()\n print('Połączono:', addr)\n rec_data = recvall(conn)\n rec_dict = create_dict(rec_data)\n extracted = data_to_extract(rec_data, rec_dict)\n print('ODEBRANY SLOWNIK\\n')\n print(rec_dict)\n print(extracted)\n f = open(codedpath, 'wb')\n f.write(rec_data)\n f.close()\n f = open(decodedpath, 'w')\n f.write(extracted)\n f.close()\n return 0\n",
"<import token>\n<assignment token>\n<code token>\n\n\ndef recvall(sock):\n BUFF_SIZE = 4096\n data = b''\n while True:\n part = sock.recv(BUFF_SIZE)\n data += part\n if len(part) < BUFF_SIZE:\n break\n return data\n\n\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef recieve_data(codedpath, decodedpath, ip, port):\n port = int(port)\n sock = socket.socket()\n sock.bind((ip, int(port)))\n sock.listen()\n conn, addr = sock.accept()\n print('Połączono:', addr)\n rec_data = recvall(conn)\n rec_dict = create_dict(rec_data)\n extracted = data_to_extract(rec_data, rec_dict)\n print('ODEBRANY SLOWNIK\\n')\n print(rec_dict)\n print(extracted)\n f = open(codedpath, 'wb')\n f.write(rec_data)\n f.close()\n f = open(decodedpath, 'w')\n f.write(extracted)\n f.close()\n return 0\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n\n\ndef recieve_data(codedpath, decodedpath, ip, port):\n port = int(port)\n sock = socket.socket()\n sock.bind((ip, int(port)))\n sock.listen()\n conn, addr = sock.accept()\n print('Połączono:', addr)\n rec_data = recvall(conn)\n rec_dict = create_dict(rec_data)\n extracted = data_to_extract(rec_data, rec_dict)\n print('ODEBRANY SLOWNIK\\n')\n print(rec_dict)\n print(extracted)\n f = open(codedpath, 'wb')\n f.write(rec_data)\n f.close()\n f = open(decodedpath, 'w')\n f.write(extracted)\n f.close()\n return 0\n",
"<import token>\n<assignment token>\n<code token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
970 |
736fee6f9a46b8568b2dd217b81d54d689306630
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import datetime
import time
from sys import exit
from matplotlib import colors, pyplot as plt
from functools import reduce
import matplotlib.cm as cm
import seaborn as sns
from astropy.io import ascii, fits
from astropy.wcs import wcs
from matplotlib import cm
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from scipy.interpolate import interp2d
import matplotlib.mlab as mlab
import scipy, pylab
import rpy2
import cubehelix
import math
from pysextractor import SExtractor
__author__ = 'pnovais'
ini=time.time()
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
# define the function that reads the FITS images
def get_image(f_sdss):
    img = f_sdss[0].data
    # sky = f_sdss[2].data
    return img
# open the file listing the image names in the n bands
df_fit = pd.read_csv('data/arquivo_fits.csv')
'''
================================================================================
Run SExtractor on the r-band image, creating a segmentation map and a catalog
of the detected objects
UPDATE THE NAME OF THE SEGMENTATION BAND
================================================================================
'''
fname = 'data/frame-r-002507-4-0226.fits'
sex = SExtractor()
sex.config['PARAMETERS_LIST'].append('FLUX_ISO')
sex.config['PARAMETERS_LIST'].append('MAG_ISOCOR')
sex.config['PARAMETERS_LIST'].append('MAG_AUTO')
sex.config['PARAMETERS_LIST'].append('PETRO_RADIUS')
sex.config['PARAMETERS_LIST'].append('ISOAREA_IMAGE')
sex.config['PARAMETERS_LIST'].append('ALPHA_J2000')
sex.config['PARAMETERS_LIST'].append('DELTA_J2000')
sex.config['PARAMETERS_LIST'].append('FWHM_WORLD')
sex.config['PARAMETERS_LIST'].append('CLASS_STAR')
sex.config['CHECKIMAGE_TYPE'] = 'SEGMENTATION'
sex.run(fname)
segmap = fits.open('check.fits')[0].data
df_cat = pd.read_table('py-sextractor.cat', delim_whitespace=True, header=16)
df_cat.columns = ['num','flux_best','fluxerr_best', 'x','y','flags',
'fwhm_image', 'flux_iso','mag_isocor','mag_auto',
'petro_radius','ISO_AREA','ra','dec',
'fwhm_world','class_star']
# select the objects that are likely galaxies
df_cat = df_cat.ix[(df_cat['fwhm_image'] > 4.5) & (df_cat['mag_auto'] < -7)]
df_cat = df_cat.reset_index()
df_cat = df_cat.ix[:,1:15]
'''
================================================================================
Read the images in all bands and build a dataframe for each galaxy
using astropy
Compute the sky level in all bands
UPDATE THE NAME OF THE SEGMENTATION BAND
================================================================================
'''
df = pd.DataFrame()
df_sky = pd.DataFrame()
for i_object in range(13,14):
window_size = 250
filter_seg = 'rSDSS'
ra = df_cat['ra']
dec = df_cat['dec']
image_r = fits.open('data/frame-r-002507-4-0226.fits')
wcsys = wcs.WCS(header=image_r[0].header)
y, x = wcsys.wcs_world2pix(ra, dec, 1)
interval = (int(round(x[i_object] - window_size / 2)), int(round(x[i_object] + window_size / 2)),
int(round(y[i_object] - window_size / 2)), int(round(y[i_object] + window_size / 2)))
df = pd.DataFrame()
df_sky = pd.DataFrame()
seg_sex = segmap[interval[0]:interval[1], interval[2]:interval[3]]
for i_gal in range(len(df_fit)):
f_sdss = fits.open('data/frame-%s-%s' %(df_fit['filter'][i_gal],
df_fit['name'][i_gal]))
img = get_image(f_sdss)
img_cut = img[interval[0]:interval[1], interval[2]:interval[3]]
plt.figure(1)
plt.clf()
plt.imshow(100*np.log10(img_cut/255), cmap='spectral')
plt.colorbar()
band=df_fit['filter'][i_gal]
nrows, ncols = img_cut.shape
xx, yy = np.meshgrid( *np.ogrid[:ncols, :nrows] )
table = np.column_stack(( xx.flatten(), yy.flatten(), img_cut.flatten() ))
temp = pd.DataFrame(table, columns=['x','y',band])
df = pd.concat([df,temp], axis=1)
sky_r = fits.open('data/frame-%s-%s' %(df_fit['filter'][i_gal],
df_fit['name'][i_gal]))
sky = get_image(sky_r)
wcsys = wcs.WCS(header=sky_r[0].header)
yc, xc = wcsys.wcs_world2pix(351.101, 14.737636, 1)
delta_x = 85
delta_y = 85
interval_sky = (int(round(xc - delta_x / 2)), int(round(xc + delta_x / 2)), int(round(yc - delta_y / 2)),
int(round(yc + delta_y / 2)))
img_sky = sky[interval_sky[0]:interval_sky[1], interval_sky[2]:interval_sky[3]]
sky_nrows, sky_ncols = img_sky.shape
xxc, yyc = np.meshgrid( *np.ogrid[:sky_ncols, :sky_nrows] )
table_sky = np.column_stack(( xxc.flatten(), yyc.flatten(), img_sky.flatten() ))
temp_sky = pd.DataFrame(table_sky, columns=['x','y',band])
df_sky = pd.concat([df_sky,temp_sky], axis=1)
df = df.ix[:, [0,1,2,5,8,11,14]]
df_sky = df_sky.ix[:, [0,1,2,5,8,11,14]]
    '''
    Image of the galaxy, in the r band.
    '''
plt.figure(1)
plt.clf()
r_sdss = fits.open('data/frame-r-%s' %(df_fit['name'][i_gal]))
img_r = get_image(r_sdss)
img_cut_r = img_r[interval[0]:interval[1], interval[2]:interval[3]]
cx = cubehelix.cmap(reverse=True, start=0., rot=-0.5)
imgplot = plt.imshow(100*np.log10(img_cut_r/255), cmap='spectral')
    titulo='Galaxy #%s - r band' %(df_cat['num'][i_object])
plt.title(titulo)
plt.colorbar()
figura = 'figures/galaxy_#%s' %df_cat['num'][i_object]
plt.savefig(figura)
    '''
    Segmented image of the galaxy, in the r band.
    '''
plt.figure(1)
plt.clf()
cx = cubehelix.cmap(reverse=True, start=0., rot=-0.5)
imgplot = plt.imshow(seg_sex, cmap='spectral')
    titulo='Segmentation Galaxy #%s - r band' %(df_cat['num'][i_object])
plt.title(titulo)
plt.colorbar()
figura = 'figures/seg_galaxy_#%s' %df_cat['num'][i_object]
plt.savefig(figura)
    '''
    ================================================================================
    Save each galaxy's fluxes to a txt file
    ================================================================================
    '''
saida_fluxes = 'data/all_band_fluxes_%s.txt' %df_cat['num'][i_object]
formats=['%d','%d','%5.4f','%5.4f','%5.4f','%5.4f','%5.4f']
headers2='x\ty\tu\tg\tr\ti\tz'
np.savetxt(saida_fluxes,df, delimiter='\t',header=headers2, fmt = formats)
print('')
    print('>> The data are in: "%s".' %saida_fluxes)
    '''
    ================================================================================
    Subtract the sky, in the r band
    ================================================================================
    '''
df_aux=df.ix[:,2:]
df_aux1=df.ix[:,:2]
df_sky_aux = df_sky.ix[:,2:]
df_aux3 = (df_aux - df_sky_aux.mean())
df_rss=df_aux1.join(df_aux3)
"""
A segmentacao consiste de usar um limiar para separar o objeto do fundo.
No nosso caso, usamos limiar = alpha*std_ceu
"""
    '''
    ================================================================================
    SEGMENTATION
    ================================================================================
    '''
    # select the pixels above the threshold
limiar = 2.5*df_sky.r.std()
df_seg = df_rss.ix[df_rss['r'] > limiar]
    print('Pixels above the threshold: %d' %len(df_seg))
np.savetxt('fof2.txt',df_seg,delimiter='\t')
fim = time.time()
time_proc = fim - ini
print('')
print(bcolors.HEADER + 'processing time: %fs' %time_proc + bcolors.ENDC)
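# A small self-contained sketch (an addition, assuming nothing beyond numpy):
# it illustrates the thresholding rule used above, limiar = alpha * sky_std,
# on a synthetic image, counting the pixels that survive the cut.
import numpy as np

rng = np.random.default_rng(0)
sky_patch = rng.normal(loc=0.0, scale=1.0, size=(85, 85))   # pure sky region
image = rng.normal(loc=0.0, scale=1.0, size=(250, 250))
image[100:150, 100:150] += 5.0                              # a bright "galaxy"

alpha = 2.5
limiar = alpha * sky_patch.std()   # same rule as limiar = 2.5*df_sky.r.std()
mask = image > limiar              # pixels above the threshold
print('Pixels above the threshold: %d' % mask.sum())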
|
[
"\n#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport time\nfrom sys import exit\nfrom matplotlib import colors, pyplot as plt\nfrom functools import reduce\nimport matplotlib.cm as cm\nimport seaborn as sns\nfrom astropy.io import ascii, fits\nfrom astropy.wcs import wcs\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nfrom scipy.interpolate import interp2d\nimport matplotlib.mlab as mlab\nimport scipy, pylab\nimport rpy2\nimport cubehelix\nimport math\nfrom pysextractor import SExtractor\n\n__author__ = 'pnovais'\n\nini=time.time()\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\n#definindo a classe que ira ler as imagens fits\ndef get_image(f_sdss):\n img = f_sdss[0].data\n# sky = f_sdss[2].data\n return img\n\n#abertura do arquivo com o nome das imagens, nas n bandas\ndf_fit = pd.read_csv('data/arquivo_fits.csv')\n\n'''\n================================================================================\nRodando o SExtractor na imagem na banda r, criando uma segmentacao e um catalogo\ncom os objetos obtidos\nATUALIZAR NOME DA BANDA DE SEGMENTACAO\n================================================================================\n'''\nfname = 'data/frame-r-002507-4-0226.fits'\nsex = SExtractor()\nsex.config['PARAMETERS_LIST'].append('FLUX_ISO')\nsex.config['PARAMETERS_LIST'].append('MAG_ISOCOR')\nsex.config['PARAMETERS_LIST'].append('MAG_AUTO')\nsex.config['PARAMETERS_LIST'].append('PETRO_RADIUS')\nsex.config['PARAMETERS_LIST'].append('ISOAREA_IMAGE')\nsex.config['PARAMETERS_LIST'].append('ALPHA_J2000')\nsex.config['PARAMETERS_LIST'].append('DELTA_J2000')\nsex.config['PARAMETERS_LIST'].append('FWHM_WORLD')\nsex.config['PARAMETERS_LIST'].append('CLASS_STAR')\nsex.config['CHECKIMAGE_TYPE'] = 'SEGMENTATION'\nsex.run(fname)\nsegmap = fits.open('check.fits')[0].data\n\ndf_cat = pd.read_table('py-sextractor.cat', delim_whitespace=True, header=16)\ndf_cat.columns = ['num','flux_best','fluxerr_best', 'x','y','flags',\n 'fwhm_image', 'flux_iso','mag_isocor','mag_auto',\n 'petro_radius','ISO_AREA','ra','dec',\n 'fwhm_world','class_star']\n\n#selecao dos objetos que devem ser galaxias\ndf_cat = df_cat.ix[(df_cat['fwhm_image'] > 4.5) & (df_cat['mag_auto'] < -7)]\ndf_cat = df_cat.reset_index()\ndf_cat = df_cat.ix[:,1:15]\n\n'''\n================================================================================\nLendo as imagens, em todas as bandas, e gerando um dataframe para cada galaxia\nutilizando astropy\nCalculando o ceu em todas as bandas\n\nATUALIZAR NOME DA BANDA DE SEGMENTACAO\n================================================================================\n'''\n\ndf = pd.DataFrame()\ndf_sky = pd.DataFrame()\n\n\nfor i_object in range(13,14):\n window_size = 250\n filter_seg = 'rSDSS'\n ra = df_cat['ra']\n dec = df_cat['dec']\n image_r = fits.open('data/frame-r-002507-4-0226.fits')\n wcsys = wcs.WCS(header=image_r[0].header)\n y, x = wcsys.wcs_world2pix(ra, dec, 1)\n interval = (int(round(x[i_object] - window_size / 2)), int(round(x[i_object] + window_size / 2)),\n int(round(y[i_object] - window_size / 2)), int(round(y[i_object] + window_size / 2)))\n df = pd.DataFrame()\n df_sky = pd.DataFrame()\n seg_sex = segmap[interval[0]:interval[1], interval[2]:interval[3]]\n\n for i_gal in range(len(df_fit)):\n f_sdss = fits.open('data/frame-%s-%s' 
%(df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n img = get_image(f_sdss)\n img_cut = img[interval[0]:interval[1], interval[2]:interval[3]]\n plt.figure(1)\n plt.clf()\n plt.imshow(100*np.log10(img_cut/255), cmap='spectral')\n plt.colorbar()\n band=df_fit['filter'][i_gal]\n nrows, ncols = img_cut.shape\n xx, yy = np.meshgrid( *np.ogrid[:ncols, :nrows] )\n table = np.column_stack(( xx.flatten(), yy.flatten(), img_cut.flatten() ))\n temp = pd.DataFrame(table, columns=['x','y',band])\n df = pd.concat([df,temp], axis=1)\n\n sky_r = fits.open('data/frame-%s-%s' %(df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n sky = get_image(sky_r)\n wcsys = wcs.WCS(header=sky_r[0].header)\n yc, xc = wcsys.wcs_world2pix(351.101, 14.737636, 1)\n delta_x = 85\n delta_y = 85\n interval_sky = (int(round(xc - delta_x / 2)), int(round(xc + delta_x / 2)), int(round(yc - delta_y / 2)),\n int(round(yc + delta_y / 2)))\n img_sky = sky[interval_sky[0]:interval_sky[1], interval_sky[2]:interval_sky[3]]\n sky_nrows, sky_ncols = img_sky.shape\n xxc, yyc = np.meshgrid( *np.ogrid[:sky_ncols, :sky_nrows] )\n table_sky = np.column_stack(( xxc.flatten(), yyc.flatten(), img_sky.flatten() ))\n temp_sky = pd.DataFrame(table_sky, columns=['x','y',band])\n df_sky = pd.concat([df_sky,temp_sky], axis=1)\n\n df = df.ix[:, [0,1,2,5,8,11,14]]\n df_sky = df_sky.ix[:, [0,1,2,5,8,11,14]]\n\n '''\n Imagem da galaxia, na banda r.\n '''\n plt.figure(1)\n plt.clf()\n r_sdss = fits.open('data/frame-r-%s' %(df_fit['name'][i_gal]))\n img_r = get_image(r_sdss)\n img_cut_r = img_r[interval[0]:interval[1], interval[2]:interval[3]]\n cx = cubehelix.cmap(reverse=True, start=0., rot=-0.5)\n imgplot = plt.imshow(100*np.log10(img_cut_r/255), cmap='spectral')\n titulo='Galaxy #%s - banda r' %(df_cat['num'][i_object])\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/galaxy_#%s' %df_cat['num'][i_object]\n plt.savefig(figura)\n '''\n Imagem segmentada da galaxia, na banda r.\n '''\n plt.figure(1)\n plt.clf()\n cx = cubehelix.cmap(reverse=True, start=0., rot=-0.5)\n imgplot = plt.imshow(seg_sex, cmap='spectral')\n titulo='Segmentation Galaxy #%s - banda r' %(df_cat['num'][i_object])\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/seg_galaxy_#%s' %df_cat['num'][i_object]\n plt.savefig(figura)\n\n '''\n ================================================================================\n Salvando os fluxos de cada galaxia em um arquivo txt\n ================================================================================\n '''\n saida_fluxes = 'data/all_band_fluxes_%s.txt' %df_cat['num'][i_object]\n formats=['%d','%d','%5.4f','%5.4f','%5.4f','%5.4f','%5.4f']\n headers2='x\\ty\\tu\\tg\\tr\\ti\\tz'\n np.savetxt(saida_fluxes,df, delimiter='\\t',header=headers2, fmt = formats)\n print('')\n print('>> Os dados estao em: \"%s\".' 
%saida_fluxes)\n\n '''\n ================================================================================\n Subtraindo o ceu, na banda r\n ================================================================================\n '''\n df_aux=df.ix[:,2:]\n df_aux1=df.ix[:,:2]\n df_sky_aux = df_sky.ix[:,2:]\n df_aux3 = (df_aux - df_sky_aux.mean())\n df_rss=df_aux1.join(df_aux3)\n\n \"\"\"\n A segmentacao consiste de usar um limiar para separar o objeto do fundo.\n No nosso caso, usamos limiar = alpha*std_ceu\n \"\"\"\n '''\n ================================================================================\n SEGMENTACAO\n ================================================================================\n '''\n #SELECAO DOS PIXEIS ACIMA DO LIMIAR\n limiar = 2.5*df_sky.r.std()\n df_seg = df_rss.ix[df_rss['r'] > limiar]\n print('Pixeis acima do limiar: %d' %len(df_seg))\n np.savetxt('fof2.txt',df_seg,delimiter='\\t')\n\n\n\n\nfim = time.time()\ntime_proc = fim - ini\nprint('')\nprint(bcolors.HEADER + 'tempo de processamento: %fs' %time_proc + bcolors.ENDC)\n",
"import pandas as pd\nimport numpy as np\nimport datetime\nimport time\nfrom sys import exit\nfrom matplotlib import colors, pyplot as plt\nfrom functools import reduce\nimport matplotlib.cm as cm\nimport seaborn as sns\nfrom astropy.io import ascii, fits\nfrom astropy.wcs import wcs\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\nfrom scipy.interpolate import interp2d\nimport matplotlib.mlab as mlab\nimport scipy, pylab\nimport rpy2\nimport cubehelix\nimport math\nfrom pysextractor import SExtractor\n__author__ = 'pnovais'\nini = time.time()\n\n\nclass bcolors:\n HEADER = '\\x1b[95m'\n OKBLUE = '\\x1b[94m'\n OKGREEN = '\\x1b[92m'\n WARNING = '\\x1b[93m'\n FAIL = '\\x1b[91m'\n ENDC = '\\x1b[0m'\n BOLD = '\\x1b[1m'\n UNDERLINE = '\\x1b[4m'\n\n\ndef get_image(f_sdss):\n img = f_sdss[0].data\n return img\n\n\ndf_fit = pd.read_csv('data/arquivo_fits.csv')\n<docstring token>\nfname = 'data/frame-r-002507-4-0226.fits'\nsex = SExtractor()\nsex.config['PARAMETERS_LIST'].append('FLUX_ISO')\nsex.config['PARAMETERS_LIST'].append('MAG_ISOCOR')\nsex.config['PARAMETERS_LIST'].append('MAG_AUTO')\nsex.config['PARAMETERS_LIST'].append('PETRO_RADIUS')\nsex.config['PARAMETERS_LIST'].append('ISOAREA_IMAGE')\nsex.config['PARAMETERS_LIST'].append('ALPHA_J2000')\nsex.config['PARAMETERS_LIST'].append('DELTA_J2000')\nsex.config['PARAMETERS_LIST'].append('FWHM_WORLD')\nsex.config['PARAMETERS_LIST'].append('CLASS_STAR')\nsex.config['CHECKIMAGE_TYPE'] = 'SEGMENTATION'\nsex.run(fname)\nsegmap = fits.open('check.fits')[0].data\ndf_cat = pd.read_table('py-sextractor.cat', delim_whitespace=True, header=16)\ndf_cat.columns = ['num', 'flux_best', 'fluxerr_best', 'x', 'y', 'flags',\n 'fwhm_image', 'flux_iso', 'mag_isocor', 'mag_auto', 'petro_radius',\n 'ISO_AREA', 'ra', 'dec', 'fwhm_world', 'class_star']\ndf_cat = df_cat.ix[(df_cat['fwhm_image'] > 4.5) & (df_cat['mag_auto'] < -7)]\ndf_cat = df_cat.reset_index()\ndf_cat = df_cat.ix[:, 1:15]\n<docstring token>\ndf = pd.DataFrame()\ndf_sky = pd.DataFrame()\nfor i_object in range(13, 14):\n window_size = 250\n filter_seg = 'rSDSS'\n ra = df_cat['ra']\n dec = df_cat['dec']\n image_r = fits.open('data/frame-r-002507-4-0226.fits')\n wcsys = wcs.WCS(header=image_r[0].header)\n y, x = wcsys.wcs_world2pix(ra, dec, 1)\n interval = int(round(x[i_object] - window_size / 2)), int(round(x[\n i_object] + window_size / 2)), int(round(y[i_object] - window_size / 2)\n ), int(round(y[i_object] + window_size / 2))\n df = pd.DataFrame()\n df_sky = pd.DataFrame()\n seg_sex = segmap[interval[0]:interval[1], interval[2]:interval[3]]\n for i_gal in range(len(df_fit)):\n f_sdss = fits.open('data/frame-%s-%s' % (df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n img = get_image(f_sdss)\n img_cut = img[interval[0]:interval[1], interval[2]:interval[3]]\n plt.figure(1)\n plt.clf()\n plt.imshow(100 * np.log10(img_cut / 255), cmap='spectral')\n plt.colorbar()\n band = df_fit['filter'][i_gal]\n nrows, ncols = img_cut.shape\n xx, yy = np.meshgrid(*np.ogrid[:ncols, :nrows])\n table = np.column_stack((xx.flatten(), yy.flatten(), img_cut.flatten())\n )\n temp = pd.DataFrame(table, columns=['x', 'y', band])\n df = pd.concat([df, temp], axis=1)\n sky_r = fits.open('data/frame-%s-%s' % (df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n sky = get_image(sky_r)\n wcsys = wcs.WCS(header=sky_r[0].header)\n yc, xc = wcsys.wcs_world2pix(351.101, 14.737636, 1)\n delta_x = 85\n delta_y = 85\n interval_sky = int(round(xc - delta_x / 2)), int(round(xc + delta_x /\n 2)), 
int(round(yc - delta_y / 2)), int(round(yc + delta_y / 2))\n img_sky = sky[interval_sky[0]:interval_sky[1], interval_sky[2]:\n interval_sky[3]]\n sky_nrows, sky_ncols = img_sky.shape\n xxc, yyc = np.meshgrid(*np.ogrid[:sky_ncols, :sky_nrows])\n table_sky = np.column_stack((xxc.flatten(), yyc.flatten(), img_sky.\n flatten()))\n temp_sky = pd.DataFrame(table_sky, columns=['x', 'y', band])\n df_sky = pd.concat([df_sky, temp_sky], axis=1)\n df = df.ix[:, [0, 1, 2, 5, 8, 11, 14]]\n df_sky = df_sky.ix[:, [0, 1, 2, 5, 8, 11, 14]]\n \"\"\"\n Imagem da galaxia, na banda r.\n \"\"\"\n plt.figure(1)\n plt.clf()\n r_sdss = fits.open('data/frame-r-%s' % df_fit['name'][i_gal])\n img_r = get_image(r_sdss)\n img_cut_r = img_r[interval[0]:interval[1], interval[2]:interval[3]]\n cx = cubehelix.cmap(reverse=True, start=0.0, rot=-0.5)\n imgplot = plt.imshow(100 * np.log10(img_cut_r / 255), cmap='spectral')\n titulo = 'Galaxy #%s - banda r' % df_cat['num'][i_object]\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/galaxy_#%s' % df_cat['num'][i_object]\n plt.savefig(figura)\n \"\"\"\n Imagem segmentada da galaxia, na banda r.\n \"\"\"\n plt.figure(1)\n plt.clf()\n cx = cubehelix.cmap(reverse=True, start=0.0, rot=-0.5)\n imgplot = plt.imshow(seg_sex, cmap='spectral')\n titulo = 'Segmentation Galaxy #%s - banda r' % df_cat['num'][i_object]\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/seg_galaxy_#%s' % df_cat['num'][i_object]\n plt.savefig(figura)\n \"\"\"\n ================================================================================\n Salvando os fluxos de cada galaxia em um arquivo txt\n ================================================================================\n \"\"\"\n saida_fluxes = 'data/all_band_fluxes_%s.txt' % df_cat['num'][i_object]\n formats = ['%d', '%d', '%5.4f', '%5.4f', '%5.4f', '%5.4f', '%5.4f']\n headers2 = 'x\\ty\\tu\\tg\\tr\\ti\\tz'\n np.savetxt(saida_fluxes, df, delimiter='\\t', header=headers2, fmt=formats)\n print('')\n print('>> Os dados estao em: \"%s\".' % saida_fluxes)\n \"\"\"\n ================================================================================\n Subtraindo o ceu, na banda r\n ================================================================================\n \"\"\"\n df_aux = df.ix[:, 2:]\n df_aux1 = df.ix[:, :2]\n df_sky_aux = df_sky.ix[:, 2:]\n df_aux3 = df_aux - df_sky_aux.mean()\n df_rss = df_aux1.join(df_aux3)\n \"\"\"\n A segmentacao consiste de usar um limiar para separar o objeto do fundo.\n No nosso caso, usamos limiar = alpha*std_ceu\n \"\"\"\n \"\"\"\n ================================================================================\n SEGMENTACAO\n ================================================================================\n \"\"\"\n limiar = 2.5 * df_sky.r.std()\n df_seg = df_rss.ix[df_rss['r'] > limiar]\n print('Pixeis acima do limiar: %d' % len(df_seg))\n np.savetxt('fof2.txt', df_seg, delimiter='\\t')\nfim = time.time()\ntime_proc = fim - ini\nprint('')\nprint(bcolors.HEADER + 'tempo de processamento: %fs' % time_proc + bcolors.ENDC\n )\n",
"<import token>\n__author__ = 'pnovais'\nini = time.time()\n\n\nclass bcolors:\n HEADER = '\\x1b[95m'\n OKBLUE = '\\x1b[94m'\n OKGREEN = '\\x1b[92m'\n WARNING = '\\x1b[93m'\n FAIL = '\\x1b[91m'\n ENDC = '\\x1b[0m'\n BOLD = '\\x1b[1m'\n UNDERLINE = '\\x1b[4m'\n\n\ndef get_image(f_sdss):\n img = f_sdss[0].data\n return img\n\n\ndf_fit = pd.read_csv('data/arquivo_fits.csv')\n<docstring token>\nfname = 'data/frame-r-002507-4-0226.fits'\nsex = SExtractor()\nsex.config['PARAMETERS_LIST'].append('FLUX_ISO')\nsex.config['PARAMETERS_LIST'].append('MAG_ISOCOR')\nsex.config['PARAMETERS_LIST'].append('MAG_AUTO')\nsex.config['PARAMETERS_LIST'].append('PETRO_RADIUS')\nsex.config['PARAMETERS_LIST'].append('ISOAREA_IMAGE')\nsex.config['PARAMETERS_LIST'].append('ALPHA_J2000')\nsex.config['PARAMETERS_LIST'].append('DELTA_J2000')\nsex.config['PARAMETERS_LIST'].append('FWHM_WORLD')\nsex.config['PARAMETERS_LIST'].append('CLASS_STAR')\nsex.config['CHECKIMAGE_TYPE'] = 'SEGMENTATION'\nsex.run(fname)\nsegmap = fits.open('check.fits')[0].data\ndf_cat = pd.read_table('py-sextractor.cat', delim_whitespace=True, header=16)\ndf_cat.columns = ['num', 'flux_best', 'fluxerr_best', 'x', 'y', 'flags',\n 'fwhm_image', 'flux_iso', 'mag_isocor', 'mag_auto', 'petro_radius',\n 'ISO_AREA', 'ra', 'dec', 'fwhm_world', 'class_star']\ndf_cat = df_cat.ix[(df_cat['fwhm_image'] > 4.5) & (df_cat['mag_auto'] < -7)]\ndf_cat = df_cat.reset_index()\ndf_cat = df_cat.ix[:, 1:15]\n<docstring token>\ndf = pd.DataFrame()\ndf_sky = pd.DataFrame()\nfor i_object in range(13, 14):\n window_size = 250\n filter_seg = 'rSDSS'\n ra = df_cat['ra']\n dec = df_cat['dec']\n image_r = fits.open('data/frame-r-002507-4-0226.fits')\n wcsys = wcs.WCS(header=image_r[0].header)\n y, x = wcsys.wcs_world2pix(ra, dec, 1)\n interval = int(round(x[i_object] - window_size / 2)), int(round(x[\n i_object] + window_size / 2)), int(round(y[i_object] - window_size / 2)\n ), int(round(y[i_object] + window_size / 2))\n df = pd.DataFrame()\n df_sky = pd.DataFrame()\n seg_sex = segmap[interval[0]:interval[1], interval[2]:interval[3]]\n for i_gal in range(len(df_fit)):\n f_sdss = fits.open('data/frame-%s-%s' % (df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n img = get_image(f_sdss)\n img_cut = img[interval[0]:interval[1], interval[2]:interval[3]]\n plt.figure(1)\n plt.clf()\n plt.imshow(100 * np.log10(img_cut / 255), cmap='spectral')\n plt.colorbar()\n band = df_fit['filter'][i_gal]\n nrows, ncols = img_cut.shape\n xx, yy = np.meshgrid(*np.ogrid[:ncols, :nrows])\n table = np.column_stack((xx.flatten(), yy.flatten(), img_cut.flatten())\n )\n temp = pd.DataFrame(table, columns=['x', 'y', band])\n df = pd.concat([df, temp], axis=1)\n sky_r = fits.open('data/frame-%s-%s' % (df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n sky = get_image(sky_r)\n wcsys = wcs.WCS(header=sky_r[0].header)\n yc, xc = wcsys.wcs_world2pix(351.101, 14.737636, 1)\n delta_x = 85\n delta_y = 85\n interval_sky = int(round(xc - delta_x / 2)), int(round(xc + delta_x /\n 2)), int(round(yc - delta_y / 2)), int(round(yc + delta_y / 2))\n img_sky = sky[interval_sky[0]:interval_sky[1], interval_sky[2]:\n interval_sky[3]]\n sky_nrows, sky_ncols = img_sky.shape\n xxc, yyc = np.meshgrid(*np.ogrid[:sky_ncols, :sky_nrows])\n table_sky = np.column_stack((xxc.flatten(), yyc.flatten(), img_sky.\n flatten()))\n temp_sky = pd.DataFrame(table_sky, columns=['x', 'y', band])\n df_sky = pd.concat([df_sky, temp_sky], axis=1)\n df = df.ix[:, [0, 1, 2, 5, 8, 11, 14]]\n df_sky = df_sky.ix[:, [0, 1, 2, 5, 8, 11, 14]]\n \"\"\"\n 
Imagem da galaxia, na banda r.\n \"\"\"\n plt.figure(1)\n plt.clf()\n r_sdss = fits.open('data/frame-r-%s' % df_fit['name'][i_gal])\n img_r = get_image(r_sdss)\n img_cut_r = img_r[interval[0]:interval[1], interval[2]:interval[3]]\n cx = cubehelix.cmap(reverse=True, start=0.0, rot=-0.5)\n imgplot = plt.imshow(100 * np.log10(img_cut_r / 255), cmap='spectral')\n titulo = 'Galaxy #%s - banda r' % df_cat['num'][i_object]\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/galaxy_#%s' % df_cat['num'][i_object]\n plt.savefig(figura)\n \"\"\"\n Imagem segmentada da galaxia, na banda r.\n \"\"\"\n plt.figure(1)\n plt.clf()\n cx = cubehelix.cmap(reverse=True, start=0.0, rot=-0.5)\n imgplot = plt.imshow(seg_sex, cmap='spectral')\n titulo = 'Segmentation Galaxy #%s - banda r' % df_cat['num'][i_object]\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/seg_galaxy_#%s' % df_cat['num'][i_object]\n plt.savefig(figura)\n \"\"\"\n ================================================================================\n Salvando os fluxos de cada galaxia em um arquivo txt\n ================================================================================\n \"\"\"\n saida_fluxes = 'data/all_band_fluxes_%s.txt' % df_cat['num'][i_object]\n formats = ['%d', '%d', '%5.4f', '%5.4f', '%5.4f', '%5.4f', '%5.4f']\n headers2 = 'x\\ty\\tu\\tg\\tr\\ti\\tz'\n np.savetxt(saida_fluxes, df, delimiter='\\t', header=headers2, fmt=formats)\n print('')\n print('>> Os dados estao em: \"%s\".' % saida_fluxes)\n \"\"\"\n ================================================================================\n Subtraindo o ceu, na banda r\n ================================================================================\n \"\"\"\n df_aux = df.ix[:, 2:]\n df_aux1 = df.ix[:, :2]\n df_sky_aux = df_sky.ix[:, 2:]\n df_aux3 = df_aux - df_sky_aux.mean()\n df_rss = df_aux1.join(df_aux3)\n \"\"\"\n A segmentacao consiste de usar um limiar para separar o objeto do fundo.\n No nosso caso, usamos limiar = alpha*std_ceu\n \"\"\"\n \"\"\"\n ================================================================================\n SEGMENTACAO\n ================================================================================\n \"\"\"\n limiar = 2.5 * df_sky.r.std()\n df_seg = df_rss.ix[df_rss['r'] > limiar]\n print('Pixeis acima do limiar: %d' % len(df_seg))\n np.savetxt('fof2.txt', df_seg, delimiter='\\t')\nfim = time.time()\ntime_proc = fim - ini\nprint('')\nprint(bcolors.HEADER + 'tempo de processamento: %fs' % time_proc + bcolors.ENDC\n )\n",
"<import token>\n<assignment token>\n\n\nclass bcolors:\n HEADER = '\\x1b[95m'\n OKBLUE = '\\x1b[94m'\n OKGREEN = '\\x1b[92m'\n WARNING = '\\x1b[93m'\n FAIL = '\\x1b[91m'\n ENDC = '\\x1b[0m'\n BOLD = '\\x1b[1m'\n UNDERLINE = '\\x1b[4m'\n\n\ndef get_image(f_sdss):\n img = f_sdss[0].data\n return img\n\n\n<assignment token>\n<docstring token>\n<assignment token>\nsex.config['PARAMETERS_LIST'].append('FLUX_ISO')\nsex.config['PARAMETERS_LIST'].append('MAG_ISOCOR')\nsex.config['PARAMETERS_LIST'].append('MAG_AUTO')\nsex.config['PARAMETERS_LIST'].append('PETRO_RADIUS')\nsex.config['PARAMETERS_LIST'].append('ISOAREA_IMAGE')\nsex.config['PARAMETERS_LIST'].append('ALPHA_J2000')\nsex.config['PARAMETERS_LIST'].append('DELTA_J2000')\nsex.config['PARAMETERS_LIST'].append('FWHM_WORLD')\nsex.config['PARAMETERS_LIST'].append('CLASS_STAR')\n<assignment token>\nsex.run(fname)\n<assignment token>\n<docstring token>\n<assignment token>\nfor i_object in range(13, 14):\n window_size = 250\n filter_seg = 'rSDSS'\n ra = df_cat['ra']\n dec = df_cat['dec']\n image_r = fits.open('data/frame-r-002507-4-0226.fits')\n wcsys = wcs.WCS(header=image_r[0].header)\n y, x = wcsys.wcs_world2pix(ra, dec, 1)\n interval = int(round(x[i_object] - window_size / 2)), int(round(x[\n i_object] + window_size / 2)), int(round(y[i_object] - window_size / 2)\n ), int(round(y[i_object] + window_size / 2))\n df = pd.DataFrame()\n df_sky = pd.DataFrame()\n seg_sex = segmap[interval[0]:interval[1], interval[2]:interval[3]]\n for i_gal in range(len(df_fit)):\n f_sdss = fits.open('data/frame-%s-%s' % (df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n img = get_image(f_sdss)\n img_cut = img[interval[0]:interval[1], interval[2]:interval[3]]\n plt.figure(1)\n plt.clf()\n plt.imshow(100 * np.log10(img_cut / 255), cmap='spectral')\n plt.colorbar()\n band = df_fit['filter'][i_gal]\n nrows, ncols = img_cut.shape\n xx, yy = np.meshgrid(*np.ogrid[:ncols, :nrows])\n table = np.column_stack((xx.flatten(), yy.flatten(), img_cut.flatten())\n )\n temp = pd.DataFrame(table, columns=['x', 'y', band])\n df = pd.concat([df, temp], axis=1)\n sky_r = fits.open('data/frame-%s-%s' % (df_fit['filter'][i_gal],\n df_fit['name'][i_gal]))\n sky = get_image(sky_r)\n wcsys = wcs.WCS(header=sky_r[0].header)\n yc, xc = wcsys.wcs_world2pix(351.101, 14.737636, 1)\n delta_x = 85\n delta_y = 85\n interval_sky = int(round(xc - delta_x / 2)), int(round(xc + delta_x /\n 2)), int(round(yc - delta_y / 2)), int(round(yc + delta_y / 2))\n img_sky = sky[interval_sky[0]:interval_sky[1], interval_sky[2]:\n interval_sky[3]]\n sky_nrows, sky_ncols = img_sky.shape\n xxc, yyc = np.meshgrid(*np.ogrid[:sky_ncols, :sky_nrows])\n table_sky = np.column_stack((xxc.flatten(), yyc.flatten(), img_sky.\n flatten()))\n temp_sky = pd.DataFrame(table_sky, columns=['x', 'y', band])\n df_sky = pd.concat([df_sky, temp_sky], axis=1)\n df = df.ix[:, [0, 1, 2, 5, 8, 11, 14]]\n df_sky = df_sky.ix[:, [0, 1, 2, 5, 8, 11, 14]]\n \"\"\"\n Imagem da galaxia, na banda r.\n \"\"\"\n plt.figure(1)\n plt.clf()\n r_sdss = fits.open('data/frame-r-%s' % df_fit['name'][i_gal])\n img_r = get_image(r_sdss)\n img_cut_r = img_r[interval[0]:interval[1], interval[2]:interval[3]]\n cx = cubehelix.cmap(reverse=True, start=0.0, rot=-0.5)\n imgplot = plt.imshow(100 * np.log10(img_cut_r / 255), cmap='spectral')\n titulo = 'Galaxy #%s - banda r' % df_cat['num'][i_object]\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/galaxy_#%s' % df_cat['num'][i_object]\n plt.savefig(figura)\n \"\"\"\n Imagem segmentada da galaxia, na 
banda r.\n \"\"\"\n plt.figure(1)\n plt.clf()\n cx = cubehelix.cmap(reverse=True, start=0.0, rot=-0.5)\n imgplot = plt.imshow(seg_sex, cmap='spectral')\n titulo = 'Segmentation Galaxy #%s - banda r' % df_cat['num'][i_object]\n plt.title(titulo)\n plt.colorbar()\n figura = 'figures/seg_galaxy_#%s' % df_cat['num'][i_object]\n plt.savefig(figura)\n \"\"\"\n ================================================================================\n Salvando os fluxos de cada galaxia em um arquivo txt\n ================================================================================\n \"\"\"\n saida_fluxes = 'data/all_band_fluxes_%s.txt' % df_cat['num'][i_object]\n formats = ['%d', '%d', '%5.4f', '%5.4f', '%5.4f', '%5.4f', '%5.4f']\n headers2 = 'x\\ty\\tu\\tg\\tr\\ti\\tz'\n np.savetxt(saida_fluxes, df, delimiter='\\t', header=headers2, fmt=formats)\n print('')\n print('>> Os dados estao em: \"%s\".' % saida_fluxes)\n \"\"\"\n ================================================================================\n Subtraindo o ceu, na banda r\n ================================================================================\n \"\"\"\n df_aux = df.ix[:, 2:]\n df_aux1 = df.ix[:, :2]\n df_sky_aux = df_sky.ix[:, 2:]\n df_aux3 = df_aux - df_sky_aux.mean()\n df_rss = df_aux1.join(df_aux3)\n \"\"\"\n A segmentacao consiste de usar um limiar para separar o objeto do fundo.\n No nosso caso, usamos limiar = alpha*std_ceu\n \"\"\"\n \"\"\"\n ================================================================================\n SEGMENTACAO\n ================================================================================\n \"\"\"\n limiar = 2.5 * df_sky.r.std()\n df_seg = df_rss.ix[df_rss['r'] > limiar]\n print('Pixeis acima do limiar: %d' % len(df_seg))\n np.savetxt('fof2.txt', df_seg, delimiter='\\t')\n<assignment token>\nprint('')\nprint(bcolors.HEADER + 'tempo de processamento: %fs' % time_proc + bcolors.ENDC\n )\n",
"<import token>\n<assignment token>\n\n\nclass bcolors:\n HEADER = '\\x1b[95m'\n OKBLUE = '\\x1b[94m'\n OKGREEN = '\\x1b[92m'\n WARNING = '\\x1b[93m'\n FAIL = '\\x1b[91m'\n ENDC = '\\x1b[0m'\n BOLD = '\\x1b[1m'\n UNDERLINE = '\\x1b[4m'\n\n\ndef get_image(f_sdss):\n img = f_sdss[0].data\n return img\n\n\n<assignment token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass bcolors:\n HEADER = '\\x1b[95m'\n OKBLUE = '\\x1b[94m'\n OKGREEN = '\\x1b[92m'\n WARNING = '\\x1b[93m'\n FAIL = '\\x1b[91m'\n ENDC = '\\x1b[0m'\n BOLD = '\\x1b[1m'\n UNDERLINE = '\\x1b[4m'\n\n\n<function token>\n<assignment token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass bcolors:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\n<function token>\n<assignment token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<function token>\n<assignment token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<docstring token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
971 |
dbe3aa107de8e62822803d1740773a4b22f41edf
|
import sys, os
sys.path.append(os.pardir)
import numpy as np
from dataset.mnist import load_mnist
from two_layer_net import TwoLayerNet
(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label = True)
train_loss_list = []
# hyperparameters
iters_num = 1000
train_size = x_train.shape[0]
batch_size = 100
learning_rate = 0.1
network = TwoLayerNet(input_size = 784, hidden_size=50, output_size=10)
for i in range(iters_num):
print(i)
    # sample a mini-batch
batch_mask = np.random.choice(train_size, batch_size)
x_batch = x_train[batch_mask]
t_batch = t_train[batch_mask]
    # compute the gradient
    grad = network.gradient(x_batch, t_batch)
    # update the parameters
    for key in ('W1', 'b1', 'W2', 'b2'):
        network.params[key] -= learning_rate * grad[key]
    # record the loss
loss = network.loss(x_batch, t_batch)
train_loss_list.append(loss)
print("{} : {}".format(i, train_loss_list[i]))
print(train_loss_list)
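# A hedged extension sketch (an addition, not in the original): track accuracy
# once per epoch of iterations. It assumes TwoLayerNet exposes accuracy(x, t),
# as in the usual two-layer-net implementation this script imports from.
train_acc_list = []
test_acc_list = []
iter_per_epoch = max(train_size / batch_size, 1)

# inside the training loop above, one could add:
#     if i % iter_per_epoch == 0:
#         train_acc_list.append(network.accuracy(x_train, t_train))
#         test_acc_list.append(network.accuracy(x_test, t_test))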
|
[
"import sys, os\nsys.path.append(os.pardir)\nimport numpy as np\nfrom dataset.mnist import load_mnist\nfrom two_layer_net import TwoLayerNet\n\n(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True, one_hot_label = True)\n\ntrain_loss_list = []\n\n#hiper param\niters_num = 1000\ntrain_size = x_train.shape[0]\nbatch_size = 100\nlearning_rate = 0.1\n\nnetwork = TwoLayerNet(input_size = 784, hidden_size=50, output_size=10)\nfor i in range(iters_num):\n print(i)\n #get batch\n batch_mask = np.random.choice(train_size, batch_size)\n x_batch = x_train[batch_mask]\n t_batch = t_train[batch_mask]\n\n #calc gradient\n grad = network.gradient(x_batch, t_batch)\n\n #update param\n for key in ('W1', 'b1', 'W2', 'b2'):\n network.params[key] -= learning_rate * grad[key]\n\n\n #recode\n loss = network.loss(x_batch, t_batch)\n train_loss_list.append(loss)\n print(\"{} : {}\".format(i, train_loss_list[i]))\nprint(train_loss_list)\n",
"import sys, os\nsys.path.append(os.pardir)\nimport numpy as np\nfrom dataset.mnist import load_mnist\nfrom two_layer_net import TwoLayerNet\n(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True,\n one_hot_label=True)\ntrain_loss_list = []\niters_num = 1000\ntrain_size = x_train.shape[0]\nbatch_size = 100\nlearning_rate = 0.1\nnetwork = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)\nfor i in range(iters_num):\n print(i)\n batch_mask = np.random.choice(train_size, batch_size)\n x_batch = x_train[batch_mask]\n t_batch = t_train[batch_mask]\n grad = network.gradient(x_batch, t_batch)\n for key in ('W1', 'b1', 'W2', 'b2'):\n network.params[key] -= learning_rate * grad[key]\n loss = network.loss(x_batch, t_batch)\n train_loss_list.append(loss)\n print('{} : {}'.format(i, train_loss_list[i]))\nprint(train_loss_list)\n",
"<import token>\nsys.path.append(os.pardir)\n<import token>\n(x_train, t_train), (x_test, t_test) = load_mnist(normalize=True,\n one_hot_label=True)\ntrain_loss_list = []\niters_num = 1000\ntrain_size = x_train.shape[0]\nbatch_size = 100\nlearning_rate = 0.1\nnetwork = TwoLayerNet(input_size=784, hidden_size=50, output_size=10)\nfor i in range(iters_num):\n print(i)\n batch_mask = np.random.choice(train_size, batch_size)\n x_batch = x_train[batch_mask]\n t_batch = t_train[batch_mask]\n grad = network.gradient(x_batch, t_batch)\n for key in ('W1', 'b1', 'W2', 'b2'):\n network.params[key] -= learning_rate * grad[key]\n loss = network.loss(x_batch, t_batch)\n train_loss_list.append(loss)\n print('{} : {}'.format(i, train_loss_list[i]))\nprint(train_loss_list)\n",
"<import token>\nsys.path.append(os.pardir)\n<import token>\n<assignment token>\nfor i in range(iters_num):\n print(i)\n batch_mask = np.random.choice(train_size, batch_size)\n x_batch = x_train[batch_mask]\n t_batch = t_train[batch_mask]\n grad = network.gradient(x_batch, t_batch)\n for key in ('W1', 'b1', 'W2', 'b2'):\n network.params[key] -= learning_rate * grad[key]\n loss = network.loss(x_batch, t_batch)\n train_loss_list.append(loss)\n print('{} : {}'.format(i, train_loss_list[i]))\nprint(train_loss_list)\n",
"<import token>\n<code token>\n<import token>\n<assignment token>\n<code token>\n"
] | false |
972 |
3f9be81c86852a758440c6a144b8caba736b3868
|
# Generated by Django 3.1.7 on 2021-02-20 02:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('usuarios', '0001_initial'),
('plataforma', '0005_auto_20210219_2343'),
]
operations = [
migrations.AlterField(
model_name='plataforma',
name='usuario',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='usuarios.usuario'),
),
]
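# A hedged sketch (an assumption, not part of the migration itself) of the
# model field this AlterField migration would leave on the Plataforma model:
#
# from django.db import models
#
# class Plataforma(models.Model):
#     usuario = models.ForeignKey('usuarios.Usuario', on_delete=models.CASCADE)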
|
[
"# Generated by Django 3.1.7 on 2021-02-20 02:52\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('usuarios', '0001_initial'),\n ('plataforma', '0005_auto_20210219_2343'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='plataforma',\n name='usuario',\n field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='usuarios.usuario'),\n ),\n ]\n",
"from django.db import migrations, models\nimport django.db.models.deletion\n\n\nclass Migration(migrations.Migration):\n dependencies = [('usuarios', '0001_initial'), ('plataforma',\n '0005_auto_20210219_2343')]\n operations = [migrations.AlterField(model_name='plataforma', name=\n 'usuario', field=models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, to='usuarios.usuario'))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('usuarios', '0001_initial'), ('plataforma',\n '0005_auto_20210219_2343')]\n operations = [migrations.AlterField(model_name='plataforma', name=\n 'usuario', field=models.ForeignKey(on_delete=django.db.models.\n deletion.CASCADE, to='usuarios.usuario'))]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
973 |
e06b740f27e41b9f120c962fd76a38a29d54af3c
|
from more_itertools import ilen
from my.body import weight, shower, food, water
def test_body() -> None:
for func in (weight, shower, food, water):
assert ilen(func()) >= 1
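
# A minimal alternative sketch (hypothetical, not part of the original
# record): the same ">= 1 entries" check written with
# pytest.mark.parametrize, so each provider fails independently and is
# reported by name. Assumes the same `my.body` providers imported above.
import pytest


@pytest.mark.parametrize('func', [weight, shower, food, water])
def test_body_each(func) -> None:
    assert ilen(func()) >= 1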
|
[
"from more_itertools import ilen\n\nfrom my.body import weight, shower, food, water\n\n\ndef test_body() -> None:\n\n for func in (weight, shower, food, water):\n assert ilen(func()) >= 1\n",
"from more_itertools import ilen\nfrom my.body import weight, shower, food, water\n\n\ndef test_body() ->None:\n for func in (weight, shower, food, water):\n assert ilen(func()) >= 1\n",
"<import token>\n\n\ndef test_body() ->None:\n for func in (weight, shower, food, water):\n assert ilen(func()) >= 1\n",
"<import token>\n<function token>\n"
] | false |
974 |
1fd4d1a44270ef29512e601af737accb916dc441
|
from estmd import ESTMD
input_directory = "test.avi"
e = ESTMD()
e.open_movie(input_directory)
e.run(by_frame=True)
r = e.create_list_of_arrays()
print "Done testing!"
|
[
"from estmd import ESTMD\n\ninput_directory = \"test.avi\"\ne = ESTMD()\ne.open_movie(input_directory)\ne.run(by_frame=True)\nr = e.create_list_of_arrays()\n\nprint \"Done testing!\"\n"
] | true |
975 |
c7c412fe4e2d53af1b4f2a55bd3453496767890d
|
from time import sleep
import pytest
import allure
from app.debug_api import DebugAPI
from app.check_api import HandlersAPI
from locators.movies_details_locators import MoviesDetailsPageLocators
from locators.movies_locators import MoviesPageLocators
from locators.shedule_locators import ShedulePageLocators
from screens.MoviesPage import MoviesPage
from screens.MoviesDetailsPage import MoviesDetailsPage
from screens.ShedulePage import ShedulePage
from utils.internet import enable_proxy
@pytest.mark.usefixtures('driver')
class Test_001_ShedulePage:
@classmethod
def setup_class(cls):
cls.movies_locators = MoviesPageLocators()
cls.shedule_locators = ShedulePageLocators()
cls.event_detail_page_locators = MoviesDetailsPageLocators()
    @classmethod
    def teardown_class(cls):
enable_proxy(mode=False)
def test_001_elements_exists(self, driver):
"""тапнуть на фичерс,
тапнуть на смотреть расписание,
найти кнопку отмены, кнопку карты, поле поиска"""
with allure.step('MoviesPage'):
self.movie_page = MoviesPage(driver)
self.movie_page.set_custom_wait(10)
self.movie_page.act.click_by_coords(50, 30)
with allure.step('EventDetailsPage'):
self.event_detail_page = MoviesDetailsPage(driver)
self.event_detail_page.set_custom_wait(10)
self.event_detail_page.click(*self.event_detail_page_locators.btn_view_timetable)
with allure.step('ShedulePage'):
self.shedule_page = ShedulePage(driver)
self.shedule_page.set_custom_wait(10)
self.shedule_page.find_element(*self.shedule_locators.btn_back)
self.shedule_page.find_element(*self.shedule_locators.btn_map)
self.shedule_page.find_element(*self.shedule_locators.search_field)
def test_002_valid_filters(self, driver):
"""тапнуть на фичерс,
тапнуть на смотреть расписание,
проверить соответствие фильтров и ответа сервера
проверить порядок фильтров"""
dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.url_creations_movie_schedule_filter)
try:
with allure.step('MoviesPage'):
self.movie_page = MoviesPage(driver)
self.movie_page.set_custom_wait(10)
sleep(5)
self.movie_page.act.click_by_coords(50, 30)
with allure.step('EventDetailsPage'):
self.event_detail_page = MoviesDetailsPage(driver)
self.event_detail_page.set_custom_wait(10)
sleep(5)
self.event_detail_page.click(*self.event_detail_page_locators.btn_view_timetable)
with allure.step('ShedulePage'):
self.shedule_page = ShedulePage(driver)
self.shedule_page.set_custom_wait(10)
sleep(5)
self.shedule_page.check_rows_filters(dbg_api)
finally:
dbg_api.kill()
def test_003_check_time_ticket_filter(self, driver):
"""тапнуть на фичерс,
тапнуть на смотреть расписание,
проверять соответствие времени на билетах с выставленными фильтрами"""
dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.url_creations_movie_schedule_filter)
try:
with allure.step('MoviesPage'):
self.movie_page = MoviesPage(driver)
self.movie_page.set_custom_wait(10)
sleep(10)
self.movie_page.act.click_by_coords(50, 30)
with allure.step('EventDetailsPage'):
self.event_detail_page = MoviesDetailsPage(driver)
self.event_detail_page.set_custom_wait(10)
self.event_detail_page.click(*self.event_detail_page_locators.btn_view_timetable)
with allure.step('ShedulePage'):
self.shedule_page = ShedulePage(driver)
self.shedule_page.set_custom_wait(10)
sleep(2)
self.shedule_page.compare_tickets_datetime_options_second_filter(dbg_api)
finally:
dbg_api.kill()
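
# A hedged refactoring sketch (hypothetical, not part of the original
# suite): every test above repeats the DebugAPI start / try / finally /
# kill dance, which a yield fixture could centralize. Assumes DebugAPI
# and HandlersAPI behave exactly as used above; pytest is already imported.
@pytest.fixture
def schedule_dbg_api():
    api = DebugAPI.run(request=False,
                       mapi_handler=HandlersAPI.url_creations_movie_schedule_filter)
    try:
        yield api  # the test body runs here with the debug session live
    finally:
        api.kill()  # always tear the session down, even on test failure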
|
[
"from time import sleep\nimport pytest\nimport allure\n\nfrom app.debug_api import DebugAPI\nfrom app.check_api import HandlersAPI\nfrom locators.movies_details_locators import MoviesDetailsPageLocators\nfrom locators.movies_locators import MoviesPageLocators\nfrom locators.shedule_locators import ShedulePageLocators\nfrom screens.MoviesPage import MoviesPage\nfrom screens.MoviesDetailsPage import MoviesDetailsPage\nfrom screens.ShedulePage import ShedulePage\nfrom utils.internet import enable_proxy\n\n\[email protected]('driver')\nclass Test_001_ShedulePage:\n @classmethod\n def setup_class(cls):\n cls.movies_locators = MoviesPageLocators()\n cls.shedule_locators = ShedulePageLocators()\n cls.event_detail_page_locators = MoviesDetailsPageLocators()\n\n @staticmethod\n def teardown_class(cls):\n enable_proxy(mode=False)\n\n def test_001_elements_exists(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n найти кнопку отмены, кнопку карты, поле поиска\"\"\"\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n self.shedule_page.find_element(*self.shedule_locators.btn_back)\n self.shedule_page.find_element(*self.shedule_locators.btn_map)\n self.shedule_page.find_element(*self.shedule_locators.search_field)\n\n def test_002_valid_filters(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n проверить соответствие фильтров и ответа сервера\n проверить порядок фильтров\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(5)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n sleep(5)\n self.event_detail_page.click(*self.event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(5)\n self.shedule_page.check_rows_filters(dbg_api)\n finally:\n dbg_api.kill()\n\n def test_003_check_time_ticket_filter(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n проверять соответствие времени на билетах с выставленными фильтрами\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(2)\n self.shedule_page.compare_tickets_datetime_options_second_filter(dbg_api)\n finally:\n dbg_api.kill()\n\n",
"from time import sleep\nimport pytest\nimport allure\nfrom app.debug_api import DebugAPI\nfrom app.check_api import HandlersAPI\nfrom locators.movies_details_locators import MoviesDetailsPageLocators\nfrom locators.movies_locators import MoviesPageLocators\nfrom locators.shedule_locators import ShedulePageLocators\nfrom screens.MoviesPage import MoviesPage\nfrom screens.MoviesDetailsPage import MoviesDetailsPage\nfrom screens.ShedulePage import ShedulePage\nfrom utils.internet import enable_proxy\n\n\[email protected]('driver')\nclass Test_001_ShedulePage:\n\n @classmethod\n def setup_class(cls):\n cls.movies_locators = MoviesPageLocators()\n cls.shedule_locators = ShedulePageLocators()\n cls.event_detail_page_locators = MoviesDetailsPageLocators()\n\n @staticmethod\n def teardown_class(cls):\n enable_proxy(mode=False)\n\n def test_001_elements_exists(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n найти кнопку отмены, кнопку карты, поле поиска\"\"\"\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.event_detail_page_locators.\n btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n self.shedule_page.find_element(*self.shedule_locators.btn_back)\n self.shedule_page.find_element(*self.shedule_locators.btn_map)\n self.shedule_page.find_element(*self.shedule_locators.search_field)\n\n def test_002_valid_filters(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n проверить соответствие фильтров и ответа сервера\n проверить порядок фильтров\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.\n url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(5)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n sleep(5)\n self.event_detail_page.click(*self.\n event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(5)\n self.shedule_page.check_rows_filters(dbg_api)\n finally:\n dbg_api.kill()\n\n def test_003_check_time_ticket_filter(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n проверять соответствие времени на билетах с выставленными фильтрами\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.\n url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.\n event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(2)\n self.shedule_page.compare_tickets_datetime_options_second_filter(\n dbg_api)\n finally:\n dbg_api.kill()\n",
"<import token>\n\n\[email protected]('driver')\nclass Test_001_ShedulePage:\n\n @classmethod\n def setup_class(cls):\n cls.movies_locators = MoviesPageLocators()\n cls.shedule_locators = ShedulePageLocators()\n cls.event_detail_page_locators = MoviesDetailsPageLocators()\n\n @staticmethod\n def teardown_class(cls):\n enable_proxy(mode=False)\n\n def test_001_elements_exists(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n найти кнопку отмены, кнопку карты, поле поиска\"\"\"\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.event_detail_page_locators.\n btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n self.shedule_page.find_element(*self.shedule_locators.btn_back)\n self.shedule_page.find_element(*self.shedule_locators.btn_map)\n self.shedule_page.find_element(*self.shedule_locators.search_field)\n\n def test_002_valid_filters(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n проверить соответствие фильтров и ответа сервера\n проверить порядок фильтров\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.\n url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(5)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n sleep(5)\n self.event_detail_page.click(*self.\n event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(5)\n self.shedule_page.check_rows_filters(dbg_api)\n finally:\n dbg_api.kill()\n\n def test_003_check_time_ticket_filter(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n проверять соответствие времени на билетах с выставленными фильтрами\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.\n url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.\n event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(2)\n self.shedule_page.compare_tickets_datetime_options_second_filter(\n dbg_api)\n finally:\n dbg_api.kill()\n",
"<import token>\n\n\[email protected]('driver')\nclass Test_001_ShedulePage:\n\n @classmethod\n def setup_class(cls):\n cls.movies_locators = MoviesPageLocators()\n cls.shedule_locators = ShedulePageLocators()\n cls.event_detail_page_locators = MoviesDetailsPageLocators()\n\n @staticmethod\n def teardown_class(cls):\n enable_proxy(mode=False)\n\n def test_001_elements_exists(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n найти кнопку отмены, кнопку карты, поле поиска\"\"\"\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.event_detail_page_locators.\n btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n self.shedule_page.find_element(*self.shedule_locators.btn_back)\n self.shedule_page.find_element(*self.shedule_locators.btn_map)\n self.shedule_page.find_element(*self.shedule_locators.search_field)\n\n def test_002_valid_filters(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n проверить соответствие фильтров и ответа сервера\n проверить порядок фильтров\"\"\"\n dbg_api = DebugAPI.run(request=False, mapi_handler=HandlersAPI.\n url_creations_movie_schedule_filter)\n try:\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n sleep(5)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n sleep(5)\n self.event_detail_page.click(*self.\n event_detail_page_locators.btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n sleep(5)\n self.shedule_page.check_rows_filters(dbg_api)\n finally:\n dbg_api.kill()\n <function token>\n",
"<import token>\n\n\[email protected]('driver')\nclass Test_001_ShedulePage:\n\n @classmethod\n def setup_class(cls):\n cls.movies_locators = MoviesPageLocators()\n cls.shedule_locators = ShedulePageLocators()\n cls.event_detail_page_locators = MoviesDetailsPageLocators()\n\n @staticmethod\n def teardown_class(cls):\n enable_proxy(mode=False)\n\n def test_001_elements_exists(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n найти кнопку отмены, кнопку карты, поле поиска\"\"\"\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.event_detail_page_locators.\n btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n self.shedule_page.find_element(*self.shedule_locators.btn_back)\n self.shedule_page.find_element(*self.shedule_locators.btn_map)\n self.shedule_page.find_element(*self.shedule_locators.search_field)\n <function token>\n <function token>\n",
"<import token>\n\n\[email protected]('driver')\nclass Test_001_ShedulePage:\n <function token>\n\n @staticmethod\n def teardown_class(cls):\n enable_proxy(mode=False)\n\n def test_001_elements_exists(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n найти кнопку отмены, кнопку карты, поле поиска\"\"\"\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.event_detail_page_locators.\n btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n self.shedule_page.find_element(*self.shedule_locators.btn_back)\n self.shedule_page.find_element(*self.shedule_locators.btn_map)\n self.shedule_page.find_element(*self.shedule_locators.search_field)\n <function token>\n <function token>\n",
"<import token>\n\n\[email protected]('driver')\nclass Test_001_ShedulePage:\n <function token>\n <function token>\n\n def test_001_elements_exists(self, driver):\n \"\"\"тапнуть на фичерс,\n тапнуть на смотреть расписание,\n найти кнопку отмены, кнопку карты, поле поиска\"\"\"\n with allure.step('MoviesPage'):\n self.movie_page = MoviesPage(driver)\n self.movie_page.set_custom_wait(10)\n self.movie_page.act.click_by_coords(50, 30)\n with allure.step('EventDetailsPage'):\n self.event_detail_page = MoviesDetailsPage(driver)\n self.event_detail_page.set_custom_wait(10)\n self.event_detail_page.click(*self.event_detail_page_locators.\n btn_view_timetable)\n with allure.step('ShedulePage'):\n self.shedule_page = ShedulePage(driver)\n self.shedule_page.set_custom_wait(10)\n self.shedule_page.find_element(*self.shedule_locators.btn_back)\n self.shedule_page.find_element(*self.shedule_locators.btn_map)\n self.shedule_page.find_element(*self.shedule_locators.search_field)\n <function token>\n <function token>\n",
"<import token>\n\n\[email protected]('driver')\nclass Test_001_ShedulePage:\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<import token>\n<class token>\n"
] | false |
976 |
327371d373819273a2f77f63e0cedee6950dbc46
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
##############################################################################
Software Package Risk Analysis Development Environment Specific Work Book View
##############################################################################
"""
#
# rtk.software.__gui.gtk.DevelopmentEnvironment.py is part of The RTK
# Project
#
# All rights reserved.
import sys
# Import modules for localization support.
import gettext
import locale
# Modules required for the GUI.
try:
import pygtk
pygtk.require('2.0')
except ImportError:
sys.exit(1)
try:
import gtk
except ImportError:
sys.exit(1)
try:
import gtk.glade
except ImportError:
sys.exit(1)
# Import other RTK modules.
try:
import Configuration
import gui.gtk.Widgets as Widgets
except ImportError:
import rtk.Configuration as Configuration
import rtk.gui.gtk.Widgets as Widgets
__author__ = 'Andrew Rowland'
__email__ = '[email protected]'
__organization__ = 'ReliaQual Associates, LLC'
__copyright__ = 'Copyright 2007 - 2015 Andrew "weibullguy" Rowland'
try:
locale.setlocale(locale.LC_ALL, Configuration.LOCALE)
except locale.Error:
locale.setlocale(locale.LC_ALL, '')
_ = gettext.gettext
class RiskAnalysis(gtk.VPaned):
"""
The Work Book view for analyzing and displaying the risk associated with
the development environment. The attributes of a development environment
Work Book view are:
:ivar list _lst_handler_id: the list of gtk.Widget() signal handler IDs.
:ivar _software_model: the :py:class:`rtk.software.Software.Model` to
display.
"""
def __init__(self):
"""
Method to initialize the development environment risk analysis
questions Work Book page.
"""
gtk.VPaned.__init__(self)
# Define private dictionary attributes.
# Define private list attributes.
self._lst_handler_id = []
# Define private scalar attributes.
self._software_model = None
# Define public dictionary attributes.
# Define public list attributes.
# Define public scalar attributes.
self.chkDevEnvQ1 = Widgets.make_check_button()
self.chkDevEnvQ2 = Widgets.make_check_button()
self.chkDevEnvQ3 = Widgets.make_check_button()
self.chkDevEnvQ4 = Widgets.make_check_button()
self.chkDevEnvQ5 = Widgets.make_check_button()
self.chkDevEnvQ6 = Widgets.make_check_button()
self.chkDevEnvQ7 = Widgets.make_check_button()
self.chkDevEnvQ8 = Widgets.make_check_button()
self.chkDevEnvQ9 = Widgets.make_check_button()
self.chkDevEnvQ10 = Widgets.make_check_button()
self.chkDevEnvQ11 = Widgets.make_check_button()
self.chkDevEnvQ12 = Widgets.make_check_button()
self.chkDevEnvQ13 = Widgets.make_check_button()
self.chkDevEnvQ14 = Widgets.make_check_button()
self.chkDevEnvQ15 = Widgets.make_check_button()
self.chkDevEnvQ16 = Widgets.make_check_button()
self.chkDevEnvQ17 = Widgets.make_check_button()
self.chkDevEnvQ18 = Widgets.make_check_button()
self.chkDevEnvQ19 = Widgets.make_check_button()
self.chkDevEnvQ20 = Widgets.make_check_button()
self.chkDevEnvQ21 = Widgets.make_check_button()
self.chkDevEnvQ22 = Widgets.make_check_button()
self.chkDevEnvQ23 = Widgets.make_check_button()
self.chkDevEnvQ24 = Widgets.make_check_button()
self.chkDevEnvQ25 = Widgets.make_check_button()
self.chkDevEnvQ26 = Widgets.make_check_button()
self.chkDevEnvQ27 = Widgets.make_check_button()
self.chkDevEnvQ28 = Widgets.make_check_button()
self.chkDevEnvQ29 = Widgets.make_check_button()
self.chkDevEnvQ30 = Widgets.make_check_button()
self.chkDevEnvQ31 = Widgets.make_check_button()
self.chkDevEnvQ32 = Widgets.make_check_button()
self.chkDevEnvQ33 = Widgets.make_check_button()
self.chkDevEnvQ34 = Widgets.make_check_button()
self.chkDevEnvQ35 = Widgets.make_check_button()
self.chkDevEnvQ36 = Widgets.make_check_button()
self.chkDevEnvQ37 = Widgets.make_check_button()
self.chkDevEnvQ38 = Widgets.make_check_button()
self.chkDevEnvQ39 = Widgets.make_check_button()
self.chkDevEnvQ40 = Widgets.make_check_button()
self.chkDevEnvQ41 = Widgets.make_check_button()
self.chkDevEnvQ42 = Widgets.make_check_button()
self.chkDevEnvQ43 = Widgets.make_check_button()
# Connect gtk.Widget() signals to callback methods.
self._lst_handler_id.append(
self.chkDevEnvQ1.connect('toggled', self._on_toggled, 0))
self._lst_handler_id.append(
self.chkDevEnvQ2.connect('toggled', self._on_toggled, 1))
self._lst_handler_id.append(
self.chkDevEnvQ3.connect('toggled', self._on_toggled, 2))
self._lst_handler_id.append(
self.chkDevEnvQ4.connect('toggled', self._on_toggled, 3))
self._lst_handler_id.append(
self.chkDevEnvQ5.connect('toggled', self._on_toggled, 4))
self._lst_handler_id.append(
self.chkDevEnvQ6.connect('toggled', self._on_toggled, 5))
self._lst_handler_id.append(
self.chkDevEnvQ7.connect('toggled', self._on_toggled, 6))
self._lst_handler_id.append(
self.chkDevEnvQ8.connect('toggled', self._on_toggled, 7))
self._lst_handler_id.append(
self.chkDevEnvQ9.connect('toggled', self._on_toggled, 8))
self._lst_handler_id.append(
self.chkDevEnvQ10.connect('toggled', self._on_toggled, 9))
self._lst_handler_id.append(
self.chkDevEnvQ11.connect('toggled', self._on_toggled, 10))
self._lst_handler_id.append(
self.chkDevEnvQ12.connect('toggled', self._on_toggled, 11))
self._lst_handler_id.append(
self.chkDevEnvQ13.connect('toggled', self._on_toggled, 12))
self._lst_handler_id.append(
self.chkDevEnvQ14.connect('toggled', self._on_toggled, 13))
self._lst_handler_id.append(
self.chkDevEnvQ15.connect('toggled', self._on_toggled, 14))
self._lst_handler_id.append(
self.chkDevEnvQ16.connect('toggled', self._on_toggled, 15))
self._lst_handler_id.append(
self.chkDevEnvQ17.connect('toggled', self._on_toggled, 16))
self._lst_handler_id.append(
self.chkDevEnvQ18.connect('toggled', self._on_toggled, 17))
self._lst_handler_id.append(
self.chkDevEnvQ19.connect('toggled', self._on_toggled, 18))
self._lst_handler_id.append(
self.chkDevEnvQ20.connect('toggled', self._on_toggled, 19))
self._lst_handler_id.append(
self.chkDevEnvQ21.connect('toggled', self._on_toggled, 20))
self._lst_handler_id.append(
self.chkDevEnvQ22.connect('toggled', self._on_toggled, 21))
self._lst_handler_id.append(
self.chkDevEnvQ23.connect('toggled', self._on_toggled, 22))
self._lst_handler_id.append(
self.chkDevEnvQ24.connect('toggled', self._on_toggled, 23))
self._lst_handler_id.append(
self.chkDevEnvQ25.connect('toggled', self._on_toggled, 24))
self._lst_handler_id.append(
self.chkDevEnvQ26.connect('toggled', self._on_toggled, 25))
self._lst_handler_id.append(
self.chkDevEnvQ27.connect('toggled', self._on_toggled, 26))
self._lst_handler_id.append(
self.chkDevEnvQ28.connect('toggled', self._on_toggled, 27))
self._lst_handler_id.append(
self.chkDevEnvQ29.connect('toggled', self._on_toggled, 28))
self._lst_handler_id.append(
self.chkDevEnvQ30.connect('toggled', self._on_toggled, 29))
self._lst_handler_id.append(
self.chkDevEnvQ31.connect('toggled', self._on_toggled, 30))
self._lst_handler_id.append(
self.chkDevEnvQ32.connect('toggled', self._on_toggled, 31))
self._lst_handler_id.append(
self.chkDevEnvQ33.connect('toggled', self._on_toggled, 32))
self._lst_handler_id.append(
self.chkDevEnvQ34.connect('toggled', self._on_toggled, 33))
self._lst_handler_id.append(
self.chkDevEnvQ35.connect('toggled', self._on_toggled, 34))
self._lst_handler_id.append(
self.chkDevEnvQ36.connect('toggled', self._on_toggled, 35))
self._lst_handler_id.append(
self.chkDevEnvQ37.connect('toggled', self._on_toggled, 36))
self._lst_handler_id.append(
self.chkDevEnvQ38.connect('toggled', self._on_toggled, 37))
self._lst_handler_id.append(
self.chkDevEnvQ39.connect('toggled', self._on_toggled, 38))
self._lst_handler_id.append(
self.chkDevEnvQ40.connect('toggled', self._on_toggled, 39))
self._lst_handler_id.append(
self.chkDevEnvQ41.connect('toggled', self._on_toggled, 40))
self._lst_handler_id.append(
self.chkDevEnvQ42.connect('toggled', self._on_toggled, 41))
self._lst_handler_id.append(
self.chkDevEnvQ43.connect('toggled', self._on_toggled, 42))
def create_risk_analysis_page(self, notebook):
"""
Method to create the development environment risk analysis page and add
it to the risk analysis gtk.Notebook().
:param gtk.Notebook notebook: the gtk.Notebook() instance that will
hold the development environment risk
analysis questions.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
# Build-up the containers for the tab. #
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #
_hpaned = gtk.HPaned()
self.pack1(_hpaned, resize=True, shrink=True)
# Create the organizational risk pane.
_fixed = gtk.Fixed()
_scrollwindow = gtk.ScrolledWindow()
_scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
_scrollwindow.add_with_viewport(_fixed)
_frame = Widgets.make_frame(label=_(u"Organization"))
_frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)
_frame.add(_scrollwindow)
_hpaned.pack1(_frame, True, True)
_labels = [_(u"1. There are separate design and coding "
u"organizations."),
_(u"2. There is an independent software test "
u"organization."),
_(u"3. There is an independent software quality "
u"assurance organization."),
_(u"4. There is an independent software configuration "
u"management organization."),
_(u"5. There is an independent software verification "
u"and validation organization."),
_(u"6. A structured programming team will develop the "
u"software."),
_(u"7. The educational level of the software team members "
u"is above average."),
_(u"8. The experience level of the software team members "
u"is above average.")]
(_x_pos,
_y_pos) = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)
_x_pos += 125
_fixed.put(self.chkDevEnvQ1, _x_pos, _y_pos[0])
_fixed.put(self.chkDevEnvQ2, _x_pos, _y_pos[1])
_fixed.put(self.chkDevEnvQ3, _x_pos, _y_pos[2])
_fixed.put(self.chkDevEnvQ4, _x_pos, _y_pos[3])
_fixed.put(self.chkDevEnvQ5, _x_pos, _y_pos[4])
_fixed.put(self.chkDevEnvQ6, _x_pos, _y_pos[5])
_fixed.put(self.chkDevEnvQ7, _x_pos, _y_pos[6])
_fixed.put(self.chkDevEnvQ8, _x_pos, _y_pos[7])
# Create the methods risk pane.
_fixed = gtk.Fixed()
_scrollwindow = gtk.ScrolledWindow()
_scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
_scrollwindow.add_with_viewport(_fixed)
_frame = Widgets.make_frame(label=_(u"Methods"))
_frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)
_frame.add(_scrollwindow)
_hpaned.pack2(_frame, True, True)
_labels = [_(u"1. Standards are defined and will be enforced."),
_(u"2. Software will be developed using a higher order "
u"language."),
_(u"3. The development process will include formal "
u"reviews (PDR, CDR, etc.)."),
_(u"4. The development process will include frequent "
u"walkthroughs."),
_(u"5. Development will take a top-down and "
u"structured approach."),
_(u"6. Unit development folders will be used."),
_(u"7. A software development library will be used."),
_(u"8. A formal change and error reporting process "
u"will be used."),
_(u"9. Progress and status will routinely be "
u"reported.")]
(__, _y_pos) = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)
_fixed.put(self.chkDevEnvQ9, _x_pos, _y_pos[0])
_fixed.put(self.chkDevEnvQ10, _x_pos, _y_pos[1])
_fixed.put(self.chkDevEnvQ11, _x_pos, _y_pos[2])
_fixed.put(self.chkDevEnvQ12, _x_pos, _y_pos[3])
_fixed.put(self.chkDevEnvQ13, _x_pos, _y_pos[4])
_fixed.put(self.chkDevEnvQ14, _x_pos, _y_pos[5])
_fixed.put(self.chkDevEnvQ15, _x_pos, _y_pos[6])
_fixed.put(self.chkDevEnvQ16, _x_pos, _y_pos[7])
_fixed.put(self.chkDevEnvQ17, _x_pos, _y_pos[8])
# Create the documentation risk pane.
_hpaned = gtk.HPaned()
self.pack2(_hpaned, resize=True, shrink=True)
_fixed = gtk.Fixed()
_scrollwindow = gtk.ScrolledWindow()
_scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
_scrollwindow.add_with_viewport(_fixed)
_frame = Widgets.make_frame(label=_(u"Documentation"))
_frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)
_frame.add(_scrollwindow)
_hpaned.pack1(_frame, True, True)
_labels = [_(u" 1. System requirements specifications will be "
u"documented."),
_(u" 2. Software requirements specifications will be "
u"documented."),
_(u" 3. Interface design specifications will be "
u"documented."),
_(u" 4. Software design specification will be "
u"documented."),
_(u" 5. Test plans, procedures, and reports will be "
u"documented."),
_(u" 6. The software development plan will be "
u"documented."),
_(u" 7. The software quality assurance plan will be "
u"documented."),
_(u" 8. The software configuration management plan will "
u"be documented."),
_(u" 9. A requirements traceability matrix will be "
u"used."),
_(u"10. The software version description will be "
u"documented."),
_(u"11. All software discrepancies will be "
u"documented.")]
(__, _y_pos) = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)
_fixed.put(self.chkDevEnvQ18, _x_pos, _y_pos[0])
_fixed.put(self.chkDevEnvQ19, _x_pos, _y_pos[1])
_fixed.put(self.chkDevEnvQ20, _x_pos, _y_pos[2])
_fixed.put(self.chkDevEnvQ21, _x_pos, _y_pos[3])
_fixed.put(self.chkDevEnvQ22, _x_pos, _y_pos[4])
_fixed.put(self.chkDevEnvQ23, _x_pos, _y_pos[5])
_fixed.put(self.chkDevEnvQ24, _x_pos, _y_pos[6])
_fixed.put(self.chkDevEnvQ25, _x_pos, _y_pos[7])
_fixed.put(self.chkDevEnvQ26, _x_pos, _y_pos[8])
_fixed.put(self.chkDevEnvQ27, _x_pos, _y_pos[9])
_fixed.put(self.chkDevEnvQ28, _x_pos, _y_pos[10])
# Create the tools and test techniques risk pane.
_fixed = gtk.Fixed()
_scrollwindow = gtk.ScrolledWindow()
_scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
_scrollwindow.add_with_viewport(_fixed)
_frame = Widgets.make_frame(label=_(u"Tools & Test Techniques"))
_frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)
_frame.add(_scrollwindow)
_hpaned.pack2(_frame, True, True)
_labels = [_(u" 1. The software language requirements will be "
u"specified."),
_(u" 2. Formal program design language will be used."),
_(u" 3. Program design graphical techniques "
u"(flowcharts, HIPO, etc.) will be used."),
_(u" 4. Simulation/emulation tools will be used."),
_(u" 5. Configuration management tools will be used."),
_(u" 6. A code auditing tool will be used."),
_(u" 7. A data flow analyzer will be used."),
_(u" 8. A programmer's workbench will be used."),
_(u" 9. Measurement tools will be used."),
_(u"10. Software code reviews will be used."),
_(u"11. Software branch testing will be used."),
_(u"12. Random testing will be used."),
_(u"13. Functional testing will be used."),
_(u"14. Error and anomaly detection testing will be "
u"used."),
_(u"15. Structure analysis will be used.")]
(__, _y_pos) = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)
_fixed.put(self.chkDevEnvQ29, _x_pos, _y_pos[0])
_fixed.put(self.chkDevEnvQ30, _x_pos, _y_pos[1])
_fixed.put(self.chkDevEnvQ31, _x_pos, _y_pos[2])
_fixed.put(self.chkDevEnvQ32, _x_pos, _y_pos[3])
_fixed.put(self.chkDevEnvQ33, _x_pos, _y_pos[4])
_fixed.put(self.chkDevEnvQ34, _x_pos, _y_pos[5])
_fixed.put(self.chkDevEnvQ35, _x_pos, _y_pos[6])
_fixed.put(self.chkDevEnvQ36, _x_pos, _y_pos[7])
_fixed.put(self.chkDevEnvQ37, _x_pos, _y_pos[8])
_fixed.put(self.chkDevEnvQ38, _x_pos, _y_pos[9])
_fixed.put(self.chkDevEnvQ39, _x_pos, _y_pos[10])
_fixed.put(self.chkDevEnvQ40, _x_pos, _y_pos[11])
_fixed.put(self.chkDevEnvQ41, _x_pos, _y_pos[12])
_fixed.put(self.chkDevEnvQ42, _x_pos, _y_pos[13])
_fixed.put(self.chkDevEnvQ43, _x_pos, _y_pos[14])
_label = gtk.Label()
_label.set_markup("<span weight='bold'>" +
_(u"Development\nEnvironment") +
"</span>")
_label.set_alignment(xalign=0.5, yalign=0.5)
_label.set_justify(gtk.JUSTIFY_CENTER)
_label.set_angle(0)
_label.show_all()
_label.set_tooltip_text(_(u"Assesses risk due to the development "
u"environment."))
notebook.insert_page(self, tab_label=_label, position=-1)
return False
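
    # A hedged sketch (hypothetical, not part of the original class): the
    # four risk panes built above share the same Fixed/ScrolledWindow/Frame
    # scaffolding, which a small helper could factor out. Assumes the gtk
    # and Widgets APIs exactly as used in create_risk_analysis_page().
    def _make_risk_pane(self, label):
        """Build one scrollable, framed gtk.Fixed() risk pane."""
        _fixed = gtk.Fixed()
        _scrollwindow = gtk.ScrolledWindow()
        _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        _scrollwindow.add_with_viewport(_fixed)
        _frame = Widgets.make_frame(label=label)
        _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)
        _frame.add(_scrollwindow)
        return _fixed, _frame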
def load(self, model):
"""
Method to load the Development Environment Risk Analysis answers.
:param `rtk.software.Software` model: the Software data model to load
the gtk.ToggleButton() from.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
self._software_model = model
self.chkDevEnvQ1.set_active(model.lst_development[0])
self.chkDevEnvQ2.set_active(model.lst_development[1])
self.chkDevEnvQ3.set_active(model.lst_development[2])
self.chkDevEnvQ4.set_active(model.lst_development[3])
self.chkDevEnvQ5.set_active(model.lst_development[4])
self.chkDevEnvQ6.set_active(model.lst_development[5])
self.chkDevEnvQ7.set_active(model.lst_development[6])
self.chkDevEnvQ8.set_active(model.lst_development[7])
self.chkDevEnvQ9.set_active(model.lst_development[8])
self.chkDevEnvQ10.set_active(model.lst_development[9])
self.chkDevEnvQ11.set_active(model.lst_development[10])
self.chkDevEnvQ12.set_active(model.lst_development[11])
self.chkDevEnvQ13.set_active(model.lst_development[12])
self.chkDevEnvQ14.set_active(model.lst_development[13])
self.chkDevEnvQ15.set_active(model.lst_development[14])
self.chkDevEnvQ16.set_active(model.lst_development[15])
self.chkDevEnvQ17.set_active(model.lst_development[16])
self.chkDevEnvQ18.set_active(model.lst_development[17])
self.chkDevEnvQ19.set_active(model.lst_development[18])
self.chkDevEnvQ20.set_active(model.lst_development[19])
self.chkDevEnvQ21.set_active(model.lst_development[20])
self.chkDevEnvQ22.set_active(model.lst_development[21])
self.chkDevEnvQ23.set_active(model.lst_development[22])
self.chkDevEnvQ24.set_active(model.lst_development[23])
self.chkDevEnvQ25.set_active(model.lst_development[24])
self.chkDevEnvQ26.set_active(model.lst_development[25])
self.chkDevEnvQ27.set_active(model.lst_development[26])
self.chkDevEnvQ28.set_active(model.lst_development[27])
self.chkDevEnvQ29.set_active(model.lst_development[28])
self.chkDevEnvQ30.set_active(model.lst_development[29])
self.chkDevEnvQ31.set_active(model.lst_development[30])
self.chkDevEnvQ32.set_active(model.lst_development[31])
self.chkDevEnvQ33.set_active(model.lst_development[32])
self.chkDevEnvQ34.set_active(model.lst_development[33])
self.chkDevEnvQ35.set_active(model.lst_development[34])
self.chkDevEnvQ36.set_active(model.lst_development[35])
self.chkDevEnvQ37.set_active(model.lst_development[36])
self.chkDevEnvQ38.set_active(model.lst_development[37])
self.chkDevEnvQ39.set_active(model.lst_development[38])
self.chkDevEnvQ40.set_active(model.lst_development[39])
self.chkDevEnvQ41.set_active(model.lst_development[40])
self.chkDevEnvQ42.set_active(model.lst_development[41])
self.chkDevEnvQ43.set_active(model.lst_development[42])
return False
def _on_toggled(self, check, index):
"""
Callback method for gtk.CheckButton() 'toggled' event.
:param gtk.CheckButton check: the gtk.CheckButton() that called this
method.
:param int index: the index of the Development Environment question
associated with the gtk.CheckButton() that was
toggled.
:return: False if successful or True if an error is encountered.
:rtype: bool
"""
check.handler_block(self._lst_handler_id[index])
self._software_model.lst_development[index] = int(check.get_active())
check.handler_unblock(self._lst_handler_id[index])
return False
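
    # A hedged sketch (hypothetical, not part of the original class): an
    # equivalent of load() above that iterates instead of writing 43
    # explicit set_active() calls. Assumes the chkDevEnvQ1..chkDevEnvQ43
    # naming and the 43-element model.lst_development list shown above.
    def _load_compact(self, model):
        self._software_model = model
        for _index in range(43):
            _check = getattr(self, 'chkDevEnvQ{0:d}'.format(_index + 1))
            _check.set_active(model.lst_development[_index])
        return False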
|
[
"#!/usr/bin/env python\r\n\"\"\"\r\n##############################################################################\r\nSoftware Package Risk Analysis Development Environment Specific Work Book View\r\n##############################################################################\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n#\r\n# rtk.software.__gui.gtk.DevelopmentEnvironment.py is part of The RTK\r\n# Project\r\n#\r\n# All rights reserved.\r\n\r\nimport sys\r\n\r\n# Import modules for localization support.\r\nimport gettext\r\nimport locale\r\n\r\n# Modules required for the GUI.\r\ntry:\r\n import pygtk\r\n pygtk.require('2.0')\r\nexcept ImportError:\r\n sys.exit(1)\r\ntry:\r\n import gtk\r\nexcept ImportError:\r\n sys.exit(1)\r\ntry:\r\n import gtk.glade\r\nexcept ImportError:\r\n sys.exit(1)\r\n\r\n# Import other RTK modules.\r\ntry:\r\n import Configuration\r\n import gui.gtk.Widgets as Widgets\r\nexcept ImportError:\r\n import rtk.Configuration as Configuration\r\n import rtk.gui.gtk.Widgets as Widgets\r\n\r\n__author__ = 'Andrew Rowland'\r\n__email__ = '[email protected]'\r\n__organization__ = 'ReliaQual Associates, LLC'\r\n__copyright__ = 'Copyright 2007 - 2015 Andrew \"weibullguy\" Rowland'\r\n\r\ntry:\r\n locale.setlocale(locale.LC_ALL, Configuration.LOCALE)\r\nexcept locale.Error:\r\n locale.setlocale(locale.LC_ALL, '')\r\n\r\n_ = gettext.gettext\r\n\r\n\r\nclass RiskAnalysis(gtk.VPaned):\r\n \"\"\"\r\n The Work Book view for analyzing and displaying the risk associated with\r\n the development environment. The attributes of a development environment\r\n Work Book view are:\r\n\r\n :ivar list _lst_handler_id: the list of gtk.Widget() signal handler IDs.\r\n :ivar _software_model: the :py:class:`rtk.software.Software.Model` to\r\n display.\r\n \"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"\r\n Method to initialize the development environment risk analysis\r\n questions Work Book page.\r\n \"\"\"\r\n\r\n gtk.VPaned.__init__(self)\r\n\r\n # Define private dictionary attributes.\r\n\r\n # Define private list attributes.\r\n self._lst_handler_id = []\r\n\r\n # Define private scalar attributes.\r\n self._software_model = None\r\n\r\n # Define public dictionary attributes.\r\n\r\n # Define public list attributes.\r\n\r\n # Define public scalar attributes.\r\n self.chkDevEnvQ1 = Widgets.make_check_button()\r\n self.chkDevEnvQ2 = Widgets.make_check_button()\r\n self.chkDevEnvQ3 = Widgets.make_check_button()\r\n self.chkDevEnvQ4 = Widgets.make_check_button()\r\n self.chkDevEnvQ5 = Widgets.make_check_button()\r\n self.chkDevEnvQ6 = Widgets.make_check_button()\r\n self.chkDevEnvQ7 = Widgets.make_check_button()\r\n self.chkDevEnvQ8 = Widgets.make_check_button()\r\n self.chkDevEnvQ9 = Widgets.make_check_button()\r\n self.chkDevEnvQ10 = Widgets.make_check_button()\r\n self.chkDevEnvQ11 = Widgets.make_check_button()\r\n self.chkDevEnvQ12 = Widgets.make_check_button()\r\n self.chkDevEnvQ13 = Widgets.make_check_button()\r\n self.chkDevEnvQ14 = Widgets.make_check_button()\r\n self.chkDevEnvQ15 = Widgets.make_check_button()\r\n self.chkDevEnvQ16 = Widgets.make_check_button()\r\n self.chkDevEnvQ17 = Widgets.make_check_button()\r\n self.chkDevEnvQ18 = Widgets.make_check_button()\r\n self.chkDevEnvQ19 = Widgets.make_check_button()\r\n self.chkDevEnvQ20 = Widgets.make_check_button()\r\n self.chkDevEnvQ21 = Widgets.make_check_button()\r\n self.chkDevEnvQ22 = Widgets.make_check_button()\r\n self.chkDevEnvQ23 = Widgets.make_check_button()\r\n self.chkDevEnvQ24 = Widgets.make_check_button()\r\n 
self.chkDevEnvQ25 = Widgets.make_check_button()\r\n self.chkDevEnvQ26 = Widgets.make_check_button()\r\n self.chkDevEnvQ27 = Widgets.make_check_button()\r\n self.chkDevEnvQ28 = Widgets.make_check_button()\r\n self.chkDevEnvQ29 = Widgets.make_check_button()\r\n self.chkDevEnvQ30 = Widgets.make_check_button()\r\n self.chkDevEnvQ31 = Widgets.make_check_button()\r\n self.chkDevEnvQ32 = Widgets.make_check_button()\r\n self.chkDevEnvQ33 = Widgets.make_check_button()\r\n self.chkDevEnvQ34 = Widgets.make_check_button()\r\n self.chkDevEnvQ35 = Widgets.make_check_button()\r\n self.chkDevEnvQ36 = Widgets.make_check_button()\r\n self.chkDevEnvQ37 = Widgets.make_check_button()\r\n self.chkDevEnvQ38 = Widgets.make_check_button()\r\n self.chkDevEnvQ39 = Widgets.make_check_button()\r\n self.chkDevEnvQ40 = Widgets.make_check_button()\r\n self.chkDevEnvQ41 = Widgets.make_check_button()\r\n self.chkDevEnvQ42 = Widgets.make_check_button()\r\n self.chkDevEnvQ43 = Widgets.make_check_button()\r\n\r\n # Connect gtk.Widget() signals to callback methods.\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ1.connect('toggled', self._on_toggled, 0))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ2.connect('toggled', self._on_toggled, 1))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ3.connect('toggled', self._on_toggled, 2))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ4.connect('toggled', self._on_toggled, 3))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ5.connect('toggled', self._on_toggled, 4))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ6.connect('toggled', self._on_toggled, 5))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ7.connect('toggled', self._on_toggled, 6))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ8.connect('toggled', self._on_toggled, 7))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ9.connect('toggled', self._on_toggled, 8))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ10.connect('toggled', self._on_toggled, 9))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ11.connect('toggled', self._on_toggled, 10))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ12.connect('toggled', self._on_toggled, 11))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ13.connect('toggled', self._on_toggled, 12))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ14.connect('toggled', self._on_toggled, 13))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ15.connect('toggled', self._on_toggled, 14))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ16.connect('toggled', self._on_toggled, 15))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ17.connect('toggled', self._on_toggled, 16))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ18.connect('toggled', self._on_toggled, 17))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ19.connect('toggled', self._on_toggled, 18))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ20.connect('toggled', self._on_toggled, 19))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ21.connect('toggled', self._on_toggled, 20))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ22.connect('toggled', self._on_toggled, 21))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ23.connect('toggled', self._on_toggled, 22))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ24.connect('toggled', self._on_toggled, 23))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ25.connect('toggled', self._on_toggled, 24))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ26.connect('toggled', self._on_toggled, 25))\r\n 
self._lst_handler_id.append(\r\n self.chkDevEnvQ27.connect('toggled', self._on_toggled, 26))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ28.connect('toggled', self._on_toggled, 27))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ29.connect('toggled', self._on_toggled, 28))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ30.connect('toggled', self._on_toggled, 29))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ31.connect('toggled', self._on_toggled, 30))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ32.connect('toggled', self._on_toggled, 31))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ33.connect('toggled', self._on_toggled, 32))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ34.connect('toggled', self._on_toggled, 33))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ35.connect('toggled', self._on_toggled, 34))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ36.connect('toggled', self._on_toggled, 35))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ37.connect('toggled', self._on_toggled, 36))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ38.connect('toggled', self._on_toggled, 37))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ39.connect('toggled', self._on_toggled, 38))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ40.connect('toggled', self._on_toggled, 39))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ41.connect('toggled', self._on_toggled, 40))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ42.connect('toggled', self._on_toggled, 41))\r\n self._lst_handler_id.append(\r\n self.chkDevEnvQ43.connect('toggled', self._on_toggled, 42))\r\n\r\n def create_risk_analysis_page(self, notebook):\r\n \"\"\"\r\n Method to create the development environment risk analysis page and add\r\n it to the risk analysis gtk.Notebook().\r\n\r\n :param gtk.Notebook notebook: the gtk.Notebook() instance that will\r\n hold the development environment risk\r\n analysis questions.\r\n :return: False if successful or True if an error is encountered.\r\n :rtype: bool\r\n \"\"\"\r\n\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n # Build-up the containers for the tab. #\r\n # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ #\r\n _hpaned = gtk.HPaned()\r\n self.pack1(_hpaned, resize=True, shrink=True)\r\n\r\n # Create the organizational risk pane.\r\n _fixed = gtk.Fixed()\r\n\r\n _scrollwindow = gtk.ScrolledWindow()\r\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\r\n _scrollwindow.add_with_viewport(_fixed)\r\n\r\n _frame = Widgets.make_frame(label=_(u\"Organization\"))\r\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\r\n _frame.add(_scrollwindow)\r\n\r\n _hpaned.pack1(_frame, True, True)\r\n\r\n _labels = [_(u\"1. There are separate design and coding \"\r\n u\"organizations.\"),\r\n _(u\"2. There is an independent software test \"\r\n u\"organization.\"),\r\n _(u\"3. There is an independent software quality \"\r\n u\"assurance organization.\"),\r\n _(u\"4. There is an independent software configuration \"\r\n u\"management organization.\"),\r\n _(u\"5. There is an independent software verification \"\r\n u\"and validation organization.\"),\r\n _(u\"6. A structured programming team will develop the \"\r\n u\"software.\"),\r\n _(u\"7. The educational level of the software team members \"\r\n u\"is above average.\"),\r\n _(u\"8. 
The experience level of the software team members \"\r\n u\"is above average.\")]\r\n (_x_pos,\r\n _y_pos) = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\r\n _x_pos += 125\r\n\r\n _fixed.put(self.chkDevEnvQ1, _x_pos, _y_pos[0])\r\n _fixed.put(self.chkDevEnvQ2, _x_pos, _y_pos[1])\r\n _fixed.put(self.chkDevEnvQ3, _x_pos, _y_pos[2])\r\n _fixed.put(self.chkDevEnvQ4, _x_pos, _y_pos[3])\r\n _fixed.put(self.chkDevEnvQ5, _x_pos, _y_pos[4])\r\n _fixed.put(self.chkDevEnvQ6, _x_pos, _y_pos[5])\r\n _fixed.put(self.chkDevEnvQ7, _x_pos, _y_pos[6])\r\n _fixed.put(self.chkDevEnvQ8, _x_pos, _y_pos[7])\r\n\r\n # Create the methods risk pane.\r\n _fixed = gtk.Fixed()\r\n\r\n _scrollwindow = gtk.ScrolledWindow()\r\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\r\n _scrollwindow.add_with_viewport(_fixed)\r\n\r\n _frame = Widgets.make_frame(label=_(u\"Methods\"))\r\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\r\n _frame.add(_scrollwindow)\r\n\r\n _hpaned.pack2(_frame, True, True)\r\n\r\n _labels = [_(u\"1. Standards are defined and will be enforced.\"),\r\n _(u\"2. Software will be developed using a higher order \"\r\n u\"language.\"),\r\n _(u\"3. The development process will include formal \"\r\n u\"reviews (PDR, CDR, etc.).\"),\r\n _(u\"4. The development process will include frequent \"\r\n u\"walkthroughs.\"),\r\n _(u\"5. Development will take a top-down and \"\r\n u\"structured approach.\"),\r\n _(u\"6. Unit development folders will be used.\"),\r\n _(u\"7. A software development library will be used.\"),\r\n _(u\"8. A formal change and error reporting process \"\r\n u\"will be used.\"),\r\n _(u\"9. Progress and status will routinely be \"\r\n u\"reported.\")]\r\n (__, _y_pos) = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\r\n\r\n _fixed.put(self.chkDevEnvQ9, _x_pos, _y_pos[0])\r\n _fixed.put(self.chkDevEnvQ10, _x_pos, _y_pos[1])\r\n _fixed.put(self.chkDevEnvQ11, _x_pos, _y_pos[2])\r\n _fixed.put(self.chkDevEnvQ12, _x_pos, _y_pos[3])\r\n _fixed.put(self.chkDevEnvQ13, _x_pos, _y_pos[4])\r\n _fixed.put(self.chkDevEnvQ14, _x_pos, _y_pos[5])\r\n _fixed.put(self.chkDevEnvQ15, _x_pos, _y_pos[6])\r\n _fixed.put(self.chkDevEnvQ16, _x_pos, _y_pos[7])\r\n _fixed.put(self.chkDevEnvQ17, _x_pos, _y_pos[8])\r\n\r\n # Create the documentation risk pane.\r\n _hpaned = gtk.HPaned()\r\n self.pack2(_hpaned, resize=True, shrink=True)\r\n\r\n _fixed = gtk.Fixed()\r\n\r\n _scrollwindow = gtk.ScrolledWindow()\r\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\r\n _scrollwindow.add_with_viewport(_fixed)\r\n\r\n _frame = Widgets.make_frame(label=_(u\"Documentation\"))\r\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\r\n _frame.add(_scrollwindow)\r\n\r\n _hpaned.pack1(_frame, True, True)\r\n\r\n _labels = [_(u\" 1. System requirements specifications will be \"\r\n u\"documented.\"),\r\n _(u\" 2. Software requirements specifications will be \"\r\n u\"documented.\"),\r\n _(u\" 3. Interface design specifications will be \"\r\n u\"documented.\"),\r\n _(u\" 4. Software design specification will be \"\r\n u\"documented.\"),\r\n _(u\" 5. Test plans, procedures, and reports will be \"\r\n u\"documented.\"),\r\n _(u\" 6. The software development plan will be \"\r\n u\"documented.\"),\r\n _(u\" 7. The software quality assurance plan will be \"\r\n u\"documented.\"),\r\n _(u\" 8. The software configuration management plan will \"\r\n u\"be documented.\"),\r\n _(u\" 9. A requirements traceability matrix will be \"\r\n u\"used.\"),\r\n _(u\"10. 
The software version description will be \"\r\n u\"documented.\"),\r\n _(u\"11. All software discrepancies will be \"\r\n u\"documented.\")]\r\n (__, _y_pos) = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\r\n\r\n _fixed.put(self.chkDevEnvQ18, _x_pos, _y_pos[0])\r\n _fixed.put(self.chkDevEnvQ19, _x_pos, _y_pos[1])\r\n _fixed.put(self.chkDevEnvQ20, _x_pos, _y_pos[2])\r\n _fixed.put(self.chkDevEnvQ21, _x_pos, _y_pos[3])\r\n _fixed.put(self.chkDevEnvQ22, _x_pos, _y_pos[4])\r\n _fixed.put(self.chkDevEnvQ23, _x_pos, _y_pos[5])\r\n _fixed.put(self.chkDevEnvQ24, _x_pos, _y_pos[6])\r\n _fixed.put(self.chkDevEnvQ25, _x_pos, _y_pos[7])\r\n _fixed.put(self.chkDevEnvQ26, _x_pos, _y_pos[8])\r\n _fixed.put(self.chkDevEnvQ27, _x_pos, _y_pos[9])\r\n _fixed.put(self.chkDevEnvQ28, _x_pos, _y_pos[10])\r\n\r\n # Create the tools and test techniques risk pane.\r\n _fixed = gtk.Fixed()\r\n\r\n _scrollwindow = gtk.ScrolledWindow()\r\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\r\n _scrollwindow.add_with_viewport(_fixed)\r\n\r\n _frame = Widgets.make_frame(label=_(u\"Tools & Test Techniques\"))\r\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\r\n _frame.add(_scrollwindow)\r\n\r\n _hpaned.pack2(_frame, True, True)\r\n\r\n _labels = [_(u\" 1. The software language requirements will be \"\r\n u\"specified.\"),\r\n _(u\" 2. Formal program design language will be used.\"),\r\n _(u\" 3. Program design graphical techniques \"\r\n u\"(flowcharts, HIPO, etc.) will be used.\"),\r\n _(u\" 4. Simulation/emulation tools will be used.\"),\r\n _(u\" 5. Configuration management tools will be used.\"),\r\n _(u\" 6. A code auditing tool will be used.\"),\r\n _(u\" 7. A data flow analyzer will be used.\"),\r\n _(u\" 8. A programmer's workbench will be used.\"),\r\n _(u\" 9. Measurement tools will be used.\"),\r\n _(u\"10. Software code reviews will be used.\"),\r\n _(u\"11. Software branch testing will be used.\"),\r\n _(u\"12. Random testing will be used.\"),\r\n _(u\"13. Functional testing will be used.\"),\r\n _(u\"14. Error and anomaly detection testing will be \"\r\n u\"used.\"),\r\n _(u\"15. 
Structure analysis will be used.\")]\r\n (__, _y_pos) = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\r\n\r\n _fixed.put(self.chkDevEnvQ29, _x_pos, _y_pos[0])\r\n _fixed.put(self.chkDevEnvQ30, _x_pos, _y_pos[1])\r\n _fixed.put(self.chkDevEnvQ31, _x_pos, _y_pos[2])\r\n _fixed.put(self.chkDevEnvQ32, _x_pos, _y_pos[3])\r\n _fixed.put(self.chkDevEnvQ33, _x_pos, _y_pos[4])\r\n _fixed.put(self.chkDevEnvQ34, _x_pos, _y_pos[5])\r\n _fixed.put(self.chkDevEnvQ35, _x_pos, _y_pos[6])\r\n _fixed.put(self.chkDevEnvQ36, _x_pos, _y_pos[7])\r\n _fixed.put(self.chkDevEnvQ37, _x_pos, _y_pos[8])\r\n _fixed.put(self.chkDevEnvQ38, _x_pos, _y_pos[9])\r\n _fixed.put(self.chkDevEnvQ39, _x_pos, _y_pos[10])\r\n _fixed.put(self.chkDevEnvQ40, _x_pos, _y_pos[11])\r\n _fixed.put(self.chkDevEnvQ41, _x_pos, _y_pos[12])\r\n _fixed.put(self.chkDevEnvQ42, _x_pos, _y_pos[13])\r\n _fixed.put(self.chkDevEnvQ43, _x_pos, _y_pos[14])\r\n\r\n _label = gtk.Label()\r\n _label.set_markup(\"<span weight='bold'>\" +\r\n _(u\"Development\\nEnvironment\") +\r\n \"</span>\")\r\n _label.set_alignment(xalign=0.5, yalign=0.5)\r\n _label.set_justify(gtk.JUSTIFY_CENTER)\r\n _label.set_angle(0)\r\n _label.show_all()\r\n _label.set_tooltip_text(_(u\"Assesses risk due to the development \"\r\n u\"environment.\"))\r\n notebook.insert_page(self, tab_label=_label, position=-1)\r\n\r\n return False\r\n\r\n def load(self, model):\r\n \"\"\"\r\n Method to load the Development Environment Risk Analysis answers.\r\n\r\n :param `rtk.software.Software` model: the Software data model to load\r\n the gtk.ToggleButton() from.\r\n :return: False if successful or True if an error is encountered.\r\n :rtype: bool\r\n \"\"\"\r\n\r\n self._software_model = model\r\n\r\n self.chkDevEnvQ1.set_active(model.lst_development[0])\r\n self.chkDevEnvQ2.set_active(model.lst_development[1])\r\n self.chkDevEnvQ3.set_active(model.lst_development[2])\r\n self.chkDevEnvQ4.set_active(model.lst_development[3])\r\n self.chkDevEnvQ5.set_active(model.lst_development[4])\r\n self.chkDevEnvQ6.set_active(model.lst_development[5])\r\n self.chkDevEnvQ7.set_active(model.lst_development[6])\r\n self.chkDevEnvQ8.set_active(model.lst_development[7])\r\n self.chkDevEnvQ9.set_active(model.lst_development[8])\r\n self.chkDevEnvQ10.set_active(model.lst_development[9])\r\n self.chkDevEnvQ11.set_active(model.lst_development[10])\r\n self.chkDevEnvQ12.set_active(model.lst_development[11])\r\n self.chkDevEnvQ13.set_active(model.lst_development[12])\r\n self.chkDevEnvQ14.set_active(model.lst_development[13])\r\n self.chkDevEnvQ15.set_active(model.lst_development[14])\r\n self.chkDevEnvQ16.set_active(model.lst_development[15])\r\n self.chkDevEnvQ17.set_active(model.lst_development[16])\r\n self.chkDevEnvQ18.set_active(model.lst_development[17])\r\n self.chkDevEnvQ19.set_active(model.lst_development[18])\r\n self.chkDevEnvQ20.set_active(model.lst_development[19])\r\n self.chkDevEnvQ21.set_active(model.lst_development[20])\r\n self.chkDevEnvQ22.set_active(model.lst_development[21])\r\n self.chkDevEnvQ23.set_active(model.lst_development[22])\r\n self.chkDevEnvQ24.set_active(model.lst_development[23])\r\n self.chkDevEnvQ25.set_active(model.lst_development[24])\r\n self.chkDevEnvQ26.set_active(model.lst_development[25])\r\n self.chkDevEnvQ27.set_active(model.lst_development[26])\r\n self.chkDevEnvQ28.set_active(model.lst_development[27])\r\n self.chkDevEnvQ29.set_active(model.lst_development[28])\r\n self.chkDevEnvQ30.set_active(model.lst_development[29])\r\n 
self.chkDevEnvQ31.set_active(model.lst_development[30])\r\n self.chkDevEnvQ32.set_active(model.lst_development[31])\r\n self.chkDevEnvQ33.set_active(model.lst_development[32])\r\n self.chkDevEnvQ34.set_active(model.lst_development[33])\r\n self.chkDevEnvQ35.set_active(model.lst_development[34])\r\n self.chkDevEnvQ36.set_active(model.lst_development[35])\r\n self.chkDevEnvQ37.set_active(model.lst_development[36])\r\n self.chkDevEnvQ38.set_active(model.lst_development[37])\r\n self.chkDevEnvQ39.set_active(model.lst_development[38])\r\n self.chkDevEnvQ40.set_active(model.lst_development[39])\r\n self.chkDevEnvQ41.set_active(model.lst_development[40])\r\n self.chkDevEnvQ42.set_active(model.lst_development[41])\r\n self.chkDevEnvQ43.set_active(model.lst_development[42])\r\n\r\n return False\r\n\r\n def _on_toggled(self, check, index):\r\n \"\"\"\r\n Callback method for gtk.CheckButton() 'toggled' event.\r\n\r\n :param gtk.CheckButton check: the gtk.CheckButton() that called this\r\n method.\r\n :param int index: the index of the Development Environment question\r\n associated with the gtk.CheckButton() that was\r\n toggled.\r\n :return: False if successful or True if an error is encountered.\r\n :rtype: bool\r\n \"\"\"\r\n\r\n check.handler_block(self._lst_handler_id[index])\r\n\r\n self._software_model.lst_development[index] = int(check.get_active())\r\n\r\n check.handler_unblock(self._lst_handler_id[index])\r\n\r\n return False\r\n",
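A minimal sketch, assuming the PyGTK 2 API used in the source above, of the handler block/unblock idiom that _on_toggled relies on; the factory name _make_toggle and the list-backed store are illustrative, not part of the RTK source:

import gtk

def _make_toggle(store, index):
    # One check button wired to its own 'toggled' handler; the handler id
    # returned by connect() is what lets the callback block itself.
    check = gtk.CheckButton()

    def _on_toggled(widget):
        # Temporarily block this handler so nothing done while writing the
        # answer back (or any nested 'toggled' emission) re-enters it.
        widget.handler_block(handler_id)
        store[index] = int(widget.get_active())
        widget.handler_unblock(handler_id)
        return False

    handler_id = check.connect('toggled', _on_toggled)
    return check, handler_id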
"<docstring token>\nimport sys\nimport gettext\nimport locale\ntry:\n import pygtk\n pygtk.require('2.0')\nexcept ImportError:\n sys.exit(1)\ntry:\n import gtk\nexcept ImportError:\n sys.exit(1)\ntry:\n import gtk.glade\nexcept ImportError:\n sys.exit(1)\ntry:\n import Configuration\n import gui.gtk.Widgets as Widgets\nexcept ImportError:\n import rtk.Configuration as Configuration\n import rtk.gui.gtk.Widgets as Widgets\n__author__ = 'Andrew Rowland'\n__email__ = '[email protected]'\n__organization__ = 'ReliaQual Associates, LLC'\n__copyright__ = 'Copyright 2007 - 2015 Andrew \"weibullguy\" Rowland'\ntry:\n locale.setlocale(locale.LC_ALL, Configuration.LOCALE)\nexcept locale.Error:\n locale.setlocale(locale.LC_ALL, '')\n_ = gettext.gettext\n\n\nclass RiskAnalysis(gtk.VPaned):\n \"\"\"\n The Work Book view for analyzing and displaying the risk associated with\n the development environment. The attributes of a development environment\n Work Book view are:\n\n :ivar list _lst_handler_id: the list of gtk.Widget() signal handler IDs.\n :ivar _software_model: the :py:class:`rtk.software.Software.Model` to\n display.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Method to initialize the development environment risk analysis\n questions Work Book page.\n \"\"\"\n gtk.VPaned.__init__(self)\n self._lst_handler_id = []\n self._software_model = None\n self.chkDevEnvQ1 = Widgets.make_check_button()\n self.chkDevEnvQ2 = Widgets.make_check_button()\n self.chkDevEnvQ3 = Widgets.make_check_button()\n self.chkDevEnvQ4 = Widgets.make_check_button()\n self.chkDevEnvQ5 = Widgets.make_check_button()\n self.chkDevEnvQ6 = Widgets.make_check_button()\n self.chkDevEnvQ7 = Widgets.make_check_button()\n self.chkDevEnvQ8 = Widgets.make_check_button()\n self.chkDevEnvQ9 = Widgets.make_check_button()\n self.chkDevEnvQ10 = Widgets.make_check_button()\n self.chkDevEnvQ11 = Widgets.make_check_button()\n self.chkDevEnvQ12 = Widgets.make_check_button()\n self.chkDevEnvQ13 = Widgets.make_check_button()\n self.chkDevEnvQ14 = Widgets.make_check_button()\n self.chkDevEnvQ15 = Widgets.make_check_button()\n self.chkDevEnvQ16 = Widgets.make_check_button()\n self.chkDevEnvQ17 = Widgets.make_check_button()\n self.chkDevEnvQ18 = Widgets.make_check_button()\n self.chkDevEnvQ19 = Widgets.make_check_button()\n self.chkDevEnvQ20 = Widgets.make_check_button()\n self.chkDevEnvQ21 = Widgets.make_check_button()\n self.chkDevEnvQ22 = Widgets.make_check_button()\n self.chkDevEnvQ23 = Widgets.make_check_button()\n self.chkDevEnvQ24 = Widgets.make_check_button()\n self.chkDevEnvQ25 = Widgets.make_check_button()\n self.chkDevEnvQ26 = Widgets.make_check_button()\n self.chkDevEnvQ27 = Widgets.make_check_button()\n self.chkDevEnvQ28 = Widgets.make_check_button()\n self.chkDevEnvQ29 = Widgets.make_check_button()\n self.chkDevEnvQ30 = Widgets.make_check_button()\n self.chkDevEnvQ31 = Widgets.make_check_button()\n self.chkDevEnvQ32 = Widgets.make_check_button()\n self.chkDevEnvQ33 = Widgets.make_check_button()\n self.chkDevEnvQ34 = Widgets.make_check_button()\n self.chkDevEnvQ35 = Widgets.make_check_button()\n self.chkDevEnvQ36 = Widgets.make_check_button()\n self.chkDevEnvQ37 = Widgets.make_check_button()\n self.chkDevEnvQ38 = Widgets.make_check_button()\n self.chkDevEnvQ39 = Widgets.make_check_button()\n self.chkDevEnvQ40 = Widgets.make_check_button()\n self.chkDevEnvQ41 = Widgets.make_check_button()\n self.chkDevEnvQ42 = Widgets.make_check_button()\n self.chkDevEnvQ43 = Widgets.make_check_button()\n 
self._lst_handler_id.append(self.chkDevEnvQ1.connect('toggled',\n self._on_toggled, 0))\n self._lst_handler_id.append(self.chkDevEnvQ2.connect('toggled',\n self._on_toggled, 1))\n self._lst_handler_id.append(self.chkDevEnvQ3.connect('toggled',\n self._on_toggled, 2))\n self._lst_handler_id.append(self.chkDevEnvQ4.connect('toggled',\n self._on_toggled, 3))\n self._lst_handler_id.append(self.chkDevEnvQ5.connect('toggled',\n self._on_toggled, 4))\n self._lst_handler_id.append(self.chkDevEnvQ6.connect('toggled',\n self._on_toggled, 5))\n self._lst_handler_id.append(self.chkDevEnvQ7.connect('toggled',\n self._on_toggled, 6))\n self._lst_handler_id.append(self.chkDevEnvQ8.connect('toggled',\n self._on_toggled, 7))\n self._lst_handler_id.append(self.chkDevEnvQ9.connect('toggled',\n self._on_toggled, 8))\n self._lst_handler_id.append(self.chkDevEnvQ10.connect('toggled',\n self._on_toggled, 9))\n self._lst_handler_id.append(self.chkDevEnvQ11.connect('toggled',\n self._on_toggled, 10))\n self._lst_handler_id.append(self.chkDevEnvQ12.connect('toggled',\n self._on_toggled, 11))\n self._lst_handler_id.append(self.chkDevEnvQ13.connect('toggled',\n self._on_toggled, 12))\n self._lst_handler_id.append(self.chkDevEnvQ14.connect('toggled',\n self._on_toggled, 13))\n self._lst_handler_id.append(self.chkDevEnvQ15.connect('toggled',\n self._on_toggled, 14))\n self._lst_handler_id.append(self.chkDevEnvQ16.connect('toggled',\n self._on_toggled, 15))\n self._lst_handler_id.append(self.chkDevEnvQ17.connect('toggled',\n self._on_toggled, 16))\n self._lst_handler_id.append(self.chkDevEnvQ18.connect('toggled',\n self._on_toggled, 17))\n self._lst_handler_id.append(self.chkDevEnvQ19.connect('toggled',\n self._on_toggled, 18))\n self._lst_handler_id.append(self.chkDevEnvQ20.connect('toggled',\n self._on_toggled, 19))\n self._lst_handler_id.append(self.chkDevEnvQ21.connect('toggled',\n self._on_toggled, 20))\n self._lst_handler_id.append(self.chkDevEnvQ22.connect('toggled',\n self._on_toggled, 21))\n self._lst_handler_id.append(self.chkDevEnvQ23.connect('toggled',\n self._on_toggled, 22))\n self._lst_handler_id.append(self.chkDevEnvQ24.connect('toggled',\n self._on_toggled, 23))\n self._lst_handler_id.append(self.chkDevEnvQ25.connect('toggled',\n self._on_toggled, 24))\n self._lst_handler_id.append(self.chkDevEnvQ26.connect('toggled',\n self._on_toggled, 25))\n self._lst_handler_id.append(self.chkDevEnvQ27.connect('toggled',\n self._on_toggled, 26))\n self._lst_handler_id.append(self.chkDevEnvQ28.connect('toggled',\n self._on_toggled, 27))\n self._lst_handler_id.append(self.chkDevEnvQ29.connect('toggled',\n self._on_toggled, 28))\n self._lst_handler_id.append(self.chkDevEnvQ30.connect('toggled',\n self._on_toggled, 29))\n self._lst_handler_id.append(self.chkDevEnvQ31.connect('toggled',\n self._on_toggled, 30))\n self._lst_handler_id.append(self.chkDevEnvQ32.connect('toggled',\n self._on_toggled, 31))\n self._lst_handler_id.append(self.chkDevEnvQ33.connect('toggled',\n self._on_toggled, 32))\n self._lst_handler_id.append(self.chkDevEnvQ34.connect('toggled',\n self._on_toggled, 33))\n self._lst_handler_id.append(self.chkDevEnvQ35.connect('toggled',\n self._on_toggled, 34))\n self._lst_handler_id.append(self.chkDevEnvQ36.connect('toggled',\n self._on_toggled, 35))\n self._lst_handler_id.append(self.chkDevEnvQ37.connect('toggled',\n self._on_toggled, 36))\n self._lst_handler_id.append(self.chkDevEnvQ38.connect('toggled',\n self._on_toggled, 37))\n self._lst_handler_id.append(self.chkDevEnvQ39.connect('toggled',\n 
self._on_toggled, 38))\n self._lst_handler_id.append(self.chkDevEnvQ40.connect('toggled',\n self._on_toggled, 39))\n self._lst_handler_id.append(self.chkDevEnvQ41.connect('toggled',\n self._on_toggled, 40))\n self._lst_handler_id.append(self.chkDevEnvQ42.connect('toggled',\n self._on_toggled, 41))\n self._lst_handler_id.append(self.chkDevEnvQ43.connect('toggled',\n self._on_toggled, 42))\n\n def create_risk_analysis_page(self, notebook):\n \"\"\"\n Method to create the development environment risk analysis page and add\n it to the risk analysis gtk.Notebook().\n\n :param gtk.Notebook notebook: the gtk.Notebook() instance that will\n hold the development environment risk\n analysis questions.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n _hpaned = gtk.HPaned()\n self.pack1(_hpaned, resize=True, shrink=True)\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Organization'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack1(_frame, True, True)\n _labels = [_(\n u'1. There are separate design and coding organizations.'), _(\n u'2. There is an independent software test organization.'), _(\n u'3. There is an independent software quality assurance organization.'\n ), _(\n u'4. There is an independent software configuration management organization.'\n ), _(\n u'5. There is an independent software verification and validation organization.'\n ), _(\n u'6. A structured programming team will develop the software.'),\n _(\n u'7. The educational level of the software team members is above average.'\n ), _(\n u'8. The experience level of the software team members is above average.'\n )]\n _x_pos, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _x_pos += 125\n _fixed.put(self.chkDevEnvQ1, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ2, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ3, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ4, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ5, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ6, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ7, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ8, _x_pos, _y_pos[7])\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Methods'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack2(_frame, True, True)\n _labels = [_(u'1. Standards are defined and will be enforced.'), _(\n u'2. Software will be developed using a higher order language.'\n ), _(\n u'3. The development process will include formal reviews (PDR, CDR, etc.).'\n ), _(\n u'4. The development process will include frequent walkthroughs.'\n ), _(\n u'5. Development will take a top-down and structured approach.'\n ), _(u'6. Unit development folders will be used.'), _(\n u'7. A software development library will be used.'), _(\n u'8. A formal change and error reporting process will be used.'\n ), _(u'9. 
Progress and status will routinely be reported.')]\n __, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _fixed.put(self.chkDevEnvQ9, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ10, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ11, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ12, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ13, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ14, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ15, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ16, _x_pos, _y_pos[7])\n _fixed.put(self.chkDevEnvQ17, _x_pos, _y_pos[8])\n _hpaned = gtk.HPaned()\n self.pack2(_hpaned, resize=True, shrink=True)\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Documentation'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack1(_frame, True, True)\n _labels = [_(\n u' 1. System requirements specifications will be documented.'),\n _(\n u' 2. Software requirements specifications will be documented.'\n ), _(u' 3. Interface design specifications will be documented.'\n ), _(u' 4. Software design specification will be documented.'),\n _(\n u' 5. Test plans, procedures, and reports will be documented.'),\n _(u' 6. The software development plan will be documented.'), _(\n u' 7. The software quality assurance plan will be documented.'),\n _(\n u' 8. The software configuration management plan will be documented.'\n ), _(u' 9. A requirements traceability matrix will be used.'),\n _(u'10. The software version description will be documented.'),\n _(u'11. All software discrepancies will be documented.')]\n __, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _fixed.put(self.chkDevEnvQ18, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ19, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ20, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ21, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ22, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ23, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ24, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ25, _x_pos, _y_pos[7])\n _fixed.put(self.chkDevEnvQ26, _x_pos, _y_pos[8])\n _fixed.put(self.chkDevEnvQ27, _x_pos, _y_pos[9])\n _fixed.put(self.chkDevEnvQ28, _x_pos, _y_pos[10])\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Tools & Test Techniques'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack2(_frame, True, True)\n _labels = [_(\n u' 1. The software language requirements will be specified.'),\n _(u' 2. Formal program design language will be used.'), _(\n u' 3. Program design graphical techniques (flowcharts, HIPO, etc.) will be used.'\n ), _(u' 4. Simulation/emulation tools will be used.'), _(\n u' 5. Configuration management tools will be used.'), _(\n u' 6. A code auditing tool will be used.'), _(\n u' 7. A data flow analyzer will be used.'), _(\n u\" 8. A programmer's workbench will be used.\"), _(\n u' 9. Measurement tools will be used.'), _(\n u'10. Software code reviews will be used.'), _(\n u'11. Software branch testing will be used.'), _(\n u'12. Random testing will be used.'), _(\n u'13. Functional testing will be used.'), _(\n u'14. Error and anomaly detection testing will be used.'), _(\n u'15. 
Structure analysis will be used.')]\n __, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _fixed.put(self.chkDevEnvQ29, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ30, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ31, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ32, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ33, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ34, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ35, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ36, _x_pos, _y_pos[7])\n _fixed.put(self.chkDevEnvQ37, _x_pos, _y_pos[8])\n _fixed.put(self.chkDevEnvQ38, _x_pos, _y_pos[9])\n _fixed.put(self.chkDevEnvQ39, _x_pos, _y_pos[10])\n _fixed.put(self.chkDevEnvQ40, _x_pos, _y_pos[11])\n _fixed.put(self.chkDevEnvQ41, _x_pos, _y_pos[12])\n _fixed.put(self.chkDevEnvQ42, _x_pos, _y_pos[13])\n _fixed.put(self.chkDevEnvQ43, _x_pos, _y_pos[14])\n _label = gtk.Label()\n _label.set_markup(\"<span weight='bold'>\" + _(\n u'Development\\nEnvironment') + '</span>')\n _label.set_alignment(xalign=0.5, yalign=0.5)\n _label.set_justify(gtk.JUSTIFY_CENTER)\n _label.set_angle(0)\n _label.show_all()\n _label.set_tooltip_text(_(\n u'Assesses risk due to the development environment.'))\n notebook.insert_page(self, tab_label=_label, position=-1)\n return False\n\n def load(self, model):\n \"\"\"\n Method to load the Development Environment Risk Analysis answers.\n\n :param `rtk.software.Software` model: the Software data model to load\n the gtk.ToggleButton() from.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n self._software_model = model\n self.chkDevEnvQ1.set_active(model.lst_development[0])\n self.chkDevEnvQ2.set_active(model.lst_development[1])\n self.chkDevEnvQ3.set_active(model.lst_development[2])\n self.chkDevEnvQ4.set_active(model.lst_development[3])\n self.chkDevEnvQ5.set_active(model.lst_development[4])\n self.chkDevEnvQ6.set_active(model.lst_development[5])\n self.chkDevEnvQ7.set_active(model.lst_development[6])\n self.chkDevEnvQ8.set_active(model.lst_development[7])\n self.chkDevEnvQ9.set_active(model.lst_development[8])\n self.chkDevEnvQ10.set_active(model.lst_development[9])\n self.chkDevEnvQ11.set_active(model.lst_development[10])\n self.chkDevEnvQ12.set_active(model.lst_development[11])\n self.chkDevEnvQ13.set_active(model.lst_development[12])\n self.chkDevEnvQ14.set_active(model.lst_development[13])\n self.chkDevEnvQ15.set_active(model.lst_development[14])\n self.chkDevEnvQ16.set_active(model.lst_development[15])\n self.chkDevEnvQ17.set_active(model.lst_development[16])\n self.chkDevEnvQ18.set_active(model.lst_development[17])\n self.chkDevEnvQ19.set_active(model.lst_development[18])\n self.chkDevEnvQ20.set_active(model.lst_development[19])\n self.chkDevEnvQ21.set_active(model.lst_development[20])\n self.chkDevEnvQ22.set_active(model.lst_development[21])\n self.chkDevEnvQ23.set_active(model.lst_development[22])\n self.chkDevEnvQ24.set_active(model.lst_development[23])\n self.chkDevEnvQ25.set_active(model.lst_development[24])\n self.chkDevEnvQ26.set_active(model.lst_development[25])\n self.chkDevEnvQ27.set_active(model.lst_development[26])\n self.chkDevEnvQ28.set_active(model.lst_development[27])\n self.chkDevEnvQ29.set_active(model.lst_development[28])\n self.chkDevEnvQ30.set_active(model.lst_development[29])\n self.chkDevEnvQ31.set_active(model.lst_development[30])\n self.chkDevEnvQ32.set_active(model.lst_development[31])\n self.chkDevEnvQ33.set_active(model.lst_development[32])\n 
self.chkDevEnvQ34.set_active(model.lst_development[33])\n self.chkDevEnvQ35.set_active(model.lst_development[34])\n self.chkDevEnvQ36.set_active(model.lst_development[35])\n self.chkDevEnvQ37.set_active(model.lst_development[36])\n self.chkDevEnvQ38.set_active(model.lst_development[37])\n self.chkDevEnvQ39.set_active(model.lst_development[38])\n self.chkDevEnvQ40.set_active(model.lst_development[39])\n self.chkDevEnvQ41.set_active(model.lst_development[40])\n self.chkDevEnvQ42.set_active(model.lst_development[41])\n self.chkDevEnvQ43.set_active(model.lst_development[42])\n return False\n\n def _on_toggled(self, check, index):\n \"\"\"\n Callback method for gtk.CheckButton() 'toggled' event.\n\n :param gtk.CheckButton check: the gtk.CheckButton() that called this\n method.\n :param int index: the index of the Development Environment question\n associated with the gtk.CheckButton() that was\n toggled.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n check.handler_block(self._lst_handler_id[index])\n self._software_model.lst_development[index] = int(check.get_active())\n check.handler_unblock(self._lst_handler_id[index])\n return False\n",
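The forty-three chkDevEnvQN attributes, their 'toggled' connections, and the matching set_active() calls in load() all follow one zero-based index pattern; a hedged sketch (the helper names build_checks and load_checks are hypothetical, not in the RTK source) of driving them from a loop instead:

N_QUESTIONS = 43

def build_checks(view, widgets):
    # Create chkDevEnvQ1..chkDevEnvQ43 on the view, connect each to
    # view._on_toggled with its question index, and record handler ids.
    for i in range(N_QUESTIONS):
        check = widgets.make_check_button()
        setattr(view, 'chkDevEnvQ%d' % (i + 1), check)
        view._lst_handler_id.append(
            check.connect('toggled', view._on_toggled, i))

def load_checks(view, model):
    # Mirror of load() above: question i reads model.lst_development[i].
    for i in range(N_QUESTIONS):
        getattr(view, 'chkDevEnvQ%d' % (i + 1)).set_active(
            model.lst_development[i])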
"<docstring token>\n<import token>\ntry:\n import pygtk\n pygtk.require('2.0')\nexcept ImportError:\n sys.exit(1)\ntry:\n import gtk\nexcept ImportError:\n sys.exit(1)\ntry:\n import gtk.glade\nexcept ImportError:\n sys.exit(1)\ntry:\n import Configuration\n import gui.gtk.Widgets as Widgets\nexcept ImportError:\n import rtk.Configuration as Configuration\n import rtk.gui.gtk.Widgets as Widgets\n__author__ = 'Andrew Rowland'\n__email__ = '[email protected]'\n__organization__ = 'ReliaQual Associates, LLC'\n__copyright__ = 'Copyright 2007 - 2015 Andrew \"weibullguy\" Rowland'\ntry:\n locale.setlocale(locale.LC_ALL, Configuration.LOCALE)\nexcept locale.Error:\n locale.setlocale(locale.LC_ALL, '')\n_ = gettext.gettext\n\n\nclass RiskAnalysis(gtk.VPaned):\n \"\"\"\n The Work Book view for analyzing and displaying the risk associated with\n the development environment. The attributes of a development environment\n Work Book view are:\n\n :ivar list _lst_handler_id: the list of gtk.Widget() signal handler IDs.\n :ivar _software_model: the :py:class:`rtk.software.Software.Model` to\n display.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Method to initialize the development environment risk analysis\n questions Work Book page.\n \"\"\"\n gtk.VPaned.__init__(self)\n self._lst_handler_id = []\n self._software_model = None\n self.chkDevEnvQ1 = Widgets.make_check_button()\n self.chkDevEnvQ2 = Widgets.make_check_button()\n self.chkDevEnvQ3 = Widgets.make_check_button()\n self.chkDevEnvQ4 = Widgets.make_check_button()\n self.chkDevEnvQ5 = Widgets.make_check_button()\n self.chkDevEnvQ6 = Widgets.make_check_button()\n self.chkDevEnvQ7 = Widgets.make_check_button()\n self.chkDevEnvQ8 = Widgets.make_check_button()\n self.chkDevEnvQ9 = Widgets.make_check_button()\n self.chkDevEnvQ10 = Widgets.make_check_button()\n self.chkDevEnvQ11 = Widgets.make_check_button()\n self.chkDevEnvQ12 = Widgets.make_check_button()\n self.chkDevEnvQ13 = Widgets.make_check_button()\n self.chkDevEnvQ14 = Widgets.make_check_button()\n self.chkDevEnvQ15 = Widgets.make_check_button()\n self.chkDevEnvQ16 = Widgets.make_check_button()\n self.chkDevEnvQ17 = Widgets.make_check_button()\n self.chkDevEnvQ18 = Widgets.make_check_button()\n self.chkDevEnvQ19 = Widgets.make_check_button()\n self.chkDevEnvQ20 = Widgets.make_check_button()\n self.chkDevEnvQ21 = Widgets.make_check_button()\n self.chkDevEnvQ22 = Widgets.make_check_button()\n self.chkDevEnvQ23 = Widgets.make_check_button()\n self.chkDevEnvQ24 = Widgets.make_check_button()\n self.chkDevEnvQ25 = Widgets.make_check_button()\n self.chkDevEnvQ26 = Widgets.make_check_button()\n self.chkDevEnvQ27 = Widgets.make_check_button()\n self.chkDevEnvQ28 = Widgets.make_check_button()\n self.chkDevEnvQ29 = Widgets.make_check_button()\n self.chkDevEnvQ30 = Widgets.make_check_button()\n self.chkDevEnvQ31 = Widgets.make_check_button()\n self.chkDevEnvQ32 = Widgets.make_check_button()\n self.chkDevEnvQ33 = Widgets.make_check_button()\n self.chkDevEnvQ34 = Widgets.make_check_button()\n self.chkDevEnvQ35 = Widgets.make_check_button()\n self.chkDevEnvQ36 = Widgets.make_check_button()\n self.chkDevEnvQ37 = Widgets.make_check_button()\n self.chkDevEnvQ38 = Widgets.make_check_button()\n self.chkDevEnvQ39 = Widgets.make_check_button()\n self.chkDevEnvQ40 = Widgets.make_check_button()\n self.chkDevEnvQ41 = Widgets.make_check_button()\n self.chkDevEnvQ42 = Widgets.make_check_button()\n self.chkDevEnvQ43 = Widgets.make_check_button()\n self._lst_handler_id.append(self.chkDevEnvQ1.connect('toggled',\n 
self._on_toggled, 0))\n self._lst_handler_id.append(self.chkDevEnvQ2.connect('toggled',\n self._on_toggled, 1))\n self._lst_handler_id.append(self.chkDevEnvQ3.connect('toggled',\n self._on_toggled, 2))\n self._lst_handler_id.append(self.chkDevEnvQ4.connect('toggled',\n self._on_toggled, 3))\n self._lst_handler_id.append(self.chkDevEnvQ5.connect('toggled',\n self._on_toggled, 4))\n self._lst_handler_id.append(self.chkDevEnvQ6.connect('toggled',\n self._on_toggled, 5))\n self._lst_handler_id.append(self.chkDevEnvQ7.connect('toggled',\n self._on_toggled, 6))\n self._lst_handler_id.append(self.chkDevEnvQ8.connect('toggled',\n self._on_toggled, 7))\n self._lst_handler_id.append(self.chkDevEnvQ9.connect('toggled',\n self._on_toggled, 8))\n self._lst_handler_id.append(self.chkDevEnvQ10.connect('toggled',\n self._on_toggled, 9))\n self._lst_handler_id.append(self.chkDevEnvQ11.connect('toggled',\n self._on_toggled, 10))\n self._lst_handler_id.append(self.chkDevEnvQ12.connect('toggled',\n self._on_toggled, 11))\n self._lst_handler_id.append(self.chkDevEnvQ13.connect('toggled',\n self._on_toggled, 12))\n self._lst_handler_id.append(self.chkDevEnvQ14.connect('toggled',\n self._on_toggled, 13))\n self._lst_handler_id.append(self.chkDevEnvQ15.connect('toggled',\n self._on_toggled, 14))\n self._lst_handler_id.append(self.chkDevEnvQ16.connect('toggled',\n self._on_toggled, 15))\n self._lst_handler_id.append(self.chkDevEnvQ17.connect('toggled',\n self._on_toggled, 16))\n self._lst_handler_id.append(self.chkDevEnvQ18.connect('toggled',\n self._on_toggled, 17))\n self._lst_handler_id.append(self.chkDevEnvQ19.connect('toggled',\n self._on_toggled, 18))\n self._lst_handler_id.append(self.chkDevEnvQ20.connect('toggled',\n self._on_toggled, 19))\n self._lst_handler_id.append(self.chkDevEnvQ21.connect('toggled',\n self._on_toggled, 20))\n self._lst_handler_id.append(self.chkDevEnvQ22.connect('toggled',\n self._on_toggled, 21))\n self._lst_handler_id.append(self.chkDevEnvQ23.connect('toggled',\n self._on_toggled, 22))\n self._lst_handler_id.append(self.chkDevEnvQ24.connect('toggled',\n self._on_toggled, 23))\n self._lst_handler_id.append(self.chkDevEnvQ25.connect('toggled',\n self._on_toggled, 24))\n self._lst_handler_id.append(self.chkDevEnvQ26.connect('toggled',\n self._on_toggled, 25))\n self._lst_handler_id.append(self.chkDevEnvQ27.connect('toggled',\n self._on_toggled, 26))\n self._lst_handler_id.append(self.chkDevEnvQ28.connect('toggled',\n self._on_toggled, 27))\n self._lst_handler_id.append(self.chkDevEnvQ29.connect('toggled',\n self._on_toggled, 28))\n self._lst_handler_id.append(self.chkDevEnvQ30.connect('toggled',\n self._on_toggled, 29))\n self._lst_handler_id.append(self.chkDevEnvQ31.connect('toggled',\n self._on_toggled, 30))\n self._lst_handler_id.append(self.chkDevEnvQ32.connect('toggled',\n self._on_toggled, 31))\n self._lst_handler_id.append(self.chkDevEnvQ33.connect('toggled',\n self._on_toggled, 32))\n self._lst_handler_id.append(self.chkDevEnvQ34.connect('toggled',\n self._on_toggled, 33))\n self._lst_handler_id.append(self.chkDevEnvQ35.connect('toggled',\n self._on_toggled, 34))\n self._lst_handler_id.append(self.chkDevEnvQ36.connect('toggled',\n self._on_toggled, 35))\n self._lst_handler_id.append(self.chkDevEnvQ37.connect('toggled',\n self._on_toggled, 36))\n self._lst_handler_id.append(self.chkDevEnvQ38.connect('toggled',\n self._on_toggled, 37))\n self._lst_handler_id.append(self.chkDevEnvQ39.connect('toggled',\n self._on_toggled, 38))\n 
self._lst_handler_id.append(self.chkDevEnvQ40.connect('toggled',\n self._on_toggled, 39))\n self._lst_handler_id.append(self.chkDevEnvQ41.connect('toggled',\n self._on_toggled, 40))\n self._lst_handler_id.append(self.chkDevEnvQ42.connect('toggled',\n self._on_toggled, 41))\n self._lst_handler_id.append(self.chkDevEnvQ43.connect('toggled',\n self._on_toggled, 42))\n\n def create_risk_analysis_page(self, notebook):\n \"\"\"\n Method to create the development environment risk analysis page and add\n it to the risk analysis gtk.Notebook().\n\n :param gtk.Notebook notebook: the gtk.Notebook() instance that will\n hold the development environment risk\n analysis questions.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n _hpaned = gtk.HPaned()\n self.pack1(_hpaned, resize=True, shrink=True)\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Organization'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack1(_frame, True, True)\n _labels = [_(\n u'1. There are separate design and coding organizations.'), _(\n u'2. There is an independent software test organization.'), _(\n u'3. There is an independent software quality assurance organization.'\n ), _(\n u'4. There is an independent software configuration management organization.'\n ), _(\n u'5. There is an independent software verification and validation organization.'\n ), _(\n u'6. A structured programming team will develop the software.'),\n _(\n u'7. The educational level of the software team members is above average.'\n ), _(\n u'8. The experience level of the software team members is above average.'\n )]\n _x_pos, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _x_pos += 125\n _fixed.put(self.chkDevEnvQ1, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ2, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ3, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ4, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ5, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ6, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ7, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ8, _x_pos, _y_pos[7])\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Methods'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack2(_frame, True, True)\n _labels = [_(u'1. Standards are defined and will be enforced.'), _(\n u'2. Software will be developed using a higher order language.'\n ), _(\n u'3. The development process will include formal reviews (PDR, CDR, etc.).'\n ), _(\n u'4. The development process will include frequent walkthroughs.'\n ), _(\n u'5. Development will take a top-down and structured approach.'\n ), _(u'6. Unit development folders will be used.'), _(\n u'7. A software development library will be used.'), _(\n u'8. A formal change and error reporting process will be used.'\n ), _(u'9. 
Progress and status will routinely be reported.')]\n __, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _fixed.put(self.chkDevEnvQ9, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ10, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ11, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ12, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ13, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ14, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ15, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ16, _x_pos, _y_pos[7])\n _fixed.put(self.chkDevEnvQ17, _x_pos, _y_pos[8])\n _hpaned = gtk.HPaned()\n self.pack2(_hpaned, resize=True, shrink=True)\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Documentation'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack1(_frame, True, True)\n _labels = [_(\n u' 1. System requirements specifications will be documented.'),\n _(\n u' 2. Software requirements specifications will be documented.'\n ), _(u' 3. Interface design specifications will be documented.'\n ), _(u' 4. Software design specification will be documented.'),\n _(\n u' 5. Test plans, procedures, and reports will be documented.'),\n _(u' 6. The software development plan will be documented.'), _(\n u' 7. The software quality assurance plan will be documented.'),\n _(\n u' 8. The software configuration management plan will be documented.'\n ), _(u' 9. A requirements traceability matrix will be used.'),\n _(u'10. The software version description will be documented.'),\n _(u'11. All software discrepancies will be documented.')]\n __, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _fixed.put(self.chkDevEnvQ18, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ19, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ20, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ21, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ22, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ23, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ24, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ25, _x_pos, _y_pos[7])\n _fixed.put(self.chkDevEnvQ26, _x_pos, _y_pos[8])\n _fixed.put(self.chkDevEnvQ27, _x_pos, _y_pos[9])\n _fixed.put(self.chkDevEnvQ28, _x_pos, _y_pos[10])\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Tools & Test Techniques'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack2(_frame, True, True)\n _labels = [_(\n u' 1. The software language requirements will be specified.'),\n _(u' 2. Formal program design language will be used.'), _(\n u' 3. Program design graphical techniques (flowcharts, HIPO, etc.) will be used.'\n ), _(u' 4. Simulation/emulation tools will be used.'), _(\n u' 5. Configuration management tools will be used.'), _(\n u' 6. A code auditing tool will be used.'), _(\n u' 7. A data flow analyzer will be used.'), _(\n u\" 8. A programmer's workbench will be used.\"), _(\n u' 9. Measurement tools will be used.'), _(\n u'10. Software code reviews will be used.'), _(\n u'11. Software branch testing will be used.'), _(\n u'12. Random testing will be used.'), _(\n u'13. Functional testing will be used.'), _(\n u'14. Error and anomaly detection testing will be used.'), _(\n u'15. 
Structure analysis will be used.')]\n __, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _fixed.put(self.chkDevEnvQ29, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ30, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ31, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ32, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ33, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ34, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ35, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ36, _x_pos, _y_pos[7])\n _fixed.put(self.chkDevEnvQ37, _x_pos, _y_pos[8])\n _fixed.put(self.chkDevEnvQ38, _x_pos, _y_pos[9])\n _fixed.put(self.chkDevEnvQ39, _x_pos, _y_pos[10])\n _fixed.put(self.chkDevEnvQ40, _x_pos, _y_pos[11])\n _fixed.put(self.chkDevEnvQ41, _x_pos, _y_pos[12])\n _fixed.put(self.chkDevEnvQ42, _x_pos, _y_pos[13])\n _fixed.put(self.chkDevEnvQ43, _x_pos, _y_pos[14])\n _label = gtk.Label()\n _label.set_markup(\"<span weight='bold'>\" + _(\n u'Development\\nEnvironment') + '</span>')\n _label.set_alignment(xalign=0.5, yalign=0.5)\n _label.set_justify(gtk.JUSTIFY_CENTER)\n _label.set_angle(0)\n _label.show_all()\n _label.set_tooltip_text(_(\n u'Assesses risk due to the development environment.'))\n notebook.insert_page(self, tab_label=_label, position=-1)\n return False\n\n def load(self, model):\n \"\"\"\n Method to load the Development Environment Risk Analysis answers.\n\n :param `rtk.software.Software` model: the Software data model to load\n the gtk.ToggleButton() from.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n self._software_model = model\n self.chkDevEnvQ1.set_active(model.lst_development[0])\n self.chkDevEnvQ2.set_active(model.lst_development[1])\n self.chkDevEnvQ3.set_active(model.lst_development[2])\n self.chkDevEnvQ4.set_active(model.lst_development[3])\n self.chkDevEnvQ5.set_active(model.lst_development[4])\n self.chkDevEnvQ6.set_active(model.lst_development[5])\n self.chkDevEnvQ7.set_active(model.lst_development[6])\n self.chkDevEnvQ8.set_active(model.lst_development[7])\n self.chkDevEnvQ9.set_active(model.lst_development[8])\n self.chkDevEnvQ10.set_active(model.lst_development[9])\n self.chkDevEnvQ11.set_active(model.lst_development[10])\n self.chkDevEnvQ12.set_active(model.lst_development[11])\n self.chkDevEnvQ13.set_active(model.lst_development[12])\n self.chkDevEnvQ14.set_active(model.lst_development[13])\n self.chkDevEnvQ15.set_active(model.lst_development[14])\n self.chkDevEnvQ16.set_active(model.lst_development[15])\n self.chkDevEnvQ17.set_active(model.lst_development[16])\n self.chkDevEnvQ18.set_active(model.lst_development[17])\n self.chkDevEnvQ19.set_active(model.lst_development[18])\n self.chkDevEnvQ20.set_active(model.lst_development[19])\n self.chkDevEnvQ21.set_active(model.lst_development[20])\n self.chkDevEnvQ22.set_active(model.lst_development[21])\n self.chkDevEnvQ23.set_active(model.lst_development[22])\n self.chkDevEnvQ24.set_active(model.lst_development[23])\n self.chkDevEnvQ25.set_active(model.lst_development[24])\n self.chkDevEnvQ26.set_active(model.lst_development[25])\n self.chkDevEnvQ27.set_active(model.lst_development[26])\n self.chkDevEnvQ28.set_active(model.lst_development[27])\n self.chkDevEnvQ29.set_active(model.lst_development[28])\n self.chkDevEnvQ30.set_active(model.lst_development[29])\n self.chkDevEnvQ31.set_active(model.lst_development[30])\n self.chkDevEnvQ32.set_active(model.lst_development[31])\n self.chkDevEnvQ33.set_active(model.lst_development[32])\n 
self.chkDevEnvQ34.set_active(model.lst_development[33])\n self.chkDevEnvQ35.set_active(model.lst_development[34])\n self.chkDevEnvQ36.set_active(model.lst_development[35])\n self.chkDevEnvQ37.set_active(model.lst_development[36])\n self.chkDevEnvQ38.set_active(model.lst_development[37])\n self.chkDevEnvQ39.set_active(model.lst_development[38])\n self.chkDevEnvQ40.set_active(model.lst_development[39])\n self.chkDevEnvQ41.set_active(model.lst_development[40])\n self.chkDevEnvQ42.set_active(model.lst_development[41])\n self.chkDevEnvQ43.set_active(model.lst_development[42])\n return False\n\n def _on_toggled(self, check, index):\n \"\"\"\n Callback method for gtk.CheckButton() 'toggled' event.\n\n :param gtk.CheckButton check: the gtk.CheckButton() that called this\n method.\n :param int index: the index of the Development Environment question\n associated with the gtk.CheckButton() that was\n toggled.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n check.handler_block(self._lst_handler_id[index])\n self._software_model.lst_development[index] = int(check.get_active())\n check.handler_unblock(self._lst_handler_id[index])\n return False\n",
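create_risk_analysis_page assembles the same widget stack four times (Organization, Methods, Documentation, Tools & Test Techniques): a gtk.Fixed inside an auto-scrolling viewport inside an etched frame packed into an HPaned. A minimal sketch, assuming only the PyGTK calls shown above, of factoring that stack into a helper; make_scrolled_frame is a hypothetical name:

import gtk

def make_scrolled_frame(make_frame, title):
    # Fixed -> ScrolledWindow viewport -> etched Frame, as each risk pane
    # above is built; returns both so the caller can put() widgets into
    # the fixed container and pack the frame into a paned.
    fixed = gtk.Fixed()
    scroll = gtk.ScrolledWindow()
    scroll.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
    scroll.add_with_viewport(fixed)
    frame = make_frame(label=title)
    frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)
    frame.add(scroll)
    return frame, fixed

Each pane would then reduce to frame, fixed = make_scrolled_frame(Widgets.make_frame, _(u'Methods')) followed by the existing pack1/pack2 call.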
"<docstring token>\n<import token>\ntry:\n import pygtk\n pygtk.require('2.0')\nexcept ImportError:\n sys.exit(1)\ntry:\n import gtk\nexcept ImportError:\n sys.exit(1)\ntry:\n import gtk.glade\nexcept ImportError:\n sys.exit(1)\ntry:\n import Configuration\n import gui.gtk.Widgets as Widgets\nexcept ImportError:\n import rtk.Configuration as Configuration\n import rtk.gui.gtk.Widgets as Widgets\n<assignment token>\ntry:\n locale.setlocale(locale.LC_ALL, Configuration.LOCALE)\nexcept locale.Error:\n locale.setlocale(locale.LC_ALL, '')\n<assignment token>\n\n\nclass RiskAnalysis(gtk.VPaned):\n \"\"\"\n The Work Book view for analyzing and displaying the risk associated with\n the development environment. The attributes of a development environment\n Work Book view are:\n\n :ivar list _lst_handler_id: the list of gtk.Widget() signal handler IDs.\n :ivar _software_model: the :py:class:`rtk.software.Software.Model` to\n display.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Method to initialize the development environment risk analysis\n questions Work Book page.\n \"\"\"\n gtk.VPaned.__init__(self)\n self._lst_handler_id = []\n self._software_model = None\n self.chkDevEnvQ1 = Widgets.make_check_button()\n self.chkDevEnvQ2 = Widgets.make_check_button()\n self.chkDevEnvQ3 = Widgets.make_check_button()\n self.chkDevEnvQ4 = Widgets.make_check_button()\n self.chkDevEnvQ5 = Widgets.make_check_button()\n self.chkDevEnvQ6 = Widgets.make_check_button()\n self.chkDevEnvQ7 = Widgets.make_check_button()\n self.chkDevEnvQ8 = Widgets.make_check_button()\n self.chkDevEnvQ9 = Widgets.make_check_button()\n self.chkDevEnvQ10 = Widgets.make_check_button()\n self.chkDevEnvQ11 = Widgets.make_check_button()\n self.chkDevEnvQ12 = Widgets.make_check_button()\n self.chkDevEnvQ13 = Widgets.make_check_button()\n self.chkDevEnvQ14 = Widgets.make_check_button()\n self.chkDevEnvQ15 = Widgets.make_check_button()\n self.chkDevEnvQ16 = Widgets.make_check_button()\n self.chkDevEnvQ17 = Widgets.make_check_button()\n self.chkDevEnvQ18 = Widgets.make_check_button()\n self.chkDevEnvQ19 = Widgets.make_check_button()\n self.chkDevEnvQ20 = Widgets.make_check_button()\n self.chkDevEnvQ21 = Widgets.make_check_button()\n self.chkDevEnvQ22 = Widgets.make_check_button()\n self.chkDevEnvQ23 = Widgets.make_check_button()\n self.chkDevEnvQ24 = Widgets.make_check_button()\n self.chkDevEnvQ25 = Widgets.make_check_button()\n self.chkDevEnvQ26 = Widgets.make_check_button()\n self.chkDevEnvQ27 = Widgets.make_check_button()\n self.chkDevEnvQ28 = Widgets.make_check_button()\n self.chkDevEnvQ29 = Widgets.make_check_button()\n self.chkDevEnvQ30 = Widgets.make_check_button()\n self.chkDevEnvQ31 = Widgets.make_check_button()\n self.chkDevEnvQ32 = Widgets.make_check_button()\n self.chkDevEnvQ33 = Widgets.make_check_button()\n self.chkDevEnvQ34 = Widgets.make_check_button()\n self.chkDevEnvQ35 = Widgets.make_check_button()\n self.chkDevEnvQ36 = Widgets.make_check_button()\n self.chkDevEnvQ37 = Widgets.make_check_button()\n self.chkDevEnvQ38 = Widgets.make_check_button()\n self.chkDevEnvQ39 = Widgets.make_check_button()\n self.chkDevEnvQ40 = Widgets.make_check_button()\n self.chkDevEnvQ41 = Widgets.make_check_button()\n self.chkDevEnvQ42 = Widgets.make_check_button()\n self.chkDevEnvQ43 = Widgets.make_check_button()\n self._lst_handler_id.append(self.chkDevEnvQ1.connect('toggled',\n self._on_toggled, 0))\n self._lst_handler_id.append(self.chkDevEnvQ2.connect('toggled',\n self._on_toggled, 1))\n 
self._lst_handler_id.append(self.chkDevEnvQ3.connect('toggled',\n self._on_toggled, 2))\n self._lst_handler_id.append(self.chkDevEnvQ4.connect('toggled',\n self._on_toggled, 3))\n self._lst_handler_id.append(self.chkDevEnvQ5.connect('toggled',\n self._on_toggled, 4))\n self._lst_handler_id.append(self.chkDevEnvQ6.connect('toggled',\n self._on_toggled, 5))\n self._lst_handler_id.append(self.chkDevEnvQ7.connect('toggled',\n self._on_toggled, 6))\n self._lst_handler_id.append(self.chkDevEnvQ8.connect('toggled',\n self._on_toggled, 7))\n self._lst_handler_id.append(self.chkDevEnvQ9.connect('toggled',\n self._on_toggled, 8))\n self._lst_handler_id.append(self.chkDevEnvQ10.connect('toggled',\n self._on_toggled, 9))\n self._lst_handler_id.append(self.chkDevEnvQ11.connect('toggled',\n self._on_toggled, 10))\n self._lst_handler_id.append(self.chkDevEnvQ12.connect('toggled',\n self._on_toggled, 11))\n self._lst_handler_id.append(self.chkDevEnvQ13.connect('toggled',\n self._on_toggled, 12))\n self._lst_handler_id.append(self.chkDevEnvQ14.connect('toggled',\n self._on_toggled, 13))\n self._lst_handler_id.append(self.chkDevEnvQ15.connect('toggled',\n self._on_toggled, 14))\n self._lst_handler_id.append(self.chkDevEnvQ16.connect('toggled',\n self._on_toggled, 15))\n self._lst_handler_id.append(self.chkDevEnvQ17.connect('toggled',\n self._on_toggled, 16))\n self._lst_handler_id.append(self.chkDevEnvQ18.connect('toggled',\n self._on_toggled, 17))\n self._lst_handler_id.append(self.chkDevEnvQ19.connect('toggled',\n self._on_toggled, 18))\n self._lst_handler_id.append(self.chkDevEnvQ20.connect('toggled',\n self._on_toggled, 19))\n self._lst_handler_id.append(self.chkDevEnvQ21.connect('toggled',\n self._on_toggled, 20))\n self._lst_handler_id.append(self.chkDevEnvQ22.connect('toggled',\n self._on_toggled, 21))\n self._lst_handler_id.append(self.chkDevEnvQ23.connect('toggled',\n self._on_toggled, 22))\n self._lst_handler_id.append(self.chkDevEnvQ24.connect('toggled',\n self._on_toggled, 23))\n self._lst_handler_id.append(self.chkDevEnvQ25.connect('toggled',\n self._on_toggled, 24))\n self._lst_handler_id.append(self.chkDevEnvQ26.connect('toggled',\n self._on_toggled, 25))\n self._lst_handler_id.append(self.chkDevEnvQ27.connect('toggled',\n self._on_toggled, 26))\n self._lst_handler_id.append(self.chkDevEnvQ28.connect('toggled',\n self._on_toggled, 27))\n self._lst_handler_id.append(self.chkDevEnvQ29.connect('toggled',\n self._on_toggled, 28))\n self._lst_handler_id.append(self.chkDevEnvQ30.connect('toggled',\n self._on_toggled, 29))\n self._lst_handler_id.append(self.chkDevEnvQ31.connect('toggled',\n self._on_toggled, 30))\n self._lst_handler_id.append(self.chkDevEnvQ32.connect('toggled',\n self._on_toggled, 31))\n self._lst_handler_id.append(self.chkDevEnvQ33.connect('toggled',\n self._on_toggled, 32))\n self._lst_handler_id.append(self.chkDevEnvQ34.connect('toggled',\n self._on_toggled, 33))\n self._lst_handler_id.append(self.chkDevEnvQ35.connect('toggled',\n self._on_toggled, 34))\n self._lst_handler_id.append(self.chkDevEnvQ36.connect('toggled',\n self._on_toggled, 35))\n self._lst_handler_id.append(self.chkDevEnvQ37.connect('toggled',\n self._on_toggled, 36))\n self._lst_handler_id.append(self.chkDevEnvQ38.connect('toggled',\n self._on_toggled, 37))\n self._lst_handler_id.append(self.chkDevEnvQ39.connect('toggled',\n self._on_toggled, 38))\n self._lst_handler_id.append(self.chkDevEnvQ40.connect('toggled',\n self._on_toggled, 39))\n self._lst_handler_id.append(self.chkDevEnvQ41.connect('toggled',\n 
self._on_toggled, 40))\n self._lst_handler_id.append(self.chkDevEnvQ42.connect('toggled',\n self._on_toggled, 41))\n self._lst_handler_id.append(self.chkDevEnvQ43.connect('toggled',\n self._on_toggled, 42))\n\n def create_risk_analysis_page(self, notebook):\n \"\"\"\n Method to create the development environment risk analysis page and add\n it to the risk analysis gtk.Notebook().\n\n :param gtk.Notebook notebook: the gtk.Notebook() instance that will\n hold the development environment risk\n analysis questions.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n _hpaned = gtk.HPaned()\n self.pack1(_hpaned, resize=True, shrink=True)\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Organization'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack1(_frame, True, True)\n _labels = [_(\n u'1. There are separate design and coding organizations.'), _(\n u'2. There is an independent software test organization.'), _(\n u'3. There is an independent software quality assurance organization.'\n ), _(\n u'4. There is an independent software configuration management organization.'\n ), _(\n u'5. There is an independent software verification and validation organization.'\n ), _(\n u'6. A structured programming team will develop the software.'),\n _(\n u'7. The educational level of the software team members is above average.'\n ), _(\n u'8. The experience level of the software team members is above average.'\n )]\n _x_pos, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _x_pos += 125\n _fixed.put(self.chkDevEnvQ1, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ2, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ3, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ4, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ5, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ6, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ7, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ8, _x_pos, _y_pos[7])\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Methods'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack2(_frame, True, True)\n _labels = [_(u'1. Standards are defined and will be enforced.'), _(\n u'2. Software will be developed using a higher order language.'\n ), _(\n u'3. The development process will include formal reviews (PDR, CDR, etc.).'\n ), _(\n u'4. The development process will include frequent walkthroughs.'\n ), _(\n u'5. Development will take a top-down and structured approach.'\n ), _(u'6. Unit development folders will be used.'), _(\n u'7. A software development library will be used.'), _(\n u'8. A formal change and error reporting process will be used.'\n ), _(u'9. 
Progress and status will routinely be reported.')]\n __, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _fixed.put(self.chkDevEnvQ9, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ10, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ11, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ12, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ13, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ14, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ15, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ16, _x_pos, _y_pos[7])\n _fixed.put(self.chkDevEnvQ17, _x_pos, _y_pos[8])\n _hpaned = gtk.HPaned()\n self.pack2(_hpaned, resize=True, shrink=True)\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Documentation'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack1(_frame, True, True)\n _labels = [_(\n u' 1. System requirements specifications will be documented.'),\n _(\n u' 2. Software requirements specifications will be documented.'\n ), _(u' 3. Interface design specifications will be documented.'\n ), _(u' 4. Software design specification will be documented.'),\n _(\n u' 5. Test plans, procedures, and reports will be documented.'),\n _(u' 6. The software development plan will be documented.'), _(\n u' 7. The software quality assurance plan will be documented.'),\n _(\n u' 8. The software configuration management plan will be documented.'\n ), _(u' 9. A requirements traceability matrix will be used.'),\n _(u'10. The software version description will be documented.'),\n _(u'11. All software discrepancies will be documented.')]\n __, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _fixed.put(self.chkDevEnvQ18, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ19, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ20, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ21, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ22, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ23, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ24, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ25, _x_pos, _y_pos[7])\n _fixed.put(self.chkDevEnvQ26, _x_pos, _y_pos[8])\n _fixed.put(self.chkDevEnvQ27, _x_pos, _y_pos[9])\n _fixed.put(self.chkDevEnvQ28, _x_pos, _y_pos[10])\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Tools & Test Techniques'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack2(_frame, True, True)\n _labels = [_(\n u' 1. The software language requirements will be specified.'),\n _(u' 2. Formal program design language will be used.'), _(\n u' 3. Program design graphical techniques (flowcharts, HIPO, etc.) will be used.'\n ), _(u' 4. Simulation/emulation tools will be used.'), _(\n u' 5. Configuration management tools will be used.'), _(\n u' 6. A code auditing tool will be used.'), _(\n u' 7. A data flow analyzer will be used.'), _(\n u\" 8. A programmer's workbench will be used.\"), _(\n u' 9. Measurement tools will be used.'), _(\n u'10. Software code reviews will be used.'), _(\n u'11. Software branch testing will be used.'), _(\n u'12. Random testing will be used.'), _(\n u'13. Functional testing will be used.'), _(\n u'14. Error and anomaly detection testing will be used.'), _(\n u'15. 
Structure analysis will be used.')]\n __, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _fixed.put(self.chkDevEnvQ29, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ30, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ31, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ32, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ33, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ34, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ35, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ36, _x_pos, _y_pos[7])\n _fixed.put(self.chkDevEnvQ37, _x_pos, _y_pos[8])\n _fixed.put(self.chkDevEnvQ38, _x_pos, _y_pos[9])\n _fixed.put(self.chkDevEnvQ39, _x_pos, _y_pos[10])\n _fixed.put(self.chkDevEnvQ40, _x_pos, _y_pos[11])\n _fixed.put(self.chkDevEnvQ41, _x_pos, _y_pos[12])\n _fixed.put(self.chkDevEnvQ42, _x_pos, _y_pos[13])\n _fixed.put(self.chkDevEnvQ43, _x_pos, _y_pos[14])\n _label = gtk.Label()\n _label.set_markup(\"<span weight='bold'>\" + _(\n u'Development\\nEnvironment') + '</span>')\n _label.set_alignment(xalign=0.5, yalign=0.5)\n _label.set_justify(gtk.JUSTIFY_CENTER)\n _label.set_angle(0)\n _label.show_all()\n _label.set_tooltip_text(_(\n u'Assesses risk due to the development environment.'))\n notebook.insert_page(self, tab_label=_label, position=-1)\n return False\n\n def load(self, model):\n \"\"\"\n Method to load the Development Environment Risk Analysis answers.\n\n :param `rtk.software.Software` model: the Software data model to load\n the gtk.ToggleButton() from.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n self._software_model = model\n self.chkDevEnvQ1.set_active(model.lst_development[0])\n self.chkDevEnvQ2.set_active(model.lst_development[1])\n self.chkDevEnvQ3.set_active(model.lst_development[2])\n self.chkDevEnvQ4.set_active(model.lst_development[3])\n self.chkDevEnvQ5.set_active(model.lst_development[4])\n self.chkDevEnvQ6.set_active(model.lst_development[5])\n self.chkDevEnvQ7.set_active(model.lst_development[6])\n self.chkDevEnvQ8.set_active(model.lst_development[7])\n self.chkDevEnvQ9.set_active(model.lst_development[8])\n self.chkDevEnvQ10.set_active(model.lst_development[9])\n self.chkDevEnvQ11.set_active(model.lst_development[10])\n self.chkDevEnvQ12.set_active(model.lst_development[11])\n self.chkDevEnvQ13.set_active(model.lst_development[12])\n self.chkDevEnvQ14.set_active(model.lst_development[13])\n self.chkDevEnvQ15.set_active(model.lst_development[14])\n self.chkDevEnvQ16.set_active(model.lst_development[15])\n self.chkDevEnvQ17.set_active(model.lst_development[16])\n self.chkDevEnvQ18.set_active(model.lst_development[17])\n self.chkDevEnvQ19.set_active(model.lst_development[18])\n self.chkDevEnvQ20.set_active(model.lst_development[19])\n self.chkDevEnvQ21.set_active(model.lst_development[20])\n self.chkDevEnvQ22.set_active(model.lst_development[21])\n self.chkDevEnvQ23.set_active(model.lst_development[22])\n self.chkDevEnvQ24.set_active(model.lst_development[23])\n self.chkDevEnvQ25.set_active(model.lst_development[24])\n self.chkDevEnvQ26.set_active(model.lst_development[25])\n self.chkDevEnvQ27.set_active(model.lst_development[26])\n self.chkDevEnvQ28.set_active(model.lst_development[27])\n self.chkDevEnvQ29.set_active(model.lst_development[28])\n self.chkDevEnvQ30.set_active(model.lst_development[29])\n self.chkDevEnvQ31.set_active(model.lst_development[30])\n self.chkDevEnvQ32.set_active(model.lst_development[31])\n self.chkDevEnvQ33.set_active(model.lst_development[32])\n 
self.chkDevEnvQ34.set_active(model.lst_development[33])\n self.chkDevEnvQ35.set_active(model.lst_development[34])\n self.chkDevEnvQ36.set_active(model.lst_development[35])\n self.chkDevEnvQ37.set_active(model.lst_development[36])\n self.chkDevEnvQ38.set_active(model.lst_development[37])\n self.chkDevEnvQ39.set_active(model.lst_development[38])\n self.chkDevEnvQ40.set_active(model.lst_development[39])\n self.chkDevEnvQ41.set_active(model.lst_development[40])\n self.chkDevEnvQ42.set_active(model.lst_development[41])\n self.chkDevEnvQ43.set_active(model.lst_development[42])\n return False\n\n def _on_toggled(self, check, index):\n \"\"\"\n Callback method for gtk.CheckButton() 'toggled' event.\n\n :param gtk.CheckButton check: the gtk.CheckButton() that called this\n method.\n :param int index: the index of the Development Environment question\n associated with the gtk.CheckButton() that was\n toggled.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n check.handler_block(self._lst_handler_id[index])\n self._software_model.lst_development[index] = int(check.get_active())\n check.handler_unblock(self._lst_handler_id[index])\n return False\n",
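The later steps replace whole statement classes with placeholders: the module docstring becomes '<docstring token>', the top-level imports collapse to '<import token>', the dunder and gettext assignments become '<assignment token>', and the try/except bootstrapping eventually becomes '<code token>'. A speculative sketch of line-level masking that could produce the import step; the regex and function name are assumptions about, not a description of, the actual pipeline:

import re

def mask_imports(source):
    # Replace each un-indented import statement with the placeholder;
    # indented imports inside try/except blocks are untouched, matching
    # the steps above. Collapsing a run of adjacent placeholders into a
    # single '<import token>' would need a follow-up pass.
    return re.sub(r'^(?:import|from)\s+\S.*$', '<import token>',
                  source, flags=re.MULTILINE)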
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass RiskAnalysis(gtk.VPaned):\n \"\"\"\n The Work Book view for analyzing and displaying the risk associated with\n the development environment. The attributes of a development environment\n Work Book view are:\n\n :ivar list _lst_handler_id: the list of gtk.Widget() signal handler IDs.\n :ivar _software_model: the :py:class:`rtk.software.Software.Model` to\n display.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Method to initialize the development environment risk analysis\n questions Work Book page.\n \"\"\"\n gtk.VPaned.__init__(self)\n self._lst_handler_id = []\n self._software_model = None\n self.chkDevEnvQ1 = Widgets.make_check_button()\n self.chkDevEnvQ2 = Widgets.make_check_button()\n self.chkDevEnvQ3 = Widgets.make_check_button()\n self.chkDevEnvQ4 = Widgets.make_check_button()\n self.chkDevEnvQ5 = Widgets.make_check_button()\n self.chkDevEnvQ6 = Widgets.make_check_button()\n self.chkDevEnvQ7 = Widgets.make_check_button()\n self.chkDevEnvQ8 = Widgets.make_check_button()\n self.chkDevEnvQ9 = Widgets.make_check_button()\n self.chkDevEnvQ10 = Widgets.make_check_button()\n self.chkDevEnvQ11 = Widgets.make_check_button()\n self.chkDevEnvQ12 = Widgets.make_check_button()\n self.chkDevEnvQ13 = Widgets.make_check_button()\n self.chkDevEnvQ14 = Widgets.make_check_button()\n self.chkDevEnvQ15 = Widgets.make_check_button()\n self.chkDevEnvQ16 = Widgets.make_check_button()\n self.chkDevEnvQ17 = Widgets.make_check_button()\n self.chkDevEnvQ18 = Widgets.make_check_button()\n self.chkDevEnvQ19 = Widgets.make_check_button()\n self.chkDevEnvQ20 = Widgets.make_check_button()\n self.chkDevEnvQ21 = Widgets.make_check_button()\n self.chkDevEnvQ22 = Widgets.make_check_button()\n self.chkDevEnvQ23 = Widgets.make_check_button()\n self.chkDevEnvQ24 = Widgets.make_check_button()\n self.chkDevEnvQ25 = Widgets.make_check_button()\n self.chkDevEnvQ26 = Widgets.make_check_button()\n self.chkDevEnvQ27 = Widgets.make_check_button()\n self.chkDevEnvQ28 = Widgets.make_check_button()\n self.chkDevEnvQ29 = Widgets.make_check_button()\n self.chkDevEnvQ30 = Widgets.make_check_button()\n self.chkDevEnvQ31 = Widgets.make_check_button()\n self.chkDevEnvQ32 = Widgets.make_check_button()\n self.chkDevEnvQ33 = Widgets.make_check_button()\n self.chkDevEnvQ34 = Widgets.make_check_button()\n self.chkDevEnvQ35 = Widgets.make_check_button()\n self.chkDevEnvQ36 = Widgets.make_check_button()\n self.chkDevEnvQ37 = Widgets.make_check_button()\n self.chkDevEnvQ38 = Widgets.make_check_button()\n self.chkDevEnvQ39 = Widgets.make_check_button()\n self.chkDevEnvQ40 = Widgets.make_check_button()\n self.chkDevEnvQ41 = Widgets.make_check_button()\n self.chkDevEnvQ42 = Widgets.make_check_button()\n self.chkDevEnvQ43 = Widgets.make_check_button()\n self._lst_handler_id.append(self.chkDevEnvQ1.connect('toggled',\n self._on_toggled, 0))\n self._lst_handler_id.append(self.chkDevEnvQ2.connect('toggled',\n self._on_toggled, 1))\n self._lst_handler_id.append(self.chkDevEnvQ3.connect('toggled',\n self._on_toggled, 2))\n self._lst_handler_id.append(self.chkDevEnvQ4.connect('toggled',\n self._on_toggled, 3))\n self._lst_handler_id.append(self.chkDevEnvQ5.connect('toggled',\n self._on_toggled, 4))\n self._lst_handler_id.append(self.chkDevEnvQ6.connect('toggled',\n self._on_toggled, 5))\n self._lst_handler_id.append(self.chkDevEnvQ7.connect('toggled',\n self._on_toggled, 6))\n self._lst_handler_id.append(self.chkDevEnvQ8.connect('toggled',\n 
self._on_toggled, 7))\n self._lst_handler_id.append(self.chkDevEnvQ9.connect('toggled',\n self._on_toggled, 8))\n self._lst_handler_id.append(self.chkDevEnvQ10.connect('toggled',\n self._on_toggled, 9))\n self._lst_handler_id.append(self.chkDevEnvQ11.connect('toggled',\n self._on_toggled, 10))\n self._lst_handler_id.append(self.chkDevEnvQ12.connect('toggled',\n self._on_toggled, 11))\n self._lst_handler_id.append(self.chkDevEnvQ13.connect('toggled',\n self._on_toggled, 12))\n self._lst_handler_id.append(self.chkDevEnvQ14.connect('toggled',\n self._on_toggled, 13))\n self._lst_handler_id.append(self.chkDevEnvQ15.connect('toggled',\n self._on_toggled, 14))\n self._lst_handler_id.append(self.chkDevEnvQ16.connect('toggled',\n self._on_toggled, 15))\n self._lst_handler_id.append(self.chkDevEnvQ17.connect('toggled',\n self._on_toggled, 16))\n self._lst_handler_id.append(self.chkDevEnvQ18.connect('toggled',\n self._on_toggled, 17))\n self._lst_handler_id.append(self.chkDevEnvQ19.connect('toggled',\n self._on_toggled, 18))\n self._lst_handler_id.append(self.chkDevEnvQ20.connect('toggled',\n self._on_toggled, 19))\n self._lst_handler_id.append(self.chkDevEnvQ21.connect('toggled',\n self._on_toggled, 20))\n self._lst_handler_id.append(self.chkDevEnvQ22.connect('toggled',\n self._on_toggled, 21))\n self._lst_handler_id.append(self.chkDevEnvQ23.connect('toggled',\n self._on_toggled, 22))\n self._lst_handler_id.append(self.chkDevEnvQ24.connect('toggled',\n self._on_toggled, 23))\n self._lst_handler_id.append(self.chkDevEnvQ25.connect('toggled',\n self._on_toggled, 24))\n self._lst_handler_id.append(self.chkDevEnvQ26.connect('toggled',\n self._on_toggled, 25))\n self._lst_handler_id.append(self.chkDevEnvQ27.connect('toggled',\n self._on_toggled, 26))\n self._lst_handler_id.append(self.chkDevEnvQ28.connect('toggled',\n self._on_toggled, 27))\n self._lst_handler_id.append(self.chkDevEnvQ29.connect('toggled',\n self._on_toggled, 28))\n self._lst_handler_id.append(self.chkDevEnvQ30.connect('toggled',\n self._on_toggled, 29))\n self._lst_handler_id.append(self.chkDevEnvQ31.connect('toggled',\n self._on_toggled, 30))\n self._lst_handler_id.append(self.chkDevEnvQ32.connect('toggled',\n self._on_toggled, 31))\n self._lst_handler_id.append(self.chkDevEnvQ33.connect('toggled',\n self._on_toggled, 32))\n self._lst_handler_id.append(self.chkDevEnvQ34.connect('toggled',\n self._on_toggled, 33))\n self._lst_handler_id.append(self.chkDevEnvQ35.connect('toggled',\n self._on_toggled, 34))\n self._lst_handler_id.append(self.chkDevEnvQ36.connect('toggled',\n self._on_toggled, 35))\n self._lst_handler_id.append(self.chkDevEnvQ37.connect('toggled',\n self._on_toggled, 36))\n self._lst_handler_id.append(self.chkDevEnvQ38.connect('toggled',\n self._on_toggled, 37))\n self._lst_handler_id.append(self.chkDevEnvQ39.connect('toggled',\n self._on_toggled, 38))\n self._lst_handler_id.append(self.chkDevEnvQ40.connect('toggled',\n self._on_toggled, 39))\n self._lst_handler_id.append(self.chkDevEnvQ41.connect('toggled',\n self._on_toggled, 40))\n self._lst_handler_id.append(self.chkDevEnvQ42.connect('toggled',\n self._on_toggled, 41))\n self._lst_handler_id.append(self.chkDevEnvQ43.connect('toggled',\n self._on_toggled, 42))\n\n def create_risk_analysis_page(self, notebook):\n \"\"\"\n Method to create the development environment risk analysis page and add\n it to the risk analysis gtk.Notebook().\n\n :param gtk.Notebook notebook: the gtk.Notebook() instance that will\n hold the development environment risk\n analysis questions.\n 
:return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n _hpaned = gtk.HPaned()\n self.pack1(_hpaned, resize=True, shrink=True)\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Organization'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack1(_frame, True, True)\n _labels = [_(\n u'1. There are separate design and coding organizations.'), _(\n u'2. There is an independent software test organization.'), _(\n u'3. There is an independent software quality assurance organization.'\n ), _(\n u'4. There is an independent software configuration management organization.'\n ), _(\n u'5. There is an independent software verification and validation organization.'\n ), _(\n u'6. A structured programming team will develop the software.'),\n _(\n u'7. The educational level of the software team members is above average.'\n ), _(\n u'8. The experience level of the software team members is above average.'\n )]\n _x_pos, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _x_pos += 125\n _fixed.put(self.chkDevEnvQ1, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ2, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ3, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ4, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ5, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ6, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ7, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ8, _x_pos, _y_pos[7])\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Methods'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack2(_frame, True, True)\n _labels = [_(u'1. Standards are defined and will be enforced.'), _(\n u'2. Software will be developed using a higher order language.'\n ), _(\n u'3. The development process will include formal reviews (PDR, CDR, etc.).'\n ), _(\n u'4. The development process will include frequent walkthroughs.'\n ), _(\n u'5. Development will take a top-down and structured approach.'\n ), _(u'6. Unit development folders will be used.'), _(\n u'7. A software development library will be used.'), _(\n u'8. A formal change and error reporting process will be used.'\n ), _(u'9. 
Progress and status will routinely be reported.')]\n __, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _fixed.put(self.chkDevEnvQ9, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ10, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ11, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ12, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ13, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ14, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ15, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ16, _x_pos, _y_pos[7])\n _fixed.put(self.chkDevEnvQ17, _x_pos, _y_pos[8])\n _hpaned = gtk.HPaned()\n self.pack2(_hpaned, resize=True, shrink=True)\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Documentation'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack1(_frame, True, True)\n _labels = [_(\n u' 1. System requirements specifications will be documented.'),\n _(\n u' 2. Software requirements specifications will be documented.'\n ), _(u' 3. Interface design specifications will be documented.'\n ), _(u' 4. Software design specification will be documented.'),\n _(\n u' 5. Test plans, procedures, and reports will be documented.'),\n _(u' 6. The software development plan will be documented.'), _(\n u' 7. The software quality assurance plan will be documented.'),\n _(\n u' 8. The software configuration management plan will be documented.'\n ), _(u' 9. A requirements traceability matrix will be used.'),\n _(u'10. The software version description will be documented.'),\n _(u'11. All software discrepancies will be documented.')]\n __, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _fixed.put(self.chkDevEnvQ18, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ19, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ20, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ21, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ22, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ23, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ24, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ25, _x_pos, _y_pos[7])\n _fixed.put(self.chkDevEnvQ26, _x_pos, _y_pos[8])\n _fixed.put(self.chkDevEnvQ27, _x_pos, _y_pos[9])\n _fixed.put(self.chkDevEnvQ28, _x_pos, _y_pos[10])\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Tools & Test Techniques'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack2(_frame, True, True)\n _labels = [_(\n u' 1. The software language requirements will be specified.'),\n _(u' 2. Formal program design language will be used.'), _(\n u' 3. Program design graphical techniques (flowcharts, HIPO, etc.) will be used.'\n ), _(u' 4. Simulation/emulation tools will be used.'), _(\n u' 5. Configuration management tools will be used.'), _(\n u' 6. A code auditing tool will be used.'), _(\n u' 7. A data flow analyzer will be used.'), _(\n u\" 8. A programmer's workbench will be used.\"), _(\n u' 9. Measurement tools will be used.'), _(\n u'10. Software code reviews will be used.'), _(\n u'11. Software branch testing will be used.'), _(\n u'12. Random testing will be used.'), _(\n u'13. Functional testing will be used.'), _(\n u'14. Error and anomaly detection testing will be used.'), _(\n u'15. 
Structure analysis will be used.')]\n __, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _fixed.put(self.chkDevEnvQ29, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ30, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ31, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ32, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ33, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ34, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ35, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ36, _x_pos, _y_pos[7])\n _fixed.put(self.chkDevEnvQ37, _x_pos, _y_pos[8])\n _fixed.put(self.chkDevEnvQ38, _x_pos, _y_pos[9])\n _fixed.put(self.chkDevEnvQ39, _x_pos, _y_pos[10])\n _fixed.put(self.chkDevEnvQ40, _x_pos, _y_pos[11])\n _fixed.put(self.chkDevEnvQ41, _x_pos, _y_pos[12])\n _fixed.put(self.chkDevEnvQ42, _x_pos, _y_pos[13])\n _fixed.put(self.chkDevEnvQ43, _x_pos, _y_pos[14])\n _label = gtk.Label()\n _label.set_markup(\"<span weight='bold'>\" + _(\n u'Development\\nEnvironment') + '</span>')\n _label.set_alignment(xalign=0.5, yalign=0.5)\n _label.set_justify(gtk.JUSTIFY_CENTER)\n _label.set_angle(0)\n _label.show_all()\n _label.set_tooltip_text(_(\n u'Assesses risk due to the development environment.'))\n notebook.insert_page(self, tab_label=_label, position=-1)\n return False\n\n def load(self, model):\n \"\"\"\n Method to load the Development Environment Risk Analysis answers.\n\n :param `rtk.software.Software` model: the Software data model to load\n the gtk.ToggleButton() from.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n self._software_model = model\n self.chkDevEnvQ1.set_active(model.lst_development[0])\n self.chkDevEnvQ2.set_active(model.lst_development[1])\n self.chkDevEnvQ3.set_active(model.lst_development[2])\n self.chkDevEnvQ4.set_active(model.lst_development[3])\n self.chkDevEnvQ5.set_active(model.lst_development[4])\n self.chkDevEnvQ6.set_active(model.lst_development[5])\n self.chkDevEnvQ7.set_active(model.lst_development[6])\n self.chkDevEnvQ8.set_active(model.lst_development[7])\n self.chkDevEnvQ9.set_active(model.lst_development[8])\n self.chkDevEnvQ10.set_active(model.lst_development[9])\n self.chkDevEnvQ11.set_active(model.lst_development[10])\n self.chkDevEnvQ12.set_active(model.lst_development[11])\n self.chkDevEnvQ13.set_active(model.lst_development[12])\n self.chkDevEnvQ14.set_active(model.lst_development[13])\n self.chkDevEnvQ15.set_active(model.lst_development[14])\n self.chkDevEnvQ16.set_active(model.lst_development[15])\n self.chkDevEnvQ17.set_active(model.lst_development[16])\n self.chkDevEnvQ18.set_active(model.lst_development[17])\n self.chkDevEnvQ19.set_active(model.lst_development[18])\n self.chkDevEnvQ20.set_active(model.lst_development[19])\n self.chkDevEnvQ21.set_active(model.lst_development[20])\n self.chkDevEnvQ22.set_active(model.lst_development[21])\n self.chkDevEnvQ23.set_active(model.lst_development[22])\n self.chkDevEnvQ24.set_active(model.lst_development[23])\n self.chkDevEnvQ25.set_active(model.lst_development[24])\n self.chkDevEnvQ26.set_active(model.lst_development[25])\n self.chkDevEnvQ27.set_active(model.lst_development[26])\n self.chkDevEnvQ28.set_active(model.lst_development[27])\n self.chkDevEnvQ29.set_active(model.lst_development[28])\n self.chkDevEnvQ30.set_active(model.lst_development[29])\n self.chkDevEnvQ31.set_active(model.lst_development[30])\n self.chkDevEnvQ32.set_active(model.lst_development[31])\n self.chkDevEnvQ33.set_active(model.lst_development[32])\n 
self.chkDevEnvQ34.set_active(model.lst_development[33])\n self.chkDevEnvQ35.set_active(model.lst_development[34])\n self.chkDevEnvQ36.set_active(model.lst_development[35])\n self.chkDevEnvQ37.set_active(model.lst_development[36])\n self.chkDevEnvQ38.set_active(model.lst_development[37])\n self.chkDevEnvQ39.set_active(model.lst_development[38])\n self.chkDevEnvQ40.set_active(model.lst_development[39])\n self.chkDevEnvQ41.set_active(model.lst_development[40])\n self.chkDevEnvQ42.set_active(model.lst_development[41])\n self.chkDevEnvQ43.set_active(model.lst_development[42])\n return False\n\n def _on_toggled(self, check, index):\n \"\"\"\n Callback method for gtk.CheckButton() 'toggled' event.\n\n :param gtk.CheckButton check: the gtk.CheckButton() that called this\n method.\n :param int index: the index of the Development Environment question\n associated with the gtk.CheckButton() that was\n toggled.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n check.handler_block(self._lst_handler_id[index])\n self._software_model.lst_development[index] = int(check.get_active())\n check.handler_unblock(self._lst_handler_id[index])\n return False\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass RiskAnalysis(gtk.VPaned):\n <docstring token>\n\n def __init__(self):\n \"\"\"\n Method to initialize the development environment risk analysis\n questions Work Book page.\n \"\"\"\n gtk.VPaned.__init__(self)\n self._lst_handler_id = []\n self._software_model = None\n self.chkDevEnvQ1 = Widgets.make_check_button()\n self.chkDevEnvQ2 = Widgets.make_check_button()\n self.chkDevEnvQ3 = Widgets.make_check_button()\n self.chkDevEnvQ4 = Widgets.make_check_button()\n self.chkDevEnvQ5 = Widgets.make_check_button()\n self.chkDevEnvQ6 = Widgets.make_check_button()\n self.chkDevEnvQ7 = Widgets.make_check_button()\n self.chkDevEnvQ8 = Widgets.make_check_button()\n self.chkDevEnvQ9 = Widgets.make_check_button()\n self.chkDevEnvQ10 = Widgets.make_check_button()\n self.chkDevEnvQ11 = Widgets.make_check_button()\n self.chkDevEnvQ12 = Widgets.make_check_button()\n self.chkDevEnvQ13 = Widgets.make_check_button()\n self.chkDevEnvQ14 = Widgets.make_check_button()\n self.chkDevEnvQ15 = Widgets.make_check_button()\n self.chkDevEnvQ16 = Widgets.make_check_button()\n self.chkDevEnvQ17 = Widgets.make_check_button()\n self.chkDevEnvQ18 = Widgets.make_check_button()\n self.chkDevEnvQ19 = Widgets.make_check_button()\n self.chkDevEnvQ20 = Widgets.make_check_button()\n self.chkDevEnvQ21 = Widgets.make_check_button()\n self.chkDevEnvQ22 = Widgets.make_check_button()\n self.chkDevEnvQ23 = Widgets.make_check_button()\n self.chkDevEnvQ24 = Widgets.make_check_button()\n self.chkDevEnvQ25 = Widgets.make_check_button()\n self.chkDevEnvQ26 = Widgets.make_check_button()\n self.chkDevEnvQ27 = Widgets.make_check_button()\n self.chkDevEnvQ28 = Widgets.make_check_button()\n self.chkDevEnvQ29 = Widgets.make_check_button()\n self.chkDevEnvQ30 = Widgets.make_check_button()\n self.chkDevEnvQ31 = Widgets.make_check_button()\n self.chkDevEnvQ32 = Widgets.make_check_button()\n self.chkDevEnvQ33 = Widgets.make_check_button()\n self.chkDevEnvQ34 = Widgets.make_check_button()\n self.chkDevEnvQ35 = Widgets.make_check_button()\n self.chkDevEnvQ36 = Widgets.make_check_button()\n self.chkDevEnvQ37 = Widgets.make_check_button()\n self.chkDevEnvQ38 = Widgets.make_check_button()\n self.chkDevEnvQ39 = Widgets.make_check_button()\n self.chkDevEnvQ40 = Widgets.make_check_button()\n self.chkDevEnvQ41 = Widgets.make_check_button()\n self.chkDevEnvQ42 = Widgets.make_check_button()\n self.chkDevEnvQ43 = Widgets.make_check_button()\n self._lst_handler_id.append(self.chkDevEnvQ1.connect('toggled',\n self._on_toggled, 0))\n self._lst_handler_id.append(self.chkDevEnvQ2.connect('toggled',\n self._on_toggled, 1))\n self._lst_handler_id.append(self.chkDevEnvQ3.connect('toggled',\n self._on_toggled, 2))\n self._lst_handler_id.append(self.chkDevEnvQ4.connect('toggled',\n self._on_toggled, 3))\n self._lst_handler_id.append(self.chkDevEnvQ5.connect('toggled',\n self._on_toggled, 4))\n self._lst_handler_id.append(self.chkDevEnvQ6.connect('toggled',\n self._on_toggled, 5))\n self._lst_handler_id.append(self.chkDevEnvQ7.connect('toggled',\n self._on_toggled, 6))\n self._lst_handler_id.append(self.chkDevEnvQ8.connect('toggled',\n self._on_toggled, 7))\n self._lst_handler_id.append(self.chkDevEnvQ9.connect('toggled',\n self._on_toggled, 8))\n self._lst_handler_id.append(self.chkDevEnvQ10.connect('toggled',\n self._on_toggled, 9))\n self._lst_handler_id.append(self.chkDevEnvQ11.connect('toggled',\n self._on_toggled, 10))\n 
self._lst_handler_id.append(self.chkDevEnvQ12.connect('toggled',\n self._on_toggled, 11))\n self._lst_handler_id.append(self.chkDevEnvQ13.connect('toggled',\n self._on_toggled, 12))\n self._lst_handler_id.append(self.chkDevEnvQ14.connect('toggled',\n self._on_toggled, 13))\n self._lst_handler_id.append(self.chkDevEnvQ15.connect('toggled',\n self._on_toggled, 14))\n self._lst_handler_id.append(self.chkDevEnvQ16.connect('toggled',\n self._on_toggled, 15))\n self._lst_handler_id.append(self.chkDevEnvQ17.connect('toggled',\n self._on_toggled, 16))\n self._lst_handler_id.append(self.chkDevEnvQ18.connect('toggled',\n self._on_toggled, 17))\n self._lst_handler_id.append(self.chkDevEnvQ19.connect('toggled',\n self._on_toggled, 18))\n self._lst_handler_id.append(self.chkDevEnvQ20.connect('toggled',\n self._on_toggled, 19))\n self._lst_handler_id.append(self.chkDevEnvQ21.connect('toggled',\n self._on_toggled, 20))\n self._lst_handler_id.append(self.chkDevEnvQ22.connect('toggled',\n self._on_toggled, 21))\n self._lst_handler_id.append(self.chkDevEnvQ23.connect('toggled',\n self._on_toggled, 22))\n self._lst_handler_id.append(self.chkDevEnvQ24.connect('toggled',\n self._on_toggled, 23))\n self._lst_handler_id.append(self.chkDevEnvQ25.connect('toggled',\n self._on_toggled, 24))\n self._lst_handler_id.append(self.chkDevEnvQ26.connect('toggled',\n self._on_toggled, 25))\n self._lst_handler_id.append(self.chkDevEnvQ27.connect('toggled',\n self._on_toggled, 26))\n self._lst_handler_id.append(self.chkDevEnvQ28.connect('toggled',\n self._on_toggled, 27))\n self._lst_handler_id.append(self.chkDevEnvQ29.connect('toggled',\n self._on_toggled, 28))\n self._lst_handler_id.append(self.chkDevEnvQ30.connect('toggled',\n self._on_toggled, 29))\n self._lst_handler_id.append(self.chkDevEnvQ31.connect('toggled',\n self._on_toggled, 30))\n self._lst_handler_id.append(self.chkDevEnvQ32.connect('toggled',\n self._on_toggled, 31))\n self._lst_handler_id.append(self.chkDevEnvQ33.connect('toggled',\n self._on_toggled, 32))\n self._lst_handler_id.append(self.chkDevEnvQ34.connect('toggled',\n self._on_toggled, 33))\n self._lst_handler_id.append(self.chkDevEnvQ35.connect('toggled',\n self._on_toggled, 34))\n self._lst_handler_id.append(self.chkDevEnvQ36.connect('toggled',\n self._on_toggled, 35))\n self._lst_handler_id.append(self.chkDevEnvQ37.connect('toggled',\n self._on_toggled, 36))\n self._lst_handler_id.append(self.chkDevEnvQ38.connect('toggled',\n self._on_toggled, 37))\n self._lst_handler_id.append(self.chkDevEnvQ39.connect('toggled',\n self._on_toggled, 38))\n self._lst_handler_id.append(self.chkDevEnvQ40.connect('toggled',\n self._on_toggled, 39))\n self._lst_handler_id.append(self.chkDevEnvQ41.connect('toggled',\n self._on_toggled, 40))\n self._lst_handler_id.append(self.chkDevEnvQ42.connect('toggled',\n self._on_toggled, 41))\n self._lst_handler_id.append(self.chkDevEnvQ43.connect('toggled',\n self._on_toggled, 42))\n\n def create_risk_analysis_page(self, notebook):\n \"\"\"\n Method to create the development environment risk analysis page and add\n it to the risk analysis gtk.Notebook().\n\n :param gtk.Notebook notebook: the gtk.Notebook() instance that will\n hold the development environment risk\n analysis questions.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n _hpaned = gtk.HPaned()\n self.pack1(_hpaned, resize=True, shrink=True)\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n 
_scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Organization'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack1(_frame, True, True)\n _labels = [_(\n u'1. There are separate design and coding organizations.'), _(\n u'2. There is an independent software test organization.'), _(\n u'3. There is an independent software quality assurance organization.'\n ), _(\n u'4. There is an independent software configuration management organization.'\n ), _(\n u'5. There is an independent software verification and validation organization.'\n ), _(\n u'6. A structured programming team will develop the software.'),\n _(\n u'7. The educational level of the software team members is above average.'\n ), _(\n u'8. The experience level of the software team members is above average.'\n )]\n _x_pos, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _x_pos += 125\n _fixed.put(self.chkDevEnvQ1, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ2, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ3, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ4, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ5, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ6, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ7, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ8, _x_pos, _y_pos[7])\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Methods'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack2(_frame, True, True)\n _labels = [_(u'1. Standards are defined and will be enforced.'), _(\n u'2. Software will be developed using a higher order language.'\n ), _(\n u'3. The development process will include formal reviews (PDR, CDR, etc.).'\n ), _(\n u'4. The development process will include frequent walkthroughs.'\n ), _(\n u'5. Development will take a top-down and structured approach.'\n ), _(u'6. Unit development folders will be used.'), _(\n u'7. A software development library will be used.'), _(\n u'8. A formal change and error reporting process will be used.'\n ), _(u'9. Progress and status will routinely be reported.')]\n __, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _fixed.put(self.chkDevEnvQ9, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ10, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ11, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ12, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ13, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ14, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ15, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ16, _x_pos, _y_pos[7])\n _fixed.put(self.chkDevEnvQ17, _x_pos, _y_pos[8])\n _hpaned = gtk.HPaned()\n self.pack2(_hpaned, resize=True, shrink=True)\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Documentation'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack1(_frame, True, True)\n _labels = [_(\n u' 1. System requirements specifications will be documented.'),\n _(\n u' 2. Software requirements specifications will be documented.'\n ), _(u' 3. Interface design specifications will be documented.'\n ), _(u' 4. Software design specification will be documented.'),\n _(\n u' 5. 
Test plans, procedures, and reports will be documented.'),\n _(u' 6. The software development plan will be documented.'), _(\n u' 7. The software quality assurance plan will be documented.'),\n _(\n u' 8. The software configuration management plan will be documented.'\n ), _(u' 9. A requirements traceability matrix will be used.'),\n _(u'10. The software version description will be documented.'),\n _(u'11. All software discrepancies will be documented.')]\n __, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _fixed.put(self.chkDevEnvQ18, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ19, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ20, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ21, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ22, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ23, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ24, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ25, _x_pos, _y_pos[7])\n _fixed.put(self.chkDevEnvQ26, _x_pos, _y_pos[8])\n _fixed.put(self.chkDevEnvQ27, _x_pos, _y_pos[9])\n _fixed.put(self.chkDevEnvQ28, _x_pos, _y_pos[10])\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Tools & Test Techniques'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack2(_frame, True, True)\n _labels = [_(\n u' 1. The software language requirements will be specified.'),\n _(u' 2. Formal program design language will be used.'), _(\n u' 3. Program design graphical techniques (flowcharts, HIPO, etc.) will be used.'\n ), _(u' 4. Simulation/emulation tools will be used.'), _(\n u' 5. Configuration management tools will be used.'), _(\n u' 6. A code auditing tool will be used.'), _(\n u' 7. A data flow analyzer will be used.'), _(\n u\" 8. A programmer's workbench will be used.\"), _(\n u' 9. Measurement tools will be used.'), _(\n u'10. Software code reviews will be used.'), _(\n u'11. Software branch testing will be used.'), _(\n u'12. Random testing will be used.'), _(\n u'13. Functional testing will be used.'), _(\n u'14. Error and anomaly detection testing will be used.'), _(\n u'15. 
Structure analysis will be used.')]\n __, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _fixed.put(self.chkDevEnvQ29, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ30, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ31, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ32, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ33, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ34, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ35, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ36, _x_pos, _y_pos[7])\n _fixed.put(self.chkDevEnvQ37, _x_pos, _y_pos[8])\n _fixed.put(self.chkDevEnvQ38, _x_pos, _y_pos[9])\n _fixed.put(self.chkDevEnvQ39, _x_pos, _y_pos[10])\n _fixed.put(self.chkDevEnvQ40, _x_pos, _y_pos[11])\n _fixed.put(self.chkDevEnvQ41, _x_pos, _y_pos[12])\n _fixed.put(self.chkDevEnvQ42, _x_pos, _y_pos[13])\n _fixed.put(self.chkDevEnvQ43, _x_pos, _y_pos[14])\n _label = gtk.Label()\n _label.set_markup(\"<span weight='bold'>\" + _(\n u'Development\\nEnvironment') + '</span>')\n _label.set_alignment(xalign=0.5, yalign=0.5)\n _label.set_justify(gtk.JUSTIFY_CENTER)\n _label.set_angle(0)\n _label.show_all()\n _label.set_tooltip_text(_(\n u'Assesses risk due to the development environment.'))\n notebook.insert_page(self, tab_label=_label, position=-1)\n return False\n\n def load(self, model):\n \"\"\"\n Method to load the Development Environment Risk Analysis answers.\n\n :param `rtk.software.Software` model: the Software data model to load\n the gtk.ToggleButton() from.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n self._software_model = model\n self.chkDevEnvQ1.set_active(model.lst_development[0])\n self.chkDevEnvQ2.set_active(model.lst_development[1])\n self.chkDevEnvQ3.set_active(model.lst_development[2])\n self.chkDevEnvQ4.set_active(model.lst_development[3])\n self.chkDevEnvQ5.set_active(model.lst_development[4])\n self.chkDevEnvQ6.set_active(model.lst_development[5])\n self.chkDevEnvQ7.set_active(model.lst_development[6])\n self.chkDevEnvQ8.set_active(model.lst_development[7])\n self.chkDevEnvQ9.set_active(model.lst_development[8])\n self.chkDevEnvQ10.set_active(model.lst_development[9])\n self.chkDevEnvQ11.set_active(model.lst_development[10])\n self.chkDevEnvQ12.set_active(model.lst_development[11])\n self.chkDevEnvQ13.set_active(model.lst_development[12])\n self.chkDevEnvQ14.set_active(model.lst_development[13])\n self.chkDevEnvQ15.set_active(model.lst_development[14])\n self.chkDevEnvQ16.set_active(model.lst_development[15])\n self.chkDevEnvQ17.set_active(model.lst_development[16])\n self.chkDevEnvQ18.set_active(model.lst_development[17])\n self.chkDevEnvQ19.set_active(model.lst_development[18])\n self.chkDevEnvQ20.set_active(model.lst_development[19])\n self.chkDevEnvQ21.set_active(model.lst_development[20])\n self.chkDevEnvQ22.set_active(model.lst_development[21])\n self.chkDevEnvQ23.set_active(model.lst_development[22])\n self.chkDevEnvQ24.set_active(model.lst_development[23])\n self.chkDevEnvQ25.set_active(model.lst_development[24])\n self.chkDevEnvQ26.set_active(model.lst_development[25])\n self.chkDevEnvQ27.set_active(model.lst_development[26])\n self.chkDevEnvQ28.set_active(model.lst_development[27])\n self.chkDevEnvQ29.set_active(model.lst_development[28])\n self.chkDevEnvQ30.set_active(model.lst_development[29])\n self.chkDevEnvQ31.set_active(model.lst_development[30])\n self.chkDevEnvQ32.set_active(model.lst_development[31])\n self.chkDevEnvQ33.set_active(model.lst_development[32])\n 
self.chkDevEnvQ34.set_active(model.lst_development[33])\n self.chkDevEnvQ35.set_active(model.lst_development[34])\n self.chkDevEnvQ36.set_active(model.lst_development[35])\n self.chkDevEnvQ37.set_active(model.lst_development[36])\n self.chkDevEnvQ38.set_active(model.lst_development[37])\n self.chkDevEnvQ39.set_active(model.lst_development[38])\n self.chkDevEnvQ40.set_active(model.lst_development[39])\n self.chkDevEnvQ41.set_active(model.lst_development[40])\n self.chkDevEnvQ42.set_active(model.lst_development[41])\n self.chkDevEnvQ43.set_active(model.lst_development[42])\n return False\n\n def _on_toggled(self, check, index):\n \"\"\"\n Callback method for gtk.CheckButton() 'toggled' event.\n\n :param gtk.CheckButton check: the gtk.CheckButton() that called this\n method.\n :param int index: the index of the Development Environment question\n associated with the gtk.CheckButton() that was\n toggled.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n check.handler_block(self._lst_handler_id[index])\n self._software_model.lst_development[index] = int(check.get_active())\n check.handler_unblock(self._lst_handler_id[index])\n return False\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass RiskAnalysis(gtk.VPaned):\n <docstring token>\n <function token>\n\n def create_risk_analysis_page(self, notebook):\n \"\"\"\n Method to create the development environment risk analysis page and add\n it to the risk analysis gtk.Notebook().\n\n :param gtk.Notebook notebook: the gtk.Notebook() instance that will\n hold the development environment risk\n analysis questions.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n _hpaned = gtk.HPaned()\n self.pack1(_hpaned, resize=True, shrink=True)\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Organization'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack1(_frame, True, True)\n _labels = [_(\n u'1. There are separate design and coding organizations.'), _(\n u'2. There is an independent software test organization.'), _(\n u'3. There is an independent software quality assurance organization.'\n ), _(\n u'4. There is an independent software configuration management organization.'\n ), _(\n u'5. There is an independent software verification and validation organization.'\n ), _(\n u'6. A structured programming team will develop the software.'),\n _(\n u'7. The educational level of the software team members is above average.'\n ), _(\n u'8. The experience level of the software team members is above average.'\n )]\n _x_pos, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _x_pos += 125\n _fixed.put(self.chkDevEnvQ1, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ2, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ3, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ4, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ5, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ6, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ7, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ8, _x_pos, _y_pos[7])\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Methods'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack2(_frame, True, True)\n _labels = [_(u'1. Standards are defined and will be enforced.'), _(\n u'2. Software will be developed using a higher order language.'\n ), _(\n u'3. The development process will include formal reviews (PDR, CDR, etc.).'\n ), _(\n u'4. The development process will include frequent walkthroughs.'\n ), _(\n u'5. Development will take a top-down and structured approach.'\n ), _(u'6. Unit development folders will be used.'), _(\n u'7. A software development library will be used.'), _(\n u'8. A formal change and error reporting process will be used.'\n ), _(u'9. 
Progress and status will routinely be reported.')]\n __, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _fixed.put(self.chkDevEnvQ9, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ10, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ11, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ12, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ13, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ14, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ15, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ16, _x_pos, _y_pos[7])\n _fixed.put(self.chkDevEnvQ17, _x_pos, _y_pos[8])\n _hpaned = gtk.HPaned()\n self.pack2(_hpaned, resize=True, shrink=True)\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Documentation'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack1(_frame, True, True)\n _labels = [_(\n u' 1. System requirements specifications will be documented.'),\n _(\n u' 2. Software requirements specifications will be documented.'\n ), _(u' 3. Interface design specifications will be documented.'\n ), _(u' 4. Software design specification will be documented.'),\n _(\n u' 5. Test plans, procedures, and reports will be documented.'),\n _(u' 6. The software development plan will be documented.'), _(\n u' 7. The software quality assurance plan will be documented.'),\n _(\n u' 8. The software configuration management plan will be documented.'\n ), _(u' 9. A requirements traceability matrix will be used.'),\n _(u'10. The software version description will be documented.'),\n _(u'11. All software discrepancies will be documented.')]\n __, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _fixed.put(self.chkDevEnvQ18, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ19, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ20, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ21, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ22, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ23, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ24, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ25, _x_pos, _y_pos[7])\n _fixed.put(self.chkDevEnvQ26, _x_pos, _y_pos[8])\n _fixed.put(self.chkDevEnvQ27, _x_pos, _y_pos[9])\n _fixed.put(self.chkDevEnvQ28, _x_pos, _y_pos[10])\n _fixed = gtk.Fixed()\n _scrollwindow = gtk.ScrolledWindow()\n _scrollwindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)\n _scrollwindow.add_with_viewport(_fixed)\n _frame = Widgets.make_frame(label=_(u'Tools & Test Techniques'))\n _frame.set_shadow_type(gtk.SHADOW_ETCHED_OUT)\n _frame.add(_scrollwindow)\n _hpaned.pack2(_frame, True, True)\n _labels = [_(\n u' 1. The software language requirements will be specified.'),\n _(u' 2. Formal program design language will be used.'), _(\n u' 3. Program design graphical techniques (flowcharts, HIPO, etc.) will be used.'\n ), _(u' 4. Simulation/emulation tools will be used.'), _(\n u' 5. Configuration management tools will be used.'), _(\n u' 6. A code auditing tool will be used.'), _(\n u' 7. A data flow analyzer will be used.'), _(\n u\" 8. A programmer's workbench will be used.\"), _(\n u' 9. Measurement tools will be used.'), _(\n u'10. Software code reviews will be used.'), _(\n u'11. Software branch testing will be used.'), _(\n u'12. Random testing will be used.'), _(\n u'13. Functional testing will be used.'), _(\n u'14. Error and anomaly detection testing will be used.'), _(\n u'15. 
Structure analysis will be used.')]\n __, _y_pos = Widgets.make_labels(_labels, _fixed, 5, 5, wrap=False)\n _fixed.put(self.chkDevEnvQ29, _x_pos, _y_pos[0])\n _fixed.put(self.chkDevEnvQ30, _x_pos, _y_pos[1])\n _fixed.put(self.chkDevEnvQ31, _x_pos, _y_pos[2])\n _fixed.put(self.chkDevEnvQ32, _x_pos, _y_pos[3])\n _fixed.put(self.chkDevEnvQ33, _x_pos, _y_pos[4])\n _fixed.put(self.chkDevEnvQ34, _x_pos, _y_pos[5])\n _fixed.put(self.chkDevEnvQ35, _x_pos, _y_pos[6])\n _fixed.put(self.chkDevEnvQ36, _x_pos, _y_pos[7])\n _fixed.put(self.chkDevEnvQ37, _x_pos, _y_pos[8])\n _fixed.put(self.chkDevEnvQ38, _x_pos, _y_pos[9])\n _fixed.put(self.chkDevEnvQ39, _x_pos, _y_pos[10])\n _fixed.put(self.chkDevEnvQ40, _x_pos, _y_pos[11])\n _fixed.put(self.chkDevEnvQ41, _x_pos, _y_pos[12])\n _fixed.put(self.chkDevEnvQ42, _x_pos, _y_pos[13])\n _fixed.put(self.chkDevEnvQ43, _x_pos, _y_pos[14])\n _label = gtk.Label()\n _label.set_markup(\"<span weight='bold'>\" + _(\n u'Development\\nEnvironment') + '</span>')\n _label.set_alignment(xalign=0.5, yalign=0.5)\n _label.set_justify(gtk.JUSTIFY_CENTER)\n _label.set_angle(0)\n _label.show_all()\n _label.set_tooltip_text(_(\n u'Assesses risk due to the development environment.'))\n notebook.insert_page(self, tab_label=_label, position=-1)\n return False\n\n def load(self, model):\n \"\"\"\n Method to load the Development Environment Risk Analysis answers.\n\n :param `rtk.software.Software` model: the Software data model to load\n the gtk.ToggleButton() from.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n self._software_model = model\n self.chkDevEnvQ1.set_active(model.lst_development[0])\n self.chkDevEnvQ2.set_active(model.lst_development[1])\n self.chkDevEnvQ3.set_active(model.lst_development[2])\n self.chkDevEnvQ4.set_active(model.lst_development[3])\n self.chkDevEnvQ5.set_active(model.lst_development[4])\n self.chkDevEnvQ6.set_active(model.lst_development[5])\n self.chkDevEnvQ7.set_active(model.lst_development[6])\n self.chkDevEnvQ8.set_active(model.lst_development[7])\n self.chkDevEnvQ9.set_active(model.lst_development[8])\n self.chkDevEnvQ10.set_active(model.lst_development[9])\n self.chkDevEnvQ11.set_active(model.lst_development[10])\n self.chkDevEnvQ12.set_active(model.lst_development[11])\n self.chkDevEnvQ13.set_active(model.lst_development[12])\n self.chkDevEnvQ14.set_active(model.lst_development[13])\n self.chkDevEnvQ15.set_active(model.lst_development[14])\n self.chkDevEnvQ16.set_active(model.lst_development[15])\n self.chkDevEnvQ17.set_active(model.lst_development[16])\n self.chkDevEnvQ18.set_active(model.lst_development[17])\n self.chkDevEnvQ19.set_active(model.lst_development[18])\n self.chkDevEnvQ20.set_active(model.lst_development[19])\n self.chkDevEnvQ21.set_active(model.lst_development[20])\n self.chkDevEnvQ22.set_active(model.lst_development[21])\n self.chkDevEnvQ23.set_active(model.lst_development[22])\n self.chkDevEnvQ24.set_active(model.lst_development[23])\n self.chkDevEnvQ25.set_active(model.lst_development[24])\n self.chkDevEnvQ26.set_active(model.lst_development[25])\n self.chkDevEnvQ27.set_active(model.lst_development[26])\n self.chkDevEnvQ28.set_active(model.lst_development[27])\n self.chkDevEnvQ29.set_active(model.lst_development[28])\n self.chkDevEnvQ30.set_active(model.lst_development[29])\n self.chkDevEnvQ31.set_active(model.lst_development[30])\n self.chkDevEnvQ32.set_active(model.lst_development[31])\n self.chkDevEnvQ33.set_active(model.lst_development[32])\n 
self.chkDevEnvQ34.set_active(model.lst_development[33])\n self.chkDevEnvQ35.set_active(model.lst_development[34])\n self.chkDevEnvQ36.set_active(model.lst_development[35])\n self.chkDevEnvQ37.set_active(model.lst_development[36])\n self.chkDevEnvQ38.set_active(model.lst_development[37])\n self.chkDevEnvQ39.set_active(model.lst_development[38])\n self.chkDevEnvQ40.set_active(model.lst_development[39])\n self.chkDevEnvQ41.set_active(model.lst_development[40])\n self.chkDevEnvQ42.set_active(model.lst_development[41])\n self.chkDevEnvQ43.set_active(model.lst_development[42])\n return False\n\n def _on_toggled(self, check, index):\n \"\"\"\n Callback method for gtk.CheckButton() 'toggled' event.\n\n :param gtk.CheckButton check: the gtk.CheckButton() that called this\n method.\n :param int index: the index of the Development Environment question\n associated with the gtk.CheckButton() that was\n toggled.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n check.handler_block(self._lst_handler_id[index])\n self._software_model.lst_development[index] = int(check.get_active())\n check.handler_unblock(self._lst_handler_id[index])\n return False\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass RiskAnalysis(gtk.VPaned):\n <docstring token>\n <function token>\n <function token>\n\n def load(self, model):\n \"\"\"\n Method to load the Development Environment Risk Analysis answers.\n\n :param `rtk.software.Software` model: the Software data model to load\n the gtk.ToggleButton() from.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n self._software_model = model\n self.chkDevEnvQ1.set_active(model.lst_development[0])\n self.chkDevEnvQ2.set_active(model.lst_development[1])\n self.chkDevEnvQ3.set_active(model.lst_development[2])\n self.chkDevEnvQ4.set_active(model.lst_development[3])\n self.chkDevEnvQ5.set_active(model.lst_development[4])\n self.chkDevEnvQ6.set_active(model.lst_development[5])\n self.chkDevEnvQ7.set_active(model.lst_development[6])\n self.chkDevEnvQ8.set_active(model.lst_development[7])\n self.chkDevEnvQ9.set_active(model.lst_development[8])\n self.chkDevEnvQ10.set_active(model.lst_development[9])\n self.chkDevEnvQ11.set_active(model.lst_development[10])\n self.chkDevEnvQ12.set_active(model.lst_development[11])\n self.chkDevEnvQ13.set_active(model.lst_development[12])\n self.chkDevEnvQ14.set_active(model.lst_development[13])\n self.chkDevEnvQ15.set_active(model.lst_development[14])\n self.chkDevEnvQ16.set_active(model.lst_development[15])\n self.chkDevEnvQ17.set_active(model.lst_development[16])\n self.chkDevEnvQ18.set_active(model.lst_development[17])\n self.chkDevEnvQ19.set_active(model.lst_development[18])\n self.chkDevEnvQ20.set_active(model.lst_development[19])\n self.chkDevEnvQ21.set_active(model.lst_development[20])\n self.chkDevEnvQ22.set_active(model.lst_development[21])\n self.chkDevEnvQ23.set_active(model.lst_development[22])\n self.chkDevEnvQ24.set_active(model.lst_development[23])\n self.chkDevEnvQ25.set_active(model.lst_development[24])\n self.chkDevEnvQ26.set_active(model.lst_development[25])\n self.chkDevEnvQ27.set_active(model.lst_development[26])\n self.chkDevEnvQ28.set_active(model.lst_development[27])\n self.chkDevEnvQ29.set_active(model.lst_development[28])\n self.chkDevEnvQ30.set_active(model.lst_development[29])\n self.chkDevEnvQ31.set_active(model.lst_development[30])\n self.chkDevEnvQ32.set_active(model.lst_development[31])\n self.chkDevEnvQ33.set_active(model.lst_development[32])\n self.chkDevEnvQ34.set_active(model.lst_development[33])\n self.chkDevEnvQ35.set_active(model.lst_development[34])\n self.chkDevEnvQ36.set_active(model.lst_development[35])\n self.chkDevEnvQ37.set_active(model.lst_development[36])\n self.chkDevEnvQ38.set_active(model.lst_development[37])\n self.chkDevEnvQ39.set_active(model.lst_development[38])\n self.chkDevEnvQ40.set_active(model.lst_development[39])\n self.chkDevEnvQ41.set_active(model.lst_development[40])\n self.chkDevEnvQ42.set_active(model.lst_development[41])\n self.chkDevEnvQ43.set_active(model.lst_development[42])\n return False\n\n def _on_toggled(self, check, index):\n \"\"\"\n Callback method for gtk.CheckButton() 'toggled' event.\n\n :param gtk.CheckButton check: the gtk.CheckButton() that called this\n method.\n :param int index: the index of the Development Environment question\n associated with the gtk.CheckButton() that was\n toggled.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n check.handler_block(self._lst_handler_id[index])\n self._software_model.lst_development[index] = int(check.get_active())\n 
check.handler_unblock(self._lst_handler_id[index])\n return False\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass RiskAnalysis(gtk.VPaned):\n <docstring token>\n <function token>\n <function token>\n\n def load(self, model):\n \"\"\"\n Method to load the Development Environment Risk Analysis answers.\n\n :param `rtk.software.Software` model: the Software data model to load\n the gtk.ToggleButton() from.\n :return: False if successful or True if an error is encountered.\n :rtype: bool\n \"\"\"\n self._software_model = model\n self.chkDevEnvQ1.set_active(model.lst_development[0])\n self.chkDevEnvQ2.set_active(model.lst_development[1])\n self.chkDevEnvQ3.set_active(model.lst_development[2])\n self.chkDevEnvQ4.set_active(model.lst_development[3])\n self.chkDevEnvQ5.set_active(model.lst_development[4])\n self.chkDevEnvQ6.set_active(model.lst_development[5])\n self.chkDevEnvQ7.set_active(model.lst_development[6])\n self.chkDevEnvQ8.set_active(model.lst_development[7])\n self.chkDevEnvQ9.set_active(model.lst_development[8])\n self.chkDevEnvQ10.set_active(model.lst_development[9])\n self.chkDevEnvQ11.set_active(model.lst_development[10])\n self.chkDevEnvQ12.set_active(model.lst_development[11])\n self.chkDevEnvQ13.set_active(model.lst_development[12])\n self.chkDevEnvQ14.set_active(model.lst_development[13])\n self.chkDevEnvQ15.set_active(model.lst_development[14])\n self.chkDevEnvQ16.set_active(model.lst_development[15])\n self.chkDevEnvQ17.set_active(model.lst_development[16])\n self.chkDevEnvQ18.set_active(model.lst_development[17])\n self.chkDevEnvQ19.set_active(model.lst_development[18])\n self.chkDevEnvQ20.set_active(model.lst_development[19])\n self.chkDevEnvQ21.set_active(model.lst_development[20])\n self.chkDevEnvQ22.set_active(model.lst_development[21])\n self.chkDevEnvQ23.set_active(model.lst_development[22])\n self.chkDevEnvQ24.set_active(model.lst_development[23])\n self.chkDevEnvQ25.set_active(model.lst_development[24])\n self.chkDevEnvQ26.set_active(model.lst_development[25])\n self.chkDevEnvQ27.set_active(model.lst_development[26])\n self.chkDevEnvQ28.set_active(model.lst_development[27])\n self.chkDevEnvQ29.set_active(model.lst_development[28])\n self.chkDevEnvQ30.set_active(model.lst_development[29])\n self.chkDevEnvQ31.set_active(model.lst_development[30])\n self.chkDevEnvQ32.set_active(model.lst_development[31])\n self.chkDevEnvQ33.set_active(model.lst_development[32])\n self.chkDevEnvQ34.set_active(model.lst_development[33])\n self.chkDevEnvQ35.set_active(model.lst_development[34])\n self.chkDevEnvQ36.set_active(model.lst_development[35])\n self.chkDevEnvQ37.set_active(model.lst_development[36])\n self.chkDevEnvQ38.set_active(model.lst_development[37])\n self.chkDevEnvQ39.set_active(model.lst_development[38])\n self.chkDevEnvQ40.set_active(model.lst_development[39])\n self.chkDevEnvQ41.set_active(model.lst_development[40])\n self.chkDevEnvQ42.set_active(model.lst_development[41])\n self.chkDevEnvQ43.set_active(model.lst_development[42])\n return False\n <function token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n\n\nclass RiskAnalysis(gtk.VPaned):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<class token>\n"
] | false |
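The `_on_toggled` callbacks in the row above all follow the same GTK idiom: block the widget's own signal handler while writing the new state back to the model, so the programmatic update cannot re-trigger the callback. A minimal sketch of that idiom (illustrative only, not part of the dataset row; the names `answers` and `handler_id` are hypothetical):

def on_toggled(check, index, answers, handler_id):
    # Temporarily silence this widget's 'toggled' handler ...
    check.handler_block(handler_id)
    # ... record the new answer as 0/1 ...
    answers[index] = int(check.get_active())
    # ... then restore normal signal delivery.
    check.handler_unblock(handler_id)
    return False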
977 |
136215a3ba99f74160373181c458db9bec4bb6b7
|
# PortableKanban 4.3.6578.38136 - Encrypted Password Retrieval
# Install the DES dependency with: python3 -m pip install des  (or: pip install des)
import base64
from des import DesKey

def decode(hash):
    # PortableKanban stores passwords as base64-encoded DES-CBC ciphertext
    # with a hard-coded key and IV; reverse both steps to recover the text.
    hash = base64.b64decode(hash.encode('utf-8'))
    key = DesKey(b"7ly6UznJ")
    return key.decrypt(hash, initial=b"XuVUm5fR", padding=True).decode('utf-8')

# Change the placeholder below to your encrypted key.
print(decode('XXXXXXXXXXXXXXXXXXXXXX'))
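Because decode() only reverses base64 plus DES-CBC with a fixed key and IV, it can be sanity-checked without a real PortableKanban database by encrypting a sample value first. A minimal round-trip sketch, assuming the `des` package's encrypt() mirrors the decrypt() signature used above (the sample password is made up):

import base64
from des import DesKey

key = DesKey(b"7ly6UznJ")                # same hard-coded key as decode()
cipher = key.encrypt(b"hunter2", initial=b"XuVUm5fR", padding=True)
token = base64.b64encode(cipher).decode('utf-8')
print(decode(token))                     # should print: hunter2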
|
[
"#PortableKanban 4.3.6578.38136 - Encrypted Password Retrieval\r\n#Python3 -m pip install des\r\n#or\r\n#pip install des\r\n\r\nimport json\r\nimport base64\r\nfrom des import * #python3 -m pip install des, pip install des\r\nimport sys\r\n\r\ndef decode(hash):\r\n\thash = base64.b64decode(hash.encode('utf-8'))\r\n\tkey = DesKey(b\"7ly6UznJ\")\r\n\treturn key.decrypt(hash,initial=b\"XuVUm5fR\",padding=True).decode('utf-8')\r\n\r\nprint(decode('XXXXXXXXXXXXXXXXXXXXXX'))\r\n\r\n#change this to your encrypted key\r\n\r\n",
"import json\nimport base64\nfrom des import *\nimport sys\n\n\ndef decode(hash):\n hash = base64.b64decode(hash.encode('utf-8'))\n key = DesKey(b'7ly6UznJ')\n return key.decrypt(hash, initial=b'XuVUm5fR', padding=True).decode('utf-8')\n\n\nprint(decode('XXXXXXXXXXXXXXXXXXXXXX'))\n",
"<import token>\n\n\ndef decode(hash):\n hash = base64.b64decode(hash.encode('utf-8'))\n key = DesKey(b'7ly6UznJ')\n return key.decrypt(hash, initial=b'XuVUm5fR', padding=True).decode('utf-8')\n\n\nprint(decode('XXXXXXXXXXXXXXXXXXXXXX'))\n",
"<import token>\n\n\ndef decode(hash):\n hash = base64.b64decode(hash.encode('utf-8'))\n key = DesKey(b'7ly6UznJ')\n return key.decrypt(hash, initial=b'XuVUm5fR', padding=True).decode('utf-8')\n\n\n<code token>\n",
"<import token>\n<function token>\n<code token>\n"
] | false |
978 |
cc46485a3b5c68e4f77a2f9a033fd2ee2859b52b
|
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
from sklearn.datasets import load_iris
from sklearn.model_selection import cross_val_score

# Load the iris data and hold out 10% as a stratified test set.
iris_dataset = load_iris()
X = iris_dataset['data']
y = iris_dataset['target']
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.1, stratify=y, random_state=42)

# Fit ten depth-4 trees that differ only in their random_state.
model = []
for c in range(1, 11):
    tree = DecisionTreeClassifier(max_depth=4, random_state=c)
    model.append(tree.fit(X_train, y_train))

# Score every tree on the training set (in-sample) and on the held-out
# test set (out-of-sample).
in_sample_accuracy = []
out_of_sample_accuracy = []
for a in model:
    in_sample_accuracy.append(a.score(X_train, y_train))
    out_of_sample_accuracy.append(a.score(X_test, y_test))

# Column labels: the ten seeds plus the two summary statistics, which are
# appended to each accuracy list (std is taken over the ten raw scores only).
a = list(range(1, 11))
a.append('mean')
a.append('standard')
in_sample_accuracy.append(np.mean(in_sample_accuracy))
in_sample_accuracy.append(np.std(in_sample_accuracy[:-1]))
out_of_sample_accuracy.append(np.mean(out_of_sample_accuracy))
out_of_sample_accuracy.append(np.std(out_of_sample_accuracy[:-1]))
b = pd.DataFrame([in_sample_accuracy, out_of_sample_accuracy],
                 columns=a, index=['in_sample_accuracy', 'out_of_sample_accuracy'])
pd.set_option('display.precision', 3)  # use the full name; bare 'precision' now matches multiple options
b  # displays the seed-by-seed summary table in a notebook

# Cross-validation: 10-fold CV accuracy for a depth-4 tree.
CVS = []
score = cross_val_score(DecisionTreeClassifier(max_depth=4), X_train, y_train, cv=10)
CVS.append(score)
pd.set_option('display.precision', 3)
c = pd.DataFrame(CVS, columns=['result1', 'result2', 'result3', 'result4',
                               'result5', 'result6', 'result7', 'result8',
                               'result9', 'result10'])
c['mean'] = c.mean(1)
c['standard'] = c.std(1)
dt = DecisionTreeClassifier(max_depth=4)
dt.fit(X_train, y_train)
c['Out-of-sample-accuracy'] = dt.score(X_test, y_test)
c  # displays the cross-validation table in a notebook
print("My name is Fengkai Xu")
print("My NetID is: fengkai4")
print("I hereby certify that I have read the University policy on Academic Integrity and that I am not in violation.")
|
[
"\r\nfrom sklearn.tree import DecisionTreeClassifier\r\nfrom sklearn.model_selection import train_test_split\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.datasets import load_iris\r\nfrom sklearn.model_selection import cross_val_score\r\niris_dataset=load_iris()\r\nX=iris_dataset['data']\r\ny=iris_dataset['target']\r\nX_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.1,stratify=y,random_state=42)\r\nmodel=[]\r\nfor c in range(1,11):\r\n tree=DecisionTreeClassifier(max_depth=4,random_state=c)\r\n model.append(tree.fit(X_train,y_train))\r\nin_sample_accuracy=[]\r\nout_of_sample_accuracy=[]\r\nfor a in model:\r\n in_sample_accuracy.append(a.score(X_train,y_train))\r\n out_of_sample_accuracy.append(a.score(X_test,y_test))\r\n\r\na=list(range(1,11))\r\na.append('mean')\r\na.append('standard')\r\nin_sample_accuracy.append(np.mean(in_sample_accuracy))\r\nin_sample_accuracy.append(np.std(in_sample_accuracy[:-1]))\r\nout_of_sample_accuracy.append(np.mean(out_of_sample_accuracy))\r\nout_of_sample_accuracy.append(np.std(out_of_sample_accuracy[:-1]))\r\nb=pd.DataFrame([in_sample_accuracy,out_of_sample_accuracy,],\r\n columns=a,index=['in_sample_accuracy','out_of_sample_accuracy'])\r\npd.set_option('precision',3)\r\nb\r\n#cross validation\r\nCVS=[]\r\nscore=cross_val_score(DecisionTreeClassifier(max_depth=4),X_train,y_train,cv=10)\r\nCVS.append(score)\r\npd.set_option('precision',3)\r\nc=pd.DataFrame(CVS,columns=['result1','result2','result3','result4','result5','result6','result7','result8','result9','result 10'],)\r\nc['mean']=c.mean(1)\r\nc['standard']=c.std(1)\r\ndt=DecisionTreeClassifier(max_depth=4)\r\ndt.fit(X_train,y_train)\r\nc['Out-of-sample-accuracy']=dt.score(X_test,y_test)\r\nc\r\nprint(\"My name is Fengkai Xu\")\r\nprint(\"My NetID is: fengkai4\")\r\nprint(\"I hereby certify that I have read the University policy on Academic Integrity and that I am not in violation.\")",
"from sklearn.tree import DecisionTreeClassifier\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport numpy as np\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import cross_val_score\niris_dataset = load_iris()\nX = iris_dataset['data']\ny = iris_dataset['target']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1,\n stratify=y, random_state=42)\nmodel = []\nfor c in range(1, 11):\n tree = DecisionTreeClassifier(max_depth=4, random_state=c)\n model.append(tree.fit(X_train, y_train))\nin_sample_accuracy = []\nout_of_sample_accuracy = []\nfor a in model:\n in_sample_accuracy.append(a.score(X_train, y_train))\n out_of_sample_accuracy.append(a.score(X_test, y_test))\na = list(range(1, 11))\na.append('mean')\na.append('standard')\nin_sample_accuracy.append(np.mean(in_sample_accuracy))\nin_sample_accuracy.append(np.std(in_sample_accuracy[:-1]))\nout_of_sample_accuracy.append(np.mean(out_of_sample_accuracy))\nout_of_sample_accuracy.append(np.std(out_of_sample_accuracy[:-1]))\nb = pd.DataFrame([in_sample_accuracy, out_of_sample_accuracy], columns=a,\n index=['in_sample_accuracy', 'out_of_sample_accuracy'])\npd.set_option('precision', 3)\nb\nCVS = []\nscore = cross_val_score(DecisionTreeClassifier(max_depth=4), X_train,\n y_train, cv=10)\nCVS.append(score)\npd.set_option('precision', 3)\nc = pd.DataFrame(CVS, columns=['result1', 'result2', 'result3', 'result4',\n 'result5', 'result6', 'result7', 'result8', 'result9', 'result 10'])\nc['mean'] = c.mean(1)\nc['standard'] = c.std(1)\ndt = DecisionTreeClassifier(max_depth=4)\ndt.fit(X_train, y_train)\nc['Out-of-sample-accuracy'] = dt.score(X_test, y_test)\nc\nprint('My name is Fengkai Xu')\nprint('My NetID is: fengkai4')\nprint(\n 'I hereby certify that I have read the University policy on Academic Integrity and that I am not in violation.'\n )\n",
"<import token>\niris_dataset = load_iris()\nX = iris_dataset['data']\ny = iris_dataset['target']\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1,\n stratify=y, random_state=42)\nmodel = []\nfor c in range(1, 11):\n tree = DecisionTreeClassifier(max_depth=4, random_state=c)\n model.append(tree.fit(X_train, y_train))\nin_sample_accuracy = []\nout_of_sample_accuracy = []\nfor a in model:\n in_sample_accuracy.append(a.score(X_train, y_train))\n out_of_sample_accuracy.append(a.score(X_test, y_test))\na = list(range(1, 11))\na.append('mean')\na.append('standard')\nin_sample_accuracy.append(np.mean(in_sample_accuracy))\nin_sample_accuracy.append(np.std(in_sample_accuracy[:-1]))\nout_of_sample_accuracy.append(np.mean(out_of_sample_accuracy))\nout_of_sample_accuracy.append(np.std(out_of_sample_accuracy[:-1]))\nb = pd.DataFrame([in_sample_accuracy, out_of_sample_accuracy], columns=a,\n index=['in_sample_accuracy', 'out_of_sample_accuracy'])\npd.set_option('precision', 3)\nb\nCVS = []\nscore = cross_val_score(DecisionTreeClassifier(max_depth=4), X_train,\n y_train, cv=10)\nCVS.append(score)\npd.set_option('precision', 3)\nc = pd.DataFrame(CVS, columns=['result1', 'result2', 'result3', 'result4',\n 'result5', 'result6', 'result7', 'result8', 'result9', 'result 10'])\nc['mean'] = c.mean(1)\nc['standard'] = c.std(1)\ndt = DecisionTreeClassifier(max_depth=4)\ndt.fit(X_train, y_train)\nc['Out-of-sample-accuracy'] = dt.score(X_test, y_test)\nc\nprint('My name is Fengkai Xu')\nprint('My NetID is: fengkai4')\nprint(\n 'I hereby certify that I have read the University policy on Academic Integrity and that I am not in violation.'\n )\n",
"<import token>\n<assignment token>\nfor c in range(1, 11):\n tree = DecisionTreeClassifier(max_depth=4, random_state=c)\n model.append(tree.fit(X_train, y_train))\n<assignment token>\nfor a in model:\n in_sample_accuracy.append(a.score(X_train, y_train))\n out_of_sample_accuracy.append(a.score(X_test, y_test))\n<assignment token>\na.append('mean')\na.append('standard')\nin_sample_accuracy.append(np.mean(in_sample_accuracy))\nin_sample_accuracy.append(np.std(in_sample_accuracy[:-1]))\nout_of_sample_accuracy.append(np.mean(out_of_sample_accuracy))\nout_of_sample_accuracy.append(np.std(out_of_sample_accuracy[:-1]))\n<assignment token>\npd.set_option('precision', 3)\nb\n<assignment token>\nCVS.append(score)\npd.set_option('precision', 3)\n<assignment token>\ndt.fit(X_train, y_train)\n<assignment token>\nc\nprint('My name is Fengkai Xu')\nprint('My NetID is: fengkai4')\nprint(\n 'I hereby certify that I have read the University policy on Academic Integrity and that I am not in violation.'\n )\n",
"<import token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n<assignment token>\n<code token>\n"
] | false |
979 |
ce98c13555c474de0a9cb12e99a97b2316312b00
|
yuki = list(map(int, input().split()))
S = input()
enemy = [S.count('G'), S.count('C'), S.count('P')]
ans = 0
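# hand i beats hand (i+1) % 3 (G > C, C > P, P > G); a win appears to score
# 3 points and a draw 1 point, which is what the two loops below tally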
for i in range(3):
ans += min(yuki[i], enemy[(i+1)%3]) * 3
yuki[i], enemy[(i+1)%3] = max(0, yuki[i]-enemy[(i+1)%3]), max(0, enemy[(i+1)%3]-yuki[i])
for i in range(3):
ans += min(yuki[i], enemy[i])
print(ans)
|
[
"yuki = list(map(int, input().split()))\nS = input()\nenemy = [S.count('G'), S.count('C'), S.count('P')]\nans = 0\nfor i in range(3):\n ans += min(yuki[i], enemy[(i+1)%3]) * 3\n yuki[i], enemy[(i+1)%3] = max(0, yuki[i]-enemy[(i+1)%3]), max(0, enemy[(i+1)%3]-yuki[i])\n\nfor i in range(3):\n ans += min(yuki[i], enemy[i])\nprint(ans)\n",
"yuki = list(map(int, input().split()))\nS = input()\nenemy = [S.count('G'), S.count('C'), S.count('P')]\nans = 0\nfor i in range(3):\n ans += min(yuki[i], enemy[(i + 1) % 3]) * 3\n yuki[i], enemy[(i + 1) % 3] = max(0, yuki[i] - enemy[(i + 1) % 3]), max(\n 0, enemy[(i + 1) % 3] - yuki[i])\nfor i in range(3):\n ans += min(yuki[i], enemy[i])\nprint(ans)\n",
"<assignment token>\nfor i in range(3):\n ans += min(yuki[i], enemy[(i + 1) % 3]) * 3\n yuki[i], enemy[(i + 1) % 3] = max(0, yuki[i] - enemy[(i + 1) % 3]), max(\n 0, enemy[(i + 1) % 3] - yuki[i])\nfor i in range(3):\n ans += min(yuki[i], enemy[i])\nprint(ans)\n",
"<assignment token>\n<code token>\n"
] | false |
980 |
3f3ed0165120dc135a4ce1f282dbdf9dad57adf8
|
# coding: UTF-8 -*-
import os.path
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
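# NOTE: a few literal keys below ("8)", ">:P", "=P", "??") are repeated;
# in a Python dict literal the last occurrence silently wins.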
EMOTICONS = {
"O:)": "angel",
"o:)": "angel",
"O:-)": "angel",
"o:-)": "angel",
"o:-3": "angel",
"o:3": "angel",
"O;^)": "angel",
">:[": "annoyed/disappointed",
":-(": "annoyed/disappointed",
":(": "annoyed/disappointed",
":((": "annoyed/disappointed",
":-((": "annoyed/disappointed",
":-c": "annoyed/disappointed",
":-<": "annoyed/disappointed",
":?C": "annoyed/disappointed",
":<": "annoyed/disappointed",
":[": "annoyed/disappointed",
":{": "annoyed/disappointed",
":=||": "annoyed/disappointed",
":@": "annoyed/disappointed",
">:(": "annoyed/disappointed",
":/": "annoyed/disappointed",
":\\": "annoyed/disappointed",
"=/": "annoyed/disappointed",
"=\\": "annoyed/disappointed",
">:/": "annoyed/disappointed",
">:\\": "annoyed/disappointed",
":S": "annoyed/disappointed",
":s": "annoyed/disappointed",
":-S": "annoyed/disappointed",
":-s": "annoyed/disappointed",
":|": "annoyed/disappointed",
":-|": "annoyed/disappointed",
":$": "annoyed/disappointed",
"?_?": "annoyed/disappointed",
"(>_<)": "annoyed/disappointed",
">_<": "annoyed/disappointed",
">__<": "annoyed/disappointed",
"(>__<)": "annoyed/disappointed",
"(-.-)": "annoyed/disappointed",
"(-_-)": "annoyed/disappointed",
"(._.)": "annoyed/disappointed",
"/:)": "annoyed/disappointed",
":-$": "annoyed/disappointed",
">:P": "annoyed/disappointed",
"K": "annoyed/disappointed",
"3:)": "devilish",
"3:-)": "devilish",
"}:-)": "devilish",
"}:)": "devilish",
">:)": "devilish",
"B-)": "happy",
":-)": "happy",
":)": "happy",
":o)": "happy",
":]": "happy",
":3": "happy",
":c)": "happy",
":>": "happy",
"=]": "happy",
"8)": "happy",
"=)": "happy",
":}": "happy",
":^)": "happy",
":?)": "happy",
":-))": "happy",
"<:-P": "happy",
"<:P": "happy",
"<:-p": "happy",
"<:p": "happy",
";;)": "happy",
"J": "happy",
"<3": "heart",
"^5": "high-five",
">_>^": "high-five",
"^<_<": "high-five",
":*": "kiss",
":*)": "kiss",
":^*": "kiss",
"}{": "kiss",
"('}{')": "kiss",
":-D": "laughing",
":D": "laughing",
"8-D": "laughing",
"8D": "laughing",
"x-D": "laughing",
"xD": "laughing",
"X-D": "laughing",
"XD": "laughing",
"=-D": "laughing",
"=D": "laughing",
";D": "laughing",
"-3": "laughing",
"3": "laughing",
"B^D": "laughing",
"D:<": "laughing",
"D:": "laughing",
"D8": "laughing",
"D;": "laughing",
"D=": "laughing",
"DX": "laughing",
":-B": "nerd",
"8-)": "nerd",
"8)": "nerd",
"</3": "sad",
":'(": "sad",
":'-(": "sad",
"QQ": "sad",
"L": "sad",
":#": "sealed mouth",
":-#": "sealed mouth",
":-X": "sealed mouth",
":-x": "sealed mouth",
":X": "sealed mouth",
":x": "sealed mouth",
"??": "shooting star",
"??": "shooting star",
"~?": "shooting star",
">:O": "suprprised/shocked",
">:o": "suprprised/shocked",
":-O": "suprprised/shocked",
":-o": "suprprised/shocked",
":O": "suprprised/shocked",
":o": "suprprised/shocked",
"O_o": "suprprised/shocked",
"o_O": "suprprised/shocked",
"O.o": "suprprised/shocked",
"o.O": "suprprised/shocked",
"(O_o)": "suprprised/shocked",
"(o_O)": "suprprised/shocked",
"(O.o)": "suprprised/shocked",
"(o.O)": "suprprised/shocked",
":'-)": "tears of happines",
":')": "tears of happines",
":P": "teasing/playful",
":p": "teasing/playful",
">:P": "teasing/playful",
">:p": "teasing/playful",
"X-P": "teasing/playful",
"x-p": "teasing/playful",
"xp": "teasing/playful",
"XP": "teasing/playful",
":-P": "teasing/playful",
":-p": "teasing/playful",
"=P": "teasing/playful",
"=P": "teasing/playful",
":-?": "teasing/playful",
":-b": "teasing/playful",
":b": "teasing/playful",
";)": "wink",
u"º)": "wink",
";-)": "wink",
";]": "wink",
u"^Ü^": "happy",
}
special_tokens = EMOTICONS
from DAPOS.data.variation import Prefix, Suffix
EASY_WORDS = {
u"ليا": [(Prefix(u"ل"), u"يا", Suffix(u""))],
u"لي": [(Prefix(u"ل"), u"ي", Suffix(u""))],
u"لكم": [(Prefix(u"ل"), u"كم", Suffix(u""))],
u"لكما": [(Prefix(u"ل"), u"كما", Suffix(u""))],
u"له": [(Prefix(u"ل"), u"ه", Suffix(u""))],
u"لها": [(Prefix(u"ل"), u"ها", Suffix(u""))],
u"لهم": [(Prefix(u"ل"), u"هم", Suffix(u""))],
u"لهما": [(Prefix(u"ل"), u"هما", Suffix(u""))],
u"لهن": [(Prefix(u"ل"), u"هم", Suffix(u""))],
u"بيا": [(Prefix(u"ب"), u"يا", Suffix(u""))],
u"بي": [(Prefix(u"ب"), u"ي", Suffix(u""))],
u"بك": [(Prefix(u"ب"), u"ك", Suffix(u""))],
u"بكم": [(Prefix(u"ب"), u"كم", Suffix(u""))],
u"بكما": [(Prefix(u"ب"), u"كما", Suffix(u""))],
u"به": [(Prefix(u"ب"), u"ه", Suffix(u""))],
u"بها": [(Prefix(u"ب"), u"ها", Suffix(u""))],
u"بهما": [(Prefix(u"ب"), u"هما", Suffix(u""))],
u"بهم": [(Prefix(u"ب"), u"هم", Suffix(u""))],
u"بهن": [(Prefix(u"ب"), u"هن", Suffix(u""))],
u"عليا": [(Prefix(u""), u"على", Suffix(u"يا"))],
u"فيا": [(Prefix(u"ف"), u"يا", Suffix(u""))],
}
EMOTICONS_TAG = 'EMO'
PUNCTUATION_TAG = 'PUNC'
DIGIT_TAG = 'CD'
NOTDEFINED_TAG = 'NN'
|
[
"# coding: UTF-8 -*-\nimport os.path\n\nPROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))\n\nEMOTICONS = {\n \"O:)\": \"angel\",\n \"o:)\": \"angel\",\n \"O:-)\": \"angel\",\n \"o:-)\": \"angel\",\n \"o:-3\": \"angel\",\n \"o:3\": \"angel\",\n \"O;^)\": \"angel\",\n \">:[\": \"annoyed/disappointed\",\n \":-(\": \"annoyed/disappointed\",\n \":(\": \"annoyed/disappointed\",\n \":((\": \"annoyed/disappointed\",\n \":-((\": \"annoyed/disappointed\",\n \":-c\": \"annoyed/disappointed\",\n \":-<\": \"annoyed/disappointed\",\n \":?C\": \"annoyed/disappointed\",\n \":<\": \"annoyed/disappointed\",\n \":[\": \"annoyed/disappointed\",\n \":{\": \"annoyed/disappointed\",\n \":=||\": \"annoyed/disappointed\",\n \":@\": \"annoyed/disappointed\",\n \">:(\": \"annoyed/disappointed\",\n \":/\": \"annoyed/disappointed\",\n \":\\\\\": \"annoyed/disappointed\",\n \"=/\": \"annoyed/disappointed\",\n \"=\\\\\": \"annoyed/disappointed\",\n \">:/\": \"annoyed/disappointed\",\n \">:\\\\\": \"annoyed/disappointed\",\n \":S\": \"annoyed/disappointed\",\n \":s\": \"annoyed/disappointed\",\n \":-S\": \"annoyed/disappointed\",\n \":-s\": \"annoyed/disappointed\",\n \":|\": \"annoyed/disappointed\",\n \":-|\": \"annoyed/disappointed\",\n \":$\": \"annoyed/disappointed\",\n \"?_?\": \"annoyed/disappointed\",\n \"(>_<)\": \"annoyed/disappointed\",\n \">_<\": \"annoyed/disappointed\",\n \">__<\": \"annoyed/disappointed\",\n \"(>__<)\": \"annoyed/disappointed\",\n \"(-.-)\": \"annoyed/disappointed\",\n \"(-_-)\": \"annoyed/disappointed\",\n \"(._.)\": \"annoyed/disappointed\",\n \"/:)\": \"annoyed/disappointed\",\n \":-$\": \"annoyed/disappointed\",\n \">:P\": \"annoyed/disappointed\",\n \"K\": \"annoyed/disappointed\",\n \"3:)\": \"devilish\",\n \"3:-)\": \"devilish\",\n \"}:-)\": \"devilish\",\n \"}:)\": \"devilish\",\n \">:)\": \"devilish\",\n \"B-)\": \"happy\",\n \":-)\": \"happy\",\n \":)\": \"happy\",\n \":o)\": \"happy\",\n \":]\": \"happy\",\n \":3\": \"happy\",\n \":c)\": \"happy\",\n \":>\": \"happy\",\n \"=]\": \"happy\",\n \"8)\": \"happy\",\n \"=)\": \"happy\",\n \":}\": \"happy\",\n \":^)\": \"happy\",\n \":?)\": \"happy\",\n \":-))\": \"happy\",\n \"<:-P\": \"happy\",\n \"<:P\": \"happy\",\n \"<:-p\": \"happy\",\n \"<:p\": \"happy\",\n \";;)\": \"happy\",\n \"J\": \"happy\",\n \"<3\": \"heart\",\n \"^5\": \"high-five\",\n \">_>^\": \"high-five\",\n \"^<_<\": \"high-five\",\n \":*\": \"kiss\",\n \":*)\": \"kiss\",\n \":^*\": \"kiss\",\n \"}{\": \"kiss\",\n \"('}{')\": \"kiss\",\n \":-D\": \"laughing\",\n \":D\": \"laughing\",\n \"8-D\": \"laughing\",\n \"8D\": \"laughing\",\n \"x-D\": \"laughing\",\n \"xD\": \"laughing\",\n \"X-D\": \"laughing\",\n \"XD\": \"laughing\",\n \"=-D\": \"laughing\",\n \"=D\": \"laughing\",\n \";D\": \"laughing\",\n \"-3\": \"laughing\",\n \"3\": \"laughing\",\n \"B^D\": \"laughing\",\n \"D:<\": \"laughing\",\n \"D:\": \"laughing\",\n \"D8\": \"laughing\",\n \"D;\": \"laughing\",\n \"D=\": \"laughing\",\n \"DX\": \"laughing\",\n \":-B\": \"nerd\",\n \"8-)\": \"nerd\",\n \"8)\": \"nerd\",\n \"</3\": \"sad\",\n \":'(\": \"sad\",\n \":'-(\": \"sad\",\n \"QQ\": \"sad\",\n \"L\": \"sad\",\n \":#\": \"sealed mouth\",\n \":-#\": \"sealed mouth\",\n \":-X\": \"sealed mouth\",\n \":-x\": \"sealed mouth\",\n \":X\": \"sealed mouth\",\n \":x\": \"sealed mouth\",\n \"??\": \"shooting star\",\n \"??\": \"shooting star\",\n \"~?\": \"shooting star\",\n \">:O\": \"suprprised/shocked\",\n \">:o\": \"suprprised/shocked\",\n \":-O\": \"suprprised/shocked\",\n \":-o\": 
\"suprprised/shocked\",\n \":O\": \"suprprised/shocked\",\n \":o\": \"suprprised/shocked\",\n \"O_o\": \"suprprised/shocked\",\n \"o_O\": \"suprprised/shocked\",\n \"O.o\": \"suprprised/shocked\",\n \"o.O\": \"suprprised/shocked\",\n \"(O_o)\": \"suprprised/shocked\",\n \"(o_O)\": \"suprprised/shocked\",\n \"(O.o)\": \"suprprised/shocked\",\n \"(o.O)\": \"suprprised/shocked\",\n \":'-)\": \"tears of happines\",\n \":')\": \"tears of happines\",\n \":P\": \"teasing/playful\",\n \":p\": \"teasing/playful\",\n \">:P\": \"teasing/playful\",\n \">:p\": \"teasing/playful\",\n \"X-P\": \"teasing/playful\",\n \"x-p\": \"teasing/playful\",\n \"xp\": \"teasing/playful\",\n \"XP\": \"teasing/playful\",\n \":-P\": \"teasing/playful\",\n \":-p\": \"teasing/playful\",\n \"=P\": \"teasing/playful\",\n \"=P\": \"teasing/playful\",\n \":-?\": \"teasing/playful\",\n \":-b\": \"teasing/playful\",\n \":b\": \"teasing/playful\",\n \";)\": \"wink\",\n u\"º)\": \"wink\",\n \";-)\": \"wink\",\n \";]\": \"wink\",\n u\"^Ü^\": \"happy\",\n}\n\nspecial_tokens = EMOTICONS\n\nfrom DAPOS.data.variation import Prefix, Suffix\n\nEASY_WORDS = {\n u\"ليا\": [(Prefix(u\"ل\"), u\"يا\", Suffix(u\"\"))],\n u\"لي\": [(Prefix(u\"ل\"), u\"ي\", Suffix(u\"\"))],\n u\"لكم\": [(Prefix(u\"ل\"), u\"كم\", Suffix(u\"\"))],\n u\"لكما\": [(Prefix(u\"ل\"), u\"كما\", Suffix(u\"\"))],\n u\"له\": [(Prefix(u\"ل\"), u\"ه\", Suffix(u\"\"))],\n u\"لها\": [(Prefix(u\"ل\"), u\"ها\", Suffix(u\"\"))],\n u\"لهم\": [(Prefix(u\"ل\"), u\"هم\", Suffix(u\"\"))],\n u\"لهما\": [(Prefix(u\"ل\"), u\"هما\", Suffix(u\"\"))],\n u\"لهن\": [(Prefix(u\"ل\"), u\"هم\", Suffix(u\"\"))],\n u\"بيا\": [(Prefix(u\"ب\"), u\"يا\", Suffix(u\"\"))],\n u\"بي\": [(Prefix(u\"ب\"), u\"ي\", Suffix(u\"\"))],\n u\"بك\": [(Prefix(u\"ب\"), u\"ك\", Suffix(u\"\"))],\n u\"بكم\": [(Prefix(u\"ب\"), u\"كم\", Suffix(u\"\"))],\n u\"بكما\": [(Prefix(u\"ب\"), u\"كما\", Suffix(u\"\"))],\n u\"به\": [(Prefix(u\"ب\"), u\"ه\", Suffix(u\"\"))],\n u\"بها\": [(Prefix(u\"ب\"), u\"ها\", Suffix(u\"\"))],\n u\"بهما\": [(Prefix(u\"ب\"), u\"هما\", Suffix(u\"\"))],\n u\"بهم\": [(Prefix(u\"ب\"), u\"هم\", Suffix(u\"\"))],\n u\"بهن\": [(Prefix(u\"ب\"), u\"هن\", Suffix(u\"\"))],\n u\"عليا\": [(Prefix(u\"\"), u\"على\", Suffix(u\"يا\"))],\n u\"فيا\": [(Prefix(u\"ف\"), u\"يا\", Suffix(u\"\"))],\n}\n\n\nEMOTICONS_TAG = 'EMO'\nPUNCTUATION_TAG = 'PUNC'\nDIGIT_TAG = 'CD'\nNOTDEFINED_TAG = 'NN'\n",
"import os.path\nPROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))\nEMOTICONS = {'O:)': 'angel', 'o:)': 'angel', 'O:-)': 'angel', 'o:-)':\n 'angel', 'o:-3': 'angel', 'o:3': 'angel', 'O;^)': 'angel', '>:[':\n 'annoyed/disappointed', ':-(': 'annoyed/disappointed', ':(':\n 'annoyed/disappointed', ':((': 'annoyed/disappointed', ':-((':\n 'annoyed/disappointed', ':-c': 'annoyed/disappointed', ':-<':\n 'annoyed/disappointed', ':?C': 'annoyed/disappointed', ':<':\n 'annoyed/disappointed', ':[': 'annoyed/disappointed', ':{':\n 'annoyed/disappointed', ':=||': 'annoyed/disappointed', ':@':\n 'annoyed/disappointed', '>:(': 'annoyed/disappointed', ':/':\n 'annoyed/disappointed', ':\\\\': 'annoyed/disappointed', '=/':\n 'annoyed/disappointed', '=\\\\': 'annoyed/disappointed', '>:/':\n 'annoyed/disappointed', '>:\\\\': 'annoyed/disappointed', ':S':\n 'annoyed/disappointed', ':s': 'annoyed/disappointed', ':-S':\n 'annoyed/disappointed', ':-s': 'annoyed/disappointed', ':|':\n 'annoyed/disappointed', ':-|': 'annoyed/disappointed', ':$':\n 'annoyed/disappointed', '?_?': 'annoyed/disappointed', '(>_<)':\n 'annoyed/disappointed', '>_<': 'annoyed/disappointed', '>__<':\n 'annoyed/disappointed', '(>__<)': 'annoyed/disappointed', '(-.-)':\n 'annoyed/disappointed', '(-_-)': 'annoyed/disappointed', '(._.)':\n 'annoyed/disappointed', '/:)': 'annoyed/disappointed', ':-$':\n 'annoyed/disappointed', '>:P': 'annoyed/disappointed', 'K':\n 'annoyed/disappointed', '3:)': 'devilish', '3:-)': 'devilish', '}:-)':\n 'devilish', '}:)': 'devilish', '>:)': 'devilish', 'B-)': 'happy', ':-)':\n 'happy', ':)': 'happy', ':o)': 'happy', ':]': 'happy', ':3': 'happy',\n ':c)': 'happy', ':>': 'happy', '=]': 'happy', '8)': 'happy', '=)':\n 'happy', ':}': 'happy', ':^)': 'happy', ':?)': 'happy', ':-))': 'happy',\n '<:-P': 'happy', '<:P': 'happy', '<:-p': 'happy', '<:p': 'happy', ';;)':\n 'happy', 'J': 'happy', '<3': 'heart', '^5': 'high-five', '>_>^':\n 'high-five', '^<_<': 'high-five', ':*': 'kiss', ':*)': 'kiss', ':^*':\n 'kiss', '}{': 'kiss', \"('}{')\": 'kiss', ':-D': 'laughing', ':D':\n 'laughing', '8-D': 'laughing', '8D': 'laughing', 'x-D': 'laughing',\n 'xD': 'laughing', 'X-D': 'laughing', 'XD': 'laughing', '=-D':\n 'laughing', '=D': 'laughing', ';D': 'laughing', '-3': 'laughing', '3':\n 'laughing', 'B^D': 'laughing', 'D:<': 'laughing', 'D:': 'laughing',\n 'D8': 'laughing', 'D;': 'laughing', 'D=': 'laughing', 'DX': 'laughing',\n ':-B': 'nerd', '8-)': 'nerd', '8)': 'nerd', '</3': 'sad', \":'(\": 'sad',\n \":'-(\": 'sad', 'QQ': 'sad', 'L': 'sad', ':#': 'sealed mouth', ':-#':\n 'sealed mouth', ':-X': 'sealed mouth', ':-x': 'sealed mouth', ':X':\n 'sealed mouth', ':x': 'sealed mouth', '??': 'shooting star', '??':\n 'shooting star', '~?': 'shooting star', '>:O': 'suprprised/shocked',\n '>:o': 'suprprised/shocked', ':-O': 'suprprised/shocked', ':-o':\n 'suprprised/shocked', ':O': 'suprprised/shocked', ':o':\n 'suprprised/shocked', 'O_o': 'suprprised/shocked', 'o_O':\n 'suprprised/shocked', 'O.o': 'suprprised/shocked', 'o.O':\n 'suprprised/shocked', '(O_o)': 'suprprised/shocked', '(o_O)':\n 'suprprised/shocked', '(O.o)': 'suprprised/shocked', '(o.O)':\n 'suprprised/shocked', \":'-)\": 'tears of happines', \":')\":\n 'tears of happines', ':P': 'teasing/playful', ':p': 'teasing/playful',\n '>:P': 'teasing/playful', '>:p': 'teasing/playful', 'X-P':\n 'teasing/playful', 'x-p': 'teasing/playful', 'xp': 'teasing/playful',\n 'XP': 'teasing/playful', ':-P': 'teasing/playful', ':-p':\n 'teasing/playful', '=P': 'teasing/playful', '=P': 
'teasing/playful',\n ':-?': 'teasing/playful', ':-b': 'teasing/playful', ':b':\n 'teasing/playful', ';)': 'wink', u'º)': 'wink', ';-)': 'wink', ';]':\n 'wink', u'^Ü^': 'happy'}\nspecial_tokens = EMOTICONS\nfrom DAPOS.data.variation import Prefix, Suffix\nEASY_WORDS = {u'ليا': [(Prefix(u'ل'), u'يا', Suffix(u''))], u'لي': [(Prefix\n (u'ل'), u'ي', Suffix(u''))], u'لكم': [(Prefix(u'ل'), u'كم', Suffix(u'')\n )], u'لكما': [(Prefix(u'ل'), u'كما', Suffix(u''))], u'له': [(Prefix(\n u'ل'), u'ه', Suffix(u''))], u'لها': [(Prefix(u'ل'), u'ها', Suffix(u''))\n ], u'لهم': [(Prefix(u'ل'), u'هم', Suffix(u''))], u'لهما': [(Prefix(u'ل'\n ), u'هما', Suffix(u''))], u'لهن': [(Prefix(u'ل'), u'هم', Suffix(u''))],\n u'بيا': [(Prefix(u'ب'), u'يا', Suffix(u''))], u'بي': [(Prefix(u'ب'),\n u'ي', Suffix(u''))], u'بك': [(Prefix(u'ب'), u'ك', Suffix(u''))], u'بكم':\n [(Prefix(u'ب'), u'كم', Suffix(u''))], u'بكما': [(Prefix(u'ب'), u'كما',\n Suffix(u''))], u'به': [(Prefix(u'ب'), u'ه', Suffix(u''))], u'بها': [(\n Prefix(u'ب'), u'ها', Suffix(u''))], u'بهما': [(Prefix(u'ب'), u'هما',\n Suffix(u''))], u'بهم': [(Prefix(u'ب'), u'هم', Suffix(u''))], u'بهن': [(\n Prefix(u'ب'), u'هن', Suffix(u''))], u'عليا': [(Prefix(u''), u'على',\n Suffix(u'يا'))], u'فيا': [(Prefix(u'ف'), u'يا', Suffix(u''))]}\nEMOTICONS_TAG = 'EMO'\nPUNCTUATION_TAG = 'PUNC'\nDIGIT_TAG = 'CD'\nNOTDEFINED_TAG = 'NN'\n",
"<import token>\nPROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))\nEMOTICONS = {'O:)': 'angel', 'o:)': 'angel', 'O:-)': 'angel', 'o:-)':\n 'angel', 'o:-3': 'angel', 'o:3': 'angel', 'O;^)': 'angel', '>:[':\n 'annoyed/disappointed', ':-(': 'annoyed/disappointed', ':(':\n 'annoyed/disappointed', ':((': 'annoyed/disappointed', ':-((':\n 'annoyed/disappointed', ':-c': 'annoyed/disappointed', ':-<':\n 'annoyed/disappointed', ':?C': 'annoyed/disappointed', ':<':\n 'annoyed/disappointed', ':[': 'annoyed/disappointed', ':{':\n 'annoyed/disappointed', ':=||': 'annoyed/disappointed', ':@':\n 'annoyed/disappointed', '>:(': 'annoyed/disappointed', ':/':\n 'annoyed/disappointed', ':\\\\': 'annoyed/disappointed', '=/':\n 'annoyed/disappointed', '=\\\\': 'annoyed/disappointed', '>:/':\n 'annoyed/disappointed', '>:\\\\': 'annoyed/disappointed', ':S':\n 'annoyed/disappointed', ':s': 'annoyed/disappointed', ':-S':\n 'annoyed/disappointed', ':-s': 'annoyed/disappointed', ':|':\n 'annoyed/disappointed', ':-|': 'annoyed/disappointed', ':$':\n 'annoyed/disappointed', '?_?': 'annoyed/disappointed', '(>_<)':\n 'annoyed/disappointed', '>_<': 'annoyed/disappointed', '>__<':\n 'annoyed/disappointed', '(>__<)': 'annoyed/disappointed', '(-.-)':\n 'annoyed/disappointed', '(-_-)': 'annoyed/disappointed', '(._.)':\n 'annoyed/disappointed', '/:)': 'annoyed/disappointed', ':-$':\n 'annoyed/disappointed', '>:P': 'annoyed/disappointed', 'K':\n 'annoyed/disappointed', '3:)': 'devilish', '3:-)': 'devilish', '}:-)':\n 'devilish', '}:)': 'devilish', '>:)': 'devilish', 'B-)': 'happy', ':-)':\n 'happy', ':)': 'happy', ':o)': 'happy', ':]': 'happy', ':3': 'happy',\n ':c)': 'happy', ':>': 'happy', '=]': 'happy', '8)': 'happy', '=)':\n 'happy', ':}': 'happy', ':^)': 'happy', ':?)': 'happy', ':-))': 'happy',\n '<:-P': 'happy', '<:P': 'happy', '<:-p': 'happy', '<:p': 'happy', ';;)':\n 'happy', 'J': 'happy', '<3': 'heart', '^5': 'high-five', '>_>^':\n 'high-five', '^<_<': 'high-five', ':*': 'kiss', ':*)': 'kiss', ':^*':\n 'kiss', '}{': 'kiss', \"('}{')\": 'kiss', ':-D': 'laughing', ':D':\n 'laughing', '8-D': 'laughing', '8D': 'laughing', 'x-D': 'laughing',\n 'xD': 'laughing', 'X-D': 'laughing', 'XD': 'laughing', '=-D':\n 'laughing', '=D': 'laughing', ';D': 'laughing', '-3': 'laughing', '3':\n 'laughing', 'B^D': 'laughing', 'D:<': 'laughing', 'D:': 'laughing',\n 'D8': 'laughing', 'D;': 'laughing', 'D=': 'laughing', 'DX': 'laughing',\n ':-B': 'nerd', '8-)': 'nerd', '8)': 'nerd', '</3': 'sad', \":'(\": 'sad',\n \":'-(\": 'sad', 'QQ': 'sad', 'L': 'sad', ':#': 'sealed mouth', ':-#':\n 'sealed mouth', ':-X': 'sealed mouth', ':-x': 'sealed mouth', ':X':\n 'sealed mouth', ':x': 'sealed mouth', '??': 'shooting star', '??':\n 'shooting star', '~?': 'shooting star', '>:O': 'suprprised/shocked',\n '>:o': 'suprprised/shocked', ':-O': 'suprprised/shocked', ':-o':\n 'suprprised/shocked', ':O': 'suprprised/shocked', ':o':\n 'suprprised/shocked', 'O_o': 'suprprised/shocked', 'o_O':\n 'suprprised/shocked', 'O.o': 'suprprised/shocked', 'o.O':\n 'suprprised/shocked', '(O_o)': 'suprprised/shocked', '(o_O)':\n 'suprprised/shocked', '(O.o)': 'suprprised/shocked', '(o.O)':\n 'suprprised/shocked', \":'-)\": 'tears of happines', \":')\":\n 'tears of happines', ':P': 'teasing/playful', ':p': 'teasing/playful',\n '>:P': 'teasing/playful', '>:p': 'teasing/playful', 'X-P':\n 'teasing/playful', 'x-p': 'teasing/playful', 'xp': 'teasing/playful',\n 'XP': 'teasing/playful', ':-P': 'teasing/playful', ':-p':\n 'teasing/playful', '=P': 'teasing/playful', '=P': 
'teasing/playful',\n ':-?': 'teasing/playful', ':-b': 'teasing/playful', ':b':\n 'teasing/playful', ';)': 'wink', u'º)': 'wink', ';-)': 'wink', ';]':\n 'wink', u'^Ü^': 'happy'}\nspecial_tokens = EMOTICONS\n<import token>\nEASY_WORDS = {u'ليا': [(Prefix(u'ل'), u'يا', Suffix(u''))], u'لي': [(Prefix\n (u'ل'), u'ي', Suffix(u''))], u'لكم': [(Prefix(u'ل'), u'كم', Suffix(u'')\n )], u'لكما': [(Prefix(u'ل'), u'كما', Suffix(u''))], u'له': [(Prefix(\n u'ل'), u'ه', Suffix(u''))], u'لها': [(Prefix(u'ل'), u'ها', Suffix(u''))\n ], u'لهم': [(Prefix(u'ل'), u'هم', Suffix(u''))], u'لهما': [(Prefix(u'ل'\n ), u'هما', Suffix(u''))], u'لهن': [(Prefix(u'ل'), u'هم', Suffix(u''))],\n u'بيا': [(Prefix(u'ب'), u'يا', Suffix(u''))], u'بي': [(Prefix(u'ب'),\n u'ي', Suffix(u''))], u'بك': [(Prefix(u'ب'), u'ك', Suffix(u''))], u'بكم':\n [(Prefix(u'ب'), u'كم', Suffix(u''))], u'بكما': [(Prefix(u'ب'), u'كما',\n Suffix(u''))], u'به': [(Prefix(u'ب'), u'ه', Suffix(u''))], u'بها': [(\n Prefix(u'ب'), u'ها', Suffix(u''))], u'بهما': [(Prefix(u'ب'), u'هما',\n Suffix(u''))], u'بهم': [(Prefix(u'ب'), u'هم', Suffix(u''))], u'بهن': [(\n Prefix(u'ب'), u'هن', Suffix(u''))], u'عليا': [(Prefix(u''), u'على',\n Suffix(u'يا'))], u'فيا': [(Prefix(u'ف'), u'يا', Suffix(u''))]}\nEMOTICONS_TAG = 'EMO'\nPUNCTUATION_TAG = 'PUNC'\nDIGIT_TAG = 'CD'\nNOTDEFINED_TAG = 'NN'\n",
"<import token>\n<assignment token>\n<import token>\n<assignment token>\n"
] | false |
981 |
3cc894570189fe545f5db3150d0b69c16dc211dc
|
class player:
def __init__(self, name: str, symbol: str):
self._name = name
self._symbol = symbol
def decide_next_move(self):
"""
Checks all possible combinations to decide best next move
:return: board position
"""
pass
def get_next_move(self):
"""
Asks user for next move
:return: board position
"""
return int(input('Enter your move: '))
|
[
"\nclass player:\n\n def __init__(self, name: str, symbol: str):\n self._name = name\n self._symbol = symbol\n\n def decide_next_move(self):\n \"\"\"\n Checks all possible combinations to decide best next move\n :return: board position\n \"\"\"\n pass\n\n def get_next_move(self):\n \"\"\"\n Asks user for next move\n :return: board position\n \"\"\"\n return int(input('Enter your move: '))",
"class player:\n\n def __init__(self, name: str, symbol: str):\n self._name = name\n self._symbol = symbol\n\n def decide_next_move(self):\n \"\"\"\n Checks all possible combinations to decide best next move\n :return: board position\n \"\"\"\n pass\n\n def get_next_move(self):\n \"\"\"\n Asks user for next move\n :return: board position\n \"\"\"\n return int(input('Enter your move: '))\n",
"class player:\n\n def __init__(self, name: str, symbol: str):\n self._name = name\n self._symbol = symbol\n <function token>\n\n def get_next_move(self):\n \"\"\"\n Asks user for next move\n :return: board position\n \"\"\"\n return int(input('Enter your move: '))\n",
"class player:\n <function token>\n <function token>\n\n def get_next_move(self):\n \"\"\"\n Asks user for next move\n :return: board position\n \"\"\"\n return int(input('Enter your move: '))\n",
"class player:\n <function token>\n <function token>\n <function token>\n",
"<class token>\n"
] | false |
982 |
3f8b8b8cfbe712f09734d0fb7302073187d65a73
|
'''
def Sort(a):
i=1
while i<len(a):
j=i
while j>0 and a[j-1] > a[j]:
temp = a[j-1]
a[j-1] = a[j]
a[j] = temp
j-=1
i+=1
return a
'''
def Sort(a):
    i = 1
    n = len(a)
    while i < len(a):
        j = i
        print(i-1, '\t', i)
        # check j > 0 first so a[j-1] is never evaluated with j == 0
        while j > 0 and a[j-1] > a[j]:
            j -= 1
        print('Key : ', a[i], ' inserting at: ', j, '\t in ', a)
        if n > 2:
            j1 = n-2
            temp = a[n-1]
            # shift larger elements right; test j1 >= 0 before indexing
            while j1 >= 0 and a[j1] > temp:
                a[j1+1] = a[j1]
                j1 -= 1
                print(' '.join(list(map(str, a))))
            a[j1+1] = temp
            print(' '.join(list(map(str, a))))
        elif n == 1:
            return a
        else:  # len(a) == 2
            temp = a[1]
            a[1] = a[0]
            print(' '.join(list(map(str, a))))
            a[0] = temp
            print(' '.join(list(map(str, a))))
        i += 1
    return a
|
[
"'''\ndef Sort(a):\n i=1\n while i<len(a):\n j=i\n while j>0 and a[j-1] > a[j]:\n temp = a[j-1]\n a[j-1] = a[j]\n a[j] = temp\n j-=1\n i+=1\n return a\n'''\ndef Sort(a):\n i=1\n n=len(a)\n while i<len(a):\n j=i\n print(i-1,'\\t',i)\n while a[j-1]>a[j] and j>=0:\n j-=1\n print('Key : ',a[i],' inserting at: ',j, '\\t in ',a)\n if n>2:\n j1=n-2\n temp = arr[n-1]\n while arr[j1] > temp and j1>=0:\n arr[j1+1] = arr[j1]\n j1-=1\n print(' '.join(list(map(str, arr))))\n arr[j1+1] = temp\n print(' '.join(list(map(str, arr))))\n elif n==1: \n return arr\n else: # len(arr) =2\n temp = arr[1]\n arr[1]=arr[0]\n print(' '.join(list(map(str, arr))))\n arr[0] = temp \n print(' '.join(list(map(str, arr))))\n i+=1\n return a\n",
"<docstring token>\n\n\ndef Sort(a):\n i = 1\n n = len(a)\n while i < len(a):\n j = i\n print(i - 1, '\\t', i)\n while a[j - 1] > a[j] and j >= 0:\n j -= 1\n print('Key : ', a[i], ' inserting at: ', j, '\\t in ', a)\n if n > 2:\n j1 = n - 2\n temp = arr[n - 1]\n while arr[j1] > temp and j1 >= 0:\n arr[j1 + 1] = arr[j1]\n j1 -= 1\n print(' '.join(list(map(str, arr))))\n arr[j1 + 1] = temp\n print(' '.join(list(map(str, arr))))\n elif n == 1:\n return arr\n else:\n temp = arr[1]\n arr[1] = arr[0]\n print(' '.join(list(map(str, arr))))\n arr[0] = temp\n print(' '.join(list(map(str, arr))))\n i += 1\n return a\n",
"<docstring token>\n<function token>\n"
] | false |
983 |
e95de58828c63dc8ae24efff314665a308f6ce0c
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-12-13 02:06
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('stores', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Assistants',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('name_assistants', models.CharField(max_length=255)),
('phone_assistants', models.IntegerField()),
('email_assistants', models.EmailField(max_length=254)),
('address_assistants', models.TextField()),
('timestamp', models.DateField(auto_now=True)),
('fkstore', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='assistants', to='stores.Store')),
],
),
]
|
[
"# -*- coding: utf-8 -*-\n# Generated by Django 1.11.7 on 2017-12-13 02:06\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport uuid\n\n\nclass Migration(migrations.Migration):\n\n initial = True\n\n dependencies = [\n ('stores', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Assistants',\n fields=[\n ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),\n ('name_assistants', models.CharField(max_length=255)),\n ('phone_assistants', models.IntegerField()),\n ('email_assistants', models.EmailField(max_length=254)),\n ('address_assistants', models.TextField()),\n ('timestamp', models.DateField(auto_now=True)),\n ('fkstore', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='assistants', to='stores.Store')),\n ],\n ),\n ]\n",
"from __future__ import unicode_literals\nfrom django.db import migrations, models\nimport django.db.models.deletion\nimport uuid\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [('stores', '0001_initial')]\n operations = [migrations.CreateModel(name='Assistants', fields=[('id',\n models.UUIDField(default=uuid.uuid4, editable=False, primary_key=\n True, serialize=False)), ('name_assistants', models.CharField(\n max_length=255)), ('phone_assistants', models.IntegerField()), (\n 'email_assistants', models.EmailField(max_length=254)), (\n 'address_assistants', models.TextField()), ('timestamp', models.\n DateField(auto_now=True)), ('fkstore', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, related_name='assistants', to=\n 'stores.Store'))])]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n initial = True\n dependencies = [('stores', '0001_initial')]\n operations = [migrations.CreateModel(name='Assistants', fields=[('id',\n models.UUIDField(default=uuid.uuid4, editable=False, primary_key=\n True, serialize=False)), ('name_assistants', models.CharField(\n max_length=255)), ('phone_assistants', models.IntegerField()), (\n 'email_assistants', models.EmailField(max_length=254)), (\n 'address_assistants', models.TextField()), ('timestamp', models.\n DateField(auto_now=True)), ('fkstore', models.ForeignKey(on_delete=\n django.db.models.deletion.CASCADE, related_name='assistants', to=\n 'stores.Store'))])]\n",
"<import token>\n\n\nclass Migration(migrations.Migration):\n <assignment token>\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
984 |
a406efcab62b2af67484da776f01fc4e6d20b697
|
#!/usr/bin/env python3
def twoNumberSum(array, targetSum):
# Write your code here.
# O(n^2) time | O(1) space
''' Double for loop, quadratic run time
No variables increase as the input size increases,
therefore constant space complexity.
'''
    for i in range(len(array) - 1):
        firstNum = array[i]
        for j in range(i + 1, len(array)):
            secondNum = array[j]
            if firstNum + secondNum == targetSum:
                return [firstNum, secondNum]
    return []
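
# A minimal alternative sketch (not from the original solution): a hash set
# trades O(n) extra space for O(n) running time instead of the O(n^2) loops.
def twoNumberSumFast(array, targetSum):
    seen = set()
    for num in array:
        complement = targetSum - num
        if complement in seen:
            return [complement, num]
        seen.add(num)
    return []
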
# Testing
if __name__ == '__main__':
import json
import debug_v1
with open('test.json', 'r') as t:
load_test = json.load(t)
test_case = load_test['test']
correct_output = load_test['answer']
for count, case in enumerate(test_case):
print(f'Test Case {count+1}:', end=' ')
value = twoNumberSum(case['array'], case['targetSum'])
debug_v1.debug(case, value, correct_output[count])
|
[
"#!/usr/bin/env python3\n\ndef twoNumberSum(array, targetSum):\n # Write your code here.\n\n # O(n^2) time | O(1) space\n ''' Double for loop, quadratic run time\n No variables increase as the input size increases,\n therefore constant space complexity.\n '''\n for i in range(len(array) - 1):\n\tfirstNum = array[i]\t\t\n\tfor j in range(i + 1, len(array)):\n secondNum = array[j]\n\t if firstNum + secondNum == targetSum:\n\t\treturn [firstNum, secondNum]\n return []\t\n\t\t\n\n\n\n\n\n\n\n\n# Testing\nif __name__ == '__main__':\n import json\n import debug_v1\n\n with open('test.json', 'r') as t:\n load_test = json.load(t)\n test_case = load_test['test']\n correct_output = load_test['answer']\n\n for count, case in enumerate(test_case):\n print(f'Test Case {count+1}:', end=' ')\n value = twoNumberSum(case['array'], case['targetSum'])\n debug_v1.debug(case, value, correct_output[count])\n"
] | true |
985 |
d265781c6b618752a1afcf65ac137052c26388a6
|
from pathlib import Path  # needed for data_dir below; missing in the original
import xarray as xr
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pickle
import seaborn as sns
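# the % lines below are IPython magics; they run only in a notebook/IPython session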
%load_ext autoreload
%autoreload 2
%matplotlib
data_dir = Path('/Volumes/Lees_Extend/data/ecmwf_sowc/data/')
# READ in model (maybe want to do more predictions on historical data)
from src.models import load_model, Persistence
ealstm_path = data_dir / 'models/one_month_forecast/ealstm/model.pt'
assert ealstm_path.exists(), \
'Expected the unzipped file to have the model.pt file saved'
persistence = Persistence(data_folder=data_dir)
ealstm = load_model(model_path=ealstm_path)
# TODO: need to predict from X variables in other files
ealstm.evaluate_train_timesteps(year=np.arange(1990, 2010), month=3)
|
[
"import xarray as xr\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pickle\nimport seaborn as sns\n\n%load_ext autoreload\n%autoreload 2\n%matplotlib\n\ndata_dir = Path('/Volumes/Lees_Extend/data/ecmwf_sowc/data/')\n\n# READ in model (maybe want to do more predictions on historical data)\nfrom src.models import load_model, Persistence\n\nealstm_path = data_dir / 'models/one_month_forecast/ealstm/model.pt'\nassert ealstm_path.exists(), \\\n 'Expected the unzipped file to have the model.pt file saved'\n\npersistence = Persistence(data_folder=data_dir)\nealstm = load_model(model_path=ealstm_path)\n\n# TODO: need to predict from X variables in other files\nealstm.evaluate_train_timesteps(year=np.arange(1990, 2010), month=3)\n"
] | true |
986 |
15edb1c051ccbc6f927c0a859288511f94a3d853
|
from types import MappingProxyType
from typing import Any, Dict, Mapping, Type, TypeVar, Union
import yaml
from typing_extensions import Protocol
from mashumaro.serializer.base import DataClassDictMixin
DEFAULT_DICT_PARAMS = {
"use_bytes": False,
"use_enum": False,
"use_datetime": False,
}
EncodedData = Union[str, bytes]
T = TypeVar("T", bound="DataClassYAMLMixin")
class Encoder(Protocol): # pragma no cover
def __call__(self, o, **kwargs) -> EncodedData:
...
class Decoder(Protocol): # pragma no cover
def __call__(self, packed: EncodedData, **kwargs) -> Dict[Any, Any]:
...
class DataClassYAMLMixin(DataClassDictMixin):
def to_yaml(
self: T,
encoder: Encoder = yaml.dump, # type: ignore
dict_params: Mapping = MappingProxyType({}),
**encoder_kwargs,
) -> EncodedData:
return encoder(
self.to_dict(**dict(DEFAULT_DICT_PARAMS, **dict_params)),
**encoder_kwargs,
)
@classmethod
def from_yaml(
cls: Type[T],
data: EncodedData,
decoder: Decoder = yaml.safe_load, # type: ignore
dict_params: Mapping = MappingProxyType({}),
**decoder_kwargs,
) -> T:
return cls.from_dict(
decoder(data, **decoder_kwargs),
**dict(DEFAULT_DICT_PARAMS, **dict_params),
)
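
# Minimal usage sketch (a hypothetical dataclass, not part of this module):
#
#   from dataclasses import dataclass
#
#   @dataclass
#   class Point(DataClassYAMLMixin):
#       x: int
#       y: int
#
#   Point(1, 2).to_yaml()             # -> 'x: 1\ny: 2\n'
#   Point.from_yaml('x: 1\ny: 2\n')   # -> Point(x=1, y=2)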
|
[
"from types import MappingProxyType\nfrom typing import Any, Dict, Mapping, Type, TypeVar, Union\n\nimport yaml\nfrom typing_extensions import Protocol\n\nfrom mashumaro.serializer.base import DataClassDictMixin\n\nDEFAULT_DICT_PARAMS = {\n \"use_bytes\": False,\n \"use_enum\": False,\n \"use_datetime\": False,\n}\nEncodedData = Union[str, bytes]\nT = TypeVar(\"T\", bound=\"DataClassYAMLMixin\")\n\n\nclass Encoder(Protocol): # pragma no cover\n def __call__(self, o, **kwargs) -> EncodedData:\n ...\n\n\nclass Decoder(Protocol): # pragma no cover\n def __call__(self, packed: EncodedData, **kwargs) -> Dict[Any, Any]:\n ...\n\n\nclass DataClassYAMLMixin(DataClassDictMixin):\n def to_yaml(\n self: T,\n encoder: Encoder = yaml.dump, # type: ignore\n dict_params: Mapping = MappingProxyType({}),\n **encoder_kwargs,\n ) -> EncodedData:\n\n return encoder(\n self.to_dict(**dict(DEFAULT_DICT_PARAMS, **dict_params)),\n **encoder_kwargs,\n )\n\n @classmethod\n def from_yaml(\n cls: Type[T],\n data: EncodedData,\n decoder: Decoder = yaml.safe_load, # type: ignore\n dict_params: Mapping = MappingProxyType({}),\n **decoder_kwargs,\n ) -> T:\n return cls.from_dict(\n decoder(data, **decoder_kwargs),\n **dict(DEFAULT_DICT_PARAMS, **dict_params),\n )\n",
"from types import MappingProxyType\nfrom typing import Any, Dict, Mapping, Type, TypeVar, Union\nimport yaml\nfrom typing_extensions import Protocol\nfrom mashumaro.serializer.base import DataClassDictMixin\nDEFAULT_DICT_PARAMS = {'use_bytes': False, 'use_enum': False,\n 'use_datetime': False}\nEncodedData = Union[str, bytes]\nT = TypeVar('T', bound='DataClassYAMLMixin')\n\n\nclass Encoder(Protocol):\n\n def __call__(self, o, **kwargs) ->EncodedData:\n ...\n\n\nclass Decoder(Protocol):\n\n def __call__(self, packed: EncodedData, **kwargs) ->Dict[Any, Any]:\n ...\n\n\nclass DataClassYAMLMixin(DataClassDictMixin):\n\n def to_yaml(self: T, encoder: Encoder=yaml.dump, dict_params: Mapping=\n MappingProxyType({}), **encoder_kwargs) ->EncodedData:\n return encoder(self.to_dict(**dict(DEFAULT_DICT_PARAMS, **\n dict_params)), **encoder_kwargs)\n\n @classmethod\n def from_yaml(cls: Type[T], data: EncodedData, decoder: Decoder=yaml.\n safe_load, dict_params: Mapping=MappingProxyType({}), **decoder_kwargs\n ) ->T:\n return cls.from_dict(decoder(data, **decoder_kwargs), **dict(\n DEFAULT_DICT_PARAMS, **dict_params))\n",
"<import token>\nDEFAULT_DICT_PARAMS = {'use_bytes': False, 'use_enum': False,\n 'use_datetime': False}\nEncodedData = Union[str, bytes]\nT = TypeVar('T', bound='DataClassYAMLMixin')\n\n\nclass Encoder(Protocol):\n\n def __call__(self, o, **kwargs) ->EncodedData:\n ...\n\n\nclass Decoder(Protocol):\n\n def __call__(self, packed: EncodedData, **kwargs) ->Dict[Any, Any]:\n ...\n\n\nclass DataClassYAMLMixin(DataClassDictMixin):\n\n def to_yaml(self: T, encoder: Encoder=yaml.dump, dict_params: Mapping=\n MappingProxyType({}), **encoder_kwargs) ->EncodedData:\n return encoder(self.to_dict(**dict(DEFAULT_DICT_PARAMS, **\n dict_params)), **encoder_kwargs)\n\n @classmethod\n def from_yaml(cls: Type[T], data: EncodedData, decoder: Decoder=yaml.\n safe_load, dict_params: Mapping=MappingProxyType({}), **decoder_kwargs\n ) ->T:\n return cls.from_dict(decoder(data, **decoder_kwargs), **dict(\n DEFAULT_DICT_PARAMS, **dict_params))\n",
"<import token>\n<assignment token>\n\n\nclass Encoder(Protocol):\n\n def __call__(self, o, **kwargs) ->EncodedData:\n ...\n\n\nclass Decoder(Protocol):\n\n def __call__(self, packed: EncodedData, **kwargs) ->Dict[Any, Any]:\n ...\n\n\nclass DataClassYAMLMixin(DataClassDictMixin):\n\n def to_yaml(self: T, encoder: Encoder=yaml.dump, dict_params: Mapping=\n MappingProxyType({}), **encoder_kwargs) ->EncodedData:\n return encoder(self.to_dict(**dict(DEFAULT_DICT_PARAMS, **\n dict_params)), **encoder_kwargs)\n\n @classmethod\n def from_yaml(cls: Type[T], data: EncodedData, decoder: Decoder=yaml.\n safe_load, dict_params: Mapping=MappingProxyType({}), **decoder_kwargs\n ) ->T:\n return cls.from_dict(decoder(data, **decoder_kwargs), **dict(\n DEFAULT_DICT_PARAMS, **dict_params))\n",
"<import token>\n<assignment token>\n\n\nclass Encoder(Protocol):\n <function token>\n\n\nclass Decoder(Protocol):\n\n def __call__(self, packed: EncodedData, **kwargs) ->Dict[Any, Any]:\n ...\n\n\nclass DataClassYAMLMixin(DataClassDictMixin):\n\n def to_yaml(self: T, encoder: Encoder=yaml.dump, dict_params: Mapping=\n MappingProxyType({}), **encoder_kwargs) ->EncodedData:\n return encoder(self.to_dict(**dict(DEFAULT_DICT_PARAMS, **\n dict_params)), **encoder_kwargs)\n\n @classmethod\n def from_yaml(cls: Type[T], data: EncodedData, decoder: Decoder=yaml.\n safe_load, dict_params: Mapping=MappingProxyType({}), **decoder_kwargs\n ) ->T:\n return cls.from_dict(decoder(data, **decoder_kwargs), **dict(\n DEFAULT_DICT_PARAMS, **dict_params))\n",
"<import token>\n<assignment token>\n<class token>\n\n\nclass Decoder(Protocol):\n\n def __call__(self, packed: EncodedData, **kwargs) ->Dict[Any, Any]:\n ...\n\n\nclass DataClassYAMLMixin(DataClassDictMixin):\n\n def to_yaml(self: T, encoder: Encoder=yaml.dump, dict_params: Mapping=\n MappingProxyType({}), **encoder_kwargs) ->EncodedData:\n return encoder(self.to_dict(**dict(DEFAULT_DICT_PARAMS, **\n dict_params)), **encoder_kwargs)\n\n @classmethod\n def from_yaml(cls: Type[T], data: EncodedData, decoder: Decoder=yaml.\n safe_load, dict_params: Mapping=MappingProxyType({}), **decoder_kwargs\n ) ->T:\n return cls.from_dict(decoder(data, **decoder_kwargs), **dict(\n DEFAULT_DICT_PARAMS, **dict_params))\n",
"<import token>\n<assignment token>\n<class token>\n\n\nclass Decoder(Protocol):\n <function token>\n\n\nclass DataClassYAMLMixin(DataClassDictMixin):\n\n def to_yaml(self: T, encoder: Encoder=yaml.dump, dict_params: Mapping=\n MappingProxyType({}), **encoder_kwargs) ->EncodedData:\n return encoder(self.to_dict(**dict(DEFAULT_DICT_PARAMS, **\n dict_params)), **encoder_kwargs)\n\n @classmethod\n def from_yaml(cls: Type[T], data: EncodedData, decoder: Decoder=yaml.\n safe_load, dict_params: Mapping=MappingProxyType({}), **decoder_kwargs\n ) ->T:\n return cls.from_dict(decoder(data, **decoder_kwargs), **dict(\n DEFAULT_DICT_PARAMS, **dict_params))\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass DataClassYAMLMixin(DataClassDictMixin):\n\n def to_yaml(self: T, encoder: Encoder=yaml.dump, dict_params: Mapping=\n MappingProxyType({}), **encoder_kwargs) ->EncodedData:\n return encoder(self.to_dict(**dict(DEFAULT_DICT_PARAMS, **\n dict_params)), **encoder_kwargs)\n\n @classmethod\n def from_yaml(cls: Type[T], data: EncodedData, decoder: Decoder=yaml.\n safe_load, dict_params: Mapping=MappingProxyType({}), **decoder_kwargs\n ) ->T:\n return cls.from_dict(decoder(data, **decoder_kwargs), **dict(\n DEFAULT_DICT_PARAMS, **dict_params))\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass DataClassYAMLMixin(DataClassDictMixin):\n\n def to_yaml(self: T, encoder: Encoder=yaml.dump, dict_params: Mapping=\n MappingProxyType({}), **encoder_kwargs) ->EncodedData:\n return encoder(self.to_dict(**dict(DEFAULT_DICT_PARAMS, **\n dict_params)), **encoder_kwargs)\n <function token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass DataClassYAMLMixin(DataClassDictMixin):\n <function token>\n <function token>\n",
"<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n"
] | false |
987 |
274185896ab5c11256d69699df69fc2c0dde4f2d
|
''' extract package names from the Meteor guide and write them to packages-guide
Uses the content folder of https://github.com/meteor/guide '''
from collections import defaultdict
import os
import sys
import markdown
from bs4 import BeautifulSoup
def get_links_from_markdown(path, name):
try:
with open(path, 'r') as file:
md = file.read()
html = markdown.markdown(md)
soup = BeautifulSoup(html, 'html.parser')
return soup.find_all('a')
except PermissionError:
print('Could not open "%s"' % path)
except UnicodeDecodeError:
        print('Could not process "%s"' % path)
return []
def get_guide_packages(src_dir='content'):
if len(sys.argv) > 1:
src_dir = sys.argv[1]
subjects = defaultdict(list)
for entry in os.scandir(src_dir):
name = entry.name[:-3]
for link in get_links_from_markdown(entry.path, name):
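            # Meteor package names have the form "author:package", so exactly one ':' marks them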
if len(link.text.split(':')) == 2: # packages only
subjects[name].append(link.text)
return subjects
def write_packages(packages, path='packages-guide'):
with open(path, 'w') as out:
out.write('\n# packages from http://guide.meteor.com\n')
for subject, links in packages.items():
out.write('\n# %s\n' % subject)
for link in links:
out.write('%s\n' % link)
if __name__ == '__main__':
GUIDE = get_guide_packages()
write_packages(GUIDE)
|
[
"''' extract package names from the Meteor guide and write them to packages-guide\n Uses the content folder of https://github.com/meteor/guide '''\n\nfrom collections import defaultdict\nimport os\nimport sys\n\nimport markdown\nfrom bs4 import BeautifulSoup\n\n\ndef get_links_from_markdown(path, name):\n try:\n with open(path, 'r') as file:\n md = file.read()\n html = markdown.markdown(md)\n soup = BeautifulSoup(html, 'html.parser')\n return soup.find_all('a')\n except PermissionError:\n print('Could not open \"%s\"' % path)\n except UnicodeDecodeError:\n print('Could not proccess \"%s\"' % path)\n return []\n\n\ndef get_guide_packages(src_dir='content'):\n if len(sys.argv) > 1:\n src_dir = sys.argv[1]\n subjects = defaultdict(list)\n for entry in os.scandir(src_dir):\n name = entry.name[:-3]\n for link in get_links_from_markdown(entry.path, name):\n if len(link.text.split(':')) == 2: # packages only\n subjects[name].append(link.text)\n return subjects\n\n\ndef write_packages(packages, path='packages-guide'):\n with open(path, 'w') as out:\n out.write('\\n# packages from http://guide.meteor.com\\n')\n for subject, links in packages.items():\n out.write('\\n# %s\\n' % subject)\n for link in links:\n out.write('%s\\n' % link)\n\n\nif __name__ == '__main__':\n GUIDE = get_guide_packages()\n write_packages(GUIDE)\n",
"<docstring token>\nfrom collections import defaultdict\nimport os\nimport sys\nimport markdown\nfrom bs4 import BeautifulSoup\n\n\ndef get_links_from_markdown(path, name):\n try:\n with open(path, 'r') as file:\n md = file.read()\n html = markdown.markdown(md)\n soup = BeautifulSoup(html, 'html.parser')\n return soup.find_all('a')\n except PermissionError:\n print('Could not open \"%s\"' % path)\n except UnicodeDecodeError:\n print('Could not proccess \"%s\"' % path)\n return []\n\n\ndef get_guide_packages(src_dir='content'):\n if len(sys.argv) > 1:\n src_dir = sys.argv[1]\n subjects = defaultdict(list)\n for entry in os.scandir(src_dir):\n name = entry.name[:-3]\n for link in get_links_from_markdown(entry.path, name):\n if len(link.text.split(':')) == 2:\n subjects[name].append(link.text)\n return subjects\n\n\ndef write_packages(packages, path='packages-guide'):\n with open(path, 'w') as out:\n out.write('\\n# packages from http://guide.meteor.com\\n')\n for subject, links in packages.items():\n out.write('\\n# %s\\n' % subject)\n for link in links:\n out.write('%s\\n' % link)\n\n\nif __name__ == '__main__':\n GUIDE = get_guide_packages()\n write_packages(GUIDE)\n",
"<docstring token>\n<import token>\n\n\ndef get_links_from_markdown(path, name):\n try:\n with open(path, 'r') as file:\n md = file.read()\n html = markdown.markdown(md)\n soup = BeautifulSoup(html, 'html.parser')\n return soup.find_all('a')\n except PermissionError:\n print('Could not open \"%s\"' % path)\n except UnicodeDecodeError:\n print('Could not proccess \"%s\"' % path)\n return []\n\n\ndef get_guide_packages(src_dir='content'):\n if len(sys.argv) > 1:\n src_dir = sys.argv[1]\n subjects = defaultdict(list)\n for entry in os.scandir(src_dir):\n name = entry.name[:-3]\n for link in get_links_from_markdown(entry.path, name):\n if len(link.text.split(':')) == 2:\n subjects[name].append(link.text)\n return subjects\n\n\ndef write_packages(packages, path='packages-guide'):\n with open(path, 'w') as out:\n out.write('\\n# packages from http://guide.meteor.com\\n')\n for subject, links in packages.items():\n out.write('\\n# %s\\n' % subject)\n for link in links:\n out.write('%s\\n' % link)\n\n\nif __name__ == '__main__':\n GUIDE = get_guide_packages()\n write_packages(GUIDE)\n",
"<docstring token>\n<import token>\n\n\ndef get_links_from_markdown(path, name):\n try:\n with open(path, 'r') as file:\n md = file.read()\n html = markdown.markdown(md)\n soup = BeautifulSoup(html, 'html.parser')\n return soup.find_all('a')\n except PermissionError:\n print('Could not open \"%s\"' % path)\n except UnicodeDecodeError:\n print('Could not proccess \"%s\"' % path)\n return []\n\n\ndef get_guide_packages(src_dir='content'):\n if len(sys.argv) > 1:\n src_dir = sys.argv[1]\n subjects = defaultdict(list)\n for entry in os.scandir(src_dir):\n name = entry.name[:-3]\n for link in get_links_from_markdown(entry.path, name):\n if len(link.text.split(':')) == 2:\n subjects[name].append(link.text)\n return subjects\n\n\ndef write_packages(packages, path='packages-guide'):\n with open(path, 'w') as out:\n out.write('\\n# packages from http://guide.meteor.com\\n')\n for subject, links in packages.items():\n out.write('\\n# %s\\n' % subject)\n for link in links:\n out.write('%s\\n' % link)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n\n\ndef get_links_from_markdown(path, name):\n try:\n with open(path, 'r') as file:\n md = file.read()\n html = markdown.markdown(md)\n soup = BeautifulSoup(html, 'html.parser')\n return soup.find_all('a')\n except PermissionError:\n print('Could not open \"%s\"' % path)\n except UnicodeDecodeError:\n print('Could not proccess \"%s\"' % path)\n return []\n\n\n<function token>\n\n\ndef write_packages(packages, path='packages-guide'):\n with open(path, 'w') as out:\n out.write('\\n# packages from http://guide.meteor.com\\n')\n for subject, links in packages.items():\n out.write('\\n# %s\\n' % subject)\n for link in links:\n out.write('%s\\n' % link)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n\n\ndef write_packages(packages, path='packages-guide'):\n with open(path, 'w') as out:\n out.write('\\n# packages from http://guide.meteor.com\\n')\n for subject, links in packages.items():\n out.write('\\n# %s\\n' % subject)\n for link in links:\n out.write('%s\\n' % link)\n\n\n<code token>\n",
"<docstring token>\n<import token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
988 |
21e86e4719cda5c40f780aca6e56eb13c8c9b8e5
|
# encoding: utf-8
'''🤠 PDS Roundup: A step takes you further towards a complete roundup'''
from enum import Enum
from .util import commit, invoke
import logging, github3, tempfile, zipfile, os
_logger = logging.getLogger(__name__)
class Step(object):
'''An abstract step; executing steps comprises a roundup'''
def __init__(self, assembly):
'''Initialize a step with the given ``assembly``'''
self.assembly = assembly
def __repr__(self):
return f'<{self.__class__.__name__}()>'
def execute(self):
raise NotImplementedError('Subclasses must implement ``execute``')
def getRepository(self):
'''Utility: get the name of the GitHub repository'''
return self.assembly.context.environ.get('GITHUB_REPOSITORY').split('/')[1]
def getToken(self):
'''Utility: get the administrative GitHub token'''
return self.assembly.context.environ.get('ADMIN_GITHUB_TOKEN')
def getOwner(self):
'''Utility: return the owning user/organization of the repository in use'''
return self.assembly.context.environ.get('GITHUB_REPOSITORY').split('/')[0]
class StepName(Enum):
'''Enumerated identifiers for each of the possible steps of a roundup'''
null = 'null'
unitTest = 'unitTest'
integrationTest = 'integrationTest'
changeLog = 'changeLog'
requirements = 'requirements'
docs = 'docs'
build = 'build'
githubRelease = 'githubRelease'
artifactPublication = 'artifactPublication'
docPublication = 'docPublication'
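# Usage sketch (an assumption, not from the original module): a step named by
# its string value in configuration can be recovered as an enum member, e.g.
# ``StepName('changeLog') is StepName.changeLog`` evaluates to True.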
# Common Steps
# ============
#
# The following are concrete Step classes that are shared between contexts;
# i.e., they're independent of Python, Maven, etc.
class NullStep(Step):
'''This is a "null" or "no-op" step that does nothing.'''
def execute(self):
pass
# But for development, this sure is handy:
# import pdb;pdb.set_trace()
# import subprocess
# subprocess.run('/bin/sh')
class ChangeLogStep(Step):
'''This step generates a PDS-style changelog'''
_sections = '{"improvements":{"prefix":"**Improvements:**","labels":["Epic"]},"defects":{"prefix":"**Defects:**","labels":["bug"]},"deprecations":{"prefix":"**Deprecations:**","labels":["deprecation"]}}'
def execute(self):
token = self.getToken()
if not token:
_logger.info('🤷♀️ No GitHub administrative token; cannot generate changelog')
return
invoke([
'github_changelog_generator',
'--user',
self.getOwner(),
'--project',
self.getRepository(),
'--output',
'CHANGELOG.md',
'--token',
token,
'--configure-sections',
self._sections,
'--no-pull-requests',
'--issues-label',
'**Other closed issues:**',
'--issue-line-labels',
'high,low,medium'
])
commit('CHANGELOG.md', 'Update changelog')
class RequirementsStep(Step):
'''This step generates a PDS-style requirements file'''
def execute(self):
token = self.getToken()
if not token:
_logger.info('🤷♀️ No GitHub administrative token; cannot generate requirements')
return
argv = [
'requirement-report',
'--format',
'md',
'--organization',
self.getOwner(),
'--repository',
self.getRepository(),
'--output',
'docs/requirements/',
'--token',
token
]
if not self.assembly.isStable():
argv.append('--dev')
generatedFile = invoke(argv).strip()
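        # (``invoke`` is assumed to return the command's standard output; the
        # requirement-report tool evidently prints the path of the file it wrote.)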
if not generatedFile:
            _logger.warning('🤨 Did not get a requirements file from the requirement-report; will skip it')
return
commit(generatedFile, 'Update requirements')
class DocPublicationStep(Step):
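    '''Abstract step that zips a documentation directory (``getDocDir``) and
    attaches it to the latest GitHub release as ``documentation.zip``.'''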
def getDocDir(self):
raise NotImplementedError('Subclasses must implement ``getDocDir``')
def execute(self):
token = self.getToken()
if not token:
_logger.info('🤷♀️ No GitHub administrative token; cannot send doc artifacts to GitHub')
return
github = github3.login(token=token)
repo = github.repository(self.getOwner(), self.getRepository())
# 😮 TODO: There's a race here. This code is looking for the *latest* release, which
# we assume was made by the earlier ``StepName.githubRelease`` step. It's possible someone
# could create another release in between these steps! It'd be better if we fetched the
# release being worked on directly.
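        # (One possible fix, untested: fetch the release by its tag with
        # github3's ``repo.release_from_tag(...)``, if the tag name were
        # threaded through from the ``githubRelease`` step.)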
tmpFileName = None
try:
release = repo.releases().next() # ← here
# Make a ZIP archive of the docs
fd, tmpFileName = tempfile.mkstemp('.zip')
with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:
for folder, subdirs, filenames in os.walk(self.getDocDir()):
for fn in filenames:
path = os.path.join(folder, fn)
# Avoid things like Unix-domain sockets if they just happen to appear:
if os.path.isfile(path):
zf.write(path, path[len(self.getDocDir()) + 1:])
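                            # (The slice strips the doc dir prefix plus its trailing
                            # separator, so entries are archived relative to the docs root.)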
# Remove any existing ``documentation.zip``
for asset in release.assets():
if asset.name == 'documentation.zip':
asset.delete()
break
# Add the new ZIP file as a downloadable asset
with open(tmpFileName, 'rb') as tmpFile:
release.upload_asset('application/zip', 'documentation.zip', tmpFile, 'Documentation (zip)')
except StopIteration:
_logger.info('🧐 No releases found at all, so I cannot publish documentation assets to them')
return
finally:
if tmpFileName is not None: os.remove(tmpFileName)
|
[
"# encoding: utf-8\n\n'''🤠 PDS Roundup: A step takes you further towards a complete roundup'''\n\nfrom enum import Enum\nfrom .util import commit, invoke\nimport logging, github3, tempfile, zipfile, os\n\n_logger = logging.getLogger(__name__)\n\n\nclass Step(object):\n '''An abstract step; executing steps comprises a roundup'''\n def __init__(self, assembly):\n '''Initialize a step with the given ``assembly``'''\n self.assembly = assembly\n\n def __repr__(self):\n return f'<{self.__class__.__name__}()>'\n\n def execute(self):\n raise NotImplementedError('Subclasses must implement ``execute``')\n\n def getRepository(self):\n '''Utility: get the name of the GitHub repository'''\n return self.assembly.context.environ.get('GITHUB_REPOSITORY').split('/')[1]\n\n def getToken(self):\n '''Utility: get the administrative GitHub token'''\n return self.assembly.context.environ.get('ADMIN_GITHUB_TOKEN')\n\n def getOwner(self):\n '''Utility: return the owning user/organization of the repository in use'''\n return self.assembly.context.environ.get('GITHUB_REPOSITORY').split('/')[0]\n\n\nclass StepName(Enum):\n '''Enumerated identifiers for each of the possible steps of a roundup'''\n null = 'null'\n unitTest = 'unitTest'\n integrationTest = 'integrationTest'\n changeLog = 'changeLog'\n requirements = 'requirements'\n docs = 'docs'\n build = 'build'\n githubRelease = 'githubRelease'\n artifactPublication = 'artifactPublication'\n docPublication = 'docPublication'\n\n\n# Common Steps\n# ============\n#\n# The folowing are concrete Step classes that are shared between contexts;\n# i.e., they're independent of Python, Maven, etc.\n\n\nclass NullStep(Step):\n '''This is a \"null\" or \"no-op\" step that does nothing.'''\n def execute(self):\n pass\n # But for development, this sure is handy:\n # import pdb;pdb.set_trace()\n # import subprocess\n # subprocess.run('/bin/sh')\n\n\nclass ChangeLogStep(Step):\n '''This step generates a PDS-style changelog'''\n _sections = '{\"improvements\":{\"prefix\":\"**Improvements:**\",\"labels\":[\"Epic\"]},\"defects\":{\"prefix\":\"**Defects:**\",\"labels\":[\"bug\"]},\"deprecations\":{\"prefix\":\"**Deprecations:**\",\"labels\":[\"deprecation\"]}}'\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info('🤷♀️ No GitHub administrative token; cannot generate changelog')\n return\n invoke([\n 'github_changelog_generator',\n '--user',\n self.getOwner(),\n '--project',\n self.getRepository(),\n '--output',\n 'CHANGELOG.md',\n '--token',\n token,\n '--configure-sections',\n self._sections,\n '--no-pull-requests',\n '--issues-label',\n '**Other closed issues:**',\n '--issue-line-labels',\n 'high,low,medium'\n ])\n commit('CHANGELOG.md', 'Update changelog')\n\n\nclass RequirementsStep(Step):\n '''This step generates a PDS-style requirements file'''\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info('🤷♀️ No GitHub administrative token; cannot generate requirements')\n return\n argv = [\n 'requirement-report',\n '--format',\n 'md',\n '--organization',\n self.getOwner(),\n '--repository',\n self.getRepository(),\n '--output',\n 'docs/requirements/',\n '--token',\n token\n ]\n if not self.assembly.isStable():\n argv.append('--dev')\n generatedFile = invoke(argv).strip()\n if not generatedFile:\n _logger.warn('🤨 Did not get a requirements file from the requirement-report; will skip it')\n return\n commit(generatedFile, 'Update requirements')\n\n\nclass DocPublicationStep(Step):\n def getDocDir(self):\n raise 
NotImplementedError('Subclasses must implement ``getDocDir``')\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info('🤷♀️ No GitHub administrative token; cannot send doc artifacts to GitHub')\n return\n github = github3.login(token=token)\n repo = github.repository(self.getOwner(), self.getRepository())\n\n # 😮 TODO: There's a race here. This code is looking for the *latest* release, which\n # we assume was made by the earlier ``StepName.githubRelease`` step. It's possible someone\n # could create another release in between these steps! It'd be better if we fetched the\n # release being worked on directly.\n tmpFileName = None\n try:\n release = repo.releases().next() # ← here\n\n # Make a ZIP archive of the docs\n fd, tmpFileName = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:\n for folder, subdirs, filenames in os.walk(self.getDocDir()):\n for fn in filenames:\n path = os.path.join(folder, fn)\n # Avoid things like Unix-domain sockets if they just happen to appear:\n if os.path.isfile(path):\n zf.write(path, path[len(self.getDocDir()) + 1:])\n\n # Remove any existing ``documentation.zip``\n for asset in release.assets():\n if asset.name == 'documentation.zip':\n asset.delete()\n break\n\n # Add the new ZIP file as a downloadable asset\n with open(tmpFileName, 'rb') as tmpFile:\n release.upload_asset('application/zip', 'documentation.zip', tmpFile, 'Documentation (zip)')\n\n except StopIteration:\n _logger.info('🧐 No releases found at all, so I cannot publish documentation assets to them')\n return\n finally:\n if tmpFileName is not None: os.remove(tmpFileName)\n",
"<docstring token>\nfrom enum import Enum\nfrom .util import commit, invoke\nimport logging, github3, tempfile, zipfile, os\n_logger = logging.getLogger(__name__)\n\n\nclass Step(object):\n \"\"\"An abstract step; executing steps comprises a roundup\"\"\"\n\n def __init__(self, assembly):\n \"\"\"Initialize a step with the given ``assembly``\"\"\"\n self.assembly = assembly\n\n def __repr__(self):\n return f'<{self.__class__.__name__}()>'\n\n def execute(self):\n raise NotImplementedError('Subclasses must implement ``execute``')\n\n def getRepository(self):\n \"\"\"Utility: get the name of the GitHub repository\"\"\"\n return self.assembly.context.environ.get('GITHUB_REPOSITORY').split('/'\n )[1]\n\n def getToken(self):\n \"\"\"Utility: get the administrative GitHub token\"\"\"\n return self.assembly.context.environ.get('ADMIN_GITHUB_TOKEN')\n\n def getOwner(self):\n \"\"\"Utility: return the owning user/organization of the repository in use\"\"\"\n return self.assembly.context.environ.get('GITHUB_REPOSITORY').split('/'\n )[0]\n\n\nclass StepName(Enum):\n \"\"\"Enumerated identifiers for each of the possible steps of a roundup\"\"\"\n null = 'null'\n unitTest = 'unitTest'\n integrationTest = 'integrationTest'\n changeLog = 'changeLog'\n requirements = 'requirements'\n docs = 'docs'\n build = 'build'\n githubRelease = 'githubRelease'\n artifactPublication = 'artifactPublication'\n docPublication = 'docPublication'\n\n\nclass NullStep(Step):\n \"\"\"This is a \"null\" or \"no-op\" step that does nothing.\"\"\"\n\n def execute(self):\n pass\n\n\nclass ChangeLogStep(Step):\n \"\"\"This step generates a PDS-style changelog\"\"\"\n _sections = (\n '{\"improvements\":{\"prefix\":\"**Improvements:**\",\"labels\":[\"Epic\"]},\"defects\":{\"prefix\":\"**Defects:**\",\"labels\":[\"bug\"]},\"deprecations\":{\"prefix\":\"**Deprecations:**\",\"labels\":[\"deprecation\"]}}'\n )\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate changelog'\n )\n return\n invoke(['github_changelog_generator', '--user', self.getOwner(),\n '--project', self.getRepository(), '--output', 'CHANGELOG.md',\n '--token', token, '--configure-sections', self._sections,\n '--no-pull-requests', '--issues-label',\n '**Other closed issues:**', '--issue-line-labels',\n 'high,low,medium'])\n commit('CHANGELOG.md', 'Update changelog')\n\n\nclass RequirementsStep(Step):\n \"\"\"This step generates a PDS-style requirements file\"\"\"\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate requirements'\n )\n return\n argv = ['requirement-report', '--format', 'md', '--organization',\n self.getOwner(), '--repository', self.getRepository(),\n '--output', 'docs/requirements/', '--token', token]\n if not self.assembly.isStable():\n argv.append('--dev')\n generatedFile = invoke(argv).strip()\n if not generatedFile:\n _logger.warn(\n '🤨 Did not get a requirements file from the requirement-report; will skip it'\n )\n return\n commit(generatedFile, 'Update requirements')\n\n\nclass DocPublicationStep(Step):\n\n def getDocDir(self):\n raise NotImplementedError('Subclasses must implement ``getDocDir``')\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot send doc artifacts to GitHub'\n )\n return\n github = github3.login(token=token)\n repo = github.repository(self.getOwner(), 
self.getRepository())\n tmpFileName = None\n try:\n release = repo.releases().next()\n fd, tmpFileName = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:\n for folder, subdirs, filenames in os.walk(self.getDocDir()):\n for fn in filenames:\n path = os.path.join(folder, fn)\n if os.path.isfile(path):\n zf.write(path, path[len(self.getDocDir()) + 1:])\n for asset in release.assets():\n if asset.name == 'documentation.zip':\n asset.delete()\n break\n with open(tmpFileName, 'rb') as tmpFile:\n release.upload_asset('application/zip', 'documentation.zip',\n tmpFile, 'Documentation (zip)')\n except StopIteration:\n _logger.info(\n '🧐 No releases found at all, so I cannot publish documentation assets to them'\n )\n return\n finally:\n if tmpFileName is not None:\n os.remove(tmpFileName)\n",
"<docstring token>\n<import token>\n_logger = logging.getLogger(__name__)\n\n\nclass Step(object):\n \"\"\"An abstract step; executing steps comprises a roundup\"\"\"\n\n def __init__(self, assembly):\n \"\"\"Initialize a step with the given ``assembly``\"\"\"\n self.assembly = assembly\n\n def __repr__(self):\n return f'<{self.__class__.__name__}()>'\n\n def execute(self):\n raise NotImplementedError('Subclasses must implement ``execute``')\n\n def getRepository(self):\n \"\"\"Utility: get the name of the GitHub repository\"\"\"\n return self.assembly.context.environ.get('GITHUB_REPOSITORY').split('/'\n )[1]\n\n def getToken(self):\n \"\"\"Utility: get the administrative GitHub token\"\"\"\n return self.assembly.context.environ.get('ADMIN_GITHUB_TOKEN')\n\n def getOwner(self):\n \"\"\"Utility: return the owning user/organization of the repository in use\"\"\"\n return self.assembly.context.environ.get('GITHUB_REPOSITORY').split('/'\n )[0]\n\n\nclass StepName(Enum):\n \"\"\"Enumerated identifiers for each of the possible steps of a roundup\"\"\"\n null = 'null'\n unitTest = 'unitTest'\n integrationTest = 'integrationTest'\n changeLog = 'changeLog'\n requirements = 'requirements'\n docs = 'docs'\n build = 'build'\n githubRelease = 'githubRelease'\n artifactPublication = 'artifactPublication'\n docPublication = 'docPublication'\n\n\nclass NullStep(Step):\n \"\"\"This is a \"null\" or \"no-op\" step that does nothing.\"\"\"\n\n def execute(self):\n pass\n\n\nclass ChangeLogStep(Step):\n \"\"\"This step generates a PDS-style changelog\"\"\"\n _sections = (\n '{\"improvements\":{\"prefix\":\"**Improvements:**\",\"labels\":[\"Epic\"]},\"defects\":{\"prefix\":\"**Defects:**\",\"labels\":[\"bug\"]},\"deprecations\":{\"prefix\":\"**Deprecations:**\",\"labels\":[\"deprecation\"]}}'\n )\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate changelog'\n )\n return\n invoke(['github_changelog_generator', '--user', self.getOwner(),\n '--project', self.getRepository(), '--output', 'CHANGELOG.md',\n '--token', token, '--configure-sections', self._sections,\n '--no-pull-requests', '--issues-label',\n '**Other closed issues:**', '--issue-line-labels',\n 'high,low,medium'])\n commit('CHANGELOG.md', 'Update changelog')\n\n\nclass RequirementsStep(Step):\n \"\"\"This step generates a PDS-style requirements file\"\"\"\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate requirements'\n )\n return\n argv = ['requirement-report', '--format', 'md', '--organization',\n self.getOwner(), '--repository', self.getRepository(),\n '--output', 'docs/requirements/', '--token', token]\n if not self.assembly.isStable():\n argv.append('--dev')\n generatedFile = invoke(argv).strip()\n if not generatedFile:\n _logger.warn(\n '🤨 Did not get a requirements file from the requirement-report; will skip it'\n )\n return\n commit(generatedFile, 'Update requirements')\n\n\nclass DocPublicationStep(Step):\n\n def getDocDir(self):\n raise NotImplementedError('Subclasses must implement ``getDocDir``')\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot send doc artifacts to GitHub'\n )\n return\n github = github3.login(token=token)\n repo = github.repository(self.getOwner(), self.getRepository())\n tmpFileName = None\n try:\n release = repo.releases().next()\n fd, tmpFileName = 
tempfile.mkstemp('.zip')\n with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:\n for folder, subdirs, filenames in os.walk(self.getDocDir()):\n for fn in filenames:\n path = os.path.join(folder, fn)\n if os.path.isfile(path):\n zf.write(path, path[len(self.getDocDir()) + 1:])\n for asset in release.assets():\n if asset.name == 'documentation.zip':\n asset.delete()\n break\n with open(tmpFileName, 'rb') as tmpFile:\n release.upload_asset('application/zip', 'documentation.zip',\n tmpFile, 'Documentation (zip)')\n except StopIteration:\n _logger.info(\n '🧐 No releases found at all, so I cannot publish documentation assets to them'\n )\n return\n finally:\n if tmpFileName is not None:\n os.remove(tmpFileName)\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\nclass Step(object):\n \"\"\"An abstract step; executing steps comprises a roundup\"\"\"\n\n def __init__(self, assembly):\n \"\"\"Initialize a step with the given ``assembly``\"\"\"\n self.assembly = assembly\n\n def __repr__(self):\n return f'<{self.__class__.__name__}()>'\n\n def execute(self):\n raise NotImplementedError('Subclasses must implement ``execute``')\n\n def getRepository(self):\n \"\"\"Utility: get the name of the GitHub repository\"\"\"\n return self.assembly.context.environ.get('GITHUB_REPOSITORY').split('/'\n )[1]\n\n def getToken(self):\n \"\"\"Utility: get the administrative GitHub token\"\"\"\n return self.assembly.context.environ.get('ADMIN_GITHUB_TOKEN')\n\n def getOwner(self):\n \"\"\"Utility: return the owning user/organization of the repository in use\"\"\"\n return self.assembly.context.environ.get('GITHUB_REPOSITORY').split('/'\n )[0]\n\n\nclass StepName(Enum):\n \"\"\"Enumerated identifiers for each of the possible steps of a roundup\"\"\"\n null = 'null'\n unitTest = 'unitTest'\n integrationTest = 'integrationTest'\n changeLog = 'changeLog'\n requirements = 'requirements'\n docs = 'docs'\n build = 'build'\n githubRelease = 'githubRelease'\n artifactPublication = 'artifactPublication'\n docPublication = 'docPublication'\n\n\nclass NullStep(Step):\n \"\"\"This is a \"null\" or \"no-op\" step that does nothing.\"\"\"\n\n def execute(self):\n pass\n\n\nclass ChangeLogStep(Step):\n \"\"\"This step generates a PDS-style changelog\"\"\"\n _sections = (\n '{\"improvements\":{\"prefix\":\"**Improvements:**\",\"labels\":[\"Epic\"]},\"defects\":{\"prefix\":\"**Defects:**\",\"labels\":[\"bug\"]},\"deprecations\":{\"prefix\":\"**Deprecations:**\",\"labels\":[\"deprecation\"]}}'\n )\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate changelog'\n )\n return\n invoke(['github_changelog_generator', '--user', self.getOwner(),\n '--project', self.getRepository(), '--output', 'CHANGELOG.md',\n '--token', token, '--configure-sections', self._sections,\n '--no-pull-requests', '--issues-label',\n '**Other closed issues:**', '--issue-line-labels',\n 'high,low,medium'])\n commit('CHANGELOG.md', 'Update changelog')\n\n\nclass RequirementsStep(Step):\n \"\"\"This step generates a PDS-style requirements file\"\"\"\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate requirements'\n )\n return\n argv = ['requirement-report', '--format', 'md', '--organization',\n self.getOwner(), '--repository', self.getRepository(),\n '--output', 'docs/requirements/', '--token', token]\n if not self.assembly.isStable():\n argv.append('--dev')\n generatedFile = invoke(argv).strip()\n if not generatedFile:\n _logger.warn(\n '🤨 Did not get a requirements file from the requirement-report; will skip it'\n )\n return\n commit(generatedFile, 'Update requirements')\n\n\nclass DocPublicationStep(Step):\n\n def getDocDir(self):\n raise NotImplementedError('Subclasses must implement ``getDocDir``')\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot send doc artifacts to GitHub'\n )\n return\n github = github3.login(token=token)\n repo = github.repository(self.getOwner(), self.getRepository())\n tmpFileName = None\n try:\n release = repo.releases().next()\n fd, tmpFileName = 
tempfile.mkstemp('.zip')\n with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:\n for folder, subdirs, filenames in os.walk(self.getDocDir()):\n for fn in filenames:\n path = os.path.join(folder, fn)\n if os.path.isfile(path):\n zf.write(path, path[len(self.getDocDir()) + 1:])\n for asset in release.assets():\n if asset.name == 'documentation.zip':\n asset.delete()\n break\n with open(tmpFileName, 'rb') as tmpFile:\n release.upload_asset('application/zip', 'documentation.zip',\n tmpFile, 'Documentation (zip)')\n except StopIteration:\n _logger.info(\n '🧐 No releases found at all, so I cannot publish documentation assets to them'\n )\n return\n finally:\n if tmpFileName is not None:\n os.remove(tmpFileName)\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\nclass Step(object):\n <docstring token>\n\n def __init__(self, assembly):\n \"\"\"Initialize a step with the given ``assembly``\"\"\"\n self.assembly = assembly\n\n def __repr__(self):\n return f'<{self.__class__.__name__}()>'\n\n def execute(self):\n raise NotImplementedError('Subclasses must implement ``execute``')\n\n def getRepository(self):\n \"\"\"Utility: get the name of the GitHub repository\"\"\"\n return self.assembly.context.environ.get('GITHUB_REPOSITORY').split('/'\n )[1]\n\n def getToken(self):\n \"\"\"Utility: get the administrative GitHub token\"\"\"\n return self.assembly.context.environ.get('ADMIN_GITHUB_TOKEN')\n\n def getOwner(self):\n \"\"\"Utility: return the owning user/organization of the repository in use\"\"\"\n return self.assembly.context.environ.get('GITHUB_REPOSITORY').split('/'\n )[0]\n\n\nclass StepName(Enum):\n \"\"\"Enumerated identifiers for each of the possible steps of a roundup\"\"\"\n null = 'null'\n unitTest = 'unitTest'\n integrationTest = 'integrationTest'\n changeLog = 'changeLog'\n requirements = 'requirements'\n docs = 'docs'\n build = 'build'\n githubRelease = 'githubRelease'\n artifactPublication = 'artifactPublication'\n docPublication = 'docPublication'\n\n\nclass NullStep(Step):\n \"\"\"This is a \"null\" or \"no-op\" step that does nothing.\"\"\"\n\n def execute(self):\n pass\n\n\nclass ChangeLogStep(Step):\n \"\"\"This step generates a PDS-style changelog\"\"\"\n _sections = (\n '{\"improvements\":{\"prefix\":\"**Improvements:**\",\"labels\":[\"Epic\"]},\"defects\":{\"prefix\":\"**Defects:**\",\"labels\":[\"bug\"]},\"deprecations\":{\"prefix\":\"**Deprecations:**\",\"labels\":[\"deprecation\"]}}'\n )\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate changelog'\n )\n return\n invoke(['github_changelog_generator', '--user', self.getOwner(),\n '--project', self.getRepository(), '--output', 'CHANGELOG.md',\n '--token', token, '--configure-sections', self._sections,\n '--no-pull-requests', '--issues-label',\n '**Other closed issues:**', '--issue-line-labels',\n 'high,low,medium'])\n commit('CHANGELOG.md', 'Update changelog')\n\n\nclass RequirementsStep(Step):\n \"\"\"This step generates a PDS-style requirements file\"\"\"\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate requirements'\n )\n return\n argv = ['requirement-report', '--format', 'md', '--organization',\n self.getOwner(), '--repository', self.getRepository(),\n '--output', 'docs/requirements/', '--token', token]\n if not self.assembly.isStable():\n argv.append('--dev')\n generatedFile = invoke(argv).strip()\n if not generatedFile:\n _logger.warn(\n '🤨 Did not get a requirements file from the requirement-report; will skip it'\n )\n return\n commit(generatedFile, 'Update requirements')\n\n\nclass DocPublicationStep(Step):\n\n def getDocDir(self):\n raise NotImplementedError('Subclasses must implement ``getDocDir``')\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot send doc artifacts to GitHub'\n )\n return\n github = github3.login(token=token)\n repo = github.repository(self.getOwner(), self.getRepository())\n tmpFileName = None\n try:\n release = repo.releases().next()\n fd, tmpFileName = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(os.fdopen(fd, 'wb'), 
'w') as zf:\n for folder, subdirs, filenames in os.walk(self.getDocDir()):\n for fn in filenames:\n path = os.path.join(folder, fn)\n if os.path.isfile(path):\n zf.write(path, path[len(self.getDocDir()) + 1:])\n for asset in release.assets():\n if asset.name == 'documentation.zip':\n asset.delete()\n break\n with open(tmpFileName, 'rb') as tmpFile:\n release.upload_asset('application/zip', 'documentation.zip',\n tmpFile, 'Documentation (zip)')\n except StopIteration:\n _logger.info(\n '🧐 No releases found at all, so I cannot publish documentation assets to them'\n )\n return\n finally:\n if tmpFileName is not None:\n os.remove(tmpFileName)\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\nclass Step(object):\n <docstring token>\n\n def __init__(self, assembly):\n \"\"\"Initialize a step with the given ``assembly``\"\"\"\n self.assembly = assembly\n\n def __repr__(self):\n return f'<{self.__class__.__name__}()>'\n\n def execute(self):\n raise NotImplementedError('Subclasses must implement ``execute``')\n\n def getRepository(self):\n \"\"\"Utility: get the name of the GitHub repository\"\"\"\n return self.assembly.context.environ.get('GITHUB_REPOSITORY').split('/'\n )[1]\n\n def getToken(self):\n \"\"\"Utility: get the administrative GitHub token\"\"\"\n return self.assembly.context.environ.get('ADMIN_GITHUB_TOKEN')\n <function token>\n\n\nclass StepName(Enum):\n \"\"\"Enumerated identifiers for each of the possible steps of a roundup\"\"\"\n null = 'null'\n unitTest = 'unitTest'\n integrationTest = 'integrationTest'\n changeLog = 'changeLog'\n requirements = 'requirements'\n docs = 'docs'\n build = 'build'\n githubRelease = 'githubRelease'\n artifactPublication = 'artifactPublication'\n docPublication = 'docPublication'\n\n\nclass NullStep(Step):\n \"\"\"This is a \"null\" or \"no-op\" step that does nothing.\"\"\"\n\n def execute(self):\n pass\n\n\nclass ChangeLogStep(Step):\n \"\"\"This step generates a PDS-style changelog\"\"\"\n _sections = (\n '{\"improvements\":{\"prefix\":\"**Improvements:**\",\"labels\":[\"Epic\"]},\"defects\":{\"prefix\":\"**Defects:**\",\"labels\":[\"bug\"]},\"deprecations\":{\"prefix\":\"**Deprecations:**\",\"labels\":[\"deprecation\"]}}'\n )\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate changelog'\n )\n return\n invoke(['github_changelog_generator', '--user', self.getOwner(),\n '--project', self.getRepository(), '--output', 'CHANGELOG.md',\n '--token', token, '--configure-sections', self._sections,\n '--no-pull-requests', '--issues-label',\n '**Other closed issues:**', '--issue-line-labels',\n 'high,low,medium'])\n commit('CHANGELOG.md', 'Update changelog')\n\n\nclass RequirementsStep(Step):\n \"\"\"This step generates a PDS-style requirements file\"\"\"\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate requirements'\n )\n return\n argv = ['requirement-report', '--format', 'md', '--organization',\n self.getOwner(), '--repository', self.getRepository(),\n '--output', 'docs/requirements/', '--token', token]\n if not self.assembly.isStable():\n argv.append('--dev')\n generatedFile = invoke(argv).strip()\n if not generatedFile:\n _logger.warn(\n '🤨 Did not get a requirements file from the requirement-report; will skip it'\n )\n return\n commit(generatedFile, 'Update requirements')\n\n\nclass DocPublicationStep(Step):\n\n def getDocDir(self):\n raise NotImplementedError('Subclasses must implement ``getDocDir``')\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot send doc artifacts to GitHub'\n )\n return\n github = github3.login(token=token)\n repo = github.repository(self.getOwner(), self.getRepository())\n tmpFileName = None\n try:\n release = repo.releases().next()\n fd, tmpFileName = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:\n for folder, subdirs, filenames in os.walk(self.getDocDir()):\n for fn in filenames:\n path = os.path.join(folder, fn)\n if os.path.isfile(path):\n 
zf.write(path, path[len(self.getDocDir()) + 1:])\n for asset in release.assets():\n if asset.name == 'documentation.zip':\n asset.delete()\n break\n with open(tmpFileName, 'rb') as tmpFile:\n release.upload_asset('application/zip', 'documentation.zip',\n tmpFile, 'Documentation (zip)')\n except StopIteration:\n _logger.info(\n '🧐 No releases found at all, so I cannot publish documentation assets to them'\n )\n return\n finally:\n if tmpFileName is not None:\n os.remove(tmpFileName)\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\nclass Step(object):\n <docstring token>\n\n def __init__(self, assembly):\n \"\"\"Initialize a step with the given ``assembly``\"\"\"\n self.assembly = assembly\n\n def __repr__(self):\n return f'<{self.__class__.__name__}()>'\n\n def execute(self):\n raise NotImplementedError('Subclasses must implement ``execute``')\n\n def getRepository(self):\n \"\"\"Utility: get the name of the GitHub repository\"\"\"\n return self.assembly.context.environ.get('GITHUB_REPOSITORY').split('/'\n )[1]\n <function token>\n <function token>\n\n\nclass StepName(Enum):\n \"\"\"Enumerated identifiers for each of the possible steps of a roundup\"\"\"\n null = 'null'\n unitTest = 'unitTest'\n integrationTest = 'integrationTest'\n changeLog = 'changeLog'\n requirements = 'requirements'\n docs = 'docs'\n build = 'build'\n githubRelease = 'githubRelease'\n artifactPublication = 'artifactPublication'\n docPublication = 'docPublication'\n\n\nclass NullStep(Step):\n \"\"\"This is a \"null\" or \"no-op\" step that does nothing.\"\"\"\n\n def execute(self):\n pass\n\n\nclass ChangeLogStep(Step):\n \"\"\"This step generates a PDS-style changelog\"\"\"\n _sections = (\n '{\"improvements\":{\"prefix\":\"**Improvements:**\",\"labels\":[\"Epic\"]},\"defects\":{\"prefix\":\"**Defects:**\",\"labels\":[\"bug\"]},\"deprecations\":{\"prefix\":\"**Deprecations:**\",\"labels\":[\"deprecation\"]}}'\n )\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate changelog'\n )\n return\n invoke(['github_changelog_generator', '--user', self.getOwner(),\n '--project', self.getRepository(), '--output', 'CHANGELOG.md',\n '--token', token, '--configure-sections', self._sections,\n '--no-pull-requests', '--issues-label',\n '**Other closed issues:**', '--issue-line-labels',\n 'high,low,medium'])\n commit('CHANGELOG.md', 'Update changelog')\n\n\nclass RequirementsStep(Step):\n \"\"\"This step generates a PDS-style requirements file\"\"\"\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate requirements'\n )\n return\n argv = ['requirement-report', '--format', 'md', '--organization',\n self.getOwner(), '--repository', self.getRepository(),\n '--output', 'docs/requirements/', '--token', token]\n if not self.assembly.isStable():\n argv.append('--dev')\n generatedFile = invoke(argv).strip()\n if not generatedFile:\n _logger.warn(\n '🤨 Did not get a requirements file from the requirement-report; will skip it'\n )\n return\n commit(generatedFile, 'Update requirements')\n\n\nclass DocPublicationStep(Step):\n\n def getDocDir(self):\n raise NotImplementedError('Subclasses must implement ``getDocDir``')\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot send doc artifacts to GitHub'\n )\n return\n github = github3.login(token=token)\n repo = github.repository(self.getOwner(), self.getRepository())\n tmpFileName = None\n try:\n release = repo.releases().next()\n fd, tmpFileName = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:\n for folder, subdirs, filenames in os.walk(self.getDocDir()):\n for fn in filenames:\n path = os.path.join(folder, fn)\n if os.path.isfile(path):\n zf.write(path, path[len(self.getDocDir()) + 1:])\n for asset in release.assets():\n if asset.name == 'documentation.zip':\n asset.delete()\n 
break\n with open(tmpFileName, 'rb') as tmpFile:\n release.upload_asset('application/zip', 'documentation.zip',\n tmpFile, 'Documentation (zip)')\n except StopIteration:\n _logger.info(\n '🧐 No releases found at all, so I cannot publish documentation assets to them'\n )\n return\n finally:\n if tmpFileName is not None:\n os.remove(tmpFileName)\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\nclass Step(object):\n <docstring token>\n\n def __init__(self, assembly):\n \"\"\"Initialize a step with the given ``assembly``\"\"\"\n self.assembly = assembly\n\n def __repr__(self):\n return f'<{self.__class__.__name__}()>'\n <function token>\n\n def getRepository(self):\n \"\"\"Utility: get the name of the GitHub repository\"\"\"\n return self.assembly.context.environ.get('GITHUB_REPOSITORY').split('/'\n )[1]\n <function token>\n <function token>\n\n\nclass StepName(Enum):\n \"\"\"Enumerated identifiers for each of the possible steps of a roundup\"\"\"\n null = 'null'\n unitTest = 'unitTest'\n integrationTest = 'integrationTest'\n changeLog = 'changeLog'\n requirements = 'requirements'\n docs = 'docs'\n build = 'build'\n githubRelease = 'githubRelease'\n artifactPublication = 'artifactPublication'\n docPublication = 'docPublication'\n\n\nclass NullStep(Step):\n \"\"\"This is a \"null\" or \"no-op\" step that does nothing.\"\"\"\n\n def execute(self):\n pass\n\n\nclass ChangeLogStep(Step):\n \"\"\"This step generates a PDS-style changelog\"\"\"\n _sections = (\n '{\"improvements\":{\"prefix\":\"**Improvements:**\",\"labels\":[\"Epic\"]},\"defects\":{\"prefix\":\"**Defects:**\",\"labels\":[\"bug\"]},\"deprecations\":{\"prefix\":\"**Deprecations:**\",\"labels\":[\"deprecation\"]}}'\n )\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate changelog'\n )\n return\n invoke(['github_changelog_generator', '--user', self.getOwner(),\n '--project', self.getRepository(), '--output', 'CHANGELOG.md',\n '--token', token, '--configure-sections', self._sections,\n '--no-pull-requests', '--issues-label',\n '**Other closed issues:**', '--issue-line-labels',\n 'high,low,medium'])\n commit('CHANGELOG.md', 'Update changelog')\n\n\nclass RequirementsStep(Step):\n \"\"\"This step generates a PDS-style requirements file\"\"\"\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate requirements'\n )\n return\n argv = ['requirement-report', '--format', 'md', '--organization',\n self.getOwner(), '--repository', self.getRepository(),\n '--output', 'docs/requirements/', '--token', token]\n if not self.assembly.isStable():\n argv.append('--dev')\n generatedFile = invoke(argv).strip()\n if not generatedFile:\n _logger.warn(\n '🤨 Did not get a requirements file from the requirement-report; will skip it'\n )\n return\n commit(generatedFile, 'Update requirements')\n\n\nclass DocPublicationStep(Step):\n\n def getDocDir(self):\n raise NotImplementedError('Subclasses must implement ``getDocDir``')\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot send doc artifacts to GitHub'\n )\n return\n github = github3.login(token=token)\n repo = github.repository(self.getOwner(), self.getRepository())\n tmpFileName = None\n try:\n release = repo.releases().next()\n fd, tmpFileName = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:\n for folder, subdirs, filenames in os.walk(self.getDocDir()):\n for fn in filenames:\n path = os.path.join(folder, fn)\n if os.path.isfile(path):\n zf.write(path, path[len(self.getDocDir()) + 1:])\n for asset in release.assets():\n if asset.name == 'documentation.zip':\n asset.delete()\n break\n with open(tmpFileName, 'rb') as tmpFile:\n 
release.upload_asset('application/zip', 'documentation.zip',\n tmpFile, 'Documentation (zip)')\n except StopIteration:\n _logger.info(\n '🧐 No releases found at all, so I cannot publish documentation assets to them'\n )\n return\n finally:\n if tmpFileName is not None:\n os.remove(tmpFileName)\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\nclass Step(object):\n <docstring token>\n <function token>\n\n def __repr__(self):\n return f'<{self.__class__.__name__}()>'\n <function token>\n\n def getRepository(self):\n \"\"\"Utility: get the name of the GitHub repository\"\"\"\n return self.assembly.context.environ.get('GITHUB_REPOSITORY').split('/'\n )[1]\n <function token>\n <function token>\n\n\nclass StepName(Enum):\n \"\"\"Enumerated identifiers for each of the possible steps of a roundup\"\"\"\n null = 'null'\n unitTest = 'unitTest'\n integrationTest = 'integrationTest'\n changeLog = 'changeLog'\n requirements = 'requirements'\n docs = 'docs'\n build = 'build'\n githubRelease = 'githubRelease'\n artifactPublication = 'artifactPublication'\n docPublication = 'docPublication'\n\n\nclass NullStep(Step):\n \"\"\"This is a \"null\" or \"no-op\" step that does nothing.\"\"\"\n\n def execute(self):\n pass\n\n\nclass ChangeLogStep(Step):\n \"\"\"This step generates a PDS-style changelog\"\"\"\n _sections = (\n '{\"improvements\":{\"prefix\":\"**Improvements:**\",\"labels\":[\"Epic\"]},\"defects\":{\"prefix\":\"**Defects:**\",\"labels\":[\"bug\"]},\"deprecations\":{\"prefix\":\"**Deprecations:**\",\"labels\":[\"deprecation\"]}}'\n )\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate changelog'\n )\n return\n invoke(['github_changelog_generator', '--user', self.getOwner(),\n '--project', self.getRepository(), '--output', 'CHANGELOG.md',\n '--token', token, '--configure-sections', self._sections,\n '--no-pull-requests', '--issues-label',\n '**Other closed issues:**', '--issue-line-labels',\n 'high,low,medium'])\n commit('CHANGELOG.md', 'Update changelog')\n\n\nclass RequirementsStep(Step):\n \"\"\"This step generates a PDS-style requirements file\"\"\"\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate requirements'\n )\n return\n argv = ['requirement-report', '--format', 'md', '--organization',\n self.getOwner(), '--repository', self.getRepository(),\n '--output', 'docs/requirements/', '--token', token]\n if not self.assembly.isStable():\n argv.append('--dev')\n generatedFile = invoke(argv).strip()\n if not generatedFile:\n _logger.warn(\n '🤨 Did not get a requirements file from the requirement-report; will skip it'\n )\n return\n commit(generatedFile, 'Update requirements')\n\n\nclass DocPublicationStep(Step):\n\n def getDocDir(self):\n raise NotImplementedError('Subclasses must implement ``getDocDir``')\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot send doc artifacts to GitHub'\n )\n return\n github = github3.login(token=token)\n repo = github.repository(self.getOwner(), self.getRepository())\n tmpFileName = None\n try:\n release = repo.releases().next()\n fd, tmpFileName = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:\n for folder, subdirs, filenames in os.walk(self.getDocDir()):\n for fn in filenames:\n path = os.path.join(folder, fn)\n if os.path.isfile(path):\n zf.write(path, path[len(self.getDocDir()) + 1:])\n for asset in release.assets():\n if asset.name == 'documentation.zip':\n asset.delete()\n break\n with open(tmpFileName, 'rb') as tmpFile:\n release.upload_asset('application/zip', 'documentation.zip',\n tmpFile, 'Documentation (zip)')\n except StopIteration:\n 
_logger.info(\n '🧐 No releases found at all, so I cannot publish documentation assets to them'\n )\n return\n finally:\n if tmpFileName is not None:\n os.remove(tmpFileName)\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\nclass Step(object):\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n def getRepository(self):\n \"\"\"Utility: get the name of the GitHub repository\"\"\"\n return self.assembly.context.environ.get('GITHUB_REPOSITORY').split('/'\n )[1]\n <function token>\n <function token>\n\n\nclass StepName(Enum):\n \"\"\"Enumerated identifiers for each of the possible steps of a roundup\"\"\"\n null = 'null'\n unitTest = 'unitTest'\n integrationTest = 'integrationTest'\n changeLog = 'changeLog'\n requirements = 'requirements'\n docs = 'docs'\n build = 'build'\n githubRelease = 'githubRelease'\n artifactPublication = 'artifactPublication'\n docPublication = 'docPublication'\n\n\nclass NullStep(Step):\n \"\"\"This is a \"null\" or \"no-op\" step that does nothing.\"\"\"\n\n def execute(self):\n pass\n\n\nclass ChangeLogStep(Step):\n \"\"\"This step generates a PDS-style changelog\"\"\"\n _sections = (\n '{\"improvements\":{\"prefix\":\"**Improvements:**\",\"labels\":[\"Epic\"]},\"defects\":{\"prefix\":\"**Defects:**\",\"labels\":[\"bug\"]},\"deprecations\":{\"prefix\":\"**Deprecations:**\",\"labels\":[\"deprecation\"]}}'\n )\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate changelog'\n )\n return\n invoke(['github_changelog_generator', '--user', self.getOwner(),\n '--project', self.getRepository(), '--output', 'CHANGELOG.md',\n '--token', token, '--configure-sections', self._sections,\n '--no-pull-requests', '--issues-label',\n '**Other closed issues:**', '--issue-line-labels',\n 'high,low,medium'])\n commit('CHANGELOG.md', 'Update changelog')\n\n\nclass RequirementsStep(Step):\n \"\"\"This step generates a PDS-style requirements file\"\"\"\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate requirements'\n )\n return\n argv = ['requirement-report', '--format', 'md', '--organization',\n self.getOwner(), '--repository', self.getRepository(),\n '--output', 'docs/requirements/', '--token', token]\n if not self.assembly.isStable():\n argv.append('--dev')\n generatedFile = invoke(argv).strip()\n if not generatedFile:\n _logger.warn(\n '🤨 Did not get a requirements file from the requirement-report; will skip it'\n )\n return\n commit(generatedFile, 'Update requirements')\n\n\nclass DocPublicationStep(Step):\n\n def getDocDir(self):\n raise NotImplementedError('Subclasses must implement ``getDocDir``')\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot send doc artifacts to GitHub'\n )\n return\n github = github3.login(token=token)\n repo = github.repository(self.getOwner(), self.getRepository())\n tmpFileName = None\n try:\n release = repo.releases().next()\n fd, tmpFileName = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:\n for folder, subdirs, filenames in os.walk(self.getDocDir()):\n for fn in filenames:\n path = os.path.join(folder, fn)\n if os.path.isfile(path):\n zf.write(path, path[len(self.getDocDir()) + 1:])\n for asset in release.assets():\n if asset.name == 'documentation.zip':\n asset.delete()\n break\n with open(tmpFileName, 'rb') as tmpFile:\n release.upload_asset('application/zip', 'documentation.zip',\n tmpFile, 'Documentation (zip)')\n except StopIteration:\n _logger.info(\n '🧐 No releases found at all, so I 
cannot publish documentation assets to them'\n )\n return\n finally:\n if tmpFileName is not None:\n os.remove(tmpFileName)\n",
"<docstring token>\n<import token>\n<assignment token>\n\n\nclass Step(object):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n\nclass StepName(Enum):\n \"\"\"Enumerated identifiers for each of the possible steps of a roundup\"\"\"\n null = 'null'\n unitTest = 'unitTest'\n integrationTest = 'integrationTest'\n changeLog = 'changeLog'\n requirements = 'requirements'\n docs = 'docs'\n build = 'build'\n githubRelease = 'githubRelease'\n artifactPublication = 'artifactPublication'\n docPublication = 'docPublication'\n\n\nclass NullStep(Step):\n \"\"\"This is a \"null\" or \"no-op\" step that does nothing.\"\"\"\n\n def execute(self):\n pass\n\n\nclass ChangeLogStep(Step):\n \"\"\"This step generates a PDS-style changelog\"\"\"\n _sections = (\n '{\"improvements\":{\"prefix\":\"**Improvements:**\",\"labels\":[\"Epic\"]},\"defects\":{\"prefix\":\"**Defects:**\",\"labels\":[\"bug\"]},\"deprecations\":{\"prefix\":\"**Deprecations:**\",\"labels\":[\"deprecation\"]}}'\n )\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate changelog'\n )\n return\n invoke(['github_changelog_generator', '--user', self.getOwner(),\n '--project', self.getRepository(), '--output', 'CHANGELOG.md',\n '--token', token, '--configure-sections', self._sections,\n '--no-pull-requests', '--issues-label',\n '**Other closed issues:**', '--issue-line-labels',\n 'high,low,medium'])\n commit('CHANGELOG.md', 'Update changelog')\n\n\nclass RequirementsStep(Step):\n \"\"\"This step generates a PDS-style requirements file\"\"\"\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate requirements'\n )\n return\n argv = ['requirement-report', '--format', 'md', '--organization',\n self.getOwner(), '--repository', self.getRepository(),\n '--output', 'docs/requirements/', '--token', token]\n if not self.assembly.isStable():\n argv.append('--dev')\n generatedFile = invoke(argv).strip()\n if not generatedFile:\n _logger.warn(\n '🤨 Did not get a requirements file from the requirement-report; will skip it'\n )\n return\n commit(generatedFile, 'Update requirements')\n\n\nclass DocPublicationStep(Step):\n\n def getDocDir(self):\n raise NotImplementedError('Subclasses must implement ``getDocDir``')\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot send doc artifacts to GitHub'\n )\n return\n github = github3.login(token=token)\n repo = github.repository(self.getOwner(), self.getRepository())\n tmpFileName = None\n try:\n release = repo.releases().next()\n fd, tmpFileName = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:\n for folder, subdirs, filenames in os.walk(self.getDocDir()):\n for fn in filenames:\n path = os.path.join(folder, fn)\n if os.path.isfile(path):\n zf.write(path, path[len(self.getDocDir()) + 1:])\n for asset in release.assets():\n if asset.name == 'documentation.zip':\n asset.delete()\n break\n with open(tmpFileName, 'rb') as tmpFile:\n release.upload_asset('application/zip', 'documentation.zip',\n tmpFile, 'Documentation (zip)')\n except StopIteration:\n _logger.info(\n '🧐 No releases found at all, so I cannot publish documentation assets to them'\n )\n return\n finally:\n if tmpFileName is not None:\n os.remove(tmpFileName)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass StepName(Enum):\n \"\"\"Enumerated identifiers for each of the possible steps of a roundup\"\"\"\n null = 'null'\n unitTest = 'unitTest'\n integrationTest = 'integrationTest'\n changeLog = 'changeLog'\n requirements = 'requirements'\n docs = 'docs'\n build = 'build'\n githubRelease = 'githubRelease'\n artifactPublication = 'artifactPublication'\n docPublication = 'docPublication'\n\n\nclass NullStep(Step):\n \"\"\"This is a \"null\" or \"no-op\" step that does nothing.\"\"\"\n\n def execute(self):\n pass\n\n\nclass ChangeLogStep(Step):\n \"\"\"This step generates a PDS-style changelog\"\"\"\n _sections = (\n '{\"improvements\":{\"prefix\":\"**Improvements:**\",\"labels\":[\"Epic\"]},\"defects\":{\"prefix\":\"**Defects:**\",\"labels\":[\"bug\"]},\"deprecations\":{\"prefix\":\"**Deprecations:**\",\"labels\":[\"deprecation\"]}}'\n )\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate changelog'\n )\n return\n invoke(['github_changelog_generator', '--user', self.getOwner(),\n '--project', self.getRepository(), '--output', 'CHANGELOG.md',\n '--token', token, '--configure-sections', self._sections,\n '--no-pull-requests', '--issues-label',\n '**Other closed issues:**', '--issue-line-labels',\n 'high,low,medium'])\n commit('CHANGELOG.md', 'Update changelog')\n\n\nclass RequirementsStep(Step):\n \"\"\"This step generates a PDS-style requirements file\"\"\"\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate requirements'\n )\n return\n argv = ['requirement-report', '--format', 'md', '--organization',\n self.getOwner(), '--repository', self.getRepository(),\n '--output', 'docs/requirements/', '--token', token]\n if not self.assembly.isStable():\n argv.append('--dev')\n generatedFile = invoke(argv).strip()\n if not generatedFile:\n _logger.warn(\n '🤨 Did not get a requirements file from the requirement-report; will skip it'\n )\n return\n commit(generatedFile, 'Update requirements')\n\n\nclass DocPublicationStep(Step):\n\n def getDocDir(self):\n raise NotImplementedError('Subclasses must implement ``getDocDir``')\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot send doc artifacts to GitHub'\n )\n return\n github = github3.login(token=token)\n repo = github.repository(self.getOwner(), self.getRepository())\n tmpFileName = None\n try:\n release = repo.releases().next()\n fd, tmpFileName = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:\n for folder, subdirs, filenames in os.walk(self.getDocDir()):\n for fn in filenames:\n path = os.path.join(folder, fn)\n if os.path.isfile(path):\n zf.write(path, path[len(self.getDocDir()) + 1:])\n for asset in release.assets():\n if asset.name == 'documentation.zip':\n asset.delete()\n break\n with open(tmpFileName, 'rb') as tmpFile:\n release.upload_asset('application/zip', 'documentation.zip',\n tmpFile, 'Documentation (zip)')\n except StopIteration:\n _logger.info(\n '🧐 No releases found at all, so I cannot publish documentation assets to them'\n )\n return\n finally:\n if tmpFileName is not None:\n os.remove(tmpFileName)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass StepName(Enum):\n <docstring token>\n null = 'null'\n unitTest = 'unitTest'\n integrationTest = 'integrationTest'\n changeLog = 'changeLog'\n requirements = 'requirements'\n docs = 'docs'\n build = 'build'\n githubRelease = 'githubRelease'\n artifactPublication = 'artifactPublication'\n docPublication = 'docPublication'\n\n\nclass NullStep(Step):\n \"\"\"This is a \"null\" or \"no-op\" step that does nothing.\"\"\"\n\n def execute(self):\n pass\n\n\nclass ChangeLogStep(Step):\n \"\"\"This step generates a PDS-style changelog\"\"\"\n _sections = (\n '{\"improvements\":{\"prefix\":\"**Improvements:**\",\"labels\":[\"Epic\"]},\"defects\":{\"prefix\":\"**Defects:**\",\"labels\":[\"bug\"]},\"deprecations\":{\"prefix\":\"**Deprecations:**\",\"labels\":[\"deprecation\"]}}'\n )\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate changelog'\n )\n return\n invoke(['github_changelog_generator', '--user', self.getOwner(),\n '--project', self.getRepository(), '--output', 'CHANGELOG.md',\n '--token', token, '--configure-sections', self._sections,\n '--no-pull-requests', '--issues-label',\n '**Other closed issues:**', '--issue-line-labels',\n 'high,low,medium'])\n commit('CHANGELOG.md', 'Update changelog')\n\n\nclass RequirementsStep(Step):\n \"\"\"This step generates a PDS-style requirements file\"\"\"\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate requirements'\n )\n return\n argv = ['requirement-report', '--format', 'md', '--organization',\n self.getOwner(), '--repository', self.getRepository(),\n '--output', 'docs/requirements/', '--token', token]\n if not self.assembly.isStable():\n argv.append('--dev')\n generatedFile = invoke(argv).strip()\n if not generatedFile:\n _logger.warn(\n '🤨 Did not get a requirements file from the requirement-report; will skip it'\n )\n return\n commit(generatedFile, 'Update requirements')\n\n\nclass DocPublicationStep(Step):\n\n def getDocDir(self):\n raise NotImplementedError('Subclasses must implement ``getDocDir``')\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot send doc artifacts to GitHub'\n )\n return\n github = github3.login(token=token)\n repo = github.repository(self.getOwner(), self.getRepository())\n tmpFileName = None\n try:\n release = repo.releases().next()\n fd, tmpFileName = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:\n for folder, subdirs, filenames in os.walk(self.getDocDir()):\n for fn in filenames:\n path = os.path.join(folder, fn)\n if os.path.isfile(path):\n zf.write(path, path[len(self.getDocDir()) + 1:])\n for asset in release.assets():\n if asset.name == 'documentation.zip':\n asset.delete()\n break\n with open(tmpFileName, 'rb') as tmpFile:\n release.upload_asset('application/zip', 'documentation.zip',\n tmpFile, 'Documentation (zip)')\n except StopIteration:\n _logger.info(\n '🧐 No releases found at all, so I cannot publish documentation assets to them'\n )\n return\n finally:\n if tmpFileName is not None:\n os.remove(tmpFileName)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n\n\nclass StepName(Enum):\n <docstring token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n\n\nclass NullStep(Step):\n \"\"\"This is a \"null\" or \"no-op\" step that does nothing.\"\"\"\n\n def execute(self):\n pass\n\n\nclass ChangeLogStep(Step):\n \"\"\"This step generates a PDS-style changelog\"\"\"\n _sections = (\n '{\"improvements\":{\"prefix\":\"**Improvements:**\",\"labels\":[\"Epic\"]},\"defects\":{\"prefix\":\"**Defects:**\",\"labels\":[\"bug\"]},\"deprecations\":{\"prefix\":\"**Deprecations:**\",\"labels\":[\"deprecation\"]}}'\n )\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate changelog'\n )\n return\n invoke(['github_changelog_generator', '--user', self.getOwner(),\n '--project', self.getRepository(), '--output', 'CHANGELOG.md',\n '--token', token, '--configure-sections', self._sections,\n '--no-pull-requests', '--issues-label',\n '**Other closed issues:**', '--issue-line-labels',\n 'high,low,medium'])\n commit('CHANGELOG.md', 'Update changelog')\n\n\nclass RequirementsStep(Step):\n \"\"\"This step generates a PDS-style requirements file\"\"\"\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate requirements'\n )\n return\n argv = ['requirement-report', '--format', 'md', '--organization',\n self.getOwner(), '--repository', self.getRepository(),\n '--output', 'docs/requirements/', '--token', token]\n if not self.assembly.isStable():\n argv.append('--dev')\n generatedFile = invoke(argv).strip()\n if not generatedFile:\n _logger.warn(\n '🤨 Did not get a requirements file from the requirement-report; will skip it'\n )\n return\n commit(generatedFile, 'Update requirements')\n\n\nclass DocPublicationStep(Step):\n\n def getDocDir(self):\n raise NotImplementedError('Subclasses must implement ``getDocDir``')\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot send doc artifacts to GitHub'\n )\n return\n github = github3.login(token=token)\n repo = github.repository(self.getOwner(), self.getRepository())\n tmpFileName = None\n try:\n release = repo.releases().next()\n fd, tmpFileName = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:\n for folder, subdirs, filenames in os.walk(self.getDocDir()):\n for fn in filenames:\n path = os.path.join(folder, fn)\n if os.path.isfile(path):\n zf.write(path, path[len(self.getDocDir()) + 1:])\n for asset in release.assets():\n if asset.name == 'documentation.zip':\n asset.delete()\n break\n with open(tmpFileName, 'rb') as tmpFile:\n release.upload_asset('application/zip', 'documentation.zip',\n tmpFile, 'Documentation (zip)')\n except StopIteration:\n _logger.info(\n '🧐 No releases found at all, so I cannot publish documentation assets to them'\n )\n return\n finally:\n if tmpFileName is not None:\n os.remove(tmpFileName)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass NullStep(Step):\n \"\"\"This is a \"null\" or \"no-op\" step that does nothing.\"\"\"\n\n def execute(self):\n pass\n\n\nclass ChangeLogStep(Step):\n \"\"\"This step generates a PDS-style changelog\"\"\"\n _sections = (\n '{\"improvements\":{\"prefix\":\"**Improvements:**\",\"labels\":[\"Epic\"]},\"defects\":{\"prefix\":\"**Defects:**\",\"labels\":[\"bug\"]},\"deprecations\":{\"prefix\":\"**Deprecations:**\",\"labels\":[\"deprecation\"]}}'\n )\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate changelog'\n )\n return\n invoke(['github_changelog_generator', '--user', self.getOwner(),\n '--project', self.getRepository(), '--output', 'CHANGELOG.md',\n '--token', token, '--configure-sections', self._sections,\n '--no-pull-requests', '--issues-label',\n '**Other closed issues:**', '--issue-line-labels',\n 'high,low,medium'])\n commit('CHANGELOG.md', 'Update changelog')\n\n\nclass RequirementsStep(Step):\n \"\"\"This step generates a PDS-style requirements file\"\"\"\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate requirements'\n )\n return\n argv = ['requirement-report', '--format', 'md', '--organization',\n self.getOwner(), '--repository', self.getRepository(),\n '--output', 'docs/requirements/', '--token', token]\n if not self.assembly.isStable():\n argv.append('--dev')\n generatedFile = invoke(argv).strip()\n if not generatedFile:\n _logger.warn(\n '🤨 Did not get a requirements file from the requirement-report; will skip it'\n )\n return\n commit(generatedFile, 'Update requirements')\n\n\nclass DocPublicationStep(Step):\n\n def getDocDir(self):\n raise NotImplementedError('Subclasses must implement ``getDocDir``')\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot send doc artifacts to GitHub'\n )\n return\n github = github3.login(token=token)\n repo = github.repository(self.getOwner(), self.getRepository())\n tmpFileName = None\n try:\n release = repo.releases().next()\n fd, tmpFileName = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:\n for folder, subdirs, filenames in os.walk(self.getDocDir()):\n for fn in filenames:\n path = os.path.join(folder, fn)\n if os.path.isfile(path):\n zf.write(path, path[len(self.getDocDir()) + 1:])\n for asset in release.assets():\n if asset.name == 'documentation.zip':\n asset.delete()\n break\n with open(tmpFileName, 'rb') as tmpFile:\n release.upload_asset('application/zip', 'documentation.zip',\n tmpFile, 'Documentation (zip)')\n except StopIteration:\n _logger.info(\n '🧐 No releases found at all, so I cannot publish documentation assets to them'\n )\n return\n finally:\n if tmpFileName is not None:\n os.remove(tmpFileName)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass NullStep(Step):\n <docstring token>\n\n def execute(self):\n pass\n\n\nclass ChangeLogStep(Step):\n \"\"\"This step generates a PDS-style changelog\"\"\"\n _sections = (\n '{\"improvements\":{\"prefix\":\"**Improvements:**\",\"labels\":[\"Epic\"]},\"defects\":{\"prefix\":\"**Defects:**\",\"labels\":[\"bug\"]},\"deprecations\":{\"prefix\":\"**Deprecations:**\",\"labels\":[\"deprecation\"]}}'\n )\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate changelog'\n )\n return\n invoke(['github_changelog_generator', '--user', self.getOwner(),\n '--project', self.getRepository(), '--output', 'CHANGELOG.md',\n '--token', token, '--configure-sections', self._sections,\n '--no-pull-requests', '--issues-label',\n '**Other closed issues:**', '--issue-line-labels',\n 'high,low,medium'])\n commit('CHANGELOG.md', 'Update changelog')\n\n\nclass RequirementsStep(Step):\n \"\"\"This step generates a PDS-style requirements file\"\"\"\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate requirements'\n )\n return\n argv = ['requirement-report', '--format', 'md', '--organization',\n self.getOwner(), '--repository', self.getRepository(),\n '--output', 'docs/requirements/', '--token', token]\n if not self.assembly.isStable():\n argv.append('--dev')\n generatedFile = invoke(argv).strip()\n if not generatedFile:\n _logger.warn(\n '🤨 Did not get a requirements file from the requirement-report; will skip it'\n )\n return\n commit(generatedFile, 'Update requirements')\n\n\nclass DocPublicationStep(Step):\n\n def getDocDir(self):\n raise NotImplementedError('Subclasses must implement ``getDocDir``')\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot send doc artifacts to GitHub'\n )\n return\n github = github3.login(token=token)\n repo = github.repository(self.getOwner(), self.getRepository())\n tmpFileName = None\n try:\n release = repo.releases().next()\n fd, tmpFileName = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:\n for folder, subdirs, filenames in os.walk(self.getDocDir()):\n for fn in filenames:\n path = os.path.join(folder, fn)\n if os.path.isfile(path):\n zf.write(path, path[len(self.getDocDir()) + 1:])\n for asset in release.assets():\n if asset.name == 'documentation.zip':\n asset.delete()\n break\n with open(tmpFileName, 'rb') as tmpFile:\n release.upload_asset('application/zip', 'documentation.zip',\n tmpFile, 'Documentation (zip)')\n except StopIteration:\n _logger.info(\n '🧐 No releases found at all, so I cannot publish documentation assets to them'\n )\n return\n finally:\n if tmpFileName is not None:\n os.remove(tmpFileName)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n\n\nclass NullStep(Step):\n <docstring token>\n <function token>\n\n\nclass ChangeLogStep(Step):\n \"\"\"This step generates a PDS-style changelog\"\"\"\n _sections = (\n '{\"improvements\":{\"prefix\":\"**Improvements:**\",\"labels\":[\"Epic\"]},\"defects\":{\"prefix\":\"**Defects:**\",\"labels\":[\"bug\"]},\"deprecations\":{\"prefix\":\"**Deprecations:**\",\"labels\":[\"deprecation\"]}}'\n )\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate changelog'\n )\n return\n invoke(['github_changelog_generator', '--user', self.getOwner(),\n '--project', self.getRepository(), '--output', 'CHANGELOG.md',\n '--token', token, '--configure-sections', self._sections,\n '--no-pull-requests', '--issues-label',\n '**Other closed issues:**', '--issue-line-labels',\n 'high,low,medium'])\n commit('CHANGELOG.md', 'Update changelog')\n\n\nclass RequirementsStep(Step):\n \"\"\"This step generates a PDS-style requirements file\"\"\"\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate requirements'\n )\n return\n argv = ['requirement-report', '--format', 'md', '--organization',\n self.getOwner(), '--repository', self.getRepository(),\n '--output', 'docs/requirements/', '--token', token]\n if not self.assembly.isStable():\n argv.append('--dev')\n generatedFile = invoke(argv).strip()\n if not generatedFile:\n _logger.warn(\n '🤨 Did not get a requirements file from the requirement-report; will skip it'\n )\n return\n commit(generatedFile, 'Update requirements')\n\n\nclass DocPublicationStep(Step):\n\n def getDocDir(self):\n raise NotImplementedError('Subclasses must implement ``getDocDir``')\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot send doc artifacts to GitHub'\n )\n return\n github = github3.login(token=token)\n repo = github.repository(self.getOwner(), self.getRepository())\n tmpFileName = None\n try:\n release = repo.releases().next()\n fd, tmpFileName = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:\n for folder, subdirs, filenames in os.walk(self.getDocDir()):\n for fn in filenames:\n path = os.path.join(folder, fn)\n if os.path.isfile(path):\n zf.write(path, path[len(self.getDocDir()) + 1:])\n for asset in release.assets():\n if asset.name == 'documentation.zip':\n asset.delete()\n break\n with open(tmpFileName, 'rb') as tmpFile:\n release.upload_asset('application/zip', 'documentation.zip',\n tmpFile, 'Documentation (zip)')\n except StopIteration:\n _logger.info(\n '🧐 No releases found at all, so I cannot publish documentation assets to them'\n )\n return\n finally:\n if tmpFileName is not None:\n os.remove(tmpFileName)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass ChangeLogStep(Step):\n \"\"\"This step generates a PDS-style changelog\"\"\"\n _sections = (\n '{\"improvements\":{\"prefix\":\"**Improvements:**\",\"labels\":[\"Epic\"]},\"defects\":{\"prefix\":\"**Defects:**\",\"labels\":[\"bug\"]},\"deprecations\":{\"prefix\":\"**Deprecations:**\",\"labels\":[\"deprecation\"]}}'\n )\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate changelog'\n )\n return\n invoke(['github_changelog_generator', '--user', self.getOwner(),\n '--project', self.getRepository(), '--output', 'CHANGELOG.md',\n '--token', token, '--configure-sections', self._sections,\n '--no-pull-requests', '--issues-label',\n '**Other closed issues:**', '--issue-line-labels',\n 'high,low,medium'])\n commit('CHANGELOG.md', 'Update changelog')\n\n\nclass RequirementsStep(Step):\n \"\"\"This step generates a PDS-style requirements file\"\"\"\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate requirements'\n )\n return\n argv = ['requirement-report', '--format', 'md', '--organization',\n self.getOwner(), '--repository', self.getRepository(),\n '--output', 'docs/requirements/', '--token', token]\n if not self.assembly.isStable():\n argv.append('--dev')\n generatedFile = invoke(argv).strip()\n if not generatedFile:\n _logger.warn(\n '🤨 Did not get a requirements file from the requirement-report; will skip it'\n )\n return\n commit(generatedFile, 'Update requirements')\n\n\nclass DocPublicationStep(Step):\n\n def getDocDir(self):\n raise NotImplementedError('Subclasses must implement ``getDocDir``')\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot send doc artifacts to GitHub'\n )\n return\n github = github3.login(token=token)\n repo = github.repository(self.getOwner(), self.getRepository())\n tmpFileName = None\n try:\n release = repo.releases().next()\n fd, tmpFileName = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:\n for folder, subdirs, filenames in os.walk(self.getDocDir()):\n for fn in filenames:\n path = os.path.join(folder, fn)\n if os.path.isfile(path):\n zf.write(path, path[len(self.getDocDir()) + 1:])\n for asset in release.assets():\n if asset.name == 'documentation.zip':\n asset.delete()\n break\n with open(tmpFileName, 'rb') as tmpFile:\n release.upload_asset('application/zip', 'documentation.zip',\n tmpFile, 'Documentation (zip)')\n except StopIteration:\n _logger.info(\n '🧐 No releases found at all, so I cannot publish documentation assets to them'\n )\n return\n finally:\n if tmpFileName is not None:\n os.remove(tmpFileName)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass ChangeLogStep(Step):\n <docstring token>\n _sections = (\n '{\"improvements\":{\"prefix\":\"**Improvements:**\",\"labels\":[\"Epic\"]},\"defects\":{\"prefix\":\"**Defects:**\",\"labels\":[\"bug\"]},\"deprecations\":{\"prefix\":\"**Deprecations:**\",\"labels\":[\"deprecation\"]}}'\n )\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate changelog'\n )\n return\n invoke(['github_changelog_generator', '--user', self.getOwner(),\n '--project', self.getRepository(), '--output', 'CHANGELOG.md',\n '--token', token, '--configure-sections', self._sections,\n '--no-pull-requests', '--issues-label',\n '**Other closed issues:**', '--issue-line-labels',\n 'high,low,medium'])\n commit('CHANGELOG.md', 'Update changelog')\n\n\nclass RequirementsStep(Step):\n \"\"\"This step generates a PDS-style requirements file\"\"\"\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate requirements'\n )\n return\n argv = ['requirement-report', '--format', 'md', '--organization',\n self.getOwner(), '--repository', self.getRepository(),\n '--output', 'docs/requirements/', '--token', token]\n if not self.assembly.isStable():\n argv.append('--dev')\n generatedFile = invoke(argv).strip()\n if not generatedFile:\n _logger.warn(\n '🤨 Did not get a requirements file from the requirement-report; will skip it'\n )\n return\n commit(generatedFile, 'Update requirements')\n\n\nclass DocPublicationStep(Step):\n\n def getDocDir(self):\n raise NotImplementedError('Subclasses must implement ``getDocDir``')\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot send doc artifacts to GitHub'\n )\n return\n github = github3.login(token=token)\n repo = github.repository(self.getOwner(), self.getRepository())\n tmpFileName = None\n try:\n release = repo.releases().next()\n fd, tmpFileName = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:\n for folder, subdirs, filenames in os.walk(self.getDocDir()):\n for fn in filenames:\n path = os.path.join(folder, fn)\n if os.path.isfile(path):\n zf.write(path, path[len(self.getDocDir()) + 1:])\n for asset in release.assets():\n if asset.name == 'documentation.zip':\n asset.delete()\n break\n with open(tmpFileName, 'rb') as tmpFile:\n release.upload_asset('application/zip', 'documentation.zip',\n tmpFile, 'Documentation (zip)')\n except StopIteration:\n _logger.info(\n '🧐 No releases found at all, so I cannot publish documentation assets to them'\n )\n return\n finally:\n if tmpFileName is not None:\n os.remove(tmpFileName)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass ChangeLogStep(Step):\n <docstring token>\n <assignment token>\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate changelog'\n )\n return\n invoke(['github_changelog_generator', '--user', self.getOwner(),\n '--project', self.getRepository(), '--output', 'CHANGELOG.md',\n '--token', token, '--configure-sections', self._sections,\n '--no-pull-requests', '--issues-label',\n '**Other closed issues:**', '--issue-line-labels',\n 'high,low,medium'])\n commit('CHANGELOG.md', 'Update changelog')\n\n\nclass RequirementsStep(Step):\n \"\"\"This step generates a PDS-style requirements file\"\"\"\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate requirements'\n )\n return\n argv = ['requirement-report', '--format', 'md', '--organization',\n self.getOwner(), '--repository', self.getRepository(),\n '--output', 'docs/requirements/', '--token', token]\n if not self.assembly.isStable():\n argv.append('--dev')\n generatedFile = invoke(argv).strip()\n if not generatedFile:\n _logger.warn(\n '🤨 Did not get a requirements file from the requirement-report; will skip it'\n )\n return\n commit(generatedFile, 'Update requirements')\n\n\nclass DocPublicationStep(Step):\n\n def getDocDir(self):\n raise NotImplementedError('Subclasses must implement ``getDocDir``')\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot send doc artifacts to GitHub'\n )\n return\n github = github3.login(token=token)\n repo = github.repository(self.getOwner(), self.getRepository())\n tmpFileName = None\n try:\n release = repo.releases().next()\n fd, tmpFileName = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:\n for folder, subdirs, filenames in os.walk(self.getDocDir()):\n for fn in filenames:\n path = os.path.join(folder, fn)\n if os.path.isfile(path):\n zf.write(path, path[len(self.getDocDir()) + 1:])\n for asset in release.assets():\n if asset.name == 'documentation.zip':\n asset.delete()\n break\n with open(tmpFileName, 'rb') as tmpFile:\n release.upload_asset('application/zip', 'documentation.zip',\n tmpFile, 'Documentation (zip)')\n except StopIteration:\n _logger.info(\n '🧐 No releases found at all, so I cannot publish documentation assets to them'\n )\n return\n finally:\n if tmpFileName is not None:\n os.remove(tmpFileName)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n\n\nclass ChangeLogStep(Step):\n <docstring token>\n <assignment token>\n <function token>\n\n\nclass RequirementsStep(Step):\n \"\"\"This step generates a PDS-style requirements file\"\"\"\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate requirements'\n )\n return\n argv = ['requirement-report', '--format', 'md', '--organization',\n self.getOwner(), '--repository', self.getRepository(),\n '--output', 'docs/requirements/', '--token', token]\n if not self.assembly.isStable():\n argv.append('--dev')\n generatedFile = invoke(argv).strip()\n if not generatedFile:\n _logger.warn(\n '🤨 Did not get a requirements file from the requirement-report; will skip it'\n )\n return\n commit(generatedFile, 'Update requirements')\n\n\nclass DocPublicationStep(Step):\n\n def getDocDir(self):\n raise NotImplementedError('Subclasses must implement ``getDocDir``')\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot send doc artifacts to GitHub'\n )\n return\n github = github3.login(token=token)\n repo = github.repository(self.getOwner(), self.getRepository())\n tmpFileName = None\n try:\n release = repo.releases().next()\n fd, tmpFileName = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:\n for folder, subdirs, filenames in os.walk(self.getDocDir()):\n for fn in filenames:\n path = os.path.join(folder, fn)\n if os.path.isfile(path):\n zf.write(path, path[len(self.getDocDir()) + 1:])\n for asset in release.assets():\n if asset.name == 'documentation.zip':\n asset.delete()\n break\n with open(tmpFileName, 'rb') as tmpFile:\n release.upload_asset('application/zip', 'documentation.zip',\n tmpFile, 'Documentation (zip)')\n except StopIteration:\n _logger.info(\n '🧐 No releases found at all, so I cannot publish documentation assets to them'\n )\n return\n finally:\n if tmpFileName is not None:\n os.remove(tmpFileName)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RequirementsStep(Step):\n \"\"\"This step generates a PDS-style requirements file\"\"\"\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate requirements'\n )\n return\n argv = ['requirement-report', '--format', 'md', '--organization',\n self.getOwner(), '--repository', self.getRepository(),\n '--output', 'docs/requirements/', '--token', token]\n if not self.assembly.isStable():\n argv.append('--dev')\n generatedFile = invoke(argv).strip()\n if not generatedFile:\n _logger.warn(\n '🤨 Did not get a requirements file from the requirement-report; will skip it'\n )\n return\n commit(generatedFile, 'Update requirements')\n\n\nclass DocPublicationStep(Step):\n\n def getDocDir(self):\n raise NotImplementedError('Subclasses must implement ``getDocDir``')\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot send doc artifacts to GitHub'\n )\n return\n github = github3.login(token=token)\n repo = github.repository(self.getOwner(), self.getRepository())\n tmpFileName = None\n try:\n release = repo.releases().next()\n fd, tmpFileName = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:\n for folder, subdirs, filenames in os.walk(self.getDocDir()):\n for fn in filenames:\n path = os.path.join(folder, fn)\n if os.path.isfile(path):\n zf.write(path, path[len(self.getDocDir()) + 1:])\n for asset in release.assets():\n if asset.name == 'documentation.zip':\n asset.delete()\n break\n with open(tmpFileName, 'rb') as tmpFile:\n release.upload_asset('application/zip', 'documentation.zip',\n tmpFile, 'Documentation (zip)')\n except StopIteration:\n _logger.info(\n '🧐 No releases found at all, so I cannot publish documentation assets to them'\n )\n return\n finally:\n if tmpFileName is not None:\n os.remove(tmpFileName)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RequirementsStep(Step):\n <docstring token>\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot generate requirements'\n )\n return\n argv = ['requirement-report', '--format', 'md', '--organization',\n self.getOwner(), '--repository', self.getRepository(),\n '--output', 'docs/requirements/', '--token', token]\n if not self.assembly.isStable():\n argv.append('--dev')\n generatedFile = invoke(argv).strip()\n if not generatedFile:\n _logger.warn(\n '🤨 Did not get a requirements file from the requirement-report; will skip it'\n )\n return\n commit(generatedFile, 'Update requirements')\n\n\nclass DocPublicationStep(Step):\n\n def getDocDir(self):\n raise NotImplementedError('Subclasses must implement ``getDocDir``')\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot send doc artifacts to GitHub'\n )\n return\n github = github3.login(token=token)\n repo = github.repository(self.getOwner(), self.getRepository())\n tmpFileName = None\n try:\n release = repo.releases().next()\n fd, tmpFileName = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:\n for folder, subdirs, filenames in os.walk(self.getDocDir()):\n for fn in filenames:\n path = os.path.join(folder, fn)\n if os.path.isfile(path):\n zf.write(path, path[len(self.getDocDir()) + 1:])\n for asset in release.assets():\n if asset.name == 'documentation.zip':\n asset.delete()\n break\n with open(tmpFileName, 'rb') as tmpFile:\n release.upload_asset('application/zip', 'documentation.zip',\n tmpFile, 'Documentation (zip)')\n except StopIteration:\n _logger.info(\n '🧐 No releases found at all, so I cannot publish documentation assets to them'\n )\n return\n finally:\n if tmpFileName is not None:\n os.remove(tmpFileName)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass RequirementsStep(Step):\n <docstring token>\n <function token>\n\n\nclass DocPublicationStep(Step):\n\n def getDocDir(self):\n raise NotImplementedError('Subclasses must implement ``getDocDir``')\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot send doc artifacts to GitHub'\n )\n return\n github = github3.login(token=token)\n repo = github.repository(self.getOwner(), self.getRepository())\n tmpFileName = None\n try:\n release = repo.releases().next()\n fd, tmpFileName = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:\n for folder, subdirs, filenames in os.walk(self.getDocDir()):\n for fn in filenames:\n path = os.path.join(folder, fn)\n if os.path.isfile(path):\n zf.write(path, path[len(self.getDocDir()) + 1:])\n for asset in release.assets():\n if asset.name == 'documentation.zip':\n asset.delete()\n break\n with open(tmpFileName, 'rb') as tmpFile:\n release.upload_asset('application/zip', 'documentation.zip',\n tmpFile, 'Documentation (zip)')\n except StopIteration:\n _logger.info(\n '🧐 No releases found at all, so I cannot publish documentation assets to them'\n )\n return\n finally:\n if tmpFileName is not None:\n os.remove(tmpFileName)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass DocPublicationStep(Step):\n\n def getDocDir(self):\n raise NotImplementedError('Subclasses must implement ``getDocDir``')\n\n def execute(self):\n token = self.getToken()\n if not token:\n _logger.info(\n '🤷\\u200d♀️ No GitHub administrative token; cannot send doc artifacts to GitHub'\n )\n return\n github = github3.login(token=token)\n repo = github.repository(self.getOwner(), self.getRepository())\n tmpFileName = None\n try:\n release = repo.releases().next()\n fd, tmpFileName = tempfile.mkstemp('.zip')\n with zipfile.ZipFile(os.fdopen(fd, 'wb'), 'w') as zf:\n for folder, subdirs, filenames in os.walk(self.getDocDir()):\n for fn in filenames:\n path = os.path.join(folder, fn)\n if os.path.isfile(path):\n zf.write(path, path[len(self.getDocDir()) + 1:])\n for asset in release.assets():\n if asset.name == 'documentation.zip':\n asset.delete()\n break\n with open(tmpFileName, 'rb') as tmpFile:\n release.upload_asset('application/zip', 'documentation.zip',\n tmpFile, 'Documentation (zip)')\n except StopIteration:\n _logger.info(\n '🧐 No releases found at all, so I cannot publish documentation assets to them'\n )\n return\n finally:\n if tmpFileName is not None:\n os.remove(tmpFileName)\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass DocPublicationStep(Step):\n\n def getDocDir(self):\n raise NotImplementedError('Subclasses must implement ``getDocDir``')\n <function token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n\n\nclass DocPublicationStep(Step):\n <function token>\n <function token>\n",
"<docstring token>\n<import token>\n<assignment token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n<class token>\n"
] | false |
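Aside: the DocPublicationStep preserved in the steps above fetches the newest release with repo.releases().next(), a Python 2 idiom; on Python 3 the builtin next() is the portable spelling. A trimmed sketch of the asset-replacement logic, using only github3.py calls already present in the original (zip building and error handling elided):

import github3

def replace_doc_asset(token, owner, repository, zip_path):
    # Look up the repository and its most recent release.
    repo = github3.login(token=token).repository(owner, repository)
    release = next(repo.releases())  # Python 3 spelling of .releases().next()
    # Drop any stale documentation asset before uploading the new one.
    for asset in release.assets():
        if asset.name == 'documentation.zip':
            asset.delete()
            break
    with open(zip_path, 'rb') as f:
        release.upload_asset('application/zip', 'documentation.zip', f,
                             'Documentation (zip)')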
989 |
333914f99face050376e4713ca118f2347e50018
|
"""
URL Configuration to test mounting created urls from registries
"""
from django.contrib import admin
from django.urls import include, path
from staticpages.loader import StaticpagesLoader
staticpages_loader = StaticpagesLoader()
urlpatterns = [
path("admin/", admin.site.urls),
# Add base pages urls using the same template
*staticpages_loader.build_urls([
"index",
{
"template_path": "index.html",
"name": "foo",
"extra": "free for use",
},
])
]
# Include another urls map on a sub path
urlpatterns.append(
path("sub/", include("sandbox.staticpages_testapp.sub_urls")),
)
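For readers unfamiliar with django-staticpages: the registry above emits one template-backed URL per entry, but exactly which patterns build_urls produces is up to the library. The hand-written sketch below is only a hypothetical plain-Django equivalent; the URL paths, and the idea that a bare string entry reuses its own name for the template, are assumptions:

from django.urls import path
from django.views.generic import TemplateView

urlpatterns = [
    # bare-string entry: template and URL name both derived from "index" (assumed)
    path("", TemplateView.as_view(template_name="index.html"), name="index"),
    # dict entry: explicit template_path and name; the "extra" data is ignored here
    path("foo/", TemplateView.as_view(template_name="index.html"), name="foo"),
]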
|
[
"\"\"\"\nURL Configuration to test mounting created urls from registries\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import include, path\n\nfrom staticpages.loader import StaticpagesLoader\n\n\nstaticpages_loader = StaticpagesLoader()\n\n\nurlpatterns = [\n path(\"admin/\", admin.site.urls),\n # Add base pages urls using the same template\n *staticpages_loader.build_urls([\n \"index\",\n {\n \"template_path\": \"index.html\",\n \"name\": \"foo\",\n \"extra\": \"free for use\",\n },\n ])\n]\n\n# Include another urls map on a sub path\nurlpatterns.append(\n path(\"sub/\", include(\"sandbox.staticpages_testapp.sub_urls\")),\n)\n",
"<docstring token>\nfrom django.contrib import admin\nfrom django.urls import include, path\nfrom staticpages.loader import StaticpagesLoader\nstaticpages_loader = StaticpagesLoader()\nurlpatterns = [path('admin/', admin.site.urls), *staticpages_loader.\n build_urls(['index', {'template_path': 'index.html', 'name': 'foo',\n 'extra': 'free for use'}])]\nurlpatterns.append(path('sub/', include(\n 'sandbox.staticpages_testapp.sub_urls')))\n",
"<docstring token>\n<import token>\nstaticpages_loader = StaticpagesLoader()\nurlpatterns = [path('admin/', admin.site.urls), *staticpages_loader.\n build_urls(['index', {'template_path': 'index.html', 'name': 'foo',\n 'extra': 'free for use'}])]\nurlpatterns.append(path('sub/', include(\n 'sandbox.staticpages_testapp.sub_urls')))\n",
"<docstring token>\n<import token>\n<assignment token>\nurlpatterns.append(path('sub/', include(\n 'sandbox.staticpages_testapp.sub_urls')))\n",
"<docstring token>\n<import token>\n<assignment token>\n<code token>\n"
] | false |
990 |
5ef7c838d8e9a05a09bd974790a85ff36d56a336
|
import mock
def exc():
print 'here should raise'
def recursion():
try:
print 'here'
return exc()
except StandardError:
print 'exc'
return recursion()
def test_recursion():
global exc
    exc = mock.Mock(side_effect=[StandardError, StandardError, mock.DEFAULT])
recursion()
test_recursion()
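The test works because a Mock with an iterable side_effect consumes one item per call: items that are exception classes (or instances) are raised, while mock.DEFAULT makes that call fall through to the mock's return_value. Since this record is Python 2 (print statements, StandardError), here is a sketch of the same experiment ported to Python 3, with the stdlib unittest.mock and plain Exception standing in:

from unittest import mock

def exc():
    print('here should raise')

def recursion():
    try:
        print('here')
        return exc()
    except Exception:
        print('exc')
        return recursion()

def test_recursion():
    global exc
    # Two raising calls, then mock.DEFAULT yields the mock's return_value.
    exc = mock.Mock(side_effect=[Exception, Exception, mock.DEFAULT])
    recursion()
    assert exc.call_count == 3

test_recursion()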
|
[
"import mock\n\ndef exc():\n print 'here should raise'\n\ndef recursion():\n try:\n print 'here'\n return exc()\n except StandardError:\n print 'exc'\n return recursion()\n\n\ndef test_recursion():\n global exc\n exc = mock.Mock(side_effect = [StandardError, StandardError, mock.DEFAULT])\n recursion()\n\ntest_recursion()"
] | true |
991 |
88b3dd7414a68de65bafb317fbd4da2b1bc933fc
|
import json
def corec_set(parameter, value):
params_fn = "corec_parameters.json"
with open(params_fn) as f:
params = json.load(f)
params[parameter] = value
with open(params_fn, 'w') as f:
json.dump(params, f, indent=4)
def corec_get(parameter):
params_fn = "corec_parameters.json"
with open(params_fn) as f:
params = json.load(f)
if parameter in params:
return params[parameter]
return None
def corec_lock(lock):
locks_fn = "corec_locks.json"
with open(locks_fn) as f:
locks = json.load(f)
locks[lock] = True
with open(locks_fn, 'w') as f:
json.dump(locks, f, indent=4)
def corec_unlock(lock):
locks_fn = "corec_locks.json"
with open(locks_fn) as f:
locks = json.load(f)
locks[lock] = False
with open(locks_fn, 'w') as f:
json.dump(locks, f, indent=4)
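All four helpers repeat the same load-modify-dump cycle on a JSON file, and none of it is atomic, so concurrent writers can lose updates. Factoring the cycle into one function keeps that caveat in a single place; a minimal sketch (json_update is our name, not part of the module above):

import json

def json_update(filename, key, value):
    # Read the whole JSON document, set one key, and write it back.
    # Note: read-modify-write without locking; the last writer wins.
    with open(filename) as f:
        data = json.load(f)
    data[key] = value
    with open(filename, 'w') as f:
        json.dump(data, f, indent=4)

# corec_set(p, v)  -> json_update("corec_parameters.json", p, v)
# corec_lock(l)    -> json_update("corec_locks.json", l, True)
# corec_unlock(l)  -> json_update("corec_locks.json", l, False)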
|
[
"\nimport json\n\ndef corec_set(parameter, value):\n\n\tparams_fn = \"corec_parameters.json\"\n\n\twith open(params_fn) as f:\n\t\tparams = json.load(f)\n\n\tparams[parameter] = value\n\n\twith open(params_fn, 'w') as f:\n\t\tjson.dump(params, f, indent=4)\n\ndef corec_get(parameter):\n\n\tparams_fn = \"corec_parameters.json\"\n\n\twith open(params_fn) as f:\n\t\tparams = json.load(f)\n\n\tif parameter in params:\n\t\treturn params[parameter]\n\n\treturn None\n\ndef corec_lock(lock):\n\tlocks_fn = \"corec_locks.json\"\n\n\twith open(locks_fn) as f:\n\t\tlocks = json.load(f)\n\n\tlocks[lock] = True\n\n\twith open(locks_fn, 'w') as f:\n\t\tjson.dump(locks, f, indent=4)\n\ndef corec_unlock(lock):\n locks_fn = \"corec_locks.json\"\n\n with open(locks_fn) as f:\n locks = json.load(f)\n\n locks[lock] = False\n\n with open(locks_fn, 'w') as f:\n json.dump(locks, f, indent=4)\n\n\n\n",
"import json\n\n\ndef corec_set(parameter, value):\n params_fn = 'corec_parameters.json'\n with open(params_fn) as f:\n params = json.load(f)\n params[parameter] = value\n with open(params_fn, 'w') as f:\n json.dump(params, f, indent=4)\n\n\ndef corec_get(parameter):\n params_fn = 'corec_parameters.json'\n with open(params_fn) as f:\n params = json.load(f)\n if parameter in params:\n return params[parameter]\n return None\n\n\ndef corec_lock(lock):\n locks_fn = 'corec_locks.json'\n with open(locks_fn) as f:\n locks = json.load(f)\n locks[lock] = True\n with open(locks_fn, 'w') as f:\n json.dump(locks, f, indent=4)\n\n\ndef corec_unlock(lock):\n locks_fn = 'corec_locks.json'\n with open(locks_fn) as f:\n locks = json.load(f)\n locks[lock] = False\n with open(locks_fn, 'w') as f:\n json.dump(locks, f, indent=4)\n",
"<import token>\n\n\ndef corec_set(parameter, value):\n params_fn = 'corec_parameters.json'\n with open(params_fn) as f:\n params = json.load(f)\n params[parameter] = value\n with open(params_fn, 'w') as f:\n json.dump(params, f, indent=4)\n\n\ndef corec_get(parameter):\n params_fn = 'corec_parameters.json'\n with open(params_fn) as f:\n params = json.load(f)\n if parameter in params:\n return params[parameter]\n return None\n\n\ndef corec_lock(lock):\n locks_fn = 'corec_locks.json'\n with open(locks_fn) as f:\n locks = json.load(f)\n locks[lock] = True\n with open(locks_fn, 'w') as f:\n json.dump(locks, f, indent=4)\n\n\ndef corec_unlock(lock):\n locks_fn = 'corec_locks.json'\n with open(locks_fn) as f:\n locks = json.load(f)\n locks[lock] = False\n with open(locks_fn, 'w') as f:\n json.dump(locks, f, indent=4)\n",
"<import token>\n\n\ndef corec_set(parameter, value):\n params_fn = 'corec_parameters.json'\n with open(params_fn) as f:\n params = json.load(f)\n params[parameter] = value\n with open(params_fn, 'w') as f:\n json.dump(params, f, indent=4)\n\n\ndef corec_get(parameter):\n params_fn = 'corec_parameters.json'\n with open(params_fn) as f:\n params = json.load(f)\n if parameter in params:\n return params[parameter]\n return None\n\n\ndef corec_lock(lock):\n locks_fn = 'corec_locks.json'\n with open(locks_fn) as f:\n locks = json.load(f)\n locks[lock] = True\n with open(locks_fn, 'w') as f:\n json.dump(locks, f, indent=4)\n\n\n<function token>\n",
"<import token>\n\n\ndef corec_set(parameter, value):\n params_fn = 'corec_parameters.json'\n with open(params_fn) as f:\n params = json.load(f)\n params[parameter] = value\n with open(params_fn, 'w') as f:\n json.dump(params, f, indent=4)\n\n\ndef corec_get(parameter):\n params_fn = 'corec_parameters.json'\n with open(params_fn) as f:\n params = json.load(f)\n if parameter in params:\n return params[parameter]\n return None\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n\n\ndef corec_get(parameter):\n params_fn = 'corec_parameters.json'\n with open(params_fn) as f:\n params = json.load(f)\n if parameter in params:\n return params[parameter]\n return None\n\n\n<function token>\n<function token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n"
] | false |
992 |
095374aa7613f163fedbd7d253219478108d4f42
|
# Celery configuration file
# Use Redis as the message queue
broker_url = "redis://120.78.168.67/10"
CELERY_RESULT_BACKEND = "redis://120.78.168.67/0"
CELERY_TIMEZONE = 'Asia/Shanghai'
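Note that the file mixes the new lowercase setting style (broker_url) with old uppercase names (CELERY_RESULT_BACKEND, CELERY_TIMEZONE); Celery 4+ may refuse such a mix, so settling on one style is safer. A module like this is typically handed to the app with config_from_object; a minimal sketch, assuming the file is importable as celeryconfig:

from celery import Celery

app = Celery('tasks')
# Pull the broker, result-backend, and timezone settings from the module above.
app.config_from_object('celeryconfig')

@app.task
def add(x, y):
    return x + y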
|
[
"# Celery配置文件\n\n# 指定消息队列为Redis\nbroker_url = \"redis://120.78.168.67/10\"\nCELERY_RESULT_BACKEND = \"redis://120.78.168.67/0\"\nCELERY_TIMEZONE = 'Asia/Shanghai'\n",
"broker_url = 'redis://120.78.168.67/10'\nCELERY_RESULT_BACKEND = 'redis://120.78.168.67/0'\nCELERY_TIMEZONE = 'Asia/Shanghai'\n",
"<assignment token>\n"
] | false |
993 |
2c1de638ac25a9f27b1af94fa075b7c1b9df6884
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'PhoneUser.last_contacted'
db.add_column(u'smslink_phoneuser', 'last_contacted',
self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'PhoneUser.last_contacted'
db.delete_column(u'smslink_phoneuser', 'last_contacted')
models = {
u'foodproviders.entryrequirement': {
'Meta': {'object_name': 'EntryRequirement'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'requirement': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2'})
},
u'foodproviders.postcode': {
'Meta': {'unique_together': "(('outward', 'inward'),)", 'object_name': 'PostCode'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inward': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'outward': ('django.db.models.fields.CharField', [], {'max_length': '5', 'db_index': 'True'})
},
u'smslink.phoneuser': {
'Meta': {'object_name': 'PhoneUser'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_contacted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'post_code': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['foodproviders.PostCode']", 'null': 'True', 'blank': 'True'}),
'requirements_satisfied': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['foodproviders.EntryRequirement']", 'symmetrical': 'False'})
}
}
complete_apps = ['smslink']
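South predates Django's built-in migrations, so this file only runs on Django < 1.7 with South installed. For comparison, the equivalent built-in migration on modern Django is much shorter; a sketch (the dependency name is hypothetical):

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('smslink', '0001_initial'),  # hypothetical predecessor migration
    ]

    operations = [
        migrations.AddField(
            model_name='phoneuser',
            name='last_contacted',
            field=models.DateTimeField(null=True, blank=True),
        ),
    ]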
|
[
"# -*- coding: utf-8 -*-\nimport datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n # Adding field 'PhoneUser.last_contacted'\n db.add_column(u'smslink_phoneuser', 'last_contacted',\n self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),\n keep_default=False)\n\n\n def backwards(self, orm):\n # Deleting field 'PhoneUser.last_contacted'\n db.delete_column(u'smslink_phoneuser', 'last_contacted')\n\n\n models = {\n u'foodproviders.entryrequirement': {\n 'Meta': {'object_name': 'EntryRequirement'},\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'requirement': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2'})\n },\n u'foodproviders.postcode': {\n 'Meta': {'unique_together': \"(('outward', 'inward'),)\", 'object_name': 'PostCode'},\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'inward': ('django.db.models.fields.CharField', [], {'max_length': '5'}),\n 'location': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),\n 'outward': ('django.db.models.fields.CharField', [], {'max_length': '5', 'db_index': 'True'})\n },\n u'smslink.phoneuser': {\n 'Meta': {'object_name': 'PhoneUser'},\n u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'last_contacted': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),\n 'number': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),\n 'post_code': ('django.db.models.fields.related.ForeignKey', [], {'to': u\"orm['foodproviders.PostCode']\", 'null': 'True', 'blank': 'True'}),\n 'requirements_satisfied': ('django.db.models.fields.related.ManyToManyField', [], {'to': u\"orm['foodproviders.EntryRequirement']\", 'symmetrical': 'False'})\n }\n }\n\n complete_apps = ['smslink']",
"import datetime\nfrom south.db import db\nfrom south.v2 import SchemaMigration\nfrom django.db import models\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n db.add_column(u'smslink_phoneuser', 'last_contacted', self.gf(\n 'django.db.models.fields.DateTimeField')(null=True, blank=True),\n keep_default=False)\n\n def backwards(self, orm):\n db.delete_column(u'smslink_phoneuser', 'last_contacted')\n models = {u'foodproviders.entryrequirement': {'Meta': {'object_name':\n 'EntryRequirement'}, u'id': ('django.db.models.fields.AutoField', [\n ], {'primary_key': 'True'}), 'requirement': (\n 'django.db.models.fields.CharField', [], {'unique': 'True',\n 'max_length': '2'})}, u'foodproviders.postcode': {'Meta': {\n 'unique_together': \"(('outward', 'inward'),)\", 'object_name':\n 'PostCode'}, u'id': ('django.db.models.fields.AutoField', [], {\n 'primary_key': 'True'}), 'inward': (\n 'django.db.models.fields.CharField', [], {'max_length': '5'}),\n 'location': ('django.contrib.gis.db.models.fields.PointField', [],\n {'null': 'True', 'blank': 'True'}), 'outward': (\n 'django.db.models.fields.CharField', [], {'max_length': '5',\n 'db_index': 'True'})}, u'smslink.phoneuser': {'Meta': {\n 'object_name': 'PhoneUser'}, u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'last_contacted': ('django.db.models.fields.DateTimeField', [], {\n 'null': 'True', 'blank': 'True'}), 'number': (\n 'django.db.models.fields.CharField', [], {'max_length': '20',\n 'db_index': 'True'}), 'post_code': (\n 'django.db.models.fields.related.ForeignKey', [], {'to':\n u\"orm['foodproviders.PostCode']\", 'null': 'True', 'blank': 'True'}),\n 'requirements_satisfied': (\n 'django.db.models.fields.related.ManyToManyField', [], {'to':\n u\"orm['foodproviders.EntryRequirement']\", 'symmetrical': 'False'})}}\n complete_apps = ['smslink']\n",
"<import token>\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n db.add_column(u'smslink_phoneuser', 'last_contacted', self.gf(\n 'django.db.models.fields.DateTimeField')(null=True, blank=True),\n keep_default=False)\n\n def backwards(self, orm):\n db.delete_column(u'smslink_phoneuser', 'last_contacted')\n models = {u'foodproviders.entryrequirement': {'Meta': {'object_name':\n 'EntryRequirement'}, u'id': ('django.db.models.fields.AutoField', [\n ], {'primary_key': 'True'}), 'requirement': (\n 'django.db.models.fields.CharField', [], {'unique': 'True',\n 'max_length': '2'})}, u'foodproviders.postcode': {'Meta': {\n 'unique_together': \"(('outward', 'inward'),)\", 'object_name':\n 'PostCode'}, u'id': ('django.db.models.fields.AutoField', [], {\n 'primary_key': 'True'}), 'inward': (\n 'django.db.models.fields.CharField', [], {'max_length': '5'}),\n 'location': ('django.contrib.gis.db.models.fields.PointField', [],\n {'null': 'True', 'blank': 'True'}), 'outward': (\n 'django.db.models.fields.CharField', [], {'max_length': '5',\n 'db_index': 'True'})}, u'smslink.phoneuser': {'Meta': {\n 'object_name': 'PhoneUser'}, u'id': (\n 'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),\n 'last_contacted': ('django.db.models.fields.DateTimeField', [], {\n 'null': 'True', 'blank': 'True'}), 'number': (\n 'django.db.models.fields.CharField', [], {'max_length': '20',\n 'db_index': 'True'}), 'post_code': (\n 'django.db.models.fields.related.ForeignKey', [], {'to':\n u\"orm['foodproviders.PostCode']\", 'null': 'True', 'blank': 'True'}),\n 'requirements_satisfied': (\n 'django.db.models.fields.related.ManyToManyField', [], {'to':\n u\"orm['foodproviders.EntryRequirement']\", 'symmetrical': 'False'})}}\n complete_apps = ['smslink']\n",
"<import token>\n\n\nclass Migration(SchemaMigration):\n\n def forwards(self, orm):\n db.add_column(u'smslink_phoneuser', 'last_contacted', self.gf(\n 'django.db.models.fields.DateTimeField')(null=True, blank=True),\n keep_default=False)\n\n def backwards(self, orm):\n db.delete_column(u'smslink_phoneuser', 'last_contacted')\n <assignment token>\n <assignment token>\n",
"<import token>\n\n\nclass Migration(SchemaMigration):\n <function token>\n\n def backwards(self, orm):\n db.delete_column(u'smslink_phoneuser', 'last_contacted')\n <assignment token>\n <assignment token>\n",
"<import token>\n\n\nclass Migration(SchemaMigration):\n <function token>\n <function token>\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
994 |
18bad56ff6d230e63e83174672b8aa8625c1ebb4
|
RANGES = {
    # Major scale intervals
    0: [1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1],
    # Natural minor scale intervals
    1: [1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0],
    # Harmonic minor scale intervals
    2: [1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1]
}
RANGES_NAMES = {
'fr': ['Majeur', 'Mineur naturel', 'Mineur harmonique']
}
# Total number of notes (pitch classes) in an octave
N = 12
# Number of notes per scale
N_T = 7
NOTES = {
'fr': ['DO', 'DO#', 'RE', 'RE#', 'MI', 'FA', 'FA#', 'SOL', 'SOL#', 'LA', 'LA#', 'SI']
}
CHORDS = {
'fr': {
0: ['', 'm', 'm', '', '', 'm', 'dim'],
1: ['m', 'dim', '', 'm', 'm', '', ''],
2: ['', 'm', 'm', '', '', 'm', 'dim']
}
}
def get_notes_from_range(r, t):
""" Return all notes from a given range"""
    # build the note membership table, rotated by the keynote t
tab = []
for i in range(N):
n = (i - t)%N
tab.append(RANGES[r][n])
return tab
def get_range_chords(r):
return []
def export_range(res, lg):
notes = [NOTES[lg][(n + res['keynote'] )% 12] for n in range(N) if res['notes'][(n + res['keynote'] )% 12]]
return {
'keynote': NOTES[lg][res['keynote']],
'range': RANGES_NAMES[lg][res['range']],
'notes': notes,
'pourcentage': res['pourcentage']
# 'Accords': [notes[i] + CHORDS[lg][res['range']][i] for i in range(N_T)]
}
def print_range(r):
    # NB: expects the dict produced by export_range
    print r['keynote'] + ' ' + r['range']
    print r['notes']
    print
## processing
def range_ranking(given_notes):
result = []
    # for each keynote:
    for t in range(N):
        # for each mode:
        #for m in range(0, 12):
        # for each scale:
        for r in range(len(RANGES)):
            # reset the score for this candidate
            pourcentage = 0.0
            # fetch all the notes of the scale under consideration
            range_notes = get_notes_from_range(r, t)
            # for each known note:
            for i in given_notes:
                # if the known note belongs to the scale:
                if range_notes[i] == 1:
                    # then increment the score
                    pourcentage += 1
                else:
                    pourcentage -= 1
pourcentage = (pourcentage/len(given_notes)) * 100
result.append({'keynote': t,
# 'mode': m,
'range': r,
'notes': range_notes,
'pourcentage': pourcentage})
return result
def main(notes, lg):
    # Compute the percentage score for every registered range
unsorted_ranking = range_ranking(notes)
sorted_ranking = sorted(unsorted_ranking, key=lambda g: g['pourcentage'], reverse=True)
best_results = [r for r in sorted_ranking if r['pourcentage'] == sorted_ranking[0]['pourcentage']]
return best_results
def get_ranges(given_notes, lg='fr'):
errors = {}
results = []
# Clean user entry
print 'g' + str(given_notes)
notes = [NOTES['fr'].index(n) for n in given_notes]
print 'n' + str(notes)
try:
best_results = main(notes, lg)
except Exception as e:
errors['status'] = 'error'
errors['message'] = e
return errors
errors['status'] = 'success'
errors['message'] = ''
errors['result'] = [export_range(r, lg) for r in best_results]
return errors
if __name__ == '__main__':
    #TODO: Test that arrays have consistent lengths
# Get entry from user
notes = [0, 2, 4, 5, 7, 9, 11]
lg = 'fr'
print [NOTES[lg][i] for i in notes]
print
print "Ces notes correspondent a la gamme:"
#TODO: Clean user entry
best_results = main(notes, lg)
for r in best_results:
print export_range(r, lg)
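A quick worked example of the scoring: with the C major pitch classes [0, 2, 4, 5, 7, 9, 11], every note lands inside C major, giving (7/7)*100 = 100; A natural minor contains exactly the same pitch set and ties at 100, while A harmonic minor misses G and scores (5/7)*100, roughly 71. Since main() keeps every range sharing the top percentage, both ties come back (Python 2, matching the module above):

notes = [0, 2, 4, 5, 7, 9, 11]  # C major / A natural minor pitch classes
for res in main(notes, 'fr'):
    print export_range(res, 'fr')
# expected keynote/range pairs: DO Majeur and LA Mineur naturel, both at 100.0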
|
[
"\nRANGES = {\n # Intervalles de la gamme majeure\n 0: [1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1], \n # Intervalles de la gamme mineure naturelle\n 1: [1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0],\n # Intervalles de la gamme mineure harmonique \n 2: [1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1] \n}\n\nRANGES_NAMES = {\n 'fr': ['Majeur', 'Mineur naturel', 'Mineur harmonique']\n}\n\n# Nombre total de notes\nN = 12\n\n# Nombre de nombre par gamme\nN_T = 7\n\nNOTES = {\n 'fr': ['DO', 'DO#', 'RE', 'RE#', 'MI', 'FA', 'FA#', 'SOL', 'SOL#', 'LA', 'LA#', 'SI']\n}\n\nCHORDS = {\n 'fr': {\n 0: ['', 'm', 'm', '', '', 'm', 'dim'],\n 1: ['m', 'dim', '', 'm', 'm', '', ''], \n 2: ['', 'm', 'm', '', '', 'm', 'dim']\n }\n}\n\ndef get_notes_from_range(r, t):\n \"\"\" Return all notes from a given range\"\"\"\n # calcul du tableau de notes\n tab = []\n for i in range(N): \n n = (i - t)%N\n tab.append(RANGES[r][n])\n \n return tab \n \ndef get_range_chords(r):\n return []\n \n\ndef export_range(res, lg):\n notes = [NOTES[lg][(n + res['keynote'] )% 12] for n in range(N) if res['notes'][(n + res['keynote'] )% 12]]\n return {\n 'keynote': NOTES[lg][res['keynote']], \n 'range': RANGES_NAMES[lg][res['range']], \n 'notes': notes, \n 'pourcentage': res['pourcentage']\n # 'Accords': [notes[i] + CHORDS[lg][res['range']][i] for i in range(N_T)]\n }\n \n \ndef print_range(r):\n print r['Tonique'] + ' ' + r['Gamme']\n print r['Accords']\n print \n \n\n## traitement\ndef range_ranking(given_notes):\n result = []\n\n # pour chaque tonique:\n for t in range(N):\n # pour chaque mode:\n #for m in range(0, 12):\n # pour chaque gamme:\n for r in range(len(RANGES)):\n # re-initialisation du pourcentage\n pourcentage = 0.0\n # obtention de toutes les notes de la gamme consideree\n range_notes = get_notes_from_range(r, t) \n # pour chaque note connue:\n for i in given_notes:\n # si la note connue est dans la gamme:\n if range_notes[i] == 1:\n #alors pourcentage += 1\n pourcentage += 1\n else:\n pourcentage -= 1\n \n pourcentage = (pourcentage/len(given_notes)) * 100\n result.append({'keynote': t, \n # 'mode': m,\n 'range': r,\n 'notes': range_notes,\n 'pourcentage': pourcentage})\n\n return result\n\ndef main(notes, lg):\n # Compute pourcentage for every registered ranges\n unsorted_ranking = range_ranking(notes)\n sorted_ranking = sorted(unsorted_ranking, key=lambda g: g['pourcentage'], reverse=True)\n \n best_results = [r for r in sorted_ranking if r['pourcentage'] == sorted_ranking[0]['pourcentage']]\n return best_results\n\n\ndef get_ranges(given_notes, lg='fr'):\n \n errors = {}\n results = []\n # Clean user entry\n print 'g' + str(given_notes)\n notes = [NOTES['fr'].index(n) for n in given_notes]\n\n print 'n' + str(notes)\n\n try:\n best_results = main(notes, lg)\n except Exception as e:\n errors['status'] = 'error'\n errors['message'] = e\n return errors\n\n errors['status'] = 'success'\n errors['message'] = ''\n errors['result'] = [export_range(r, lg) for r in best_results]\n\n return errors\n\n\nif __name__ == '__main__':\n\n #TODO: Test that arrays have consistents length\n \n # Get entry from user\n notes = [0, 2, 4, 5, 7, 9, 11]\n lg = 'fr'\n print [NOTES[lg][i] for i in notes]\n print\n print \"Ces notes correspondent a la gamme:\"\n \n #TODO: Clean user entry\n\n best_results = main(notes, lg)\n \n for r in best_results:\n print export_range(r, lg)\n\n"
] | true |
995 |
364ac79e0f885c67f2fff57dfe3ddde63f0c269e
|
import os
import unittest
import json
from flask_sqlalchemy import SQLAlchemy
from flaskr import create_app
from models import setup_db, Question
DB_HOST = os.getenv('DB_HOST', '127.0.0.1:5432')
DB_USER = os.getenv('DB_USER', 'postgres')
DB_PASSWORD = os.getenv('DB_PASSWORD', 'postgres')
DB_NAME = os.getenv('DB_NAME', 'trivia_test')
DB_PATH = 'postgresql+psycopg2://{}:{}@{}/{}'.\
format(DB_USER, DB_PASSWORD, DB_HOST, DB_NAME)
class TriviaTestCase(unittest.TestCase):
"""This class represents the trivia test case"""
def setUp(self):
"""Define test variables and initialize app."""
self.app = create_app()
self.client = self.app.test_client
self.database_path = DB_PATH
setup_db(self.app, self.database_path)
self.question_to_delete = Question(
question='What?',
answer='huh!',
category=1,
difficulty=1
)
self.new_question = {
'question': 'What?',
'answer': 'What',
'category': 1,
'difficulty': 1
}
self.quizz = {
'previous_questions': [1, 3],
'quiz_category': {'id': 1, 'type': 'Science'}
}
# binds the app to the current context
with self.app.app_context():
self.db = SQLAlchemy()
self.db.init_app(self.app)
# create all tables
self.db.create_all()
def tearDown(self):
"""Executed after reach test"""
pass
def test_get_categories_if_success(self):
res = self.client().get('/categories')
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertTrue(data['categories'])
def test_get_categories_if_non_existing_category(self):
res = self.client().get('/categories/10000')
data = json.loads(res.data)
self.assertEqual(res.status_code, 404)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'not found')
def test_get_questions_if_success(self):
res = self.client().get('/questions')
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertTrue(data['questions'])
self.assertTrue(data['total_questions'])
self.assertTrue(data['categories'])
self.assertIsNone(data['current_category'])
def test_get_questions_if_invalid_page(self):
res = self.client().get('/questions?page=10000')
data = json.loads(res.data)
self.assertEqual(res.status_code, 404)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'not found')
def test_delete_question_if_success(self):
self.question_to_delete.insert()
res = self.client().delete(f'/questions/{self.question_to_delete.id}')
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertEqual(data['deleted_question'], self.question_to_delete.id)
self.assertTrue(data['questions'])
self.assertTrue(data['total_questions'])
def test_delete_questions_if_non_existing_book(self):
res = self.client().delete('/questions/100000')
data = json.loads(res.data)
self.assertEqual(res.status_code, 422)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'unprocessable')
def test_create_question_if_success(self):
res = self.client().post('/questions', json=self.new_question)
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertTrue(data['created_question'])
def test_create_question_if_bad_endpoint(self):
res = self.client().post('/questions/45', json=self.new_question)
data = json.loads(res.data)
self.assertEqual(res.status_code, 405)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'method not allowed')
def test_search_questions_with_results(self):
res = self.client().post(
'/questions/search', json={'search': 'Van Gogh'}
)
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertTrue(data['total_questions'])
self.assertEqual(len(data['questions']), 1)
def test_search_questions_without_results(self):
res = self.client().post(
'/questions/search', json={'search': 'Weird search'}
)
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertEqual(data['total_questions'], 0)
self.assertEqual(len(data['questions']), 0)
def test_search_questions_failure(self):
res = self.client().post(
'/questions/search', json={'wrong_key': 'Van Gogh'}
)
data = json.loads(res.data)
self.assertEqual(res.status_code, 400)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'bad request')
def test_get_questions_by_category_if_success(self):
res = self.client().get('/categories/1/questions')
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertTrue(data['questions'])
self.assertTrue(data['total_questions'])
self.assertEqual(data['current_category'], 1)
def test_get_questions_by_category_if_failure(self):
res = self.client().get('/categories/10000/questions')
data = json.loads(res.data)
self.assertEqual(res.status_code, 404)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'not found')
def test_get_quizz_question_if_success(self):
res = self.client().post('/quizzes', json=self.quizz)
data = json.loads(res.data)
self.assertEqual(res.status_code, 200)
self.assertEqual(data['success'], True)
self.assertTrue(data['question'])
def test_get_quizz_question_if_bad_request(self):
res = self.client().post('/quizzes/4', json=self.quizz)
data = json.loads(res.data)
self.assertEqual(res.status_code, 404)
self.assertEqual(data['success'], False)
self.assertEqual(data['message'], 'not found')
"""
TODO
Write at least one test for each test for successful
operation and for expected errors.
"""
# Make the tests conveniently executable
if __name__ == "__main__":
unittest.main()
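Every test above repeats the same client-call / json.loads / assert sequence, so further endpoints could be covered with less boilerplate via a small helper; a sketch (get_json is our name, not part of the suite, and it relies on the json import already at the top of the module):

def get_json(client, method, url, **kwargs):
    # Issue a request through a Flask test client and decode the JSON body.
    res = getattr(client, method)(url, **kwargs)
    return res, json.loads(res.data)

# usage inside a test method:
#     res, data = get_json(self.client(), 'post', '/questions', json=self.new_question)
#     self.assertEqual(res.status_code, 200)
#     self.assertEqual(data['success'], True)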
|
[
"import os\nimport unittest\nimport json\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom flaskr import create_app\nfrom models import setup_db, Question\n\nDB_HOST = os.getenv('DB_HOST', '127.0.0.1:5432')\nDB_USER = os.getenv('DB_USER', 'postgres')\nDB_PASSWORD = os.getenv('DB_PASSWORD', 'postgres')\nDB_NAME = os.getenv('DB_NAME', 'trivia_test')\nDB_PATH = 'postgresql+psycopg2://{}:{}@{}/{}'.\\\n format(DB_USER, DB_PASSWORD, DB_HOST, DB_NAME)\n\n\nclass TriviaTestCase(unittest.TestCase):\n \"\"\"This class represents the trivia test case\"\"\"\n\n def setUp(self):\n \"\"\"Define test variables and initialize app.\"\"\"\n self.app = create_app()\n self.client = self.app.test_client\n self.database_path = DB_PATH\n setup_db(self.app, self.database_path)\n\n self.question_to_delete = Question(\n question='What?',\n answer='huh!',\n category=1,\n difficulty=1\n )\n\n self.new_question = {\n 'question': 'What?',\n 'answer': 'What',\n 'category': 1,\n 'difficulty': 1\n }\n\n self.quizz = {\n 'previous_questions': [1, 3],\n 'quiz_category': {'id': 1, 'type': 'Science'}\n }\n\n # binds the app to the current context\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n # create all tables\n self.db.create_all()\n\n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n\n def test_get_categories_if_success(self):\n res = self.client().get('/categories')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['categories'])\n\n def test_get_categories_if_non_existing_category(self):\n res = self.client().get('/categories/10000')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_questions_if_success(self):\n res = self.client().get('/questions')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertTrue(data['categories'])\n self.assertIsNone(data['current_category'])\n\n def test_get_questions_if_invalid_page(self):\n res = self.client().get('/questions?page=10000')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_delete_question_if_success(self):\n self.question_to_delete.insert()\n\n res = self.client().delete(f'/questions/{self.question_to_delete.id}')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['deleted_question'], self.question_to_delete.id)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n\n def test_delete_questions_if_non_existing_book(self):\n res = self.client().delete('/questions/100000')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'unprocessable')\n\n def test_create_question_if_success(self):\n res = self.client().post('/questions', json=self.new_question)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['created_question'])\n\n def test_create_question_if_bad_endpoint(self):\n res = self.client().post('/questions/45', 
json=self.new_question)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 405)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'method not allowed')\n\n def test_search_questions_with_results(self):\n res = self.client().post(\n '/questions/search', json={'search': 'Van Gogh'}\n )\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['total_questions'])\n self.assertEqual(len(data['questions']), 1)\n\n def test_search_questions_without_results(self):\n res = self.client().post(\n '/questions/search', json={'search': 'Weird search'}\n )\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['total_questions'], 0)\n self.assertEqual(len(data['questions']), 0)\n\n def test_search_questions_failure(self):\n res = self.client().post(\n '/questions/search', json={'wrong_key': 'Van Gogh'}\n )\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 400)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'bad request')\n\n def test_get_questions_by_category_if_success(self):\n res = self.client().get('/categories/1/questions')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertEqual(data['current_category'], 1)\n\n def test_get_questions_by_category_if_failure(self):\n res = self.client().get('/categories/10000/questions')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_quizz_question_if_success(self):\n res = self.client().post('/quizzes', json=self.quizz)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['question'])\n\n def test_get_quizz_question_if_bad_request(self):\n res = self.client().post('/quizzes/4', json=self.quizz)\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n \"\"\"\n TODO\n Write at least one test for each test for successful\n operation and for expected errors.\n \"\"\"\n\n\n# Make the tests conveniently executable\nif __name__ == \"__main__\":\n unittest.main()\n",
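The record above is a standard Flask/unittest suite: an app factory, a test_client stored on the test case, and JSON assertions against each endpoint. Below is a minimal self-contained sketch of that pattern; the create_stub_app factory and its single /categories route are hypothetical stand-ins for the real flaskr.create_app and its blueprint.

import json
import unittest

from flask import Flask, jsonify


def create_stub_app():
    # Hypothetical stand-in for flaskr.create_app
    app = Flask(__name__)

    @app.route('/categories')
    def categories():
        return jsonify({'success': True, 'categories': {'1': 'Science'}})

    return app


class StubTestCase(unittest.TestCase):
    def setUp(self):
        self.app = create_stub_app()
        # Stored unbound and called per test, mirroring the suite above
        self.client = self.app.test_client

    def test_get_categories_if_success(self):
        res = self.client().get('/categories')
        data = json.loads(res.data)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(data['success'], True)
        self.assertTrue(data['categories'])


if __name__ == '__main__':
    unittest.main()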
"import os\nimport unittest\nimport json\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flaskr import create_app\nfrom models import setup_db, Question\nDB_HOST = os.getenv('DB_HOST', '127.0.0.1:5432')\nDB_USER = os.getenv('DB_USER', 'postgres')\nDB_PASSWORD = os.getenv('DB_PASSWORD', 'postgres')\nDB_NAME = os.getenv('DB_NAME', 'trivia_test')\nDB_PATH = 'postgresql+psycopg2://{}:{}@{}/{}'.format(DB_USER, DB_PASSWORD,\n DB_HOST, DB_NAME)\n\n\nclass TriviaTestCase(unittest.TestCase):\n \"\"\"This class represents the trivia test case\"\"\"\n\n def setUp(self):\n \"\"\"Define test variables and initialize app.\"\"\"\n self.app = create_app()\n self.client = self.app.test_client\n self.database_path = DB_PATH\n setup_db(self.app, self.database_path)\n self.question_to_delete = Question(question='What?', answer='huh!',\n category=1, difficulty=1)\n self.new_question = {'question': 'What?', 'answer': 'What',\n 'category': 1, 'difficulty': 1}\n self.quizz = {'previous_questions': [1, 3], 'quiz_category': {'id':\n 1, 'type': 'Science'}}\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n self.db.create_all()\n\n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n\n def test_get_categories_if_success(self):\n res = self.client().get('/categories')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['categories'])\n\n def test_get_categories_if_non_existing_category(self):\n res = self.client().get('/categories/10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_questions_if_success(self):\n res = self.client().get('/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertTrue(data['categories'])\n self.assertIsNone(data['current_category'])\n\n def test_get_questions_if_invalid_page(self):\n res = self.client().get('/questions?page=10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_delete_question_if_success(self):\n self.question_to_delete.insert()\n res = self.client().delete(f'/questions/{self.question_to_delete.id}')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['deleted_question'], self.question_to_delete.id)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n\n def test_delete_questions_if_non_existing_book(self):\n res = self.client().delete('/questions/100000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'unprocessable')\n\n def test_create_question_if_success(self):\n res = self.client().post('/questions', json=self.new_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['created_question'])\n\n def test_create_question_if_bad_endpoint(self):\n res = self.client().post('/questions/45', json=self.new_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 405)\n self.assertEqual(data['success'], 
False)\n self.assertEqual(data['message'], 'method not allowed')\n\n def test_search_questions_with_results(self):\n res = self.client().post('/questions/search', json={'search':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['total_questions'])\n self.assertEqual(len(data['questions']), 1)\n\n def test_search_questions_without_results(self):\n res = self.client().post('/questions/search', json={'search':\n 'Weird search'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['total_questions'], 0)\n self.assertEqual(len(data['questions']), 0)\n\n def test_search_questions_failure(self):\n res = self.client().post('/questions/search', json={'wrong_key':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 400)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'bad request')\n\n def test_get_questions_by_category_if_success(self):\n res = self.client().get('/categories/1/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertEqual(data['current_category'], 1)\n\n def test_get_questions_by_category_if_failure(self):\n res = self.client().get('/categories/10000/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_quizz_question_if_success(self):\n res = self.client().post('/quizzes', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['question'])\n\n def test_get_quizz_question_if_bad_request(self):\n res = self.client().post('/quizzes/4', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n \"\"\"\n TODO\n Write at least one test for each test for successful\n operation and for expected errors.\n \"\"\"\n\n\nif __name__ == '__main__':\n unittest.main()\n",
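The second steps entry is the same module after automatic reformatting; its behavior is unchanged. Its three search tests pin down a contract for POST /questions/search: 200 with 'questions' and 'total_questions' when a 'search' key is present, 400 with message 'bad request' otherwise. A sketch of a handler consistent with those assertions follows; the in-memory QUESTIONS list is a hypothetical stand-in for the real database query, not the implementation under test.

from flask import Flask, abort, jsonify, request

app = Flask(__name__)

# Hypothetical in-memory data replacing the real Question model query.
QUESTIONS = [{'id': 18, 'question': 'Which Dutch painter cut off his ear? Van Gogh'}]


@app.route('/questions/search', methods=['POST'])
def search_questions():
    body = request.get_json(silent=True) or {}
    term = body.get('search')
    if term is None:
        abort(400)  # the tests expect 400 for a wrong/missing key
    matches = [q for q in QUESTIONS if term.lower() in q['question'].lower()]
    return jsonify({'success': True,
                    'questions': matches,
                    'total_questions': len(matches)})


@app.errorhandler(400)
def bad_request(_err):
    # Matches the asserted error payload: success=False, message='bad request'
    return jsonify({'success': False, 'message': 'bad request'}), 400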
"<import token>\nDB_HOST = os.getenv('DB_HOST', '127.0.0.1:5432')\nDB_USER = os.getenv('DB_USER', 'postgres')\nDB_PASSWORD = os.getenv('DB_PASSWORD', 'postgres')\nDB_NAME = os.getenv('DB_NAME', 'trivia_test')\nDB_PATH = 'postgresql+psycopg2://{}:{}@{}/{}'.format(DB_USER, DB_PASSWORD,\n DB_HOST, DB_NAME)\n\n\nclass TriviaTestCase(unittest.TestCase):\n \"\"\"This class represents the trivia test case\"\"\"\n\n def setUp(self):\n \"\"\"Define test variables and initialize app.\"\"\"\n self.app = create_app()\n self.client = self.app.test_client\n self.database_path = DB_PATH\n setup_db(self.app, self.database_path)\n self.question_to_delete = Question(question='What?', answer='huh!',\n category=1, difficulty=1)\n self.new_question = {'question': 'What?', 'answer': 'What',\n 'category': 1, 'difficulty': 1}\n self.quizz = {'previous_questions': [1, 3], 'quiz_category': {'id':\n 1, 'type': 'Science'}}\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n self.db.create_all()\n\n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n\n def test_get_categories_if_success(self):\n res = self.client().get('/categories')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['categories'])\n\n def test_get_categories_if_non_existing_category(self):\n res = self.client().get('/categories/10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_questions_if_success(self):\n res = self.client().get('/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertTrue(data['categories'])\n self.assertIsNone(data['current_category'])\n\n def test_get_questions_if_invalid_page(self):\n res = self.client().get('/questions?page=10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_delete_question_if_success(self):\n self.question_to_delete.insert()\n res = self.client().delete(f'/questions/{self.question_to_delete.id}')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['deleted_question'], self.question_to_delete.id)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n\n def test_delete_questions_if_non_existing_book(self):\n res = self.client().delete('/questions/100000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'unprocessable')\n\n def test_create_question_if_success(self):\n res = self.client().post('/questions', json=self.new_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['created_question'])\n\n def test_create_question_if_bad_endpoint(self):\n res = self.client().post('/questions/45', json=self.new_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 405)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'method not allowed')\n\n def test_search_questions_with_results(self):\n res = 
self.client().post('/questions/search', json={'search':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['total_questions'])\n self.assertEqual(len(data['questions']), 1)\n\n def test_search_questions_without_results(self):\n res = self.client().post('/questions/search', json={'search':\n 'Weird search'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['total_questions'], 0)\n self.assertEqual(len(data['questions']), 0)\n\n def test_search_questions_failure(self):\n res = self.client().post('/questions/search', json={'wrong_key':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 400)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'bad request')\n\n def test_get_questions_by_category_if_success(self):\n res = self.client().get('/categories/1/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertEqual(data['current_category'], 1)\n\n def test_get_questions_by_category_if_failure(self):\n res = self.client().get('/categories/10000/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_quizz_question_if_success(self):\n res = self.client().post('/quizzes', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['question'])\n\n def test_get_quizz_question_if_bad_request(self):\n res = self.client().post('/quizzes/4', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n \"\"\"\n TODO\n Write at least one test for each test for successful\n operation and for expected errors.\n \"\"\"\n\n\nif __name__ == '__main__':\n unittest.main()\n",
"<import token>\n<assignment token>\n\n\nclass TriviaTestCase(unittest.TestCase):\n \"\"\"This class represents the trivia test case\"\"\"\n\n def setUp(self):\n \"\"\"Define test variables and initialize app.\"\"\"\n self.app = create_app()\n self.client = self.app.test_client\n self.database_path = DB_PATH\n setup_db(self.app, self.database_path)\n self.question_to_delete = Question(question='What?', answer='huh!',\n category=1, difficulty=1)\n self.new_question = {'question': 'What?', 'answer': 'What',\n 'category': 1, 'difficulty': 1}\n self.quizz = {'previous_questions': [1, 3], 'quiz_category': {'id':\n 1, 'type': 'Science'}}\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n self.db.create_all()\n\n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n\n def test_get_categories_if_success(self):\n res = self.client().get('/categories')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['categories'])\n\n def test_get_categories_if_non_existing_category(self):\n res = self.client().get('/categories/10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_questions_if_success(self):\n res = self.client().get('/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertTrue(data['categories'])\n self.assertIsNone(data['current_category'])\n\n def test_get_questions_if_invalid_page(self):\n res = self.client().get('/questions?page=10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_delete_question_if_success(self):\n self.question_to_delete.insert()\n res = self.client().delete(f'/questions/{self.question_to_delete.id}')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['deleted_question'], self.question_to_delete.id)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n\n def test_delete_questions_if_non_existing_book(self):\n res = self.client().delete('/questions/100000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'unprocessable')\n\n def test_create_question_if_success(self):\n res = self.client().post('/questions', json=self.new_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['created_question'])\n\n def test_create_question_if_bad_endpoint(self):\n res = self.client().post('/questions/45', json=self.new_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 405)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'method not allowed')\n\n def test_search_questions_with_results(self):\n res = self.client().post('/questions/search', json={'search':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['total_questions'])\n self.assertEqual(len(data['questions']), 1)\n\n def 
test_search_questions_without_results(self):\n res = self.client().post('/questions/search', json={'search':\n 'Weird search'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['total_questions'], 0)\n self.assertEqual(len(data['questions']), 0)\n\n def test_search_questions_failure(self):\n res = self.client().post('/questions/search', json={'wrong_key':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 400)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'bad request')\n\n def test_get_questions_by_category_if_success(self):\n res = self.client().get('/categories/1/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertEqual(data['current_category'], 1)\n\n def test_get_questions_by_category_if_failure(self):\n res = self.client().get('/categories/10000/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_quizz_question_if_success(self):\n res = self.client().post('/quizzes', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['question'])\n\n def test_get_quizz_question_if_bad_request(self):\n res = self.client().post('/quizzes/4', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n \"\"\"\n TODO\n Write at least one test for each test for successful\n operation and for expected errors.\n \"\"\"\n\n\nif __name__ == '__main__':\n unittest.main()\n",
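The quiz tests in these entries post 'previous_questions' plus a 'quiz_category' to /quizzes and expect one not-yet-played question back. A sketch of a handler matching that contract, again with a hypothetical in-memory pool standing in for the category-filtered database query:

import random

from flask import Flask, jsonify, request

app = Flask(__name__)

# Hypothetical pool; the real endpoint filters Question rows by category.
QUESTIONS = [
    {'id': 1, 'category': 1, 'question': 'What is H2O?'},
    {'id': 3, 'category': 1, 'question': 'Which planet is third from the sun?'},
    {'id': 5, 'category': 1, 'question': 'Which gas do plants absorb?'},
]


@app.route('/quizzes', methods=['POST'])
def play_quiz():
    body = request.get_json(silent=True) or {}
    previous = body.get('previous_questions', [])
    category = body.get('quiz_category', {}).get('id')
    pool = [q for q in QUESTIONS
            if q['id'] not in previous and q['category'] == category]
    # With previous_questions=[1, 3] and category 1, only id 5 remains.
    question = random.choice(pool) if pool else None
    return jsonify({'success': True, 'question': question})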
"<import token>\n<assignment token>\n\n\nclass TriviaTestCase(unittest.TestCase):\n \"\"\"This class represents the trivia test case\"\"\"\n\n def setUp(self):\n \"\"\"Define test variables and initialize app.\"\"\"\n self.app = create_app()\n self.client = self.app.test_client\n self.database_path = DB_PATH\n setup_db(self.app, self.database_path)\n self.question_to_delete = Question(question='What?', answer='huh!',\n category=1, difficulty=1)\n self.new_question = {'question': 'What?', 'answer': 'What',\n 'category': 1, 'difficulty': 1}\n self.quizz = {'previous_questions': [1, 3], 'quiz_category': {'id':\n 1, 'type': 'Science'}}\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n self.db.create_all()\n\n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n\n def test_get_categories_if_success(self):\n res = self.client().get('/categories')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['categories'])\n\n def test_get_categories_if_non_existing_category(self):\n res = self.client().get('/categories/10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_questions_if_success(self):\n res = self.client().get('/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertTrue(data['categories'])\n self.assertIsNone(data['current_category'])\n\n def test_get_questions_if_invalid_page(self):\n res = self.client().get('/questions?page=10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_delete_question_if_success(self):\n self.question_to_delete.insert()\n res = self.client().delete(f'/questions/{self.question_to_delete.id}')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['deleted_question'], self.question_to_delete.id)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n\n def test_delete_questions_if_non_existing_book(self):\n res = self.client().delete('/questions/100000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'unprocessable')\n\n def test_create_question_if_success(self):\n res = self.client().post('/questions', json=self.new_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['created_question'])\n\n def test_create_question_if_bad_endpoint(self):\n res = self.client().post('/questions/45', json=self.new_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 405)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'method not allowed')\n\n def test_search_questions_with_results(self):\n res = self.client().post('/questions/search', json={'search':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['total_questions'])\n self.assertEqual(len(data['questions']), 1)\n\n def 
test_search_questions_without_results(self):\n res = self.client().post('/questions/search', json={'search':\n 'Weird search'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['total_questions'], 0)\n self.assertEqual(len(data['questions']), 0)\n\n def test_search_questions_failure(self):\n res = self.client().post('/questions/search', json={'wrong_key':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 400)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'bad request')\n\n def test_get_questions_by_category_if_success(self):\n res = self.client().get('/categories/1/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertEqual(data['current_category'], 1)\n\n def test_get_questions_by_category_if_failure(self):\n res = self.client().get('/categories/10000/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_quizz_question_if_success(self):\n res = self.client().post('/quizzes', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['question'])\n\n def test_get_quizz_question_if_bad_request(self):\n res = self.client().post('/quizzes/4', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n \"\"\"\n TODO\n Write at least one test for each test for successful\n operation and for expected errors.\n \"\"\"\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass TriviaTestCase(unittest.TestCase):\n <docstring token>\n\n def setUp(self):\n \"\"\"Define test variables and initialize app.\"\"\"\n self.app = create_app()\n self.client = self.app.test_client\n self.database_path = DB_PATH\n setup_db(self.app, self.database_path)\n self.question_to_delete = Question(question='What?', answer='huh!',\n category=1, difficulty=1)\n self.new_question = {'question': 'What?', 'answer': 'What',\n 'category': 1, 'difficulty': 1}\n self.quizz = {'previous_questions': [1, 3], 'quiz_category': {'id':\n 1, 'type': 'Science'}}\n with self.app.app_context():\n self.db = SQLAlchemy()\n self.db.init_app(self.app)\n self.db.create_all()\n\n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n\n def test_get_categories_if_success(self):\n res = self.client().get('/categories')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['categories'])\n\n def test_get_categories_if_non_existing_category(self):\n res = self.client().get('/categories/10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_questions_if_success(self):\n res = self.client().get('/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertTrue(data['categories'])\n self.assertIsNone(data['current_category'])\n\n def test_get_questions_if_invalid_page(self):\n res = self.client().get('/questions?page=10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_delete_question_if_success(self):\n self.question_to_delete.insert()\n res = self.client().delete(f'/questions/{self.question_to_delete.id}')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['deleted_question'], self.question_to_delete.id)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n\n def test_delete_questions_if_non_existing_book(self):\n res = self.client().delete('/questions/100000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'unprocessable')\n\n def test_create_question_if_success(self):\n res = self.client().post('/questions', json=self.new_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['created_question'])\n\n def test_create_question_if_bad_endpoint(self):\n res = self.client().post('/questions/45', json=self.new_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 405)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'method not allowed')\n\n def test_search_questions_with_results(self):\n res = self.client().post('/questions/search', json={'search':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['total_questions'])\n self.assertEqual(len(data['questions']), 1)\n\n def 
test_search_questions_without_results(self):\n res = self.client().post('/questions/search', json={'search':\n 'Weird search'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['total_questions'], 0)\n self.assertEqual(len(data['questions']), 0)\n\n def test_search_questions_failure(self):\n res = self.client().post('/questions/search', json={'wrong_key':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 400)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'bad request')\n\n def test_get_questions_by_category_if_success(self):\n res = self.client().get('/categories/1/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertEqual(data['current_category'], 1)\n\n def test_get_questions_by_category_if_failure(self):\n res = self.client().get('/categories/10000/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_quizz_question_if_success(self):\n res = self.client().post('/quizzes', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['question'])\n\n def test_get_quizz_question_if_bad_request(self):\n res = self.client().post('/quizzes/4', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <docstring token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass TriviaTestCase(unittest.TestCase):\n <docstring token>\n <function token>\n\n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n\n def test_get_categories_if_success(self):\n res = self.client().get('/categories')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['categories'])\n\n def test_get_categories_if_non_existing_category(self):\n res = self.client().get('/categories/10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_questions_if_success(self):\n res = self.client().get('/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertTrue(data['categories'])\n self.assertIsNone(data['current_category'])\n\n def test_get_questions_if_invalid_page(self):\n res = self.client().get('/questions?page=10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_delete_question_if_success(self):\n self.question_to_delete.insert()\n res = self.client().delete(f'/questions/{self.question_to_delete.id}')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['deleted_question'], self.question_to_delete.id)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n\n def test_delete_questions_if_non_existing_book(self):\n res = self.client().delete('/questions/100000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'unprocessable')\n\n def test_create_question_if_success(self):\n res = self.client().post('/questions', json=self.new_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['created_question'])\n\n def test_create_question_if_bad_endpoint(self):\n res = self.client().post('/questions/45', json=self.new_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 405)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'method not allowed')\n\n def test_search_questions_with_results(self):\n res = self.client().post('/questions/search', json={'search':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['total_questions'])\n self.assertEqual(len(data['questions']), 1)\n\n def test_search_questions_without_results(self):\n res = self.client().post('/questions/search', json={'search':\n 'Weird search'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['total_questions'], 0)\n self.assertEqual(len(data['questions']), 0)\n\n def test_search_questions_failure(self):\n res = self.client().post('/questions/search', json={'wrong_key':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 400)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'bad request')\n\n 
def test_get_questions_by_category_if_success(self):\n res = self.client().get('/categories/1/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertEqual(data['current_category'], 1)\n\n def test_get_questions_by_category_if_failure(self):\n res = self.client().get('/categories/10000/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_quizz_question_if_success(self):\n res = self.client().post('/quizzes', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['question'])\n\n def test_get_quizz_question_if_bad_request(self):\n res = self.client().post('/quizzes/4', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <docstring token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass TriviaTestCase(unittest.TestCase):\n <docstring token>\n <function token>\n\n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n\n def test_get_categories_if_success(self):\n res = self.client().get('/categories')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['categories'])\n\n def test_get_categories_if_non_existing_category(self):\n res = self.client().get('/categories/10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_questions_if_success(self):\n res = self.client().get('/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertTrue(data['categories'])\n self.assertIsNone(data['current_category'])\n\n def test_get_questions_if_invalid_page(self):\n res = self.client().get('/questions?page=10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_delete_question_if_success(self):\n self.question_to_delete.insert()\n res = self.client().delete(f'/questions/{self.question_to_delete.id}')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['deleted_question'], self.question_to_delete.id)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n\n def test_delete_questions_if_non_existing_book(self):\n res = self.client().delete('/questions/100000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'unprocessable')\n <function token>\n\n def test_create_question_if_bad_endpoint(self):\n res = self.client().post('/questions/45', json=self.new_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 405)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'method not allowed')\n\n def test_search_questions_with_results(self):\n res = self.client().post('/questions/search', json={'search':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['total_questions'])\n self.assertEqual(len(data['questions']), 1)\n\n def test_search_questions_without_results(self):\n res = self.client().post('/questions/search', json={'search':\n 'Weird search'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['total_questions'], 0)\n self.assertEqual(len(data['questions']), 0)\n\n def test_search_questions_failure(self):\n res = self.client().post('/questions/search', json={'wrong_key':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 400)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'bad request')\n\n def test_get_questions_by_category_if_success(self):\n res = self.client().get('/categories/1/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n 
self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertEqual(data['current_category'], 1)\n\n def test_get_questions_by_category_if_failure(self):\n res = self.client().get('/categories/10000/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_quizz_question_if_success(self):\n res = self.client().post('/quizzes', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['question'])\n\n def test_get_quizz_question_if_bad_request(self):\n res = self.client().post('/quizzes/4', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <docstring token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass TriviaTestCase(unittest.TestCase):\n <docstring token>\n <function token>\n\n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n\n def test_get_categories_if_success(self):\n res = self.client().get('/categories')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['categories'])\n\n def test_get_categories_if_non_existing_category(self):\n res = self.client().get('/categories/10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_questions_if_success(self):\n res = self.client().get('/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertTrue(data['categories'])\n self.assertIsNone(data['current_category'])\n\n def test_get_questions_if_invalid_page(self):\n res = self.client().get('/questions?page=10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_delete_question_if_success(self):\n self.question_to_delete.insert()\n res = self.client().delete(f'/questions/{self.question_to_delete.id}')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['deleted_question'], self.question_to_delete.id)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n\n def test_delete_questions_if_non_existing_book(self):\n res = self.client().delete('/questions/100000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'unprocessable')\n <function token>\n\n def test_create_question_if_bad_endpoint(self):\n res = self.client().post('/questions/45', json=self.new_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 405)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'method not allowed')\n <function token>\n\n def test_search_questions_without_results(self):\n res = self.client().post('/questions/search', json={'search':\n 'Weird search'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['total_questions'], 0)\n self.assertEqual(len(data['questions']), 0)\n\n def test_search_questions_failure(self):\n res = self.client().post('/questions/search', json={'wrong_key':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 400)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'bad request')\n\n def test_get_questions_by_category_if_success(self):\n res = self.client().get('/categories/1/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertEqual(data['current_category'], 1)\n\n def test_get_questions_by_category_if_failure(self):\n res = self.client().get('/categories/10000/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n 
self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_quizz_question_if_success(self):\n res = self.client().post('/quizzes', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['question'])\n\n def test_get_quizz_question_if_bad_request(self):\n res = self.client().post('/quizzes/4', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <docstring token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass TriviaTestCase(unittest.TestCase):\n <docstring token>\n <function token>\n\n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n\n def test_get_categories_if_success(self):\n res = self.client().get('/categories')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['categories'])\n\n def test_get_categories_if_non_existing_category(self):\n res = self.client().get('/categories/10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_questions_if_success(self):\n res = self.client().get('/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertTrue(data['categories'])\n self.assertIsNone(data['current_category'])\n\n def test_get_questions_if_invalid_page(self):\n res = self.client().get('/questions?page=10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_delete_question_if_success(self):\n self.question_to_delete.insert()\n res = self.client().delete(f'/questions/{self.question_to_delete.id}')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['deleted_question'], self.question_to_delete.id)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n\n def test_delete_questions_if_non_existing_book(self):\n res = self.client().delete('/questions/100000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'unprocessable')\n <function token>\n\n def test_create_question_if_bad_endpoint(self):\n res = self.client().post('/questions/45', json=self.new_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 405)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'method not allowed')\n <function token>\n\n def test_search_questions_without_results(self):\n res = self.client().post('/questions/search', json={'search':\n 'Weird search'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['total_questions'], 0)\n self.assertEqual(len(data['questions']), 0)\n\n def test_search_questions_failure(self):\n res = self.client().post('/questions/search', json={'wrong_key':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 400)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'bad request')\n\n def test_get_questions_by_category_if_success(self):\n res = self.client().get('/categories/1/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertEqual(data['current_category'], 1)\n\n def test_get_questions_by_category_if_failure(self):\n res = self.client().get('/categories/10000/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n 
self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <function token>\n\n def test_get_quizz_question_if_bad_request(self):\n res = self.client().post('/quizzes/4', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <docstring token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass TriviaTestCase(unittest.TestCase):\n <docstring token>\n <function token>\n\n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n\n def test_get_categories_if_success(self):\n res = self.client().get('/categories')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['categories'])\n\n def test_get_categories_if_non_existing_category(self):\n res = self.client().get('/categories/10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_questions_if_success(self):\n res = self.client().get('/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertTrue(data['categories'])\n self.assertIsNone(data['current_category'])\n\n def test_get_questions_if_invalid_page(self):\n res = self.client().get('/questions?page=10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_delete_question_if_success(self):\n self.question_to_delete.insert()\n res = self.client().delete(f'/questions/{self.question_to_delete.id}')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['deleted_question'], self.question_to_delete.id)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n\n def test_delete_questions_if_non_existing_book(self):\n res = self.client().delete('/questions/100000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'unprocessable')\n <function token>\n\n def test_create_question_if_bad_endpoint(self):\n res = self.client().post('/questions/45', json=self.new_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 405)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'method not allowed')\n <function token>\n\n def test_search_questions_without_results(self):\n res = self.client().post('/questions/search', json={'search':\n 'Weird search'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['total_questions'], 0)\n self.assertEqual(len(data['questions']), 0)\n\n def test_search_questions_failure(self):\n res = self.client().post('/questions/search', json={'wrong_key':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 400)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'bad request')\n <function token>\n\n def test_get_questions_by_category_if_failure(self):\n res = self.client().get('/categories/10000/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <function token>\n\n def test_get_quizz_question_if_bad_request(self):\n res = self.client().post('/quizzes/4', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n 
self.assertEqual(data['message'], 'not found')\n <docstring token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass TriviaTestCase(unittest.TestCase):\n <docstring token>\n <function token>\n\n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n <function token>\n\n def test_get_categories_if_non_existing_category(self):\n res = self.client().get('/categories/10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_get_questions_if_success(self):\n res = self.client().get('/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n self.assertTrue(data['categories'])\n self.assertIsNone(data['current_category'])\n\n def test_get_questions_if_invalid_page(self):\n res = self.client().get('/questions?page=10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_delete_question_if_success(self):\n self.question_to_delete.insert()\n res = self.client().delete(f'/questions/{self.question_to_delete.id}')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['deleted_question'], self.question_to_delete.id)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n\n def test_delete_questions_if_non_existing_book(self):\n res = self.client().delete('/questions/100000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'unprocessable')\n <function token>\n\n def test_create_question_if_bad_endpoint(self):\n res = self.client().post('/questions/45', json=self.new_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 405)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'method not allowed')\n <function token>\n\n def test_search_questions_without_results(self):\n res = self.client().post('/questions/search', json={'search':\n 'Weird search'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['total_questions'], 0)\n self.assertEqual(len(data['questions']), 0)\n\n def test_search_questions_failure(self):\n res = self.client().post('/questions/search', json={'wrong_key':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 400)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'bad request')\n <function token>\n\n def test_get_questions_by_category_if_failure(self):\n res = self.client().get('/categories/10000/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <function token>\n\n def test_get_quizz_question_if_bad_request(self):\n res = self.client().post('/quizzes/4', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <docstring token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass TriviaTestCase(unittest.TestCase):\n <docstring token>\n <function token>\n\n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n <function token>\n\n def test_get_categories_if_non_existing_category(self):\n res = self.client().get('/categories/10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <function token>\n\n def test_get_questions_if_invalid_page(self):\n res = self.client().get('/questions?page=10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_delete_question_if_success(self):\n self.question_to_delete.insert()\n res = self.client().delete(f'/questions/{self.question_to_delete.id}')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['deleted_question'], self.question_to_delete.id)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n\n def test_delete_questions_if_non_existing_book(self):\n res = self.client().delete('/questions/100000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'unprocessable')\n <function token>\n\n def test_create_question_if_bad_endpoint(self):\n res = self.client().post('/questions/45', json=self.new_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 405)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'method not allowed')\n <function token>\n\n def test_search_questions_without_results(self):\n res = self.client().post('/questions/search', json={'search':\n 'Weird search'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['total_questions'], 0)\n self.assertEqual(len(data['questions']), 0)\n\n def test_search_questions_failure(self):\n res = self.client().post('/questions/search', json={'wrong_key':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 400)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'bad request')\n <function token>\n\n def test_get_questions_by_category_if_failure(self):\n res = self.client().get('/categories/10000/questions')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <function token>\n\n def test_get_quizz_question_if_bad_request(self):\n res = self.client().post('/quizzes/4', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <docstring token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass TriviaTestCase(unittest.TestCase):\n <docstring token>\n <function token>\n\n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n <function token>\n\n def test_get_categories_if_non_existing_category(self):\n res = self.client().get('/categories/10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <function token>\n\n def test_get_questions_if_invalid_page(self):\n res = self.client().get('/questions?page=10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n\n def test_delete_question_if_success(self):\n self.question_to_delete.insert()\n res = self.client().delete(f'/questions/{self.question_to_delete.id}')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['deleted_question'], self.question_to_delete.id)\n self.assertTrue(data['questions'])\n self.assertTrue(data['total_questions'])\n\n def test_delete_questions_if_non_existing_book(self):\n res = self.client().delete('/questions/100000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'unprocessable')\n <function token>\n\n def test_create_question_if_bad_endpoint(self):\n res = self.client().post('/questions/45', json=self.new_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 405)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'method not allowed')\n <function token>\n\n def test_search_questions_without_results(self):\n res = self.client().post('/questions/search', json={'search':\n 'Weird search'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['total_questions'], 0)\n self.assertEqual(len(data['questions']), 0)\n\n def test_search_questions_failure(self):\n res = self.client().post('/questions/search', json={'wrong_key':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 400)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'bad request')\n <function token>\n <function token>\n <function token>\n\n def test_get_quizz_question_if_bad_request(self):\n res = self.client().post('/quizzes/4', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <docstring token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass TriviaTestCase(unittest.TestCase):\n <docstring token>\n <function token>\n\n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n <function token>\n\n def test_get_categories_if_non_existing_category(self):\n res = self.client().get('/categories/10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <function token>\n\n def test_get_questions_if_invalid_page(self):\n res = self.client().get('/questions?page=10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <function token>\n\n def test_delete_questions_if_non_existing_book(self):\n res = self.client().delete('/questions/100000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'unprocessable')\n <function token>\n\n def test_create_question_if_bad_endpoint(self):\n res = self.client().post('/questions/45', json=self.new_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 405)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'method not allowed')\n <function token>\n\n def test_search_questions_without_results(self):\n res = self.client().post('/questions/search', json={'search':\n 'Weird search'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['total_questions'], 0)\n self.assertEqual(len(data['questions']), 0)\n\n def test_search_questions_failure(self):\n res = self.client().post('/questions/search', json={'wrong_key':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 400)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'bad request')\n <function token>\n <function token>\n <function token>\n\n def test_get_quizz_question_if_bad_request(self):\n res = self.client().post('/quizzes/4', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <docstring token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass TriviaTestCase(unittest.TestCase):\n <docstring token>\n <function token>\n\n def tearDown(self):\n \"\"\"Executed after reach test\"\"\"\n pass\n <function token>\n\n def test_get_categories_if_non_existing_category(self):\n res = self.client().get('/categories/10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <function token>\n\n def test_get_questions_if_invalid_page(self):\n res = self.client().get('/questions?page=10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <function token>\n <function token>\n <function token>\n\n def test_create_question_if_bad_endpoint(self):\n res = self.client().post('/questions/45', json=self.new_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 405)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'method not allowed')\n <function token>\n\n def test_search_questions_without_results(self):\n res = self.client().post('/questions/search', json={'search':\n 'Weird search'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['total_questions'], 0)\n self.assertEqual(len(data['questions']), 0)\n\n def test_search_questions_failure(self):\n res = self.client().post('/questions/search', json={'wrong_key':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 400)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'bad request')\n <function token>\n <function token>\n <function token>\n\n def test_get_quizz_question_if_bad_request(self):\n res = self.client().post('/quizzes/4', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <docstring token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass TriviaTestCase(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n def test_get_categories_if_non_existing_category(self):\n res = self.client().get('/categories/10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <function token>\n\n def test_get_questions_if_invalid_page(self):\n res = self.client().get('/questions?page=10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <function token>\n <function token>\n <function token>\n\n def test_create_question_if_bad_endpoint(self):\n res = self.client().post('/questions/45', json=self.new_question)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 405)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'method not allowed')\n <function token>\n\n def test_search_questions_without_results(self):\n res = self.client().post('/questions/search', json={'search':\n 'Weird search'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['total_questions'], 0)\n self.assertEqual(len(data['questions']), 0)\n\n def test_search_questions_failure(self):\n res = self.client().post('/questions/search', json={'wrong_key':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 400)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'bad request')\n <function token>\n <function token>\n <function token>\n\n def test_get_quizz_question_if_bad_request(self):\n res = self.client().post('/quizzes/4', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <docstring token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass TriviaTestCase(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n def test_get_categories_if_non_existing_category(self):\n res = self.client().get('/categories/10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <function token>\n\n def test_get_questions_if_invalid_page(self):\n res = self.client().get('/questions?page=10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_search_questions_without_results(self):\n res = self.client().post('/questions/search', json={'search':\n 'Weird search'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 200)\n self.assertEqual(data['success'], True)\n self.assertEqual(data['total_questions'], 0)\n self.assertEqual(len(data['questions']), 0)\n\n def test_search_questions_failure(self):\n res = self.client().post('/questions/search', json={'wrong_key':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 400)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'bad request')\n <function token>\n <function token>\n <function token>\n\n def test_get_quizz_question_if_bad_request(self):\n res = self.client().post('/quizzes/4', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <docstring token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass TriviaTestCase(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n def test_get_categories_if_non_existing_category(self):\n res = self.client().get('/categories/10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <function token>\n\n def test_get_questions_if_invalid_page(self):\n res = self.client().get('/questions?page=10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_search_questions_failure(self):\n res = self.client().post('/questions/search', json={'wrong_key':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 400)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'bad request')\n <function token>\n <function token>\n <function token>\n\n def test_get_quizz_question_if_bad_request(self):\n res = self.client().post('/quizzes/4', json=self.quizz)\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <docstring token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass TriviaTestCase(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n <function token>\n\n def test_get_categories_if_non_existing_category(self):\n res = self.client().get('/categories/10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <function token>\n\n def test_get_questions_if_invalid_page(self):\n res = self.client().get('/questions?page=10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_search_questions_failure(self):\n res = self.client().post('/questions/search', json={'wrong_key':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 400)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'bad request')\n <function token>\n <function token>\n <function token>\n <function token>\n <docstring token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass TriviaTestCase(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_get_questions_if_invalid_page(self):\n res = self.client().get('/questions?page=10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_search_questions_failure(self):\n res = self.client().post('/questions/search', json={'wrong_key':\n 'Van Gogh'})\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 400)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'bad request')\n <function token>\n <function token>\n <function token>\n <function token>\n <docstring token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass TriviaTestCase(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n\n def test_get_questions_if_invalid_page(self):\n res = self.client().get('/questions?page=10000')\n data = json.loads(res.data)\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data['success'], False)\n self.assertEqual(data['message'], 'not found')\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <docstring token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n\n\nclass TriviaTestCase(unittest.TestCase):\n <docstring token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <function token>\n <docstring token>\n\n\n<code token>\n",
"<import token>\n<assignment token>\n<class token>\n<code token>\n"
] | false |
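The test class in the record above had its setUp progressively stripped to a function token. A minimal sketch of what such a setUp typically looks like for these Flask tests follows; the module names (flaskr, models), helper signatures, and database URL are assumptions, not part of the record:

import json
import unittest

from flaskr import create_app          # hypothetical module layout
from models import setup_db, Question  # hypothetical helpers, not shown in the record

class TriviaTestCase(unittest.TestCase):
    def setUp(self):
        """Build the app, bind a test client, and create the fixtures
        the tests above reference."""
        self.app = create_app()
        self.client = self.app.test_client  # tests invoke self.client()
        setup_db(self.app, 'postgresql://localhost:5432/trivia_test')  # assumed test DB
        self.question_to_delete = Question(question='Who painted the Starry Night?',
                                           answer='Van Gogh', category=2, difficulty=1)
        self.new_question = {'question': 'Who painted the Starry Night?',
                             'answer': 'Van Gogh', 'category': 2, 'difficulty': 1}
        self.quizz = {'previous_questions': [],
                      'quiz_category': {'id': 2, 'type': 'Art'}}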
996 |
34c81b9318d978305748d413c869a86ee6709e2c
|
# import visual_servoing_utils_main as utils
from autolab_core import rigid_transformations as rt
from yumipy import YuMiState
class YumiConstants:
T_gripper_gripperV = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],
from_frame='gripper', to_frame='obj')
T_rightH_yumi_1 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0, 1, 0]],
translation=[0.6256, -0.15060002, 0.3616],
from_frame='home', to_frame='yumi')
T_rightH_yumi_2 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0, 1, 0]],
translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616],
from_frame='home', to_frame='yumi')
T_rightH_yumi_3 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0, 1, 0]],
translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616 - 0.05],
from_frame='home', to_frame='yumi')
T_leftH_yumi_1 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0, 1, 0]],
translation=[0.52070004, 0.07340001, 0.3574],
from_frame='home', to_frame='yumi')
T_leftH_yumi_2 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0, 1, 0]],
translation=[0.67080003 - 0.15, -0.12650001 + 0.2, 0.35720003],
from_frame='home', to_frame='yumi')
T_board_yumi = rt.RigidTransform(translation=[0.3984, 0, 0.0837],
from_frame='board', to_frame='yumi')
board_center = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],
translation=[0.42971, -0.004, -0.057],
from_frame='yumi', to_frame='world')
T_rightH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],
translation=[0.3984, 0 - 8 * 0.0375, 0.0837],
from_frame='home', to_frame='yumi')
T_leftH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],
translation=[0.3984, 0 + 8 * 0.0375, 0.0837],
from_frame='home', to_frame='yumi')
right_threading_home = YuMiState([101.34, -83.3, 54.01, -44.34, -82.32, -26.22, -76.76])
left_threading_home = YuMiState([-74.73, -70.63, 9.62, 15.86, 65.74, -169.18, 50.61])
right_pickup_home = YuMiState([80.92, -118.47, 39.2, -139.35, 107.91, 4.83, -26.93])
left_pickup_home = YuMiState([-75.32, -114.45, 37.59, 134.52, 102.66, -8.73, 42.77])
|
[
"# import visual_servoing_utils_main as utils\nfrom autolab_core import rigid_transformations as rt\nfrom yumipy import YuMiState\n\nclass YumiConstants:\n\n T_gripper_gripperV = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],\n from_frame='gripper', to_frame='obj')\n\n T_rightH_yumi_1 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0, 1, 0]],\n translation=[0.6256, -0.15060002, 0.3616],\n from_frame='home', to_frame='yumi')\n\n T_rightH_yumi_2 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0, 1, 0]],\n translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616],\n from_frame='home', to_frame='yumi')\n\n T_rightH_yumi_3 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0, 1, 0]],\n translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616 - 0.05],\n from_frame='home', to_frame='yumi')\n\n T_leftH_yumi_1 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0, 1, 0]],\n translation=[0.52070004, 0.07340001, 0.3574],\n from_frame='home', to_frame='yumi')\n\n T_leftH_yumi_2 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0, 1, 0]],\n translation=[0.67080003 - 0.15, -0.12650001 + 0.2, 0.35720003],\n from_frame='home', to_frame='yumi')\n\n T_board_yumi = rt.RigidTransform(translation=[0.3984, 0, 0.0837],\n from_frame='board', to_frame='yumi')\n\n\n board_center = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],\n translation=[0.42971, -0.004, -0.057],\n from_frame='yumi', to_frame='world')\n\n T_rightH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],\n translation=[0.3984, 0 - 8 * 0.0375, 0.0837],\n from_frame='home', to_frame='yumi')\n\n T_leftH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0, -1]],\n translation=[0.3984, 0 + 8 * 0.0375, 0.0837],\n # translation=[0.3984, 0 + 8*0.0375, 0.0837],\n from_frame='home', to_frame='yumi')\n\n right_threading_home = YuMiState([101.34, -83.3, 54.01, -44.34, -82.32, -26.22, -76.76])\n left_threading_home = YuMiState([-74.73, -70.63, 9.62, 15.86, 65.74, -169.18, 50.61])\n\n right_pickup_home = YuMiState([80.92, -118.47, 39.2, -139.35, 107.91, 4.83, -26.93])\n left_pickup_home = YuMiState([-75.32, -114.45, 37.59, 134.52, 102.66, -8.73, 42.77])\n\n\n",
"from autolab_core import rigid_transformations as rt\nfrom yumipy import YuMiState\n\n\nclass YumiConstants:\n T_gripper_gripperV = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0],\n [0, 0, -1]], from_frame='gripper', to_frame='obj')\n T_rightH_yumi_1 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0,\n 1, 0]], translation=[0.6256, -0.15060002, 0.3616], from_frame=\n 'home', to_frame='yumi')\n T_rightH_yumi_2 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0,\n 1, 0]], translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616],\n from_frame='home', to_frame='yumi')\n T_rightH_yumi_3 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0,\n 1, 0]], translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616 - 0.05\n ], from_frame='home', to_frame='yumi')\n T_leftH_yumi_1 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0,\n 1, 0]], translation=[0.52070004, 0.07340001, 0.3574], from_frame=\n 'home', to_frame='yumi')\n T_leftH_yumi_2 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0,\n 1, 0]], translation=[0.67080003 - 0.15, -0.12650001 + 0.2, \n 0.35720003], from_frame='home', to_frame='yumi')\n T_board_yumi = rt.RigidTransform(translation=[0.3984, 0, 0.0837],\n from_frame='board', to_frame='yumi')\n board_center = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0,\n -1]], translation=[0.42971, -0.004, -0.057], from_frame='yumi',\n to_frame='world')\n T_rightH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, \n 0, -1]], translation=[0.3984, 0 - 8 * 0.0375, 0.0837], from_frame=\n 'home', to_frame='yumi')\n T_leftH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0,\n -1]], translation=[0.3984, 0 + 8 * 0.0375, 0.0837], from_frame=\n 'home', to_frame='yumi')\n right_threading_home = YuMiState([101.34, -83.3, 54.01, -44.34, -82.32,\n -26.22, -76.76])\n left_threading_home = YuMiState([-74.73, -70.63, 9.62, 15.86, 65.74, -\n 169.18, 50.61])\n right_pickup_home = YuMiState([80.92, -118.47, 39.2, -139.35, 107.91, \n 4.83, -26.93])\n left_pickup_home = YuMiState([-75.32, -114.45, 37.59, 134.52, 102.66, -\n 8.73, 42.77])\n",
"<import token>\n\n\nclass YumiConstants:\n T_gripper_gripperV = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0],\n [0, 0, -1]], from_frame='gripper', to_frame='obj')\n T_rightH_yumi_1 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0,\n 1, 0]], translation=[0.6256, -0.15060002, 0.3616], from_frame=\n 'home', to_frame='yumi')\n T_rightH_yumi_2 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0,\n 1, 0]], translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616],\n from_frame='home', to_frame='yumi')\n T_rightH_yumi_3 = rt.RigidTransform(rotation=[[0, 0, 1], [1, 0, 0], [0,\n 1, 0]], translation=[0.6256 - 0.1, -0.15060002 + 0.1, 0.3616 - 0.05\n ], from_frame='home', to_frame='yumi')\n T_leftH_yumi_1 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0,\n 1, 0]], translation=[0.52070004, 0.07340001, 0.3574], from_frame=\n 'home', to_frame='yumi')\n T_leftH_yumi_2 = rt.RigidTransform(rotation=[[1, 0, 0], [0, 0, -1], [0,\n 1, 0]], translation=[0.67080003 - 0.15, -0.12650001 + 0.2, \n 0.35720003], from_frame='home', to_frame='yumi')\n T_board_yumi = rt.RigidTransform(translation=[0.3984, 0, 0.0837],\n from_frame='board', to_frame='yumi')\n board_center = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0,\n -1]], translation=[0.42971, -0.004, -0.057], from_frame='yumi',\n to_frame='world')\n T_rightH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, \n 0, -1]], translation=[0.3984, 0 - 8 * 0.0375, 0.0837], from_frame=\n 'home', to_frame='yumi')\n T_leftH_yumi = rt.RigidTransform(rotation=[[-1, 0, 0], [0, 1, 0], [0, 0,\n -1]], translation=[0.3984, 0 + 8 * 0.0375, 0.0837], from_frame=\n 'home', to_frame='yumi')\n right_threading_home = YuMiState([101.34, -83.3, 54.01, -44.34, -82.32,\n -26.22, -76.76])\n left_threading_home = YuMiState([-74.73, -70.63, 9.62, 15.86, 65.74, -\n 169.18, 50.61])\n right_pickup_home = YuMiState([80.92, -118.47, 39.2, -139.35, 107.91, \n 4.83, -26.93])\n left_pickup_home = YuMiState([-75.32, -114.45, 37.59, 134.52, 102.66, -\n 8.73, 42.77])\n",
"<import token>\n\n\nclass YumiConstants:\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n <assignment token>\n",
"<import token>\n<class token>\n"
] | false |
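The record above is a table of named rigid transforms. A short sketch of how they compose with autolab_core's frame-checked multiplication; the grasp pose below is invented for illustration and is not part of the record:

from autolab_core import rigid_transformations as rt

# A board-frame grasp pose (invented). Composing it with T_board_yumi maps it
# into the yumi frame; autolab_core checks that from_frame/to_frame chain
# (other.to_frame must equal self.from_frame) and raises an error otherwise.
T_grasp_board = rt.RigidTransform(translation=[0.0375, 0.0375, 0.0],
                                  from_frame='grasp', to_frame='board')
T_grasp_yumi = YumiConstants.T_board_yumi * T_grasp_board
print(T_grasp_yumi.translation)  # grasp position expressed in the yumi frame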
997 |
04099c46c029af37a08b3861809da13b3cc3153b
|
"""
OBJECTIVE: Given a list, sort it from low to high using the QUICK SORT algorithm
Quicksort first divides a large array into two smaller sub-arrays: the low elements and the high elements.
Quicksort can then recursively sort the sub-arrays.
The steps are:
1. Pick an element, called a pivot, from the array.
2. Partitioning: reorder the array so that all elements with values less than the pivot come before the pivot,
while all elements with values greater than the pivot come after it (equal values can go either way).
After this partitioning, the pivot is in its final position. This is called the partition operation.
3. Recursively apply the above steps to the sub-array of elements with smaller values
and separately to the sub-array of elements with greater values.
The base case of the recursion is arrays of size zero or one, which are in order by definition,
so they never need to be sorted.
https://www.geeksforgeeks.org/quick-sort/
"""
def quick_sort(array: list) -> list:
return []
|
[
"\"\"\"\nOBJECTIVE: Given a list, sort it from low to high using the QUICK SORT algorithm\n\nQuicksort first divides a large array into two smaller sub-arrays: the low elements and the high elements.\nQuicksort can then recursively sort the sub-arrays.\n\nThe steps are:\n\n1. Pick an element, called a pivot, from the array.\n2. Partitioning: reorder the array so that all elements with values less than the pivot come before the pivot,\n while all elements with values greater than the pivot come after it (equal values can go either way).\n After this partitioning, the pivot is in its final position. This is called the partition operation.\n3. Recursively apply the above steps to the sub-array of elements with smaller values\n and separately to the sub-array of elements with greater values.\n\nThe base case of the recursion is arrays of size zero or one, which are in order by definition,\n so they never need to be sorted.\n\nhttps://www.geeksforgeeks.org/quick-sort/\n\"\"\"\n\n\ndef quick_sort(array: list) -> list:\n return []\n",
"<docstring token>\n\n\ndef quick_sort(array: list) ->list:\n return []\n",
"<docstring token>\n<function token>\n"
] | false |
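The record's function body is a stub (return []). A sketch implementing the steps the docstring describes, picking the last element as the pivot as in the linked GeeksforGeeks scheme:

def quick_sort(array: list) -> list:
    # Base case: arrays of size zero or one are already in order.
    if len(array) <= 1:
        return array
    pivot = array[-1]  # step 1: pick a pivot
    # Step 2: partition into less-than, equal, and greater-than sublists;
    # equal values can go either way, so they are grouped with the pivot.
    less = [x for x in array[:-1] if x < pivot]
    equal = [x for x in array if x == pivot]
    greater = [x for x in array[:-1] if x > pivot]
    # Step 3: recursively sort the sub-arrays and concatenate.
    return quick_sort(less) + equal + quick_sort(greater)

assert quick_sort([3, 1, 4, 1, 5, 9, 2, 6]) == [1, 1, 2, 3, 4, 5, 6, 9]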
998 |
9a6d6637cd4ecf2f6e9c8eb8e702be06e83beea4
|
from app import create_app
__author__ = '七月'
app = create_app()
if __name__ == '__main__':
app.run(debug=app.config['DEBUG'])
|
[
"from app import create_app\n\n\n__author__ = '七月'\n\napp = create_app()\n\nif __name__ == '__main__':\n app.run(debug=app.config['DEBUG'])",
"from app import create_app\n__author__ = '七月'\napp = create_app()\nif __name__ == '__main__':\n app.run(debug=app.config['DEBUG'])\n",
"<import token>\n__author__ = '七月'\napp = create_app()\nif __name__ == '__main__':\n app.run(debug=app.config['DEBUG'])\n",
"<import token>\n<assignment token>\nif __name__ == '__main__':\n app.run(debug=app.config['DEBUG'])\n",
"<import token>\n<assignment token>\n<code token>\n"
] | false |
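create_app is imported above but never shown. A minimal sketch of such an application factory; the inline config handling is an assumption that merely mirrors the record's app.config['DEBUG'] lookup:

from flask import Flask

def create_app():
    # Minimal application factory. A real project would load a config
    # module here; DEBUG is set inline only so app.config['DEBUG'] resolves.
    app = Flask(__name__)
    app.config['DEBUG'] = True
    return app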
999 |
f405a3e9ccabbba6719f632eb9c51809b8deb319
|
import boto3
from botocore.exceptions import ClientError
import logging
import subprocess
import string
import random
import time
import os
import sys
import json
from ProgressPercentage import *
def upload_file(file_name, object_name=None):
RESULT_BUCKET_NAME = "worm4047bucket2"
s3_client = get_client('s3')
    # Retry the upload up to five times before giving up.
    max_retries = 5
while max_retries > 0:
try:
response = s3_client.upload_file(file_name, RESULT_BUCKET_NAME, object_name, Callback=ProgressPercentage(file_name))
break
except ClientError as e:
logging.error(e)
max_retries -= 1
return max_retries > 0
def upload_results(object_name, results):
file_name = object_name
with open(file_name, 'w+') as f:
f.write(results)
return upload_file(file_name, object_name)
def get_creds():
global ACCESS_KEY
global SECRET_KEY
global SESSION_TOKEN
global REGION
cred_file = "cred.json"
with open(cred_file) as f:
data = json.load(f)
ACCESS_KEY = data['aws_access_key_id']
SECRET_KEY = data['aws_secret_access_key']
SESSION_TOKEN = data['aws_session_token']
REGION = data['region']
def get_client(type):
global ACCESS_KEY
global SECRET_KEY
global SESSION_TOKEN
global REGION
# return boto3.client(type, region_name=REGION)
return boto3.client(type,aws_access_key_id=ACCESS_KEY,aws_secret_access_key=SECRET_KEY,aws_session_token=SESSION_TOKEN,region_name=REGION)
def get_objects(FILENAME):
logging.info(os.getcwd())
result = dict()
object_set = set()
try:
        with open(FILENAME, 'r') as f:
            temp_data = f.read().split('\n')
data = dict()
currfps = 0
obj_in_frame = []
        # Darknet prints an 'FPS' header per frame followed by one
        # 'label: NN%' line per detection; bucket detections by frame index.
        for lines in temp_data:
lines = lines.replace('\n', "")
if 'FPS' in lines:
if currfps > 0 and len(obj_in_frame) > 0:
                    data[currfps] = obj_in_frame
obj_in_frame = []
currfps += 1
elif '%' in lines:
obj_in_frame.append(lines)
for key in data:
object_map = []
for obj in data[key]:
obj_name, obj_conf = obj.split()
                obj_name = obj_name.replace(':', '')
                object_set.add(obj_name)
                obj_conf = int(obj_conf.replace('%', ''))
                object_map.append({obj_name: obj_conf / 100.0})
            result[key] = object_map
    except Exception as e:
        logging.error(e)
# return {'results' : [result]}
return list(object_set)
if __name__ == '__main__':
ACCESS_KEY, SECRET_KEY, SESSION_TOKEN, REGION = "", "", "", ""
OUTPUT_FILENAME = "results.txt"
PATH_DARKNET = "/home/pi/darknet/"
get_creds()
object_list = get_objects(PATH_DARKNET + OUTPUT_FILENAME)
object_name = sys.argv[1]
results = ""
if len(object_list) == 0:
results = "no object detected"
else:
results = ", ".join(object_list)
# results[sys.argv[1]] = object_list
upload_results(object_name, results)
|
[
"import boto3\nfrom botocore.exceptions import ClientError\nimport logging\nimport subprocess\nimport string\nimport random\nimport time\nimport os\nimport sys\nimport time\nimport json\nfrom ProgressPercentage import *\nimport logging\n\n\n\ndef upload_file(file_name, object_name=None):\n RESULT_BUCKET_NAME = \"worm4047bucket2\"\n s3_client = get_client('s3')\n max_retries = 5\n while max_retries > 0:\n try:\n response = s3_client.upload_file(file_name, RESULT_BUCKET_NAME, object_name, Callback=ProgressPercentage(file_name))\n break\n except ClientError as e:\n logging.error(e)\n max_retries -= 1\n return max_retries > 0\n\ndef upload_results(object_name, results):\n file_name = object_name\n with open(file_name, 'w+') as f:\n f.write(results)\n return upload_file(file_name, object_name)\n\n\ndef get_creds():\n global ACCESS_KEY\n global SECRET_KEY\n global SESSION_TOKEN\n global REGION\n cred_file = \"cred.json\"\n with open(cred_file) as f:\n data = json.load(f)\n ACCESS_KEY = data['aws_access_key_id']\n SECRET_KEY = data['aws_secret_access_key']\n SESSION_TOKEN = data['aws_session_token']\n REGION = data['region']\n\ndef get_client(type):\n global ACCESS_KEY\n global SECRET_KEY\n global SESSION_TOKEN\n global REGION\n # return boto3.client(type, region_name=REGION)\n return boto3.client(type,aws_access_key_id=ACCESS_KEY,aws_secret_access_key=SECRET_KEY,aws_session_token=SESSION_TOKEN,region_name=REGION)\n\n\ndef get_objects(FILENAME):\n logging.info(os.getcwd())\n result = dict()\n object_set = set()\n try:\n f = open(FILENAME, 'r')\n temp_data = f.read().split('\\n')\n data = dict()\n currfps = 0\n obj_in_frame = []\n for lines in temp_data:\n lines = lines.replace('\\n', \"\")\n if 'FPS' in lines:\n if currfps > 0 and len(obj_in_frame) > 0:\n data[currfps] = (obj_in_frame)\n obj_in_frame = []\n currfps += 1\n elif '%' in lines:\n obj_in_frame.append(lines)\n \n\n for key in data:\n object_map = []\n for obj in data[key]:\n obj_name, obj_conf = obj.split()\n \n obj_name = (obj_name.replace(':',''))\n object_set.add(obj_name)\n obj_conf = (int)(obj_conf.replace('%',''))\n object_map.append({obj_name:(obj_conf*1.0)/100})\n result[key] = (object_map)\n except Exception as e:\n pass\n # return {'results' : [result]}\n return list(object_set)\n\nif __name__ == '__main__':\n ACCESS_KEY, SECRET_KEY, SESSION_TOKEN, REGION = \"\", \"\", \"\", \"\"\n OUTPUT_FILENAME = \"results.txt\"\n PATH_DARKNET = \"/home/pi/darknet/\"\n get_creds()\n object_list = get_objects(PATH_DARKNET + OUTPUT_FILENAME)\n object_name = sys.argv[1]\n results = \"\"\n if len(object_list) == 0:\n results = \"no object detected\"\n else:\n results = \", \".join(object_list)\n # results[sys.argv[1]] = object_list\n upload_results(object_name, results)",
"import boto3\nfrom botocore.exceptions import ClientError\nimport logging\nimport subprocess\nimport string\nimport random\nimport time\nimport os\nimport sys\nimport time\nimport json\nfrom ProgressPercentage import *\nimport logging\n\n\ndef upload_file(file_name, object_name=None):\n RESULT_BUCKET_NAME = 'worm4047bucket2'\n s3_client = get_client('s3')\n max_retries = 5\n while max_retries > 0:\n try:\n response = s3_client.upload_file(file_name, RESULT_BUCKET_NAME,\n object_name, Callback=ProgressPercentage(file_name))\n break\n except ClientError as e:\n logging.error(e)\n max_retries -= 1\n return max_retries > 0\n\n\ndef upload_results(object_name, results):\n file_name = object_name\n with open(file_name, 'w+') as f:\n f.write(results)\n return upload_file(file_name, object_name)\n\n\ndef get_creds():\n global ACCESS_KEY\n global SECRET_KEY\n global SESSION_TOKEN\n global REGION\n cred_file = 'cred.json'\n with open(cred_file) as f:\n data = json.load(f)\n ACCESS_KEY = data['aws_access_key_id']\n SECRET_KEY = data['aws_secret_access_key']\n SESSION_TOKEN = data['aws_session_token']\n REGION = data['region']\n\n\ndef get_client(type):\n global ACCESS_KEY\n global SECRET_KEY\n global SESSION_TOKEN\n global REGION\n return boto3.client(type, aws_access_key_id=ACCESS_KEY,\n aws_secret_access_key=SECRET_KEY, aws_session_token=SESSION_TOKEN,\n region_name=REGION)\n\n\ndef get_objects(FILENAME):\n logging.info(os.getcwd())\n result = dict()\n object_set = set()\n try:\n f = open(FILENAME, 'r')\n temp_data = f.read().split('\\n')\n data = dict()\n currfps = 0\n obj_in_frame = []\n for lines in temp_data:\n lines = lines.replace('\\n', '')\n if 'FPS' in lines:\n if currfps > 0 and len(obj_in_frame) > 0:\n data[currfps] = obj_in_frame\n obj_in_frame = []\n currfps += 1\n elif '%' in lines:\n obj_in_frame.append(lines)\n for key in data:\n object_map = []\n for obj in data[key]:\n obj_name, obj_conf = obj.split()\n obj_name = obj_name.replace(':', '')\n object_set.add(obj_name)\n obj_conf = int(obj_conf.replace('%', ''))\n object_map.append({obj_name: obj_conf * 1.0 / 100})\n result[key] = object_map\n except Exception as e:\n pass\n return list(object_set)\n\n\nif __name__ == '__main__':\n ACCESS_KEY, SECRET_KEY, SESSION_TOKEN, REGION = '', '', '', ''\n OUTPUT_FILENAME = 'results.txt'\n PATH_DARKNET = '/home/pi/darknet/'\n get_creds()\n object_list = get_objects(PATH_DARKNET + OUTPUT_FILENAME)\n object_name = sys.argv[1]\n results = ''\n if len(object_list) == 0:\n results = 'no object detected'\n else:\n results = ', '.join(object_list)\n upload_results(object_name, results)\n",
"<import token>\n\n\ndef upload_file(file_name, object_name=None):\n RESULT_BUCKET_NAME = 'worm4047bucket2'\n s3_client = get_client('s3')\n max_retries = 5\n while max_retries > 0:\n try:\n response = s3_client.upload_file(file_name, RESULT_BUCKET_NAME,\n object_name, Callback=ProgressPercentage(file_name))\n break\n except ClientError as e:\n logging.error(e)\n max_retries -= 1\n return max_retries > 0\n\n\ndef upload_results(object_name, results):\n file_name = object_name\n with open(file_name, 'w+') as f:\n f.write(results)\n return upload_file(file_name, object_name)\n\n\ndef get_creds():\n global ACCESS_KEY\n global SECRET_KEY\n global SESSION_TOKEN\n global REGION\n cred_file = 'cred.json'\n with open(cred_file) as f:\n data = json.load(f)\n ACCESS_KEY = data['aws_access_key_id']\n SECRET_KEY = data['aws_secret_access_key']\n SESSION_TOKEN = data['aws_session_token']\n REGION = data['region']\n\n\ndef get_client(type):\n global ACCESS_KEY\n global SECRET_KEY\n global SESSION_TOKEN\n global REGION\n return boto3.client(type, aws_access_key_id=ACCESS_KEY,\n aws_secret_access_key=SECRET_KEY, aws_session_token=SESSION_TOKEN,\n region_name=REGION)\n\n\ndef get_objects(FILENAME):\n logging.info(os.getcwd())\n result = dict()\n object_set = set()\n try:\n f = open(FILENAME, 'r')\n temp_data = f.read().split('\\n')\n data = dict()\n currfps = 0\n obj_in_frame = []\n for lines in temp_data:\n lines = lines.replace('\\n', '')\n if 'FPS' in lines:\n if currfps > 0 and len(obj_in_frame) > 0:\n data[currfps] = obj_in_frame\n obj_in_frame = []\n currfps += 1\n elif '%' in lines:\n obj_in_frame.append(lines)\n for key in data:\n object_map = []\n for obj in data[key]:\n obj_name, obj_conf = obj.split()\n obj_name = obj_name.replace(':', '')\n object_set.add(obj_name)\n obj_conf = int(obj_conf.replace('%', ''))\n object_map.append({obj_name: obj_conf * 1.0 / 100})\n result[key] = object_map\n except Exception as e:\n pass\n return list(object_set)\n\n\nif __name__ == '__main__':\n ACCESS_KEY, SECRET_KEY, SESSION_TOKEN, REGION = '', '', '', ''\n OUTPUT_FILENAME = 'results.txt'\n PATH_DARKNET = '/home/pi/darknet/'\n get_creds()\n object_list = get_objects(PATH_DARKNET + OUTPUT_FILENAME)\n object_name = sys.argv[1]\n results = ''\n if len(object_list) == 0:\n results = 'no object detected'\n else:\n results = ', '.join(object_list)\n upload_results(object_name, results)\n",
"<import token>\n\n\ndef upload_file(file_name, object_name=None):\n RESULT_BUCKET_NAME = 'worm4047bucket2'\n s3_client = get_client('s3')\n max_retries = 5\n while max_retries > 0:\n try:\n response = s3_client.upload_file(file_name, RESULT_BUCKET_NAME,\n object_name, Callback=ProgressPercentage(file_name))\n break\n except ClientError as e:\n logging.error(e)\n max_retries -= 1\n return max_retries > 0\n\n\ndef upload_results(object_name, results):\n file_name = object_name\n with open(file_name, 'w+') as f:\n f.write(results)\n return upload_file(file_name, object_name)\n\n\ndef get_creds():\n global ACCESS_KEY\n global SECRET_KEY\n global SESSION_TOKEN\n global REGION\n cred_file = 'cred.json'\n with open(cred_file) as f:\n data = json.load(f)\n ACCESS_KEY = data['aws_access_key_id']\n SECRET_KEY = data['aws_secret_access_key']\n SESSION_TOKEN = data['aws_session_token']\n REGION = data['region']\n\n\ndef get_client(type):\n global ACCESS_KEY\n global SECRET_KEY\n global SESSION_TOKEN\n global REGION\n return boto3.client(type, aws_access_key_id=ACCESS_KEY,\n aws_secret_access_key=SECRET_KEY, aws_session_token=SESSION_TOKEN,\n region_name=REGION)\n\n\ndef get_objects(FILENAME):\n logging.info(os.getcwd())\n result = dict()\n object_set = set()\n try:\n f = open(FILENAME, 'r')\n temp_data = f.read().split('\\n')\n data = dict()\n currfps = 0\n obj_in_frame = []\n for lines in temp_data:\n lines = lines.replace('\\n', '')\n if 'FPS' in lines:\n if currfps > 0 and len(obj_in_frame) > 0:\n data[currfps] = obj_in_frame\n obj_in_frame = []\n currfps += 1\n elif '%' in lines:\n obj_in_frame.append(lines)\n for key in data:\n object_map = []\n for obj in data[key]:\n obj_name, obj_conf = obj.split()\n obj_name = obj_name.replace(':', '')\n object_set.add(obj_name)\n obj_conf = int(obj_conf.replace('%', ''))\n object_map.append({obj_name: obj_conf * 1.0 / 100})\n result[key] = object_map\n except Exception as e:\n pass\n return list(object_set)\n\n\n<code token>\n",
"<import token>\n\n\ndef upload_file(file_name, object_name=None):\n RESULT_BUCKET_NAME = 'worm4047bucket2'\n s3_client = get_client('s3')\n max_retries = 5\n while max_retries > 0:\n try:\n response = s3_client.upload_file(file_name, RESULT_BUCKET_NAME,\n object_name, Callback=ProgressPercentage(file_name))\n break\n except ClientError as e:\n logging.error(e)\n max_retries -= 1\n return max_retries > 0\n\n\n<function token>\n\n\ndef get_creds():\n global ACCESS_KEY\n global SECRET_KEY\n global SESSION_TOKEN\n global REGION\n cred_file = 'cred.json'\n with open(cred_file) as f:\n data = json.load(f)\n ACCESS_KEY = data['aws_access_key_id']\n SECRET_KEY = data['aws_secret_access_key']\n SESSION_TOKEN = data['aws_session_token']\n REGION = data['region']\n\n\ndef get_client(type):\n global ACCESS_KEY\n global SECRET_KEY\n global SESSION_TOKEN\n global REGION\n return boto3.client(type, aws_access_key_id=ACCESS_KEY,\n aws_secret_access_key=SECRET_KEY, aws_session_token=SESSION_TOKEN,\n region_name=REGION)\n\n\ndef get_objects(FILENAME):\n logging.info(os.getcwd())\n result = dict()\n object_set = set()\n try:\n f = open(FILENAME, 'r')\n temp_data = f.read().split('\\n')\n data = dict()\n currfps = 0\n obj_in_frame = []\n for lines in temp_data:\n lines = lines.replace('\\n', '')\n if 'FPS' in lines:\n if currfps > 0 and len(obj_in_frame) > 0:\n data[currfps] = obj_in_frame\n obj_in_frame = []\n currfps += 1\n elif '%' in lines:\n obj_in_frame.append(lines)\n for key in data:\n object_map = []\n for obj in data[key]:\n obj_name, obj_conf = obj.split()\n obj_name = obj_name.replace(':', '')\n object_set.add(obj_name)\n obj_conf = int(obj_conf.replace('%', ''))\n object_map.append({obj_name: obj_conf * 1.0 / 100})\n result[key] = object_map\n except Exception as e:\n pass\n return list(object_set)\n\n\n<code token>\n",
"<import token>\n\n\ndef upload_file(file_name, object_name=None):\n RESULT_BUCKET_NAME = 'worm4047bucket2'\n s3_client = get_client('s3')\n max_retries = 5\n while max_retries > 0:\n try:\n response = s3_client.upload_file(file_name, RESULT_BUCKET_NAME,\n object_name, Callback=ProgressPercentage(file_name))\n break\n except ClientError as e:\n logging.error(e)\n max_retries -= 1\n return max_retries > 0\n\n\n<function token>\n<function token>\n\n\ndef get_client(type):\n global ACCESS_KEY\n global SECRET_KEY\n global SESSION_TOKEN\n global REGION\n return boto3.client(type, aws_access_key_id=ACCESS_KEY,\n aws_secret_access_key=SECRET_KEY, aws_session_token=SESSION_TOKEN,\n region_name=REGION)\n\n\ndef get_objects(FILENAME):\n logging.info(os.getcwd())\n result = dict()\n object_set = set()\n try:\n f = open(FILENAME, 'r')\n temp_data = f.read().split('\\n')\n data = dict()\n currfps = 0\n obj_in_frame = []\n for lines in temp_data:\n lines = lines.replace('\\n', '')\n if 'FPS' in lines:\n if currfps > 0 and len(obj_in_frame) > 0:\n data[currfps] = obj_in_frame\n obj_in_frame = []\n currfps += 1\n elif '%' in lines:\n obj_in_frame.append(lines)\n for key in data:\n object_map = []\n for obj in data[key]:\n obj_name, obj_conf = obj.split()\n obj_name = obj_name.replace(':', '')\n object_set.add(obj_name)\n obj_conf = int(obj_conf.replace('%', ''))\n object_map.append({obj_name: obj_conf * 1.0 / 100})\n result[key] = object_map\n except Exception as e:\n pass\n return list(object_set)\n\n\n<code token>\n",
"<import token>\n\n\ndef upload_file(file_name, object_name=None):\n RESULT_BUCKET_NAME = 'worm4047bucket2'\n s3_client = get_client('s3')\n max_retries = 5\n while max_retries > 0:\n try:\n response = s3_client.upload_file(file_name, RESULT_BUCKET_NAME,\n object_name, Callback=ProgressPercentage(file_name))\n break\n except ClientError as e:\n logging.error(e)\n max_retries -= 1\n return max_retries > 0\n\n\n<function token>\n<function token>\n\n\ndef get_client(type):\n global ACCESS_KEY\n global SECRET_KEY\n global SESSION_TOKEN\n global REGION\n return boto3.client(type, aws_access_key_id=ACCESS_KEY,\n aws_secret_access_key=SECRET_KEY, aws_session_token=SESSION_TOKEN,\n region_name=REGION)\n\n\n<function token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n\n\ndef get_client(type):\n global ACCESS_KEY\n global SECRET_KEY\n global SESSION_TOKEN\n global REGION\n return boto3.client(type, aws_access_key_id=ACCESS_KEY,\n aws_secret_access_key=SECRET_KEY, aws_session_token=SESSION_TOKEN,\n region_name=REGION)\n\n\n<function token>\n<code token>\n",
"<import token>\n<function token>\n<function token>\n<function token>\n<function token>\n<function token>\n<code token>\n"
] | false |
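ProgressPercentage is star-imported in the record but never defined there. The class below follows the upload-callback pattern from the boto3 documentation, so treat it as a stand-in under that assumption rather than the project's actual implementation:

import os
import sys
import threading

class ProgressPercentage(object):
    # boto3 invokes the callback with the number of bytes transferred so
    # far; it must be thread-safe because uploads are chunked across threads.
    def __init__(self, filename):
        self._filename = filename
        self._size = float(os.path.getsize(filename))
        self._seen_so_far = 0
        self._lock = threading.Lock()

    def __call__(self, bytes_amount):
        with self._lock:
            self._seen_so_far += bytes_amount
            percentage = self._seen_so_far / self._size * 100
            sys.stdout.write('\r%s  %s / %s  (%.2f%%)' % (
                self._filename, self._seen_so_far, self._size, percentage))
            sys.stdout.flush()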